gt
stringclasses 1
value | context
stringlengths 2.49k
119k
|
|---|---|
# coding=utf-8
"""
Concurrency support for pyRobot.
This module provides:
- an implementation of :class:`SignalingThread` (threads that explicitly
handle signals like cancellation)
- heavily modified Python futures to support robot action management.
- A future executor that simply spawn one thread per future (action) instead of
a thread pool.
These objects should not be directly used. Users should instead rely on the
:meth:`~robots.concurrency.action.action` decorator.
Helpful debugging commands::
>>> sys._current_frames()
>>> inspect.getouterframes(sys._current_frames()[<id>])[0][0].f_locals
"""
import logging; logger = logging.getLogger("robots.actions")
import sys
import uuid
# Maximum number of actions holding a resource before submit() refuses new ones.
MAX_FUTURES = 20
MAX_TIME_TO_COMPLETE = 1 # sec: time allowed to tasks to complete when cancelled. If they take more than that, force termination.
# Polling period used by RobotAction.result() so the trace-based signals stay deliverable.
ACTIVE_SLEEP_RESOLUTION = 0.1 # sec
try:
from concurrent.futures import Future, TimeoutError
except ImportError:
import sys
sys.stderr.write("[error] install python-concurrent.futures\n")
sys.exit(1)
import os.path #for basename
import weakref
import threading
import thread # for get_ident
from collections import deque
import traceback
from .signals import ActionCancelled, ActionPaused
class SignalingThread(threading.Thread):
    """A thread into which cancel/pause 'signals' can be injected asynchronously.

    A trace function installed with ``sys.settrace`` checks two flags at every
    traced event and raises :class:`ActionCancelled` / :class:`ActionPaused`
    inside the thread's own execution flow when they are set.
    """

    def __init__(self, *args, **kwargs):
        threading.Thread.__init__(self, *args, **kwargs)
        # Trace function of an attached debugger (if any); preserved so that
        # debugging keeps working alongside our own trace hook.
        self.debugger_trace = None

    def cancel(self):
        # Ask the thread to raise ActionCancelled at its next traced event.
        self.__cancel = True

    def pause(self):
        # Ask the thread to raise ActionPaused at its next traced event.
        self.__pause = True

    def _Thread__bootstrap(self):
        """ The name come from Python name mangling for
        __double_leading_underscore_names
        Note that in Python3, __bootstrap becomes _bootstrap, thus
        making it easier to override.
        """
        # Remember a debugger-installed trace hook so we can chain to it
        # from our own trace function.
        if threading._trace_hook is not None:
            self.debugger_trace = threading._trace_hook
        else:
            self.debugger_trace = None
        self.__cancel = False
        self.__pause = False
        # Install the trace function in this thread: it is the vehicle used
        # to raise cancel/pause exceptions asynchronously.
        sys.settrace(self.__signal_emitter)
        self.name = "Ranger action thread (initialization)"
        super(SignalingThread, self)._Thread__bootstrap()

    def __signal_emitter(self, frame, event, arg):
        # Invoked by the interpreter on every traced event (call/line/...).
        if self.__cancel:
            if frame.f_globals["__name__"] == "threading":
                # Raising exception at uncontrolled time is a dangerous sport,
                # especially if the thread is in the middle of locking/unlocking shared resources
                # like (in our case) setting result in futures and reading them.
                # After thinking about it for a day, I could not find any good solution except for
                # postponing raising the signals until out of the threading module. This is not
                # very nice, but seem to work out well.
                pass
            else:
                self.__cancel = False
                # Log a short stack summary before raising, to ease debugging.
                desc = "Cancelling thread <%s>:\n" % self.name
                tb = traceback.extract_stack(frame, limit = 6)
                for f in tb:
                    file, line, fn, instruction = f
                    desc += " - in <%s> (l.%s of %s): %s\n" % (fn, line, os.path.basename(file), instruction)
                logger.debug(desc)
                raise ActionCancelled()
        if self.__pause:
            self.__pause = False
            logger.debug("Pausing thread <%s>" % self.name)
            raise ActionPaused()
        # Chain to the debugger's trace function if any, else keep tracing
        # with ourselves.
        if self.debugger_trace:
            return self.debugger_trace
        else:
            return self.__signal_emitter
class RobotActionThread(SignalingThread):
    """Thread executing one robot action and storing its outcome in a future.

    The ``initialized`` event is set at the very beginning of :meth:`run`, so
    the submitter can wait until the thread has effectively started before
    registering the future.
    """

    def __init__(self, future, initialized, fn, args, kwargs):
        """
        :param future: the RobotAction future that will hold the result
        :param initialized: a threading.Event, set once the thread is running
        :param fn: the action callable, invoked as fn(future, name, *args, **kwargs)
        :param args: positional arguments forwarded to fn
        :param kwargs: keyword arguments forwarded to fn
        """
        SignalingThread.__init__(self)
        # Bug fix: the event used to be set right here, before the thread was
        # even started, which made the executor's "wait for thread start"
        # loop a no-op. It is now set in run(), once the thread is executing.
        self.initialized = initialized
        self.future = future
        self.fn = fn
        self.args = args
        self.kwargs = kwargs

    def run(self):
        # Signal the submitter that the thread has actually started.
        self.initialized.set()
        if not self.future.set_running_or_notify_cancel():
            # The future was cancelled before it started running.
            return
        try:
            result = self.fn(self.future, str(self.future), *self.args, **self.kwargs)
            self.future.set_result(result)
            logger.debug("Action <%s>: completed." % str(self.future))
        except BaseException:
            # BaseException on purpose: cancellation/pause signals are raised
            # asynchronously inside this thread and must land in the future.
            e = sys.exc_info()[1]
            logger.error("Exception in action <%s>: %s" % (str(self.future), e))
            logger.error(traceback.format_exc())
            self.future.set_exception(e)
class RobotAction(Future):
    """A Future representing one robot action, executed in a dedicated thread.

    On top of the regular Future contract it tracks:
    - a weak reference to the thread executing the action,
    - parent/child links between actions (for cascading cancellation),
    - a unique id and a human-readable action name.
    """
    def __init__(self, actionname):
        Future.__init__(self)
        self.actionname = actionname
        # weakref.ref to the RobotActionThread (set via set_thread); a dead
        # reference means the action has finished.
        self.thread = None
        self.id = uuid.uuid4()
        # weakrefs to sub-actions spawned from within this action
        self.subactions = []
        # weakref to the parent RobotAction, or None for top-level actions
        self.parent_action = None
        self.has_acquired_resource = False
    def add_subaction(self, action):
        """Registers a (weakly referenced) sub-action spawned by this action."""
        # Prune entries whose action or executing thread has been collected.
        self.subactions = [a for a in self.subactions if a() is not None and a().thread() is not None]
        self.subactions.append(action)
        logger.debug("Added sub-action %s to action %s" % (str(action()), str(self)))
    def set_parent(self, action):
        # 'action' is a weakref to the parent RobotAction
        self.parent_action = action
    def childof(self, action):
        """ Returns true if this action is a child of the given action, ie, has
        been spawned from the given action or any of its descendants.

        NOTE(review): assumes set_parent() was called; for a top-level action
        parent_action is None and the call below would raise TypeError.
        """
        parent = self.parent_action() # weakref!
        if parent is None:
            return False
        if parent is action:
            return True
        return parent.childof(action)
    def set_thread(self, thread):
        # 'thread' is a weakref to the RobotActionThread running this action
        self.thread = thread
    def cancel(self):
        """Cancels this action and its sub-actions, then waits for completion.

        Raises RuntimeError if the action is still running after
        MAX_TIME_TO_COMPLETE seconds.
        """
        # we do not call the 'standard' Future.cancel method since we do not have a thread pool (ie,
        # the future can not be 'pending for execution'), which is the only useful usecase (well, with callback on
        # cancellation, I imagine... so those are not supported for now)
        thread = self.thread() # weakref!
        if thread is None:
            logger.debug("Action <%s>: already done" % self)
            return
        # first, cancel myself (to make sure I won't restart subactions)
        logger.debug("Action <%s>: signaling cancelation to action's thread" % self)
        thread.cancel()
        # then, tell all the subactions that they should stop
        # (can not do that in the thread's cancel (_signal_emitter), because the
        # thread may hold locks that are not released until the exception is raised and
        # the context manager are left)
        logger.debug("Action <%s>: %s subactions to cancel" % (self, len(self.subactions)))
        for weak_subaction in self.subactions:
            subaction = weak_subaction()
            if subaction:
                logger.debug("Action <%s>: Cancelling subaction %s..." % (self, subaction))
                subaction.cancel()
        # then, make sure everybody actually terminates
        logger.debug("Action <%s>: now waiting for completion" % self)
        try:
            self.exception(timeout = MAX_TIME_TO_COMPLETE) # waits this amount of time for the task to effectively complete
        except TimeoutError:
            raise RuntimeError("Unable to cancel action %s (still running %s after cancellation)!" % (self, MAX_TIME_TO_COMPLETE))
        logger.debug("Action <%s>: successfully cancelled" % self)
    def result(self):
        """Blocks until the action completes, then returns its result.

        NOTE(review): overrides Future.result() without the base class'
        optional 'timeout' parameter.
        """
        # Rename the waiting thread so thread listings show what it waits on.
        if self.parent_action and self.parent_action():
            threading.current_thread().name = "Action %s (waiting for sub-action %s)" % (self.parent_action(), self)
        else:
            threading.current_thread().name = "Main thread (waiting for sub-action %s)" % self
        # active wait! Instead of blocking on the condition variable in super.result()
        # we do an active wait to make sure we can cancel/suspend the action via our
        # __signal_emitter trace function
        while True:
            try:
                return super(RobotAction, self).result(ACTIVE_SLEEP_RESOLUTION)
            except TimeoutError:
                pass
    def wait(self):
        """ alias for result()
        """
        return self.result()
    def __lt__(self, other):
        """ Comparison operators first wait for the action's result, then
        delegate the comparison to that result.
        """
        return self.result().__lt__(other)
    def __le__(self, other):
        return self.result().__le__(other)
    def __eq__(self, other):
        # NOTE(review): __eq__ without __hash__ -- fine on Python 2 (which
        # this module targets), but would make instances unhashable on py3.
        return self.result().__eq__(other)
    def __ne__(self, other):
        return self.result().__ne__(other)
    def __gt__(self, other):
        return self.result().__gt__(other)
    def __ge__(self, other):
        return self.result().__ge__(other)
    def __repr__(self):
        return str(self.id)
    def __str__(self):
        return self.actionname + "[" + self.__repr__() + "]"
class FakeFuture:
    """Trivial stand-in for a RobotAction, used in the 'immediate' mode.

    Wraps an already-computed value and exposes the same result()/wait()
    accessors as a real action future.
    """
    def __init__(self, result):
        self._result = result

    def result(self):
        # The value was computed synchronously; just hand it back.
        return self._result

    def wait(self):
        # Alias of result(), mirroring the RobotAction interface.
        return self._result
class RobotActionExecutor():
    """Executor spawning one RobotActionThread per submitted action.

    There is deliberately no thread pool: each action gets its own thread.
    The class must be thread-safe: every access to the shared list of
    futures is guarded by ``futures_lock``.
    """

    def __init__(self):
        # RobotAction futures currently known (pruned of done ones on submit)
        self.futures = []
        self.futures_lock = threading.Lock()

    @staticmethod
    def _action_name(fn, args, kwargs):
        """Builds a readable name like ``fn(a, b, k=v)`` for logging.

        args[0] is skipped: it is the robot instance.
        """
        name = fn.__name__
        positional = ", ".join([str(a) for a in args[1:]])
        keywords = ", ".join(["%s=%s" % (str(k), str(v)) for k, v in kwargs.items()])
        if args and not kwargs:
            name += "(%s)" % positional
        elif kwargs and not args:
            name += "(%s)" % keywords
        elif args and kwargs:
            name += "(%s, " % positional
            name += "%s)" % keywords
        return name

    def submit(self, fn, *args, **kwargs):
        """Starts ``fn`` in a dedicated thread and returns its RobotAction.

        Raises RuntimeError when more than MAX_FUTURES actions currently
        hold a resource (likely an application logic bug).
        """
        with self.futures_lock:
            # Prune futures that have already completed.
            self.futures = [f for f in self.futures if not f.done()]
            if len([f for f in self.futures if f.has_acquired_resource]) > MAX_FUTURES:
                raise RuntimeError("You have more than %s actions running in parallel! Likely a bug in your application logic!" % MAX_FUTURES)

        f = RobotAction(self._action_name(fn, args, kwargs))
        initialized = threading.Event()
        t = RobotActionThread(f, initialized, fn, args, kwargs)
        # Only a weak reference is kept: a collected thread means the action
        # is over.
        f.set_thread(weakref.ref(t))

        # If submit() is called from within another action, record the
        # parent/child relationship (used for cascading cancellation).
        # Must be done outside futures_lock: get_current_action() takes it.
        current_action = self.get_current_action()
        if current_action:
            f.set_parent(weakref.ref(current_action))
            current_action.add_subaction(weakref.ref(f))

        t.start()
        # Wait for the thread to actually start; a blocking Event.wait()
        # replaces the original busy loop.
        initialized.wait()

        with self.futures_lock:
            self.futures.append(f)
        return f

    def get_current_action(self):
        """Returns the RobotAction linked to the current thread.

        Returns None when the calling thread is not an action thread
        (e.g. the main thread).
        """
        thread_id = threading.current_thread().ident
        with self.futures_lock:
            for f in self.futures:
                if not f.done():
                    thread = f.thread() # weak ref
                    if thread is not None and thread.ident == thread_id:
                        return f
        logger.debug("The current thread (<%s>) is not a robot action (main thread?)" % threading.current_thread().name)
        return None

    def cancel_all(self):
        """ Blocks until all the currently running actions are actually stopped.
        """
        with self.futures_lock:
            for f in self.futures:
                if not f.done():
                    f.cancel()
            self.futures = []

    def cancel_all_others(self):
        """ Blocks until all the currently running actions *except the calling
        one* are actually stopped.
        """
        thread_id = threading.current_thread().ident
        with self.futures_lock:
            # Bug fix: 'myself' used to be left unbound (NameError below)
            # when the calling thread is not one of the registered actions.
            myself = None
            for f in self.futures:
                if not f.done():
                    thread = f.thread() # weak ref
                    if thread is not None and thread.ident == thread_id:
                        myself = f
                        continue
                    f.cancel()
            self.futures = [myself] if myself is not None else []

    def actioninfo(self, future_id):
        """Returns a human-readable stack summary for the given action id."""
        with self.futures_lock:
            matching = [f for f in self.futures if id(f) == future_id]
            if not matching:
                return "No task with ID %s. Maybe the task is already done?" % future_id
            future = matching[0]
            desc = "Task <%s>\n" % future
            thread = future.thread() # weak ref
            if thread:
                frame = sys._current_frames()[thread.ident]
                tb = traceback.extract_stack(frame, limit = 6)
                for f in tb:
                    file, line, fn, instruction = f
                    desc += " - in <%s> (l.%s of %s): %s\n" % (fn, line, os.path.basename(file), instruction)
                return desc
            else:
                return "Task ID %s is already done." % future_id

    def __str__(self):
        with self.futures_lock:
            return "Running tasks:\n" + \
                   "\n".join(["Task %s (id: %s, thread: <%s>)" % (f, id(f), str(f.thread())) for f in self.futures if not f.done()])
|
|
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Utilities for converting between v3 and v1 datastore protocol buffers.
This module is internal and should not be used by client applications.
"""
from google.appengine.datastore import entity_pb
from google.appengine.datastore import datastore_v4_pb
from google.appengine.datastore import entity_v4_pb
# Minimum googledatastore client version able to serve the Cloud Datastore API.
_MIN_CLOUD_DATASTORE_VERSION = (4, 0, 0, 'b1')
_CLOUD_DATASTORE_ENABLED = False
# Enable Cloud Datastore support only when a recent-enough googledatastore
# client is importable; AttributeError covers clients lacking a VERSION.
try:
  import googledatastore
  if googledatastore.VERSION >= _MIN_CLOUD_DATASTORE_VERSION:
    _CLOUD_DATASTORE_ENABLED = True
except ImportError:
  pass
except AttributeError:
  pass
MISSING_CLOUD_DATASTORE_MESSAGE = (
    'Could not import googledatastore. This library must be installed with '
    'version >= %s to use the Cloud Datastore API.' %
    '.'.join([str(v) for v in _MIN_CLOUD_DATASTORE_VERSION]))

# Numeric 'meaning' codes used on v3/v4 property values during conversion.
MEANING_ATOM_CATEGORY = 1
MEANING_URL = 2
MEANING_ATOM_TITLE = 3
MEANING_ATOM_CONTENT = 4
MEANING_ATOM_SUMMARY = 5
MEANING_ATOM_AUTHOR = 6
MEANING_GD_EMAIL = 8
MEANING_GEORSS_POINT = 9
MEANING_GD_IM = 10
MEANING_GD_PHONENUMBER = 11
MEANING_GD_POSTALADDRESS = 12
MEANING_PERCENT = 13
MEANING_TEXT = 15
MEANING_BYTESTRING = 16
MEANING_BLOBKEY = 17
MEANING_INDEX_ONLY = 18
MEANING_PREDEFINED_ENTITY_USER = 20
MEANING_PREDEFINED_ENTITY_POINT = 21
MEANING_ZLIB = 22
MEANING_POINT_WITHOUT_V3_MEANING = 23
MEANING_EMPTY_LIST = 24
# meaning_uri marker identifying zlib-compressed blob values.
URI_MEANING_ZLIB = 'ZLIB'
# Size limits applied during validation/conversion.
MAX_URL_CHARS = 2083
MAX_INDEXED_STRING_CHARS = 500
MAX_INDEXED_BLOB_BYTES = 500
MAX_PARTITION_ID_LENGTH = 100
MAX_DATASET_ID_SECTION_LENGTH = 100
# Three dataset-id sections plus two separator characters.
MAX_DATASET_ID_LENGTH = MAX_DATASET_ID_SECTION_LENGTH * 3 + 2
MAX_KEY_PATH_LENGTH = 100
# Property names used by the predefined point and user entities.
PROPERTY_NAME_X = 'x'
PROPERTY_NAME_Y = 'y'
PROPERTY_NAME_EMAIL = 'email'
PROPERTY_NAME_AUTH_DOMAIN = 'auth_domain'
PROPERTY_NAME_USER_ID = 'user_id'
PROPERTY_NAME_INTERNAL_ID = 'internal_id'
PROPERTY_NAME_FEDERATED_IDENTITY = 'federated_identity'
PROPERTY_NAME_FEDERATED_PROVIDER = 'federated_provider'
PROPERTY_NAME_KEY = '__key__'
DEFAULT_GAIA_ID = 0
def v4_key_to_string(v4_key):
  """Generates a string representing a key's path.

  The output makes no effort to qualify special characters in strings.
  The key need not be valid; when a path element carries both an ID and
  a name, the ID wins and the name is ignored.

  Args:
    v4_key: an entity_v4_pb.Key

  Returns:
    a string of the form '[Kind1: id1, Kind2: name2, ...]'
  """
  def _id_or_name(element):
    # The numeric ID takes precedence over the name.
    if element.has_id():
      return str(element.id())
    if element.has_name():
      return element.name()
    return ''

  parts = ['%s: %s' % (element.kind(), _id_or_name(element))
           for element in v4_key.path_element_list()]
  return '[%s]' % ', '.join(parts)
def is_complete_v4_key(v4_key):
  """Returns True if a key specifies an ID or name, False otherwise.

  Args:
    v4_key: an entity_v4_pb.Key (must have at least one path element)

  Returns:
    True if the key's final path element has an ID or a name.
  """
  num_elements = len(v4_key.path_element_list())
  assert num_elements >= 1
  final_element = v4_key.path_element(num_elements - 1)
  return final_element.has_id() or final_element.has_name()
def v1_key_to_string(v1_key):
  """Generates a string representing a key's path.

  The output makes no effort to qualify special characters in strings.
  The key need not be valid; when a path element carries both an ID and
  a name, the ID wins and the name is ignored.

  Args:
    v1_key: an googledatastore.Key

  Returns:
    a string of the form '[Kind1: id1, Kind2: name2, ...]'
  """
  def _id_or_name(element):
    # 'id_type' is a proto oneof: exactly one of id/name may be set.
    field = element.WhichOneof('id_type')
    if field == 'id':
      return str(element.id)
    if field == 'name':
      return element.name
    return ''

  parts = ['%s: %s' % (element.kind, _id_or_name(element))
           for element in v1_key.path]
  return '[%s]' % ', '.join(parts)
def is_complete_v1_key(v1_key):
  """Returns True if a key specifies an ID or name, False otherwise.

  Args:
    v1_key: an googledatastore.Key (must have at least one path element)

  Returns:
    True if the key's final path element sets the 'id_type' oneof.
  """
  assert len(v1_key.path) >= 1
  final_element = v1_key.path[-1]
  return final_element.WhichOneof('id_type') is not None
def is_complete_v3_key(v3_key):
  """Returns True if a key specifies an ID or name, False otherwise.

  Unlike the v1/v4 variants, v3 treats an ID of 0 and an empty name as
  "not set".

  Args:
    v3_key: a datastore_pb.Reference (must have at least one path element)

  Returns:
    True if the key specifies a non-zero ID or non-empty name.
  """
  path = v3_key.path()
  assert path.element_size() >= 1
  final_element = path.element_list()[-1]
  has_numeric_id = final_element.has_id() and final_element.id() != 0
  has_nonempty_name = final_element.has_name() and final_element.name() != ''
  return has_numeric_id or has_nonempty_name
def get_v1_mutation_key_and_entity(v1_mutation):
  """Returns the v1 key and entity for a v1 mutation proto, if applicable.

  Args:
    v1_mutation: a googledatastore.Mutation

  Returns:
    a tuple (googledatastore.Key for this mutation,
             googledatastore.Entity or None if the mutation is a deletion)
  """
  # Deletions carry only a key, no entity.
  if v1_mutation.HasField('delete'):
    return v1_mutation.delete, None
  # insert/update/upsert all carry a full entity under the 'operation' oneof.
  v1_entity = getattr(v1_mutation, v1_mutation.WhichOneof('operation'))
  return v1_entity.key, v1_entity
def is_valid_utf8(s):
  """Returns True when *s* is already text, or is bytes that decode as UTF-8."""
  if isinstance(s, str):
    # Already decoded text: always considered valid.
    return True
  try:
    s.decode('utf-8')
  except UnicodeDecodeError:
    return False
  return True
def check_conversion(condition, message):
  """Raises InvalidConversionError with *message* unless *condition* holds.

  Args:
    condition: (boolean) condition to enforce
    message: error message

  Raises:
    InvalidConversionError: if condition is not met
  """
  if condition:
    return
  raise InvalidConversionError(message)


class InvalidConversionError(Exception):
  """Raised when conversion between datastore protos fails."""
class IdResolver(object):
  """Handles project id <--> application id transformations."""

  def __init__(self, app_ids=()):
    """Create a new IdResolver.

    Args:
      app_ids: A list of application ids with application id shard set, i.e.
          s~my_app or e~my_app.
    """
    # Maps each project id back to the full (sharded) application id.
    self._resolver_map = {
        self.resolve_project_id(app_id): app_id for app_id in app_ids}

  def resolve_project_id(self, app_id):
    """Converts an application id to a project id.

    Args:
      app_id: The application id.

    Returns:
      The project id (the part after the last '~', or the whole string
      when there is no '~').
    """
    return app_id.rsplit('~')[-1]

  def resolve_app_id(self, project_id):
    """Converts a project id to an application id.

    Args:
      project_id: The project id.

    Returns:
      The application id.

    Raises:
      InvalidConversionError: if the application is unknown for the project id.
    """
    known = project_id in self._resolver_map
    check_conversion(known,
                     'Cannot determine application id for provided project id: '
                     '"%s".'
                     % project_id)
    return self._resolver_map[project_id]
class _IdentityIdResolver(IdResolver):
  """An IdResolver where app_id and project_id are the same string."""

  def resolve_project_id(self, app_id):
    # Identity mapping: no shard-prefix handling.
    return app_id

  def resolve_app_id(self, project_id):
    # Identity mapping: never raises.
    return project_id
class _EntityConverter(object):
"""Converter for entities and keys."""
  def __init__(self, id_resolver):
    """Creates a new EntityConverter.

    Args:
      id_resolver: an IdResolver object for converting
          project_id <--> application_id
    """
    # Kept for key conversions that must translate between the two id spaces.
    self._id_resolver = id_resolver
  def v4_to_v3_reference(self, v4_key, v3_ref):
    """Converts a v4 Key to a v3 Reference.

    Args:
      v4_key: an entity_v4_pb.Key
      v3_ref: an entity_pb.Reference to populate
    """
    v3_ref.Clear()
    # The v4 partition id (dataset + namespace) maps to v3 app/name_space.
    if v4_key.has_partition_id():
      if v4_key.partition_id().has_dataset_id():
        v3_ref.set_app(v4_key.partition_id().dataset_id())
      if v4_key.partition_id().has_namespace():
        v3_ref.set_name_space(v4_key.partition_id().namespace())
    # Copy the key path element by element; v4 'kind' becomes v3 'type'.
    for v4_element in v4_key.path_element_list():
      v3_element = v3_ref.mutable_path().add_element()
      v3_element.set_type(v4_element.kind())
      if v4_element.has_id():
        v3_element.set_id(v4_element.id())
      if v4_element.has_name():
        v3_element.set_name(v4_element.name())
def v4_to_v3_references(self, v4_keys):
"""Converts a list of v4 Keys to a list of v3 References.
Args:
v4_keys: a list of entity_v4_pb.Key objects
Returns:
a list of entity_pb.Reference objects
"""
v3_refs = []
for v4_key in v4_keys:
v3_ref = entity_pb.Reference()
self.v4_to_v3_reference(v4_key, v3_ref)
v3_refs.append(v3_ref)
return v3_refs
  def v3_to_v4_key(self, v3_ref, v4_key):
    """Converts a v3 Reference to a v4 Key.

    Args:
      v3_ref: an entity_pb.Reference
      v4_key: an entity_v4_pb.Key to populate
    """
    v4_key.Clear()
    # A reference without an app converts to an empty v4 key.
    if not v3_ref.app():
      return
    v4_key.mutable_partition_id().set_dataset_id(v3_ref.app())
    if v3_ref.name_space():
      v4_key.mutable_partition_id().set_namespace(v3_ref.name_space())
    # Copy the key path element by element; v3 'type' becomes v4 'kind'.
    for v3_element in v3_ref.path().element_list():
      v4_element = v4_key.add_path_element()
      v4_element.set_kind(v3_element.type())
      if v3_element.has_id():
        v4_element.set_id(v3_element.id())
      if v3_element.has_name():
        v4_element.set_name(v3_element.name())
def v3_to_v4_keys(self, v3_refs):
"""Converts a list of v3 References to a list of v4 Keys.
Args:
v3_refs: a list of entity_pb.Reference objects
Returns:
a list of entity_v4_pb.Key objects
"""
v4_keys = []
for v3_ref in v3_refs:
v4_key = entity_v4_pb.Key()
self.v3_to_v4_key(v3_ref, v4_key)
v4_keys.append(v4_key)
return v4_keys
  def v4_to_v3_entity(self, v4_entity, v3_entity, is_projection=False):
    """Converts a v4 Entity to a v3 EntityProto.

    Args:
      v4_entity: an entity_v4_pb.Entity
      v3_entity: an entity_pb.EntityProto to populate
      is_projection: True if the v4_entity is from a projection query.
    """
    v3_entity.Clear()
    for v4_property in v4_entity.property_list():
      property_name = v4_property.name()
      v4_value = v4_property.value()
      if v4_value.list_value_list():
        # Multi-valued property: each list element becomes its own
        # v3 property marked as 'multiple'.
        for v4_sub_value in v4_value.list_value_list():
          self.__add_v3_property_from_v4(
              property_name, True, is_projection, v4_sub_value, v3_entity)
      else:
        self.__add_v3_property_from_v4(
            property_name, False, is_projection, v4_value, v3_entity)
    if v4_entity.has_key():
      v4_key = v4_entity.key()
      self.v4_to_v3_reference(v4_key, v3_entity.mutable_key())
      v3_ref = v3_entity.key()
      self.v3_reference_to_group(v3_ref, v3_entity.mutable_entity_group())
    else:
      # Keyless v4 entities are accepted as-is (presumably embedded entity
      # values) -- TODO confirm against callers.
      pass
  def v3_to_v4_entity(self, v3_entity, v4_entity):
    """Converts a v3 EntityProto to a v4 Entity.

    Args:
      v3_entity: an entity_pb.EntityProto
      v4_entity: an entity_v4_pb.Proto to populate
    """
    v4_entity.Clear()
    self.v3_to_v4_key(v3_entity.key(), v4_entity.mutable_key())
    if not v3_entity.key().has_app():
      # A keyless v3 entity converts to a keyless v4 entity.
      v4_entity.clear_key()
    # v4_properties tracks properties already added, so that multiple v3
    # values for the same name are folded into one v4 list value.
    v4_properties = {}
    for v3_property in v3_entity.property_list():
      self.__add_v4_property_to_entity(v4_entity, v4_properties, v3_property,
                                       True)
    for v3_property in v3_entity.raw_property_list():
      self.__add_v4_property_to_entity(v4_entity, v4_properties, v3_property,
                                       False)
  def v4_value_to_v3_property_value(self, v4_value, v3_value):
    """Converts a v4 Value to a v3 PropertyValue.

    Dispatches on which member of the v4 value union is set.

    Args:
      v4_value: an entity_v4_pb.Value
      v3_value: an entity_pb.PropertyValue to populate
    """
    v3_value.Clear()
    if v4_value.has_boolean_value():
      v3_value.set_booleanvalue(v4_value.boolean_value())
    elif v4_value.has_integer_value():
      v3_value.set_int64value(v4_value.integer_value())
    elif v4_value.has_double_value():
      v3_value.set_doublevalue(v4_value.double_value())
    elif v4_value.has_timestamp_microseconds_value():
      # v3 stores timestamps as plain int64 microseconds.
      v3_value.set_int64value(v4_value.timestamp_microseconds_value())
    elif v4_value.has_key_value():
      v3_ref = entity_pb.Reference()
      self.v4_to_v3_reference(v4_value.key_value(), v3_ref)
      self.v3_reference_to_v3_property_value(v3_ref, v3_value)
    elif v4_value.has_blob_key_value():
      v3_value.set_stringvalue(v4_value.blob_key_value())
    elif v4_value.has_string_value():
      v3_value.set_stringvalue(v4_value.string_value())
    elif v4_value.has_blob_value():
      v3_value.set_stringvalue(v4_value.blob_value())
    elif v4_value.has_entity_value():
      v4_entity_value = v4_value.entity_value()
      v4_meaning = v4_value.meaning()
      # Entity values with a point/user meaning map onto the dedicated
      # v3 point/user value types; all others become serialized entities.
      if (v4_meaning == MEANING_GEORSS_POINT
          or v4_meaning == MEANING_PREDEFINED_ENTITY_POINT):
        self.__v4_to_v3_point_value(v4_entity_value,
                                    v3_value.mutable_pointvalue())
      elif v4_meaning == MEANING_PREDEFINED_ENTITY_USER:
        self.v4_entity_to_v3_user_value(v4_entity_value,
                                        v3_value.mutable_uservalue())
      else:
        v3_entity_value = entity_pb.EntityProto()
        self.v4_to_v3_entity(v4_entity_value, v3_entity_value)
        v3_value.set_stringvalue(v3_entity_value.SerializePartialToString())
    elif v4_value.has_geo_point_value():
      point_value = v3_value.mutable_pointvalue()
      point_value.set_x(v4_value.geo_point_value().latitude())
      point_value.set_y(v4_value.geo_point_value().longitude())
    else:
      # No value member set: the v3 value stays empty.
      pass
  def v3_property_to_v4_value(self, v3_property, indexed, v4_value):
    """Converts a v3 Property to a v4 Value.

    Args:
      v3_property: an entity_pb.Property
      indexed: whether the v3 property is indexed
      v4_value: an entity_v4_pb.Value to populate
    """
    v4_value.Clear()
    v3_property_value = v3_property.value()
    v3_meaning = v3_property.meaning()
    v3_uri_meaning = None
    if v3_property.meaning_uri():
      v3_uri_meaning = v3_property.meaning_uri()
    # Normalize the meaning: drop it when the value union is invalid, when
    # it is NO_MEANING, or when it does not match the value's type.
    if not self.__is_v3_property_value_union_valid(v3_property_value):
      v3_meaning = None
      v3_uri_meaning = None
    elif v3_meaning == entity_pb.Property.NO_MEANING:
      v3_meaning = None
    elif not self.__is_v3_property_value_meaning_valid(v3_property_value,
                                                       v3_meaning):
      v3_meaning = None
    is_zlib_value = False
    if v3_uri_meaning:
      if v3_uri_meaning == URI_MEANING_ZLIB:
        if v3_property_value.has_stringvalue():
          is_zlib_value = True
          if v3_meaning != entity_pb.Property.BLOB:
            # Zlib-compressed values are treated as blobs regardless of the
            # declared meaning.
            v3_meaning = entity_pb.Property.BLOB
        else:
          # ZLIB uri meaning without a string value: ignored.
          pass
      else:
        # Unknown meaning_uri: ignored.
        pass
    if v3_property_value.has_booleanvalue():
      v4_value.set_boolean_value(v3_property_value.booleanvalue())
    elif v3_property_value.has_int64value():
      if v3_meaning == entity_pb.Property.GD_WHEN:
        # GD_WHEN int64s are timestamps in microseconds.
        v4_value.set_timestamp_microseconds_value(
            v3_property_value.int64value())
        v3_meaning = None
      else:
        v4_value.set_integer_value(v3_property_value.int64value())
    elif v3_property_value.has_doublevalue():
      v4_value.set_double_value(v3_property_value.doublevalue())
    elif v3_property_value.has_referencevalue():
      v3_ref = entity_pb.Reference()
      self.__v3_reference_value_to_v3_reference(
          v3_property_value.referencevalue(), v3_ref)
      self.v3_to_v4_key(v3_ref, v4_value.mutable_key_value())
    elif v3_property_value.has_stringvalue():
      if v3_meaning == entity_pb.Property.ENTITY_PROTO:
        # The string holds a serialized entity: deserialize and embed it.
        serialized_entity_v3 = v3_property_value.stringvalue()
        v3_entity = entity_pb.EntityProto()
        v3_entity.ParsePartialFromString(serialized_entity_v3)
        self.v3_to_v4_entity(v3_entity, v4_value.mutable_entity_value())
        v3_meaning = None
      elif (v3_meaning == entity_pb.Property.BLOB
            or v3_meaning == entity_pb.Property.BYTESTRING):
        v4_value.set_blob_value(v3_property_value.stringvalue())
        # Only unindexed BYTESTRING values keep their meaning.
        if indexed or v3_meaning == entity_pb.Property.BLOB:
          v3_meaning = None
      else:
        string_value = v3_property_value.stringvalue()
        if is_valid_utf8(string_value):
          if v3_meaning == entity_pb.Property.BLOBKEY:
            v4_value.set_blob_key_value(string_value)
            v3_meaning = None
          else:
            v4_value.set_string_value(string_value)
        else:
          # Not valid UTF-8: fall back to a blob value.
          v4_value.set_blob_value(string_value)
          if v3_meaning != entity_pb.Property.INDEX_VALUE:
            v3_meaning = None
    elif v3_property_value.has_pointvalue():
      if v3_meaning == MEANING_GEORSS_POINT:
        point_value = v3_property_value.pointvalue()
        v4_value.mutable_geo_point_value().set_latitude(point_value.x())
        v4_value.mutable_geo_point_value().set_longitude(point_value.y())
      else:
        # Point without the GEORSS meaning: converted to a predefined
        # point entity.
        self.__v3_to_v4_point_entity(v3_property_value.pointvalue(),
                                     v4_value.mutable_entity_value())
        v4_value.set_meaning(MEANING_PREDEFINED_ENTITY_POINT)
        v3_meaning = None
    elif v3_property_value.has_uservalue():
      self.v3_user_value_to_v4_entity(v3_property_value.uservalue(),
                                      v4_value.mutable_entity_value())
      v4_value.set_meaning(MEANING_PREDEFINED_ENTITY_USER)
      v3_meaning = None
    else:
      # Empty value union: the v4 value stays empty.
      pass
    if is_zlib_value:
      v4_value.set_meaning(MEANING_ZLIB)
    elif v3_meaning:
      v4_value.set_meaning(v3_meaning)
    # Only write 'indexed' when it differs from the proto's current/default
    # value.
    if indexed != v4_value.indexed():
      v4_value.set_indexed(indexed)
  def v4_to_v3_property(self, property_name, is_multi, is_projection,
                        v4_value, v3_property):
    """Converts info from a v4 Property to a v3 Property.

    v4_value must not have a list_value.

    Args:
      property_name: the name of the property
      is_multi: whether the property contains multiple values
      is_projection: whether the property is projected
      v4_value: an entity_v4_pb.Value
      v3_property: an entity_pb.Property to populate
    """
    assert not v4_value.list_value_list(), 'v4 list_value not convertable to v3'
    v3_property.Clear()
    v3_property.set_name(property_name)
    if v4_value.has_meaning() and v4_value.meaning() == MEANING_EMPTY_LIST:
      # Empty lists are encoded as a single non-multiple property with an
      # empty value and the EMPTY_LIST meaning.
      v3_property.set_meaning(MEANING_EMPTY_LIST)
      v3_property.set_multiple(False)
      v3_property.mutable_value()
      return
    v3_property.set_multiple(is_multi)
    self.v4_value_to_v3_property_value(v4_value, v3_property.mutable_value())
    v4_meaning = None
    if v4_value.has_meaning():
      v4_meaning = v4_value.meaning()
    # Derive the v3 meaning from the kind of value that was set.
    if v4_value.has_timestamp_microseconds_value():
      v3_property.set_meaning(entity_pb.Property.GD_WHEN)
    elif v4_value.has_blob_key_value():
      v3_property.set_meaning(entity_pb.Property.BLOBKEY)
    elif v4_value.has_blob_value():
      if v4_meaning == MEANING_ZLIB:
        v3_property.set_meaning_uri(URI_MEANING_ZLIB)
      # NOTE(review): this compares the v4 meaning code against the v3 enum
      # entity_pb.Property.BYTESTRING -- present in the original code, but
      # worth confirming the two constant spaces really coincide here.
      if v4_meaning == entity_pb.Property.BYTESTRING:
        if v4_value.indexed():
          pass
      else:
        if v4_value.indexed():
          v3_property.set_meaning(entity_pb.Property.BYTESTRING)
        else:
          v3_property.set_meaning(entity_pb.Property.BLOB)
        v4_meaning = None
    elif v4_value.has_entity_value():
      if v4_meaning != MEANING_GEORSS_POINT:
        if (v4_meaning != MEANING_PREDEFINED_ENTITY_POINT
            and v4_meaning != MEANING_PREDEFINED_ENTITY_USER):
          v3_property.set_meaning(entity_pb.Property.ENTITY_PROTO)
        v4_meaning = None
    elif v4_value.has_geo_point_value():
      v3_property.set_meaning(MEANING_GEORSS_POINT)
    else:
      # Other value types carry no implicit meaning.
      pass
    if v4_meaning is not None:
      v3_property.set_meaning(v4_meaning)
    if is_projection:
      # Projection-query values are always marked INDEX_VALUE.
      v3_property.set_meaning(entity_pb.Property.INDEX_VALUE)
def __add_v3_property_from_v4(self, property_name, is_multi, is_projection,
v4_value, v3_entity):
"""Adds a v3 Property to an Entity based on information from a v4 Property.
Args:
property_name: the name of the property
is_multi: whether the property contains multiple values
is_projection: whether the property is a projection
v4_value: an entity_v4_pb.Value
v3_entity: an entity_pb.EntityProto
"""
if v4_value.indexed():
self.v4_to_v3_property(property_name, is_multi, is_projection,
v4_value, v3_entity.add_property())
else:
self.v4_to_v3_property(property_name, is_multi, is_projection,
v4_value, v3_entity.add_raw_property())
def __build_name_to_v4_property_map(self, v4_entity):
property_map = {}
for prop in v4_entity.property_list():
property_map[prop.name()] = prop
return property_map
  def __add_v4_property_to_entity(self, v4_entity, property_map, v3_property,
                                  indexed):
    """Adds a v4 Property to an entity or modifies an existing one.

    property_map is used to track of properties that have already been added.
    The same dict should be used for all of an entity's properties.

    Args:
      v4_entity: an entity_v4_pb.Entity
      property_map: a dict of name -> v4_property
      v3_property: an entity_pb.Property to convert to v4 and add to the dict
      indexed: whether the property is indexed
    """
    property_name = v3_property.name()
    if property_name in property_map:
      # A value with this name was already converted: reuse the existing v4
      # property so multiple values merge into one list.
      v4_property = property_map[property_name]
    else:
      v4_property = v4_entity.add_property()
      v4_property.set_name(property_name)
      property_map[property_name] = v4_property
    if v3_property.multiple():
      # Multi-valued: append to the v4 property's list value.
      self.v3_property_to_v4_value(v3_property, indexed,
                                   v4_property.mutable_value().add_list_value())
    else:
      self.v3_property_to_v4_value(v3_property, indexed,
                                   v4_property.mutable_value())
  def __get_v4_integer_value(self, v4_property):
    """Returns an integer value from a v4 Property.

    Args:
      v4_property: an entity_v4_pb.Property

    Returns:
      an integer

    Raises:
      InvalidConversionError: if the property doesn't contain an integer value
    """
    # check_conversion raises when the value union holds something else.
    check_conversion(v4_property.value().has_integer_value(),
                     'Property does not contain an integer value.')
    return v4_property.value().integer_value()
  def __get_v4_double_value(self, v4_property):
    """Returns a double value from a v4 Property.

    Args:
      v4_property: an entity_v4_pb.Property

    Returns:
      a double

    Raises:
      InvalidConversionError: if the property doesn't contain a double value
    """
    # check_conversion raises when the value union holds something else.
    check_conversion(v4_property.value().has_double_value(),
                     'Property does not contain a double value.')
    return v4_property.value().double_value()
  def __get_v4_string_value(self, v4_property):
    """Returns a string value from a v4 Property.

    Args:
      v4_property: an entity_v4_pb.Property

    Returns:
      a string

    Raises:
      InvalidConversionError: if the property doesn't contain a string value
    """
    # check_conversion raises when the value union holds something else.
    check_conversion(v4_property.value().has_string_value(),
                     'Property does not contain a string value.')
    return v4_property.value().string_value()
def __v4_integer_property(self, name, value, indexed):
  """Builds a v4 Property carrying a single integer value.

  Args:
    name: the property name
    value: the integer value of the property
    indexed: whether the value should be indexed

  Returns:
    a freshly constructed entity_v4_pb.Property
  """
  prop = entity_v4_pb.Property()
  prop.set_name(name)
  prop_value = prop.mutable_value()
  prop_value.set_indexed(indexed)
  prop_value.set_integer_value(value)
  return prop
def __v4_double_property(self, name, value, indexed):
  """Builds a v4 Property carrying a single double value.

  Args:
    name: the property name
    value: the double value of the property
    indexed: whether the value should be indexed

  Returns:
    a freshly constructed entity_v4_pb.Property
  """
  prop = entity_v4_pb.Property()
  prop.set_name(name)
  prop_value = prop.mutable_value()
  prop_value.set_indexed(indexed)
  prop_value.set_double_value(value)
  return prop
def __v4_string_property(self, name, value, indexed):
  """Builds a v4 Property carrying a single string value.

  Args:
    name: the property name
    value: the string value of the property
    indexed: whether the value should be indexed

  Returns:
    a freshly constructed entity_v4_pb.Property
  """
  prop = entity_v4_pb.Property()
  prop.set_name(name)
  prop_value = prop.mutable_value()
  prop_value.set_indexed(indexed)
  prop_value.set_string_value(value)
  return prop
def __v4_to_v3_point_value(self, v4_point_entity, v3_point_value):
  """Converts a v4 point Entity to a v3 PointValue.

  Args:
    v4_point_entity: an entity_v4_pb.Entity representing a point
    v3_point_value: an entity_pb.Property_PointValue to populate
  """
  v3_point_value.Clear()
  name_to_v4_property = self.__build_name_to_v4_property_map(v4_point_entity)
  # Use the shared property-name constants for consistency with
  # __v3_to_v4_point_entity, which writes these same properties under
  # PROPERTY_NAME_X / PROPERTY_NAME_Y.
  v3_point_value.set_x(
      self.__get_v4_double_value(name_to_v4_property[PROPERTY_NAME_X]))
  v3_point_value.set_y(
      self.__get_v4_double_value(name_to_v4_property[PROPERTY_NAME_Y]))
def __v3_to_v4_point_entity(self, v3_point_value, v4_entity):
  """Converts a v3 PointValue to a v4 point Entity.

  (Docstring previously mis-described this as a UserValue conversion.)

  Args:
    v3_point_value: an entity_pb.Property_PointValue
    v4_entity: an entity_v4_pb.Entity to populate
  """
  v4_entity.Clear()
  v4_entity.property_list().append(
      self.__v4_double_property(PROPERTY_NAME_X, v3_point_value.x(), False))
  v4_entity.property_list().append(
      self.__v4_double_property(PROPERTY_NAME_Y, v3_point_value.y(), False))
def v4_entity_to_v3_user_value(self, v4_user_entity, v3_user_value):
  """Converts a v4 user Entity to a v3 UserValue.

  Email and auth domain are copied unconditionally; the remaining user
  fields are copied only when the corresponding v4 property is present.

  Args:
    v4_user_entity: an entity_v4_pb.Entity representing a user
    v3_user_value: an entity_pb.Property_UserValue to populate
  """
  v3_user_value.Clear()
  name_to_v4_property = self.__build_name_to_v4_property_map(v4_user_entity)
  v3_user_value.set_email(self.__get_v4_string_value(
      name_to_v4_property[PROPERTY_NAME_EMAIL]))
  v3_user_value.set_auth_domain(self.__get_v4_string_value(
      name_to_v4_property[PROPERTY_NAME_AUTH_DOMAIN]))
  if PROPERTY_NAME_USER_ID in name_to_v4_property:
    v3_user_value.set_obfuscated_gaiaid(
        self.__get_v4_string_value(
            name_to_v4_property[PROPERTY_NAME_USER_ID]))
  if PROPERTY_NAME_INTERNAL_ID in name_to_v4_property:
    v3_user_value.set_gaiaid(self.__get_v4_integer_value(
        name_to_v4_property[PROPERTY_NAME_INTERNAL_ID]))
  else:
    # gaiaid is always populated on the v3 side; 0 stands for "absent"
    # (the inverse conversion, v3_user_value_to_v4_entity, skips
    # gaiaid == 0).
    v3_user_value.set_gaiaid(0)
  if PROPERTY_NAME_FEDERATED_IDENTITY in name_to_v4_property:
    v3_user_value.set_federated_identity(
        self.__get_v4_string_value(name_to_v4_property[
            PROPERTY_NAME_FEDERATED_IDENTITY]))
  if PROPERTY_NAME_FEDERATED_PROVIDER in name_to_v4_property:
    v3_user_value.set_federated_provider(
        self.__get_v4_string_value(name_to_v4_property[
            PROPERTY_NAME_FEDERATED_PROVIDER]))
def v3_user_value_to_v4_entity(self, v3_user_value, v4_entity):
  """Converts a v3 UserValue to a v4 user Entity.

  Email and auth domain are always written; optional user fields are
  written only when set on the v3 value.

  Args:
    v3_user_value: an entity_pb.Property_UserValue
    v4_entity: an entity_v4_pb.Entity to populate
  """
  v4_entity.Clear()
  v4_entity.property_list().append(
      self.__v4_string_property(PROPERTY_NAME_EMAIL, v3_user_value.email(),
                                False))
  v4_entity.property_list().append(self.__v4_string_property(
      PROPERTY_NAME_AUTH_DOMAIN,
      v3_user_value.auth_domain(), False))
  # gaiaid == 0 means "absent" in v3 (see v4_entity_to_v3_user_value,
  # which writes 0 when the v4 property is missing).
  if v3_user_value.gaiaid() != 0:
    v4_entity.property_list().append(self.__v4_integer_property(
        PROPERTY_NAME_INTERNAL_ID,
        v3_user_value.gaiaid(),
        False))
  if v3_user_value.has_obfuscated_gaiaid():
    v4_entity.property_list().append(self.__v4_string_property(
        PROPERTY_NAME_USER_ID,
        v3_user_value.obfuscated_gaiaid(),
        False))
  if v3_user_value.has_federated_identity():
    v4_entity.property_list().append(self.__v4_string_property(
        PROPERTY_NAME_FEDERATED_IDENTITY,
        v3_user_value.federated_identity(),
        False))
  if v3_user_value.has_federated_provider():
    v4_entity.property_list().append(self.__v4_string_property(
        PROPERTY_NAME_FEDERATED_PROVIDER,
        v3_user_value.federated_provider(),
        False))
def v1_to_v3_reference(self, v1_key, v3_ref):
  """Converts a v1 Key to a v3 Reference.

  Args:
    v1_key: an googledatastore.Key
    v3_ref: an entity_pb.Reference to populate
  """
  v3_ref.Clear()
  if v1_key.HasField('partition_id'):
    project_id = v1_key.partition_id.project_id
    if project_id:
      # v1 keys carry a project id; v3 references use an app id, so
      # translate through the configured resolver.
      app_id = self._id_resolver.resolve_app_id(project_id)
      v3_ref.set_app(app_id)
    if v1_key.partition_id.namespace_id:
      v3_ref.set_name_space(v1_key.partition_id.namespace_id)
  for v1_element in v1_key.path:
    v3_element = v3_ref.mutable_path().add_element()
    # v1 strings are unicode; the v3 proto stores bytes.
    v3_element.set_type(v1_element.kind.encode('utf-8'))
    # 'id' and 'name' form a oneof; an incomplete key has neither set,
    # in which case the v3 element is left without id/name too.
    id_type = v1_element.WhichOneof('id_type')
    if id_type == 'id':
      v3_element.set_id(v1_element.id)
    elif id_type == 'name':
      v3_element.set_name(v1_element.name.encode('utf-8'))
def v1_to_v3_references(self, v1_keys):
  """Converts a list of v1 Keys to a list of v3 References.

  Args:
    v1_keys: a list of googledatastore.Key objects

  Returns:
    a list of entity_pb.Reference objects, in the same order
  """
  def _convert(v1_key):
    v3_ref = entity_pb.Reference()
    self.v1_to_v3_reference(v1_key, v3_ref)
    return v3_ref
  return [_convert(v1_key) for v1_key in v1_keys]
def v3_to_v1_key(self, v3_ref, v1_key):
  """Converts a v3 Reference to a v1 Key.

  Args:
    v3_ref: an entity_pb.Reference
    v1_key: an googledatastore.Key to populate
  """
  v1_key.Clear()
  # A reference without an app id carries no usable key information;
  # leave the v1 key cleared (callers such as v3_to_v1_entity then drop
  # the key field entirely).
  if not v3_ref.app():
    return
  project_id = self._id_resolver.resolve_project_id(v3_ref.app())
  v1_key.partition_id.project_id = project_id
  if v3_ref.name_space():
    v1_key.partition_id.namespace_id = v3_ref.name_space()
  for v3_element in v3_ref.path().element_list():
    v1_element = v1_key.path.add()
    v1_element.kind = v3_element.type()
    # An incomplete key element has neither id nor name set.
    if v3_element.has_id():
      v1_element.id = v3_element.id()
    if v3_element.has_name():
      v1_element.name = v3_element.name()
def v3_to_v1_keys(self, v3_refs):
  """Converts a list of v3 References to a list of v1 Keys.

  Args:
    v3_refs: a list of entity_pb.Reference objects

  Returns:
    a list of googledatastore.Key objects, in the same order
  """
  def _convert(v3_ref):
    v1_key = googledatastore.Key()
    self.v3_to_v1_key(v3_ref, v1_key)
    return v1_key
  return [_convert(v3_ref) for v3_ref in v3_refs]
def project_to_app_id(self, project_id):
  """Converts a string project id to a string app id."""
  resolver = self._id_resolver
  return resolver.resolve_app_id(project_id)
def app_to_project_id(self, app_id):
  """Converts a string app id to a string project id."""
  resolver = self._id_resolver
  return resolver.resolve_project_id(app_id)
def __new_v3_property(self, v3_entity, is_indexed):
  """Adds and returns a fresh property slot on a v3 entity.

  Indexed values live in the entity's property list, unindexed ones in
  its raw_property list.
  """
  factory = v3_entity.add_property if is_indexed else v3_entity.add_raw_property
  return factory()
def v1_to_v3_entity(self, v1_entity, v3_entity, is_projection=False):
  """Converts a v1 Entity to a v3 EntityProto.

  Args:
    v1_entity: an googledatastore.Entity
    v3_entity: an entity_pb.EntityProto to populate
    is_projection: True if the v1_entity is from a projection query.
  """
  v3_entity.Clear()
  for property_name, v1_value in v1_entity.properties.items():
    if v1_value.HasField('array_value'):
      if len(v1_value.array_value.values) == 0:
        # v3 has no native empty-list value; encode it as a single
        # non-multiple property tagged with MEANING_EMPTY_LIST and an
        # empty (but present) value.
        empty_list = self.__new_v3_property(v3_entity,
                                            not v1_value.exclude_from_indexes)
        empty_list.set_name(property_name.encode('utf-8'))
        empty_list.set_multiple(False)
        empty_list.set_meaning(MEANING_EMPTY_LIST)
        empty_list.mutable_value()
      else:
        # Each v1 array element becomes its own multiple-valued v3
        # property; v1 tracks indexing per element, so each element may
        # land in either the indexed or the raw property list.
        for v1_sub_value in v1_value.array_value.values:
          list_element = self.__new_v3_property(
              v3_entity, not v1_sub_value.exclude_from_indexes)
          self.v1_to_v3_property(
              property_name, True, is_projection, v1_sub_value, list_element)
    else:
      value_property = self.__new_v3_property(
          v3_entity, not v1_value.exclude_from_indexes)
      self.v1_to_v3_property(
          property_name, False, is_projection, v1_value, value_property)
  if v1_entity.HasField('key'):
    v1_key = v1_entity.key
    self.v1_to_v3_reference(v1_key, v3_entity.mutable_key())
    v3_ref = v3_entity.key()
    # The entity group is the first element of the key path.
    self.v3_reference_to_group(v3_ref, v3_entity.mutable_entity_group())
  else:
    # Keyless v1 entities (e.g. embedded entity values, see
    # v1_value_to_v3_property_value) get neither key nor entity group.
    pass
def v3_to_v1_entity(self, v3_entity, v1_entity):
  """Converts a v3 EntityProto to a v1 Entity.

  Args:
    v3_entity: an entity_pb.EntityProto
    v1_entity: an googledatastore.Proto to populate
  """
  v1_entity.Clear()
  self.v3_to_v1_key(v3_entity.key(), v1_entity.key)
  if not v3_entity.key().has_app():
    # A v3 entity without an app id has no meaningful key; drop the
    # (cleared) v1 key field entirely.
    v1_entity.ClearField('key')
  # Indexed and unindexed v3 properties are merged into the single v1
  # property map, carrying the indexed flag along.
  for indexed, v3_properties in ((True, v3_entity.property_list()),
                                 (False, v3_entity.raw_property_list())):
    for v3_property in v3_properties:
      self.__add_v1_property_to_entity(v1_entity, v3_property, indexed)
def v1_value_to_v3_property_value(self, v1_value, v3_value):
  """Converts a v1 Value to a v3 PropertyValue.

  Only the payload is converted here; type meanings (GD_WHEN,
  ENTITY_PROTO, ...) are attached by v1_to_v3_property.

  Args:
    v1_value: an googledatastore.Value
    v3_value: an entity_pb.PropertyValue to populate
  """
  v3_value.Clear()
  # Dispatch on which variant of the v1 value oneof is set.
  field = v1_value.WhichOneof('value_type')
  if field == 'boolean_value':
    v3_value.set_booleanvalue(v1_value.boolean_value)
  elif field == 'integer_value':
    v3_value.set_int64value(v1_value.integer_value)
  elif field == 'double_value':
    v3_value.set_doublevalue(v1_value.double_value)
  elif field == 'timestamp_value':
    # v3 stores timestamps as microseconds-since-epoch in the int64 field.
    v3_value.set_int64value(
        googledatastore.helper.micros_from_timestamp(
            v1_value.timestamp_value))
  elif field == 'key_value':
    v3_ref = entity_pb.Reference()
    self.v1_to_v3_reference(v1_value.key_value, v3_ref)
    self.v3_reference_to_v3_property_value(v3_ref, v3_value)
  elif field == 'string_value':
    # v1 strings are unicode; v3 stores bytes.
    v3_value.set_stringvalue(v1_value.string_value.encode('utf-8'))
  elif field == 'blob_value':
    # Blobs share the v3 string field; meanings distinguish them later.
    v3_value.set_stringvalue(v1_value.blob_value)
  elif field == 'entity_value':
    v1_entity_value = v1_value.entity_value
    v1_meaning = v1_value.meaning
    if v1_meaning == MEANING_PREDEFINED_ENTITY_USER:
      # Users travel as specially-marked entities in v1.
      self.v1_entity_to_v3_user_value(v1_entity_value,
                                      v3_value.mutable_uservalue())
    else:
      # Arbitrary embedded entities are stored serialized inside the v3
      # string field.
      v3_entity_value = entity_pb.EntityProto()
      self.v1_to_v3_entity(v1_entity_value, v3_entity_value)
      v3_value.set_stringvalue(v3_entity_value.SerializePartialToString())
  elif field == 'geo_point_value':
    # v3 points store latitude in x and longitude in y.
    point_value = v3_value.mutable_pointvalue()
    point_value.set_x(v1_value.geo_point_value.latitude)
    point_value.set_y(v1_value.geo_point_value.longitude)
  elif field == 'null_value':
    # Null maps to an empty v3 PropertyValue (already cleared above).
    pass
  else:
    # No variant set at all: leave the v3 value empty as well.
    pass
def v3_property_to_v1_value(self, v3_property, indexed, v1_value):
  """Converts a v3 Property to a v1 Value.

  Args:
    v3_property: an entity_pb.Property
    indexed: whether the v3 property is indexed
    v1_value: an googledatastore.Value to populate
  """
  v1_value.Clear()
  v3_property_value = v3_property.value()
  v3_meaning = v3_property.meaning()
  v3_uri_meaning = None
  if v3_property.meaning_uri():
    v3_uri_meaning = v3_property.meaning_uri()
  # Sanitize the meaning first: drop it when the value union is invalid,
  # when no meaning is declared, or when the declared meaning does not
  # match the type of value actually stored.
  if not self.__is_v3_property_value_union_valid(v3_property_value):
    v3_meaning = None
    v3_uri_meaning = None
  elif v3_meaning == entity_pb.Property.NO_MEANING:
    v3_meaning = None
  elif not self.__is_v3_property_value_meaning_valid(v3_property_value,
                                                     v3_meaning):
    v3_meaning = None
  is_zlib_value = False
  if v3_uri_meaning:
    if v3_uri_meaning == URI_MEANING_ZLIB:
      if v3_property_value.has_stringvalue():
        is_zlib_value = True
        # zlib-compressed payloads are always treated as blobs,
        # regardless of the declared meaning.
        if v3_meaning != entity_pb.Property.BLOB:
          v3_meaning = entity_pb.Property.BLOB
      else:
        pass
    else:
      pass
  if v3_property.meaning() == entity_pb.Property.EMPTY_LIST:
    # The v3 empty-list marker round-trips as an empty v1 array_value.
    v1_value.array_value.values.extend([])
    v3_meaning = None
  elif v3_property_value.has_booleanvalue():
    v1_value.boolean_value = v3_property_value.booleanvalue()
  elif v3_property_value.has_int64value():
    if v3_meaning == entity_pb.Property.GD_WHEN:
      # GD_WHEN int64s are microseconds-since-epoch timestamps.
      googledatastore.helper.micros_to_timestamp(
          v3_property_value.int64value(), v1_value.timestamp_value)
      v3_meaning = None
    else:
      v1_value.integer_value = v3_property_value.int64value()
  elif v3_property_value.has_doublevalue():
    v1_value.double_value = v3_property_value.doublevalue()
  elif v3_property_value.has_referencevalue():
    v3_ref = entity_pb.Reference()
    self.__v3_reference_value_to_v3_reference(
        v3_property_value.referencevalue(), v3_ref)
    self.v3_to_v1_key(v3_ref, v1_value.key_value)
  elif v3_property_value.has_stringvalue():
    if v3_meaning == entity_pb.Property.ENTITY_PROTO:
      # The string holds a serialized embedded entity; inflate it.
      serialized_entity_v3 = v3_property_value.stringvalue()
      v3_entity = entity_pb.EntityProto()
      v3_entity.ParsePartialFromString(serialized_entity_v3)
      self.v3_to_v1_entity(v3_entity, v1_value.entity_value)
      v3_meaning = None
    elif (v3_meaning == entity_pb.Property.BLOB
          or v3_meaning == entity_pb.Property.BYTESTRING):
      v1_value.blob_value = v3_property_value.stringvalue()
      # BLOB is implied by the v1 blob field; BYTESTRING is only kept as
      # an explicit meaning for unindexed values.
      if indexed or v3_meaning == entity_pb.Property.BLOB:
        v3_meaning = None
    else:
      string_value = v3_property_value.stringvalue()
      if is_valid_utf8(string_value):
        v1_value.string_value = string_value
      else:
        # Not valid UTF-8: surface the bytes as a blob instead of text.
        v1_value.blob_value = string_value
      # Any remaining string meaning (except INDEX_VALUE) is implied by
      # the chosen v1 field.
      if v3_meaning != entity_pb.Property.INDEX_VALUE:
        v3_meaning = None
  elif v3_property_value.has_pointvalue():
    # Points without the GEORSS_POINT meaning are marked so they can
    # round-trip back to a bare v3 point.
    if v3_meaning != MEANING_GEORSS_POINT:
      v1_value.meaning = MEANING_POINT_WITHOUT_V3_MEANING
    point_value = v3_property_value.pointvalue()
    v1_value.geo_point_value.latitude = point_value.x()
    v1_value.geo_point_value.longitude = point_value.y()
    v3_meaning = None
  elif v3_property_value.has_uservalue():
    self.v3_user_value_to_v1_entity(v3_property_value.uservalue(),
                                    v1_value.entity_value)
    v1_value.meaning = MEANING_PREDEFINED_ENTITY_USER
    v3_meaning = None
  else:
    # No value variant set: the v3 "empty" value maps to v1 null.
    v1_value.null_value = googledatastore.NULL_VALUE
  if is_zlib_value:
    v1_value.meaning = MEANING_ZLIB
  elif v3_meaning:
    # Whatever meaning survived sanitization is carried over verbatim.
    v1_value.meaning = v3_meaning
  # exclude_from_indexes is the inverse of v3's indexed flag; it is only
  # written when it differs from the proto3 default (False).
  if indexed == v1_value.exclude_from_indexes:
    v1_value.exclude_from_indexes = not indexed
def v1_to_v3_property(self, property_name, is_multi, is_projection,
                      v1_value, v3_property):
  """Converts info from a v1 Property to a v3 Property.

  v1_value must not have an array_value.

  Args:
    property_name: the name of the property, unicode
    is_multi: whether the property contains multiple values
    is_projection: whether the property is projected
    v1_value: an googledatastore.Value
    v3_property: an entity_pb.Property to populate
  """
  v1_value_type = v1_value.WhichOneof('value_type')
  if v1_value_type == 'array_value':
    # Arrays are unrolled by the caller (v1_to_v3_entity); a nested
    # array cannot be represented in v3.
    assert False, 'v1 array_value not convertable to v3'
  v3_property.Clear()
  v3_property.set_name(property_name.encode('utf-8'))
  v3_property.set_multiple(is_multi)
  self.v1_value_to_v3_property_value(v1_value, v3_property.mutable_value())
  v1_meaning = None
  if v1_value.meaning:
    v1_meaning = v1_value.meaning
  # Derive the v3 meaning from the v1 value type; meanings that are
  # implied by the representation are consumed (set to None) so they are
  # not duplicated by the final set_meaning below.
  if v1_value_type == 'timestamp_value':
    v3_property.set_meaning(entity_pb.Property.GD_WHEN)
  elif v1_value_type == 'blob_value':
    if v1_meaning == MEANING_ZLIB:
      v3_property.set_meaning_uri(URI_MEANING_ZLIB)
    if v1_meaning == entity_pb.Property.BYTESTRING:
      if not v1_value.exclude_from_indexes:
        # An explicit BYTESTRING meaning on an indexed blob is kept as
        # v1_meaning and applied below.
        pass
    else:
      # No explicit byte meaning: indexed blobs become BYTESTRING,
      # unindexed ones become BLOB.
      if not v1_value.exclude_from_indexes:
        v3_property.set_meaning(entity_pb.Property.BYTESTRING)
      else:
        v3_property.set_meaning(entity_pb.Property.BLOB)
      v1_meaning = None
  elif v1_value_type == 'entity_value':
    # User entities keep their special meaning; other embedded entities
    # are marked as serialized ENTITY_PROTO payloads.
    if v1_meaning != MEANING_PREDEFINED_ENTITY_USER:
      v3_property.set_meaning(entity_pb.Property.ENTITY_PROTO)
    v1_meaning = None
  elif v1_value_type == 'geo_point_value':
    if v1_meaning != MEANING_POINT_WITHOUT_V3_MEANING:
      v3_property.set_meaning(MEANING_GEORSS_POINT)
    v1_meaning = None
  else:
    pass
  if v1_meaning is not None:
    v3_property.set_meaning(v1_meaning)
  if is_projection:
    # Projection query results are index values regardless of any
    # original meaning.
    v3_property.set_meaning(entity_pb.Property.INDEX_VALUE)
def __add_v1_property_to_entity(self, v1_entity, v3_property, indexed):
  """Converts a v3 Property and merges it into a v1 entity.

  Multiple-valued v3 properties are appended as new elements of the v1
  array_value under the same name; single values populate the v1 Value
  directly.

  Args:
    v1_entity: an googledatastore.Entity
    v3_property: an entity_pb.Property to convert and merge
    indexed: whether the property is indexed
  """
  v1_value = v1_entity.properties[v3_property.name()]
  if v3_property.multiple():
    target = v1_value.array_value.values.add()
  else:
    target = v1_value
  self.v3_property_to_v1_value(v3_property, indexed, target)
def __get_v1_integer_value(self, v1_value):
  """Extracts the integer payload of a v1 Value.

  Args:
    v1_value: a googledatastore.Value

  Returns:
    the integer held by the value

  Raises:
    InvalidConversionError: if no integer value is set
  """
  has_integer = v1_value.HasField('integer_value')
  check_conversion(has_integer,
                   'Value does not contain an integer value.')
  return v1_value.integer_value
def __get_v1_double_value(self, v1_value):
  """Extracts the double payload of a v1 Value.

  Args:
    v1_value: an googledatastore.Value

  Returns:
    the double held by the value

  Raises:
    InvalidConversionError: if no double value is set
  """
  has_double = v1_value.HasField('double_value')
  check_conversion(has_double,
                   'Value does not contain a double value.')
  return v1_value.double_value
def __get_v1_string_value(self, v1_value):
  """Extracts the string payload of a v1 Value.

  Args:
    v1_value: an googledatastore.Value

  Returns:
    the string held by the value

  Raises:
    InvalidConversionError: if no string value is set
  """
  has_string = v1_value.HasField('string_value')
  check_conversion(has_string,
                   'Value does not contain a string value.')
  return v1_value.string_value
def __v1_integer_property(self, entity, name, value, indexed):
  """Populates a single-integer-valued v1 Property on an entity.

  Args:
    entity: the entity to populate
    name: the name of the property to populate
    value: the integer value of the property
    indexed: whether the value should be indexed
  """
  target = entity.properties[name]
  target.integer_value = value
  target.exclude_from_indexes = not indexed
def __v1_double_property(self, entity, name, value, indexed):
  """Populates a single-double-valued v1 Property on an entity.

  Args:
    entity: the entity to populate
    name: the name of the property to populate
    value: the double value of the property
    indexed: whether the value should be indexed
  """
  target = entity.properties[name]
  target.double_value = value
  target.exclude_from_indexes = not indexed
def __v1_string_property(self, entity, name, value, indexed):
  """Populates a single-string-valued v1 Property on an entity.

  Args:
    entity: the entity to populate
    name: the name of the property to populate
    value: the string value of the property
    indexed: whether the value should be indexed
  """
  target = entity.properties[name]
  target.string_value = value
  target.exclude_from_indexes = not indexed
def v1_entity_to_v3_user_value(self, v1_user_entity, v3_user_value):
  """Converts a v1 user Entity to a v3 UserValue.

  Email and auth domain are read unconditionally; the remaining user
  fields are copied only when the corresponding v1 property is present.

  Args:
    v1_user_entity: an googledatastore.Entity representing a user
    v3_user_value: an entity_pb.Property_UserValue to populate
  """
  v3_user_value.Clear()
  properties = v1_user_entity.properties
  v3_user_value.set_email(self.__get_v1_string_value(
      properties[PROPERTY_NAME_EMAIL]))
  v3_user_value.set_auth_domain(self.__get_v1_string_value(
      properties[PROPERTY_NAME_AUTH_DOMAIN]))
  if PROPERTY_NAME_USER_ID in properties:
    v3_user_value.set_obfuscated_gaiaid(
        self.__get_v1_string_value(properties[PROPERTY_NAME_USER_ID]))
  if PROPERTY_NAME_INTERNAL_ID in properties:
    v3_user_value.set_gaiaid(self.__get_v1_integer_value(
        properties[PROPERTY_NAME_INTERNAL_ID]))
  else:
    # gaiaid is always populated on the v3 side; 0 stands for "absent"
    # (the inverse conversion, v3_user_value_to_v1_entity, skips
    # gaiaid == 0).
    v3_user_value.set_gaiaid(0)
  if PROPERTY_NAME_FEDERATED_IDENTITY in properties:
    v3_user_value.set_federated_identity(
        self.__get_v1_string_value(properties[
            PROPERTY_NAME_FEDERATED_IDENTITY]))
  if PROPERTY_NAME_FEDERATED_PROVIDER in properties:
    v3_user_value.set_federated_provider(
        self.__get_v1_string_value(properties[
            PROPERTY_NAME_FEDERATED_PROVIDER]))
def v3_user_value_to_v1_entity(self, v3_user_value, v1_entity):
  """Converts a v3 UserValue to a v1 user Entity.

  Email and auth domain are always written; optional user fields are
  written only when set on the v3 value. All user properties are
  unindexed.

  Args:
    v3_user_value: an entity_pb.Property_UserValue
    v1_entity: an googledatastore.Entity to populate
  """
  v1_entity.Clear()
  self.__v1_string_property(v1_entity, PROPERTY_NAME_EMAIL,
                            v3_user_value.email(), False)
  self.__v1_string_property(v1_entity, PROPERTY_NAME_AUTH_DOMAIN,
                            v3_user_value.auth_domain(), False)
  # gaiaid == 0 means "absent" in v3 (see v1_entity_to_v3_user_value,
  # which writes 0 when the v1 property is missing).
  if v3_user_value.gaiaid() != 0:
    self.__v1_integer_property(
        v1_entity,
        PROPERTY_NAME_INTERNAL_ID,
        v3_user_value.gaiaid(),
        False)
  if v3_user_value.has_obfuscated_gaiaid():
    self.__v1_string_property(
        v1_entity,
        PROPERTY_NAME_USER_ID,
        v3_user_value.obfuscated_gaiaid(),
        False)
  if v3_user_value.has_federated_identity():
    self.__v1_string_property(
        v1_entity,
        PROPERTY_NAME_FEDERATED_IDENTITY,
        v3_user_value.federated_identity(),
        False)
  if v3_user_value.has_federated_provider():
    self.__v1_string_property(
        v1_entity,
        PROPERTY_NAME_FEDERATED_PROVIDER,
        v3_user_value.federated_provider(),
        False)
def __is_v3_property_value_union_valid(self, v3_property_value):
  """Returns True if at most one variant of the v3 value union is set."""
  variants_present = [
      v3_property_value.has_booleanvalue(),
      v3_property_value.has_int64value(),
      v3_property_value.has_doublevalue(),
      v3_property_value.has_referencevalue(),
      v3_property_value.has_stringvalue(),
      v3_property_value.has_pointvalue(),
      v3_property_value.has_uservalue(),
  ]
  return sum(variants_present) <= 1
def __is_v3_property_value_meaning_valid(self, v3_property_value, v3_meaning):
  """Returns True if the v3 PropertyValue's type value matches its meaning."""
  # Meanings that impose no constraint on the stored value type.
  unconstrained = (
      entity_pb.Property.NO_MEANING,
      entity_pb.Property.INDEX_VALUE,
      entity_pb.Property.EMPTY_LIST,
  )
  # Meanings that require a string payload.
  string_meanings = (
      entity_pb.Property.BLOB,
      entity_pb.Property.TEXT,
      entity_pb.Property.BYTESTRING,
      entity_pb.Property.ATOM_CATEGORY,
      entity_pb.Property.ATOM_LINK,
      entity_pb.Property.ATOM_TITLE,
      entity_pb.Property.ATOM_CONTENT,
      entity_pb.Property.ATOM_SUMMARY,
      entity_pb.Property.ATOM_AUTHOR,
      entity_pb.Property.GD_EMAIL,
      entity_pb.Property.GD_IM,
      entity_pb.Property.GD_PHONENUMBER,
      entity_pb.Property.GD_POSTALADDRESS,
      entity_pb.Property.BLOBKEY,
      entity_pb.Property.ENTITY_PROTO,
  )
  # Meanings that require an int64 payload.
  int64_meanings = (
      entity_pb.Property.GD_WHEN,
      entity_pb.Property.GD_RATING,
  )
  if v3_meaning in unconstrained:
    return True
  if v3_meaning in string_meanings:
    return v3_property_value.has_stringvalue()
  if v3_meaning in int64_meanings:
    return v3_property_value.has_int64value()
  if v3_meaning == entity_pb.Property.GEORSS_POINT:
    return v3_property_value.has_pointvalue()
  # Unrecognized meanings never validate.
  return False
def __v3_reference_has_id_or_name(self, v3_ref):
  """Determines if a v3 Reference specifies an ID or name.

  Args:
    v3_ref: an entity_pb.Reference

  Returns:
    boolean: True if the last path element specifies an ID or name.
  """
  path = v3_ref.path()
  assert path.element_size() >= 1
  leaf = path.element(path.element_size() - 1)
  return leaf.has_id() or leaf.has_name()
def v3_reference_to_group(self, v3_ref, group):
  """Converts a v3 Reference to a v3 Path representing the entity group.

  The entity group is represented as an entity_pb.Path containing only
  the first element of the provided Reference's path.

  Args:
    v3_ref: an entity_pb.Reference
    group: an entity_pb.Path to populate
  """
  group.Clear()
  source_path = v3_ref.path()
  assert source_path.element_size() >= 1
  root_element = source_path.element(0)
  group.add_element().CopyFrom(root_element)
def v3_reference_to_v3_property_value(self, v3_ref, v3_property_value):
  """Converts a v3 Reference to a v3 PropertyValue.

  Only fields that are present on the reference are copied into the
  value's ReferenceValue.

  Args:
    v3_ref: an entity_pb.Reference
    v3_property_value: an entity_pb.PropertyValue to populate
  """
  v3_property_value.Clear()
  ref_value = v3_property_value.mutable_referencevalue()
  if v3_ref.has_app():
    ref_value.set_app(v3_ref.app())
  if v3_ref.has_name_space():
    ref_value.set_name_space(v3_ref.name_space())
  for src_element in v3_ref.path().element_list():
    dst_element = ref_value.add_pathelement()
    if src_element.has_type():
      dst_element.set_type(src_element.type())
    if src_element.has_id():
      dst_element.set_id(src_element.id())
    if src_element.has_name():
      dst_element.set_name(src_element.name())
def __v3_reference_value_to_v3_reference(self, v3_ref_value, v3_ref):
  """Converts a v3 ReferenceValue back into a v3 Reference.

  Only fields that are present on the reference value are copied into
  the reference; this is the inverse of v3_reference_to_v3_property_value.

  Args:
    v3_ref_value: an entity_pb.PropertyValue_ReferenceValue
    v3_ref: an entity_pb.Reference to populate
  """
  v3_ref.Clear()
  if v3_ref_value.has_app():
    v3_ref.set_app(v3_ref_value.app())
  if v3_ref_value.has_name_space():
    v3_ref.set_name_space(v3_ref_value.name_space())
  for src_element in v3_ref_value.pathelement_list():
    dst_element = v3_ref.mutable_path().add_element()
    if src_element.has_type():
      dst_element.set_type(src_element.type())
    if src_element.has_id():
      dst_element.set_id(src_element.id())
    if src_element.has_name():
      dst_element.set_name(src_element.name())
class _QueryConverter(object):
  """Base converter for v3 and v1/v4 queries.

  Key and property-value conversion is delegated to the entity converter
  supplied at construction time.
  """

  def __init__(self, entity_converter):
    self._entity_converter = entity_converter

  def get_entity_converter(self):
    """Returns the entity converter used for keys and property values."""
    return self._entity_converter

  def _v3_filter_to_v1_property_filter(self, v3_filter, v1_property_filter):
    """Converts a v3 Filter to a v1 PropertyFilter.

    Args:
      v3_filter: a datastore_pb.Filter
      v1_property_filter: a googledatastore.PropertyFilter to populate

    Raises:
      InvalidConversionError if the filter cannot be converted
    """
    check_conversion(v3_filter.property_size() == 1,
                     'invalid filter')
    check_conversion(v3_filter.op() <= 5,
                     'unsupported filter op: %d' % v3_filter.op())
    v1_property_filter.Clear()
    # The v3 and v1 comparison operators share numeric values for ops <= 5,
    # so the op can be copied directly.
    v1_property_filter.op = v3_filter.op()
    v1_property_filter.property.name = v3_filter.property(0).name()
    self._entity_converter.v3_property_to_v1_value(
        v3_filter.property(0), True, v1_property_filter.value)

  def _v3_query_to_v1_ancestor_filter(self, v3_query, v1_property_filter):
    """Converts a v3 Query to a v1 ancestor PropertyFilter.

    Args:
      v3_query: a datastore_pb.Query
      v1_property_filter: a googledatastore.PropertyFilter to populate
    """
    v1_property_filter.Clear()
    # googledatastore messages are proto3-style: fields are assigned as
    # plain attributes, not via the set_*/mutable_* accessors used by the
    # v3/v4 protos (see the sibling v1 methods above and below).
    v1_property_filter.op = googledatastore.PropertyFilter.HAS_ANCESTOR
    v1_property_filter.property.name = PROPERTY_NAME_KEY
    self._entity_converter.v3_to_v1_key(
        v3_query.ancestor(),
        v1_property_filter.value.key_value)

  def v3_order_to_v1_order(self, v3_order, v1_order):
    """Converts a v3 Query order to a v1 PropertyOrder.

    Args:
      v3_order: a datastore_pb.Query.Order
      v1_order: a googledatastore.PropertyOrder to populate
    """
    v1_order.property.name = v3_order.property()
    if v3_order.has_direction():
      v1_order.direction = v3_order.direction()

  def _v3_filter_to_v4_property_filter(self, v3_filter, v4_property_filter):
    """Converts a v3 Filter to a v4 PropertyFilter.

    Args:
      v3_filter: a datastore_pb.Filter
      v4_property_filter: a datastore_v4_pb.PropertyFilter to populate

    Raises:
      InvalidConversionError if the filter cannot be converted
    """
    check_conversion(v3_filter.property_size() == 1,
                     'invalid filter')
    check_conversion(v3_filter.op() <= 5,
                     'unsupported filter op: %d' % v3_filter.op())
    v4_property_filter.Clear()
    v4_property_filter.set_operator(v3_filter.op())
    v4_property_filter.mutable_property().set_name(v3_filter.property(0).name())
    self._entity_converter.v3_property_to_v4_value(
        v3_filter.property(0), True, v4_property_filter.mutable_value())

  def _v3_query_to_v4_ancestor_filter(self, v3_query, v4_property_filter):
    """Converts a v3 Query to a v4 ancestor PropertyFilter.

    Args:
      v3_query: a datastore_pb.Query
      v4_property_filter: a datastore_v4_pb.PropertyFilter to populate
    """
    v4_property_filter.Clear()
    v4_property_filter.set_operator(
        datastore_v4_pb.PropertyFilter.HAS_ANCESTOR)
    prop = v4_property_filter.mutable_property()
    prop.set_name(PROPERTY_NAME_KEY)
    self._entity_converter.v3_to_v4_key(
        v3_query.ancestor(),
        v4_property_filter.mutable_value().mutable_key_value())

  def v3_order_to_v4_order(self, v3_order, v4_order):
    """Converts a v3 Query order to a v4 PropertyOrder.

    Args:
      v3_order: a datastore_pb.Query.Order
      v4_order: a datastore_v4_pb.PropertyOrder to populate
    """
    v4_order.mutable_property().set_name(v3_order.property())
    if v3_order.has_direction():
      v4_order.set_direction(v3_order.direction())
def get_entity_converter(id_resolver=None):
  """Returns a converter for v3 and v1 entities and keys.

  Args:
    id_resolver: An IdResolver for project id resolution; when omitted,
      an identity resolver (project id == app id) is used.
  """
  return _EntityConverter(id_resolver or _IdentityIdResolver())
|
|
import copy
from django.utils import six
class MergeDict(object):
    """
    A read-only dict-like view over several underlying dictionaries,
    passed in the constructor.

    Lookups try each dictionary in construction order; when a key appears
    in more than one of them, the first occurrence wins.
    """
    def __init__(self, *dicts):
        self.dicts = dicts

    def __bool__(self):
        # Truthy iff at least one constituent dict is non-empty.
        return any(self.dicts)

    def __nonzero__(self):
        # Python 2 truthiness hook; delegate to __bool__.
        return type(self).__bool__(self)

    def __getitem__(self, key):
        for candidate in self.dicts:
            try:
                return candidate[key]
            except KeyError:
                continue
        raise KeyError(key)

    def __copy__(self):
        return self.__class__(*self.dicts)

    def get(self, key, default=None):
        try:
            return self[key]
        except KeyError:
            return default

    # This is used by MergeDicts of MultiValueDicts.
    def getlist(self, key):
        for candidate in self.dicts:
            if key in candidate:
                return candidate.getlist(key)
        return []

    def _iteritems(self):
        # First occurrence of each key wins, matching __getitem__.
        seen = set()
        for candidate in self.dicts:
            for key, value in six.iteritems(candidate):
                if key in seen:
                    continue
                seen.add(key)
                yield key, value

    def _iterkeys(self):
        for key, _ in self._iteritems():
            yield key

    def _itervalues(self):
        for _, value in self._iteritems():
            yield value

    if six.PY3:
        items = _iteritems
        keys = _iterkeys
        values = _itervalues
    else:
        iteritems = _iteritems
        iterkeys = _iterkeys
        itervalues = _itervalues

        def items(self):
            return list(self.iteritems())

        def keys(self):
            return list(self.iterkeys())

        def values(self):
            return list(self.itervalues())

    def has_key(self, key):
        return any(key in candidate for candidate in self.dicts)

    __contains__ = has_key

    __iter__ = _iterkeys

    def copy(self):
        """Returns a copy of this object."""
        return self.__copy__()

    def __str__(self):
        '''
        Returns a plain-dict rendering such as

            "{'key1': 'val1', 'key2': 'val2', 'key3': 'val3'}"

        instead of the generic "<object meta-data>" inherited from object.
        '''
        return str(dict(self.items()))

    def __repr__(self):
        '''
        Returns a constructor-style rendering such as

            MergeDict({'key1': 'val1', 'key2': 'val2'}, {'key3': 'val3'})

        instead of generic "<object meta-data>" inherited from object.
        '''
        dictreprs = ', '.join(repr(d) for d in self.dicts)
        return '%s(%s)' % (self.__class__.__name__, dictreprs)
class SortedDict(dict):
    """
    A dictionary that keeps its keys in the order in which they're inserted.
    """
    def __new__(cls, *args, **kwargs):
        instance = super(SortedDict, cls).__new__(cls, *args, **kwargs)
        # Ensure keyOrder exists even when __init__ is bypassed
        # (e.g. during unpickling or copy protocols).
        instance.keyOrder = []
        return instance

    def __init__(self, data=None):
        if data is None or isinstance(data, dict):
            data = data or []
            super(SortedDict, self).__init__(data)
            # Iterating a dict yields its keys, so this captures the
            # source dict's iteration order.
            self.keyOrder = list(data) if data else []
        else:
            # data is an iterable of (key, value) pairs.
            super(SortedDict, self).__init__()
            super_set = super(SortedDict, self).__setitem__
            for key, value in data:
                # Take the ordering from first key
                if key not in self:
                    self.keyOrder.append(key)
                # But override with last value in data (dict() does this)
                super_set(key, value)

    def __deepcopy__(self, memo):
        # Rebuild from ordered (key, value) pairs so the copy preserves
        # insertion order while deep-copying only the values.
        return self.__class__([(key, copy.deepcopy(value, memo))
                               for key, value in self.items()])

    def __copy__(self):
        # The Python's default copy implementation will alter the state
        # of self. The reason for this seems complex but is likely related to
        # subclassing dict.
        return self.copy()

    def __setitem__(self, key, value):
        # Only first insertion affects ordering; overwrites keep the
        # original position.
        if key not in self:
            self.keyOrder.append(key)
        super(SortedDict, self).__setitem__(key, value)

    def __delitem__(self, key):
        super(SortedDict, self).__delitem__(key)
        self.keyOrder.remove(key)

    def __iter__(self):
        return iter(self.keyOrder)

    def __reversed__(self):
        return reversed(self.keyOrder)

    def pop(self, k, *args):
        result = super(SortedDict, self).pop(k, *args)
        try:
            self.keyOrder.remove(k)
        except ValueError:
            # Key wasn't in the dictionary in the first place. No problem.
            pass
        return result

    def popitem(self):
        # Note: removes whichever item dict.popitem chooses, not
        # necessarily the most recently inserted one.
        result = super(SortedDict, self).popitem()
        self.keyOrder.remove(result[0])
        return result

    def _iteritems(self):
        for key in self.keyOrder:
            yield key, self[key]

    def _iterkeys(self):
        for key in self.keyOrder:
            yield key

    def _itervalues(self):
        for key in self.keyOrder:
            yield self[key]

    # Expose the ordered iterators under version-appropriate names:
    # Python 3 dict views are iterators, Python 2 has the iter* variants
    # plus list-returning items/keys/values.
    if six.PY3:
        items = _iteritems
        keys = _iterkeys
        values = _itervalues
    else:
        iteritems = _iteritems
        iterkeys = _iterkeys
        itervalues = _itervalues

        def items(self):
            return [(k, self[k]) for k in self.keyOrder]

        def keys(self):
            return self.keyOrder[:]

        def values(self):
            return [self[k] for k in self.keyOrder]

    def update(self, dict_):
        # Route through __setitem__ so new keys are appended to keyOrder.
        for k, v in six.iteritems(dict_):
            self[k] = v

    def setdefault(self, key, default):
        if key not in self:
            self.keyOrder.append(key)
        return super(SortedDict, self).setdefault(key, default)

    def copy(self):
        """Returns a copy of this object."""
        # This way of initializing the copy means it works for subclasses, too.
        return self.__class__(self)

    def __repr__(self):
        """
        Replaces the normal dict.__repr__ with a version that returns the keys
        in their sorted order.
        """
        return '{%s}' % ', '.join(['%r: %r' % (k, v) for k, v in six.iteritems(self)])

    def clear(self):
        super(SortedDict, self).clear()
        self.keyOrder = []
class MultiValueDictKeyError(KeyError):
    # Raised by MultiValueDict.__getitem__ for missing keys; subclasses
    # KeyError so existing ``except KeyError`` handlers keep working.
    pass
class MultiValueDict(dict):
    """
    A subclass of dictionary customized to handle multiple values for the
    same key.

    >>> d = MultiValueDict({'name': ['Adrian', 'Simon'], 'position': ['Developer']})
    >>> d['name']
    'Simon'
    >>> d.getlist('name')
    ['Adrian', 'Simon']
    >>> d.getlist('doesnotexist')
    []
    >>> d.getlist('doesnotexist', ['Adrian', 'Simon'])
    ['Adrian', 'Simon']
    >>> d.get('lastname', 'nonexistent')
    'nonexistent'
    >>> d.setlist('lastname', ['Holovaty', 'Willison'])

    This class exists to solve the irritating problem raised by cgi.parse_qs,
    which returns a list for every key, even though most Web forms submit
    single name-value pairs.
    """
    def __init__(self, key_to_list_mapping=()):
        super(MultiValueDict, self).__init__(key_to_list_mapping)

    def __repr__(self):
        return "<%s: %s>" % (self.__class__.__name__,
                             super(MultiValueDict, self).__repr__())

    def __getitem__(self, key):
        """
        Returns the last data value for this key, or [] if it's an empty list;
        raises KeyError if not found.
        """
        try:
            list_ = super(MultiValueDict, self).__getitem__(key)
        except KeyError:
            raise MultiValueDictKeyError("Key %r not found in %r" % (key, self))
        try:
            return list_[-1]
        except IndexError:
            return []

    def __setitem__(self, key, value):
        # Plain assignment always stores a one-element list.
        super(MultiValueDict, self).__setitem__(key, [value])

    def __copy__(self):
        return self.__class__([
            (k, v[:])
            for k, v in self.lists()
        ])

    def __deepcopy__(self, memo=None):
        if memo is None:
            memo = {}
        result = self.__class__()
        memo[id(self)] = result
        for key, value in dict.items(self):
            dict.__setitem__(result, copy.deepcopy(key, memo),
                             copy.deepcopy(value, memo))
        return result

    def __getstate__(self):
        obj_dict = self.__dict__.copy()
        obj_dict['_data'] = dict([(k, self.getlist(k)) for k in self])
        return obj_dict

    def __setstate__(self, obj_dict):
        data = obj_dict.pop('_data', {})
        for k, v in data.items():
            self.setlist(k, v)
        self.__dict__.update(obj_dict)

    def get(self, key, default=None):
        """
        Returns the last data value for the passed key. If key doesn't exist
        or value is an empty list, then default is returned.
        """
        try:
            val = self[key]
        except KeyError:
            return default
        if val == []:
            return default
        return val

    def getlist(self, key, default=None):
        """
        Returns the list of values for the passed key. If key doesn't exist,
        then a default value is returned.
        """
        try:
            return super(MultiValueDict, self).__getitem__(key)
        except KeyError:
            if default is None:
                return []
            return default

    def setlist(self, key, list_):
        super(MultiValueDict, self).__setitem__(key, list_)

    def setdefault(self, key, default=None):
        if key not in self:
            self[key] = default
            # Do not return default here because __setitem__() may store
            # another value -- QueryDict.__setitem__() does. Look it up.
        return self[key]

    def setlistdefault(self, key, default_list=None):
        if key not in self:
            if default_list is None:
                default_list = []
            self.setlist(key, default_list)
            # Do not return default_list here because setlist() may store
            # another value -- QueryDict.setlist() does. Look it up.
        return self.getlist(key)

    def appendlist(self, key, value):
        """Appends an item to the internal list associated with key."""
        self.setlistdefault(key).append(value)

    def _iteritems(self):
        """
        Yields (key, value) pairs, where value is the last item in the list
        associated with the key.
        """
        for key in self:
            yield key, self[key]

    def _iterlists(self):
        """Yields (key, list) pairs."""
        return six.iteritems(super(MultiValueDict, self))

    def _itervalues(self):
        """Yield the last value on every key list."""
        for key in self:
            yield self[key]

    if six.PY3:
        items = _iteritems
        lists = _iterlists
        values = _itervalues
    else:
        iteritems = _iteritems
        iterlists = _iterlists
        itervalues = _itervalues

        # Bug fix: these list-returning (Python 2) variants must live inside
        # the `else` branch. At class level they shadowed the Python 3
        # assignments above, and on Python 3 items() then called
        # self.iteritems(), which does not exist -> AttributeError.
        def items(self):
            return list(self.iteritems())

        def lists(self):
            return list(self.iterlists())

        def values(self):
            return list(self.itervalues())

    def copy(self):
        """Returns a shallow copy of this object."""
        return copy.copy(self)

    def update(self, *args, **kwargs):
        """
        update() extends rather than replaces existing key lists.
        Also accepts keyword args.
        """
        if len(args) > 1:
            raise TypeError("update expected at most 1 arguments, got %d" % len(args))
        if args:
            other_dict = args[0]
            if isinstance(other_dict, MultiValueDict):
                for key, value_list in other_dict.lists():
                    self.setlistdefault(key).extend(value_list)
            else:
                try:
                    for key, value in other_dict.items():
                        self.setlistdefault(key).append(value)
                except TypeError:
                    raise ValueError("MultiValueDict.update() takes either a MultiValueDict or dictionary")
        for key, value in six.iteritems(kwargs):
            self.setlistdefault(key).append(value)

    def dict(self):
        """
        Returns current object as a dict with singular values.
        """
        return dict((key, self[key]) for key in self)
class ImmutableList(tuple):
    """
    A tuple-like object that raises useful errors when it is asked to mutate.

    Example::

        >>> a = ImmutableList(range(5), warning="You cannot mutate this.")
        >>> a[3] = '4'
        Traceback (most recent call last):
        ...
        AttributeError: You cannot mutate this.
    """

    def __new__(cls, *args, **kwargs):
        # Pull out our extra keyword before delegating to tuple.__new__.
        warning = kwargs.pop('warning', 'ImmutableList object is immutable.')
        self = tuple.__new__(cls, *args, **kwargs)
        self.warning = warning
        return self

    def complain(self, *args, **kwargs):
        # Raise the configured warning: directly when it is already an
        # exception instance, wrapped in AttributeError otherwise.
        if isinstance(self.warning, Exception):
            raise self.warning
        raise AttributeError(self.warning)

    # Every mutating operation is routed to complain().
    __delitem__ = complain
    __delslice__ = complain
    __iadd__ = complain
    __imul__ = complain
    __setitem__ = complain
    __setslice__ = complain
    append = complain
    extend = complain
    insert = complain
    pop = complain
    remove = complain
    sort = complain
    reverse = complain
class DictWrapper(dict):
    """
    Wraps accesses to a dictionary so that certain values (those starting with
    the specified prefix) are passed through a function before being returned.
    The prefix is removed before looking up the real value.

    Used by the SQL construction code to ensure that values are correctly
    quoted before being used.
    """

    def __init__(self, data, func, prefix):
        super(DictWrapper, self).__init__(data)
        self.func = func
        self.prefix = prefix

    def __getitem__(self, key):
        """
        Retrieve the real value after stripping ``self.prefix`` from the key
        when present; prefixed lookups are filtered through ``self.func``,
        plain lookups return the raw value.
        """
        prefixed = key.startswith(self.prefix)
        if prefixed:
            key = key[len(self.prefix):]
        value = super(DictWrapper, self).__getitem__(key)
        return self.func(value) if prefixed else value
|
|
# -*- coding: utf-8 -*-
import furl
import httplib as http
import urllib
import markupsafe
from django.utils import timezone
from flask import request
from modularodm import Q
from modularodm.exceptions import NoResultsFound
from modularodm.exceptions import ValidationError
from modularodm.exceptions import ValidationValueError
from framework import forms, sentry, status
from framework import auth as framework_auth
from framework.auth import exceptions
from framework.auth import cas, campaigns
from framework.auth import logout as osf_logout
from framework.auth import get_user
from framework.auth.exceptions import DuplicateEmailError, ExpiredTokenError, InvalidTokenError
from framework.auth.core import generate_verification_key
from framework.auth.decorators import block_bing_preview, collect_auth, must_be_logged_in
from framework.auth.forms import ResendConfirmationForm, ForgotPasswordForm, ResetPasswordForm
from framework.auth.utils import ensure_external_identity_uniqueness, validate_recaptcha
from framework.exceptions import HTTPError
from framework.flask import redirect # VOL-aware redirect
from framework.sessions.utils import remove_sessions_for_user, remove_session
from framework.sessions import get_session
from website import settings, mails, language
from website.models import User
from website.util import web_url_for
from website.util.time import throttle_period_expired
from website.util.sanitize import strip_html
from osf.models.preprint_provider import PreprintProvider
@block_bing_preview
@collect_auth
def reset_password_get(auth, uid=None, token=None):
    """
    View for user to land on the reset password page.
    HTTp Method: GET

    :param auth: the authentication state
    :param uid: the user id
    :param token: the token in verification key
    :return
    :raises: HTTPError(http.BAD_REQUEST) if verification key for the user is invalid, has expired or was used
    """
    # Logged-in users are logged out first and bounced back to this page.
    if auth.logged_in:
        return auth_logout(redirect_url=request.url)
    # The link is only honored for a matching, still-valid `uid`/`token` pair.
    user_obj = User.load(uid)
    if not user_obj or not user_obj.verify_password_token(token=token):
        raise HTTPError(http.BAD_REQUEST, data={
            'message_short': 'Invalid Request.',
            'message_long': 'The requested URL is invalid, has expired, or was already used',
        })
    # Rotate the verification key (v2) so the link cannot be replayed.
    user_obj.verification_key_v2 = generate_verification_key(verification_type='password')
    user_obj.save()
    return {
        'uid': user_obj._id,
        'token': user_obj.verification_key_v2['token'],
    }
def reset_password_post(uid=None, token=None):
    """
    View for user to submit reset password form.
    HTTP Method: POST

    :param uid: the user id
    :param token: the token in verification key
    :return:
    :raises: HTTPError(http.BAD_REQUEST) if verification key for the user is invalid, has expired or was used
    """
    form = ResetPasswordForm(request.form)
    # Check if request bears a valid pair of `uid` and `token`
    user_obj = User.load(uid)
    if not (user_obj and user_obj.verify_password_token(token=token)):
        error_data = {
            'message_short': 'Invalid Request.',
            'message_long': 'The requested URL is invalid, has expired, or was already used',
        }
        raise HTTPError(http.BAD_REQUEST, data=error_data)
    if not form.validate():
        # Don't go anywhere
        forms.push_errors_to_status(form.errors)
    else:
        # clear verification key (v2), so the reset link is single-use
        user_obj.verification_key_v2 = {}
        # new verification key (v1) for CAS
        user_obj.verification_key = generate_verification_key(verification_type=None)
        try:
            user_obj.set_password(form.password.data)
            user_obj.save()
        except exceptions.ChangePasswordError as error:
            for message in error.messages:
                status.push_status_message(message, kind='warning', trust=False)
        else:
            status.push_status_message('Password reset', kind='success', trust=False)
            # redirect to CAS and authenticate the user automatically with one-time verification key.
            return redirect(cas.get_login_url(
                web_url_for('user_account', _absolute=True),
                username=user_obj.username,
                verification_key=user_obj.verification_key
            ))
    # Fall through on validation or password errors: re-render the form.
    # NOTE(review): verification_key_v2 was NOT rotated on this path, so the
    # same link remains usable for a retry -- confirm this is intentional.
    return {
        'uid': user_obj._id,
        'token': user_obj.verification_key_v2['token'],
    }
@collect_auth
def forgot_password_get(auth):
    """
    View for user to land on the forgot password page.
    HTTP Method: GET

    :param auth: the authentication context
    :return
    """
    # Authenticated users are logged out first and sent back to this page;
    # everyone else just gets the (empty) template context.
    if auth.logged_in:
        return auth_logout(redirect_url=request.url)
    return {}
def forgot_password_post():
    """
    View for user to submit forgot password form.
    HTTP Method: POST

    :return {}
    """
    form = ForgotPasswordForm(request.form, prefix='forgot_password')
    if not form.validate():
        # Don't go anywhere
        forms.push_errors_to_status(form.errors)
    else:
        email = form.email.data
        # Deliberately the same message whether or not the account exists,
        # so this endpoint cannot be used to probe for registered emails.
        status_message = ('If there is an OSF account associated with {0}, an email with instructions on how to '
                          'reset the OSF password has been sent to {0}. If you do not receive an email and believe '
                          'you should have, please contact OSF Support. ').format(email)
        kind = 'success'
        # check if the user exists
        user_obj = get_user(email=email)
        if user_obj:
            # rate limit forgot_password_post
            if not throttle_period_expired(user_obj.email_last_sent, settings.SEND_EMAIL_THROTTLE):
                status_message = 'You have recently requested to change your password. Please wait a few minutes ' \
                                 'before trying again.'
                kind = 'error'
            else:
                # TODO [OSF-6673]: Use the feature in [OSF-6998] for user to resend claim email.
                # if the user account is not claimed yet
                if (user_obj.is_invited and
                        user_obj.unclaimed_records and
                        not user_obj.date_last_login and
                        not user_obj.is_claimed and
                        not user_obj.is_registered):
                    status_message = 'You cannot reset password on this account. Please contact OSF Support.'
                    kind = 'error'
                else:
                    # new random verification key (v2)
                    user_obj.verification_key_v2 = generate_verification_key(verification_type='password')
                    user_obj.email_last_sent = timezone.now()
                    user_obj.save()
                    reset_link = furl.urljoin(
                        settings.DOMAIN,
                        web_url_for(
                            'reset_password_get',
                            uid=user_obj._id,
                            token=user_obj.verification_key_v2['token']
                        )
                    )
                    mails.send_mail(
                        to_addr=email,
                        mail=mails.FORGOT_PASSWORD,
                        reset_link=reset_link
                    )
        status.push_status_message(status_message, kind=kind, trust=False)
    return {}
def login_and_register_handler(auth, login=True, campaign=None, next_url=None, logout=None):
    """
    Non-view helper to handle `login` and `register` requests.

    :param auth: the auth context
    :param login: `True` if `GET /login`, `False` if `GET /register`
    :param campaign: a target campaign defined in `auth.campaigns`
    :param next_url: the service url for CAS login or redirect url for OSF
    :param logout: used only for `claim_user_registered`
    :return: data object that contains actions for `auth_register` and `auth_login`
    :raises: http.BAD_REQUEST
    """
    # Only allow redirects which are relative root or full domain. Disallows external redirects.
    if next_url and not validate_next_url(next_url):
        raise HTTPError(http.BAD_REQUEST)
    # NOTE: 'status_code' is either an HTTP status or the literal string
    # 'auth_logout', which tells auth_register to run the logout flow first.
    data = {
        'status_code': http.FOUND if login else http.OK,
        'next_url': next_url,
        'campaign': None,
        'must_login_warning': False,
    }
    # login or register with campaign parameter
    if campaign:
        if validate_campaign(campaign):
            # GET `/register` or '/login` with `campaign=institution`
            # unlike other campaigns, institution login serves as an alternative for authentication
            if campaign == 'institution':
                next_url = web_url_for('dashboard', _absolute=True)
                data['status_code'] = http.FOUND
                if auth.logged_in:
                    data['next_url'] = next_url
                else:
                    data['next_url'] = cas.get_login_url(next_url, campaign='institution')
            # for non-institution campaigns
            else:
                destination = next_url if next_url else campaigns.campaign_url_for(campaign)
                if auth.logged_in:
                    # if user is already logged in, go to the campaign landing page
                    data['status_code'] = http.FOUND
                    data['next_url'] = destination
                else:
                    # if user is logged out, go to the osf register page with campaign context
                    if login:
                        # `GET /login?campaign=...`
                        data['next_url'] = web_url_for('auth_register', campaign=campaign, next=destination)
                    else:
                        # `GET /register?campaign=...`
                        data['campaign'] = campaign
                        if campaigns.is_proxy_login(campaign):
                            data['next_url'] = web_url_for(
                                'auth_login',
                                next=destination,
                                _absolute=True
                            )
                        else:
                            data['next_url'] = destination
        else:
            # invalid campaign, inform sentry and redirect to non-campaign sign up or sign in
            redirect_view = 'auth_login' if login else 'auth_register'
            data['status_code'] = http.FOUND
            data['next_url'] = web_url_for(redirect_view, campaigns=None, next=next_url)
            data['campaign'] = None
            sentry.log_message(
                '{} is not a valid campaign. Please add it if this is a new one'.format(campaign)
            )
    # login or register with next parameter
    elif next_url:
        if logout:
            # handle `claim_user_registered`
            data['next_url'] = next_url
            if auth.logged_in:
                # log user out and come back
                data['status_code'] = 'auth_logout'
            else:
                # after logout, land on the register page with "must_login" warning
                data['status_code'] = http.OK
                data['must_login_warning'] = True
        elif auth.logged_in:
            # if user is already logged in, redirect to `next_url`
            data['status_code'] = http.FOUND
            data['next_url'] = next_url
        elif login:
            # `/login?next=next_url`: go to CAS login page with current request url as service url
            data['status_code'] = http.FOUND
            data['next_url'] = cas.get_login_url(request.url)
        else:
            # `/register?next=next_url`: land on OSF register page with request url as next url
            data['status_code'] = http.OK
            data['next_url'] = request.url
    else:
        # `/login/` or `/register/` without any parameter
        if auth.logged_in:
            data['status_code'] = http.FOUND
            data['next_url'] = web_url_for('dashboard', _absolute=True)
    return data
@collect_auth
def auth_login(auth):
    """
    View (no template) for OSF Login.
    Redirect user based on `data` returned from `login_and_register_handler`.

    `/login` only takes valid campaign, valid next, or no query parameter
    `login_and_register_handler()` handles the following cases:
        if campaign and logged in, go to campaign landing page (or valid next_url if presents)
        if campaign and logged out, go to campaign register page (with next_url if presents)
        if next_url and logged in, go to next url
        if next_url and logged out, go to cas login page with current request url as service parameter
        if none, go to `/dashboard` which is decorated by `@must_be_logged_in`

    :param auth: the auth context
    :return: redirects
    """
    data = login_and_register_handler(
        auth,
        login=True,
        campaign=request.args.get('campaign'),
        next_url=request.args.get('next'),
    )
    # Only an explicit redirect is returned; anything else falls through to
    # the route's default rendering (implicit None).
    if data['status_code'] == http.FOUND:
        return redirect(data['next_url'])
@collect_auth
def auth_register(auth):
    """
    View for OSF register. Land on the register page, redirect or go to `auth_logout`
    depending on `data` returned by `login_and_register_handler`.

    `/register` only takes a valid campaign, a valid next, the logout flag or no query parameter
    `login_and_register_handler()` handles the following cases:
        if campaign and logged in, go to campaign landing page (or valid next_url if presents)
        if campaign and logged out, go to campaign register page (with next_url if presents)
        if next_url and logged in, go to next url
        if next_url and logged out, go to cas login page with current request url as service parameter
        if next_url and logout flag, log user out first and then go to the next_url
        if none, go to `/dashboard` which is decorated by `@must_be_logged_in`

    :param auth: the auth context
    :return: land, redirect or `auth_logout`
    :raise: http.BAD_REQUEST
    """
    context = {}
    # a target campaign in `auth.campaigns`
    campaign = request.args.get('campaign')
    # the service url for CAS login or redirect url for OSF
    next_url = request.args.get('next')
    # used only for `claim_user_registered`
    logout = request.args.get('logout')
    # logout must have next_url
    if logout and not next_url:
        raise HTTPError(http.BAD_REQUEST)
    data = login_and_register_handler(auth, login=False, campaign=campaign, next_url=next_url, logout=logout)
    # land on register page
    if data['status_code'] == http.OK:
        if data['must_login_warning']:
            status.push_status_message(language.MUST_LOGIN, trust=False)
        destination = cas.get_login_url(data['next_url'])
        # "Already have and account?" link
        context['non_institution_login_url'] = destination
        # "Sign In" button in navigation bar, overwrite the default value set in routes.py
        context['login_url'] = destination
        # "Login through your institution" link
        context['institution_login_url'] = cas.get_login_url(data['next_url'], campaign='institution')
        # every non-OSF preprint provider gets a branded campaign entry keyed
        # by '<provider-id>-preprints'
        context['preprint_campaigns'] = {k._id + '-preprints': {
            'id': k._id,
            'name': k.name,
            'logo_path': settings.PREPRINTS_ASSETS + k._id + '/square_color_no_transparent.png'
        } for k in PreprintProvider.objects.all() if k._id != 'osf'}
        context['campaign'] = data['campaign']
        return context, http.OK
    # redirect to url
    elif data['status_code'] == http.FOUND:
        return redirect(data['next_url'])
    # go to other views
    elif data['status_code'] == 'auth_logout':
        return auth_logout(redirect_url=data['next_url'])
    raise HTTPError(http.BAD_REQUEST)
@collect_auth
def auth_logout(auth, redirect_url=None, next_url=None):
    """
    Log out, delete current session and remove OSF cookie.
    If next url is valid and auth is logged in, redirect to CAS logout endpoint with the current request url as service.
    If next url is valid and auth is logged out, redirect directly to the next url.
    Otherwise, redirect to CAS logout or login endpoint with redirect url as service.
    The CAS logout endpoint which clears sessions and cookies for CAS and Shibboleth.
    HTTP Method: GET

    Note 1: OSF tells CAS where it wants to be redirected back after successful logout. However, CAS logout flow may not
    respect this url if user is authenticated through remote identity provider.
    Note 2: The name of the query parameter is `next`, `next_url` is used to avoid python reserved word.

    :param auth: the authentication context
    :param redirect_url: url to DIRECTLY redirect after CAS logout, default is `OSF/goodbye`
    :param next_url: url to redirect after OSF logout, which is after CAS logout
    :return: the response
    """
    # For `?next=`:
    #   takes priority
    #   the url must be a valid OSF next url,
    #   the full request url is set to CAS service url,
    #   does not support `reauth`
    # For `?redirect_url=`:
    #   the url must be valid CAS service url
    #   the redirect url is set to CAS service url.
    #   support `reauth`

    # logout/?next=<an OSF verified next url>
    next_url = next_url or request.args.get('next', None)
    if next_url and validate_next_url(next_url):
        cas_logout_endpoint = cas.get_logout_url(request.url)
        if auth.logged_in:
            resp = redirect(cas_logout_endpoint)
        else:
            resp = redirect(next_url)
    # logout/ or logout/?redirect_url=<a CAS verified redirect url>
    else:
        redirect_url = redirect_url or request.args.get('redirect_url') or web_url_for('goodbye', _absolute=True)
        # set redirection to CAS log out (or log in if `reauth` is present)
        if 'reauth' in request.args:
            cas_endpoint = cas.get_login_url(redirect_url)
        else:
            cas_endpoint = cas.get_logout_url(redirect_url)
        resp = redirect(cas_endpoint)
    # perform OSF logout regardless of which redirect was chosen
    osf_logout()
    # set response to delete OSF cookie
    resp.delete_cookie(settings.COOKIE_NAME, domain=settings.OSF_COOKIE_DOMAIN)
    return resp
def auth_email_logout(token, user):
    """
    When a user is adding an email or merging an account, add the email to the user and log them out.

    :param token: the email verification token
    :param user: the User that owns the pending verification
    :raises: HTTPError(http.BAD_REQUEST) if the token is invalid or expired
    :return: a redirect response to CAS logout (then back to CAS login)
    """
    redirect_url = cas.get_logout_url(service_url=cas.get_login_url(service_url=web_url_for('index', _absolute=True)))
    try:
        unconfirmed_email = user.get_unconfirmed_email_for_token(token)
    except InvalidTokenError:
        raise HTTPError(http.BAD_REQUEST, data={
            'message_short': 'Bad token',
            'message_long': 'The provided token is invalid.'
        })
    except ExpiredTokenError:
        status.push_status_message('The private link you used is expired.')
        raise HTTPError(http.BAD_REQUEST, data={
            'message_short': 'Expired link',
            'message_long': 'The private link you used is expired.'
        })
    # Look for an existing account that already owns this email (merge case).
    try:
        user_merge = User.find_one(Q('emails', 'eq', unconfirmed_email))
    except NoResultsFound:
        user_merge = False
    if user_merge:
        # kill the other account's sessions before the merge proceeds
        remove_sessions_for_user(user_merge)
    # Mark the pending verification confirmed; presumably finalized at the
    # user's next login -- TODO confirm against the login flow.
    user.email_verifications[token]['confirmed'] = True
    user.save()
    remove_sessions_for_user(user)
    resp = redirect(redirect_url)
    resp.delete_cookie(settings.COOKIE_NAME, domain=settings.OSF_COOKIE_DOMAIN)
    return resp
@block_bing_preview
@collect_auth
def external_login_confirm_email_get(auth, uid, token):
    """
    View for email confirmation links when user first login through external identity provider.
    HTTP Method: GET

    When users click the confirm link, they are expected not to be logged in. If not, they will be logged out first and
    redirected back to this view. After OSF verifies the link and performs all actions, they will be automatically
    logged in through CAS and redirected back to this view again being authenticated.

    :param auth: the auth context
    :param uid: the user's primary key
    :param token: the verification token
    """
    user = User.load(uid)
    if not user:
        raise HTTPError(http.BAD_REQUEST)
    destination = request.args.get('destination')
    if not destination:
        raise HTTPError(http.BAD_REQUEST)
    # if user is already logged in
    if auth and auth.user:
        # if it is a wrong user
        if auth.user._id != user._id:
            return auth_logout(redirect_url=request.url)
        # if it is the expected user
        new = request.args.get('new', None)
        if destination in campaigns.get_campaigns():
            # external domain takes priority
            campaign_url = campaigns.external_campaign_url_for(destination)
            if not campaign_url:
                campaign_url = campaigns.campaign_url_for(destination)
            return redirect(campaign_url)
        if new:
            status.push_status_message(language.WELCOME_MESSAGE, kind='default', jumbotron=True, trust=True)
        return redirect(web_url_for('dashboard'))
    # token is invalid
    if token not in user.email_verifications:
        raise HTTPError(http.BAD_REQUEST)
    verification = user.email_verifications[token]
    email = verification['email']
    # NOTE: `.keys()[0]` relies on Python 2, where dict.keys() returns a list.
    provider = verification['external_identity'].keys()[0]
    provider_id = verification['external_identity'][provider].keys()[0]
    # wrong provider
    if provider not in user.external_identity:
        raise HTTPError(http.BAD_REQUEST)
    external_status = user.external_identity[provider][provider_id]
    try:
        ensure_external_identity_uniqueness(provider, provider_id, user)
    except ValidationError as e:
        raise HTTPError(http.FORBIDDEN, e.message)
    if not user.is_registered:
        user.register(email)
    if email.lower() not in user.emails:
        user.emails.append(email.lower())
    user.date_last_logged_in = timezone.now()
    user.external_identity[provider][provider_id] = 'VERIFIED'
    user.social[provider.lower()] = provider_id
    del user.email_verifications[token]
    # one-time key lets CAS authenticate this user without a password below
    user.verification_key = generate_verification_key()
    user.save()
    service_url = request.url
    if external_status == 'CREATE':
        # brand-new OSF account created through this external identity
        mails.send_mail(
            to_addr=user.username,
            mail=mails.WELCOME,
            mimetype='html',
            user=user
        )
        service_url += '&{}'.format(urllib.urlencode({'new': 'true'}))
    elif external_status == 'LINK':
        # existing OSF account linked to the external identity
        mails.send_mail(
            user=user,
            to_addr=user.username,
            mail=mails.EXTERNAL_LOGIN_LINK_SUCCESS,
            external_id_provider=provider,
        )
    # redirect to CAS and authenticate the user with the verification key
    return redirect(cas.get_login_url(
        service_url,
        username=user.username,
        verification_key=user.verification_key
    ))
@block_bing_preview
@collect_auth
def confirm_email_get(token, auth=None, **kwargs):
    """
    View for email confirmation links. Authenticates and redirects to user settings page if confirmation is successful,
    otherwise shows an "Expired Link" error.
    HTTP Method: GET

    :param token: the email confirmation token
    :param auth: the auth context, collected by the decorator
    :param kwargs: must contain `uid`, the user's primary key
    :raises: HTTPError(http.NOT_FOUND) if no user exists for `uid`
    :raises: HTTPError(http.BAD_REQUEST) if the confirmation token is invalid
    """
    user = User.load(kwargs['uid'])
    # Bug fix: check for a missing user *before* reading its attributes.
    # Previously `user.date_confirmed` was dereferenced first, so an unknown
    # uid raised AttributeError instead of returning 404.
    if user is None:
        raise HTTPError(http.NOT_FOUND)
    is_merge = 'confirm_merge' in request.args
    is_initial_confirmation = not user.date_confirmed
    log_out = request.args.get('logout', None)
    # if the user is merging or adding an email (they already are an osf user)
    if log_out:
        return auth_email_logout(token, user)
    # Bug fix: guard `merged_by` -- it is None for accounts that were never
    # merged, and dereferencing it crashed when a different user visited.
    if auth and auth.user and (auth.user._id == user._id or
                               (user.merged_by is not None and auth.user._id == user.merged_by._id)):
        if not is_merge:
            # determine if the user registered through a campaign
            campaign = campaigns.campaign_for_user(user)
            if campaign:
                return redirect(campaigns.campaign_url_for(campaign))
            # go to home page with push notification
            if len(auth.user.emails) == 1 and len(auth.user.email_verifications) == 0:
                status.push_status_message(language.WELCOME_MESSAGE, kind='default', jumbotron=True, trust=True)
            if token in auth.user.email_verifications:
                status.push_status_message(language.CONFIRM_ALTERNATE_EMAIL_ERROR, kind='danger', trust=True)
            return redirect(web_url_for('index'))
        status.push_status_message(language.MERGE_COMPLETE, kind='success', trust=False)
        return redirect(web_url_for('user_account'))
    try:
        user.confirm_email(token, merge=is_merge)
    except exceptions.EmailConfirmTokenError as e:
        raise HTTPError(http.BAD_REQUEST, data={
            'message_short': e.message_short,
            'message_long': e.message_long
        })
    if is_initial_confirmation:
        user.update_date_last_login()
        user.save()
        # send out our welcome message
        mails.send_mail(
            to_addr=user.username,
            mail=mails.WELCOME,
            mimetype='html',
            user=user
        )
    # new random verification key, allows CAS to authenticate the user w/o password one-time only.
    user.verification_key = generate_verification_key()
    user.save()
    # redirect to CAS and authenticate the user with a verification key.
    return redirect(cas.get_login_url(
        request.url,
        username=user.username,
        verification_key=user.verification_key
    ))
@must_be_logged_in
def unconfirmed_email_remove(auth=None):
    """
    Called at login if user cancels their merge or email add.
    HTTP Method: DELETE
    """
    user = auth.user
    json_body = request.get_json()
    if 'token' not in json_body:
        raise HTTPError(http.BAD_REQUEST, data={
            'message_short': 'Missing token',
            'message_long': 'Must provide a token'
        })
    user.clean_email_verifications(given_token=json_body['token'])
    user.save()
    return {
        'status': 'success',
        'removed_email': json_body['address']
    }, 200
@must_be_logged_in
def unconfirmed_email_add(auth=None):
    """
    Called at login if user confirms their merge or email add.
    HTTP Method: PUT

    :param auth: the auth context (injected by @must_be_logged_in)
    :return: (payload dict, 200) on success
    :raises: HTTPError(http.BAD_REQUEST) if the token is missing or invalid
    """
    user = auth.user
    json_body = request.get_json()
    try:
        token = json_body['token']
    except KeyError:
        raise HTTPError(http.BAD_REQUEST, data={
            'message_short': 'Missing token',
            'message_long': 'Must provide a token'
        })
    try:
        user.confirm_email(token, merge=True)
    except exceptions.InvalidTokenError:
        # Bug fix: previously this re-raised InvalidTokenError with
        # HTTPError-style arguments; the web layer only renders HTTPError,
        # so surface the failure as one.
        raise HTTPError(http.BAD_REQUEST, data={
            'message_short': 'Invalid user token',
            'message_long': 'The user token is invalid'
        })
    except exceptions.EmailConfirmTokenError as e:
        raise HTTPError(http.BAD_REQUEST, data={
            'message_short': e.message_short,
            'message_long': e.message_long
        })
    user.save()
    return {
        'status': 'success',
        'removed_email': json_body['address']
    }, 200
def send_confirm_email(user, email, renew=False, external_id_provider=None, external_id=None, destination=None):
    """
    Sends `user` a confirmation to the given `email`.

    :param user: the user
    :param email: the email
    :param renew: refresh the token
    :param external_id_provider: user's external id provider
    :param external_id: user's external id
    :param destination: the destination page to redirect after confirmation
    :return:
    :raises: KeyError if user does not have a confirmation token for the given email.
    """
    confirmation_url = user.get_confirmation_url(
        email,
        external=True,
        force=True,
        renew=renew,
        external_id_provider=external_id_provider,
        destination=destination
    )
    # An existing account already owning this email means a pending merge.
    try:
        merge_target = User.find_one(Q('emails', 'eq', email))
    except NoResultsFound:
        merge_target = None
    campaign = campaigns.campaign_for_user(user)
    branded_preprints_provider = None
    # Choose the appropriate email template to use and add existing_user flag if a merge or adding an email.
    if external_id_provider and external_id:
        # First time login through external identity provider, link or create an OSF account confirmation
        # NOTE(review): if the external status is neither 'CREATE' nor 'LINK',
        # mail_template is left unset and the send below raises NameError --
        # confirm upstream guarantees one of the two.
        if user.external_identity[external_id_provider][external_id] == 'CREATE':
            mail_template = mails.EXTERNAL_LOGIN_CONFIRM_EMAIL_CREATE
        elif user.external_identity[external_id_provider][external_id] == 'LINK':
            mail_template = mails.EXTERNAL_LOGIN_CONFIRM_EMAIL_LINK
    elif merge_target:
        # Merge account confirmation
        # `logout=1` makes the confirmation view log the user out first
        mail_template = mails.CONFIRM_MERGE
        confirmation_url = '{}?logout=1'.format(confirmation_url)
    elif user.is_active:
        # Add email confirmation
        mail_template = mails.CONFIRM_EMAIL
        confirmation_url = '{}?logout=1'.format(confirmation_url)
    elif campaign:
        # Account creation confirmation: from campaign
        mail_template = campaigns.email_template_for_campaign(campaign)
        if campaigns.is_proxy_login(campaign) and campaigns.get_service_provider(campaign) != 'OSF':
            branded_preprints_provider = campaigns.get_service_provider(campaign)
    else:
        # Account creation confirmation: from OSF
        mail_template = mails.INITIAL_CONFIRM_EMAIL
    mails.send_mail(
        email,
        mail_template,
        'plain',
        user=user,
        confirmation_url=confirmation_url,
        email=email,
        merge_target=merge_target,
        external_id_provider=external_id_provider,
        branded_preprints_provider=branded_preprints_provider
    )
def register_user(**kwargs):
    """
    Register new user account.

    HTTP Method: POST

    :param-json str email1:
    :param-json str email2:
    :param-json str password:
    :param-json str fullName:
    :param-json str campaign:

    :raises: HTTPError(http.BAD_REQUEST) if validation fails or user already exists
    """
    # Verify that email addresses match.
    # Note: Both `landing.mako` and `register.mako` already have this check on the form. Users can not submit the form
    # if emails do not match. However, this check should not be removed given we may use the raw api call directly.
    json_data = request.get_json()
    if str(json_data['email1']).lower() != str(json_data['email2']).lower():
        raise HTTPError(
            http.BAD_REQUEST,
            data=dict(message_long='Email addresses must match.')
        )
    # Verify that captcha is valid
    if settings.RECAPTCHA_SITE_KEY and not validate_recaptcha(json_data.get('g-recaptcha-response'), remote_ip=request.remote_addr):
        raise HTTPError(
            http.BAD_REQUEST,
            data=dict(message_long='Invalid Captcha')
        )
    try:
        # Consistently use the already-parsed `json_data` instead of mixing it
        # with repeated `request.json` lookups (same payload, single parse).
        full_name = strip_html(json_data['fullName'])
        campaign = json_data.get('campaign')
        if campaign and campaign not in campaigns.get_campaigns():
            campaign = None
        user = framework_auth.register_unconfirmed(
            json_data['email1'],
            json_data['password'],
            full_name,
            campaign=campaign,
        )
        framework_auth.signals.user_registered.send(user)
    except (ValidationValueError, DuplicateEmailError):
        raise HTTPError(
            http.BAD_REQUEST,
            data=dict(
                message_long=language.ALREADY_REGISTERED.format(
                    email=markupsafe.escape(json_data['email1'])
                )
            )
        )
    except ValidationError as e:
        raise HTTPError(
            http.BAD_REQUEST,
            data=dict(message_long=e.message)
        )
    if settings.CONFIRM_REGISTRATIONS_BY_EMAIL:
        send_confirm_email(user, email=user.username)
        message = language.REGISTRATION_SUCCESS.format(email=user.username)
        return {'message': message}
    else:
        return {'message': 'You may now log in.'}
@collect_auth
def resend_confirmation_get(auth):
    """
    View for user to land on resend confirmation page.
    HTTP Method: GET
    """
    # A logged-in user has no use for this page: log out and come back.
    if auth.logged_in:
        return auth_logout(redirect_url=request.url)
    return {
        'form': ResendConfirmationForm(request.form),
    }
@collect_auth
def resend_confirmation_post(auth):
    """
    View for user to submit resend confirmation form.
    HTTP Method: POST
    """
    # A logged-in user has no use for this page: log out and come back.
    if auth.logged_in:
        return auth_logout(redirect_url=request.url)
    form = ResendConfirmationForm(request.form)
    if not form.validate():
        forms.push_errors_to_status(form.errors)
        # Don't go anywhere
        return {'form': form}
    clean_email = form.email.data
    user = get_user(email=clean_email)
    # Default to a non-revealing success message so we don't leak whether the
    # email belongs to an account.
    status_message = ('If there is an OSF account associated with this unconfirmed email {0}, '
                      'a confirmation email has been resent to it. If you do not receive an email and believe '
                      'you should have, please contact OSF Support.').format(clean_email)
    kind = 'success'
    if user:
        if not throttle_period_expired(user.email_last_sent, settings.SEND_EMAIL_THROTTLE):
            status_message = ('You have recently requested to resend your confirmation email. '
                              'Please wait a few minutes before trying again.')
            kind = 'error'
        else:
            try:
                send_confirm_email(user, clean_email, renew=True)
            except KeyError:
                # already confirmed, redirect to dashboard
                status_message = 'This email {0} has already been confirmed.'.format(clean_email)
                kind = 'warning'
            user.email_last_sent = timezone.now()
            user.save()
    status.push_status_message(status_message, kind=kind, trust=False)
    # Don't go anywhere
    return {'form': form}
def external_login_email_get():
    """
    Landing view for first-time oauth-login user to enter their email address.
    HTTP Method: GET
    """
    session = get_session()
    # Only sessions flagged as a first-time external login may see this page.
    if not session.is_external_first_login:
        raise HTTPError(http.UNAUTHORIZED)
    return {
        'form': ResendConfirmationForm(request.form),
        'external_id_provider': session.data['auth_user_external_id_provider'],
    }
def external_login_email_post():
    """
    View to handle email submission for first-time oauth-login user.

    HTTP Method: POST

    If the submitted email matches an existing OSF account, the external
    identity is marked 'LINK' and a link-confirmation email is sent to that
    address; otherwise an unconfirmed user is created with the identity
    marked 'CREATE' and a create-confirmation email is sent.

    :raises: HTTPError(http.UNAUTHORIZED) if the session is not a first-time
        external login session.
    :raises: HTTPError(http.FORBIDDEN) if the external identity is already
        claimed by another user.
    """
    form = ResendConfirmationForm(request.form)
    session = get_session()
    if not session.is_external_first_login:
        raise HTTPError(http.UNAUTHORIZED)
    external_id_provider = session.data['auth_user_external_id_provider']
    external_id = session.data['auth_user_external_id']
    fullname = session.data['auth_user_fullname']
    service_url = session.data['service_url']
    # TODO: @cslzchen use user tags instead of destination
    destination = 'dashboard'
    # Figure out which campaign (if any) the service url belongs to, so the
    # user lands on the right page after confirmation.
    for campaign in campaigns.get_campaigns():
        if campaign != 'institution':
            # Handle different url encoding schemes between `furl` and `urlparse/urllib`.
            # OSF uses `furl` to parse service url during service validation with CAS. However, `web_url_for()` uses
            # `urlparse/urllib` to generate service url. `furl` handles `urlparse/urllib` generated urls, but
            # not vice versa.
            campaign_url = furl.furl(campaigns.campaign_url_for(campaign)).url
            external_campaign_url = furl.furl(campaigns.external_campaign_url_for(campaign)).url
            if campaigns.is_proxy_login(campaign):
                # proxy campaigns: OSF Preprints and branded ones
                if check_service_url_with_proxy_campaign(str(service_url), campaign_url, external_campaign_url):
                    destination = campaign
                    # continue to check branded preprints even if service url matches osf preprints
                    if campaign != 'osf-preprints':
                        break
            elif service_url.startswith(campaign_url):
                # osf campaigns: OSF Prereg and ERPC
                destination = campaign
                break
    if form.validate():
        clean_email = form.email.data
        user = get_user(email=clean_email)
        # Pending identity record: status filled in below as 'LINK' or 'CREATE'.
        external_identity = {
            external_id_provider: {
                external_id: None,
            },
        }
        try:
            ensure_external_identity_uniqueness(external_id_provider, external_id, user)
        except ValidationError as e:
            raise HTTPError(http.FORBIDDEN, e.message)
        if user:
            # 1. update user oauth, with pending status
            external_identity[external_id_provider][external_id] = 'LINK'
            if external_id_provider in user.external_identity:
                user.external_identity[external_id_provider].update(external_identity[external_id_provider])
            else:
                user.external_identity.update(external_identity)
            # 2. add unconfirmed email and send confirmation email
            user.add_unconfirmed_email(clean_email, external_identity=external_identity)
            user.save()
            send_confirm_email(
                user,
                clean_email,
                external_id_provider=external_id_provider,
                external_id=external_id,
                destination=destination
            )
            # 3. notify user
            message = language.EXTERNAL_LOGIN_EMAIL_LINK_SUCCESS.format(
                external_id_provider=external_id_provider,
                email=user.username
            )
            kind = 'success'
            # 4. remove session and osf cookie
            remove_session(session)
        else:
            # 1. create unconfirmed user with pending status
            external_identity[external_id_provider][external_id] = 'CREATE'
            user = User.create_unconfirmed(
                username=clean_email,
                password=None,
                fullname=fullname,
                external_identity=external_identity,
                campaign=None
            )
            # TODO: [#OSF-6934] update social fields, verified social fields cannot be modified
            user.save()
            # 3. send confirmation email
            send_confirm_email(
                user,
                user.username,
                external_id_provider=external_id_provider,
                external_id=external_id,
                destination=destination
            )
            # 4. notify user
            message = language.EXTERNAL_LOGIN_EMAIL_CREATE_SUCCESS.format(
                external_id_provider=external_id_provider,
                email=user.username
            )
            kind = 'success'
            # 5. remove session
            remove_session(session)
        status.push_status_message(message, kind=kind, trust=False)
    else:
        forms.push_errors_to_status(form.errors)
    # Don't go anywhere
    return {
        'form': form,
        'external_id_provider': external_id_provider
    }
def validate_campaign(campaign):
    """
    Non-view helper function that validates `campaign`.

    :param campaign: the campaign to validate
    :return: truthy if valid, falsy otherwise (falsy input is returned as-is,
        matching the short-circuit behavior of `campaign and ...`)
    """
    if not campaign:
        return campaign
    return campaign in campaigns.get_campaigns()
def validate_next_url(next_url):
    """
    Non-view helper function that checks `next_url`.
    Only allow redirects which are relative root or full domain (CAS, OSF and MFR).
    Disallows external redirects.

    :param next_url: the next url to check
    :return: True if valid, False otherwise
    """
    # Guard against empty/None input: the single-char check below would
    # otherwise raise an IndexError on an empty string.
    if not next_url:
        return False
    # disable external domain using `//`: the browser allows `//` as a shortcut for non-protocol specific requests
    # like http:// or https:// depending on the use of SSL on the page already.
    if next_url.startswith('//'):
        return False
    # only OSF, MFR, CAS and Branded Preprints domains are allowed
    if next_url.startswith('/') or next_url.startswith(settings.DOMAIN):
        # OSF
        return True
    if next_url.startswith(settings.CAS_SERVER_URL) or next_url.startswith(settings.MFR_SERVER_URL):
        # CAS or MFR
        return True
    for url in campaigns.get_external_domains():
        # Branded Preprints Phase 2
        if next_url.startswith(url):
            return True
    return False
def check_service_url_with_proxy_campaign(service_url, campaign_url, external_campaign_url=None):
    """
    Check if service url belongs to proxy campaigns: OSF Preprints and branded ones.
    Both service_url and campaign_url are parsed using `furl` encoding scheme.

    :param service_url: the `furl` formatted service url
    :param campaign_url: the `furl` formatted campaign url
    :param external_campaign_url: the `furl` formatted external campaign url
    :return: True when the service url targets the campaign, False otherwise
    """
    def _targets(target_url):
        # The login path may or may not carry a trailing slash before the query.
        return (service_url.startswith(settings.DOMAIN + 'login/?next=' + target_url) or
                service_url.startswith(settings.DOMAIN + 'login?next=' + target_url))
    if _targets(campaign_url):
        return True
    if external_campaign_url and _targets(external_campaign_url):
        return True
    return False
|
|
# -*- coding: utf-8 -*-
"""
migrate
~~~~~~~
A simple generic database migration tool
:copyright: (c) 2014 Francis Asante <kofrasa@gmail.com>
:license: MIT
"""
from __future__ import print_function
import os
import sys
import argparse
import glob
import string
import subprocess
import tempfile
from datetime import datetime
__all__ = ['Migrate', 'MigrateException']
__version__ = '0.3.8'
try:
from ConfigParser import ConfigParser
except:
from configparser import ConfigParser
try:
    import pwd

    def get_login_name():
        """Return the current user's login name from the password database."""
        return pwd.getpwuid(os.getuid())[0]
except ImportError:
    # `pwd` only exists on POSIX systems. Catch ImportError (not
    # ModuleNotFoundError, which does not exist on Python 2 and would raise
    # a NameError here) and fall back to os.getlogin() on Windows.
    def get_login_name():
        """Return the current user's login name via the controlling terminal."""
        return os.getlogin()
# Shell command templates for each supported engine; placeholders are filled
# with the connection settings by Migrate._get_command().
COMMANDS = {
    'postgres': "psql -w --host {host} --port {port} --username {user} -d {database}",
    'mysql': "mysql --host {host} --port {port} --user {user} -D {database}",
    'sqlite3': "sqlite3 {database}"
}
# Default server ports per engine (sqlite3 is file-based and needs none).
PORTS = dict(postgres=5432, mysql=3306)
class MigrateException(Exception):
    """Raised when a migration script fails to apply."""
    pass
class Migrate(object):
    """A simple generic database migration helper

    Revisions are integer-named folders under the migration path. Each folder
    holds paired ``<timestamp>_<message>.up.sql`` / ``.down.sql`` scripts that
    are piped through the command-line client of the configured engine
    (postgres, mysql or sqlite3).
    """
    def __init__(self, path='./migrations', host=None, port=None, user=None, password=None, database=None,
                 rev=None, command=None, message=None, engine=None, verbose=False, debug=False,
                 skip_errors=False, **kwargs):
        # assign configuration for easy lookup
        self._migration_path = os.path.abspath(path)
        self._host = host
        self._port = port
        self._user = user
        self._password = password
        self._database = database
        self._rev = rev
        self._command = command
        self._message = message
        self._engine = engine
        self._verbose = int(verbose)
        self._debug = debug
        self._skip_errors = skip_errors
        assert os.path.exists(self._migration_path) and os.path.isdir(self._migration_path), \
            "migration folder does not exist: %s" % self._migration_path
        current_dir = os.path.abspath(os.getcwd())
        os.chdir(self._migration_path)
        # cache ordered list of the names of all revision folders
        # (digit-only folder names, sorted numerically but kept as strings)
        self._revisions = list(map(str,
            sorted(map(int, filter(lambda x: x.isdigit(), glob.glob("*"))))))
        os.chdir(current_dir)
    def _log(self, level, msg):
        """Simple logging for the given verbosity level"""
        if self._verbose >= level:
            print(msg)
    def _cmd_create(self):
        """Create a migration in the current or new revision folder
        """
        assert self._message, "need to supply a message for the \"create\" command"
        if not self._revisions:
            self._revisions.append("1")
        # get the migration folder
        rev_folder = self._revisions[-1]
        full_rev_path = os.path.join(self._migration_path, rev_folder)
        if not os.path.exists(full_rev_path):
            os.mkdir(full_rev_path)
        else:
            count = len(glob.glob(os.path.join(full_rev_path, "*")))
            # create next revision folder if needed ("-r 0" requested and the
            # current folder already contains migrations)
            if count and self._rev and int(self._rev) == 0:
                rev_folder = str(int(rev_folder) + 1)
                full_rev_path = os.path.join(self._migration_path, rev_folder)
                os.mkdir(full_rev_path)
                self._revisions.append(rev_folder)
        # format file name: slugify the message and prefix a UTC timestamp
        filename = '_'.join([s.lower() for s in self._message.split(' ') if s.strip()])
        for p in string.punctuation:
            if p in filename:
                filename = filename.replace(p, '_')
        filename = "%s_%s" % (datetime.utcnow().strftime("%Y%m%d%H%M%S"), filename.replace('__', '_'))
        # create the migration files (one "up" and one "down" script)
        self._log(0, "creating files: ")
        for s in ('up', 'down'):
            file_path = os.path.join(full_rev_path, "%s.%s.sql" % (filename, s))
            with open(file_path, 'a+') as w:
                w.write('\n'.join([
                    '-- *** %s ***' % s.upper(),
                    '-- file: %s' % os.path.join(rev_folder, filename),
                    '-- comment: %s' % self._message]))
            self._log(0, file_path)
    def _cmd_up(self):
        """Upgrade to a revision"""
        revision = self._get_revision()
        if not self._rev:
            self._log(0, "upgrading current revision")
        else:
            self._log(0, "upgrading from revision %s" % revision)
        # apply "up" scripts from the starting revision to the latest
        for rev in self._revisions[int(revision) - 1:]:
            sql_files = glob.glob(os.path.join(self._migration_path, rev, "*.up.sql"))
            sql_files.sort()
            self._exec(sql_files, rev)
        self._log(0, "done: upgraded revision to %s\n" % rev)
    def _cmd_down(self):
        """Downgrade to a revision"""
        revision = self._get_revision()
        if not self._rev:
            self._log(0, "downgrading current revision")
        else:
            self._log(0, "downgrading to revision %s" % revision)
        # execute from latest to oldest revision
        for rev in reversed(self._revisions[int(revision) - 1:]):
            sql_files = glob.glob(os.path.join(self._migration_path, rev, "*.down.sql"))
            sql_files.sort(reverse=True)
            self._exec(sql_files, rev)
        self._log(0, "done: downgraded revision to %s" % rev)
    def _cmd_reset(self):
        """Downgrade and re-run revisions"""
        self._cmd_down()
        self._cmd_up()
    def _get_revision(self):
        """Validate and return the revision to use for current command
        """
        assert self._revisions, "no migration revision exist"
        revision = self._rev or self._revisions[-1]
        # revision count must be less or equal since revisions are ordered
        assert revision in self._revisions, "invalid revision specified"
        return revision
    def _get_command(self, **kwargs):
        """Build the engine's client command line from the connection config."""
        return COMMANDS[self._engine].format(
            host=self._host,
            user=self._user,
            database=self._database,
            port=self._port or PORTS.get(self._engine, None))
    def _exec(self, files, rev=0):
        """Run each SQL script in `files` through the engine's exec_* helper.

        Errors abort the run unless --skip-errors was given.
        """
        cmd = self._get_command()
        # dispatch to the module-level exec_<engine> function
        func = globals()["exec_%s" % self._engine]
        assert callable(func), "no exec function found for " + self._engine
        for f in files:
            self._log(1, "applying: %s/%s" % (rev, os.path.basename(f)))
            try:
                func(cmd, f, self._password, self._debug)
            except MigrateException as e:
                if not self._skip_errors:
                    raise e
    def run(self):
        """Execute the configured command ('create', 'up', 'down' or 'reset')."""
        # check for availability of target command line tool
        cmd_name = self._get_command().split()[0]
        cmd_path = subprocess.check_output(["which", cmd_name]).strip()
        assert os.path.exists(cmd_path), "no %s command found on path" % cmd_name
        {
            'create': lambda: self._cmd_create(),
            'up': lambda: self._cmd_up(),
            'down': lambda: self._cmd_down(),
            'reset': lambda: self._cmd_reset()
        }.get(self._command)()
def print_debug(msg):
    """Print *msg* to stdout with a `[debug]` prefix."""
    line = "[debug] %s" % msg
    print(line)
def exec_mysql(cmd, filename, password=None, debug=False):
    """Pipe the SQL script *filename* into the mysql client given by *cmd*.

    :raises MigrateException: if the client exits with a non-zero status.
    """
    if password:
        cmd = cmd + ' -p' + password
    if debug:
        print_debug("%s < %s" % (cmd, filename))
    argv = cmd.split()
    with open(filename) as script:
        try:
            return subprocess.check_call(argv, stdin=script)
        except subprocess.CalledProcessError as err:
            raise MigrateException(str(err))
# reuse :)
def exec_sqlite3(cmd, filename, password=None, debug=False):
    """Apply an SQL script via the sqlite3 client (delegates to exec_mysql)."""
    exec_mysql(cmd, filename, password, debug)
def exec_postgres(cmd, filename, password=None, debug=False):
    """Run the SQL script *filename* through the psql client given by *cmd*.

    The password, if any, is passed via the PGPASSWORD environment variable
    (restored afterwards). psql exits 0 even on bad input, so stderr is
    captured to a temporary file and any output there raises MigrateException.

    :return: 0 in debug mode (nothing is executed), otherwise None on success.
    :raises MigrateException: if psql wrote anything to stderr.
    """
    if debug:
        if password:
            print_debug("PGPASSWORD=%s %s -f %s" % (password, cmd, filename))
        else:
            print_debug("%s -f %s" % (cmd, filename))
        return 0
    env_password = None
    if password:
        # Remember any pre-existing value (including an empty string) so it
        # can be restored exactly; `os.environ.get` yields None when unset.
        env_password = os.environ.get('PGPASSWORD')
        os.environ['PGPASSWORD'] = password
    # for Postgres exit status for bad file input is 0, so we use a temporary
    # file to detect errors. mkstemp (not the racy mktemp) creates it safely.
    err_fd, err_filename = tempfile.mkstemp()
    os.close(err_fd)
    try:
        try:
            # Open devnull for *writing* (it is used as stdout) and make sure
            # both handles are closed via the context managers.
            with open(os.devnull, 'w') as devnull:
                with open(err_filename, 'w') as err_file:
                    subprocess.check_call(cmd.split() + ['-f', filename],
                                          stdout=devnull, stderr=err_file)
        finally:
            if password:
                if env_password is not None:
                    os.environ['PGPASSWORD'] = env_password
                else:
                    del os.environ['PGPASSWORD']
        with open(err_filename, 'r') as err_file:
            errors = err_file.read()
        if errors:
            raise MigrateException(errors)
    finally:
        # Always clean up the temp file, even when raising.
        os.remove(err_filename)
def main(*args):
    """Command-line entry point.

    Parses arguments (optionally overlaid with values from an ``.ini`` style
    config file), builds a :class:`Migrate` instance and runs the requested
    command. Errors are printed to stderr rather than raised.

    :param args: optional argument list; defaults to ``sys.argv[1:]``.
    """
    # allow flexibility for testing
    args = args or sys.argv[1:]
    login_name = get_login_name()
    migration_path = os.path.join(os.getcwd(), "migrations")
    program = os.path.splitext(os.path.split(__file__)[1])[0]
    parser = argparse.ArgumentParser(
        prog=program,
        formatter_class=argparse.RawTextHelpFormatter,
        usage="""\
%(prog)s [options] <command>
A simple generic database migration tool using SQL scripts
commands:
  up       Upgrade from a revision to the latest
  down     Downgrade from the latest to a lower revision
  reset    Rollback and re-run to the current revision
  create   Create a migration. Specify "-r 0" to add a new revision
""")
    parser.add_argument(dest='command', choices=('create', 'up', 'down', 'reset'))
    parser.add_argument("-e", dest="engine", default='sqlite3', choices=('postgres', 'mysql', 'sqlite3'),
                        help="database engine (default: \"sqlite3\")")
    parser.add_argument("-r", dest="rev",
                        help="revision to use. specify \"0\" for the next revision if using the "
                             "\"create\" command. (default: last revision)")
    parser.add_argument("-m", dest="message",
                        help="message description for migrations created with the \"create\" command")
    parser.add_argument("-u", dest="user", default=login_name,
                        help="database user name (default: \"%s\")" % login_name)
    parser.add_argument("-p", dest="password", default='', help="database password.")
    parser.add_argument("--host", default="localhost", help='database server host (default: "localhost")')
    parser.add_argument("--port", help='server port (default: postgres=5432, mysql=3306)')
    parser.add_argument("-d", dest="database", default=login_name,
                        help="database name to use. specify a /path/to/file if using sqlite3. "
                             "(default: login name)")
    parser.add_argument("--path", default=migration_path,
                        help="path to the migration folder either absolute or relative to the "
                             "current directory. (default: \"./migrations\")")
    parser.add_argument("-f", dest='file', metavar='CONFIG', default=".migrate",
                        help="configuration file in \".ini\" format. "
                             "sections represent different configuration environments.\n"
                             "keys include: migration_path, user, password, host, port, "
                             "database, and engine. (default: \".migrate\")")
    parser.add_argument("--env", default='dev',
                        help="configuration environment. applies only to config file option "
                             "(default: \"dev\")")
    parser.add_argument("--debug", action='store_true', default=False,
                        help="print the commands but does not execute.")
    parser.add_argument("--skip-errors", default=False, action='store_true',
                        help="continue migration even when some scripts in a revision fail")
    parser.add_argument("--verbose", dest="verbose", action='store_true', default=False, help="show verbose output.")
    parser.add_argument('--version', action='version', version='%(prog)s ' + __version__)
    config = {}
    args = parser.parse_args(args=args)
    # collect the parsed options into a plain dict for Migrate(**config)
    for name in ('engine', 'command', 'rev', 'password', 'user', 'path', 'env', 'skip_errors',
                 'host', 'port', 'database', 'file', 'message', 'verbose', 'debug'):
        config[name] = getattr(args, name)
    try:
        if 'file' in config:
            if os.path.isfile(config['file']):
                # overlay values from the chosen environment section of the
                # config file on top of the command-line defaults
                cfg = ConfigParser()
                cfg.read(config['file'])
                env = config.get('env', 'dev')
                for name in ('engine', 'user', 'password', 'migration_path',
                             'host', 'port', 'database', 'verbose'):
                    if cfg.has_option(env, name):
                        value = cfg.get(env, name)
                        # the config key `migration_path` maps to the `path` argument
                        if name == 'migration_path':
                            config['path'] = value
                        if value is not None:
                            config[name] = value
            elif config['file'] != '.migrate':
                # only complain when a non-default config file was requested
                raise Exception("Couldn't find configuration file: %s" % config['file'])
        Migrate(**config).run()
    except MigrateException as e:
        # migration-level failures: print the message only
        print(str(e), file=sys.stderr)
    except Exception as e:
        # anything else: print the message and remind the user of the usage
        print(str(e), file=sys.stderr)
        parser.print_usage(sys.stderr)
if __name__ == '__main__':
    main()
|
|
from copy import copy
from cStringIO import StringIO
import datetime
import inspect
import sys
import traceback
from django.core.management import call_command
from django.core.management.commands import loaddata
from django.db import models
import south.db
from south import exceptions
from south.db import DEFAULT_DB_ALIAS
from south.models import MigrationHistory
from south.signals import ran_migration
class Migrator(object):
    """Base class for running South migrations in one direction.

    Subclasses (Forwards/Backwards) supply the direction-specific pieces:
    title/status strings, which frozen ORM to expose, the callable that
    applies a migration and how to record it in MigrationHistory.
    """
    def __init__(self, verbosity=0):
        # verbosity: 0 = silent, >0 = print titles/status while migrating
        self.verbosity = int(verbosity)
    @staticmethod
    def title(target):
        # Human-readable headline for the whole run; direction-specific.
        raise NotImplementedError()
    def print_title(self, target):
        if self.verbosity:
            print self.title(target)
    @staticmethod
    def status(target):
        # One-line progress marker for a single migration; direction-specific.
        raise NotImplementedError()
    def print_status(self, migration):
        status = self.status(migration)
        if self.verbosity and status:
            print status
    @staticmethod
    def orm(migration):
        # The frozen ORM the migration function should receive; direction-specific.
        raise NotImplementedError()
    def backwards(self, migration):
        # Wrap backwards() with the ORM of the *previous* migration.
        return self._wrap_direction(migration.backwards(), migration.prev_orm())
    def direction(self, migration):
        # Returns the zero-arg callable that applies the migration; set by subclasses.
        raise NotImplementedError()
    @staticmethod
    def _wrap_direction(direction, orm):
        args = inspect.getargspec(direction)
        if len(args[0]) == 1:
            # Old migration, no ORM should be passed in
            return direction
        return (lambda: direction(orm))
    @staticmethod
    def record(migration, database):
        # Persist (or remove) the MigrationHistory row; direction-specific.
        raise NotImplementedError()
    def run_migration_error(self, migration, extra_info=''):
        # Message shown when a migration fails on a DB without DDL transactions,
        # leaving the schema in an interim state.
        return (' ! Error found during real run of migration! Aborting.\n'
                '\n'
                ' ! Since you have a database that does not support running\n'
                ' ! schema-altering statements in transactions, we have had \n'
                ' ! to leave it in an interim state between migrations.\n'
                '%s\n'
                ' ! The South developers regret this has happened, and would\n'
                ' ! like to gently persuade you to consider a slightly\n'
                ' ! easier-to-deal-with DBMS.\n') % extra_info
    def run_migration(self, migration):
        """Apply one migration inside a transaction, rolling back on failure."""
        migration_function = self.direction(migration)
        south.db.db.start_transaction()
        try:
            migration_function()
            south.db.db.execute_deferred_sql()
        except:
            south.db.db.rollback_transaction()
            if not south.db.db.has_ddl_transactions:
                # Rollback could not undo the DDL: warn about the interim state.
                print self.run_migration_error(migration)
            raise
        else:
            south.db.db.commit_transaction()
    def run(self, migration):
        """Set up the ORM and run the migration (with a dry run first on
        databases that cannot transactionally roll back DDL)."""
        # Get the correct ORM.
        south.db.db.current_orm = self.orm(migration)
        # If the database doesn't support running DDL inside a transaction
        # *cough*MySQL*cough* then do a dry run first.
        if not south.db.db.has_ddl_transactions:
            dry_run = DryRunMigrator(migrator=self, ignore_fail=False)
            dry_run.run_migration(migration)
        return self.run_migration(migration)
    def done_migrate(self, migration, database):
        """Record the migration in history, inside its own transaction."""
        south.db.db.start_transaction()
        try:
            # Record us as having done this
            self.record(migration, database)
        except:
            south.db.db.rollback_transaction()
            raise
        else:
            south.db.db.commit_transaction()
    def send_ran_migration(self, migration):
        # Notify listeners; `method` is 'forwards' or 'backwards' from the subclass name.
        ran_migration.send(None,
                           app=migration.app_label(),
                           migration=migration,
                           method=self.__class__.__name__.lower())
    def migrate(self, migration, database):
        """
        Runs the specified migration forwards/backwards, in order.
        """
        # NOTE(review): `app` and `migration_name` are computed but unused.
        app = migration.migrations._migrations
        migration_name = migration.name()
        self.print_status(migration)
        result = self.run(migration)
        self.done_migrate(migration, database)
        self.send_ran_migration(migration)
        return result
    def migrate_many(self, target, migrations, database):
        # Direction-specific batch driver; implemented by subclasses.
        raise NotImplementedError()
class MigratorWrapper(object):
    """Proxy that overlays its own methods onto a *copy* of a migrator.

    Every non-dunder attribute defined on the wrapper subclass is copied into
    the wrapped migrator's instance __dict__, shadowing the original
    implementation; all other attribute access is delegated via __getattr__.
    """
    def __init__(self, migrator, *args, **kwargs):
        # Copy so the original migrator is left untouched.
        self._migrator = copy(migrator)
        attributes = dict([(k, getattr(self, k))
                           for k in self.__class__.__dict__.iterkeys()
                           if not k.startswith('__')])
        self._migrator.__dict__.update(attributes)
    def __getattr__(self, name):
        # Fall through to the wrapped migrator for anything not overridden.
        return getattr(self._migrator, name)
class DryRunMigrator(MigratorWrapper):
    """Wrapper that executes a migration and then rolls it back (a dry run).

    Used to smoke-test a migration before the real run on databases without
    DDL transaction support. With ignore_fail=True, failures are swallowed
    (and SQL debug output is suppressed) instead of raised.
    """
    def __init__(self, ignore_fail=True, *args, **kwargs):
        super(DryRunMigrator, self).__init__(*args, **kwargs)
        self._ignore_fail = ignore_fail
    def _run_migration(self, migration):
        if migration.no_dry_run() and self.verbosity:
            print " - Migration '%s' is marked for no-dry-run." % migration
            return
        south.db.db.dry_run = True
        if self._ignore_fail:
            # Silence SQL debug output for the throwaway run.
            south.db.db.debug, old_debug = False, south.db.db.debug
        pending_creates = south.db.db.get_pending_creates()
        south.db.db.start_transaction()
        migration_function = self.direction(migration)
        try:
            try:
                migration_function()
                south.db.db.execute_deferred_sql()
            except:
                # Wrap any failure so callers can distinguish dry-run errors.
                raise exceptions.FailedDryRun(migration, sys.exc_info())
        finally:
            south.db.db.rollback_transactions_dry_run()
            if self._ignore_fail:
                south.db.db.debug = old_debug
            south.db.db.clear_run_data(pending_creates)
            south.db.db.dry_run = False
    def run_migration(self, migration):
        try:
            self._run_migration(migration)
        except exceptions.FailedDryRun:
            if self._ignore_fail:
                return False
            raise
    def done_migrate(self, *args, **kwargs):
        # Dry runs are never recorded in MigrationHistory.
        pass
    def send_ran_migration(self, *args, **kwargs):
        # Dry runs never fire the ran_migration signal.
        pass
class FakeMigrator(MigratorWrapper):
    """Wrapper that records migrations as applied without running them."""
    def run(self, migration):
        # Don't execute anything; just log that the migration was faked.
        if self.verbosity:
            print ' (faked)'
    def send_ran_migration(self, *args, **kwargs):
        # Faked migrations never fire the ran_migration signal.
        pass
class LoadInitialDataMigrator(MigratorWrapper):
    """Wrapper that loads the app's `initial_data` fixture after migrating.

    The fixture is only loaded when the run actually ended at the app's
    final migration.
    """
    def load_initial_data(self, target):
        if target is None or target != target.migrations[-1]:
            return
        # Load initial data, if we ended up at target
        if self.verbosity:
            print " - Loading initial data for %s." % target.app_label()
        # Override Django's get_apps call temporarily to only load from the
        # current app
        old_get_apps = models.get_apps
        new_get_apps = lambda: [models.get_app(target.app_label())]
        models.get_apps = new_get_apps
        loaddata.get_apps = new_get_apps
        try:
            call_command('loaddata', 'initial_data', verbosity=self.verbosity)
        finally:
            # Always restore the patched lookups, even if loaddata fails.
            models.get_apps = old_get_apps
            loaddata.get_apps = old_get_apps
    def migrate_many(self, target, migrations, database):
        migrator = self._migrator
        # Call migrate_many through the wrapped migrator's class so its own
        # implementation (not this override) performs the actual run.
        result = migrator.__class__.migrate_many(migrator, target, migrations, database)
        if result:
            self.load_initial_data(target)
        return True
class Forwards(Migrator):
    """
    Runs the specified migration forwards, in order.
    """
    torun = 'forwards'
    @staticmethod
    def title(target):
        if target is not None:
            return " - Migrating forwards to %s." % target.name()
        else:
            assert False, "You cannot migrate forwards to zero."
    @staticmethod
    def status(migration):
        return ' > %s' % migration
    @staticmethod
    def orm(migration):
        # Forwards migrations see the ORM frozen at this migration.
        return migration.orm()
    def forwards(self, migration):
        return self._wrap_direction(migration.forwards(), migration.orm())
    direction = forwards
    @staticmethod
    def record(migration, database):
        # Record us as having done this
        record = MigrationHistory.for_migration(migration, database)
        record.applied = datetime.datetime.utcnow()
        if database != DEFAULT_DB_ALIAS:
            record.save(using=database)
        else:
            # Django 1.1 and below always go down this branch.
            record.save()
    def format_backwards(self, migration):
        """Render the SQL of the backwards migration as recovery-hint text.

        Runs the backwards migration with debug + dry-run enabled and stdout
        captured, so nothing is actually executed against the database.
        """
        old_debug, old_dry_run = south.db.db.debug, south.db.db.dry_run
        south.db.db.debug = south.db.db.dry_run = True
        stdout = sys.stdout
        sys.stdout = StringIO()
        try:
            try:
                self.backwards(migration)()
                return sys.stdout.getvalue()
            except:
                raise
        finally:
            # Restore the debug/dry-run flags and the real stdout.
            south.db.db.debug, south.db.db.dry_run = old_debug, old_dry_run
            sys.stdout = stdout
    def run_migration_error(self, migration, extra_info=''):
        # Prepend the backwards SQL as a possible manual recovery path.
        extra_info = ('\n'
                      '! You *might* be able to recover with:'
                      '%s'
                      '%s' %
                      (self.format_backwards(migration), extra_info))
        return super(Forwards, self).run_migration_error(migration, extra_info)
    def migrate_many(self, target, migrations, database):
        try:
            for migration in migrations:
                result = self.migrate(migration, database)
                if result is False: # The migrations errored, but nicely.
                    return False
        finally:
            # Call any pending post_syncdb signals
            south.db.db.send_pending_create_signals()
        return True
class Backwards(Migrator):
    """
    Runs the specified migration backwards, in order.
    """
    torun = 'backwards'
    @staticmethod
    def title(target):
        if target is None:
            return " - Migrating backwards to zero state."
        else:
            return " - Migrating backwards to just after %s." % target.name()
    @staticmethod
    def status(migration):
        return ' < %s' % migration
    @staticmethod
    def orm(migration):
        # Backwards migrations see the ORM of the *previous* migration.
        return migration.prev_orm()
    direction = Migrator.backwards
    @staticmethod
    def record(migration, database):
        # Record us as having not done this
        record = MigrationHistory.for_migration(migration, database)
        if record.id is not None:
            if database != DEFAULT_DB_ALIAS:
                record.delete(using=database)
            else:
                # Django 1.1 always goes down here
                record.delete()
    def migrate_many(self, target, migrations, database):
        for migration in migrations:
            self.migrate(migration, database)
        return True
|
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""This module implements a simple WSGI server for the memory_inspector Web UI.
The WSGI server essentially handles two kinds of requests:
- /ajax/foo/bar: The AJAX endpoints which exchange JSON data with the JS.
Requests routing is achieved using a simple @uri decorator which simply
performs regex matching on the request path.
- /static/content: Anything not matching the /ajax/ prefix is treated as a
static content request (for serving the index.html and JS/CSS resources).
The following HTTP status code are returned by the server:
- 200 - OK: The request was handled correctly.
- 404 - Not found: None of the defined handlers did match the /request/path.
- 410 - Gone: The path was matched but the handler returned an empty response.
This typically happens when the target device is disconnected.
"""
import cgi
import collections
import datetime
import glob
import json
import memory_inspector
import mimetypes
import os
import posixpath
import re
import traceback
import urlparse
import uuid
import wsgiref.simple_server
from memory_inspector import constants
from memory_inspector.core import backends
from memory_inspector.core import memory_map
from memory_inspector.classification import mmap_classifier
from memory_inspector.classification import native_heap_classifier
from memory_inspector.data import serialization
from memory_inspector.data import file_storage
from memory_inspector.frontends import background_tasks
_HTTP_OK = '200 OK'
_HTTP_GONE = '410 Gone'
_HTTP_NOT_FOUND = '404 Not Found'
_HTTP_INTERNAL_ERROR = '500 Internal Server Error'
_PERSISTENT_STORAGE_PATH = os.path.join(
os.path.expanduser('~'), '.config', 'memory_inspector')
_CONTENT_DIR = os.path.abspath(os.path.join(
os.path.dirname(__file__), 'www_content'))
_APP_PROCESS_RE = r'^[\w.:]+$' # Regex for matching app processes.
_STATS_HIST_SIZE = 120 # Keep at most 120 samples of stats per process.
_CACHE_LEN = 10 # Max length of |_cached_objs|.
# |_cached_objs| keeps the state of short-lived objects that the client needs
# in subsequent AJAX calls.
_cached_objs = collections.OrderedDict()
_persistent_storage = file_storage.Storage(_PERSISTENT_STORAGE_PATH)
_proc_stats_history = {} # /Android/device/PID -> deque([stats@T=0, stats@T=1])
class UriHandler(object):
  """Base decorator used to automatically route /requests/by/path.

  Each handler is called with the following args:
    args: a tuple of the matching regex groups.
    req_vars: a dictionary of request args (querystring for GET, body for POST).
  Each handler must return a tuple with the following elements:
    http_code: a string with the HTTP status code (e.g., '200 - OK')
    headers: a list of HTTP headers (e.g., [('Content-Type': 'foo/bar')])
    body: the HTTP response body.
  """
  # Registry of (verb, path_regex, output_filter, handler) tuples, shared by
  # all instances and consulted by Handle().
  _handlers = []

  def __init__(self, path_regex, verb='GET', output_filter=None):
    self._path_regex = path_regex
    self._verb = verb
    default_output_filter = lambda *x: x  # Just return the same args unchanged.
    self._output_filter = output_filter or default_output_filter

  def __call__(self, handler):
    UriHandler._handlers += [(
        self._verb, self._path_regex, self._output_filter, handler)]
    # Return the handler so the decorated module-level name stays bound to the
    # function (previously the decorator returned None, rebinding every
    # decorated handler's name to None).
    return handler

  @staticmethod
  def Handle(method, path, req_vars):
    """Finds a matching handler and calls it (or returns a 404 - Not Found)."""
    cache_headers = [('Cache-Control', 'no-cache'),
                     ('Expires', 'Fri, 19 Sep 1986 05:00:00 GMT')]
    for (match_method, path_regex, output_filter, fn) in UriHandler._handlers:
      if method != match_method:
        continue
      m = re.match(path_regex, path)
      if not m:
        continue
      try:
        (http_code, headers, body) = fn(m.groups(), req_vars)
      except Exception as e:
        # Surface handler crashes as a 500 with the exception text as body.
        traceback.print_exc()
        return _HTTP_INTERNAL_ERROR, [], str(e)
      return output_filter(http_code, cache_headers + headers, body)
    return (_HTTP_NOT_FOUND, [], 'No AJAX handlers found')
class AjaxHandler(UriHandler):
  """Decorator for routing AJAX requests.

  Same routing behavior as |UriHandler|, but the response body is serialized
  to JSON (shared by most of the handlers defined below).
  """

  def __init__(self, path_regex, verb='GET'):
    super(AjaxHandler, self).__init__(
        path_regex, verb, AjaxHandler.AjaxOutputFilter)

  @staticmethod
  def AjaxOutputFilter(http_code, headers, body):
    # Serialize the body using the project-specific JSON encoder.
    serialized = json.dumps(body, cls=serialization.Encoder)
    return http_code, headers, serialized
@AjaxHandler('/ajax/backends')
def _ListBackends(args, req_vars):  # pylint: disable=W0613
  """Returns the names of all registered backends."""
  names = [b.name for b in backends.ListBackends()]
  return _HTTP_OK, [], names
@AjaxHandler('/ajax/devices')
def _ListDevices(args, req_vars):  # pylint: disable=W0613
  """Enumerates the devices known to all backends, refreshing their settings."""
  device_list = []
  for device in backends.ListDevices():
    # The device settings must be loaded at discovery time (i.e. here), not
    # during startup, because the device might have been plugged in later.
    for key, value in _persistent_storage.LoadSettings(device.id).iteritems():
      device.settings[key] = value
    device_list.append({'backend': device.backend.name,
                        'id': device.id,
                        'name': device.name})
  return _HTTP_OK, [], device_list
@AjaxHandler(r'/ajax/dump/mmap/([^/]+)/([^/]+)/(\d+)')
def _DumpMmapsForProcess(args, req_vars):  # pylint: disable=W0613
  """Dumps memory maps for a process.

  The response is formatted according to the Google Charts DataTable format.
  """
  process = _GetProcess(args)
  if not process:
    return _HTTP_GONE, [], 'Device not found or process died'
  memory_maps = process.DumpMemoryMaps()
  # Keep the dump cached server-side: the client might need it for profiling.
  dump_id = _CacheObject(memory_maps)
  return _HTTP_OK, [], {'table': _ConvertMmapToGTable(memory_maps),
                        'id': dump_id}
@AjaxHandler('/ajax/initialize/([^/]+)/([^/]+)$', 'POST')
def _InitializeDevice(args, req_vars):  # pylint: disable=W0613
  """Initializes a device, optionally enabling native allocation tracing."""
  device = _GetDevice(args)
  if not device:
    return _HTTP_GONE, [], 'Device not found'
  device.Initialize()
  if req_vars['enableNativeTracing']:
    device.EnableNativeTracing(True)
  resp = {'isNativeTracingEnabled': device.IsNativeTracingEnabled()}
  return _HTTP_OK, [], resp
@AjaxHandler(r'/ajax/profile/create', 'POST')
def _CreateProfile(args, req_vars):  # pylint: disable=W0613
  """Creates (and caches) a profile from a set of dumps.

  The profiling data can be retrieved afterwards using the /profile/{PROFILE_ID}
  endpoints (below).

  Required POST vars: 'type' ('mmap'|'nheap'), 'source' ('archive'|'cache'),
  'ruleset'; plus 'archive' + 'snapshots' or 'id' depending on 'source'.
  """
  classifier = None  # A classifier module (/classification/*_classifier.py).
  dumps = {}  # dump-time -> obj. to classify (e.g., |memory_map.Map|).
  for arg in 'type', 'source', 'ruleset':
    assert(arg in req_vars), 'Expecting %s argument in POST data' % arg

  # Step 1: collect the memory dumps, according to what the client specified in
  # the 'type' and 'source' POST arguments.

  # Case 1a: The client requests to load data from an archive.
  if req_vars['source'] == 'archive':
    archive = _persistent_storage.OpenArchive(req_vars['archive'])
    if not archive:
      return _HTTP_GONE, [], 'Cannot open archive %s' % req_vars['archive']
    first_timestamp = None
    for timestamp_str in req_vars['snapshots']:
      timestamp = file_storage.Archive.StrToTimestamp(timestamp_str)
      # Dump times are stored relative to the first selected snapshot.
      first_timestamp = first_timestamp or timestamp
      time_delta = int((timestamp - first_timestamp).total_seconds())
      if req_vars['type'] == 'mmap':
        dumps[time_delta] = archive.LoadMemMaps(timestamp)
      elif req_vars['type'] == 'nheap':
        dumps[time_delta] = archive.LoadNativeHeap(timestamp)

  # Case 1b: Use a dump recently cached (only mmap, via _DumpMmapsForProcess).
  elif req_vars['source'] == 'cache':
    assert(req_vars['type'] == 'mmap'), 'Only cached mmap dumps are supported.'
    dumps[0] = _GetCacheObject(req_vars['id'])

  if not dumps:
    return _HTTP_GONE, [], 'No memory dumps could be retrieved'

  # Initialize the classifier (mmap or nheap) and prepare symbols for nheap.
  if req_vars['type'] == 'mmap':
    classifier = mmap_classifier
  elif req_vars['type'] == 'nheap':
    classifier = native_heap_classifier
    # NOTE(review): type == 'nheap' implies source == 'archive' here (the
    # cache branch asserts type == 'mmap'), so |archive| is bound below.
    if not archive.HasSymbols():
      return _HTTP_GONE, [], 'No symbols in archive %s' % req_vars['archive']
    symbols = archive.LoadSymbols()
    for nheap in dumps.itervalues():
      nheap.SymbolizeUsingSymbolDB(symbols)

  if not classifier:
    return _HTTP_GONE, [], 'Classifier %s not supported.' % req_vars['type']

  # Step 2: Load the rule-set specified by the client in the 'ruleset' POST arg.
  if req_vars['ruleset'] == 'heuristic':
    assert(req_vars['type'] == 'nheap'), (
        'heuristic rules are supported only for nheap')
    rules = native_heap_classifier.InferHeuristicRulesFromHeap(dumps[0])
  else:
    rules_path = os.path.join(constants.CLASSIFICATION_RULES_PATH,
                              req_vars['ruleset'])
    if not os.path.isfile(rules_path):
      return _HTTP_GONE, [], 'Cannot find the rule-set %s' % rules_path
    with open(rules_path) as f:
      rules = classifier.LoadRules(f.read())

  # Step 3: Aggregate the dump data using the classifier and generate the
  # profile data (which will be kept cached here in the server).
  # The resulting profile will consist of 1+ snapshots (depending on the number
  # dumps the client has requested to process) and a number of 1+ metrics
  # (depending on the buckets' keys returned by the classifier).

  # Converts the {time: dump_obj} dict into a {time: |AggregatedResult|} dict
  # using the classifier.
  snapshots = collections.OrderedDict((time, classifier.Classify(dump, rules))
      for time, dump in sorted(dumps.iteritems()))

  # Add the profile to the cache (and eventually discard old items).
  # |profile_id| is the key that the client will use in subsequent requests
  # (to the /ajax/profile/{ID}/ endpoints) to refer to this particular profile.
  profile_id = _CacheObject(snapshots)

  first_snapshot = next(snapshots.itervalues())
  return _HTTP_OK, [], {'id': profile_id,
                        'times': snapshots.keys(),
                        'metrics': first_snapshot.keys,
                        'rootBucket': first_snapshot.total.name + '/'}
@AjaxHandler(r'/ajax/profile/([^/]+)/tree/(\d+)/(\d+)')
def _GetProfileTreeDataForSnapshot(args, req_vars):  # pylint: disable=W0613
  """Gets the data for the tree chart for a given time and metric.

  |args| is (profile id, metric index, snapshot time).
  The response is formatted according to the Google Charts DataTable format.
  """
  snapshot_id = args[0]
  metric_index = int(args[1])
  time = int(args[2])
  snapshots = _GetCacheObject(snapshot_id)
  if not snapshots:
    return _HTTP_GONE, [], 'Cannot find the selected profile.'
  if time not in snapshots:
    return _HTTP_GONE, [], 'Cannot find snapshot at T=%d.' % time
  snapshot = snapshots[time]
  if metric_index >= len(snapshot.keys):
    return _HTTP_GONE, [], 'Invalid metric id %d' % metric_index
  resp = {'cols': [{'label': 'bucket', 'type': 'string'},
                   {'label': 'parent', 'type': 'string'}],
          'rows': []}

  def VisitBucketAndAddRows(bucket, parent_id=''):
    """Recursively creates the (node, parent) visiting |ResultTree| in DFS."""
    # Node ids are the '/'-joined path of bucket names from the root.
    node_id = parent_id + bucket.name + '/'
    node_label = '<dl><dt>%s</dt><dd>%s</dd></dl>' % (
        bucket.name, _StrMem(bucket.values[metric_index]))
    resp['rows'] += [{'c': [
        {'v': node_id, 'f': node_label},
        {'v': parent_id, 'f': None},
    ]}]
    for child in bucket.children:
      VisitBucketAndAddRows(child, node_id)

  VisitBucketAndAddRows(snapshot.total)
  return _HTTP_OK, [], resp
@AjaxHandler(r'/ajax/profile/([^/]+)/time_serie/(\d+)/(.*)$')
def _GetTimeSerieForSnapshot(args, req_vars):  # pylint: disable=W0613
  """Gets the data for the area chart for a given metric and bucket.

  |args| is (profile id, metric index, bucket path).
  The response is formatted according to the Google Charts DataTable format.
  """
  snapshot_id = args[0]
  metric_index = int(args[1])
  bucket_path = args[2]
  snapshots = _GetCacheObject(snapshot_id)
  if not snapshots:
    return _HTTP_GONE, [], 'Cannot find the selected profile.'
  if metric_index >= len(next(snapshots.itervalues()).keys):
    return _HTTP_GONE, [], 'Invalid metric id %d' % metric_index

  def FindBucketByPath(bucket, path, parent_path=''):  # Essentially a DFS.
    cur_path = parent_path + bucket.name + '/'
    if cur_path == path:
      return bucket
    for child in bucket.children:
      res = FindBucketByPath(child, path, cur_path)
      if res:
        return res
    return None

  # The resulting data table will look like this (assuming len(metrics) == 2):
  # Time  Ashmem    Dalvik     Other
  # 0     (1024,0)  (4096,1024)  (0,0)
  # 30    (512,512) (1024,1024)  (0,512)
  # 60    (0,512)   (1024,0)     (512,0)
  resp = {'cols': [], 'rows': []}
  for time, aggregated_result in snapshots.iteritems():
    bucket = FindBucketByPath(aggregated_result.total, bucket_path)
    if not bucket:
      return _HTTP_GONE, [], 'Bucket %s not found' % bucket_path

    # If the user selected a non-leaf bucket, display the breakdown of its
    # direct children. Otherwise just the leaf bucket.
    children_buckets = bucket.children if bucket.children else [bucket]

    # Create the columns (form the buckets) when processing the first snapshot.
    if not resp['cols']:
      resp['cols'] += [{'label': 'Time', 'type': 'string'}]
      for child_bucket in children_buckets:
        resp['cols'] += [{'label': child_bucket.name, 'type': 'number'}]

    row = [{'v': str(time), 'f': None}]
    for child_bucket in children_buckets:
      row += [{'v': child_bucket.values[metric_index] / 1024, 'f': None}]
    resp['rows'] += [{'c': row}]
  return _HTTP_OK, [], resp
@AjaxHandler(r'/ajax/profile/rules')
def _ListProfilingRules(args, req_vars):  # pylint: disable=W0613
  """Lists the classification rule files available for profiling."""
  rule_paths = glob.glob(constants.CLASSIFICATION_RULES_PATH +
                         os.sep + '*' + os.sep + '*.py')
  # Make the paths relative to the rules dir (dropping the leading os.sep).
  rules = [path.replace(constants.CLASSIFICATION_RULES_PATH, '')[1:]
           for path in rule_paths]
  resp = {'mmap': [rule for rule in rules if 'mmap-' in rule],
          'nheap': [rule for rule in rules if 'nheap-' in rule]}
  resp['nheap'].insert(0, 'heuristic')
  return _HTTP_OK, [], resp
@AjaxHandler(r'/ajax/ps/([^/]+)/([^/]+)$')  # /ajax/ps/Android/a0b1c2[?all=1]
def _ListProcesses(args, req_vars):  # pylint: disable=W0613
  """Lists processes and their CPU / mem stats.

  The response is formatted according to the Google Charts DataTable format.
  """
  device = _GetDevice(args)
  if not device:
    return _HTTP_GONE, [], 'Device not found'
  resp = {
      'cols': [
          {'label': 'Pid', 'type':'number'},
          {'label': 'Name', 'type':'string'},
          {'label': 'Cpu %', 'type':'number'},
          {'label': 'Mem RSS Kb', 'type':'number'},
          {'label': '# Threads', 'type':'number'},
      ],
      'rows': []}
  for process in device.ListProcesses():
    # Exclude system apps if the request didn't contain the ?all=1 arg.
    if not req_vars.get('all') and not re.match(_APP_PROCESS_RE, process.name):
      continue
    stats = process.GetStats()
    resp['rows'] += [{'c': [
        {'v': process.pid, 'f': None},
        {'v': process.name, 'f': None},
        {'v': stats.cpu_usage, 'f': None},
        {'v': stats.vm_rss, 'f': None},
        {'v': stats.threads, 'f': None},
    ]}]
  return _HTTP_OK, [], resp
@AjaxHandler(r'/ajax/stats/([^/]+)/([^/]+)$')  # /ajax/stats/Android/a0b1c2
def _GetDeviceStats(args, req_vars):  # pylint: disable=W0613
  """Lists device CPU / mem stats.

  The response is formatted according to the Google Charts DataTable format.
  """
  device = _GetDevice(args)
  if not device:
    return _HTTP_GONE, [], 'Device not found'
  device_stats = device.GetStats()

  # One row per CPU with its usr/sys/idle percentages.
  cpu_stats = {
      'cols': [
          {'label': 'CPU', 'type': 'string'},
          {'label': 'Usr %', 'type': 'number'},
          {'label': 'Sys %', 'type': 'number'},
          {'label': 'Idle %', 'type': 'number'},
      ],
      'rows': []}
  for cpu_idx, cpu in enumerate(device_stats.cpu_times):
    cpu_stats['rows'].append({'c': [
        {'v': '# %d' % cpu_idx, 'f': None},
        {'v': cpu['usr'], 'f': None},
        {'v': cpu['sys'], 'f': None},
        {'v': cpu['idle'], 'f': None},
    ]})

  # One row per memory section, converted to MB for display.
  mem_stats = {
      'cols': [
          {'label': 'Section', 'type': 'string'},
          {'label': 'MB', 'type': 'number', 'pattern': ''},
      ],
      'rows': []}
  for key, value in device_stats.memory_stats.iteritems():
    mem_stats['rows'].append({'c': [
        {'v': key, 'f': None},
        {'v': value / 1024, 'f': None}
    ]})
  return _HTTP_OK, [], {'cpu': cpu_stats, 'mem': mem_stats}
@AjaxHandler(r'/ajax/stats/([^/]+)/([^/]+)/(\d+)$')  # /ajax/stats/Android/a0/3
def _GetProcessStats(args, req_vars):  # pylint: disable=W0613
  """Lists CPU / mem stats for a given process (and keeps history).

  The response is formatted according to the Google Charts DataTable format.
  """
  process = _GetProcess(args)
  if not process:
    return _HTTP_GONE, [], 'Device not found'

  proc_uri = '/'.join(args)
  cur_stats = process.GetStats()
  # Keep a rolling window of at most |_STATS_HIST_SIZE| samples per process,
  # keyed by its /backend/device/pid URI.
  if proc_uri not in _proc_stats_history:
    _proc_stats_history[proc_uri] = collections.deque(maxlen=_STATS_HIST_SIZE)
  history = _proc_stats_history[proc_uri]
  history.append(cur_stats)

  cpu_stats = {
      'cols': [
          {'label': 'T', 'type':'string'},
          {'label': 'CPU %', 'type':'number'},
          {'label': '# Threads', 'type':'number'},
      ],
      'rows': []
  }
  mem_stats = {
      'cols': [
          {'label': 'T', 'type':'string'},
          {'label': 'Mem RSS Kb', 'type':'number'},
          {'label': 'Page faults', 'type':'number'},
      ],
      'rows': []
  }
  # Emit the whole history, one row per sample, timestamped by process uptime.
  for stats in history:
    cpu_stats['rows'] += [{'c': [
        {'v': str(datetime.timedelta(seconds=stats.run_time)), 'f': None},
        {'v': stats.cpu_usage, 'f': None},
        {'v': stats.threads, 'f': None},
    ]}]
    mem_stats['rows'] += [{'c': [
        {'v': str(datetime.timedelta(seconds=stats.run_time)), 'f': None},
        {'v': stats.vm_rss, 'f': None},
        {'v': stats.page_faults, 'f': None},
    ]}]
  return _HTTP_OK, [], {'cpu': cpu_stats, 'mem': mem_stats}
@AjaxHandler(r'/ajax/settings/([^/]+)/?(\w+)?$')  # /ajax/settings/Android[/id]
def _GetDeviceOrBackendSettings(args, req_vars):  # pylint: disable=W0613
  """Returns the settings (description + value) of a backend or a device."""
  backend = backends.GetBackend(args[0])
  if not backend:
    return _HTTP_GONE, [], 'Backend not found'
  # With a device id, the device settings are returned; otherwise the
  # backend-wide ones.
  if args[1]:
    device = _GetDevice(args)
    if not device:
      return _HTTP_GONE, [], 'Device not found'
    settings = device.settings
  else:
    settings = backend.settings
  assert(isinstance(settings, backends.Settings))

  resp = dict(
      (key, {'description': settings.expected_keys[key],
             'value': settings.values[key]})
      for key in settings.expected_keys)
  return _HTTP_OK, [], resp
@AjaxHandler(r'/ajax/settings/([^/]+)/?(\w+)?$', 'POST')
def _SetDeviceOrBackendSettings(args, req_vars):  # pylint: disable=W0613
  """Updates the settings of a backend or device and persists them to disk."""
  backend = backends.GetBackend(args[0])
  if not backend:
    return _HTTP_GONE, [], 'Backend not found'
  if args[1]:
    device = _GetDevice(args)
    if not device:
      return _HTTP_GONE, [], 'Device not found'
    settings, storage_name = device.settings, device.id
  else:
    settings, storage_name = backend.settings, backend.name

  for key in req_vars.iterkeys():
    settings[key] = req_vars[key]
  # Persist, so the values survive server restarts (reloaded in Start()).
  _persistent_storage.StoreSettings(storage_name, settings.values)
  return _HTTP_OK, [], ''
@AjaxHandler(r'/ajax/storage/list')
def _ListStorage(args, req_vars):  # pylint: disable=W0613
  """Lists the snapshots archived in persistent storage.

  The response is formatted according to the Google Charts DataTable format.
  """
  resp = {
      'cols': [
          {'label': 'Archive', 'type':'string'},
          {'label': 'Snapshot', 'type':'string'},
          {'label': 'Mem maps', 'type':'boolean'},
          {'label': 'N. Heap', 'type':'boolean'},
      ],
      'rows': []}
  for archive_name in _persistent_storage.ListArchives():
    archive = _persistent_storage.OpenArchive(archive_name)
    first_timestamp = None
    for timestamp in archive.ListSnapshots():
      # Each snapshot is labeled with its offset from the archive's first one.
      first_timestamp = timestamp if not first_timestamp else first_timestamp
      time_delta = '%d s.' % (timestamp - first_timestamp).total_seconds()
      resp['rows'] += [{'c': [
          {'v': archive_name, 'f': None},
          {'v': file_storage.Archive.TimestampToStr(timestamp),
           'f': time_delta},
          {'v': archive.HasMemMaps(timestamp), 'f': None},
          {'v': archive.HasNativeHeap(timestamp), 'f': None},
      ]}]
  return _HTTP_OK, [], resp
@AjaxHandler(r'/ajax/storage/(.+)/(.+)/mmaps')
def _LoadMmapsFromStorage(args, req_vars):  # pylint: disable=W0613
  """Loads the memory maps of an archived snapshot as a GTable.

  |args| is (archive name, snapshot timestamp string).
  """
  archive = _persistent_storage.OpenArchive(args[0])
  if not archive:
    # BUG FIX: this is a GET endpoint, so req_vars (the querystring dict) has
    # no 'archive' key; referencing it raised a KeyError (HTTP 500) instead of
    # reporting the missing archive. Use the URI argument instead.
    return _HTTP_GONE, [], 'Cannot open archive %s' % args[0]
  timestamp = file_storage.Archive.StrToTimestamp(args[1])
  if not archive.HasMemMaps(timestamp):
    return _HTTP_GONE, [], 'No mmaps for snapshot %s' % timestamp
  mmap = archive.LoadMemMaps(timestamp)
  return _HTTP_OK, [], {'table': _ConvertMmapToGTable(mmap)}
@AjaxHandler(r'/ajax/storage/(.+)/(.+)/nheap')
def _LoadNheapFromStorage(args, req_vars):
  """Returns a Google Charts DataTable dictionary for the nheap.

  |args| is (archive name, snapshot timestamp string).
  """
  archive = _persistent_storage.OpenArchive(args[0])
  if not archive:
    # BUG FIX: this is a GET endpoint, so req_vars (the querystring dict) has
    # no 'archive' key; referencing it raised a KeyError (HTTP 500) instead of
    # reporting the missing archive. Use the URI argument instead.
    return _HTTP_GONE, [], 'Cannot open archive %s' % args[0]
  timestamp = file_storage.Archive.StrToTimestamp(args[1])
  if not archive.HasNativeHeap(timestamp):
    return _HTTP_GONE, [], 'No native heap dump for snapshot %s' % timestamp
  nheap = archive.LoadNativeHeap(timestamp)
  symbols = archive.LoadSymbols()
  nheap.SymbolizeUsingSymbolDB(symbols)
  resp = {
      'cols': [
          {'label': 'Allocated', 'type':'number'},
          {'label': 'Resident', 'type':'number'},
          {'label': 'Flags', 'type':'number'},
          {'label': 'Stack Trace', 'type':'string'},
      ],
      'rows': []}
  for alloc in nheap.allocations:
    # Render the stack trace as an HTML definition list, one dd/dt pair per
    # frame, with the full source info in the tooltip.
    strace = '<dl>'
    for frame in alloc.stack_trace.frames:
      # Use the fallback libname.so+0xaddr if symbol info is not available.
      symbol_name = frame.symbol.name if frame.symbol else '??'
      source_info = (str(frame.symbol.source_info[0]) if
          frame.symbol and frame.symbol.source_info else frame.raw_address)
      strace += '<dd title="%s">%s</dd><dt>%s</dt>' % (
          cgi.escape(source_info),
          cgi.escape(posixpath.basename(source_info)),
          cgi.escape(symbol_name))
    strace += '</dl>'
    resp['rows'] += [{'c': [
        {'v': alloc.size, 'f': _StrMem(alloc.size)},
        {'v': alloc.resident_size, 'f': _StrMem(alloc.resident_size)},
        {'v': alloc.flags, 'f': None},
        {'v': strace, 'f': None},
    ]}]
  return _HTTP_OK, [], resp
# /ajax/tracer/start/Android/device-id/pid
@AjaxHandler(r'/ajax/tracer/start/([^/]+)/([^/]+)/(\d+)', 'POST')
def _StartTracer(args, req_vars):
  """Kicks off a background tracing task for the given process."""
  for arg in ('interval', 'count', 'traceNativeHeap'):
    assert(arg in req_vars), 'Expecting %s argument in POST data' % arg
  process = _GetProcess(args)
  if not process:
    return _HTTP_GONE, [], 'Device not found or process died'
  task_id = background_tasks.StartTracer(
      storage_path=_PERSISTENT_STORAGE_PATH,
      process=process,
      interval=int(req_vars['interval']),
      count=int(req_vars['count']),
      trace_native_heap=req_vars['traceNativeHeap'])
  # The client polls /ajax/tracer/status/{task_id} for progress.
  return _HTTP_OK, [], task_id
@AjaxHandler(r'/ajax/tracer/status/(\d+)')  # /ajax/tracer/status/{task_id}
def _GetTracerStatus(args, req_vars):  # pylint: disable=W0613
  """Reports the progress of a background tracer task."""
  task = background_tasks.Get(int(args[0]))
  if not task:
    return _HTTP_GONE, [], 'Task not found'
  return _HTTP_OK, [], task.GetProgress()
@UriHandler(r'^(?!/ajax)/(.*)$')
def _StaticContent(args, req_vars):  # pylint: disable=W0613
  """Serves the static web UI files under |_CONTENT_DIR|."""
  req_path = args[0] if args[0] else 'index.html'
  file_path = os.path.abspath(os.path.join(_CONTENT_DIR, req_path))
  # Refuse anything that is missing or escapes the content dir (e.g. "..").
  if (not os.path.isfile(file_path) or
      os.path.commonprefix([file_path, _CONTENT_DIR]) != _CONTENT_DIR):
    return _HTTP_NOT_FOUND, [], file_path + ' not found'
  mtype = mimetypes.guess_type(file_path)[0] or 'text/plain'
  with open(file_path, 'rb') as f:
    return _HTTP_OK, [('Content-Type', mtype)], f.read()
def _GetDevice(args):
  """Returns a |backends.Device| instance from a /backend/device URI."""
  assert(len(args) >= 2), 'Malformed request. Expecting /backend/device'
  backend_name, device_id = args[0], args[1]
  return backends.GetDevice(backend_name=backend_name, device_id=device_id)
def _GetProcess(args):
  """Returns a |backends.Process| instance from a /backend/device/pid URI."""
  assert(len(args) >= 3 and args[2].isdigit()), (
      'Malformed request. Expecting /backend/device/pid')
  device = _GetDevice(args)
  return device.GetProcess(int(args[2])) if device else None
def _ConvertMmapToGTable(mmap):
  """Returns a Google Charts DataTable dictionary for the given mmap.

  |mmap| must be a |memory_map.Map|; byte sizes are converted to Kb and
  addresses rendered as zero-padded hex.
  """
  assert(isinstance(mmap, memory_map.Map))
  table = {
      'cols': [
          {'label': 'Start', 'type':'string'},
          {'label': 'End', 'type':'string'},
          {'label': 'Length Kb', 'type':'number'},
          {'label': 'Prot', 'type':'string'},
          {'label': 'RSS Kb', 'type':'number'},
          {'label': 'Priv. Dirty Kb', 'type':'number'},
          {'label': 'Priv. Clean Kb', 'type':'number'},
          {'label': 'Shared Dirty Kb', 'type':'number'},
          {'label': 'Shared Clean Kb', 'type':'number'},
          {'label': 'File', 'type':'string'},
          {'label': 'Offset', 'type':'number'},
          {'label': 'Resident Pages', 'type':'string'},
      ],
      'rows': []}
  for entry in mmap.entries:
    table['rows'] += [{'c': [
        {'v': '%08x' % entry.start, 'f': None},
        {'v': '%08x' % entry.end, 'f': None},
        {'v': entry.len / 1024, 'f': None},
        {'v': entry.prot_flags, 'f': None},
        {'v': entry.rss_bytes / 1024, 'f': None},
        {'v': entry.priv_dirty_bytes / 1024, 'f': None},
        {'v': entry.priv_clean_bytes / 1024, 'f': None},
        {'v': entry.shared_dirty_bytes / 1024, 'f': None},
        {'v': entry.shared_clean_bytes / 1024, 'f': None},
        {'v': entry.mapped_file, 'f': None},
        {'v': entry.mapped_offset, 'f': None},
        {'v': '[%s]' % (','.join(map(str, entry.resident_pages))), 'f': None},
    ]}]
  return table
def _CacheObject(obj_to_store):
  """Stores an object in the server-side cache and returns its unique id."""
  # Evict the oldest entry once the cache is full (FIFO in insertion order).
  while len(_cached_objs) >= _CACHE_LEN:
    _cached_objs.popitem(last=False)
  obj_id = uuid.uuid4().hex
  _cached_objs[obj_id] = obj_to_store
  return str(obj_id)
def _GetCacheObject(obj_id):
  """Retrieves an object in the server-side cache by its id.

  Returns None if |obj_id| is unknown or has been evicted (see _CacheObject).
  """
  return _cached_objs.get(obj_id)
def _StrMem(nbytes):
"""Converts a number (of bytes) into a human readable string (kb, mb)."""
UNITS = ['B', 'K', 'M', 'G']
for unit in UNITS:
if abs(nbytes) < 1024.0 or unit == UNITS[-1]:
return ('%3.1f' % nbytes).replace('.0','') + ' ' + unit
nbytes /= 1024.0
def _HttpRequestHandler(environ, start_response):
  """Parses a single HTTP request and delegates the handling through UriHandler.

  This essentially wires up wsgiref.simple_server with our @UriHandler(s).
  """
  method = environ['REQUEST_METHOD']
  path = environ['PATH_INFO']
  if method == 'POST':
    # POST bodies are JSON-encoded dictionaries.
    body_size = int(environ.get('CONTENT_LENGTH', 0))
    req_vars = json.loads(environ['wsgi.input'].read(body_size))
  else:
    # GET arguments come from the querystring.
    req_vars = urlparse.parse_qs(environ['QUERY_STRING'])
  http_code, headers, body = UriHandler.Handle(method, path, req_vars)
  start_response(http_code, headers)
  return [body]
def Start(http_port):
  """Starts the (blocking) HTTP server on 127.0.0.1:|http_port|.

  Runs until interrupted (CTRL-C).
  """
  # Load the saved backends' settings (some of them might be needed to bootstrap
  # as, for instance, the adb path for the Android backend).
  memory_inspector.RegisterAllBackends()
  for backend in backends.ListBackends():
    for k, v in _persistent_storage.LoadSettings(backend.name).iteritems():
      backend.settings[k] = v

  httpd = wsgiref.simple_server.make_server(
      '127.0.0.1', http_port, _HttpRequestHandler)
  try:
    httpd.serve_forever()
  except KeyboardInterrupt:
    pass  # Don't print useless stack traces when the user hits CTRL-C.
|
|
# Copyright 2012-2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Identity v2.0 User action implementations"""
import six
from keystoneauth1 import exceptions as ks_exc
from openstackclient.common import command
from openstackclient.common import utils
from openstackclient.i18n import _
class CreateUser(command.ShowOne):
    """Create new user"""

    def get_parser(self, prog_name):
        parser = super(CreateUser, self).get_parser(prog_name)
        parser.add_argument(
            'name',
            metavar='<name>',
            help=_('New user name'),
        )
        parser.add_argument(
            '--project',
            metavar='<project>',
            help=_('Default project (name or ID)'),
        )
        parser.add_argument(
            '--password',
            metavar='<password>',
            help=_('Set user password'),
        )
        parser.add_argument(
            '--password-prompt',
            dest="password_prompt",
            action="store_true",
            help=_('Prompt interactively for password'),
        )
        parser.add_argument(
            '--email',
            metavar='<email-address>',
            help=_('Set user email address'),
        )
        # --enable / --disable are mutually exclusive; enabled is the default.
        enable_group = parser.add_mutually_exclusive_group()
        enable_group.add_argument(
            '--enable',
            action='store_true',
            help=_('Enable user (default)'),
        )
        enable_group.add_argument(
            '--disable',
            action='store_true',
            help=_('Disable user'),
        )
        parser.add_argument(
            '--or-show',
            action='store_true',
            help=_('Return existing user'),
        )
        return parser

    def take_action(self, parsed_args):
        identity_client = self.app.client_manager.identity
        if parsed_args.project:
            # Resolve the project name/ID to its ID for the create call.
            project_id = utils.find_resource(
                identity_client.tenants,
                parsed_args.project,
            ).id
        else:
            project_id = None
        enabled = True
        if parsed_args.disable:
            enabled = False
        if parsed_args.password_prompt:
            # An interactive prompt overrides any --password value.
            parsed_args.password = utils.get_password(self.app.stdin)
        try:
            user = identity_client.users.create(
                parsed_args.name,
                parsed_args.password,
                parsed_args.email,
                tenant_id=project_id,
                enabled=enabled,
            )
        except ks_exc.Conflict as e:
            # With --or-show, a name collision returns the existing user
            # instead of failing.
            if parsed_args.or_show:
                user = utils.find_resource(
                    identity_client.users,
                    parsed_args.name,
                )
                self.log.info(_('Returning existing user %s'), user.name)
            else:
                raise e

        # NOTE(dtroyer): The users.create() method wants 'tenant_id' but
        #                the returned resource has 'tenantId'. Sigh.
        #                We're using project_id now inside OSC so there.
        if 'tenantId' in user._info:
            user._info.update(
                {'project_id': user._info.pop('tenantId')}
            )

        info = {}
        info.update(user._info)
        return zip(*sorted(six.iteritems(info)))
class DeleteUser(command.Command):
    """Delete user(s)"""

    def get_parser(self, prog_name):
        parser = super(DeleteUser, self).get_parser(prog_name)
        parser.add_argument(
            'users',
            metavar='<user>',
            nargs="+",
            help=_('User(s) to delete (name or ID)'),
        )
        return parser

    def take_action(self, parsed_args):
        identity_client = self.app.client_manager.identity
        # Resolve each name/ID to a user object, then delete by ID.
        for user_ref in parsed_args.users:
            user_obj = utils.find_resource(identity_client.users, user_ref)
            identity_client.users.delete(user_obj.id)
class ListUser(command.Lister):
    """List users"""

    def get_parser(self, prog_name):
        parser = super(ListUser, self).get_parser(prog_name)
        parser.add_argument(
            '--project',
            metavar='<project>',
            help=_('Filter users by project (name or ID)'),
        )
        parser.add_argument(
            '--long',
            action='store_true',
            default=False,
            help=_('List additional fields in output'),
        )
        return parser

    def take_action(self, parsed_args):
        identity_client = self.app.client_manager.identity

        def _format_project(project):
            # NOTE(review): closes over |project_cache|, which is only
            # populated in the --long branch below. This is safe because the
            # formatter is bound to the 'tenantId' column, which only exists
            # with --long.
            if not project:
                return ""
            if project in project_cache.keys():
                return project_cache[project].name
            else:
                return project

        project = None
        if parsed_args.project:
            project = utils.find_resource(
                identity_client.tenants,
                parsed_args.project,
            )
            project = project.id
        if parsed_args.long:
            columns = (
                'ID',
                'Name',
                'tenantId',
                'Email',
                'Enabled',
            )
            column_headers = (
                'ID',
                'Name',
                'Project',
                'Email',
                'Enabled',
            )
            # Cache the project list
            project_cache = {}
            try:
                for p in identity_client.tenants.list():
                    project_cache[p.id] = p
            except Exception:
                # Just forget it if there's any trouble
                pass
        else:
            columns = column_headers = ('ID', 'Name')
        data = identity_client.users.list(tenant_id=project)

        if parsed_args.project:
            # De-duplicate the result by user id -- presumably the v2 API can
            # return duplicates when filtering by tenant; verify against the
            # identity API behavior.
            d = {}
            for s in data:
                d[s.id] = s
            data = d.values()

        if parsed_args.long:
            # FIXME(dtroyer): Sometimes user objects have 'tenant_id' instead
            #                 of 'tenantId'. Why? Dunno yet, but until that
            #                 is fixed we need to handle it; auth_token.py
            #                 only looks for 'tenantId'.
            for d in data:
                if 'tenant_id' in d._info:
                    d._info['tenantId'] = d._info.pop('tenant_id')
                    d._add_details(d._info)
        return (column_headers,
                (utils.get_item_properties(
                    s, columns,
                    mixed_case_fields=('tenantId',),
                    formatters={'tenantId': _format_project},
                ) for s in data))
class SetUser(command.Command):
    """Set user properties"""

    def get_parser(self, prog_name):
        parser = super(SetUser, self).get_parser(prog_name)
        parser.add_argument(
            'user',
            metavar='<user>',
            help=_('User to change (name or ID)'),
        )
        parser.add_argument(
            '--name',
            metavar='<name>',
            help=_('Set user name'),
        )
        parser.add_argument(
            '--project',
            metavar='<project>',
            help=_('Set default project (name or ID)'),
        )
        parser.add_argument(
            '--password',
            metavar='<user-password>',
            help=_('Set user password'),
        )
        parser.add_argument(
            '--password-prompt',
            dest="password_prompt",
            action="store_true",
            help=_('Prompt interactively for password'),
        )
        parser.add_argument(
            '--email',
            metavar='<email-address>',
            help=_('Set user email address'),
        )
        enable_group = parser.add_mutually_exclusive_group()
        enable_group.add_argument(
            '--enable',
            action='store_true',
            help=_('Enable user (default)'),
        )
        enable_group.add_argument(
            '--disable',
            action='store_true',
            help=_('Disable user'),
        )
        return parser

    def take_action(self, parsed_args):
        identity_client = self.app.client_manager.identity

        if parsed_args.password_prompt:
            parsed_args.password = utils.get_password(self.app.stdin)

        # Nothing to do if no property option was supplied.
        # BUG FIX: the original tested `not parsed_args.name` twice; the
        # duplicate clause is removed (the guard's outcome is unchanged).
        if (not parsed_args.name
                and not parsed_args.password
                and not parsed_args.email
                and not parsed_args.project
                and not parsed_args.enable
                and not parsed_args.disable):
            return

        user = utils.find_resource(
            identity_client.users,
            parsed_args.user,
        )

        if parsed_args.password:
            identity_client.users.update_password(
                user.id,
                parsed_args.password,
            )

        if parsed_args.project:
            project = utils.find_resource(
                identity_client.tenants,
                parsed_args.project,
            )
            identity_client.users.update_tenant(
                user.id,
                project.id,
            )

        kwargs = {}
        if parsed_args.name:
            kwargs['name'] = parsed_args.name
        if parsed_args.email:
            kwargs['email'] = parsed_args.email
        # Preserve the current enabled state unless explicitly changed.
        kwargs['enabled'] = user.enabled
        if parsed_args.enable:
            kwargs['enabled'] = True
        if parsed_args.disable:
            kwargs['enabled'] = False

        identity_client.users.update(user.id, **kwargs)
class ShowUser(command.ShowOne):
    """Display user details"""

    def get_parser(self, prog_name):
        parser = super(ShowUser, self).get_parser(prog_name)
        parser.add_argument(
            'user',
            metavar='<user>',
            help=_('User to display (name or ID)'),
        )
        return parser

    def take_action(self, parsed_args):
        identity_client = self.app.client_manager.identity
        info = {}
        try:
            user = utils.find_resource(
                identity_client.users,
                parsed_args.user,
            )
            info.update(user._info)
        except ks_exc.Forbidden as e:
            # Listing users can be forbidden for non-admins; if the requested
            # user is the authenticated user itself, answer from the token.
            auth_ref = self.app.client_manager.auth_ref
            if (
                parsed_args.user == auth_ref.user_id or
                parsed_args.user == auth_ref.username
            ):
                # Ask for currently auth'ed project so return it
                info = {
                    'id': auth_ref.user_id,
                    'name': auth_ref.username,
                    'project_id': auth_ref.project_id,
                    # True because we don't get this far if it is disabled
                    'enabled': True,
                }
            else:
                raise e

        # Normalize both 'tenantId' and 'tenant_id' spellings to 'project_id'.
        if 'tenantId' in info:
            info.update(
                {'project_id': info.pop('tenantId')}
            )
        if 'tenant_id' in info:
            info.update(
                {'project_id': info.pop('tenant_id')}
            )
        return zip(*sorted(six.iteritems(info)))
|
|
# import the modules
import numpy as np
import os
import sys
import tarfile
from scipy import ndimage
from sklearn.linear_model import LogisticRegression
from six.moves.urllib.request import urlretrieve
from six.moves import cPickle as pickle
# Base URL of the hosted notMNIST archives.
url = "http://commondatastorage.googleapis.com/books1000/"
# Last percentage printed by download_progress_hook (None = nothing yet).
last_percent_reported = None
def download_progress_hook(count, blockSize, totalSize):
    """Print download progress: the percentage every 5%, a dot otherwise.

    Intended as the |reporthook| callback of urlretrieve.
    """
    global last_percent_reported
    percent = int(count * blockSize * 100 / totalSize)
    if percent == last_percent_reported:
        return  # Only report when the percentage actually changes.
    if percent % 5 == 0:
        sys.stdout.write("%s%%" % percent)
    else:
        sys.stdout.write(".")
    sys.stdout.flush()
    last_percent_reported = percent
def maybe_download(filename, expected_bytes, force=False):
    """Download |filename| from |url| unless it is already present.

    The file size is verified against |expected_bytes|; an Exception is
    raised on mismatch. Returns the local filename.
    """
    if force or not os.path.exists(filename):
        print("attempting to download: ", filename)
        filename, _ = urlretrieve(url + filename, filename,
                                  reporthook=download_progress_hook)
        print("\n Download complete!")
    statinfo = os.stat(filename)
    if statinfo.st_size != expected_bytes:
        raise Exception(
            "failed to verify " + filename +
            ". Can you get to it with a browser?")
    # BUG FIX: corrected the message typo "founded and verified".
    print("Found and verified ", filename)
    return filename
# Fetch both archives; the sizes are the known byte counts used to verify the
# downloads.
train_filename = maybe_download("notMNIST_large.tar.gz", 247336696)
test_filename = maybe_download("notMNIST_small.tar.gz", 8458043)
num_classes = 10  # One folder per letter class.
np.random.seed(133)  # Fixed seed for reproducible sampling below.
def maybe_extract(filename, force=False):
    """Extract a .tar.gz archive next to itself unless already extracted.

    Returns the sorted list of per-class sub-folders found under the
    extraction root; raises if there are not exactly `num_classes` of them.
    """
    root = os.path.splitext(os.path.splitext(filename)[0])[0]  # strip ".tar.gz"
    if os.path.isdir(root) and not force:
        # typo fixed: was "alread present"
        print("%s already present - Skipping extraction of %s." % (root, filename))
    else:
        print("extracting data for %s. This may take a while. Please wait." % root)
        sys.stdout.flush()
        # 'with' guarantees the archive handle is closed even when
        # extractall() raises (the original leaked it on error).
        with tarfile.open(filename) as tar:
            tar.extractall()
    data_folders = [
        os.path.join(root, d)
        for d in sorted(os.listdir(root))
        if os.path.isdir(os.path.join(root, d))
    ]
    if len(data_folders) != num_classes:
        raise Exception(
            "expected %d folders, one per class. Found %d instead."
            % (num_classes, len(data_folders)))
    return data_folders
train_folders = maybe_extract(train_filename)
test_folders = maybe_extract(test_filename)
# print() call form works identically on Python 2 and 3; the original bare
# `print train_folders` statement is a SyntaxError under Python 3 and was
# inconsistent with every other print in this file.
print(train_folders)
######
# Problem One
######
image_size = 28      # images are 28x28 pixels
pixel_depth = 255.0  # number of intensity levels per pixel
def load_letter(folder, min_num_images):
    """Load every readable image of one letter class into a single tensor.

    Pixels are normalized to roughly [-0.5, 0.5]; unreadable files are
    skipped.  Raises when fewer than `min_num_images` images survive.
    """
    files = os.listdir(folder)
    dataset = np.ndarray(shape=(len(files), image_size, image_size), dtype=np.float32)
    print(folder)
    count = 0
    for name in files:
        path = os.path.join(folder, name)
        try:
            pixels = (ndimage.imread(path).astype(float) - pixel_depth / 2) / pixel_depth
            if pixels.shape != (image_size, image_size):
                raise Exception("unexpected image shape : %s" % str(pixels.shape))
            dataset[count, :, :] = pixels
            count += 1
        except IOError as e:
            print("count not read ", path, ': ', e, "we will skip it ")
    dataset = dataset[0:count, :, :]  # drop rows left empty by skipped files
    if count < min_num_images:
        raise Exception("Many fewer images than expected: %d < %d" % (count, min_num_images))
    print("full datasettensor: ", dataset.shape)
    print("Mean: ", np.mean(dataset))
    print("Standard deviation: ", np.std(dataset))
    return dataset
def maybe_pickle(data_folders, min_num_images_per_class, force=False):
    """Serialize each class folder to `<folder>.pickle`.

    Existing pickles are kept unless `force` is set.  Returns the list of
    pickle paths (one per input folder) regardless of whether they were
    rebuilt.
    """
    dataset_names = []
    for folder in data_folders:
        set_filename = folder + '.pickle'
        dataset_names.append(set_filename)
        if os.path.exists(set_filename) and not force:
            print("%s already present -skipping pickling." % set_filename)
            continue
        print("pickling %s." % set_filename)
        dataset = load_letter(folder, min_num_images_per_class)
        try:
            with open(set_filename, 'wb') as f:
                pickle.dump(dataset, f, pickle.HIGHEST_PROTOCOL)
        except Exception as e:
            print("Unable to save data to ", set_filename, " :", e)
    return dataset_names
# Pickle every class folder (the large set must yield at least 45000 images
# per class, the small set at least 1800); each call returns the list of
# per-class .pickle paths.
train_datasets = maybe_pickle(train_folders, 45000)
test_datasets = maybe_pickle(test_folders,1800)
# randomly select an image to display
# skipped
def make_arrays(nb_rows, img_size):
    """Allocate an uninitialized (dataset, labels) pair.

    dataset has shape (nb_rows, img_size, img_size) float32, labels has
    shape (nb_rows,) int32.  Returns (None, None) when nb_rows is falsy.

    BUG FIX: the original ignored the `img_size` parameter and silently
    used the module global `image_size`; all existing callers pass
    `image_size`, so behavior for them is unchanged.
    """
    if not nb_rows:
        return None, None
    dataset = np.ndarray((nb_rows, img_size, img_size), dtype=np.float32)
    labels = np.ndarray(nb_rows, dtype=np.int32)
    return dataset, labels
def merge_datasets(pickle_files,train_size,valid_size = 0):
    """Merge the per-class pickles into balanced train/validation arrays.

    Takes an equal slice from every class pickle.  Returns
    (valid_dataset, valid_labels, train_dataset, train_labels); the
    validation pair is (None, None) when valid_size == 0.
    """
    num_classes = len(pickle_files)
    valid_dataset,valid_labels = make_arrays(valid_size,image_size)
    train_dataset,train_labels = make_arrays(train_size,image_size)
    # per-class quotas for each split
    vsize_per_class = valid_size // num_classes
    tsize_per_class = train_size // num_classes
    # running write cursors into the merged arrays (advance per class)
    start_v, start_t = 0,0
    end_v,end_t = vsize_per_class, tsize_per_class
    end_l = vsize_per_class + tsize_per_class
    for label, pickle_file in enumerate(pickle_files):
        try:
            with open(pickle_file,'rb') as f:
                letter_set = pickle.load(f)
                # shuffle within the class so the slices below are a
                # random sample of that letter
                np.random.shuffle(letter_set)
                if valid_dataset is not None:
                    valid_letter = letter_set[:vsize_per_class,:,:]
                    valid_dataset[start_v:end_v,:,:] = valid_letter
                    valid_labels[start_v:end_v] = label
                    start_v += vsize_per_class
                    end_v += vsize_per_class
                # training rows come from the region just after the
                # validation slice
                train_letter = letter_set[vsize_per_class:end_l,:,:]
                train_dataset[start_t:end_t,:,:] = train_letter
                train_labels[start_t:end_t] = label
                start_t += tsize_per_class
                end_t += tsize_per_class
        except Exception as e:
            print("Unable to process data from ", pickle_file, ": ",e)
            raise
    return valid_dataset,valid_labels, train_dataset,train_labels
# Split sizes for the three datasets.
train_size = 200000
valid_size = 10000
test_size = 10000

# Carve a validation slice out of the training pickles; the test set
# keeps no validation slice.
valid_dataset, valid_labels, train_dataset, train_labels = merge_datasets(
    train_datasets, train_size, valid_size)
_, _, test_dataset, test_labels = merge_datasets(test_datasets, test_size)
print("Training: ", train_dataset.shape, train_labels.shape)
print("Validation: ", valid_dataset.shape, valid_labels.shape)
print("Testing: ", test_dataset.shape, test_labels.shape)
# Originally the dataset is organized letter by letter, now let us randomly shuffle it
def randomize(dataset, labels):
    """Shuffle images and labels with one shared random permutation."""
    order = np.random.permutation(labels.shape[0])
    return dataset[order, :, :], labels[order]
# Shuffle each split so examples are interleaved rather than grouped
# letter-by-letter.  NOTE: do not reorder these calls — each one consumes
# the shared NumPy RNG stream, so the order affects the permutations drawn.
train_dataset,train_labels = randomize(train_dataset,train_labels)
test_dataset,test_labels = randomize(test_dataset,test_labels)
valid_dataset,valid_labels = randomize(valid_dataset,valid_labels)
# next we will save the dataset for later use
pickle_file = "notMNIST.pickle"
try:
    # 'with' guarantees the file handle is closed even if pickle.dump
    # raises (the original only closed it on success, leaking on error).
    with open(pickle_file, 'wb') as f:
        save = {
            'train_dataset': train_dataset,
            'train_labels': train_labels,
            'valid_dataset': valid_dataset,
            'valid_labels': valid_labels,
            'test_dataset': test_dataset,
            'test_labels': test_labels,
        }
        pickle.dump(save, f, pickle.HIGHEST_PROTOCOL)
except Exception as e:
    print('Unable to save data to', pickle_file, ':', e)
    raise
statinfo = os.stat(pickle_file)
print("compressed pickle size", statinfo.st_size)
# free the (large) arrays; they will be reloaded from the pickle below
del train_dataset, train_labels, valid_dataset, valid_labels, test_dataset, test_labels
try:
    # 'with' closes the handle even on a failed load.
    with open(pickle_file, 'rb') as f:
        data = pickle.load(f)
except Exception as e:
    # BUG FIX: the original message said "Unable to save data to" here,
    # copy-pasted from the save block above; this branch handles a *load*.
    print('Unable to load data from', pickle_file, ':', e)
    raise
print("Training: ", data['train_dataset'].shape, data['train_labels'].shape)
print("Validation: ", data['valid_dataset'].shape, data['valid_labels'].shape)
print("Testing: ", data['test_dataset'].shape, data['test_labels'].shape)
|
|
# coding=utf-8
r"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from tests import IntegrationTestCase
from tests.holodeck import Request
from twilio.base.exceptions import TwilioException
from twilio.http.response import Response
class TaskTestCase(IntegrationTestCase):
    """Holodeck-mocked integration tests for the TaskRouter v1 Task resource.

    Each ``*_request`` test mocks a 500 response, triggers the client call,
    and asserts the exact HTTP request that was issued; each ``*_response``
    test replays a canned JSON payload and asserts the client parses it
    without raising.  (File generated by the Twilio code generator.)
    """
    # fetch: verify the GET request shape against the mocked transport.
    def test_fetch_request(self):
        self.holodeck.mock(Response(500, ''))
        with self.assertRaises(TwilioException):
            self.client.taskrouter.v1.workspaces(sid="WSXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
                .tasks(sid="WTXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").fetch()
        self.holodeck.assert_has_request(Request(
            'get',
            'https://taskrouter.twilio.com/v1/Workspaces/WSXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/Tasks/WTXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX',
        ))
    # fetch: verify a canned 200 payload deserializes.
    def test_fetch_response(self):
        self.holodeck.mock(Response(
            200,
            '''
            {
                "account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                "age": 25200,
                "assignment_status": "pending",
                "attributes": "{\\"body\\": \\"hello\\"}",
                "date_created": "2014-05-14T18:50:02Z",
                "date_updated": "2014-05-15T07:26:06Z",
                "priority": 0,
                "reason": "Test Reason",
                "sid": "WTaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                "task_queue_sid": "WQaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                "task_channel_sid": "TCaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                "task_channel_unique_name": "task-channel",
                "timeout": 60,
                "url": "https://taskrouter.twilio.com/v1/Workspaces/WSaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Tasks/WTaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                "workflow_sid": "WFaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                "workspace_sid": "WSaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                "workflow_friendly_name": "Test Workflow",
                "task_queue_friendly_name": "Test Queue",
                "addons": "{}",
                "links": {
                    "task_queue": "https://taskrouter.twilio.com/v1/Workspaces/WSaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/TaskQueues/WQaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                    "workflow": "https://taskrouter.twilio.com/v1/Workspaces/WSaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Workflows/WFaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                    "workspace": "https://taskrouter.twilio.com/v1/Workspaces/WSaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                    "reservations": "https://taskrouter.twilio.com/v1/Workspaces/WSaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Tasks/WTaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Reservations"
                }
            }
            '''
        ))
        actual = self.client.taskrouter.v1.workspaces(sid="WSXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
            .tasks(sid="WTXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").fetch()
        self.assertIsNotNone(actual)
    # update: verify the POST request shape.
    def test_update_request(self):
        self.holodeck.mock(Response(500, ''))
        with self.assertRaises(TwilioException):
            self.client.taskrouter.v1.workspaces(sid="WSXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
                .tasks(sid="WTXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").update()
        self.holodeck.assert_has_request(Request(
            'post',
            'https://taskrouter.twilio.com/v1/Workspaces/WSXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/Tasks/WTXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX',
        ))
    # update: verify a canned 200 payload deserializes.
    def test_update_response(self):
        self.holodeck.mock(Response(
            200,
            '''
            {
                "account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                "age": 25200,
                "assignment_status": "pending",
                "attributes": "{\\"body\\": \\"hello\\"}",
                "date_created": "2014-05-14T18:50:02Z",
                "date_updated": "2014-05-15T07:26:06Z",
                "priority": 0,
                "reason": "Test Reason",
                "sid": "WTaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                "task_queue_sid": "WQaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                "task_channel_sid": "TCaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                "task_channel_unique_name": "task-channel",
                "timeout": 60,
                "url": "https://taskrouter.twilio.com/v1/Workspaces/WSaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Tasks/WTaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                "workflow_sid": "WFaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                "workspace_sid": "WSaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                "workflow_friendly_name": "Test Workflow",
                "task_queue_friendly_name": "Test Queue",
                "addons": "{}",
                "links": {
                    "task_queue": "https://taskrouter.twilio.com/v1/Workspaces/WSaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/TaskQueues/WQaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                    "workflow": "https://taskrouter.twilio.com/v1/Workspaces/WSaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Workflows/WFaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                    "workspace": "https://taskrouter.twilio.com/v1/Workspaces/WSaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                    "reservations": "https://taskrouter.twilio.com/v1/Workspaces/WSaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Tasks/WTaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Reservations"
                }
            }
            '''
        ))
        actual = self.client.taskrouter.v1.workspaces(sid="WSXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
            .tasks(sid="WTXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").update()
        self.assertIsNotNone(actual)
    # delete: verify the DELETE request shape.
    def test_delete_request(self):
        self.holodeck.mock(Response(500, ''))
        with self.assertRaises(TwilioException):
            self.client.taskrouter.v1.workspaces(sid="WSXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
                .tasks(sid="WTXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").delete()
        self.holodeck.assert_has_request(Request(
            'delete',
            'https://taskrouter.twilio.com/v1/Workspaces/WSXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/Tasks/WTXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX',
        ))
    # delete: a 204 with no body must be reported as success.
    def test_delete_response(self):
        self.holodeck.mock(Response(
            204,
            None,
        ))
        actual = self.client.taskrouter.v1.workspaces(sid="WSXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
            .tasks(sid="WTXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").delete()
        self.assertTrue(actual)
    # list: verify the GET request shape for the collection.
    def test_list_request(self):
        self.holodeck.mock(Response(500, ''))
        with self.assertRaises(TwilioException):
            self.client.taskrouter.v1.workspaces(sid="WSXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
                .tasks.list()
        self.holodeck.assert_has_request(Request(
            'get',
            'https://taskrouter.twilio.com/v1/Workspaces/WSXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/Tasks',
        ))
    # list: a one-element page deserializes.
    def test_read_full_response(self):
        self.holodeck.mock(Response(
            200,
            '''
            {
                "meta": {
                    "first_page_url": "https://taskrouter.twilio.com/v1/Workspaces/WSaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Tasks?TaskQueueSid=WQaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa&Priority=1&TaskQueueName=task_queue_name&WorkflowName=workflow_name&WorkflowSid=WFaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa&AssignmentStatus=pending%2Creserved&PageSize=50&Page=0",
                    "key": "tasks",
                    "next_page_url": null,
                    "page": 0,
                    "page_size": 50,
                    "previous_page_url": null,
                    "url": "https://taskrouter.twilio.com/v1/Workspaces/WSaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Tasks?TaskQueueSid=WQaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa&Priority=1&TaskQueueName=task_queue_name&WorkflowName=workflow_name&WorkflowSid=WFaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa&AssignmentStatus=pending%2Creserved&PageSize=50&Page=0"
                },
                "tasks": [
                    {
                        "account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                        "age": 25200,
                        "assignment_status": "pending",
                        "attributes": "{\\"body\\": \\"hello\\"}",
                        "date_created": "2014-05-14T14:26:54Z",
                        "date_updated": "2014-05-15T16:03:42Z",
                        "priority": 0,
                        "reason": "Test Reason",
                        "sid": "WTaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                        "task_queue_sid": "WQaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                        "task_channel_sid": "TCaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                        "task_channel_unique_name": "task-channel",
                        "timeout": 60,
                        "url": "https://taskrouter.twilio.com/v1/Workspaces/WSaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Tasks/WTaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                        "workflow_sid": "WFaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                        "workspace_sid": "WSaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                        "workflow_friendly_name": "Test Workflow",
                        "task_queue_friendly_name": "Test Queue",
                        "addons": "{}",
                        "links": {
                            "task_queue": "https://taskrouter.twilio.com/v1/Workspaces/WSaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/TaskQueues/WQaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                            "workflow": "https://taskrouter.twilio.com/v1/Workspaces/WSaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Workflows/WFaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                            "workspace": "https://taskrouter.twilio.com/v1/Workspaces/WSaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                            "reservations": "https://taskrouter.twilio.com/v1/Workspaces/WSaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Tasks/WTaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Reservations"
                        }
                    }
                ]
            }
            '''
        ))
        actual = self.client.taskrouter.v1.workspaces(sid="WSXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
            .tasks.list()
        self.assertIsNotNone(actual)
    # list: an empty page deserializes.
    def test_read_empty_response(self):
        self.holodeck.mock(Response(
            200,
            '''
            {
                "meta": {
                    "first_page_url": "https://taskrouter.twilio.com/v1/Workspaces/WSaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Tasks?TaskQueueSid=WQaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa&Priority=1&TaskQueueName=task_queue_name&WorkflowName=workflow_name&WorkflowSid=WFaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa&AssignmentStatus=pending&PageSize=50&Page=0",
                    "key": "tasks",
                    "next_page_url": null,
                    "page": 0,
                    "page_size": 50,
                    "previous_page_url": null,
                    "url": "https://taskrouter.twilio.com/v1/Workspaces/WSaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Tasks?TaskQueueSid=WQaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa&Priority=1&TaskQueueName=task_queue_name&WorkflowName=workflow_name&WorkflowSid=WFaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa&AssignmentStatus=pending&PageSize=50&Page=0"
                },
                "tasks": []
            }
            '''
        ))
        actual = self.client.taskrouter.v1.workspaces(sid="WSXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
            .tasks.list()
        self.assertIsNotNone(actual)
    # list: multiple assignment statuses in the query also deserialize.
    def test_read_assignment_status_multiple_response(self):
        self.holodeck.mock(Response(
            200,
            '''
            {
                "meta": {
                    "first_page_url": "https://taskrouter.twilio.com/v1/Workspaces/WSaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Tasks?TaskQueueSid=WQaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa&Priority=1&TaskQueueName=task_queue_name&WorkflowName=workflow_name&WorkflowSid=WFaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa&AssignmentStatus=pending&PageSize=50&Page=0",
                    "key": "tasks",
                    "next_page_url": null,
                    "page": 0,
                    "page_size": 50,
                    "previous_page_url": null,
                    "url": "https://taskrouter.twilio.com/v1/Workspaces/WSaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Tasks?TaskQueueSid=WQaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa&Priority=1&TaskQueueName=task_queue_name&WorkflowName=workflow_name&WorkflowSid=WFaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa&AssignmentStatus=pending&PageSize=50&Page=0"
                },
                "tasks": []
            }
            '''
        ))
        actual = self.client.taskrouter.v1.workspaces(sid="WSXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
            .tasks.list()
        self.assertIsNotNone(actual)
    # create: verify the POST request shape for the collection.
    def test_create_request(self):
        self.holodeck.mock(Response(500, ''))
        with self.assertRaises(TwilioException):
            self.client.taskrouter.v1.workspaces(sid="WSXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
                .tasks.create()
        self.holodeck.assert_has_request(Request(
            'post',
            'https://taskrouter.twilio.com/v1/Workspaces/WSXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/Tasks',
        ))
    # create: a canned 201 payload deserializes.
    def test_create_response(self):
        self.holodeck.mock(Response(
            201,
            '''
            {
                "account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                "age": 25200,
                "assignment_status": "pending",
                "attributes": "{\\"body\\": \\"attributes\\"}",
                "date_created": "2014-05-14T18:50:02Z",
                "date_updated": "2014-05-15T07:26:06Z",
                "priority": 1,
                "reason": "Test Reason",
                "sid": "WTaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                "task_queue_sid": "WQaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                "task_channel_sid": "TCaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                "task_channel_unique_name": "unique",
                "timeout": 60,
                "url": "https://taskrouter.twilio.com/v1/Workspaces/WSaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Tasks/WTaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                "workspace_sid": "WSaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                "workflow_sid": "WFaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                "workflow_friendly_name": "Example Workflow",
                "task_queue_friendly_name": "Example Task Queue",
                "addons": "{}",
                "links": {
                    "task_queue": "https://taskrouter.twilio.com/v1/Workspaces/WSaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/TaskQueues/WQaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                    "workflow": "https://taskrouter.twilio.com/v1/Workspaces/WSaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Workflows/WFaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                    "workspace": "https://taskrouter.twilio.com/v1/Workspaces/WSaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                    "reservations": "https://taskrouter.twilio.com/v1/Workspaces/WSaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Tasks/WTaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Reservations"
                }
            }
            '''
        ))
        actual = self.client.taskrouter.v1.workspaces(sid="WSXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
            .tasks.create()
        self.assertIsNotNone(actual)
|
|
# Copyright (C) 2003 Python Software Foundation
import unittest
import shutil
import tempfile
import sys
import stat
import os
import os.path
from test import support
from test.support import TESTFN
from os.path import splitdrive
from distutils.spawn import find_executable, spawn
from shutil import (_make_tarball, _make_zipfile, make_archive,
register_archive_format, unregister_archive_format,
get_archive_formats, Error, unpack_archive,
register_unpack_format, RegistryError,
unregister_unpack_format, get_unpack_formats)
import tarfile
import warnings
from test import support
from test.support import TESTFN, check_warnings, captured_stdout
try:
import bz2
BZ2_SUPPORTED = True
except ImportError:
BZ2_SUPPORTED = False
TESTFN2 = TESTFN + "2"
try:
import grp
import pwd
UID_GID_SUPPORT = True
except ImportError:
UID_GID_SUPPORT = False
try:
import zlib
except ImportError:
zlib = None
try:
import zipfile
ZIP_SUPPORT = True
except ImportError:
ZIP_SUPPORT = find_executable('zip')
class TestShutil(unittest.TestCase):
    def setUp(self):
        # Track every directory handed out by mkdtemp() so tearDown can
        # remove them all.
        super(TestShutil, self).setUp()
        self.tempdirs = []
def tearDown(self):
super(TestShutil, self).tearDown()
while self.tempdirs:
d = self.tempdirs.pop()
shutil.rmtree(d, os.name in ('nt', 'cygwin'))
def write_file(self, path, content='xxx'):
"""Writes a file in the given path.
path can be a string or a sequence.
"""
if isinstance(path, (list, tuple)):
path = os.path.join(*path)
f = open(path, 'w')
try:
f.write(content)
finally:
f.close()
def mkdtemp(self):
"""Create a temporary directory that will be cleaned up.
Returns the path of the directory.
"""
d = tempfile.mkdtemp()
self.tempdirs.append(d)
return d
def test_rmtree_errors(self):
# filename is guaranteed not to exist
filename = tempfile.mktemp()
self.assertRaises(OSError, shutil.rmtree, filename)
    # See bug #1071513 for why we don't run this on cygwin
    # and bug #1076467 for why we don't run this as root.
    if (hasattr(os, 'chmod') and sys.platform[:6] != 'cygwin'
        and not (hasattr(os, 'geteuid') and os.geteuid() == 0)):
        def test_on_error(self):
            # rmtree on an unwritable tree must invoke the onerror callback
            # for both failures (see check_args_to_onerror), advancing
            # self.errorState 0 -> 1 -> 2.
            self.errorState = 0
            os.mkdir(TESTFN)
            self.childpath = os.path.join(TESTFN, 'a')
            f = open(self.childpath, 'w')
            f.close()
            old_dir_mode = os.stat(TESTFN).st_mode
            old_child_mode = os.stat(self.childpath).st_mode
            # Make unwritable.
            os.chmod(self.childpath, stat.S_IREAD)
            os.chmod(TESTFN, stat.S_IREAD)
            shutil.rmtree(TESTFN, onerror=self.check_args_to_onerror)
            # Test whether onerror has actually been called.
            self.assertEqual(self.errorState, 2,
                             "Expected call to onerror function did not happen.")
            # Make writable again.
            os.chmod(TESTFN, old_dir_mode)
            os.chmod(self.childpath, old_child_mode)
            # Clean up.
            shutil.rmtree(TESTFN)
    def check_args_to_onerror(self, func, arg, exc):
        """onerror callback for test_on_error; validates each reported failure."""
        # test_rmtree_errors deliberately runs rmtree
        # on a directory that is chmod 400, which will fail.
        # This function is run when shutil.rmtree fails.
        # 99.9% of the time it initially fails to remove
        # a file in the directory, so the first time through
        # func is os.remove.
        # However, some Linux machines running ZFS on
        # FUSE experienced a failure earlier in the process
        # at os.listdir.  The first failure may legally
        # be either.
        if self.errorState == 0:
            if func is os.remove:
                self.assertEqual(arg, self.childpath)
            else:
                self.assertIs(func, os.listdir,
                              "func must be either os.remove or os.listdir")
                self.assertEqual(arg, TESTFN)
            self.assertTrue(issubclass(exc[0], OSError))
            self.errorState = 1
        else:
            # Second (final) failure: removing the now-empty top directory.
            self.assertEqual(func, os.rmdir)
            self.assertEqual(arg, TESTFN)
            self.assertTrue(issubclass(exc[0], OSError))
            self.errorState = 2
def test_rmtree_dont_delete_file(self):
# When called on a file instead of a directory, don't delete it.
handle, path = tempfile.mkstemp()
os.fdopen(handle).close()
self.assertRaises(OSError, shutil.rmtree, path)
os.remove(path)
def _write_data(self, path, data):
f = open(path, "w")
f.write(data)
f.close()
    def test_copytree_simple(self):
        """copytree must reproduce files and subdirectories with their content."""
        def read_data(path):
            f = open(path)
            data = f.read()
            f.close()
            return data
        src_dir = tempfile.mkdtemp()
        dst_dir = os.path.join(tempfile.mkdtemp(), 'destination')
        self._write_data(os.path.join(src_dir, 'test.txt'), '123')
        os.mkdir(os.path.join(src_dir, 'test_dir'))
        self._write_data(os.path.join(src_dir, 'test_dir', 'test.txt'), '456')
        try:
            shutil.copytree(src_dir, dst_dir)
            self.assertTrue(os.path.isfile(os.path.join(dst_dir, 'test.txt')))
            self.assertTrue(os.path.isdir(os.path.join(dst_dir, 'test_dir')))
            self.assertTrue(os.path.isfile(os.path.join(dst_dir, 'test_dir',
                                                        'test.txt')))
            actual = read_data(os.path.join(dst_dir, 'test.txt'))
            self.assertEqual(actual, '123')
            actual = read_data(os.path.join(dst_dir, 'test_dir', 'test.txt'))
            self.assertEqual(actual, '456')
        finally:
            # Manual cleanup (these temp dirs were not made via self.mkdtemp).
            for path in (
                    os.path.join(src_dir, 'test.txt'),
                    os.path.join(dst_dir, 'test.txt'),
                    os.path.join(src_dir, 'test_dir', 'test.txt'),
                    os.path.join(dst_dir, 'test_dir', 'test.txt'),
                ):
                if os.path.exists(path):
                    os.remove(path)
            for path in (src_dir,
                    os.path.dirname(dst_dir)
                ):
                if os.path.exists(path):
                    shutil.rmtree(path)
def test_copytree_with_exclude(self):
def read_data(path):
f = open(path)
data = f.read()
f.close()
return data
# creating data
join = os.path.join
exists = os.path.exists
src_dir = tempfile.mkdtemp()
try:
dst_dir = join(tempfile.mkdtemp(), 'destination')
self._write_data(join(src_dir, 'test.txt'), '123')
self._write_data(join(src_dir, 'test.tmp'), '123')
os.mkdir(join(src_dir, 'test_dir'))
self._write_data(join(src_dir, 'test_dir', 'test.txt'), '456')
os.mkdir(join(src_dir, 'test_dir2'))
self._write_data(join(src_dir, 'test_dir2', 'test.txt'), '456')
os.mkdir(join(src_dir, 'test_dir2', 'subdir'))
os.mkdir(join(src_dir, 'test_dir2', 'subdir2'))
self._write_data(join(src_dir, 'test_dir2', 'subdir', 'test.txt'),
'456')
self._write_data(join(src_dir, 'test_dir2', 'subdir2', 'test.py'),
'456')
# testing glob-like patterns
try:
patterns = shutil.ignore_patterns('*.tmp', 'test_dir2')
shutil.copytree(src_dir, dst_dir, ignore=patterns)
# checking the result: some elements should not be copied
self.assertTrue(exists(join(dst_dir, 'test.txt')))
self.assertTrue(not exists(join(dst_dir, 'test.tmp')))
self.assertTrue(not exists(join(dst_dir, 'test_dir2')))
finally:
if os.path.exists(dst_dir):
shutil.rmtree(dst_dir)
try:
patterns = shutil.ignore_patterns('*.tmp', 'subdir*')
shutil.copytree(src_dir, dst_dir, ignore=patterns)
# checking the result: some elements should not be copied
self.assertTrue(not exists(join(dst_dir, 'test.tmp')))
self.assertTrue(not exists(join(dst_dir, 'test_dir2', 'subdir2')))
self.assertTrue(not exists(join(dst_dir, 'test_dir2', 'subdir')))
finally:
if os.path.exists(dst_dir):
shutil.rmtree(dst_dir)
# testing callable-style
try:
def _filter(src, names):
res = []
for name in names:
path = os.path.join(src, name)
if (os.path.isdir(path) and
path.split()[-1] == 'subdir'):
res.append(name)
elif os.path.splitext(path)[-1] in ('.py'):
res.append(name)
return res
shutil.copytree(src_dir, dst_dir, ignore=_filter)
# checking the result: some elements should not be copied
self.assertTrue(not exists(join(dst_dir, 'test_dir2', 'subdir2',
'test.py')))
self.assertTrue(not exists(join(dst_dir, 'test_dir2', 'subdir')))
finally:
if os.path.exists(dst_dir):
shutil.rmtree(dst_dir)
finally:
shutil.rmtree(src_dir)
shutil.rmtree(os.path.dirname(dst_dir))
    @unittest.skipUnless(hasattr(os, 'link'), 'requires os.link')
    def test_dont_copy_file_onto_link_to_itself(self):
        """copyfile onto a hard link of the source must fail without data loss."""
        # Temporarily disable test on Windows.
        if os.name == 'nt':
            return
        # bug 851123.
        os.mkdir(TESTFN)
        src = os.path.join(TESTFN, 'cheese')
        dst = os.path.join(TESTFN, 'shop')
        try:
            with open(src, 'w') as f:
                f.write('cheddar')
            os.link(src, dst)
            self.assertRaises(shutil.Error, shutil.copyfile, src, dst)
            # Content must be untouched after the failed copy.
            with open(src, 'r') as f:
                self.assertEqual(f.read(), 'cheddar')
            os.remove(dst)
        finally:
            shutil.rmtree(TESTFN, ignore_errors=True)
    @support.skip_unless_symlink
    def test_dont_copy_file_onto_symlink_to_itself(self):
        """copyfile onto a symlink back to the source must fail without data loss."""
        # bug 851123.
        os.mkdir(TESTFN)
        src = os.path.join(TESTFN, 'cheese')
        dst = os.path.join(TESTFN, 'shop')
        try:
            with open(src, 'w') as f:
                f.write('cheddar')
            # Using `src` here would mean we end up with a symlink pointing
            # to TESTFN/TESTFN/cheese, while it should point at
            # TESTFN/cheese.
            os.symlink('cheese', dst)
            self.assertRaises(shutil.Error, shutil.copyfile, src, dst)
            with open(src, 'r') as f:
                self.assertEqual(f.read(), 'cheddar')
            os.remove(dst)
        finally:
            shutil.rmtree(TESTFN, ignore_errors=True)
@support.skip_unless_symlink
def test_rmtree_on_symlink(self):
# bug 1669.
os.mkdir(TESTFN)
try:
src = os.path.join(TESTFN, 'cheese')
dst = os.path.join(TESTFN, 'shop')
os.mkdir(src)
os.symlink(src, dst)
self.assertRaises(OSError, shutil.rmtree, dst)
finally:
shutil.rmtree(TESTFN, ignore_errors=True)
    if hasattr(os, "mkfifo"):
        # Issue #3002: copyfile and copytree block indefinitely on named pipes
        def test_copyfile_named_pipe(self):
            # copyfile must raise SpecialFileError whether the pipe is the
            # source or the destination, instead of blocking.
            os.mkfifo(TESTFN)
            try:
                self.assertRaises(shutil.SpecialFileError,
                                  shutil.copyfile, TESTFN, TESTFN2)
                self.assertRaises(shutil.SpecialFileError,
                                  shutil.copyfile, __file__, TESTFN)
            finally:
                os.remove(TESTFN)
        @support.skip_unless_symlink
        def test_copytree_named_pipe(self):
            # copytree must collect the pipe failure into a shutil.Error
            # rather than blocking or succeeding silently.
            os.mkdir(TESTFN)
            try:
                subdir = os.path.join(TESTFN, "subdir")
                os.mkdir(subdir)
                pipe = os.path.join(subdir, "mypipe")
                os.mkfifo(pipe)
                try:
                    shutil.copytree(TESTFN, TESTFN2)
                except shutil.Error as e:
                    errors = e.args[0]
                    self.assertEqual(len(errors), 1)
                    src, dst, error_msg = errors[0]
                    self.assertEqual("`%s` is a named pipe" % pipe, error_msg)
                else:
                    self.fail("shutil.Error should have been raised")
            finally:
                shutil.rmtree(TESTFN, ignore_errors=True)
                shutil.rmtree(TESTFN2, ignore_errors=True)
def test_copytree_special_func(self):
src_dir = self.mkdtemp()
dst_dir = os.path.join(self.mkdtemp(), 'destination')
self._write_data(os.path.join(src_dir, 'test.txt'), '123')
os.mkdir(os.path.join(src_dir, 'test_dir'))
self._write_data(os.path.join(src_dir, 'test_dir', 'test.txt'), '456')
copied = []
def _copy(src, dst):
copied.append((src, dst))
shutil.copytree(src_dir, dst_dir, copy_function=_copy)
self.assertEqual(len(copied), 2)
    @support.skip_unless_symlink
    def test_copytree_dangling_symlinks(self):
        """Dangling symlinks: error by default, skipped or copied on request."""
        # a dangling symlink raises an error at the end
        src_dir = self.mkdtemp()
        dst_dir = os.path.join(self.mkdtemp(), 'destination')
        os.symlink('IDONTEXIST', os.path.join(src_dir, 'test.txt'))
        os.mkdir(os.path.join(src_dir, 'test_dir'))
        self._write_data(os.path.join(src_dir, 'test_dir', 'test.txt'), '456')
        self.assertRaises(Error, shutil.copytree, src_dir, dst_dir)
        # a dangling symlink is ignored with the proper flag
        dst_dir = os.path.join(self.mkdtemp(), 'destination2')
        shutil.copytree(src_dir, dst_dir, ignore_dangling_symlinks=True)
        self.assertNotIn('test.txt', os.listdir(dst_dir))
        # a dangling symlink is copied if symlinks=True
        dst_dir = os.path.join(self.mkdtemp(), 'destination3')
        shutil.copytree(src_dir, dst_dir, symlinks=True)
        self.assertIn('test.txt', os.listdir(dst_dir))
@unittest.skipUnless(zlib, "requires zlib")
def test_make_tarball(self):
# creating something to tar
tmpdir = self.mkdtemp()
self.write_file([tmpdir, 'file1'], 'xxx')
self.write_file([tmpdir, 'file2'], 'xxx')
os.mkdir(os.path.join(tmpdir, 'sub'))
self.write_file([tmpdir, 'sub', 'file3'], 'xxx')
tmpdir2 = self.mkdtemp()
unittest.skipUnless(splitdrive(tmpdir)[0] == splitdrive(tmpdir2)[0],
"source and target should be on same drive")
base_name = os.path.join(tmpdir2, 'archive')
# working with relative paths to avoid tar warnings
old_dir = os.getcwd()
os.chdir(tmpdir)
try:
_make_tarball(splitdrive(base_name)[1], '.')
finally:
os.chdir(old_dir)
# check if the compressed tarball was created
tarball = base_name + '.tar.gz'
self.assertTrue(os.path.exists(tarball))
# trying an uncompressed one
base_name = os.path.join(tmpdir2, 'archive')
old_dir = os.getcwd()
os.chdir(tmpdir)
try:
_make_tarball(splitdrive(base_name)[1], '.', compress=None)
finally:
os.chdir(old_dir)
tarball = base_name + '.tar'
self.assertTrue(os.path.exists(tarball))
def _tarinfo(self, path):
tar = tarfile.open(path)
try:
names = tar.getnames()
names.sort()
return tuple(names)
finally:
tar.close()
def _create_files(self):
# creating something to tar
tmpdir = self.mkdtemp()
dist = os.path.join(tmpdir, 'dist')
os.mkdir(dist)
self.write_file([dist, 'file1'], 'xxx')
self.write_file([dist, 'file2'], 'xxx')
os.mkdir(os.path.join(dist, 'sub'))
self.write_file([dist, 'sub', 'file3'], 'xxx')
os.mkdir(os.path.join(dist, 'sub2'))
tmpdir2 = self.mkdtemp()
base_name = os.path.join(tmpdir2, 'archive')
return tmpdir, tmpdir2, base_name
    @unittest.skipUnless(zlib, "Requires zlib")
    @unittest.skipUnless(find_executable('tar') and find_executable('gzip'),
                         'Need the tar command to run')
    def test_tarfile_vs_tar(self):
        """_make_tarball's archive must list the same members as system tar's."""
        tmpdir, tmpdir2, base_name = self._create_files()
        old_dir = os.getcwd()
        os.chdir(tmpdir)
        try:
            _make_tarball(base_name, 'dist')
        finally:
            os.chdir(old_dir)
        # check if the compressed tarball was created
        tarball = base_name + '.tar.gz'
        self.assertTrue(os.path.exists(tarball))
        # now create another tarball using `tar`
        tarball2 = os.path.join(tmpdir, 'archive2.tar.gz')
        tar_cmd = ['tar', '-cf', 'archive2.tar', 'dist']
        gzip_cmd = ['gzip', '-f9', 'archive2.tar']
        old_dir = os.getcwd()
        os.chdir(tmpdir)
        try:
            with captured_stdout() as s:
                spawn(tar_cmd)
                spawn(gzip_cmd)
        finally:
            os.chdir(old_dir)
        self.assertTrue(os.path.exists(tarball2))
        # let's compare both tarballs
        self.assertEqual(self._tarinfo(tarball), self._tarinfo(tarball2))
        # trying an uncompressed one
        base_name = os.path.join(tmpdir2, 'archive')
        old_dir = os.getcwd()
        os.chdir(tmpdir)
        try:
            _make_tarball(base_name, 'dist', compress=None)
        finally:
            os.chdir(old_dir)
        tarball = base_name + '.tar'
        self.assertTrue(os.path.exists(tarball))
        # now for a dry_run: must not touch the (already existing) archive
        base_name = os.path.join(tmpdir2, 'archive')
        old_dir = os.getcwd()
        os.chdir(tmpdir)
        try:
            _make_tarball(base_name, 'dist', compress=None, dry_run=True)
        finally:
            os.chdir(old_dir)
        tarball = base_name + '.tar'
        self.assertTrue(os.path.exists(tarball))
@unittest.skipUnless(zlib, "Requires zlib")
@unittest.skipUnless(ZIP_SUPPORT, 'Need zip support to run')
def test_make_zipfile(self):
# creating something to tar
tmpdir = self.mkdtemp()
self.write_file([tmpdir, 'file1'], 'xxx')
self.write_file([tmpdir, 'file2'], 'xxx')
tmpdir2 = self.mkdtemp()
base_name = os.path.join(tmpdir2, 'archive')
_make_zipfile(base_name, tmpdir)
# check if the compressed tarball was created
tarball = base_name + '.zip'
self.assertTrue(os.path.exists(tarball))
def test_make_archive(self):
tmpdir = self.mkdtemp()
base_name = os.path.join(tmpdir, 'archive')
self.assertRaises(ValueError, make_archive, base_name, 'xxx')
    @unittest.skipUnless(zlib, "Requires zlib")
    def test_make_archive_owner_group(self):
        """make_archive must accept owner/group args for both zip and tar."""
        # testing make_archive with owner and group, with various combinations
        # this works even if there's not gid/uid support
        if UID_GID_SUPPORT:
            group = grp.getgrgid(0)[0]
            owner = pwd.getpwuid(0)[0]
        else:
            group = owner = 'root'
        base_dir, root_dir, base_name = self._create_files()
        base_name = os.path.join(self.mkdtemp() , 'archive')
        res = make_archive(base_name, 'zip', root_dir, base_dir, owner=owner,
                           group=group)
        self.assertTrue(os.path.exists(res))
        res = make_archive(base_name, 'zip', root_dir, base_dir)
        self.assertTrue(os.path.exists(res))
        res = make_archive(base_name, 'tar', root_dir, base_dir,
                           owner=owner, group=group)
        self.assertTrue(os.path.exists(res))
        # unknown owner/group names must be tolerated (silently ignored)
        res = make_archive(base_name, 'tar', root_dir, base_dir,
                           owner='kjhkjhkjg', group='oihohoh')
        self.assertTrue(os.path.exists(res))
@unittest.skipUnless(zlib, "Requires zlib")
@unittest.skipUnless(UID_GID_SUPPORT, "Requires grp and pwd support")
def test_tarfile_root_owner(self):
tmpdir, tmpdir2, base_name = self._create_files()
old_dir = os.getcwd()
os.chdir(tmpdir)
group = grp.getgrgid(0)[0]
owner = pwd.getpwuid(0)[0]
try:
archive_name = _make_tarball(base_name, 'dist', compress=None,
owner=owner, group=group)
finally:
os.chdir(old_dir)
# check if the compressed tarball was created
self.assertTrue(os.path.exists(archive_name))
# now checks the rights
archive = tarfile.open(archive_name)
try:
for member in archive.getmembers():
self.assertEqual(member.uid, 0)
self.assertEqual(member.gid, 0)
finally:
archive.close()
def test_make_archive_cwd(self):
current_dir = os.getcwd()
def _breaks(*args, **kw):
raise RuntimeError()
register_archive_format('xxx', _breaks, [], 'xxx file')
try:
try:
make_archive('xxx', 'xxx', root_dir=self.mkdtemp())
except Exception:
pass
self.assertEqual(os.getcwd(), current_dir)
finally:
unregister_archive_format('xxx')
    def test_register_archive_format(self):
        """register_archive_format() must validate its arguments and
        round-trip through get_archive_formats()/unregister."""
        # the archiver argument must be callable
        self.assertRaises(TypeError, register_archive_format, 'xxx', 1)
        # extra_args must be a sequence of (name, value) 2-tuples
        self.assertRaises(TypeError, register_archive_format, 'xxx', lambda: x,
                          1)
        self.assertRaises(TypeError, register_archive_format, 'xxx', lambda: x,
                          [(1, 2), (1, 2, 3)])
        register_archive_format('xxx', lambda: x, [(1, 2)], 'xxx file')
        formats = [name for name, params in get_archive_formats()]
        self.assertIn('xxx', formats)
        unregister_archive_format('xxx')
        formats = [name for name, params in get_archive_formats()]
        self.assertNotIn('xxx', formats)
def _compare_dirs(self, dir1, dir2):
# check that dir1 and dir2 are equivalent,
# return the diff
diff = []
for root, dirs, files in os.walk(dir1):
for file_ in files:
path = os.path.join(root, file_)
target_path = os.path.join(dir2, os.path.split(path)[-1])
if not os.path.exists(target_path):
diff.append(file_)
return diff
    @unittest.skipUnless(zlib, "Requires zlib")
    def test_unpack_archive(self):
        """unpack_archive() must restore archives for every supported
        format."""
        formats = ['tar', 'gztar', 'zip']
        if BZ2_SUPPORTED:
            formats.append('bztar')
        for format in formats:
            tmpdir = self.mkdtemp()
            base_dir, root_dir, base_name = self._create_files()
            tmpdir2 = self.mkdtemp()
            filename = make_archive(base_name, format, root_dir, base_dir)
            # let's try to unpack it now
            unpack_archive(filename, tmpdir2)
            # NOTE(review): tmpdir is a freshly created *empty* directory,
            # so this comparison is vacuous (diff is always []).  The
            # intent is presumably to compare the unpacked tree against
            # the tree archived from _create_files() -- confirm and fix.
            diff = self._compare_dirs(tmpdir, tmpdir2)
            self.assertEqual(diff, [])
    def test_unpack_registery(self):
        """register_unpack_format()/unregister_unpack_format() round-trip.

        (The 'registery' spelling in the name is kept as-is so existing
        test selections keep working.)
        """
        formats = get_unpack_formats()
        def _boo(filename, extract_dir, extra):
            # the registered extra args must be forwarded verbatim
            self.assertEqual(extra, 1)
            self.assertEqual(filename, 'stuff.boo')
            self.assertEqual(extract_dir, 'xx')
        register_unpack_format('Boo', ['.boo', '.b2'], _boo, [('extra', 1)])
        unpack_archive('stuff.boo', 'xx')
        # trying to register a .boo unpacker again
        self.assertRaises(RegistryError, register_unpack_format, 'Boo2',
                          ['.boo'], _boo)
        # should work now
        unregister_unpack_format('Boo')
        register_unpack_format('Boo2', ['.boo'], _boo)
        self.assertIn(('Boo2', ['.boo'], ''), get_unpack_formats())
        self.assertNotIn(('Boo', ['.boo'], ''), get_unpack_formats())
        # let's leave a clean state
        unregister_unpack_format('Boo2')
        self.assertEqual(get_unpack_formats(), formats)
class TestMove(unittest.TestCase):
def setUp(self):
filename = "foo"
self.src_dir = tempfile.mkdtemp()
self.dst_dir = tempfile.mkdtemp()
self.src_file = os.path.join(self.src_dir, filename)
self.dst_file = os.path.join(self.dst_dir, filename)
# Try to create a dir in the current directory, hoping that it is
# not located on the same filesystem as the system tmp dir.
try:
self.dir_other_fs = tempfile.mkdtemp(
dir=os.path.dirname(__file__))
self.file_other_fs = os.path.join(self.dir_other_fs,
filename)
except OSError:
self.dir_other_fs = None
with open(self.src_file, "wb") as f:
f.write(b"spam")
def tearDown(self):
for d in (self.src_dir, self.dst_dir, self.dir_other_fs):
try:
if d:
shutil.rmtree(d)
except:
pass
def _check_move_file(self, src, dst, real_dst):
with open(src, "rb") as f:
contents = f.read()
shutil.move(src, dst)
with open(real_dst, "rb") as f:
self.assertEqual(contents, f.read())
self.assertFalse(os.path.exists(src))
def _check_move_dir(self, src, dst, real_dst):
contents = sorted(os.listdir(src))
shutil.move(src, dst)
self.assertEqual(contents, sorted(os.listdir(real_dst)))
self.assertFalse(os.path.exists(src))
def test_move_file(self):
# Move a file to another location on the same filesystem.
self._check_move_file(self.src_file, self.dst_file, self.dst_file)
def test_move_file_to_dir(self):
# Move a file inside an existing dir on the same filesystem.
self._check_move_file(self.src_file, self.dst_dir, self.dst_file)
def test_move_file_other_fs(self):
# Move a file to an existing dir on another filesystem.
if not self.dir_other_fs:
# skip
return
self._check_move_file(self.src_file, self.file_other_fs,
self.file_other_fs)
def test_move_file_to_dir_other_fs(self):
# Move a file to another location on another filesystem.
if not self.dir_other_fs:
# skip
return
self._check_move_file(self.src_file, self.dir_other_fs,
self.file_other_fs)
def test_move_dir(self):
# Move a dir to another location on the same filesystem.
dst_dir = tempfile.mktemp()
try:
self._check_move_dir(self.src_dir, dst_dir, dst_dir)
finally:
try:
shutil.rmtree(dst_dir)
except:
pass
def test_move_dir_other_fs(self):
# Move a dir to another location on another filesystem.
if not self.dir_other_fs:
# skip
return
dst_dir = tempfile.mktemp(dir=self.dir_other_fs)
try:
self._check_move_dir(self.src_dir, dst_dir, dst_dir)
finally:
try:
shutil.rmtree(dst_dir)
except:
pass
def test_move_dir_to_dir(self):
# Move a dir inside an existing dir on the same filesystem.
self._check_move_dir(self.src_dir, self.dst_dir,
os.path.join(self.dst_dir, os.path.basename(self.src_dir)))
def test_move_dir_to_dir_other_fs(self):
# Move a dir inside an existing dir on another filesystem.
if not self.dir_other_fs:
# skip
return
self._check_move_dir(self.src_dir, self.dir_other_fs,
os.path.join(self.dir_other_fs, os.path.basename(self.src_dir)))
def test_existing_file_inside_dest_dir(self):
# A file with the same name inside the destination dir already exists.
with open(self.dst_file, "wb"):
pass
self.assertRaises(shutil.Error, shutil.move, self.src_file, self.dst_dir)
def test_dont_move_dir_in_itself(self):
# Moving a dir inside itself raises an Error.
dst = os.path.join(self.src_dir, "bar")
self.assertRaises(shutil.Error, shutil.move, self.src_dir, dst)
def test_destinsrc_false_negative(self):
os.mkdir(TESTFN)
try:
for src, dst in [('srcdir', 'srcdir/dest')]:
src = os.path.join(TESTFN, src)
dst = os.path.join(TESTFN, dst)
self.assertTrue(shutil._destinsrc(src, dst),
msg='_destinsrc() wrongly concluded that '
'dst (%s) is not in src (%s)' % (dst, src))
finally:
shutil.rmtree(TESTFN, ignore_errors=True)
def test_destinsrc_false_positive(self):
os.mkdir(TESTFN)
try:
for src, dst in [('srcdir', 'src/dest'), ('srcdir', 'srcdir.new')]:
src = os.path.join(TESTFN, src)
dst = os.path.join(TESTFN, dst)
self.assertFalse(shutil._destinsrc(src, dst),
msg='_destinsrc() wrongly concluded that '
'dst (%s) is in src (%s)' % (dst, src))
finally:
shutil.rmtree(TESTFN, ignore_errors=True)
class TestCopyFile(unittest.TestCase):
    """Tests for shutil.copyfile()'s file handling, driven through a fake
    `open` monkeypatched into the shutil module."""
    # Whether tearDown() must remove the injected shutil.open.
    _delete = False
    class Faux(object):
        """Minimal file-like context manager that records how it was used."""
        # True once __enter__ has been called.
        _entered = False
        # The (exc_type, exc_val, exc_tb) triple passed to __exit__.
        _exited_with = None
        # True if __exit__ raised its simulated close error.
        _raised = False
        def __init__(self, raise_in_exit=False, suppress_at_exit=True):
            self._raise_in_exit = raise_in_exit
            self._suppress_at_exit = suppress_at_exit
        def read(self, *args):
            # behave like an empty file
            return ''
        def __enter__(self):
            self._entered = True
        def __exit__(self, exc_type, exc_val, exc_tb):
            self._exited_with = exc_type, exc_val, exc_tb
            if self._raise_in_exit:
                self._raised = True
                raise IOError("Cannot close")
            return self._suppress_at_exit
    def tearDown(self):
        # undo the monkeypatch so later tests see the builtin open again
        if self._delete:
            del shutil.open
    def _set_shutil_open(self, func):
        # monkeypatch the open() used inside shutil and remember to undo it
        shutil.open = func
        self._delete = True
    def test_w_source_open_fails(self):
        # an IOError opening the source must propagate out of copyfile()
        def _open(filename, mode='r'):
            if filename == 'srcfile':
                raise IOError('Cannot open "srcfile"')
            assert 0  # shouldn't reach here.
        self._set_shutil_open(_open)
        self.assertRaises(IOError, shutil.copyfile, 'srcfile', 'destfile')
    def test_w_dest_open_fails(self):
        # a failure opening the destination must still close the source
        srcfile = self.Faux()
        def _open(filename, mode='r'):
            if filename == 'srcfile':
                return srcfile
            if filename == 'destfile':
                raise IOError('Cannot open "destfile"')
            assert 0  # shouldn't reach here.
        self._set_shutil_open(_open)
        shutil.copyfile('srcfile', 'destfile')
        self.assertTrue(srcfile._entered)
        # the source's __exit__ saw the destination-open error
        self.assertTrue(srcfile._exited_with[0] is IOError)
        self.assertEqual(srcfile._exited_with[1].args,
                         ('Cannot open "destfile"',))
    def test_w_dest_close_fails(self):
        # a failure closing the destination surfaces in the source's exit
        srcfile = self.Faux()
        destfile = self.Faux(True)
        def _open(filename, mode='r'):
            if filename == 'srcfile':
                return srcfile
            if filename == 'destfile':
                return destfile
            assert 0  # shouldn't reach here.
        self._set_shutil_open(_open)
        shutil.copyfile('srcfile', 'destfile')
        self.assertTrue(srcfile._entered)
        self.assertTrue(destfile._entered)
        self.assertTrue(destfile._raised)
        self.assertTrue(srcfile._exited_with[0] is IOError)
        self.assertEqual(srcfile._exited_with[1].args,
                         ('Cannot close',))
    def test_w_source_close_fails(self):
        # a failure closing the source propagates out of copyfile()
        srcfile = self.Faux(True)
        destfile = self.Faux()
        def _open(filename, mode='r'):
            if filename == 'srcfile':
                return srcfile
            if filename == 'destfile':
                return destfile
            assert 0  # shouldn't reach here.
        self._set_shutil_open(_open)
        self.assertRaises(IOError,
                          shutil.copyfile, 'srcfile', 'destfile')
        self.assertTrue(srcfile._entered)
        self.assertTrue(destfile._entered)
        self.assertFalse(destfile._raised)
        # the body completed without error; only the close itself raised
        self.assertTrue(srcfile._exited_with[0] is None)
        self.assertTrue(srcfile._raised)
def test_main():
    """Entry point used by regrtest to run this module's test classes."""
    support.run_unittest(TestShutil, TestMove, TestCopyFile)
# allow running the test file directly
if __name__ == '__main__':
    test_main()
|
|
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import unittest
import time
from mock import Mock
from swift.proxy.controllers import InfoController
from swift.proxy.server import Application as ProxyApp
from swift.common import utils
from swift.common.swob import Request, HTTPException
class TestInfoController(unittest.TestCase):
    """Unit tests for swift.proxy.controllers.InfoController, covering the
    public /info endpoint and its HMAC-protected admin variant."""

    def setUp(self):
        # Reset the process-global capability registries between tests.
        utils._swift_info = {}
        utils._swift_admin_info = {}
        # Collectors used by start_response(); previously these were never
        # initialized anywhere, so any test driving the controller through
        # the WSGI interface raised AttributeError.
        self.got_statuses = []
        self.got_headers = []

    def get_controller(self, expose_info=None, disallowed_sections=None,
                       admin_key=None):
        """Build an InfoController wired to a mocked proxy application."""
        disallowed_sections = disallowed_sections or []
        app = Mock(spec=ProxyApp)
        return InfoController(app, None, expose_info,
                              disallowed_sections, admin_key)

    def start_response(self, status, headers):
        """WSGI start_response stub that records status and headers."""
        self.got_statuses.append(status)
        for h in headers:
            self.got_headers.append({h[0]: h[1]})

    def test_disabled_info(self):
        """GET /info returns 403 when info exposure is disabled."""
        controller = self.get_controller(expose_info=False)
        req = Request.blank(
            '/info', environ={'REQUEST_METHOD': 'GET'})
        resp = controller.GET(req)
        self.assertIsInstance(resp, HTTPException)
        self.assertEqual('403 Forbidden', str(resp))

    def test_get_info(self):
        """GET /info exposes public sections but never admin ones."""
        controller = self.get_controller(expose_info=True)
        utils._swift_info = {'foo': {'bar': 'baz'}}
        utils._swift_admin_info = {'qux': {'quux': 'corge'}}
        req = Request.blank(
            '/info', environ={'REQUEST_METHOD': 'GET'})
        resp = controller.GET(req)
        self.assertIsInstance(resp, HTTPException)
        self.assertEqual('200 OK', str(resp))
        info = json.loads(resp.body)
        self.assertNotIn('admin', info)
        self.assertIn('foo', info)
        self.assertIn('bar', info['foo'])
        self.assertEqual(info['foo']['bar'], 'baz')

    def test_options_info(self):
        """OPTIONS /info succeeds and advertises the allowed methods."""
        controller = self.get_controller(expose_info=True)
        req = Request.blank(
            '/info', environ={'REQUEST_METHOD': 'GET'})
        resp = controller.OPTIONS(req)
        self.assertIsInstance(resp, HTTPException)
        self.assertEqual('200 OK', str(resp))
        self.assertIn('Allow', resp.headers)

    def test_get_info_cors(self):
        """GET /info with an Origin header returns CORS headers."""
        controller = self.get_controller(expose_info=True)
        utils._swift_info = {'foo': {'bar': 'baz'}}
        utils._swift_admin_info = {'qux': {'quux': 'corge'}}
        req = Request.blank(
            '/info', environ={'REQUEST_METHOD': 'GET'},
            headers={'Origin': 'http://example.com'})
        resp = controller.GET(req)
        self.assertIsInstance(resp, HTTPException)
        self.assertEqual('200 OK', str(resp))
        info = json.loads(resp.body)
        self.assertNotIn('admin', info)
        self.assertIn('foo', info)
        self.assertIn('bar', info['foo'])
        self.assertEqual(info['foo']['bar'], 'baz')
        self.assertIn('Access-Control-Allow-Origin', resp.headers)
        self.assertIn('Access-Control-Expose-Headers', resp.headers)

    def test_head_info(self):
        """HEAD /info succeeds when info exposure is enabled."""
        controller = self.get_controller(expose_info=True)
        utils._swift_info = {'foo': {'bar': 'baz'}}
        utils._swift_admin_info = {'qux': {'quux': 'corge'}}
        req = Request.blank(
            '/info', environ={'REQUEST_METHOD': 'HEAD'})
        resp = controller.HEAD(req)
        self.assertIsInstance(resp, HTTPException)
        self.assertEqual('200 OK', str(resp))

    def test_disallow_info(self):
        """Sections in disallowed_sections are omitted from /info."""
        controller = self.get_controller(expose_info=True,
                                         disallowed_sections=['foo2'])
        utils._swift_info = {'foo': {'bar': 'baz'},
                             'foo2': {'bar2': 'baz2'}}
        utils._swift_admin_info = {'qux': {'quux': 'corge'}}
        req = Request.blank(
            '/info', environ={'REQUEST_METHOD': 'GET'})
        resp = controller.GET(req)
        self.assertIsInstance(resp, HTTPException)
        self.assertEqual('200 OK', str(resp))
        info = json.loads(resp.body)
        self.assertIn('foo', info)
        self.assertIn('bar', info['foo'])
        self.assertEqual(info['foo']['bar'], 'baz')
        self.assertNotIn('foo2', info)

    def test_disabled_admin_info(self):
        """A signed admin request is rejected when the admin key is empty."""
        controller = self.get_controller(expose_info=True, admin_key='')
        utils._swift_info = {'foo': {'bar': 'baz'}}
        utils._swift_admin_info = {'qux': {'quux': 'corge'}}
        expires = int(time.time() + 86400)
        sig = utils.get_hmac('GET', '/info', expires, '')
        path = '/info?swiftinfo_sig={sig}&swiftinfo_expires={expires}'.format(
            sig=sig, expires=expires)
        req = Request.blank(
            path, environ={'REQUEST_METHOD': 'GET'})
        resp = controller.GET(req)
        self.assertIsInstance(resp, HTTPException)
        self.assertEqual('403 Forbidden', str(resp))

    def test_get_admin_info(self):
        """A correctly signed GET exposes the admin sections."""
        controller = self.get_controller(expose_info=True,
                                         admin_key='secret-admin-key')
        utils._swift_info = {'foo': {'bar': 'baz'}}
        utils._swift_admin_info = {'qux': {'quux': 'corge'}}
        expires = int(time.time() + 86400)
        sig = utils.get_hmac('GET', '/info', expires, 'secret-admin-key')
        path = '/info?swiftinfo_sig={sig}&swiftinfo_expires={expires}'.format(
            sig=sig, expires=expires)
        req = Request.blank(
            path, environ={'REQUEST_METHOD': 'GET'})
        resp = controller.GET(req)
        self.assertIsInstance(resp, HTTPException)
        self.assertEqual('200 OK', str(resp))
        info = json.loads(resp.body)
        self.assertIn('admin', info)
        self.assertIn('qux', info['admin'])
        self.assertIn('quux', info['admin']['qux'])
        self.assertEqual(info['admin']['qux']['quux'], 'corge')

    def test_head_admin_info(self):
        """Signed HEAD requests succeed with either GET or HEAD signatures."""
        # NOTE(review): these requests are dispatched through
        # controller.GET() even though REQUEST_METHOD is HEAD --
        # presumably swob routes HEAD through the GET handler; confirm
        # before changing.
        controller = self.get_controller(expose_info=True,
                                         admin_key='secret-admin-key')
        utils._swift_info = {'foo': {'bar': 'baz'}}
        utils._swift_admin_info = {'qux': {'quux': 'corge'}}
        expires = int(time.time() + 86400)
        sig = utils.get_hmac('GET', '/info', expires, 'secret-admin-key')
        path = '/info?swiftinfo_sig={sig}&swiftinfo_expires={expires}'.format(
            sig=sig, expires=expires)
        req = Request.blank(
            path, environ={'REQUEST_METHOD': 'HEAD'})
        resp = controller.GET(req)
        self.assertIsInstance(resp, HTTPException)
        self.assertEqual('200 OK', str(resp))
        expires = int(time.time() + 86400)
        sig = utils.get_hmac('HEAD', '/info', expires, 'secret-admin-key')
        path = '/info?swiftinfo_sig={sig}&swiftinfo_expires={expires}'.format(
            sig=sig, expires=expires)
        req = Request.blank(
            path, environ={'REQUEST_METHOD': 'HEAD'})
        resp = controller.GET(req)
        self.assertIsInstance(resp, HTTPException)
        self.assertEqual('200 OK', str(resp))

    def test_get_admin_info_invalid_method(self):
        """A signature computed for a different method is rejected (401)."""
        controller = self.get_controller(expose_info=True,
                                         admin_key='secret-admin-key')
        utils._swift_info = {'foo': {'bar': 'baz'}}
        utils._swift_admin_info = {'qux': {'quux': 'corge'}}
        expires = int(time.time() + 86400)
        sig = utils.get_hmac('HEAD', '/info', expires, 'secret-admin-key')
        path = '/info?swiftinfo_sig={sig}&swiftinfo_expires={expires}'.format(
            sig=sig, expires=expires)
        req = Request.blank(
            path, environ={'REQUEST_METHOD': 'GET'})
        resp = controller.GET(req)
        self.assertIsInstance(resp, HTTPException)
        self.assertEqual('401 Unauthorized', str(resp))

    def test_get_admin_info_invalid_expires(self):
        """Expired or malformed swiftinfo_expires values are rejected."""
        controller = self.get_controller(expose_info=True,
                                         admin_key='secret-admin-key')
        utils._swift_info = {'foo': {'bar': 'baz'}}
        utils._swift_admin_info = {'qux': {'quux': 'corge'}}
        expires = 1
        sig = utils.get_hmac('GET', '/info', expires, 'secret-admin-key')
        path = '/info?swiftinfo_sig={sig}&swiftinfo_expires={expires}'.format(
            sig=sig, expires=expires)
        req = Request.blank(
            path, environ={'REQUEST_METHOD': 'GET'})
        resp = controller.GET(req)
        self.assertIsInstance(resp, HTTPException)
        self.assertEqual('401 Unauthorized', str(resp))
        expires = 'abc'
        sig = utils.get_hmac('GET', '/info', expires, 'secret-admin-key')
        path = '/info?swiftinfo_sig={sig}&swiftinfo_expires={expires}'.format(
            sig=sig, expires=expires)
        req = Request.blank(
            path, environ={'REQUEST_METHOD': 'GET'})
        resp = controller.GET(req)
        self.assertIsInstance(resp, HTTPException)
        self.assertEqual('401 Unauthorized', str(resp))

    def test_get_admin_info_invalid_path(self):
        """A signature computed over the wrong path is rejected (401)."""
        controller = self.get_controller(expose_info=True,
                                         admin_key='secret-admin-key')
        utils._swift_info = {'foo': {'bar': 'baz'}}
        utils._swift_admin_info = {'qux': {'quux': 'corge'}}
        expires = int(time.time() + 86400)
        sig = utils.get_hmac('GET', '/foo', expires, 'secret-admin-key')
        path = '/info?swiftinfo_sig={sig}&swiftinfo_expires={expires}'.format(
            sig=sig, expires=expires)
        req = Request.blank(
            path, environ={'REQUEST_METHOD': 'GET'})
        resp = controller.GET(req)
        self.assertIsInstance(resp, HTTPException)
        self.assertEqual('401 Unauthorized', str(resp))

    def test_get_admin_info_invalid_key(self):
        """A signature built from the wrong admin key is rejected (401)."""
        controller = self.get_controller(expose_info=True,
                                         admin_key='secret-admin-key')
        utils._swift_info = {'foo': {'bar': 'baz'}}
        utils._swift_admin_info = {'qux': {'quux': 'corge'}}
        expires = int(time.time() + 86400)
        sig = utils.get_hmac('GET', '/foo', expires, 'invalid-admin-key')
        path = '/info?swiftinfo_sig={sig}&swiftinfo_expires={expires}'.format(
            sig=sig, expires=expires)
        req = Request.blank(
            path, environ={'REQUEST_METHOD': 'GET'})
        resp = controller.GET(req)
        self.assertIsInstance(resp, HTTPException)
        self.assertEqual('401 Unauthorized', str(resp))

    def test_admin_disallow_info(self):
        """Admin requests expose disallowed_sections metadata and admin
        data while still hiding the disallowed public section."""
        controller = self.get_controller(expose_info=True,
                                         disallowed_sections=['foo2'],
                                         admin_key='secret-admin-key')
        utils._swift_info = {'foo': {'bar': 'baz'},
                             'foo2': {'bar2': 'baz2'}}
        utils._swift_admin_info = {'qux': {'quux': 'corge'}}
        expires = int(time.time() + 86400)
        sig = utils.get_hmac('GET', '/info', expires, 'secret-admin-key')
        path = '/info?swiftinfo_sig={sig}&swiftinfo_expires={expires}'.format(
            sig=sig, expires=expires)
        req = Request.blank(
            path, environ={'REQUEST_METHOD': 'GET'})
        resp = controller.GET(req)
        self.assertIsInstance(resp, HTTPException)
        self.assertEqual('200 OK', str(resp))
        info = json.loads(resp.body)
        self.assertNotIn('foo2', info)
        self.assertIn('admin', info)
        self.assertIn('disallowed_sections', info['admin'])
        self.assertIn('foo2', info['admin']['disallowed_sections'])
        self.assertIn('qux', info['admin'])
        self.assertIn('quux', info['admin']['qux'])
        self.assertEqual(info['admin']['qux']['quux'], 'corge')
# allow running this test module directly
if __name__ == '__main__':
    unittest.main()
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright 2011 - 2012, Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Shared code between AMQP based openstack.common.rpc implementations.
The code in this module is shared between the rpc implemenations based on AMQP.
Specifically, this includes impl_kombu and impl_qpid. impl_carrot also uses
AMQP, but is deprecated and predates this code.
"""
import collections
import inspect
import sys
import uuid
from eventlet import greenpool
from eventlet import pools
from eventlet import queue
from eventlet import semaphore
# TODO(pekowsk): Remove import cfg and below comment in Havana.
# This import should no longer be needed when the amqp_rpc_single_reply_queue
# option is removed.
from oslo.config import cfg
from nova.openstack.common import excutils
from nova.openstack.common.gettextutils import _
from nova.openstack.common import local
from nova.openstack.common import log as logging
from nova.openstack.common.rpc import common as rpc_common
# TODO(pekowski): Remove this option in Havana.
amqp_opts = [
    cfg.BoolOpt('amqp_rpc_single_reply_queue',
                default=False,
                help='Enable a fast single reply queue if using AMQP based '
                     'RPC like RabbitMQ or Qpid.'),
]
cfg.CONF.register_opts(amqp_opts)
# Message key holding a per-message unique id, used for
# duplicate-delivery detection (see _MsgIdCache / _add_unique_id).
UNIQUE_ID = '_unique_id'
LOG = logging.getLogger(__name__)
class Pool(pools.Pool):
    """Class that implements a Pool of Connections."""
    def __init__(self, conf, connection_cls, *args, **kwargs):
        # conf supplies the default max_size (rpc_conn_pool_size);
        # callers may still override max_size / order_as_stack via kwargs.
        self.connection_cls = connection_cls
        self.conf = conf
        kwargs.setdefault("max_size", self.conf.rpc_conn_pool_size)
        kwargs.setdefault("order_as_stack", True)
        super(Pool, self).__init__(*args, **kwargs)
        self.reply_proxy = None
    # TODO(comstud): Timeout connections not used in a while
    def create(self):
        """Create a new connection (factory hook called by eventlet's Pool)."""
        LOG.debug(_('Pool creating new connection'))
        return self.connection_cls(self.conf)
    def empty(self):
        """Close all cached free connections and reset the class-level pool."""
        while self.free_items:
            self.get().close()
        # Force a new connection pool to be created.
        # Note that this was added due to failing unit test cases. The issue
        # is the above "while loop" gets all the cached connections from the
        # pool and closes them, but never returns them to the pool, a pool
        # leak. The unit tests hang waiting for an item to be returned to the
        # pool. The unit tests get here via the teatDown() method. In the run
        # time code, it gets here via cleanup() and only appears in service.py
        # just before doing a sys.exit(), so cleanup() only happens once and
        # the leakage is not a problem.
        self.connection_cls.pool = None
# Guards one-time, lazy creation of each connection class' pool.
_pool_create_sem = semaphore.Semaphore()
def get_connection_pool(conf, connection_cls):
    """Return the singleton Pool for *connection_cls*, creating it lazily."""
    with _pool_create_sem:
        # Make sure only one thread tries to create the connection pool.
        if not connection_cls.pool:
            connection_cls.pool = Pool(conf, connection_cls)
    return connection_cls.pool
class ConnectionContext(rpc_common.Connection):
    """The class that is actually returned to the caller of
    create_connection(). This is essentially a wrapper around
    Connection that supports 'with'. It can also return a new
    Connection, or one from a pool. The function will also catch
    when an instance of this class is to be deleted. With that
    we can return Connections to the pool on exceptions and so
    forth without making the caller be responsible for catching
    them. If possible the function makes sure to return a
    connection to the pool.
    """
    def __init__(self, conf, connection_pool, pooled=True, server_params=None):
        """Create a new connection, or get one from the pool"""
        self.connection = None
        self.conf = conf
        self.connection_pool = connection_pool
        if pooled:
            self.connection = connection_pool.get()
        else:
            # dedicated (unpooled) connection, e.g. for a specific server
            self.connection = connection_pool.connection_cls(
                conf,
                server_params=server_params)
        self.pooled = pooled
    def __enter__(self):
        """When with ConnectionContext() is used, return self"""
        return self
    def _done(self):
        """If the connection came from a pool, clean it up and put it back.
        If it did not come from a pool, close it.
        """
        # Idempotent: self.connection is cleared after the first call, so
        # close()/__exit__/__del__ may each invoke this safely.
        if self.connection:
            if self.pooled:
                # Reset the connection so it's ready for the next caller
                # to grab from the pool
                self.connection.reset()
                self.connection_pool.put(self.connection)
            else:
                try:
                    self.connection.close()
                except Exception:
                    # best-effort close of a dedicated connection
                    pass
            self.connection = None
    def __exit__(self, exc_type, exc_value, tb):
        """End of 'with' statement. We're done here."""
        self._done()
    def __del__(self):
        """Caller is done with this connection. Make sure we cleaned up."""
        self._done()
    def close(self):
        """Caller is done with this connection."""
        self._done()
    def create_consumer(self, topic, proxy, fanout=False):
        """Forward consumer creation to the wrapped connection."""
        self.connection.create_consumer(topic, proxy, fanout)
    def create_worker(self, topic, proxy, pool_name):
        """Forward worker creation to the wrapped connection."""
        self.connection.create_worker(topic, proxy, pool_name)
    def join_consumer_pool(self, callback, pool_name, topic, exchange_name):
        """Forward consumer-pool membership to the wrapped connection."""
        self.connection.join_consumer_pool(callback,
                                           pool_name,
                                           topic,
                                           exchange_name)
    def consume_in_thread(self):
        """Start consuming on the wrapped connection in its own thread."""
        self.connection.consume_in_thread()
    def __getattr__(self, key):
        """Proxy all other calls to the Connection instance"""
        if self.connection:
            return getattr(self.connection, key)
        else:
            # the connection was already returned/closed via _done()
            raise rpc_common.InvalidRPCConnectionReuse()
class ReplyProxy(ConnectionContext):
    """ Connection class for RPC replies / callbacks """
    def __init__(self, conf, connection_pool):
        # msg_id -> waiter queue for each in-flight call
        self._call_waiters = {}
        self._num_call_waiters = 0
        # warn (and double the threshold) if waiters keep accumulating,
        # which would suggest a waiter leak ('threshhold' spelling kept
        # as-is: it is part of the attribute name)
        self._num_call_waiters_wrn_threshhold = 10
        # single direct reply queue shared by all calls from this process
        self._reply_q = 'reply_' + uuid.uuid4().hex
        super(ReplyProxy, self).__init__(conf, connection_pool, pooled=False)
        self.declare_direct_consumer(self._reply_q, self._process_data)
        self.consume_in_thread()
    def _process_data(self, message_data):
        """Route an incoming reply to the waiter registered for its msg_id."""
        msg_id = message_data.pop('_msg_id', None)
        waiter = self._call_waiters.get(msg_id)
        if not waiter:
            # late or unsolicited reply: nothing is waiting for it
            LOG.warn(_('no calling threads waiting for msg_id : %s'
                       ', message : %s') % (msg_id, message_data))
        else:
            waiter.put(message_data)
    def add_call_waiter(self, waiter, msg_id):
        """Register *waiter* to receive replies addressed to *msg_id*."""
        self._num_call_waiters += 1
        if self._num_call_waiters > self._num_call_waiters_wrn_threshhold:
            LOG.warn(_('Number of call waiters is greater than warning '
                       'threshhold: %d. There could be a MulticallProxyWaiter '
                       'leak.') % self._num_call_waiters_wrn_threshhold)
            self._num_call_waiters_wrn_threshhold *= 2
        self._call_waiters[msg_id] = waiter
    def del_call_waiter(self, msg_id):
        """Drop the waiter registered for *msg_id*."""
        self._num_call_waiters -= 1
        del self._call_waiters[msg_id]
    def get_reply_q(self):
        """Return the name of this process' shared reply queue."""
        return self._reply_q
def msg_reply(conf, msg_id, reply_q, connection_pool, reply=None,
              failure=None, ending=False, log_failure=True):
    """Sends a reply or an error on the channel signified by msg_id.
    Failure should be a sys.exc_info() tuple.
    """
    with ConnectionContext(conf, connection_pool) as conn:
        if failure:
            failure = rpc_common.serialize_remote_exception(failure,
                                                            log_failure)
        try:
            msg = {'result': reply, 'failure': failure}
        except TypeError:
            # NOTE(review): constructing a dict literal cannot raise
            # TypeError here; presumably this guards a serialization
            # failure further down in older code paths.  The fallback
            # replaces the reply with a repr() of its attributes --
            # confirm before removing.
            msg = {'result': dict((k, repr(v))
                   for k, v in reply.__dict__.iteritems()),
                   'failure': failure}
        if ending:
            # tells multicall consumers this was the final reply
            msg['ending'] = True
        _add_unique_id(msg)
        # If a reply_q exists, add the msg_id to the reply and pass the
        # reply_q to direct_send() to use it as the response queue.
        # Otherwise use the msg_id for backward compatibilty.
        if reply_q:
            msg['_msg_id'] = msg_id
            conn.direct_send(reply_q, rpc_common.serialize_msg(msg))
        else:
            conn.direct_send(msg_id, rpc_common.serialize_msg(msg))
class RpcContext(rpc_common.CommonRpcContext):
    """Context that supports replying to a rpc.call"""
    def __init__(self, **kwargs):
        # Pull out the transport-level fields before handing the rest to
        # the common context base class.
        self.msg_id = kwargs.pop('msg_id', None)
        self.reply_q = kwargs.pop('reply_q', None)
        self.conf = kwargs.pop('conf')
        super(RpcContext, self).__init__(**kwargs)
    def deepcopy(self):
        """Return a new RpcContext carrying the same values."""
        values = self.to_dict()
        values.update(conf=self.conf,
                      msg_id=self.msg_id,
                      reply_q=self.reply_q)
        return self.__class__(**values)
    def reply(self, reply=None, failure=None, ending=False,
              connection_pool=None, log_failure=True):
        """Send *reply*/*failure* back to the caller, if one is waiting."""
        if not self.msg_id:
            # cast (no reply expected) or the call already ended
            return
        msg_reply(self.conf, self.msg_id, self.reply_q, connection_pool,
                  reply, failure, ending, log_failure)
        if ending:
            # further replies would have nowhere to go
            self.msg_id = None
def unpack_context(conf, msg):
    """Extract the RPC context embedded in *msg* and return an RpcContext.

    The '_context_*' keys (plus '_msg_id' and '_reply_q') are removed
    from *msg* as a side effect.
    """
    prefix = '_context_'
    ctxt_values = {}
    for raw_key in list(msg.keys()):
        # NOTE(vish): Some versions of python don't like unicode keys
        #             in kwargs, hence the str() conversion.
        key = str(raw_key)
        if key.startswith(prefix):
            ctxt_values[key[len(prefix):]] = msg.pop(key)
    ctxt_values['msg_id'] = msg.pop('_msg_id', None)
    ctxt_values['reply_q'] = msg.pop('_reply_q', None)
    ctxt_values['conf'] = conf
    ctx = RpcContext.from_dict(ctxt_values)
    rpc_common._safe_log(LOG.debug, _('unpacked context: %s'), ctx.to_dict())
    return ctx
def pack_context(msg, context):
    """Flatten *context* into *msg* under '_context_'-prefixed keys.

    Values for message keys need to be less than 255 chars, so the
    context is spread over several top-level keys instead of nesting it
    in a single one. If we want to support more arguments in rabbit
    messages, we may want to do the same for args at some point.
    """
    msg.update(('_context_%s' % key, value)
               for key, value in context.to_dict().iteritems())
class _MsgIdCache(object):
    """Detects duplicate message deliveries by remembering the unique
    ids of the most recently seen messages."""
    # NOTE: This value is considered can be a configuration item, but
    #       it is not necessary to change its value in most cases,
    #       so let this value as static for now.
    DUP_MSG_CHECK_SIZE = 16
    def __init__(self, **kwargs):
        # bounded FIFO of recent ids; old entries fall off automatically
        self.prev_msgids = collections.deque([],
                                             maxlen=self.DUP_MSG_CHECK_SIZE)
    def check_duplicate_message(self, message_data):
        """Raise DuplicateMessageError if this message was seen before.

        AMQP consumers may read same message twice when exceptions occur
        before ack is returned, so each unique id is remembered on first
        sight and rejected on the second.
        """
        if UNIQUE_ID not in message_data:
            # legacy message without a unique id: nothing to check
            return
        msg_id = message_data[UNIQUE_ID]
        if msg_id in self.prev_msgids:
            raise rpc_common.DuplicateMessageError(msg_id=msg_id)
        self.prev_msgids.append(msg_id)
def _add_unique_id(msg):
    """Stamp *msg* with a fresh unique id used for duplicate detection."""
    unique_id = uuid.uuid4().hex
    msg[UNIQUE_ID] = unique_id
    LOG.debug(_('UNIQUE_ID is %s.') % (unique_id))
class _ThreadPoolWithWait(object):
    """Base class for a delayed invocation manager used by
    the Connection class to start up green threads
    to handle incoming messages.
    """
    def __init__(self, conf, connection_pool):
        self.conf = conf
        self.connection_pool = connection_pool
        # one green thread per in-flight callback, bounded by config
        self.pool = greenpool.GreenPool(conf.rpc_thread_pool_size)
    def wait(self):
        """Wait for all callback threads to exit."""
        self.pool.waitall()
class CallbackWrapper(_ThreadPoolWithWait):
    """Adapts a plain callable so every incoming message is handled in
    its own green thread.
    """
    def __init__(self, conf, callback, connection_pool):
        """
        :param conf: cfg.CONF instance
        :param callback: a callable (probably a function)
        :param connection_pool: connection pool as returned by
                                get_connection_pool()
        """
        super(CallbackWrapper, self).__init__(conf=conf,
                                              connection_pool=connection_pool)
        self.callback = callback
    def __call__(self, message_data):
        """Dispatch *message_data* to the wrapped callback asynchronously."""
        self.pool.spawn_n(self.callback, message_data)
class ProxyCallback(_ThreadPoolWithWait):
    """Calls methods on a proxy object based on method and args."""

    def __init__(self, conf, proxy, connection_pool):
        super(ProxyCallback, self).__init__(
            conf=conf,
            connection_pool=connection_pool,
        )
        # The object whose methods incoming RPC requests are dispatched to.
        self.proxy = proxy
        # Detects redelivered messages (see _MsgIdCache).
        self.msg_id_cache = _MsgIdCache()

    def __call__(self, message_data):
        """Consumer callback to call a method on a proxy object.

        Parses the message for validity and fires off a thread to call the
        proxy object method.

        Message data should be a dictionary with two keys:
            method: string representing the method to call
            args: dictionary of arg: value

        Example: {'method': 'echo', 'args': {'value': 42}}
        """
        # It is important to clear the context here, because at this point
        # the previous context is stored in local.store.context
        if hasattr(local.store, 'context'):
            del local.store.context
        rpc_common._safe_log(LOG.debug, _('received %s'), message_data)
        self.msg_id_cache.check_duplicate_message(message_data)
        ctxt = unpack_context(self.conf, message_data)
        method = message_data.get('method')
        args = message_data.get('args', {})
        version = message_data.get('version', None)
        if not method:
            # Malformed request: report the problem back to the caller.
            LOG.warn(_('no method for message: %s') % message_data)
            ctxt.reply(_('No method for message: %s') % message_data,
                       connection_pool=self.connection_pool)
            return
        # Run the actual dispatch on a green thread so this consumer
        # callback can return quickly.
        self.pool.spawn_n(self._process_data, ctxt, version, method, args)

    def _process_data(self, ctxt, version, method, args):
        """Process a message in a new thread.

        If the proxy object we have has a dispatch method
        (see rpc.dispatcher.RpcDispatcher), pass it the version,
        method, and args and let it dispatch as appropriate. If not, use
        the old behavior of magically calling the specified method on the
        proxy we have here.
        """
        ctxt.update_store()
        try:
            rval = self.proxy.dispatch(ctxt, version, method, **args)
            # Check if the result was a generator
            if inspect.isgenerator(rval):
                for x in rval:
                    ctxt.reply(x, None, connection_pool=self.connection_pool)
            else:
                ctxt.reply(rval, None, connection_pool=self.connection_pool)
            # This final None tells multicall that it is done.
            ctxt.reply(ending=True, connection_pool=self.connection_pool)
        except rpc_common.ClientException as e:
            # Expected, caller-triggered failure: reply without logging a
            # full traceback locally.
            LOG.debug(_('Expected exception during message handling (%s)') %
                      e._exc_info[1])
            ctxt.reply(None, e._exc_info,
                       connection_pool=self.connection_pool,
                       log_failure=False)
        except Exception:
            # sys.exc_info() is deleted by LOG.exception().
            exc_info = sys.exc_info()
            LOG.error(_('Exception during message handling'),
                      exc_info=exc_info)
            ctxt.reply(None, exc_info, connection_pool=self.connection_pool)
class MulticallProxyWaiter(object):
    """Iterates over the replies to a multicall as they arrive.

    Replies are pushed into ``_dataqueue`` (by the shared ReplyProxy) and
    popped off here; iteration stops when the server sends its 'ending'
    sentinel or the timeout expires.
    """

    def __init__(self, conf, msg_id, timeout, connection_pool):
        self._msg_id = msg_id
        self._timeout = timeout or conf.rpc_response_timeout
        self._reply_proxy = connection_pool.reply_proxy
        self._done = False
        self._got_ending = False
        self._conf = conf
        self._dataqueue = queue.LightQueue()
        # Add this caller to the reply proxy's call_waiters
        self._reply_proxy.add_call_waiter(self, self._msg_id)
        self.msg_id_cache = _MsgIdCache()

    def put(self, data):
        # Called by the reply proxy when a reply for our msg_id arrives.
        self._dataqueue.put(data)

    def done(self):
        if self._done:
            return
        self._done = True
        # Remove this caller from reply proxy's call_waiters
        self._reply_proxy.del_call_waiter(self._msg_id)

    def _process_data(self, data):
        """Unpack one reply: a result, a remote failure, or the end marker."""
        result = None
        self.msg_id_cache.check_duplicate_message(data)
        if data['failure']:
            failure = data['failure']
            result = rpc_common.deserialize_remote_exception(self._conf,
                                                             failure)
        elif data.get('ending', False):
            self._got_ending = True
        else:
            result = data['result']
        return result

    def __iter__(self):
        """Return a result until we get a reply with an 'ending' flag."""
        if self._done:
            raise StopIteration
        while True:
            try:
                data = self._dataqueue.get(timeout=self._timeout)
                result = self._process_data(data)
            except queue.Empty:
                LOG.exception(_('Timed out waiting for RPC response.'))
                self.done()
                raise rpc_common.Timeout()
            except Exception:
                with excutils.save_and_reraise_exception():
                    self.done()
            if self._got_ending:
                self.done()
                raise StopIteration
            if isinstance(result, Exception):
                # Deserialized remote exceptions are re-raised in the caller.
                self.done()
                raise result
            yield result
#TODO(pekowski): Remove MulticallWaiter() in Havana.
class MulticallWaiter(object):
    """Legacy reply iterator using a dedicated direct consumer.

    Used when amqp_rpc_single_reply_queue is disabled; each call gets its
    own consumer/connection instead of the shared reply proxy.
    """

    def __init__(self, conf, connection, timeout):
        self._connection = connection
        # Iterator that drives the underlying consumer; each next() pumps
        # one incoming message into __call__ below.
        self._iterator = connection.iterconsume(timeout=timeout or
                                                conf.rpc_response_timeout)
        self._result = None
        self._done = False
        self._got_ending = False
        self._conf = conf
        self.msg_id_cache = _MsgIdCache()

    def done(self):
        if self._done:
            return
        self._done = True
        self._iterator.close()
        self._iterator = None
        self._connection.close()

    def __call__(self, data):
        """The consume() callback will call this. Store the result."""
        self.msg_id_cache.check_duplicate_message(data)
        if data['failure']:
            failure = data['failure']
            self._result = rpc_common.deserialize_remote_exception(self._conf,
                                                                   failure)
        elif data.get('ending', False):
            self._got_ending = True
        else:
            self._result = data['result']

    def __iter__(self):
        """Return a result until we get a 'None' response from consumer"""
        if self._done:
            raise StopIteration
        while True:
            try:
                self._iterator.next()
            except Exception:
                with excutils.save_and_reraise_exception():
                    self.done()
            if self._got_ending:
                self.done()
                raise StopIteration
            result = self._result
            if isinstance(result, Exception):
                self.done()
                raise result
            yield result
def create_connection(conf, new, connection_pool):
    """Create a connection"""
    # A "new" connection bypasses the pool; otherwise reuse a pooled one.
    pooled = not new
    return ConnectionContext(conf, connection_pool, pooled=pooled)
# Guards lazy creation of the process-wide ReplyProxy in multicall().
_reply_proxy_create_sem = semaphore.Semaphore()
def multicall(conf, context, topic, msg, timeout, connection_pool):
    """Make a call that returns multiple times."""
    # TODO(pekowski): Remove all these comments in Havana.
    # For amqp_rpc_single_reply_queue = False,
    # Can't use 'with' for multicall, as it returns an iterator
    # that will continue to use the connection. When it's done,
    # connection.close() will get called which will put it back into
    # the pool
    # For amqp_rpc_single_reply_queue = True,
    # The 'with' statement is mandatory for closing the connection
    LOG.debug(_('Making synchronous call on %s ...'), topic)
    msg_id = uuid.uuid4().hex
    msg.update({'_msg_id': msg_id})
    LOG.debug(_('MSG_ID is %s') % (msg_id))
    _add_unique_id(msg)
    pack_context(msg, context)

    # TODO(pekowski): Remove this flag and the code under the if clause
    # in Havana.
    if not conf.amqp_rpc_single_reply_queue:
        # Legacy path: a dedicated direct consumer per call.
        conn = ConnectionContext(conf, connection_pool)
        wait_msg = MulticallWaiter(conf, conn, timeout)
        conn.declare_direct_consumer(msg_id, wait_msg)
        conn.topic_send(topic, rpc_common.serialize_msg(msg), timeout)
    else:
        # Shared-reply-queue path: lazily create the process-wide
        # ReplyProxy, then replies are routed back to us by msg_id.
        with _reply_proxy_create_sem:
            if not connection_pool.reply_proxy:
                connection_pool.reply_proxy = ReplyProxy(conf, connection_pool)
        msg.update({'_reply_q': connection_pool.reply_proxy.get_reply_q()})
        wait_msg = MulticallProxyWaiter(conf, msg_id, timeout, connection_pool)
        with ConnectionContext(conf, connection_pool) as conn:
            conn.topic_send(topic, rpc_common.serialize_msg(msg), timeout)
    return wait_msg
def call(conf, context, topic, msg, timeout, connection_pool):
    """Sends a message on a topic and wait for a response."""
    # Drain the multicall iterator; the last reply is the call's result.
    # (NOTE(vish): return the last result from the multicall)
    results = list(multicall(conf, context, topic, msg, timeout,
                             connection_pool))
    if results:
        return results[-1]
    return None
def cast(conf, context, topic, msg, connection_pool):
    """Sends a message on a topic without waiting for a response."""
    LOG.debug(_('Making asynchronous cast on %s...'), topic)
    # Stamp the message for duplicate detection and attach the context.
    _add_unique_id(msg)
    pack_context(msg, context)
    payload = rpc_common.serialize_msg(msg)
    with ConnectionContext(conf, connection_pool) as conn:
        conn.topic_send(topic, payload)
def fanout_cast(conf, context, topic, msg, connection_pool):
    """Sends a message on a fanout exchange without waiting for a response."""
    LOG.debug(_('Making asynchronous fanout cast...'))
    # Stamp the message for duplicate detection and attach the context.
    _add_unique_id(msg)
    pack_context(msg, context)
    payload = rpc_common.serialize_msg(msg)
    with ConnectionContext(conf, connection_pool) as conn:
        conn.fanout_send(topic, payload)
def cast_to_server(conf, context, server_params, topic, msg, connection_pool):
    """Sends a message on a topic to a specific server."""
    _add_unique_id(msg)
    pack_context(msg, context)
    payload = rpc_common.serialize_msg(msg)
    # Unpooled connection: it is bound to one specific server's params.
    with ConnectionContext(conf, connection_pool, pooled=False,
                           server_params=server_params) as conn:
        conn.topic_send(topic, payload)
def fanout_cast_to_server(conf, context, server_params, topic, msg,
                          connection_pool):
    """Sends a message on a fanout exchange to a specific server."""
    _add_unique_id(msg)
    pack_context(msg, context)
    payload = rpc_common.serialize_msg(msg)
    # Unpooled connection: it is bound to one specific server's params.
    with ConnectionContext(conf, connection_pool, pooled=False,
                           server_params=server_params) as conn:
        conn.fanout_send(topic, payload)
def notify(conf, context, topic, msg, connection_pool, envelope):
    """Sends a notification event on a topic."""
    event_type = msg.get('event_type')
    LOG.debug(_('Sending %(event_type)s on %(topic)s'),
              dict(event_type=event_type, topic=topic))
    _add_unique_id(msg)
    pack_context(msg, context)
    with ConnectionContext(conf, connection_pool) as conn:
        # Notifications are optionally wrapped in the RPC envelope format.
        if envelope:
            msg = rpc_common.serialize_msg(msg, force_envelope=True)
        conn.notify_send(topic, msg)
def cleanup(connection_pool):
    """Drop every pooled connection; a missing/None pool is a no-op."""
    if not connection_pool:
        return
    connection_pool.empty()
def get_control_exchange(conf):
    """Return the configured control exchange name."""
    exchange = conf.control_exchange
    return exchange
|
|
import datetime as dt
import logging
import queue
from abc import ABCMeta, abstractmethod
from typing import Dict, Iterable, Union
import numpy as np
import pandas as pd
import pytech.utils as utils
from pytech.decorators.decorators import memoize, lazy_property
from pytech.backtest.event import MarketEvent
from pytech.data.reader import BarReader
class DataHandler(metaclass=ABCMeta):
    """Abstract interface for feeding bar (OHLCV) data into the backtest."""

    # Chunk/resample frequency used by readers (daily).
    CHUNK_SIZE = 'D'

    def __init__(self,
                 events: queue.Queue,
                 tickers: Iterable[str],
                 start_date: dt.datetime,
                 end_date: dt.datetime,
                 asset_lib_name: str = 'pytech.bars',
                 market_lib_name: str = 'pytech.market'):
        """
        All child classes MUST call this constructor.

        :param events: The universal queue.
        :param tickers: An iterable of tickers. This will create the asset
            universe or all the assets that will available to be traded.
        :param start_date: The start of the sim.
        :param end_date: The end of the sim.
        :param asset_lib_name: The name of the mongo library where asset
            bars are stored. Defaults to *pytech.bars*
        :param market_lib_name: The name of the mongo library where market
            bars are stored. Defaults to *pytech.market*
        """
        self.logger = logging.getLogger(__name__)
        self.events = events
        # Copy tickers into a fresh list so the universe is independent of
        # whatever iterable the caller passed.
        self.tickers = []
        self.tickers.extend(tickers)
        # self._ticker_data = {}
        # ticker -> list of bars seen so far (newest last).
        self.latest_ticker_data = {}
        self.continue_backtest = True
        self.start_date = utils.parse_date(start_date)
        self.end_date = utils.parse_date(end_date)
        self.asset_lib_name = asset_lib_name
        self.market_lib_name = market_lib_name
        self.asset_reader = BarReader(asset_lib_name)
        self.market_reader = BarReader(market_lib_name)
        # self._populate_ticker_data()

    @lazy_property
    def ticker_data(self):
        # Loaded lazily on first access and cached by @lazy_property.
        return self._populate_ticker_data()

    @abstractmethod
    def get_latest_bar(self, ticker: str):
        """
        Return the latest bar updated.

        :param str ticker: The ticker to retrieve the bar for.
        :return: The latest update bar for the given ticker.
        """
        raise NotImplementedError('Must implement get_latest_bar()')

    @abstractmethod
    def get_latest_bars(self, ticker: str, n: int = 1):
        """
        Returns the last **n** bars from the latest_symbol list,
        or fewer if less bars available.

        :param ticker: The ticker of the asset.
        :param int n: The number of bars.
        :return: The last ``n`` bars.
        """
        raise NotImplementedError('Must implement get_latest_bars()')

    @abstractmethod
    def get_latest_bar_dt(self, ticker: str):
        """
        Return the datetime for the last bar for the given ticker.

        :param str ticker: The ticker of the asset.
        :return: The datetime of the last bar for the given asset
        :rtype: dt.datetime
        """
        raise NotImplementedError('Must implement get_latest_bar_dt()')

    @abstractmethod
    def get_latest_bar_value(self, ticker: str, val_type, n=1):
        """
        Return ``val_type`` extracted from each of the last **n** bars.

        :param ticker: The ticker of the asset.
        :param val_type: The bar attribute (column) to extract.
        :param n: How many bars back to include.
        :return: The extracted values.
        """
        raise NotImplementedError('Must implement get_latest_bar_value()')

    @abstractmethod
    def update_bars(self):
        """
        Pushes the latest bar to the latest symbol structure for all symbols
        in the symbol list.
        """
        raise NotImplementedError('Must implement update_bars()')

    @abstractmethod
    def _populate_ticker_data(self):
        """
        Populate the ticker_data dict with a pandas OHLCV
        df as the value and the ticker as the key.

        This will get called on ``__init__`` and is **NOT** intended to ever
        be called directly by child classes.
        """
        raise NotImplementedError('Must implement _populate_ticker_data()')
class Bars(DataHandler):
    """:class:`DataHandler` that streams OHLCV bars from the bar reader.

    ``ticker_data`` holds a row iterator per ticker; :meth:`update_bars`
    advances each iterator, appends the new bar to ``latest_ticker_data``
    and emits a :class:`MarketEvent` to drive the backtest loop.
    """

    def __init__(self,
                 events: queue.Queue,
                 tickers: Iterable,
                 start_date: dt.datetime,
                 end_date: dt.datetime,
                 source: str = 'google',
                 asset_lib_name: str = 'pytech.bars',
                 market_lib_name: str = 'pytech.market'):
        """
        :param events: The universal event queue.
        :param tickers: The tickers making up the asset universe.
        :param start_date: The start of the sim.
        :param end_date: The end of the sim.
        :param source: The upstream data source (default: ``'google'``).
        :param asset_lib_name: Mongo library holding asset bars.
        :param market_lib_name: Mongo library holding market bars.
        """
        # Must be set before the parent ctor runs: data-loading helpers
        # (e.g. _get_data) read self.source.
        self.source = source
        super().__init__(events, tickers, start_date, end_date,
                         asset_lib_name, market_lib_name)

    def _populate_ticker_data(self) -> Dict[str, Iterable[pd.Series]]:
        """
        Populate the ticker_data dict with a pandas OHLCV
        df as the value and the ticker as the key.
        """
        comb_index = None
        df_dict = self._get_data()
        out = {}
        for t in self.tickers:
            out[t] = df_dict[t]
            # TODO needed?
            if comb_index is None:
                comb_index = out[t].index
            else:
                # BUG FIX: Index.union returns a NEW index (it does not
                # mutate in place); keep the accumulated union.
                comb_index = comb_index.union(out[t].index)
            self.latest_ticker_data[t] = []
        # Convert each df into a row iterator consumed by _get_new_bar().
        for t in self.tickers:
            out[t] = out[t].iterrows()
        return out

    @memoize
    def make_agg_df(self, col: str = utils.CLOSE_COL,
                    market_ticker: Union[str, None] = 'SPY') -> pd.DataFrame:
        """
        Make a df that contains all of the ticker data and write it the db.

        This is used to do analysis like correlation, so a market ticker
        should be added.

        :param col: The column to use to create the aggregate DF.
        :param market_ticker: The ticker that will be used to represent the
            market. If None is passed then no market_ticker will be used.
        :return: The aggregate data frame.
        """
        agg_df = pd.DataFrame()
        df_dict = self._get_data()

        if market_ticker is not None and market_ticker not in self.tickers:
            # get the market data if it has not already been fetched
            market_df = self.market_reader.get_data(market_ticker, columns=col)
            agg_df[market_ticker] = market_df[col]

        for t in self.tickers:
            temp_df = df_dict[t]
            agg_df[t] = temp_df[col]

        return agg_df

    @memoize
    def _get_data(self,
                  tickers: Iterable[str] = None,
                  **kwargs) -> Dict[str, pd.DataFrame]:
        """
        Get the data for the asset universe plus any extra tickers.

        :param tickers: any extra tickers to get data for.
        :return: A dict mapping ticker -> OHLCV DataFrame.
        """
        if tickers is not None:
            # BUG FIX: the original rebound ``tickers`` to ``self.tickers``
            # *before* extending, which appended self.tickers to itself and
            # doubled the universe in place. Build a fresh list so
            # ``self.tickers`` is never mutated.
            tickers = list(self.tickers) + list(tickers)
        else:
            tickers = self.tickers
        return self.asset_reader.get_data(tickers,
                                          source=self.source,
                                          start=self.start_date,
                                          end=self.end_date,
                                          **kwargs)

    def _get_new_bar(self, ticker: str):
        """
        Get the latest bar from the data feed and return it as a tuple.

        The per-ticker iterator is shared, so each generator created here
        resumes where the previous one stopped.

        :return: generator of ``(index, row)`` tuples.
        """
        for bar in self.ticker_data[ticker]:
            yield bar

    def get_latest_bar(self, ticker: str):
        """Return the most recently seen bar for ``ticker``."""
        try:
            bars_list = self.latest_ticker_data[ticker]
        except KeyError:
            self.logger.exception(
                f'{ticker} is not available in the given data set.')
            raise
        else:
            return bars_list[-1]

    def get_latest_bars(self, ticker: str, n: int = 1):
        """
        Returns the last ``n`` bars from the latest_ticker_data.

        If there is less than ``n`` bars available then fewer are returned.

        :param str ticker: The ticker of the asset for which the bars are
            needed.
        :param int n: The number of bars to return. (default: 1)
        :return: A list of bars.
        """
        try:
            bars_list = self.latest_ticker_data[ticker]
        except KeyError:
            self.logger.exception(
                f'Could not find {ticker} in latest_ticker_data')
            raise
        else:
            return bars_list[-n:]

    def get_latest_bar_dt(self, ticker) -> dt.datetime:
        """Return the datetime of the last bar seen for ``ticker``."""
        try:
            bars_list = self.latest_ticker_data[ticker]
        except KeyError:
            self.logger.exception(
                f'Could not find {ticker} in latest_ticker_data')
            raise
        else:
            return utils.dt_utils.parse_date(bars_list[-1].name)

    def get_latest_bar_value(self, ticker, val_type, n=1):
        """
        Get the last ``n`` bars but return an array containing only the
        ``val_type`` requested.

        :param str ticker: The ticker of the asset for which the bars are
            needed.
        :param val_type: The bar attribute (column) to extract.
        :param n: How many bars back to include.
        :return: ``np.array`` of the requested values.
        """
        try:
            bars_list = self.get_latest_bars(ticker, n)
        except KeyError:
            self.logger.exception(
                f'Could not find {ticker} in latest_ticker_data')
            raise
        else:
            return np.array([getattr(bar, val_type) for bar in bars_list])

    def update_bars(self):
        """Advance every ticker by one bar and emit a MarketEvent."""
        for ticker in self.tickers:
            try:
                bar = next(self._get_new_bar(ticker))
                # bar is a tuple and we only care about the 2nd value in it.
                bar = bar[1]
            except StopIteration:
                # Out of data for this ticker: end the backtest.
                self.continue_backtest = False
            else:
                if bar is not None:
                    self.latest_ticker_data[ticker].append(bar)
        self.events.put(MarketEvent())
|
|
#!/usr/bin/env python3
# Copyright (c) 2017-2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test multiwallet.
Verify that a bitcoind node can load multiple wallet files
"""
import os
import shutil
import time
from decimal import Decimal
from threading import Thread
from test_framework.authproxy import JSONRPCException
from test_framework.test_framework import BitcoinTestFramework
from test_framework.test_node import ErrorMatch
from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
get_rpc_proxy,
)
got_loading_error = False
def test_load_unload(node, name, timeout=60.):
global got_loading_error
t0 = time.time()
while time.time() - t0 < timeout and not got_loading_error:
try:
node.loadwallet(name)
node.unloadwallet(name)
except JSONRPCException as e:
if e.error['code'] == - \
4 and 'Wallet already being loading' in e.error['message']:
got_loading_error = True
return
# Add a small sleep to avoid CPU exhaustion in the unlikely case
# the race never happens.
time.sleep(0.001)
class MultiWalletTest(BitcoinTestFramework):
    """Exercise loading, creating, unloading and backing up multiple wallets."""

    def set_test_params(self):
        self.setup_clean_chain = True
        self.num_nodes = 2

    def skip_test_if_missing_module(self):
        self.skip_if_no_wallet()

    def add_options(self, parser):
        # Directory of pre-made wallet fixtures shipped with the test data.
        parser.add_argument(
            '--data_wallets_dir',
            default=os.path.join(
                os.path.dirname(
                    os.path.realpath(__file__)),
                'data/wallets/'),
            help='Test data with wallet directories (default: %(default)s)',
        )

    def run_test(self):
        """Main scenario; see the inline section comments for each stage."""
        node = self.nodes[0]

        # Small path/RPC helpers used throughout the test.
        # NOTE(review): `wallet` is shadowed by a loop variable further down.
        def data_dir(*p): return os.path.join(node.datadir, self.chain, *p)

        def wallet_dir(*p): return data_dir('wallets', *p)

        def wallet(name): return node.get_wallet_rpc(name)

        def wallet_file(name):
            if os.path.isdir(wallet_dir(name)):
                return wallet_dir(name, self.wallet_data_filename)
            return wallet_dir(name)

        assert_equal(self.nodes[0].listwalletdir(),
                     {'wallets': [{'name': self.default_wallet_name}]})

        # check wallet.dat is created
        self.stop_nodes()
        assert_equal(os.path.isfile(wallet_dir(self.default_wallet_name,
                                               self.wallet_data_filename)),
                     True)

        # create symlink to verify wallet directory path can be referenced
        # through symlink
        if os.name != 'nt':
            os.mkdir(wallet_dir('w7'))
            os.symlink('w7', wallet_dir('w7_symlink'))

        # rename wallet.dat to make sure plain wallet file paths (as opposed to
        # directory paths) can be loaded
        os.rename(wallet_dir(self.default_wallet_name, self.wallet_data_filename),
                  wallet_dir("w8"))

        # create another dummy wallet for use in testing backups later
        self.start_node(
            0, ["-nowallet", "-wallet=" + self.default_wallet_name])
        self.stop_nodes()
        empty_wallet = os.path.join(self.options.tmpdir, 'empty.dat')
        os.rename(wallet_dir(self.default_wallet_name, self.wallet_data_filename),
                  empty_wallet)

        # restart node with a mix of wallet names:
        # w1, w2, w3 - to verify new wallets created when non-existing paths specified
        # w - to verify wallet name matching works when one wallet path is prefix of another
        # sub/w5 - to verify relative wallet path is created correctly
        # extern/w6 - to verify absolute wallet path is created correctly
        # w7_symlink - to verify symlinked wallet path is initialized correctly
        # w8 - to verify existing wallet file is loaded correctly
        # '' - to verify default wallet file is created correctly
        wallet_names = ['w1', 'w2', 'w3', 'w', 'sub/w5',
                        os.path.join(self.options.tmpdir, 'extern/w6'),
                        'w7_symlink', 'w8', self.default_wallet_name]
        if os.name == 'nt':
            wallet_names.remove('w7_symlink')
        extra_args = ['-nowallet'] + \
            ['-wallet={}'.format(n) for n in wallet_names]
        self.start_node(0, extra_args)
        assert_equal(
            sorted(map(lambda w: w['name'],
                       self.nodes[0].listwalletdir()['wallets'])),
            [self.default_wallet_name, os.path.join('sub', 'w5'), 'w', 'w1',
             'w2', 'w3', 'w7', 'w7_symlink', 'w8'])

        assert_equal(set(node.listwallets()), set(wallet_names))

        # check that all requested wallets were created
        self.stop_node(0)
        for wallet_name in wallet_names:
            assert_equal(os.path.isfile(wallet_file(wallet_name)), True)

        # should not initialize if wallet path can't be created
        exp_stderr = "boost::filesystem::create_director"
        self.nodes[0].assert_start_raises_init_error(
            ['-wallet=w8/bad'], exp_stderr, match=ErrorMatch.PARTIAL_REGEX)

        self.nodes[0].assert_start_raises_init_error(
            ['-walletdir=wallets'], 'Error: Specified -walletdir "wallets" does not exist')
        self.nodes[0].assert_start_raises_init_error(
            ['-walletdir=wallets'], 'Error: Specified -walletdir "wallets" is a relative path', cwd=data_dir())
        self.nodes[0].assert_start_raises_init_error(
            ['-walletdir=debug.log'], 'Error: Specified -walletdir "debug.log" is not a directory', cwd=data_dir())

        # duplicate -wallet args are ignored with a warning
        self.start_node(0, ['-wallet=w1', '-wallet=w1'])
        self.stop_node(0, 'Warning: Ignoring duplicate -wallet w1.')

        # should not initialize if one wallet is a copy of another
        shutil.copyfile(wallet_dir('w8'), wallet_dir('w8_copy'))
        exp_stderr = r"BerkeleyDatabase: Can't open database w8_copy \(duplicates fileid \w+ from w8\)"
        self.nodes[0].assert_start_raises_init_error(
            ['-wallet=w8', '-wallet=w8_copy'], exp_stderr, match=ErrorMatch.PARTIAL_REGEX)

        # should not initialize if wallet file is a symlink
        if os.name != 'nt':
            os.symlink('w8', wallet_dir('w8_symlink'))
            self.nodes[0].assert_start_raises_init_error(
                ['-wallet=w8_symlink'], r'Error: Invalid -wallet path \'w8_symlink\'\. .*', match=ErrorMatch.FULL_REGEX)

        # should not initialize if the specified walletdir does not exist
        self.nodes[0].assert_start_raises_init_error(
            ['-walletdir=bad'], 'Error: Specified -walletdir "bad" does not exist')
        # should not initialize if the specified walletdir is not a directory
        not_a_dir = wallet_dir('notadir')
        open(not_a_dir, 'a', encoding="utf8").close()
        self.nodes[0].assert_start_raises_init_error(
            ['-walletdir=' + not_a_dir], 'Error: Specified -walletdir "' + not_a_dir + '" is not a directory')

        # if wallets/ doesn't exist, datadir should be the default wallet dir
        wallet_dir2 = data_dir('walletdir')
        os.rename(wallet_dir(), wallet_dir2)
        self.start_node(0, ['-nowallet', '-wallet=w4', '-wallet=w5'])
        assert_equal(set(node.listwallets()), {"w4", "w5"})
        w5 = wallet("w5")
        node.generatetoaddress(nblocks=1, address=w5.getnewaddress())

        # now if wallets/ exists again, but the rootdir is specified as the
        # walletdir, w4 and w5 should still be loaded
        os.rename(wallet_dir2, wallet_dir())
        self.restart_node(0, ['-nowallet', '-wallet=w4', '-wallet=w5',
                              '-walletdir=' + data_dir()])
        assert_equal(set(node.listwallets()), {"w4", "w5"})
        w5 = wallet("w5")
        w5_info = w5.getwalletinfo()
        assert_equal(w5_info['immature_balance'], 50000000)

        # only one process may hold a walletdir's database environment
        competing_wallet_dir = os.path.join(
            self.options.tmpdir, 'competing_walletdir')
        os.mkdir(competing_wallet_dir)
        self.restart_node(0, ['-walletdir=' + competing_wallet_dir])
        exp_stderr = r"Error: Error initializing wallet database environment \"\S+competing_walletdir\"!"
        self.nodes[1].assert_start_raises_init_error(
            ['-walletdir=' + competing_wallet_dir], exp_stderr, match=ErrorMatch.PARTIAL_REGEX)

        self.restart_node(0, extra_args)
        assert_equal(sorted(map(lambda w: w['name'],
                                self.nodes[0].listwalletdir()['wallets'])),
                     [self.default_wallet_name, os.path.join('sub', 'w5'), 'w',
                      'w1', 'w2', 'w3', 'w7', 'w7_symlink', 'w8', 'w8_copy'])

        wallets = [wallet(w) for w in wallet_names]
        wallet_bad = wallet("bad")

        # check wallet names and balances
        node.generatetoaddress(nblocks=1, address=wallets[0].getnewaddress())
        # NOTE(review): this loop rebinds `wallet`, shadowing the helper above.
        for wallet_name, wallet in zip(wallet_names, wallets):
            info = wallet.getwalletinfo()
            assert_equal(info['immature_balance'],
                         50000000 if wallet is wallets[0] else 0)
            assert_equal(info['walletname'], wallet_name)

        # accessing invalid wallet fails
        assert_raises_rpc_error(-18, "Requested wallet does not exist or is not loaded",
                                wallet_bad.getwalletinfo)

        # accessing wallet RPC without using wallet endpoint fails
        assert_raises_rpc_error(-19, "Wallet file not specified (must request wallet RPC through /wallet/<filename> uri-path).",
                                node.getwalletinfo)

        w1, w2, w3, w4, *_ = wallets
        node.generatetoaddress(nblocks=101, address=w1.getnewaddress())
        assert_equal(w1.getbalance(), 100000000)
        assert_equal(w2.getbalance(), 0)
        assert_equal(w3.getbalance(), 0)
        assert_equal(w4.getbalance(), 0)

        w1.sendtoaddress(w2.getnewaddress(), 1000000)
        w1.sendtoaddress(w3.getnewaddress(), 2000000)
        w1.sendtoaddress(w4.getnewaddress(), 3000000)
        node.generatetoaddress(nblocks=1, address=w1.getnewaddress())
        assert_equal(w2.getbalance(), 1000000)
        assert_equal(w3.getbalance(), 2000000)
        assert_equal(w4.getbalance(), 3000000)

        # batch requests are routed to the wallet of the issuing endpoint
        batch = w1.batch([w1.getblockchaininfo.get_request(),
                          w1.getwalletinfo.get_request()])
        assert_equal(batch[0]["result"]["chain"], self.chain)
        assert_equal(batch[1]["result"]["walletname"], "w1")

        self.log.info('Check for per-wallet settxfee call')
        assert_equal(w1.getwalletinfo()['paytxfee'], 0)
        assert_equal(w2.getwalletinfo()['paytxfee'], 0)
        w2.settxfee(1000)
        assert_equal(w1.getwalletinfo()['paytxfee'], 0)
        assert_equal(w2.getwalletinfo()['paytxfee'], Decimal('1000.00'))

        self.log.info("Test dynamic wallet loading")

        self.restart_node(0, ['-nowallet'])
        assert_equal(node.listwallets(), [])
        assert_raises_rpc_error(
            -18,
            "No wallet is loaded. Load a wallet using loadwallet or create a new"
            " one with createwallet. (Note: A default wallet is no longer "
            "automatically created)",
            node.getwalletinfo
        )

        self.log.info("Load first wallet")
        loadwallet_name = node.loadwallet(wallet_names[0])
        assert_equal(loadwallet_name['name'], wallet_names[0])
        assert_equal(node.listwallets(), wallet_names[0:1])
        node.getwalletinfo()
        w1 = node.get_wallet_rpc(wallet_names[0])
        w1.getwalletinfo()

        self.log.info("Load second wallet")
        loadwallet_name = node.loadwallet(wallet_names[1])
        assert_equal(loadwallet_name['name'], wallet_names[1])
        assert_equal(node.listwallets(), wallet_names[0:2])
        assert_raises_rpc_error(-19,
                                "Wallet file not specified", node.getwalletinfo)
        w2 = node.get_wallet_rpc(wallet_names[1])
        w2.getwalletinfo()

        self.log.info("Concurrent wallet loading")
        # Three threads race load/unload of the same wallet until one of
        # them observes the 'already loading' error (see test_load_unload).
        threads = []
        for _ in range(3):
            n = node.cli if self.options.usecli else get_rpc_proxy(
                node.url, 1, timeout=600, coveragedir=node.coverage_dir)
            t = Thread(target=test_load_unload, args=(n, wallet_names[2], ))
            t.start()
            threads.append(t)
        for t in threads:
            t.join()
        global got_loading_error
        assert_equal(got_loading_error, True)

        self.log.info("Load remaining wallets")
        for wallet_name in wallet_names[2:]:
            loadwallet_name = self.nodes[0].loadwallet(wallet_name)
            assert_equal(loadwallet_name['name'], wallet_name)

        assert_equal(set(self.nodes[0].listwallets()), set(wallet_names))

        # Fail to load if wallet doesn't exist
        path = os.path.join(self.options.tmpdir, "node0", "regtest",
                            "wallets", "wallets")
        assert_raises_rpc_error(
            -18,
            "Wallet file verification failed. Failed to load database path "
            "'{}'. Path does not exist.".format(path),
            self.nodes[0].loadwallet, 'wallets')

        # Fail to load duplicate wallets
        path = os.path.join(
            self.options.tmpdir,
            "node0",
            "regtest",
            "wallets",
            "w1",
            self.wallet_data_filename)
        assert_raises_rpc_error(
            -4,
            "Wallet file verification failed. Refusing to load database. Data file '{}' is already loaded.".format(
                path),
            self.nodes[0].loadwallet,
            wallet_names[0])

        # Fail to load duplicate wallets by different ways (directory and
        # filepath)
        path = os.path.join(
            self.options.tmpdir,
            "node0",
            "regtest",
            "wallets",
            self.wallet_data_filename)
        assert_raises_rpc_error(
            -4,
            "Wallet file verification failed. Refusing to load database. Data file '{}' is already loaded.".format(
                path),
            self.nodes[0].loadwallet,
            self.wallet_data_filename)

        # Fail to load if one wallet is a copy of another
        assert_raises_rpc_error(-4, "BerkeleyDatabase: Can't open database w8_copy (duplicates fileid",
                                self.nodes[0].loadwallet, 'w8_copy')

        # Fail to load if one wallet is a copy of another.
        # Test this twice to make sure that we don't re-introduce
        # https://github.com/bitcoin/bitcoin/issues/14304
        assert_raises_rpc_error(-4, "BerkeleyDatabase: Can't open database w8_copy (duplicates fileid",
                                self.nodes[0].loadwallet, 'w8_copy')

        # Fail to load if wallet file is a symlink
        if os.name != 'nt':
            assert_raises_rpc_error(
                -4,
                "Wallet file verification failed. Invalid -wallet path 'w8_symlink'",
                self.nodes[0].loadwallet,
                'w8_symlink')

        # Fail to load if a directory is specified that doesn't contain a
        # wallet
        os.mkdir(wallet_dir('empty_wallet_dir'))
        path = os.path.join(self.options.tmpdir, "node0", "regtest",
                            "wallets", "empty_wallet_dir")
        assert_raises_rpc_error(
            -18,
            "Wallet file verification failed. Failed to load database "
            "path '{}'. Data is not in recognized format.".format(path),
            self.nodes[0].loadwallet, 'empty_wallet_dir')

        self.log.info("Test dynamic wallet creation.")

        # Fail to create a wallet if it already exists.
        path = os.path.join(self.options.tmpdir, "node0", "regtest",
                            "wallets", "w2")
        assert_raises_rpc_error(
            -4,
            f"Failed to create database path '{path}'. Database already exists.",
            self.nodes[0].createwallet, 'w2')

        # Successfully create a wallet with a new name
        loadwallet_name = self.nodes[0].createwallet('w9')
        assert_equal(loadwallet_name['name'], 'w9')
        w9 = node.get_wallet_rpc('w9')
        assert_equal(w9.getwalletinfo()['walletname'], 'w9')

        assert 'w9' in self.nodes[0].listwallets()

        # Successfully create a wallet using a full path
        new_wallet_dir = os.path.join(self.options.tmpdir, 'new_walletdir')
        new_wallet_name = os.path.join(new_wallet_dir, 'w10')
        loadwallet_name = self.nodes[0].createwallet(new_wallet_name)
        assert_equal(loadwallet_name['name'], new_wallet_name)
        w10 = node.get_wallet_rpc(new_wallet_name)
        assert_equal(w10.getwalletinfo()['walletname'], new_wallet_name)

        assert new_wallet_name in self.nodes[0].listwallets()

        self.log.info("Test dynamic wallet unloading")

        # Test `unloadwallet` errors
        assert_raises_rpc_error(-1, "JSON value is not a string as expected",
                                self.nodes[0].unloadwallet)
        assert_raises_rpc_error(-18, "Requested wallet does not exist or is not loaded",
                                self.nodes[0].unloadwallet, "dummy")
        assert_raises_rpc_error(-18, "Requested wallet does not exist or is not loaded",
                                node.get_wallet_rpc("dummy").unloadwallet)
        assert_raises_rpc_error(-8, "Cannot unload the requested wallet",
                                w1.unloadwallet, "w2"),

        # Successfully unload the specified wallet name
        self.nodes[0].unloadwallet("w1")
        assert 'w1' not in self.nodes[0].listwallets()

        # Successfully unload the wallet referenced by the request endpoint
        # Also ensure unload works during walletpassphrase timeout
        w2.encryptwallet('test')
        w2.walletpassphrase('test', 1)
        w2.unloadwallet()
        time.sleep(1.1)
        assert 'w2' not in self.nodes[0].listwallets()

        # Successfully unload all wallets
        for wallet_name in self.nodes[0].listwallets():
            self.nodes[0].unloadwallet(wallet_name)
        assert_equal(self.nodes[0].listwallets(), [])
        assert_raises_rpc_error(
            -18,
            "No wallet is loaded. Load a wallet using loadwallet or create a new"
            " one with createwallet. (Note: A default wallet is no longer "
            "automatically created)",
            self.nodes[0].getwalletinfo
        )

        # Successfully load a previously unloaded wallet
        self.nodes[0].loadwallet('w1')
        assert_equal(self.nodes[0].listwallets(), ['w1'])
        assert_equal(w1.getwalletinfo()['walletname'], 'w1')

        assert_equal(sorted(map(lambda w: w['name'],
                                self.nodes[0].listwalletdir()['wallets'])),
                     [self.default_wallet_name, os.path.join('sub', 'w5'), 'w',
                      'w1', 'w2', 'w3', 'w7', 'w7_symlink', 'w8', 'w8_copy',
                      'w9'])

        # Test backing up and restoring wallets
        self.log.info("Test wallet backup")
        self.restart_node(0, ['-nowallet'])
        for wallet_name in wallet_names:
            self.nodes[0].loadwallet(wallet_name)
        for wallet_name in wallet_names:
            rpc = self.nodes[0].get_wallet_rpc(wallet_name)
            addr = rpc.getnewaddress()
            backup = os.path.join(self.options.tmpdir, 'backup.dat')
            rpc.backupwallet(backup)
            self.nodes[0].unloadwallet(wallet_name)
            # Restoring the empty wallet must lose the address...
            shutil.copyfile(empty_wallet, wallet_file(wallet_name))
            self.nodes[0].loadwallet(wallet_name)
            assert_equal(rpc.getaddressinfo(addr)['ismine'], False)
            self.nodes[0].unloadwallet(wallet_name)
            # ...and restoring the backup must bring it back.
            shutil.copyfile(backup, wallet_file(wallet_name))
            self.nodes[0].loadwallet(wallet_name)
            assert_equal(rpc.getaddressinfo(addr)['ismine'], True)

        # Test .walletlock file is closed
        self.start_node(1)
        # NOTE(review): rebinds `wallet` yet again, here to a path string.
        wallet = os.path.join(self.options.tmpdir, 'my_wallet')
        self.nodes[0].createwallet(wallet)
        assert_raises_rpc_error(-4, "Error initializing wallet database environment",
                                self.nodes[1].loadwallet, wallet)
        self.nodes[0].unloadwallet(wallet)
        self.nodes[1].loadwallet(wallet)
if __name__ == '__main__':
    # Standard functional-test entry point.
    MultiWalletTest().main()
|
|
from __future__ import unicode_literals
from django.template import Template
from django.template.loader import render_to_string
from django.utils.html import conditional_escape
from crispy_forms.compatibility import text_type
from crispy_forms.utils import flatatt
from crispy_forms.helper import FormHelper
from crispy_forms.layout import LayoutObject, Layout, Field, Button
from crispy_forms.bootstrap import ContainerHolder, Container
# Template pack prefix used to resolve the '%s/...' template paths below.
TEMPLATE_PACK = 'vds/forms'
class TabHolder(ContainerHolder):
    """
    TabHolder object. It wraps Tab objects in a container. Requires bootstrap-tab.js::

        TabHolder(
            Tab('form_field_1', 'form_field_2'),
            Tab('form_field_3')
        )
    """
    template = '%s/layout/tab.html'

    def render(self, form, form_style, context, template_pack=None, **kwargs):
        """Render all tab panes plus their link bar into the tab template."""
        # Deactivate every tab first; open_target_group_for_form decides
        # which one should be active (see ContainerHolder).
        for tab in self.fields:
            tab.active = False
        # Open the group that should be open.
        self.open_target_group_for_form(form)
        content = self.get_rendered_fields(form, form_style, context, template_pack)
        # Link bar entries must be rendered after the panes so each tab's
        # css_class/errors state is up to date (see Tab.render_link).
        links = ''.join(tab.render_link(form, template_pack) for tab in self.fields)
        context.update({
            'tabs': self,
            'links': links,
            'content': content
        })
        template = self.get_template_name(template_pack)
        return render_to_string(template, context.flatten())
class Tab(Container):
    """
    Tab object. It wraps fields in a div whose default class is "tab-pane" and
    takes a name as first argument. Example::

        Tab('tab_name', 'form_field_1', 'form_field_2', 'form_field_3')
    """
    css_class = 'vds-tabs--default__content'
    link_template = '%s/layout/tab-link.html'
    errors = True

    def _has_errors(self, form):
        # True as soon as one wrapped field reports errors.  The `else` is
        # attached to the `for` loop, so False is returned only when the loop
        # completes without a match.
        # NOTE(review): this assumes self.fields holds objects exposing
        # `.errors` — confirm against how Tab instances are populated.
        for field in self.fields:
            if field.errors:
                return True
        else:
            return False

    def render_link(self, form, template_pack=TEMPLATE_PACK, **kwargs):
        """
        Render the link for the tab-pane. It must be called after render so css_class is updated
        with active if needed.
        """
        self.errors = self._has_errors(form=form)
        link_template = self.link_template % template_pack
        return render_to_string(link_template, {'link': self})

    def render(self, form, form_style, context, template_pack=TEMPLATE_PACK, **kwargs):
        """Render the tab pane, toggling the show/hide CSS class on `active`."""
        if self.active:
            self.css_class = 'vds-tabs--default__content vds-show'
        else:
            self.css_class = 'vds-tabs--default__content vds-hide'
        # NOTE(review): super(Container, self) deliberately *skips*
        # Container.render and calls its parent's render — confirm this
        # bypass is intentional.
        return super(Container, self).render(form, form_style, context, template_pack)
class Fieldset(LayoutObject):
    """
    Layout object. It wraps fields in a <fieldset>

    Example::

        Fieldset("Text for the legend",
            'form_field_1',
            'form_field_2'
        )

    The first parameter is the text for the fieldset legend. This text is context aware,
    so you can do things like::

        Fieldset("Data for {{ user.username }}",
            'form_field_1',
            'form_field_2'
        )
    """
    template = "%s/layout/fieldset.html"

    def _has_errors(self, form):
        # True as soon as one wrapped field reports errors; the for/else
        # returns False only when the loop finishes without a match.
        # NOTE(review): assumes self.fields holds objects exposing `.errors`.
        for field in self.fields:
            if field.errors:
                return True
        else:
            return False

    def __init__(self, legend, *fields, **kwargs):
        self.fields = list(fields)
        self.legend = legend
        self.css_class = kwargs.pop('css_class', '')
        self.css_id = kwargs.pop('css_id', None)
        self.template = kwargs.pop('template', self.template)
        # Remaining kwargs are flattened into extra HTML attributes.
        self.flat_attrs = flatatt(kwargs)
        self.errors = False

    def render(self, form, form_style, context, template_pack=TEMPLATE_PACK, **kwargs):
        """Render the wrapped fields and the (context-aware) legend."""
        fields = self.get_rendered_fields(form, form_style, context, template_pack, **kwargs)
        self.errors = self._has_errors(form=form)
        legend = ''
        if self.legend:
            # The legend is itself rendered as a template so context
            # variables (e.g. {{ user.username }}) resolve.
            legend = '%s' % Template(text_type(self.legend)).render(context)
        template = self.get_template_name(template_pack)
        return render_to_string(
            template,
            {'fieldset': self, 'legend': legend, 'fields': fields, 'form_style': form_style}
        )
class Field(LayoutObject):
    """
    Layout object, It contains one field name, and you can add attributes to it easily.
    For setting class attributes, you need to use `css_class`, as `class` is a Python keyword.
    Example::

        Field('field_name', style="color: #333;", css_class="whatever", id="field_name")
    """
    template = "%s/layout/field.html"

    def __init__(self, *args, **kwargs):
        self.fields = list(args)
        # One fresh attrs dict per instance.  (The original re-checked
        # `hasattr(self, 'attrs')` immediately after this assignment, which
        # was dead code and has been removed.)
        self.attrs = {}
        if 'css_class' in kwargs:
            if 'class' in self.attrs:
                self.attrs['class'] += " %s" % kwargs.pop('css_class')
            else:
                self.attrs['class'] = kwargs.pop('css_class')
        self.wrapper_class = kwargs.pop('wrapper_class', None)
        self.template = kwargs.pop('template', self.template)
        # We use kwargs as HTML attributes, turning data_id='test' into data-id='test'
        self.attrs.update(dict([(k.replace('_', '-'), conditional_escape(v)) for k, v in kwargs.items()]))
        self.errors = False

    def _has_errors(self, form):
        # Only the first bound field name is checked for validation errors.
        if self.fields[0] in form.errors.keys():
            return True
        else:
            return False

    def render(self, form, form_style, context, template_pack=TEMPLATE_PACK, extra_context=None, **kwargs):
        """Render the field, choosing the CSS class from the widget type."""
        if extra_context is None:
            extra_context = {}
        if hasattr(self, 'wrapper_class'):
            extra_context['wrapper_class'] = self.wrapper_class
        template = self.get_template_name(template_pack)
        self.errors = self._has_errors(form=form)
        field_class = form.fields.get(self.fields[0])
        # Widgets carrying a 'choices' attribute (selects) get select styling;
        # everything else is treated as a plain input.
        if 'choices' in field_class.widget.__dict__:
            self.attrs['class'] = 'vds-select'
        else:
            self.attrs['class'] = 'vds-input'
        return self.get_rendered_fields(
            form, form_style, context, template_pack,
            template=template, attrs=self.attrs, extra_context=extra_context,
            **kwargs
        )
class Inline(LayoutObject):
    """
    Layout object that renders an admin inline formset (or a plain inline
    formset) identified by *name*.
    """

    def __init__(self, name, **kwargs):
        self.fields = list()
        self.name = name
        self.errors = False
        # NOTE(review): this invokes LayoutObject's *parent* initializer,
        # skipping LayoutObject.__init__ itself — confirm this is intended.
        super(LayoutObject, self).__init__(**kwargs)

    def render(self, form, form_style, context, template_pack=None, **kwargs):
        try:
            # Admin change-form context: match the inline by its options
            # class name and render it with the admin-provided template.
            for inline_admin_formset in context['inline_admin_formsets']:
                if self.name == inline_admin_formset.opts.__class__.__name__:
                    context['inline_admin_formset'] = inline_admin_formset
                    self.template = inline_admin_formset.opts.template
                    return render_to_string(self.template, context)
        except Exception:
            # Fall back to non-admin inline formsets (typically a KeyError
            # when 'inline_admin_formsets' is absent from the context).
            # Narrowed from a bare `except:`, which also swallowed
            # SystemExit/KeyboardInterrupt.
            for inline_formset in context['inline_formsets']:
                if self.name == inline_formset.name:
                    formset = inline_formset.get_formset()
                    context['inline_formset'] = formset
                    self.template = 'vds/forms/inlines/base.html'
                    return render_to_string(self.template, context)
# class Button(Button):
# """
# Button("id", "name", "type")
# Button("id-cnl", "Cancel", "/sss/")
#
# Button('id-save', _("Save"), "submit", "primary")
# Button('id-cancel', _("Cancel"), "/action/delete", style="primary")
#
# Button('id-cancel', _("Cancel"), "/action/delete", style="danger", icon="danger")
# """
# template = '%s/layout/button.html'
# input_type = 'button'
#
# def __init__(self, name, value, link, style='secondary', *args, **kwargs):
#
# super(Button, self).__init__(name=name, value=value, *args, **kwargs)
#
# # Link
# if link == 'submit':
# self.input_type = 'submit'
# else:
# self.input_type = 'a'
# self.attrs = {
# 'href': link
# }
#
# # Style
# self.field_classes = 'vds-button'
# if style == 'secondary':
# self.field_classes += ' vds-button--neutral'
#
# if style == 'primary':
# self.field_classes += ' vds-button--brand'
#
# if style == 'danger':
# self.field_classes += ' vds-button--destructive'
#
# # Icon
# try:
# self.icon = kwargs['icon']
# except:
# self.icon = False
#
# def render(self, form, form_style, context, template_pack=TEMPLATE_PACK, **kwargs):
# """
# Renders an `<button />` if container is used as a Layout object.
# Input button value can be a variable in context.
# """
# self.value = Template(text_type(self.value)).render(context)
# template = self.get_template_name(template_pack)
#
# self.attrs.update({
# 'class': self.field_classes,
# 'value': self.value,
# 'name': self.name,
# })
#
# self.flat_attrs = flatatt(self.attrs)
#
# return render_to_string(template, {'button': self}, context)
class Button(object):
    """
    Minimal VDS-styled button / link.

    Button('id-save', 'Save', 'submit', style='primary')   -> <button type="submit">
    Button('id-cancel', 'Cancel', '/action/delete')        -> <a href="...">

    :param name: HTML ``name`` attribute (submit buttons only).
    :param value: visible label.
    :param link: target URL, or the literal string ``'submit'`` for a submit button.
    :param style: one of ``'secondary'`` (default), ``'primary'``, ``'danger'``.
    """

    # Map public style names to VDS class suffixes.
    STYLE_MAP = {
        'secondary': 'neutral',
        'primary': 'brand',
        'danger': 'destructive',
    }

    def __init__(self, name, value, link='/', style='secondary', *args, **kwargs):
        self.name = name
        self.value = value
        self.link = link
        # Unknown styles fall back to 'neutral'; previously they left
        # `self.style` unset and render() raised AttributeError.
        self.style = self.STYLE_MAP.get(style, 'neutral')

    def render(self):
        """Return the button/link HTML as a plain string."""
        context = {
            "value": self.value,
            "link": self.link,
            "name": self.name,
            "style": self.style,
        }
        if self.link == 'submit':
            # Fixed: the class attribute was missing its closing quote,
            # producing invalid HTML.
            html = '<button class="vds-button vds-button--{style}" type="submit" name="{name}">{value}</button>'.format(**context)
        else:
            html = '<a class="vds-button vds-button--{style}" href="{link}">{value}</a>'.format(**context)
        return html
class Breadcrumb(object):
    """A single breadcrumb entry: a display *title* for the target *href*."""

    def __init__(self, title, href):
        self.href = href
        self.title = title
class Header(object):
    """Page-header view model: a title, an icon, and action/navigation lists."""

    def __init__(self):
        # Everything starts empty; callers populate the fields afterwards.
        self.icon = None
        self.title = None
        self.navigation = []
        self.actions = []
|
|
from flask import render_template, flash, redirect, session, url_for, request, g
from flask import jsonify
from flask.ext.login import login_user, logout_user, current_user, login_required
from flask.ext.babel import lazy_gettext
from flask.ext.sqlalchemy import get_debug_queries
from app import app, db, lm, babel
from .forms import LoginForm, EditForm, PostForm
from .models import User, Post
from oauth import OAuthSignIn
from datetime import datetime
from config import POSTS_PER_PAGE, LANGUAGES, DATABASE_QUERY_TIMEOUT
from .emails import follower_notification
from guess_language import guessLanguage
from .translate import microsoft_translate
@app.route('/', methods=['GET', 'POST'])
@app.route('/index', methods=['GET', 'POST'])
@app.route('/index/<int:page>', methods=['GET', 'POST'])
@login_required
def index(page=1):
    """Home page: show the followed-posts timeline and handle new-post submission."""
    form = PostForm()
    if form.validate_on_submit():
        language = guessLanguage(form.post.data)
        # Treat unknown / implausibly long language codes as "no language".
        if language == 'UNKNOWN' or len(language) > 5:
            language = ''
        post = Post(body=form.post.data, timestamp=datetime.utcnow(), author=g.user, language=language)
        db.session.add(post)
        db.session.commit()
        flash(lazy_gettext('Your post is now live!'))
        # POST/redirect/GET: avoid re-submitting the form on refresh.
        return redirect(url_for('index'))
    posts = g.user.followed_posts().paginate(page, POSTS_PER_PAGE, False)
    return render_template('index.html',
                           title='Home',
                           posts=posts,
                           form=form)
# @app.route('/login', methods=['GET', 'POST'])
# @oid.loginhandler # tells Flask-OpenID this is our login view function
# def login():
# if g.user is not None and g.user.is_authenticated:
# return redirect(url_for('index')) # clean than redirect('/index')
# form = LoginForm()
# # handle submitted form data
# # validate_on_submit() will run all the validators and return boolean
# if form.validate_on_submit():
# session['remember_me'] = form.remember_me.data
# return oid.try_login(form.openid.data, ask_for=['nickname', 'email'])
# return render_template('login.html',
# title='Sign In',
# form=form,
# providers=app.config['OPENID_PROVIDERS'])
# loads a user from database, used by Flask-Login
# user ids in Flask-Login are always unicode strings, so convert to int is necessary
@lm.user_loader
def load_user(id):
    """Flask-Login user loader: map a unicode user id to a User row."""
    return User.query.get(int(id))
# @oid.after_login
# def after_login(resp):
# # validation
# if resp.email is None or resp.email == "":
# flash('Invalid login. Please try again.')
# return redirect(url_for('login'))
# # search our database for the user
# user = User.query.filter_by(email=resp.email).first()
# # if not found, this is a new user, add into databse
# if user is None:
# nickname = resp.nickname
# if nickname is None or nickname == "":
# nickname = resp.email.split('@')[0]
# user = User(nickname=nickname, email=resp.email)
# db.session.add(user)
# db.session.commit()
# remember_me = False
# if 'remember_me' in session:
# remember_me = session['remember_me']
# session.pop('remember_me', None)
# login_user(user, remember=remember_me)
# return redirect(request.args.get('next') or url_for('index'))
# Any function decorated with before_request will run before
# the view function
# `current_user` is set by Flask-Login, so store it in g.user
@app.before_request
def before_request():
    """Stash the current user and locale on `g`; refresh last_seen for logged-in users."""
    g.user = current_user
    if g.user.is_authenticated:
        g.user.last_seen = datetime.utcnow()
        db.session.add(g.user)
        db.session.commit()
    g.locale = get_locale()
@app.after_request
def after_request(response):
    """Log any database query slower than DATABASE_QUERY_TIMEOUT, then pass the response through."""
    for query in get_debug_queries():
        if query.duration >= DATABASE_QUERY_TIMEOUT:
            # Fixed: `app.logger.Warning` is not a logger method and raised
            # AttributeError whenever a slow query was detected.
            app.logger.warning(
                "SLOW QUERY: %s\nParameters: %s\nDuration: %fs\nContext: %s\n"
                % (query.statement, query.parameters, query.duration, query.context))
    return response
@app.route('/logout')
def logout():
    """Log the current user out and return to the index page."""
    logout_user()
    return redirect(url_for('index'))
#################################
# OAuth views
#################################
@app.route('/oauth')
def oauth():
    """OAuth sign-in landing page; already-authenticated users go to the index."""
    if g.user is not None and g.user.is_authenticated:
        return redirect(url_for('index'))
    return render_template('oauth.html')
@app.route('/authorize/<provider>')
def oauth_authorize(provider):
    """Start the OAuth flow by redirecting to the provider's authorize URL."""
    if not g.user.is_anonymous:
        return redirect(url_for('index'))
    oauth = OAuthSignIn.get_provider(provider)
    return oauth.authorize()
@app.route('/callback/<provider>')
def oauth_callback(provider):
    """Complete the OAuth flow: create the user on first login, then sign them in."""
    if not g.user.is_anonymous:
        return redirect(url_for('index'))
    oauth = OAuthSignIn.get_provider(provider)
    social_id, username, email, avatarLarge, avatarSmall = oauth.callback()
    if social_id is None:
        flash('Authentication failed.')
        return redirect(url_for('index'))
    user = User.query.filter_by(social_id=social_id).first()
    if not user:
        # First login: create the account with a unique nickname.
        nickname = User.make_unique_nickname(username)
        user = User(social_id=social_id, nickname=nickname, email=email, avatarLarge=avatarLarge, avatarSmall=avatarSmall)
        db.session.add(user)
        db.session.commit()
        # follow him/herself
        db.session.add(user.follow(user))
        db.session.commit()
    else:
        # Ensure existing users also follow themselves.
        u = user.follow(user)
        if u:
            db.session.add(u)
            db.session.commit()
    remember_me = False
    if 'remember_me' in session:
        remember_me = session['remember_me']
        session.pop('remember_me', None)
    login_user(user, remember=remember_me)
    return redirect(url_for('index'))
# User profile view
@app.route('/user/<nickname>')
@app.route('/user/<nickname>/<int:page>')
@login_required
def user(nickname, page=1):
    """Show a user's profile with a paginated list of their posts."""
    user = User.query.filter_by(nickname=nickname).first()
    # Idiomatic identity check instead of `== None`.
    if user is None:
        flash('User %s not found.' % nickname)
        return redirect(url_for('index'))
    posts = user.sorted_posts().paginate(page, POSTS_PER_PAGE, False)
    return render_template('user.html',
                           user=user,
                           posts=posts)
# User about_me edit view
@app.route('/edit', methods=['GET', 'POST'])
@login_required
def edit():
    """Edit the current user's nickname and about_me; pre-fill the form on GET."""
    form = EditForm(g.user.nickname)
    if form.validate_on_submit():
        g.user.nickname = form.nickname.data
        g.user.about_me = form.about_me.data
        db.session.add(g.user)
        db.session.commit()
        flash(lazy_gettext('Your changes have been saved.'))
        return redirect(url_for('edit'))
    else:
        # GET (or failed validation): show the current values.
        form.nickname.data = g.user.nickname
        form.about_me.data = g.user.about_me
    return render_template('edit.html', form=form)
@app.route('/user_list')
@login_required
def user_list():
    """List all registered users."""
    users = User.query.all()
    # `Query.all()` returns a list (never None), so the previous
    # `users == None` check could not fire; test for an empty result instead.
    if not users:
        flash('No user are found!')
        return redirect(url_for('index'))
    return render_template('user_list.html', users=users)
@app.errorhandler(404)
def not_found_error(error):
    """Render the custom 404 page."""
    return render_template('404.html'), 404
@app.errorhandler(500)
def internal_error(error):
    """Render the custom 500 page after restoring a working DB session."""
    # Roll the database session back so later requests aren't poisoned.
    db.session.rollback()
    return render_template('500.html'), 500
@app.route('/follow/<nickname>')
@login_required
def follow(nickname):
    """Make the logged-in user follow `nickname`, guarding the usual edge cases."""
    user = User.query.filter_by(nickname=nickname).first()
    if user is None:
        flash('User %s not found.' % nickname)
        return redirect(url_for('index'))
    if user == g.user:
        flash('You can\'t follow yourself!')
        return redirect(url_for('user', nickname=nickname))
    # follow() returns None when the relationship already exists.
    u = g.user.follow(user)
    if u is None:
        flash('Cannot follow ' + nickname + '.')
        return redirect(url_for('user', nickname=nickname))
    db.session.add(u)
    db.session.commit()
    flash('You are now following ' + nickname + '!')
    return redirect(url_for('user', nickname=nickname))
@app.route('/unfollow/<nickname>')
@login_required
def unfollow(nickname):
    """Stop the logged-in user from following `nickname`."""
    user = User.query.filter_by(nickname=nickname).first()
    if user is None:
        flash('User %s not found.' % nickname)
        return redirect(url_for('index'))
    if user == g.user:
        flash('You can\'t unfollow yourself!')
        return redirect(url_for('user', nickname=nickname))
    # unfollow() returns None when there was no relationship to remove.
    u = g.user.unfollow(user)
    if u is None:
        flash('Cannot unfollow ' + nickname + '.')
        return redirect(url_for('user', nickname=nickname))
    db.session.add(u)
    db.session.commit()
    flash('You have stopped following ' + nickname + '.')
    # NOTE(review): emailing a *follower* notification on UNfollow looks
    # misplaced — this call probably belongs in follow(); confirm intent.
    follower_notification(user, g.user)
    return redirect(url_for('user', nickname=nickname))
@babel.localeselector
def get_locale():
    """Pick the best supported UI language from the request's Accept-Language header."""
    return request.accept_languages.best_match(LANGUAGES.keys())
@app.route('/translate', methods=['POST'])
@login_required
def translate():
    """AJAX endpoint: translate posted text via the Microsoft translator service."""
    return jsonify({
        'text': microsoft_translate(
            request.form['text'],
            request.form['sourceLang'],
            request.form['destLang']
        )
    })
@app.route('/delete/<int:id>')
@login_required
def delete(id):
    """Delete a post; only its author may delete it."""
    post = Post.query.get(id)
    if post is None:
        flash(lazy_gettext('Post not found.'))
        return redirect(url_for('index'))
    if post.author.id != g.user.id:
        flash(lazy_gettext('You cannot delete this post!'))
        return redirect(url_for('index'))
    db.session.delete(post)
    db.session.commit()
    flash(lazy_gettext('Your post has been deleted.'))
    return redirect(url_for('index'))
|
|
##
## MCMC sampler for Mixture-of-Isoforms (MISO) model
##
## Yarden Katz <yarden@mit.edu>
##
## The sampler uses a Metropolis-Hastings sampling scheme, combined with
## a Gibbs sampling step.
##
import scipy
import misopy
from misopy.reads_utils import count_aligned_reads, \
count_isoform_assignments
from misopy.read_simulator import simulate_reads, print_reads_summary, \
read_counts_to_read_list, \
get_reads_summary
import misopy.hypothesis_test as ht
from misopy.Gene import Gene, Exon
from misopy.py2c_gene import *
# C MISO interface
import pysplicing
from scipy import *
from numpy import *
import cPickle as pickle
from scipy.stats import mode
import math
import time
from numpy import numarray
import os
import sys
from collections import defaultdict
import glob
import logging
import logging.handlers
# Cache of already-created loggers, keyed by logger name (see get_logger).
loggers = {}
def get_logger(logger_name, log_outdir,
               level=logging.WARNING,
               include_stdout=True):
    """
    Return a (cached) logging object.

    :param logger_name: name passed to logging.getLogger; also the cache key.
    :param log_outdir: directory created for log output; may be None.
    :param level: level applied to the root logger and the stream handler.
    :param include_stdout: when True, attach a stdout StreamHandler.
    """
    global loggers
    # Create the output directory if one was requested.  Catching OSError
    # avoids a race when several processes create it at once.  Fixed:
    # os.makedirs(None) raises TypeError (not OSError), so skip the call
    # entirely when no directory was given.
    if log_outdir is not None:
        try:
            os.makedirs(log_outdir)
        except OSError:
            pass
    if loggers.get(logger_name):
        return loggers.get(logger_name)
    logger = logging.getLogger(logger_name)
    formatter = \
        logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s',
                          datefmt='%m/%d/%Y %I:%M:%S %p')
    # File logging is intentionally disabled; only the root level is set.
    logging.root.setLevel(level)
    # Optionally add handler that streams all logs to stdout
    if include_stdout:
        ch = logging.StreamHandler(sys.stdout)
        ch.setLevel(level)
        ch.setFormatter(formatter)
        logger.addHandler(ch)
    logger.info("Created logger %s" %(logger_name))
    loggers.update({logger_name: logger})
    return logger
##
## Helper statistics/linear algebra functions
##
def set_diag(a, v):
    """Set every main-diagonal entry of the square matrix *a* to *v* (in place) and return it."""
    for row in range(len(a)):
        a[row, row] = v
    return a
def maxi(l):
    """Return the index of the first occurrence of the maximum value in *l*."""
    index, _ = max(enumerate(l), key=lambda pair: pair[1])
    return index
def mini(l):
    """Return the index of the first occurrence of the minimum value in *l*."""
    index, _ = min(enumerate(l), key=lambda pair: pair[1])
    return index
def exp_logsumexp(a):
    # Exponentiate after subtracting log-sum-exp: yields exp(a) normalized to
    # sum to 1, computed stably in log space.  `exp` and `logsumexp` come from
    # the star imports above (scipy/numpy).
    return exp(a - logsumexp(a))
def vect_logsumexp(a, axis=None):
    """
    Compute log(sum(exp(a))) stably along *axis*.

    When axis is None, defers to the flat `logsumexp` (star-imported above).
    """
    if axis is None:
        # Use the scipy.maxentropy version.
        return logsumexp(a)
    a = asarray(a)
    shp = list(a.shape)
    shp[axis] = 1
    # Subtract the per-axis maximum before exponentiating to avoid overflow.
    a_max = a.max(axis=axis)
    s = log(exp(a - a_max.reshape(shp)).sum(axis=axis))
    lse = a_max + s
    return lse
def print_assignment_summary(assignments):
    """Print, per isoform, how many reads were assigned to it (Python 2)."""
    counts = defaultdict(int)
    for a in assignments:
        counts[a] += 1
    for k, v in counts.iteritems():
        print "Total of %d in isoform %d" %(v, k)
def float_array_to_str(array_of_floats):
    """
    Convert a float numpy array to a string for printing purposes.
    """
    formatted = ' '.join('%.3f' % value for value in array_of_floats)
    return '[%s]' % formatted
def get_paired_end_sampler_params(num_isoforms,
                                  mean_frag_len,
                                  frag_variance,
                                  read_len,
                                  overhang_len=1):
    """
    Return parameters for MISO sampler, in paired-end mode.

    :param num_isoforms: number of isoforms (sizes the proposal covariance).
    :param mean_frag_len: mean of the fragment length distribution.
    :param frag_variance: variance of the fragment length distribution.
    :param read_len: read length in bases.
    :param overhang_len: minimum junction overhang (default 1).
    """
    # Diagonal covariance for the (num_isoforms-1)-dim drift proposal.
    # (The previously computed `hyperparameters = ones(num_isoforms)` was
    # unused and has been removed.)
    proposal_diag = 0.05
    sigma = set_diag(zeros([num_isoforms-1, num_isoforms-1]),
                     proposal_diag)
    sampler_params = {'read_len': read_len,
                      'overhang_len': overhang_len,
                      'uniform_proposal': False,
                      'sigma_proposal': sigma,
                      'mean_frag_len': mean_frag_len,
                      'frag_variance': frag_variance}
    return sampler_params
def get_single_end_sampler_params(num_isoforms,
                                  read_len,
                                  overhang_len=1):
    """
    Return parameters for MISO sampler, in single-end mode.

    :param num_isoforms: number of isoforms (sizes the proposal covariance).
    :param read_len: read length in bases.
    :param overhang_len: minimum junction overhang (default 1).
    """
    # Diagonal covariance for the (num_isoforms-1)-dim drift proposal.
    # (The previously computed `hyperparameters = ones(num_isoforms)` was
    # unused and has been removed.)
    proposal_diag = 0.05
    sigma = set_diag(zeros([num_isoforms-1, num_isoforms-1]),
                     proposal_diag)
    sampler_params = {'read_len': read_len,
                      'overhang_len': overhang_len,
                      'uniform_proposal': False,
                      'sigma_proposal': sigma}
    return sampler_params
class MISOSampler:
    """
    Driver for the C MISO MCMC sampler (Metropolis-Hastings + Gibbs step).

    Wraps the pysplicing C interface: converts a Python Gene to its C
    representation, runs the sampler, and writes results to a .miso file.
    """

    def __init__(self, params,
                 paired_end=False,
                 log_dir=None):
        """
        Make a sampler with the given parameters.

        :param params: dict of sampler settings (see get_*_sampler_params).
        :param paired_end: when True, 'mean_frag_len' and 'frag_variance'
            must be present in params.
        :param log_dir: directory under which a 'logs' subdirectory is made.
        """
        self.params = params
        self.paired_end = paired_end
        # set default fragment length distribution parameters
        if self.paired_end:
            if ((not 'mean_frag_len' in self.params) or \
                (not 'frag_variance' in self.params)):
                raise Exception, "Must set mean_frag_len and frag_variance when " \
                      "running in sampler on paired-end data."
            self.mean_frag_len = self.params['mean_frag_len']
            self.frag_variance = self.params['frag_variance']
        if log_dir != None:
            self.log_dir = os.path.abspath(os.path.expanduser(log_dir))
            self.log_dir = os.path.join(log_dir, 'logs')
            # Avoid race-conditions
            try:
                os.makedirs(self.log_dir)
            except OSError:
                pass
        # NOTE(review): when log_dir is None, self.log_dir is never set and
        # the next line raises AttributeError — confirm callers always pass
        # a log_dir.
        self.miso_logger = get_logger('miso_logger', self.log_dir)
        self.miso_logger.info("Instantiated sampler.")

    def run_sampler(self, num_iters, reads, gene, hyperparameters, params,
                    output_file,
                    num_chains=6,
                    burn_in=1000,
                    lag=2,
                    prior_params=None,
                    # By default, use sampler with read classes (collapsed)
                    # to get speed boost for single-end reads
                    # (To revert to old reassigning sampler, use
                    # pysplicing.MISO_ALGO_REASSIGN)
                    algorithm=pysplicing.MISO_ALGO_CLASSES,
                    start_cond=pysplicing.MISO_START_AUTO,
                    stop_cond=pysplicing.MISO_STOP_FIXEDNO,
                    verbose=True):
        """
        Fast version of MISO MCMC sampler.

        Calls C version and returns results.

        :param reads: (read_positions, read_cigars) pair.
        :param output_file: base filename; '.miso' is appended.
        :returns: None in all paths (results are written to output_file, or
            the run is skipped when there are no reads / one isoform / the
            output file already exists).
        """
        num_isoforms = len(gene.isoforms)
        self.num_isoforms = num_isoforms
        if prior_params == None:
            # Uniform Dirichlet prior over isoforms.
            prior_params = (1.0,) * num_isoforms
        read_positions = reads[0]
        read_cigars = reads[1]
        self.num_reads = len(read_positions)
        if self.num_reads == 0:
            print "No reads for gene: %s" %(gene.label)
            return
        output_file = output_file + ".miso"
        # If output filename exists, don't run sampler
        if os.path.isfile(os.path.normpath(output_file)):
            print "Output filename %s exists, not running MISO." \
                  %(output_file)
            return None
        self.params['iters'] = num_iters
        self.params['burn_in'] = burn_in
        self.params['lag'] = lag
        # Define local variables related to reads and overhang
        self.overhang_len = self.params['overhang_len']
        self.read_len = self.params['read_len']
        t1 = 0
        t2 = 0
        if verbose:
            t1 = time.time()
        rejected_proposals = 0
        accepted_proposals = 0
        psi_vectors = []
        all_psi_proposals = []
        if params['uniform_proposal']:
            self.miso_logger.debug("UNIFORM independent proposal being used.")
            proposal_type = "unif"
        else:
            self.miso_logger.debug("Non-uniform proposal being used.")
            self.miso_logger.debug(" - sigma_proposal: " + str(params['sigma_proposal']))
            proposal_type = "drift"
        init_psi = ones(num_isoforms)/float(num_isoforms)
        # Do not process genes with one isoform
        if num_isoforms == 1:
            one_iso_msg = "Gene %s has only one isoform; skipping..." \
                          %(gene.label)
            self.miso_logger.warning(one_iso_msg)
            return
        # Convert Python Gene object to C
        c_gene = py2c_gene(gene)
        ##
        ## Run C MISO
        ##
        # The C sampler expects 1-based read positions.
        read_positions = tuple([r+1 for r in read_positions])
        if self.paired_end:
            # Number of standard deviations in insert length
            # distribution to consider when assigning reads
            # to isoforms
            num_sds = 4L
            # Run paired-end
            miso_results = pysplicing.MISOPaired(c_gene, 0L,
                                                 read_positions,
                                                 read_cigars,
                                                 long(self.read_len),
                                                 float(self.mean_frag_len),
                                                 float(self.frag_variance),
                                                 float(num_sds),
                                                 long(num_iters),
                                                 long(burn_in),
                                                 long(lag),
                                                 prior_params,
                                                 long(self.overhang_len),
                                                 long(num_chains),
                                                 start_cond,
                                                 stop_cond)
        else:
            # Run single-end
            miso_results = pysplicing.MISO(c_gene,
                                           0L,
                                           read_positions,
                                           read_cigars,
                                           long(self.read_len),
                                           long(num_iters),
                                           long(burn_in),
                                           long(lag),
                                           prior_params,
                                           long(self.overhang_len),
                                           long(num_chains),
                                           start_cond,
                                           stop_cond,
                                           algorithm)
        # Psi samples
        psi_vectors = transpose(array(miso_results[0]))
        # Log scores of accepted samples
        kept_log_scores = transpose(array(miso_results[1]))
        # Read classes
        read_classes = miso_results[2]
        # Read class statistics
        read_class_data = miso_results[3]
        # Assignments of reads to isoforms
        assignments = miso_results[4]
        # Statistics and parameters about sampler run
        run_stats = miso_results[5]
        # Assignments of reads to classes.
        # read_classes[n] represents the read class that has
        # read_assignments[n]-many reads.
        reads_data = (read_classes, read_class_data)
        assignments = array(assignments)
        # Skip events where all reads are incompatible with the annotation;
        # do not output a file for those.
        if all(assignments == -1):
            print "All reads incompatible with annotation, skipping..."
            return
        accepted_proposals = run_stats[4]
        rejected_proposals = run_stats[5]
        percent_acceptance = (float(accepted_proposals)/(accepted_proposals + \
                                                         rejected_proposals)) * 100
        # Write MISO output to file
        print "Outputting samples to: %s..." %(output_file)
        self.miso_logger.info("Outputting samples to: %s" %(output_file))
        self.output_miso_results(output_file, gene, reads_data, assignments,
                                 psi_vectors, kept_log_scores, num_iters,
                                 burn_in, lag, percent_acceptance,
                                 proposal_type)
        if verbose:
            t2 = time.time()
            print "Event took %.2f seconds" %(t2 - t1)

    def output_miso_results(self, output_file, gene, reads_data, assignments,
                            psi_vectors, kept_log_scores, num_iters, burn_in,
                            lag, percent_acceptance, proposal_type):
        """
        Output results of MISO to a file.

        Writes a tab-delimited header line describing the run, then one
        "psi_sample<TAB>log_score" line per kept sample.
        """
        output = open(output_file, 'w')
        # Get a string representation of the isoforms - use '_'
        # in the delimiter regardless
        iso_delim = '_'
        if type(gene.isoforms[0].desc) == list:
            str_isoforms = '[' + ",".join(["\'" + iso_delim.join(iso.desc) + "\'" \
                                           for iso in gene.isoforms]) + ']'
        else:
            str_isoforms = '[' + ",".join(["\'" + iso.desc + "\'" \
                                           for iso in gene.isoforms]) + ']'
        num_isoforms = len(gene.isoforms)
        # And of the exon lengths
        exon_lens = ",".join(["(\'%s\',%d)" %(p.label, p.len) \
                              for p in gene.parts])
        ## Compile header with information about isoforms and internal parameters used
        ## by the sampler, and also information about read counts and number of
        ## reads assigned to each isoform.
        read_classes, read_class_counts = reads_data
        read_counts_list = []
        for class_num, class_type in enumerate(read_classes):
            class_counts = read_class_counts[class_num]
            # Get the read class type in string format
            class_str = str(tuple([int(c) for c in class_type])).replace(" ", "")
            # Get the read class counts in string format
            class_counts_str = "%s" %(int(read_class_counts[class_num]))
            # Put class and counts together
            curr_str = "%s:%s" %(class_str,
                                 class_counts_str)
            read_counts_list.append(curr_str)
        # Get a summary of the raw read counts supporting each isoform
        read_counts_str = ",".join(read_counts_list)
        assigned_counts = count_isoform_assignments(assignments)
        # Get number of reads assigned to each isoform
        assigned_counts_str = ",".join(["%d:%d" %(c[0], c[1]) \
                                        for c in assigned_counts])
        # coordinates where mRNAs start
        mRNA_starts = []
        mRNA_ends = []
        for iso in gene.isoforms:
            mRNA_starts.append(iso.genomic_start)
            mRNA_ends.append(iso.genomic_end)
        mRNA_start_coords = ",".join([str(start) for start in mRNA_starts])
        mRNA_end_coords = ",".join([str(end) for end in mRNA_ends])
        chrom = gene.chrom
        if chrom == None:
            chrom = "NA"
        strand = gene.strand
        if strand == None:
            strand = "NA"
        header = "#isoforms=%s\texon_lens=%s\titers=%d\tburn_in=%d\tlag=%d\t" \
                 "percent_accept=%.2f\tproposal_type=%s\t" \
                 "counts=%s\tassigned_counts=%s\tchrom=%s\tstrand=%s\tmRNA_starts=%s\tmRNA_ends=%s\n" \
                 %(str_isoforms, exon_lens, num_iters, burn_in, lag,
                   percent_acceptance, proposal_type, read_counts_str,
                   assigned_counts_str,
                   # Fields related to gene/event
                   chrom,
                   strand,
                   mRNA_start_coords,
                   mRNA_end_coords)
        output.write(header)
        # Output samples and their associated log scores, as well as read counts
        results_fields = ["sampled_psi", "log_score"]
        results_header = "%s\n" %("\t".join(results_fields))
        output.write(results_header)
        for psi_sample, curr_log_score in zip(psi_vectors, kept_log_scores):
            psi_sample_str = ",".join(["%.4f" %(psi) for psi in psi_sample])
            output_line = "%s\t%.2f\n" %(psi_sample_str, curr_log_score)
            output.write(output_line)
        output.close()
        print "Completed outputting."
        # return [percent_acceptance, array(psi_vectors), array(kept_log_scores)]
# return [percent_acceptance, array(psi_vectors), array(kept_log_scores)]
def run_sampler_on_event(gene, ni, ne, nb, read_len, overhang_len, num_iters,
                         output_dir, confidence_level=.95):
    """
    Run sampler on a two-isoform gene event.

    :param gene: Gene object (output is indexed by gene.chrom when known).
    :param ni, ne, nb: inclusion / exclusion / body read counts.
    :param read_len: read length in bases.
    :param overhang_len: minimum junction overhang.
    :param num_iters: number of MCMC iterations.
    :param output_dir: directory for sampler output.
    :param confidence_level: credible interval mass (default .95).
    :returns: (samples, cred_interval) tuple; both empty lists when the
        sampler produced no results.
    """
    print "Running sampler on a two-isoform event..."
    print " - Gene label: ", gene.label, gene
    print " - NI, NE, NB: %d, %d, %d" %(ni, ne, nb)
    print "Using default sampler parameters."
    if gene.chrom != None:
        # Index output by chromosome
        print "Indexing by chromosome..."
        output_dir = os.path.join(output_dir, gene.chrom)
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    output_filename = os.path.join(output_dir, gene.label)
    samples = []
    cred_interval = []
    num_isoforms = len(gene.isoforms)
    burn_in = 500
    lag = 10
    hyperparameters = ones(num_isoforms)
    proposal_diag = 0.05
    sigma = set_diag(zeros([num_isoforms-1, num_isoforms-1]),
                     proposal_diag)
    sampler_params = {'read_len': read_len,
                      'overhang_len': overhang_len,
                      'uniform_proposal': False,
                      'sigma_proposal': sigma}
    sampler = MISOSampler(sampler_params, log_dir=output_dir)
    reads = read_counts_to_read_list(ni, ne, nb)
    t1 = time.time()
    # NOTE(review): MISOSampler.run_sampler returns None in every path, so
    # the credible-interval branch below appears unreachable — confirm.
    sampler_results = sampler.run_sampler(num_iters, reads, gene, hyperparameters,
                                          sampler_params, output_filename, burn_in=burn_in,
                                          lag=lag)
    if not sampler_results:
        return (samples, cred_interval)
    samples = sampler_results[1]
    # Compute credible intervals
    cred_interval = ht.compute_credible_intervals(samples, confidence_level=confidence_level)
    t2 = time.time()
    print " - Sampler run took %s seconds." %(str(t2-t1))
    # return samples and credible intervals
    return (samples, cred_interval)
def profile_miso():
    """Exercise the sampler repeatedly on a synthetic two-isoform gene (profiling aid)."""
    from Gene import make_gene
    gene = make_gene([150, 100, 150], [[1, 2, 3], [1, 3]])
    read_len = 36
    overhang_len = 4
    output_dir = "profiler-test"
    for x in range(10):
        print "x = %d" %(x)
        a, b = run_sampler_on_event(gene, 500, 50, 40, read_len, overhang_len,
                                    10000, output_dir)
def main():
    # Currently a no-op; the commented profiling harness below is kept for
    # reference (see profile_miso).
    return
    # import cProfile as profile
    # import pstats
    # output_file = "profile"
    # profile.run('profile_miso()', output_file)
    # p = pstats.Stats(output_file)
    # print "name: "
    # print p.sort_stats('name')
    # print "all stats: "
    # p.print_stats()
    # print "cumulative (top 10): "
    # p.sort_stats('cumulative').print_stats(20)

if __name__ == '__main__':
    main()
|
|
from inspect import getmembers, isroutine, stack
from re import search, sub
import six
from selenium.common.exceptions import ElementNotInteractableException, \
NoSuchWindowException, StaleElementReferenceException
from selenium.webdriver.common.action_chains import ActionChains
import nerodia
from nerodia.adjacent import Adjacent
from nerodia.browser import Browser
from nerodia.container import Container
from nerodia.elements.scroll import Scrolling
from nerodia.exception import Error, NoMatchingWindowFoundException, ObjectDisabledException, \
ObjectReadOnlyException, UnknownFrameException, UnknownObjectException
from nerodia.js_execution import JSExecution
from nerodia.js_snippet import JSSnippet
from nerodia.locators.class_helpers import ClassHelpers
from nerodia.locators.element.selector_builder import SelectorBuilder
from nerodia.user_editable import UserEditable
from nerodia.wait.wait import TimeoutError, Waitable
from nerodia.window import Dimension, Point
class Element(ClassHelpers, JSExecution, Container, JSSnippet, Waitable, Adjacent, Scrolling):
    """
    Base wrapper for HTML elements: lazily locates the underlying selenium
    element from a selector dict and mediates all interaction through
    waiting, relocation and staleness handling.
    """
    ATTRIBUTES = []
    # HTML attributes whose values are matched case-insensitively by locators.
    CASE_INSENSITIVE_ATTRIBUTES = ['accept', 'accept_charset', 'align', 'alink', 'axis', 'bgcolor',
                                   'charset', 'checked', 'clear', 'codetype', 'color', 'compact',
                                   'declare', 'defer', 'dir', 'direction', 'disabled', 'enctype',
                                   'face', 'frame', 'hreflang', 'http_equiv', 'lang', 'language',
                                   'link', 'media', 'method', 'multiple', 'nohref', 'noresize',
                                   'noshade', 'nowrap', 'readonly', 'rel', 'rev', 'rules', 'scope',
                                   'scrolling', 'selected', 'shape', 'target', 'text', 'type',
                                   'valign', 'valuetype', 'vlink']
    keyword = None
    # Lazily populated internals (see ClassHelpers and __getattr__).
    _content_editable = None
    _selector_builder = None
    _element_matcher = None
    _locator = None
    def __init__(self, query_scope, selector):
        """
        :param query_scope: Browser or parent element this element is searched from
        :param selector: dict of locator key/value pairs; may carry a cached
                         selenium element under the 'element' key
        :raises TypeError: if selector is not a dict
        """
        self.query_scope = query_scope
        if not isinstance(selector, dict):
            raise TypeError('invalid argument: {!r}'.format(selector))
        # A pre-located selenium element may be supplied via the selector.
        self.el = selector.pop('element', None)
        if self.el and len(set(selector) - {'tag_name'}) > 0:
            nerodia.logger.deprecate("'element' locator to initialize a relocatable Element",
                                     '#cache=', ids=['element_cache'])
        self.selector = selector
        # Only build the selector when no cached element was supplied.
        if self.el is None:
            self.build()
    @property
    def exists(self):
        """
        Returns True if element exists, False otherwise
        Checking for staleness is deprecated
        :rtype: bool
        """
        try:
            # A previously located but stale element is reset and re-located.
            if self._located and self.stale:
                self.reset()
            elif self._located:
                return True
            self.assert_exists()
            return True
        except (UnknownObjectException, UnknownFrameException):
            return False
    # Alias kept for API compatibility.
    exist = exists
def __repr__(self):
string = '#<{}: '.format(self.__class__.__name__)
if self.keyword:
string += 'keyword: {} '.format(self.keyword)
string += 'located: {}; '.format(self._located)
if not self.selector:
string += '{element: (selenium element)}'
else:
string += self.selector_string
string += '>'
return string
    def __eq__(self, other):
        """
        Returns True if two elements are equal
        Equality is delegated to the underlying selenium elements.
        :param other: other element to compare
        :rtype: bool
        """
        return isinstance(other, self.__class__) and self.wd == other.wd
    eql = __eq__
    def __hash__(self):
        # Hash the cached selenium element when located so equal elements
        # hash alike; fall back to identity hashing otherwise.
        return self.el.__hash__() if self._located else super(Element, self).__hash__()
    @property
    def text(self):
        """
        Returns the text of the element
        :rtype: str
        """
        return self._element_call(lambda: self.el.text)
    @property
    def tag_name(self):
        """
        Returns the tag name of the element, lower-cased
        :rtype: str
        """
        return self._element_call(lambda: self.el.tag_name).lower()
    def click(self, *modifiers):
        """
        Clicks the element, optionally while pressing the given modifier keys.
        Note that support for holding a modifier key is currently experimental, and may not work
        at all.
        :param modifiers: modifier keys to press while clicking
        :Example: Click an element
        browser.element(name='new_user_button').click()
        :Example: Click an element with shift key pressed
        from selenium.webdriver.common.keys import Keys
        browser.element(name='new_user_button').click(Keys.SHIFT)
        :Example: Click an element with several modifier keys pressed
        from selenium.webdriver.common.keys import Keys
        browser.element(name='new_user_button').click(Keys.SHIFT, Keys.CONTROL)
        """
        def method():
            if modifiers:
                # Hold every modifier down, click, then release in order.
                action = ActionChains(self.driver)
                for mod in modifiers:
                    action.key_down(mod)
                action.click(self.el)
                for mod in modifiers:
                    action.key_up(mod)
                action.perform()
            else:
                self.el.click()
        # Clicking requires the element to be enabled, not merely present.
        self._element_call(method, self.wait_for_enabled)
        self.browser.after_hooks.run()
    def js_click(self):
        """
        Simulates JavaScript click event on element.
        :Example: Click an element
        browser.element(name='new_user_button').js_click()
        """
        self.fire_event('click')
        self.browser.after_hooks.run()
    def double_click(self):
        """
        Double clicks the element.
        Note that browser support may vary.
        :Example: Double-click an element
        browser.element(name='new_user_button').double_click()
        """
        self._element_call(lambda: ActionChains(self.driver).double_click(self.el)
                           .perform(), self.wait_for_present)
        self.browser.after_hooks.run()
    def js_double_click(self):
        """
        Simulates JavaScript double click event on element.
        :Example: Double-click an element
        browser.element(name='new_user_button').js_double_click()
        """
        self.fire_event('dblclick')
        self.browser.after_hooks.run()
def right_click(self, *modifiers):
"""
Right clicks the element, optionally while pressing the given modifier keys.
Note that support for holding a modifier key is currently experimental,
and may not work at all. Also, the browser support may vary.
:Example: Right click an element
browser.element(name='new_user_button').right_click()
:Example: Right click an element with shift key pressed
browser.element(name='new_user_button').right_click(nerodia.Keys.SHIFT)
:Example: Click an element with several modifier keys pressed
browser.element(name='new_user_button').right_click(nerodia.Keys.SHIFT, nerodia.Keys.ALT)
"""
def _right_click():
action = ActionChains(self.driver)
if len(modifiers) > 0:
for mod in modifiers:
action.key_down(mod)
action.context_click(self.el)
for mod in modifiers:
action.key_up(mod)
action.perform()
else:
action.context_click(self.el).perform()
self._element_call(_right_click, self.wait_for_present)
self.browser.after_hooks.run()
    def hover(self):
        """
        Moves the mouse to the middle of this element
        Note that browser support may vary
        :Example: Hover over an element
        browser.element(name='new_user_button').hover()
        """
        self._element_call(lambda: ActionChains(self.driver).move_to_element(self.el)
                           .perform(), self.wait_for_present)
        self.browser.after_hooks.run()
    def drag_and_drop_on(self, other):
        """
        Drag and drop this element on to another element instance
        Note that browser support may vary
        :param other: element to drop on
        :Example: Drag an element onto another
        a = browser.div(id='draggable')
        b = browser.div(id='droppable')
        a.drag_and_drop_on(b)
        """
        self._assert_is_element(other)
        value = self._element_call(lambda: ActionChains(self.driver)
                                   .drag_and_drop(self.el, other.wd).perform(),
                                   self.wait_for_present)
        self.browser.after_hooks.run()
        return value
    def drag_and_drop_by(self, xoffset, yoffset):
        """
        Drag and drop this element by the given offsets.
        Note that browser support may vary.
        :param xoffset: amount to move horizontally
        :param yoffset: amount to move vertically
        :Example: Drag an element by offsets
        browser.div(id='draggable').drag_and_drop_by(100, -200)
        """
        self._element_call(lambda: ActionChains(self.driver).
                           drag_and_drop_by_offset(self.el, xoffset, yoffset).perform(),
                           self.wait_for_present)
    def select_text(self, string):
        """
        Selects text on page (as if dragging clicked mouse across provided text)
        :param string: string to select
        :Example:
        browser.legend().select_text('information')
        """
        self._element_call(lambda: self._execute_js('selectText', self.el, string))
    @property
    def classes(self):
        # The class attribute split on whitespace into a list of class names.
        return self.class_name.split()
    def attribute_value(self, attribute_name):
        """
        Returns given attribute value of the element
        :param attribute_name: attribute to retrieve
        :type attribute_name: str
        :rtype: str
        :Example:
        browser.a(id='link_2').attribute_value('title') #=> 'link_title_2'
        """
        return self._element_call(lambda: self.el.get_attribute(attribute_name))
    # Aliases kept for API compatibility.
    get_attribute = attribute_value
    attribute = attribute_value
@property
def attribute_values(self):
"""
Returns all attribute values. Attributes with special characters are returned as String,
rest are returned as a Symbol.
:rtype: dict
:Example:
browser.pre(id='rspec').attribute_values #=> {'class': 'ruby', 'id': 'rspec' }
"""
result = self._element_call(lambda: self._execute_js('attributeValues', self.el))
regex = r'[a-zA-Z\-]*'
for key in result:
match = search(regex, key)
if match and match.group(0) == key:
result[key.replace('-', '_')] = result.pop(key)
return result
get_attributes = attribute_values
attributes = attribute_values
    @property
    def attribute_list(self):
        """
        Returns list of all attributes.
        :rtype: list
        :Example:
        browser.pre(id='rspec').attribute_list #=> ['class', 'id']
        """
        return list(self.attribute_values)
    def send_keys(self, *args):
        """
        Sends sequence of keystrokes to the element
        :param args: keystrokes to send
        :Example:
        browser.text_field(name='new_user_first_name').send_keys('nerodia')
        """
        return self._element_call(lambda: self.el.send_keys(*args), self.wait_for_writable)
    @property
    def focused(self):
        """
        Returns True if the element is focused
        :rtype: bool
        """
        return self._element_call(lambda: self.el == self.driver.switch_to.active_element)
    def fire_event(self, event_name):
        """
        Simulates JavaScript events on element
        Note that you may omit 'on' from event name
        :param event_name: event to fire
        :Example:
        browser.button(name='new_user_button').fire_event('click')
        browser.button(name='new_user_button').fire_event('mousemove')
        browser.button(name='new_user_button').fire_event('onmouseover')
        """
        # Normalize e.g. 'onmouseover' -> 'mouseover'.
        event_name = sub(r'^on', '', str(event_name)).lower()
        self._element_call(lambda: self._execute_js('fireEvent', self.el, event_name))
    def scroll_into_view(self):
        """
        Scroll until the element is in the view screen
        Deprecated in favour of the Element#scroll methods.
        :rtype: Point
        :Example:
        browser.button(name='new_user_button').scroll_into_view()
        """
        nerodia.logger.deprecate('Element#scroll_into_view', 'Element#scroll methods',
                                 ids=['scroll_into_view'])
        return Point(**self._element_call(lambda: self.el.location_once_scrolled_into_view))
    @property
    def location(self):
        """
        Get the location of the element (x, y)
        :rtype: Point
        :Example:
        browser.button(name='new_user_button').location
        """
        return Point(**self._element_call(lambda: self.el.location))
    @property
    def size(self):
        """
        Get the size of the element (width, height)
        :rtype: Dimension
        :Example:
        browser.button(name='new_user_button').size
        """
        return Dimension(**self._element_call(lambda: self.el.size))
    @property
    def height(self):
        """
        Get the height of the element
        :rtype: int
        :Example:
        browser.button(name='new_user_button').height
        """
        return self.size.height
    @property
    def width(self):
        """
        Get the width of the element
        :rtype: int
        :Example:
        browser.button(name='new_user_button').width
        """
        return self.size.width
    @property
    def center(self):
        """
        Get the center coordinates of the element
        :rtype: Point
        :Example:
        browser.button(name='new_user_button').center
        """
        location = self.location
        size = self.size
        return Point(round(location.x + size.width / 2), round(location.y + size.height / 2))
    # British-English alias.
    centre = center
    @property
    def driver(self):
        # Underlying selenium WebDriver, delegated to the query scope.
        return self.query_scope.driver
    @property
    def wd(self):
        """
        Returns underlying Selenium object of the Nerodia Element
        :rtype: selenium.webdriver.remote.webelement.WebElement
        """
        self.assert_exists()
        return self.el
    @property
    def visible(self):
        """
        Returns true if this element is visible on the page
        Raises exception if element does not exist
        :rtype: bool
        """
        nerodia.logger.warning('#visible behavior will be changing slightly, consider '
                               'switching to #present (more details: '
                               'http://watir.com/element-existentialism/',
                               ids=['visible_element'])
        displayed = self._display_check()
        # NOTE(review): the second _display_check() call re-runs the check after
        # a staleness reset to decide whether to emit the deprecation -- confirm
        # intent before simplifying.
        if displayed is None and self._display_check():
            nerodia.logger.deprecate('Checking `#visible is False` to determine a stale '
                                     'element', '`#stale is True`', ids=['stale_visible'])
        if displayed is None:
            raise self._unknown_exception
        return displayed
    @property
    def enabled(self):
        """
        Returns True if the element is present and enabled on the page
        :rtype: bool
        """
        return self._element_call(lambda: self.el.is_enabled(), self.assert_exists)
    @property
    def present(self):
        """
        Returns True if the element exists and is visible on the page
        Returns False if the element does not exist or exists but is not visible
        :rtype: bool
        """
        try:
            return self._display_check()
        except (UnknownObjectException, UnknownFrameException):
            return False
    @property
    def obscured(self):
        """
        Returns if the element's center point is covered by a non-descendant element.
        :rtype: bool
        :Example:
        browser.button(value='Delete').obscured #=> False
        """
        def func():
            # Anything not present at all counts as obscured.
            if not self.present:
                return True
            self.scroll.to()
            return self._execute_js('elementObscured', self)
        return self._element_call(func)
def style(self, prop=None):
"""
Returns given style property of this element
:param prop: property to get
:type prop: str
:rtype: str
:Example:
browser.button(value='Delete').style #=> "border: 4px solid red;"
browser.button(value='Delete').style('border') #=> "4px solid rgb(255, 0, 0)"
"""
if prop:
return self._element_call(lambda: self.el.value_of_css_property(prop))
else:
return str(self.attribute_value('style')).strip()
    def to_subtype(self):
        """
        Cast this Element instance to a more specific subtype
        :Example:
        browser.element(xpath="//input[@type='submit']").to_subtype() #=> #<Button>
        """
        tag = self.tag_name
        from .button import Button
        from .check_box import CheckBox
        from .file_field import FileField
        from .html_elements import HTMLElement
        from .radio import Radio
        from .text_field import TextField
        # Inputs are disambiguated by their type attribute.
        if tag == 'input':
            elem_type = self.attribute_value('type')
            if elem_type in Button.VALID_TYPES:
                klass = Button
            elif elem_type == 'checkbox':
                klass = CheckBox
            elif elem_type == 'radio':
                klass = Radio
            elif elem_type == 'file':
                klass = FileField
            else:
                klass = TextField
        else:
            klass = nerodia.element_class_for(tag) or HTMLElement
        el = klass(self.query_scope, selector=self.selector)
        # Reuse the already-located selenium element in the new wrapper.
        el.cache = self.wd
        return el
    @property
    def browser(self):
        """
        Returns browser
        :rtype: nerodia.browser.Browser
        """
        return self.query_scope.browser
    @property
    def stale(self):
        """
        Returns True if a previously located element is no longer attached to the DOM
        :rtype: bool
        :raises Error: if the element was never located
        """
        if self.el is None:
            raise Error('Can not check staleness of unused element')
        self._ensure_context()
        return self.stale_in_context
    @property
    def stale_in_context(self):
        try:
            self.el.value_of_css_property('staleness_check')  # any wire call checks for staleness
            return False
        except StaleElementReferenceException:
            return True
    def reset(self):
        # Drop the cached selenium element; it is re-located on demand.
        self.el = None
    def locate(self):
        self._ensure_context()
        self.locate_in_context()
        return self
    def build(self):
        # Pass a copy: the selector builder may mutate the dict while building.
        self.selector_builder.build(self.selector.copy())
    @property
    def cache(self):
        return self.el
    @cache.setter
    def cache(self, element):
        """
        Set the cached element. For use when element can be relocated with the provided selector.
        """
        self.el = element
    @property
    def selector_string(self):
        from ..browser import Browser
        if isinstance(self.query_scope, Browser):
            return repr(self.selector)
        else:
            # Include the parent scope's selector chain for nested elements.
            return '{} --> {}'.format(self.query_scope.selector_string, self.selector)
    def wait_for_exists(self):
        # Waits (when relaxed locating is on) until this element and all its
        # parent scopes exist; raises the unknown-object exception on timeout.
        if not nerodia.relaxed_locate:
            return self.assert_exists()
        if self._located:  # Performance shortcut
            return None
        try:
            # Parent scopes must exist before this element can.
            if not isinstance(self.query_scope, Browser):
                self.query_scope.wait_for_exists()
            self.wait_until(lambda e: e.exists, element_reset=True)
        except TimeoutError:
            raise self._unknown_exception('timed out after {} seconds, waiting for {} to be '
                                          'located'.format(nerodia.default_timeout, self))
    def wait_for_present(self):
        # Waits until the element (and its parents) are present/visible.
        pres = self.present
        if not nerodia.relaxed_locate or pres:
            return pres
        try:
            if not isinstance(self.query_scope, Browser):
                self.query_scope.wait_for_present()
            self.wait_until(lambda e: e.present)
        except TimeoutError as e:
            raise self._unknown_exception('element located, but {}'.format(e))
    def wait_for_enabled(self):
        from .button import Button
        from .input import Input
        from .option import Option
        from .select import Select
        if not nerodia.relaxed_locate:
            return self._assert_enabled()
        self.wait_for_exists()
        # Only form controls (and contenteditable elements) have a meaningful
        # enabled state; everything else passes through.
        if not any(isinstance(self, klass) for klass in [Input, Button, Select, Option]) \
                and not self._content_editable:
            return
        if self.enabled:
            return
        try:
            self.wait_until(lambda e: e.enabled)
        except TimeoutError:
            self._raise_disabled()
    def wait_for_writable(self):
        self.wait_for_enabled()
        if not nerodia.relaxed_locate:
            if hasattr(self, 'readonly') and self.readonly:
                self._raise_writable()
        # Elements without a readonly concept are writable once enabled.
        if not hasattr(self, 'readonly') or not self.readonly:
            return
        try:
            self.wait_until(lambda e: not getattr(e, 'readonly', None) or not e.readonly)
        except TimeoutError:
            self._raise_writable()
    def assert_exists(self):
        """
        Locates if not previously found; does not check for staleness for performance reasons
        """
        if not self._located:
            self.locate()
        if not self._located:
            raise self._unknown_exception('unable to locate element: {}'.format(self))
    def locate_in_context(self):
        # Runs the locator against the already-built selector and caches the result.
        self.el = self.locator.locate(self.selector_builder.built)
        return self.el
    # private
    @property
    def _located(self):
        """
        Returns if the element has previously been located
        :rtype: bool
        """
        return self.el is not None
@property
def _raise_writable(self):
raise ObjectReadOnlyException('element present and enabled, but timed out after {} '
'seconds, waiting for {} to not be '
'readonly'.format(nerodia.default_timeout, self))
    def _raise_disabled(self):
        # Raised when the element never became enabled within the timeout.
        raise ObjectDisabledException('element present, but timed out after {} '
                                      'seconds, waiting for {} to be '
                                      'enabled'.format(nerodia.default_timeout, self))
    def _raise_present(self):
        # Raised when the element never became interactable within the timeout.
        raise UnknownObjectException('element located, but timed out after {} seconds, waiting '
                                     'for {} to be present'.format(nerodia.default_timeout, self))
    @property
    def _unknown_exception(self):
        # Exception class meaning "element not found"; subclasses may override.
        return UnknownObjectException
    @property
    def _element_class(self):
        return self.__class__
def _ensure_context(self):
from nerodia.elements.i_frame import IFrame
if isinstance(self.query_scope, Browser) or self.query_scope._located is False or \
(self.query_scope._located is False and self.query_scope.stale):
self.query_scope.locate()
if isinstance(self.query_scope, IFrame):
self.query_scope.switch_to()
    def _assert_enabled(self):
        # Raises immediately (no waiting) when the element is disabled.
        if not self._element_call(lambda: self.el.is_enabled()):
            raise ObjectDisabledException('object is disabled {}'.format(self))
    @classmethod
    def _assert_is_element(cls, obj):
        if not isinstance(obj, Element):
            raise TypeError('expected nerodia.Element, '
                            'got {}:{}'.format(obj, obj.__class__.__name__))
    def _display_check(self):
        """
        Removes duplication in #present? & #visible? and makes setting deprecation notice easier
        """
        check = self._display_check_retry()
        # None means the element went stale mid-check; retry once after reset.
        if check is None:
            return self._display_check_retry()
        return check
    def _display_check_retry(self):
        try:
            self.assert_exists()
            return self.el.is_displayed()
        except StaleElementReferenceException:
            # Implicitly returns None so the caller knows to retry.
            self.reset()
    def _element_call(self, method, precondition=None):
        # Name of the public API method being executed, for logging only.
        caller = stack()[1][3]
        # Share one timeout budget across nested element calls: only the
        # outermost call creates and later resets the browser timer.
        already_locked = self.browser.timer.locked
        if not already_locked:
            from ..wait.timer import Timer
            self.browser.timer = Timer(timeout=nerodia.default_timeout)
        try:
            return self._element_call_check(precondition, method, caller)
        finally:
            nerodia.logger.debug('<- `Completed {}#{}`'.format(self, caller))
            if not already_locked:
                self.browser.timer.reset()
    def _check_condition(self, condition, caller):
        # Verifies the precondition callable (default: bare existence).
        nerodia.logger.debug('<- `Verifying precondition {}#{} for '
                             '{}`'.format(self, condition, caller))
        try:
            if not condition:
                self.assert_exists()
            else:
                condition()
            nerodia.logger.debug('<- `Verified precondition '
                                 '{}#{!r}`'.format(self, condition or 'assert_exists'))
        except self._unknown_exception:
            if condition is None:
                nerodia.logger.debug('<- `Unable to satisfy precondition '
                                     '{}#{}`'.format(self, condition))
                # Fall back to waiting for existence before giving up.
                self._check_condition(self.wait_for_exists, caller)
            else:
                raise
    def _element_call_check(self, precondition, method, caller):
        # Retry loop around precondition + action, translating selenium
        # exceptions into nerodia exceptions.
        nerodia.logger.debug('-> `Executing {}#{}`'.format(self, caller))
        while True:
            try:
                self._check_condition(precondition, caller)
                return method()
            except self._unknown_exception as e:
                # NOTE(review): the result of this retry is not returned and
                # execution falls through to the raise below -- confirm whether
                # a `return` was intended here.
                if precondition is None:
                    self._element_call(method, self.wait_for_exists)
                msg = str(e)
                if self.query_scope.iframe().exists:
                    msg += '; Maybe look in an iframe?'
                custom_attributes = []
                if self.locator:
                    custom_attributes = self.selector_builder.custom_attributes
                if custom_attributes:
                    msg += '; Nerodia treated {!r} as a non-HTML compliant attribute, ' \
                           'ensure that was intended'.format(custom_attributes)
                raise self._unknown_exception(msg)
            except StaleElementReferenceException:
                # Element went stale between precondition and action:
                # relocate once and retry the action immediately.
                self.reset()
                self._check_condition(precondition, caller)
                return method()
            except ElementNotInteractableException:
                if (self.browser.timer.remaining_time <= 0) or \
                        (precondition not in [self.wait_for_present, self.wait_for_enabled,
                                              self.wait_for_writable]):
                    self._raise_present()
                # Time remains on the shared timer: loop and retry.
                continue
            except NoSuchWindowException:
                raise NoMatchingWindowFoundException('browser window was closed')
    def __getattribute__(self, name):
        # Wildcard attribute access (e.g. data-*/aria-*) is routed through
        # attribute_value; everything else uses normal lookup.
        if search(SelectorBuilder.WILDCARD_ATTRIBUTE, name):
            return self.attribute_value(name.replace('_', '-'))
        else:
            return object.__getattribute__(self, name)
    def __getattr__(self, name):
        # Lazily mix in UserEditable methods for contenteditable elements.
        if name in (_[0] for _ in getmembers(UserEditable, predicate=isroutine)) and \
                self.is_content_editable:
            self._content_editable = True
            setattr(self, name, six.create_bound_method(
                six.get_unbound_function(getattr(UserEditable, name)), self))
            return getattr(self, name)
        else:
            raise AttributeError("Element '{}' has no attribute "
                                 "'{}'".format(self.__class__.__name__.capitalize(), name))
|
|
# Copyright 2008-2015 Nokia Solutions and Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from robot.errors import (DataError, ExecutionFailed, ExecutionPassed,
PassExecution, ReturnFromKeyword,
UserKeywordExecutionFailed)
from robot.variables import is_list_var
from robot.output import LOGGER
from robot import utils
from .arguments import (ArgumentMapper, ArgumentResolver,
EmbeddedArguments, UserKeywordArgumentParser)
from .handlerstore import HandlerStore
from .keywordrunner import KeywordRunner
from .timeouts import KeywordTimeout
from .usererrorhandler import UserErrorHandler
class UserLibrary(object):
    """Collection of user keyword handlers parsed from one resource/suite file."""
    def __init__(self, user_keywords, path=None):
        basename = os.path.basename(path) if path else None
        self.name = os.path.splitext(basename)[0] if path else None
        self.handlers = HandlerStore(basename)
        for kw in user_keywords:
            try:
                handler, embedded = self._create_handler(kw)
                self._validate_not_duplicate(handler)
            except DataError as err:
                # Creation errors are logged, and a placeholder handler is
                # registered so the keyword fails informatively at runtime.
                LOGGER.error("Creating user keyword '%s' failed: %s"
                             % (kw.name, unicode(err)))
                handler = UserErrorHandler(kw.name, unicode(err))
                embedded = False
            self.handlers.add(handler, embedded)
    def _create_handler(self, kw):
        # Returns (handler, is_embedded); embedded-argument keywords get a
        # template handler that is instantiated per call.
        embedded = EmbeddedArguments(kw.name)
        if embedded:
            if kw.args:
                raise DataError('Keyword cannot have both normal and embedded '
                                'arguments.')
            return EmbeddedArgsTemplate(kw, self.name, embedded), True
        return UserKeywordHandler(kw, self.name), False
    def _validate_not_duplicate(self, handler):
        if handler.name in self.handlers:
            # Remove the earlier definition too: duplicates invalidate both.
            self.handlers.remove(handler.name)
            raise DataError('Keyword with same name defined multiple times.')
class UserKeywordHandler(object):
    """Executable handler for a single user keyword definition."""
    type = 'user'
    def __init__(self, keyword, libname):
        self.name = keyword.name
        self.keywords = keyword.keywords.normal
        self.return_value = tuple(keyword.return_)
        self.teardown = keyword.keywords.teardown
        self.libname = libname
        # _doc keeps the raw text; doc gets variables substituted later
        # in init_keyword().
        self.doc = self._doc = unicode(keyword.doc)
        self.arguments = UserKeywordArgumentParser().parse(tuple(keyword.args),
                                                           self.longname)
        self._timeout = keyword.timeout
    @property
    def longname(self):
        return '%s.%s' % (self.libname, self.name) if self.libname else self.name
    @property
    def shortdoc(self):
        # First line of the documentation, if any.
        return self.doc.splitlines()[0] if self.doc else ''
    def init_keyword(self, variables):
        # TODO: Should use runner and not change internal state like this.
        # Timeouts should also be cleaned up in general.
        self.doc = variables.replace_string(self._doc, ignore_errors=True)
        if self._timeout:
            self.timeout = KeywordTimeout(self._timeout.value,
                                          self._timeout.message,
                                          variables)
        else:
            self.timeout = KeywordTimeout()
    def run(self, context, arguments):
        context.start_user_keyword(self)
        try:
            return self._run(context, arguments)
        finally:
            context.end_user_keyword()
    def _run(self, context, arguments):
        if context.dry_run:
            return self._dry_run(context, arguments)
        return self._normal_run(context, arguments)
    def _dry_run(self, context, arguments):
        # Dry run: no variables are available for argument resolution.
        positional, kwargs = self._resolve_arguments(arguments)
        error, return_ = self._execute(context, positional, kwargs)
        if error:
            raise error
        return None
    def _normal_run(self, context, arguments):
        positional, kwargs = self._resolve_arguments(arguments, context.variables)
        error, return_ = self._execute(context, positional, kwargs)
        if error and not error.can_continue(context.in_teardown):
            raise error
        # Continuable failures still compute a return value before raising.
        return_value = self._get_return_value(context.variables, return_)
        if error:
            error.return_value = return_value
            raise error
        return return_value
    def _resolve_arguments(self, arguments, variables=None):
        resolver = ArgumentResolver(self.arguments)
        mapper = ArgumentMapper(self.arguments)
        positional, named = resolver.resolve(arguments, variables)
        positional, kwargs = mapper.map(positional, named, variables)
        return positional, kwargs
    def _execute(self, context, positional, kwargs):
        # Runs body and teardown; returns (error-or-pass, return-exception).
        self._set_variables(positional, kwargs, context.variables)
        context.output.trace(lambda: self._log_args(context.variables))
        self._verify_keyword_is_valid()
        self.timeout.start()
        error = return_ = pass_ = None
        runner = KeywordRunner(context)
        try:
            runner.run_keywords(self.keywords)
        except ReturnFromKeyword as exception:
            return_ = exception
            error = exception.earlier_failures
        except ExecutionPassed as exception:
            pass_ = exception
            error = exception.earlier_failures
        except ExecutionFailed as exception:
            error = exception
        # Teardown runs regardless of the body outcome; its error is
        # combined with any body error.
        with context.keyword_teardown(error):
            td_error = self._run_teardown(context)
        if error or td_error:
            error = UserKeywordExecutionFailed(error, td_error)
        return error or pass_, return_
    def _set_variables(self, positional, kwargs, variables):
        before_varargs, varargs = self._split_args_and_varargs(positional)
        for name, value in zip(self.arguments.positional, before_varargs):
            variables['${%s}' % name] = value
        if self.arguments.varargs:
            variables['@{%s}' % self.arguments.varargs] = varargs
        if self.arguments.kwargs:
            variables['&{%s}' % self.arguments.kwargs] = kwargs
    def _split_args_and_varargs(self, args):
        if not self.arguments.varargs:
            return args, []
        positional = len(self.arguments.positional)
        return args[:positional], args[positional:]
    def _log_args(self, variables):
        # Builds the "Arguments: [ ... ]" trace-level log message.
        args = ['${%s}' % arg for arg in self.arguments.positional]
        if self.arguments.varargs:
            args.append('@{%s}' % self.arguments.varargs)
        if self.arguments.kwargs:
            args.append('&{%s}' % self.arguments.kwargs)
        args = ['%s=%s' % (name, utils.prepr(variables[name]))
                for name in args]
        return 'Arguments: [ %s ]' % ' | '.join(args)
    def _run_teardown(self, context):
        # Returns the teardown failure (if any) instead of raising it.
        if not self.teardown:
            return None
        try:
            name = context.variables.replace_string(self.teardown.name)
        except DataError as err:
            return ExecutionFailed(unicode(err), syntax=True)
        # 'NONE' (or empty) disables the teardown at runtime.
        if name.upper() in ('', 'NONE'):
            return None
        runner = KeywordRunner(context)
        try:
            runner.run_keyword(self.teardown, name)
        except PassExecution:
            return None
        except ExecutionFailed as err:
            return err
        return None
    def _verify_keyword_is_valid(self):
        if not (self.keywords or self.return_value):
            raise DataError("User keyword '%s' contains no keywords."
                            % self.name)
    def _get_return_value(self, variables, return_):
        # Explicit RETURN wins over the static [Return] setting.
        ret = self.return_value if not return_ else return_.return_value
        if not ret:
            return None
        contains_list_var = any(is_list_var(item) for item in ret)
        try:
            ret = variables.replace_list(ret)
        except DataError as err:
            raise DataError('Replacing variables from keyword return value '
                            'failed: %s' % unicode(err))
        # A single scalar value is unwrapped unless a list variable was used.
        if len(ret) != 1 or contains_list_var:
            return ret
        return ret[0]
class EmbeddedArgsTemplate(UserKeywordHandler):
    """Template for user keywords whose arguments are embedded in the name."""
    def __init__(self, keyword, libname, embedded):
        UserKeywordHandler.__init__(self, keyword, libname)
        self.keyword = keyword
        self.embedded_name = embedded.name
        self.embedded_args = embedded.args
    def matches(self, name):
        """Return True when `name` matches this keyword's embedded-name pattern."""
        return bool(self.embedded_name.match(name))
    def create(self, name):
        """Instantiate a runnable handler bound to the concrete call `name`."""
        return EmbeddedArgs(name, self)
class EmbeddedArgs(UserKeywordHandler):
    """Handler for one concrete call to a keyword with embedded arguments."""
    def __init__(self, name, template):
        match = template.embedded_name.match(name)
        if not match:
            raise ValueError('Does not match given name')
        UserKeywordHandler.__init__(self, template.keyword, template.libname)
        # Materialize as a list: on Python 3 `zip` returns a one-shot
        # iterator, which would leave embedded_args empty after the first
        # _run and break re-execution of the keyword.
        self.embedded_args = list(zip(template.embedded_args, match.groups()))
        self.name = name
        self.orig_name = template.name
    def _run(self, context, args):
        if not context.dry_run:
            variables = context.variables
            self._resolve_arguments(args, variables)  # validates no args given
            # Bind each embedded argument as a scalar variable for the body.
            for name, value in self.embedded_args:
                variables['${%s}' % name] = variables.replace_scalar(value)
        return UserKeywordHandler._run(self, context, args)
|
|
"""
Python Interchangeable Virtual Instrument Library
Copyright (c) 2012-2017 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import math
from .. import ivi
from .. import dmm
from . import common
# IVI measurement function name -> SCPI SENSe:FUNCtion short form.
MeasurementFunctionMapping = {
        'dc_volts': 'volt',
        'ac_volts': 'volt:ac',
        'dc_current': 'curr',
        'ac_current': 'curr:ac',
        'two_wire_resistance': 'res',
        'four_wire_resistance': 'fres',
        'frequency': 'freq',
        'period': 'per',
        'temperature': 'temp',
        'capacitance': 'cap',
        'continuity': 'cont',
        'diode': 'diod'}
# Function -> SCPI command used to query/set the measurement range.
MeasurementRangeMapping = {
        'dc_volts': 'volt:dc:range',
        'ac_volts': 'volt:ac:range',
        'dc_current': 'curr:dc:range',
        'ac_current': 'curr:ac:range',
        'two_wire_resistance': 'res:range',
        'four_wire_resistance': 'fres:range',
        'frequency': 'freq:range:lower',
        'period': 'per:range:lower',
        'capacitance': 'cap:range'}
# Function -> SCPI command controlling auto-ranging (not all functions support it).
MeasurementAutoRangeMapping = {
        'dc_volts': 'volt:dc:range:auto',
        'ac_volts': 'volt:ac:range:auto',
        'dc_current': 'curr:dc:range:auto',
        'ac_current': 'curr:ac:range:auto',
        'two_wire_resistance': 'res:range:auto',
        'four_wire_resistance': 'fres:range:auto',
        'capacitance': 'cap:range:auto'}
# Function -> SCPI command controlling measurement resolution.
MeasurementResolutionMapping = {
        'dc_volts': 'volt:dc:resolution',
        'ac_volts': 'volt:ac:resolution',
        'dc_current': 'curr:dc:resolution',
        'ac_current': 'curr:ac:resolution',
        'two_wire_resistance': 'res:resolution',
        'four_wire_resistance': 'fres:resolution'}
# IVI trigger source -> SCPI trigger source mnemonic.
TriggerSourceMapping = {
        'bus': 'bus',
        'external': 'ext',
        'immediate': 'imm'}
class Base(common.IdnCommand, common.ErrorQuery, common.Reset, common.SelfTest,
ivi.Driver,
dmm.Base):
"Generic SCPI IVI DMM driver"
    def __init__(self, *args, **kwargs):
        # setdefault so subclasses can pre-set these before delegating here.
        self.__dict__.setdefault('_instrument_id', '')
        # early define of _do_scpi_init
        self.__dict__.setdefault('_do_scpi_init', True)
        super(Base, self).__init__(*args, **kwargs)
        self._self_test_delay = 40
        # IVI identity metadata reported through the identity group.
        self._identity_description = "Generic SCPI IVI DMM driver"
        self._identity_identifier = ""
        self._identity_revision = ""
        self._identity_vendor = ""
        self._identity_instrument_manufacturer = ""
        self._identity_instrument_model = ""
        self._identity_instrument_firmware_revision = ""
        self._identity_specification_major_version = 4
        self._identity_specification_minor_version = 1
        self._identity_supported_instrument_models = ['DMM']
def _initialize(self, resource = None, id_query = False, reset = False, **keywargs):
"Opens an I/O session to the instrument."
super(Base, self)._initialize(resource, id_query, reset, **keywargs)
# interface clear
if not self._driver_operation_simulate:
self._clear()
# check ID
if id_query and not self._driver_operation_simulate:
id = self.identity.instrument_model
id_check = self._instrument_id
id_short = id[:len(id_check)]
if id_short != id_check:
raise Exception("Instrument ID mismatch, expecting %s, got %s", id_check, id_short)
# reset
if reset:
self.utility.reset()
def _utility_disable(self):
pass
def _utility_lock_object(self):
pass
def _utility_unlock_object(self):
pass
def _get_measurement_function(self):
if not self._driver_operation_simulate and not self._get_cache_valid():
value = self._ask(":sense:function?").lower().strip('"')
value = [k for k,v in MeasurementFunctionMapping.items() if v==value][0]
self._measurement_function = value
self._set_cache_valid()
return self._measurement_function
def _set_measurement_function(self, value):
if value not in MeasurementFunctionMapping:
raise ivi.ValueNotSupportedException()
if not self._driver_operation_simulate:
self._write(":sense:function '%s'" % MeasurementFunctionMapping[value])
self._measurement_function = value
self._set_cache_valid()
self._set_cache_valid(False, 'range')
self._set_cache_valid(False, 'auto_range')
self._set_cache_valid(False, 'resolution')
def _get_range(self):
if not self._driver_operation_simulate and not self._get_cache_valid():
func = self._get_measurement_function()
if func in MeasurementRangeMapping:
cmd = MeasurementRangeMapping[func]
value = float(self._ask("%s?" % (cmd)))
self._range = value
self._set_cache_valid()
return self._range
def _set_range(self, value):
value = float(value)
# round up to even power of 10
value = math.pow(10, math.ceil(math.log10(value)))
if not self._driver_operation_simulate:
func = self._get_measurement_function()
if func in MeasurementRangeMapping:
cmd = MeasurementRangeMapping[func]
self._write("%s %g" % (cmd, value))
self._range = value
self._set_cache_valid()
def _get_auto_range(self):
if not self._driver_operation_simulate and not self._get_cache_valid():
func = self._get_measurement_function()
if func in MeasurementAutoRangeMapping:
cmd = MeasurementAutoRangeMapping[func]
value = int(self._ask("%s?" % (cmd)))
if value == 0:
value = 'off'
elif value == 1:
value = 'on'
self._auto_range = value
self._set_cache_valid()
return self._auto_range
def _set_auto_range(self, value):
if value not in dmm.Auto2:
raise ivi.ValueNotSupportedException()
if not self._driver_operation_simulate:
func = self._get_measurement_function()
if func in MeasurementAutoRangeMapping:
cmd = MeasurementAutoRangeMapping[func]
self._write("%s %d" % (cmd, int(value == 'on')))
self._auto_range = value
self._set_cache_valid()
def _get_resolution(self):
if not self._driver_operation_simulate and not self._get_cache_valid():
func = self._get_measurement_function()
if func in MeasurementResolutionMapping:
cmd = MeasurementResolutionMapping[func]
value = float(self._ask("%s?" % (cmd)))
self._resolution = value
self._set_cache_valid()
return self._resolution
def _set_resolution(self, value):
value = float(value)
# round up to even power of 10
value = math.pow(10, math.ceil(math.log10(value)))
if not self._driver_operation_simulate:
func = self._get_measurement_function()
if func in MeasurementResolutionMapping:
cmd = MeasurementResolutionMapping[func]
self._write("%s %g" % (cmd, value))
self._resolution = value
self._set_cache_valid()
def _get_trigger_delay(self):
if not self._driver_operation_simulate and not self._get_cache_valid():
value = float(self._ask("trigger:delay?"))
self._trigger_delay = value
self._set_cache_valid()
return self._trigger_delay
def _set_trigger_delay(self, value):
value = float(value)
if not self._driver_operation_simulate:
self._write('trigger:delay %g' % value)
self._trigger_delay = value
self._set_cache_valid()
def _get_trigger_delay_auto(self):
if not self._driver_operation_simulate and not self._get_cache_valid():
value = bool(int(self._ask("trigger:delay:auto?")))
self._trigger_delay_auto = value
self._set_cache_valid()
return self._trigger_delay_auto
def _set_trigger_delay_auto(self, value):
value = bool(value)
if not self._driver_operation_simulate:
self._write('trigger:delay:auto %d' % int(value))
self._trigger_delay_auto = value
self._set_cache_valid()
def _get_trigger_source(self):
if not self._driver_operation_simulate and not self._get_cache_valid():
value = self._ask("trigger:source?").lower()
value = [k for k,v in TriggerSourceMapping.items() if v==value][0]
self._trigger_source = value
self._set_cache_valid()
return self._trigger_source
def _set_trigger_source(self, value):
if value not in TriggerSourceMapping:
raise ivi.ValueNotSupportedException()
if not self._driver_operation_simulate:
self._write(":trigger:source %s" % TriggerSourceMapping[value])
self._trigger_source = value
def _measurement_abort(self):
if not self._driver_operation_simulate:
self._write(":abort")
def _measurement_fetch(self, max_time):
if not self._driver_operation_simulate:
return float(self._ask(":fetch?"))
return 0.0
def _measurement_initiate(self):
if not self._driver_operation_simulate:
self._write(":initiate")
def _measurement_is_out_of_range(self, value):
return self._measurement_is_over_range(value) or self._measurement_is_under_range(value)
def _measurement_is_over_range(self, value):
return value == 9.9e+37
def _measurement_is_under_range(self, value):
return value == -9.9e+37
def _measurement_read(self, max_time):
if not self._driver_operation_simulate:
return float(self._ask(":read?"))
return 0.0
class MultiPoint(dmm.MultiPoint):
    "Extension IVI methods for DMMs capable of acquiring measurements based on multiple triggers"

    def _get_trigger_measurement_complete_destination(self):
        # Not queryable over SCPI; report the cached software setting.
        return self._trigger_measurement_complete_destination

    def _set_trigger_measurement_complete_destination(self, value):
        self._trigger_measurement_complete_destination = str(value)

    def _get_trigger_multi_point_sample_count(self):
        """Samples taken per trigger event (cached)."""
        if not self._driver_operation_simulate and not self._get_cache_valid():
            self._trigger_multi_point_sample_count = int(self._ask("sample:count?"))
            self._set_cache_valid()
        return self._trigger_multi_point_sample_count

    def _set_trigger_multi_point_sample_count(self, value):
        count = int(value)
        if not self._driver_operation_simulate:
            self._write("sample:count %d" % count)
        self._trigger_multi_point_sample_count = count
        self._set_cache_valid()

    def _get_trigger_multi_point_sample_interval(self):
        # Software-only setting; never sent to the instrument.
        return self._trigger_multi_point_sample_interval

    def _set_trigger_multi_point_sample_interval(self, value):
        self._trigger_multi_point_sample_interval = int(value)

    def _get_trigger_multi_point_sample_trigger(self):
        # Software-only setting; never sent to the instrument.
        return self._trigger_multi_point_sample_trigger

    def _set_trigger_multi_point_sample_trigger(self, value):
        self._trigger_multi_point_sample_trigger = str(value)

    def _get_trigger_multi_point_count(self):
        """Number of trigger events to accept; float('inf') for continuous."""
        if not self._driver_operation_simulate and not self._get_cache_valid():
            raw = float(self._ask("trigger:count?"))
            # 9.9e37 is the SCPI encoding of "infinite"
            self._trigger_multi_point_count = float('inf') if raw >= 9.9e37 else int(raw)
            self._set_cache_valid()
        return self._trigger_multi_point_count

    def _set_trigger_multi_point_count(self, value):
        numeric = float(value)
        infinite = numeric >= 9.9e37 or numeric == float('inf')
        count = float('inf') if infinite else int(value)
        if not self._driver_operation_simulate:
            if count == float('inf'):
                self._write("trigger:count inf")
            else:
                self._write("trigger:count %d" % count)
        self._trigger_multi_point_count = count
        self._set_cache_valid()

    def _measurement_fetch_multi_point(self, max_time, num_of_measurements=0):
        """Fetch all buffered readings as a list of floats."""
        if self._driver_operation_simulate:
            total = self._trigger_multi_point_count * self._trigger_multi_point_sample_count
            return [0.0 for _ in range(total)]
        return self._ask_for_values(":fetch?", array=False)

    def _measurement_read_multi_point(self, max_time, num_of_measurements=0):
        """Initiate, wait, and return all readings as a list of floats."""
        if self._driver_operation_simulate:
            total = self._trigger_multi_point_count * self._trigger_multi_point_sample_count
            return [0.0 for _ in range(total)]
        return self._ask_for_values(":read?", array=False)
class SoftwareTrigger(dmm.SoftwareTrigger):
    "Extension IVI methods for DMMs that can initiate a measurement based on a software trigger signal"

    def _send_software_trigger(self):
        # *TRG is only sent to real hardware; in simulation this is a no-op.
        if self._driver_operation_simulate:
            return
        self._write("*trg")
|
|
#!/usr/bin/env python
#
# Copyright (c) 2016-2020 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import multiprocessing.pool
import ctypes
import atexit
import sys
import os
from .api import *
from .api import __all__ as api__all
from .pool import *
from .pool import __all__ as pool__all
__all__ = ["Monkey", "is_active"] + api__all + pool__all
__doc__ = """
Python API for Intel(R) Threading Building Blocks (Intel(R) TBB)
extended with standard Python's pools implementation and monkey-patching.
Command-line interface example:
$ python -m tbb $your_script.py
Runs your_script.py in context of tbb.Monkey
"""
is_active = False
""" Indicates whether TBB context is activated """
ipc_enabled = False
""" Indicates whether IPC mode is enabled """
# Shared library implementing inter-process resource coordination on Linux.
libirml = "libirml.so.1"
def _test(arg=None):
    """Run the module self-tests (dispatched from ``python -m tbb test``).

    :param arg: optional argument forwarded to the test runner.
    """
    import platform
    if platform.system() == "Linux":
        # Fail fast if the IPC support library cannot be loaded at all.
        ctypes.CDLL(libirml)
        # os.system returns the wait status: 256 corresponds to grep exit
        # code 1, i.e. the extension is NOT linked against the Intel
        # compiler runtime libraries, which is the expected state here.
        assert 256 == os.system("ldd "+_api.__file__+"| grep -E 'libimf|libsvml|libintlc'")
    from .test import test
    test(arg)
    print("done")
def tbb_process_pool_worker27(inqueue, outqueue, initializer=None, initargs=(),
                              maxtasks=None):
    """Pool worker entry point for Python 2.7 pools.

    Runs the standard :mod:`multiprocessing.pool` worker loop and, when IPC
    coordination is enabled, returns this process's resources to the shared
    resource manager after the loop exits.
    """
    from multiprocessing.pool import worker
    worker(inqueue, outqueue, initializer, initargs, maxtasks)
    if ipc_enabled:
        try:
            librml = ctypes.CDLL(libirml)
            librml.release_resources()
        except Exception:
            # Bug fix: was a bare "except:", which also swallowed
            # SystemExit/KeyboardInterrupt during interpreter shutdown.
            print("Warning: Can not load ", libirml, file=sys.stderr)
class TBBProcessPool27(multiprocessing.pool.Pool):
    """Process pool for Python 2.7 whose workers release IPC resources
    when they exit (see tbb_process_pool_worker27)."""
    def _repopulate_pool(self):
        """Bring the number of pool processes up to the specified number,
        for use after reaping workers which have exited.
        """
        from multiprocessing.util import debug
        # Mirrors multiprocessing.pool.Pool._repopulate_pool but spawns
        # tbb_process_pool_worker27 instead of the stock worker function.
        # NOTE(review): relies on multiprocessing private attributes
        # (_processes, _pool, _inqueue, ...) — tied to the 2.7 stdlib layout.
        for i in range(self._processes - len(self._pool)):
            w = self.Process(target=tbb_process_pool_worker27,
                             args=(self._inqueue, self._outqueue,
                                   self._initializer,
                                   self._initargs, self._maxtasksperchild)
                            )
            self._pool.append(w)
            w.name = w.name.replace('Process', 'PoolWorker')
            w.daemon = True
            w.start()
            debug('added worker')

    def __del__(self):
        # Close the pool and reap every worker when the pool is collected.
        self.close()
        for p in self._pool:
            p.join()

    def __exit__(self, *args):
        # Context-manager exit: close the pool and wait for the workers.
        self.close()
        for p in self._pool:
            p.join()
def tbb_process_pool_worker3(inqueue, outqueue, initializer=None, initargs=(),
                             maxtasks=None, wrap_exception=False):
    """Pool worker entry point for Python 3 pools.

    Runs the standard :mod:`multiprocessing.pool` worker loop and, when IPC
    coordination is enabled, returns this process's resources to the shared
    resource manager after the loop exits.
    """
    from multiprocessing.pool import worker
    worker(inqueue, outqueue, initializer, initargs, maxtasks, wrap_exception)
    if ipc_enabled:
        try:
            librml = ctypes.CDLL(libirml)
            librml.release_resources()
        except Exception:
            # Bug fix: was a bare "except:", which also swallowed
            # SystemExit/KeyboardInterrupt during interpreter shutdown.
            print("Warning: Can not load ", libirml, file=sys.stderr)
class TBBProcessPool3(multiprocessing.pool.Pool):
    """Process pool for Python 3 whose workers release IPC resources
    when they exit (see tbb_process_pool_worker3)."""
    def _repopulate_pool(self):
        """Bring the number of pool processes up to the specified number,
        for use after reaping workers which have exited.
        """
        from multiprocessing.util import debug
        # Mirrors multiprocessing.pool.Pool._repopulate_pool but spawns
        # tbb_process_pool_worker3 instead of the stock worker function.
        # NOTE(review): relies on multiprocessing private attributes
        # (_processes, _pool, _wrap_exception, ...) — layout changed in 3.8+;
        # verify against the targeted stdlib version.
        for i in range(self._processes - len(self._pool)):
            w = self.Process(target=tbb_process_pool_worker3,
                             args=(self._inqueue, self._outqueue,
                                   self._initializer,
                                   self._initargs, self._maxtasksperchild,
                                   self._wrap_exception)
                            )
            self._pool.append(w)
            w.name = w.name.replace('Process', 'PoolWorker')
            w.daemon = True
            w.start()
            debug('added worker')

    def __del__(self):
        # Close the pool and reap every worker when the pool is collected.
        self.close()
        for p in self._pool:
            p.join()

    def __exit__(self, *args):
        # Context-manager exit: close the pool and wait for the workers.
        self.close()
        for p in self._pool:
            p.join()
class Monkey:
    """
    Context manager which replaces standard multiprocessing.pool
    implementations with tbb.pool using monkey-patching. It also enables TBB
    threading for Intel(R) Math Kernel Library (Intel(R) MKL). For example:

        with tbb.Monkey():
            run_my_numpy_code()

    It allows multiple parallel tasks to be executed on the same thread pool
    and coordinate number of threads across multiple processes thus avoiding
    overheads from oversubscription.
    """
    # Class-level maps: original attribute objects and the modules they were
    # patched on, used by __exit__ to undo the monkey-patching.
    _items = {}
    _modules = {}

    def __init__(self, max_num_threads=None, benchmark=False):
        """
        Create context manager for running under TBB scheduler.
        :param max_num_threads: if specified, limits maximal number of threads
        :param benchmark: if specified, blocks in initialization until requested number of threads are ready
        """
        if max_num_threads:
            # Keep the global_control object alive on self for the whole
            # lifetime of the Monkey instance.
            self.ctl = global_control(global_control.max_allowed_parallelism, int(max_num_threads))
        if benchmark:
            if not max_num_threads:
                max_num_threads = default_num_threads()
            from .api import _concurrency_barrier
            _concurrency_barrier(int(max_num_threads))

    def _patch(self, class_name, module_name, obj):
        """Replace ``module_name.class_name`` with *obj*, remembering the
        original attribute so that __exit__ can restore it."""
        m = self._modules[class_name] = __import__(module_name, globals(),
                                                   locals(), [class_name])
        if m is None:  # fixed: identity comparison instead of "== None"
            return
        oldattr = getattr(m, class_name, None)
        if oldattr is None:  # fixed: identity comparison instead of "== None"
            self._modules[class_name] = None
            return
        self._items[class_name] = oldattr
        setattr(m, class_name, obj)

    def __enter__(self):
        global is_active
        assert not is_active, "tbb.Monkey does not support nesting yet"
        is_active = True
        # Remember the current threading layers so __exit__ can restore them.
        self.env_mkl = os.getenv('MKL_THREADING_LAYER')
        os.environ['MKL_THREADING_LAYER'] = 'TBB'
        self.env_numba = os.getenv('NUMBA_THREADING_LAYER')
        os.environ['NUMBA_THREADING_LAYER'] = 'TBB'
        if ipc_enabled:
            # Process pools only need patching in IPC mode.
            if sys.version_info.major == 2 and sys.version_info.minor >= 7:
                self._patch("Pool", "multiprocessing.pool", TBBProcessPool27)
            elif sys.version_info.major == 3 and sys.version_info.minor >= 5:
                self._patch("Pool", "multiprocessing.pool", TBBProcessPool3)
        self._patch("ThreadPool", "multiprocessing.pool", Pool)
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        global is_active
        assert is_active, "modified?"
        is_active = False
        # Restore the environment variables captured in __enter__.
        if self.env_mkl is None:
            del os.environ['MKL_THREADING_LAYER']
        else:
            os.environ['MKL_THREADING_LAYER'] = self.env_mkl
        if self.env_numba is None:
            del os.environ['NUMBA_THREADING_LAYER']
        else:
            os.environ['NUMBA_THREADING_LAYER'] = self.env_numba
        # Undo the monkey-patching (fixed: iterate items() instead of
        # ".keys()" followed by extra lookups).
        for name, orig in self._items.items():
            setattr(self._modules[name], name, orig)
def init_sem_name():
    """Best-effort setup of the shared-semaphore names in libirml.

    Any failure (library missing, symbols absent) is reported as a warning
    on stderr rather than raised.
    """
    try:
        rml = ctypes.CDLL(libirml)
        rml.set_active_sem_name()
        rml.set_stop_sem_name()
    except Exception as e:
        print("Warning: Can not initialize name of shared semaphores:", e,
              file=sys.stderr)
def tbb_atexit():
    """atexit hook: release the IPC shared semaphores when IPC mode is on."""
    if ipc_enabled:
        try:
            librml = ctypes.CDLL(libirml)
            librml.release_semaphores()
        except Exception:
            # Bug fix: was a bare "except:", which also swallowed
            # SystemExit/KeyboardInterrupt during interpreter shutdown.
            print("Warning: Can not release shared semaphores",
                  file=sys.stderr)
def _main():
    # Run the module specified as the next command line argument
    # python -m TBB user_app.py
    global ipc_enabled
    import platform
    import argparse
    parser = argparse.ArgumentParser(prog="python -m tbb", description="""
            Run your Python script in context of tbb.Monkey, which
            replaces standard Python pools and threading layer of
            Intel(R) Math Kernel Library by implementation based on
            Intel(R) Threading Building Blocks. It enables multiple parallel
            tasks to be executed on the same thread pool and coordinate
            number of threads across multiple processes thus avoiding
            overheads from oversubscription.
            """, formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    # IPC coordination between schedulers is only implemented on Linux.
    if platform.system() == "Linux":
        parser.add_argument('--ipc', action='store_true',
                            help="Enable inter-process (IPC) coordination between Intel TBB schedulers")
    parser.add_argument('-a', '--allocator', action='store_true',
                        help="Enable Intel TBB scalable allocator as a replacement for standard memory allocator")
    parser.add_argument('--allocator-huge-pages', action='store_true',
                        help="Enable huge pages for Intel TBB allocator (implies: -a)")
    parser.add_argument('-p', '--max-num-threads', default=default_num_threads(), type=int,
                        help="Initialize Intel TBB with P max number of threads per process", metavar='P')
    parser.add_argument('-b', '--benchmark', action='store_true',
                        help="Block Intel TBB initialization until all the threads are created before continue the script. "
                             "This is necessary for performance benchmarks that want to exclude lazy scheduler initialization effects from the measurements")
    parser.add_argument('-v', '--verbose', action='store_true',
                        help="Request verbose and version information")
    parser.add_argument('-m', action='store_true', dest='module',
                        help="Executes following as a module")
    parser.add_argument('name', help="Script or module name")
    parser.add_argument('args', nargs=argparse.REMAINDER,
                        help="Command line arguments")
    args = parser.parse_args()
    if args.verbose:
        os.environ["TBB_VERSION"] = "1"
    if platform.system() == "Linux":
        if args.allocator_huge_pages:
            args.allocator = True
        # The scalable allocator must be LD_PRELOAD-ed before the interpreter
        # starts, so set up the environment and re-exec this command once.
        # _TBB_MALLOC_PRELOAD guards against re-exec'ing a second time.
        if args.allocator and not os.environ.get("_TBB_MALLOC_PRELOAD"):
            libtbbmalloc_lib = 'libtbbmalloc_proxy.so.2'
            ld_preload = 'LD_PRELOAD'
            os.environ["_TBB_MALLOC_PRELOAD"] = "1"
            preload_list = filter(None, os.environ.get(ld_preload, "").split(':'))
            if libtbbmalloc_lib in preload_list:
                print('Info:', ld_preload, "contains", libtbbmalloc_lib, "already\n")
            else:
                os.environ[ld_preload] = ':'.join([libtbbmalloc_lib] + list(preload_list))
            if args.allocator_huge_pages:
                assert platform.system() == "Linux"
                try:
                    # Warn if the kernel has no huge pages reserved; the
                    # allocator setting below would then have no effect.
                    with open('/proc/sys/vm/nr_hugepages', 'r') as f:
                        pages = int(f.read())
                    if pages == 0:
                        print("TBB: Pre-allocated huge pages are not currently reserved in the system. To reserve, run e.g.:\n"
                              "\tsudo sh -c 'echo 2000 > /proc/sys/vm/nr_hugepages'")
                    os.environ["TBB_MALLOC_USE_HUGE_PAGES"] = "1"
                except:
                    print("TBB: Failed to read number of pages from /proc/sys/vm/nr_hugepages\n"
                          "\tIs the Linux kernel configured with the huge pages feature?")
                    sys.exit(1)
            # Replace the current process with a fresh interpreter that
            # picks up the LD_PRELOAD just configured.
            os.execl(sys.executable, sys.executable, '-m', 'tbb', *sys.argv[1:])
            assert False, "Re-execution failed"
    # Make the wrapped script see its own name/arguments in sys.argv.
    sys.argv = [args.name] + args.args
    ipc_enabled = platform.system() == "Linux" and args.ipc
    os.environ["IPC_ENABLE"] = "1" if ipc_enabled else "0"
    if ipc_enabled:
        atexit.register(tbb_atexit)
        init_sem_name()
    if not os.environ.get("KMP_BLOCKTIME"):  # TODO move
        os.environ["KMP_BLOCKTIME"] = "0"
    # Built-in subcommands (e.g. "test") dispatch to module-level _<name>
    # functions; anything else is run as a script/module under Monkey.
    if '_' + args.name in globals():
        return globals()['_' + args.name](*args.args)
    else:
        import runpy
        runf = runpy.run_module if args.module else runpy.run_path
        with Monkey(max_num_threads=args.max_num_threads, benchmark=args.benchmark):
            runf(args.name, run_name='__main__')
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.pipeline import ClientRawResponse
from .. import models
class HttpClientFailure(object):
"""HttpClientFailure operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
"""
    def __init__(self, client, config, serializer, deserializer):
        # Service client used to build and send the HTTP requests.
        self._client = client
        # Serializer/deserializer for request bodies and error responses.
        self._serialize = serializer
        self._deserialize = deserializer
        # Configuration of the service client.
        self.config = config
    def head400(
            self, custom_headers=None, raw=False, **operation_config):
        """
        Return 400 status code - should be represented in the client as an
        error

        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :rtype: :class:`Error <fixtures.acceptancetestshttp.models.Error>`
        :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
         if raw=true
        :raises:
         :class:`ErrorException<fixtures.acceptancetestshttp.models.ErrorException>`
         when the response status code is not 2xx (this endpoint always
         answers 400)
        """
        # Construct URL
        url = '/http/failure/client/400'

        # Construct parameters
        query_parameters = {}

        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if custom_headers:
            header_parameters.update(custom_headers)

        # Construct and send request
        request = self._client.head(url, query_parameters)
        response = self._client.send(request, header_parameters, **operation_config)

        if response.status_code < 200 or response.status_code >= 300:
            raise models.ErrorException(self._deserialize, response)

        if raw:
            client_raw_response = ClientRawResponse(None, response)
            return client_raw_response
    def get400(
            self, custom_headers=None, raw=False, **operation_config):
        """
        Return 400 status code - should be represented in the client as an
        error

        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :rtype: :class:`Error <fixtures.acceptancetestshttp.models.Error>`
        :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
         if raw=true
        :raises:
         :class:`ErrorException<fixtures.acceptancetestshttp.models.ErrorException>`
         when the response status code is not 2xx (this endpoint always
         answers 400)
        """
        # Construct URL
        url = '/http/failure/client/400'

        # Construct parameters
        query_parameters = {}

        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if custom_headers:
            header_parameters.update(custom_headers)

        # Construct and send request
        request = self._client.get(url, query_parameters)
        response = self._client.send(request, header_parameters, **operation_config)

        if response.status_code < 200 or response.status_code >= 300:
            raise models.ErrorException(self._deserialize, response)

        if raw:
            client_raw_response = ClientRawResponse(None, response)
            return client_raw_response
    def put400(
            self, boolean_value=None, custom_headers=None, raw=False, **operation_config):
        """
        Return 400 status code - should be represented in the client as an
        error

        :param boolean_value: Simple boolean value true
        :type boolean_value: bool
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :rtype: :class:`Error <fixtures.acceptancetestshttp.models.Error>`
        :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
         if raw=true
        :raises:
         :class:`ErrorException<fixtures.acceptancetestshttp.models.ErrorException>`
         when the response status code is not 2xx (this endpoint always
         answers 400)
        """
        # Construct URL
        url = '/http/failure/client/400'

        # Construct parameters
        query_parameters = {}

        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if custom_headers:
            header_parameters.update(custom_headers)

        # Construct body
        if boolean_value is not None:
            body_content = self._serialize.body(boolean_value, 'bool')
        else:
            body_content = None

        # Construct and send request
        request = self._client.put(url, query_parameters)
        response = self._client.send(
            request, header_parameters, body_content, **operation_config)

        if response.status_code < 200 or response.status_code >= 300:
            raise models.ErrorException(self._deserialize, response)

        if raw:
            client_raw_response = ClientRawResponse(None, response)
            return client_raw_response
    def patch400(
            self, boolean_value=None, custom_headers=None, raw=False, **operation_config):
        """
        Return 400 status code - should be represented in the client as an
        error

        :param boolean_value: Simple boolean value true
        :type boolean_value: bool
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :rtype: :class:`Error <fixtures.acceptancetestshttp.models.Error>`
        :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
         if raw=true
        :raises:
         :class:`ErrorException<fixtures.acceptancetestshttp.models.ErrorException>`
         when the response status code is not 2xx (this endpoint always
         answers 400)
        """
        # Construct URL
        url = '/http/failure/client/400'

        # Construct parameters
        query_parameters = {}

        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if custom_headers:
            header_parameters.update(custom_headers)

        # Construct body
        if boolean_value is not None:
            body_content = self._serialize.body(boolean_value, 'bool')
        else:
            body_content = None

        # Construct and send request
        request = self._client.patch(url, query_parameters)
        response = self._client.send(
            request, header_parameters, body_content, **operation_config)

        if response.status_code < 200 or response.status_code >= 300:
            raise models.ErrorException(self._deserialize, response)

        if raw:
            client_raw_response = ClientRawResponse(None, response)
            return client_raw_response
    def post400(
            self, boolean_value=None, custom_headers=None, raw=False, **operation_config):
        """
        Return 400 status code - should be represented in the client as an
        error

        :param boolean_value: Simple boolean value true
        :type boolean_value: bool
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :rtype: :class:`Error <fixtures.acceptancetestshttp.models.Error>`
        :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
         if raw=true
        :raises:
         :class:`ErrorException<fixtures.acceptancetestshttp.models.ErrorException>`
         when the response status code is not 2xx (this endpoint always
         answers 400)
        """
        # Construct URL
        url = '/http/failure/client/400'

        # Construct parameters
        query_parameters = {}

        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if custom_headers:
            header_parameters.update(custom_headers)

        # Construct body
        if boolean_value is not None:
            body_content = self._serialize.body(boolean_value, 'bool')
        else:
            body_content = None

        # Construct and send request
        request = self._client.post(url, query_parameters)
        response = self._client.send(
            request, header_parameters, body_content, **operation_config)

        if response.status_code < 200 or response.status_code >= 300:
            raise models.ErrorException(self._deserialize, response)

        if raw:
            client_raw_response = ClientRawResponse(None, response)
            return client_raw_response
    def delete400(
            self, boolean_value=None, custom_headers=None, raw=False, **operation_config):
        """
        Return 400 status code - should be represented in the client as an
        error

        :param boolean_value: Simple boolean value true
        :type boolean_value: bool
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :rtype: :class:`Error <fixtures.acceptancetestshttp.models.Error>`
        :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
         if raw=true
        :raises:
         :class:`ErrorException<fixtures.acceptancetestshttp.models.ErrorException>`
         when the response status code is not 2xx (this endpoint always
         answers 400)
        """
        # Construct URL
        url = '/http/failure/client/400'

        # Construct parameters
        query_parameters = {}

        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if custom_headers:
            header_parameters.update(custom_headers)

        # Construct body
        if boolean_value is not None:
            body_content = self._serialize.body(boolean_value, 'bool')
        else:
            body_content = None

        # Construct and send request
        request = self._client.delete(url, query_parameters)
        response = self._client.send(
            request, header_parameters, body_content, **operation_config)

        if response.status_code < 200 or response.status_code >= 300:
            raise models.ErrorException(self._deserialize, response)

        if raw:
            client_raw_response = ClientRawResponse(None, response)
            return client_raw_response
    def head401(
            self, custom_headers=None, raw=False, **operation_config):
        """
        Return 401 status code - should be represented in the client as an
        error

        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :rtype: :class:`Error <fixtures.acceptancetestshttp.models.Error>`
        :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
         if raw=true
        :raises:
         :class:`ErrorException<fixtures.acceptancetestshttp.models.ErrorException>`
         when the response status code is not 2xx (this endpoint always
         answers 401)
        """
        # Construct URL
        url = '/http/failure/client/401'

        # Construct parameters
        query_parameters = {}

        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if custom_headers:
            header_parameters.update(custom_headers)

        # Construct and send request
        request = self._client.head(url, query_parameters)
        response = self._client.send(request, header_parameters, **operation_config)

        if response.status_code < 200 or response.status_code >= 300:
            raise models.ErrorException(self._deserialize, response)

        if raw:
            client_raw_response = ClientRawResponse(None, response)
            return client_raw_response
    def get402(
            self, custom_headers=None, raw=False, **operation_config):
        """
        Return 402 status code - should be represented in the client as an
        error

        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :rtype: :class:`Error <fixtures.acceptancetestshttp.models.Error>`
        :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
         if raw=true
        :raises:
         :class:`ErrorException<fixtures.acceptancetestshttp.models.ErrorException>`
         when the response status code is not 2xx (this endpoint always
         answers 402)
        """
        # Construct URL
        url = '/http/failure/client/402'

        # Construct parameters
        query_parameters = {}

        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if custom_headers:
            header_parameters.update(custom_headers)

        # Construct and send request
        request = self._client.get(url, query_parameters)
        response = self._client.send(request, header_parameters, **operation_config)

        if response.status_code < 200 or response.status_code >= 300:
            raise models.ErrorException(self._deserialize, response)

        if raw:
            client_raw_response = ClientRawResponse(None, response)
            return client_raw_response
    def get403(
            self, custom_headers=None, raw=False, **operation_config):
        """
        Return 403 status code - should be represented in the client as an
        error

        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :rtype: :class:`Error <fixtures.acceptancetestshttp.models.Error>`
        :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
         if raw=true
        :raises:
         :class:`ErrorException<fixtures.acceptancetestshttp.models.ErrorException>`
         when the response status code is not 2xx (this endpoint always
         answers 403)
        """
        # Construct URL
        url = '/http/failure/client/403'

        # Construct parameters
        query_parameters = {}

        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if custom_headers:
            header_parameters.update(custom_headers)

        # Construct and send request
        request = self._client.get(url, query_parameters)
        response = self._client.send(request, header_parameters, **operation_config)

        if response.status_code < 200 or response.status_code >= 300:
            raise models.ErrorException(self._deserialize, response)

        if raw:
            client_raw_response = ClientRawResponse(None, response)
            return client_raw_response
def put404(
self, boolean_value=None, custom_headers=None, raw=False, **operation_config):
"""
Return 404 status code - should be represented in the client as an
error
:param boolean_value: Simple boolean value true
:type boolean_value: bool
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`Error <fixtures.acceptancetestshttp.models.Error>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/http/failure/client/404'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
if boolean_value is not None:
body_content = self._serialize.body(boolean_value, 'bool')
else:
body_content = None
# Construct and send request
request = self._client.put(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, **operation_config)
if response.status_code < 200 or response.status_code >= 300:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def patch405(
self, boolean_value=None, custom_headers=None, raw=False, **operation_config):
"""
Return 405 status code - should be represented in the client as an
error
:param boolean_value: Simple boolean value true
:type boolean_value: bool
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`Error <fixtures.acceptancetestshttp.models.Error>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/http/failure/client/405'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
if boolean_value is not None:
body_content = self._serialize.body(boolean_value, 'bool')
else:
body_content = None
# Construct and send request
request = self._client.patch(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, **operation_config)
if response.status_code < 200 or response.status_code >= 300:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def post406(
self, boolean_value=None, custom_headers=None, raw=False, **operation_config):
"""
Return 406 status code - should be represented in the client as an
error
:param boolean_value: Simple boolean value true
:type boolean_value: bool
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`Error <fixtures.acceptancetestshttp.models.Error>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/http/failure/client/406'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
if boolean_value is not None:
body_content = self._serialize.body(boolean_value, 'bool')
else:
body_content = None
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, **operation_config)
if response.status_code < 200 or response.status_code >= 300:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def delete407(
self, boolean_value=None, custom_headers=None, raw=False, **operation_config):
"""
Return 407 status code - should be represented in the client as an
error
:param boolean_value: Simple boolean value true
:type boolean_value: bool
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`Error <fixtures.acceptancetestshttp.models.Error>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/http/failure/client/407'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
if boolean_value is not None:
body_content = self._serialize.body(boolean_value, 'bool')
else:
body_content = None
# Construct and send request
request = self._client.delete(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, **operation_config)
if response.status_code < 200 or response.status_code >= 300:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def put409(
self, boolean_value=None, custom_headers=None, raw=False, **operation_config):
"""
Return 409 status code - should be represented in the client as an
error
:param boolean_value: Simple boolean value true
:type boolean_value: bool
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`Error <fixtures.acceptancetestshttp.models.Error>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/http/failure/client/409'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
if boolean_value is not None:
body_content = self._serialize.body(boolean_value, 'bool')
else:
body_content = None
# Construct and send request
request = self._client.put(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, **operation_config)
if response.status_code < 200 or response.status_code >= 300:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def head410(
self, custom_headers=None, raw=False, **operation_config):
"""
Return 410 status code - should be represented in the client as an
error
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`Error <fixtures.acceptancetestshttp.models.Error>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/http/failure/client/410'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.head(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code < 200 or response.status_code >= 300:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def get411(
self, custom_headers=None, raw=False, **operation_config):
"""
Return 411 status code - should be represented in the client as an
error
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`Error <fixtures.acceptancetestshttp.models.Error>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/http/failure/client/411'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code < 200 or response.status_code >= 300:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def get412(
self, custom_headers=None, raw=False, **operation_config):
"""
Return 412 status code - should be represented in the client as an
error
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`Error <fixtures.acceptancetestshttp.models.Error>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/http/failure/client/412'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code < 200 or response.status_code >= 300:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def put413(
self, boolean_value=None, custom_headers=None, raw=False, **operation_config):
"""
Return 413 status code - should be represented in the client as an
error
:param boolean_value: Simple boolean value true
:type boolean_value: bool
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`Error <fixtures.acceptancetestshttp.models.Error>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/http/failure/client/413'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
if boolean_value is not None:
body_content = self._serialize.body(boolean_value, 'bool')
else:
body_content = None
# Construct and send request
request = self._client.put(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, **operation_config)
if response.status_code < 200 or response.status_code >= 300:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def patch414(
self, boolean_value=None, custom_headers=None, raw=False, **operation_config):
"""
Return 414 status code - should be represented in the client as an
error
:param boolean_value: Simple boolean value true
:type boolean_value: bool
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`Error <fixtures.acceptancetestshttp.models.Error>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/http/failure/client/414'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
if boolean_value is not None:
body_content = self._serialize.body(boolean_value, 'bool')
else:
body_content = None
# Construct and send request
request = self._client.patch(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, **operation_config)
if response.status_code < 200 or response.status_code >= 300:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def post415(
self, boolean_value=None, custom_headers=None, raw=False, **operation_config):
"""
Return 415 status code - should be represented in the client as an
error
:param boolean_value: Simple boolean value true
:type boolean_value: bool
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`Error <fixtures.acceptancetestshttp.models.Error>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/http/failure/client/415'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
if boolean_value is not None:
body_content = self._serialize.body(boolean_value, 'bool')
else:
body_content = None
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, **operation_config)
if response.status_code < 200 or response.status_code >= 300:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def get416(
self, custom_headers=None, raw=False, **operation_config):
"""
Return 416 status code - should be represented in the client as an
error
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`Error <fixtures.acceptancetestshttp.models.Error>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/http/failure/client/416'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code < 200 or response.status_code >= 300:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def delete417(
self, boolean_value=None, custom_headers=None, raw=False, **operation_config):
"""
Return 417 status code - should be represented in the client as an
error
:param boolean_value: Simple boolean value true
:type boolean_value: bool
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`Error <fixtures.acceptancetestshttp.models.Error>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/http/failure/client/417'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
if boolean_value is not None:
body_content = self._serialize.body(boolean_value, 'bool')
else:
body_content = None
# Construct and send request
request = self._client.delete(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, **operation_config)
if response.status_code < 200 or response.status_code >= 300:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def head429(
self, custom_headers=None, raw=False, **operation_config):
"""
Return 429 status code - should be represented in the client as an
error
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`Error <fixtures.acceptancetestshttp.models.Error>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/http/failure/client/429'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.head(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code < 200 or response.status_code >= 300:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
|
|
import sys
from ..format import Layout
from .core import HTMLRuleBuilder, HTMLNode, make_joiner
from ..rulesets import basic
html_boxy = Layout()

html_boxy.layout = dict(
    # Note: the classes "scalar", "sequence" and "empty" are added by
    # rules in descr.ruleset.basic, so make sure to include that
    # ruleset if you want to make your own layout with rules based on
    # these classes.

    # When the layout is applied, we add the rulesets open + close, so
    # that rules can be added to open without overriding those in
    # close. For instance classes meant for highlighting will be in
    # close so that they get priority over all the others.

    open = basic + HTMLRuleBuilder(
        # Note: a class declaration like ".toplevel" will be
        # inserted before every selector below, so that the rules'
        # effect cannot "leak" outside of what we are displaying. So
        # in effect, "" -> ".toplevel", ".{@str} > span" ->
        # ".toplevel .{@str} > span", and so on.

        # The following will therefore match the top level span:
        ("", {"display": "block"}),

        # And this will match all spans inside that:
        ("span", {"display": "inline-block",
                  "vertical-align": "middle",
                  "font-family": "monospace"}),

        # For text.
        (".{text} *", {"display": "inline"}),
        (".{pre} *", {"display": "inline",
                      "white-space": "pre"
                      }),

        # scalar = @str, @int, @True, @False, ...
        (".{scalar}", {"white-space": "pre",
                       "padding": "3px",
                       "margin": "3px",
                       "max-height": "300px",
                       "overflow": "auto"
                       }),

        # we add a double quote "" before an empty string
        (".{empty}.{@str}::before", {"content": '"\\"\\""'}),

        (".{sequence}", {"padding": "3px",
                         "margin": "3px"}),

        # For empty strings we insert the symbol for an empty set and
        # we remove borders. If you wish to differentiate types of
        # sequences visually using borders, including empty ones, you
        # might need to disable the second rule, for example:
        # (".empty.sequence", {"!clear": True})
        (".{empty}.{sequence}::before", {"content": '"\\2205"'}),
        # (".{empty}.{sequence}", {"border": "0px"}),

        (".stack", {
            "margin": "3px",
            # Whitespace before "display" is a trick to have
            # several entries for it in the CSS (since
            # whitespace is significant here but not in the
            # stylesheet). firefox and webkit will only see
            # this property when prefixed with -moz- or
            # -webkit-
            # FIX: the two vendor-prefixed entries previously used the
            # same single-space key " display", so the dict literal
            # silently dropped "-moz-inline-box" (last duplicate key
            # wins). Each entry now uses a distinct amount of leading
            # whitespace, as the trick described above intends.
            "display": "inline-box",
            " display": "-moz-inline-box",
            "  display": "-webkit-inline-box",
            "-webkit-box-align": "middle",
            "-moz-box-align": "middle",
            "box-align": "middle",
            "-webkit-box-orient": "horizontal",
            "-moz-box-orient": "horizontal",
            "box-orient": "horizontal",
        }),
        (".vstack", {
            ":+classes": "stack",
            "-webkit-box-orient": "vertical",
            "-moz-box-orient": "vertical",
            "box-orient": "vertical",
        }),
        (".hstack", {
            ":+classes": "stack",
            "-webkit-box-orient": "horizontal",
            "-moz-box-orient": "horizontal",
            "box-orient": "horizontal",
        }),
        (".{stack} > span", {"display": "block",
                             "margin": "0px",
                             # "width": "100%"
                             }),

        # Note: to display key/value pairs horizontally you can change
        # vstack to hstack. You might have to add a :-classes
        # instruction to remove vstack if you try to do it
        # programmatically.
        (".{assoc}", {":join": make_joiner(HTMLNode({"assoc_separator"}, [])),
                      ":+classes": "vstack",
                      }),

        # Rules related to printing tracebacks
        (".{@traceback}", {":+classes": "vstack",
                           ":join": make_joiner(HTMLNode({"traceback_separator"}, []))}),
        (".{@traceback} > *", {"display": "block"}),
        (".source_excerpt", {":+classes": "vstack",
                             "width": "100%"
                             }),
        (".source_excerpt > *", {"display": "block"}),
        (".source_header", {"width": "98%"}),
        (".source_header > .path, .source_header > .source_loc",
         {"display": "block", "float": "right"}),
        (".path, .source_loc, .{@frame} > .{+fname}", {":+classes": "scalar"}),
        (".{+fname} + .path::before", {"content": '"in "'}),
        (".path + .source_loc::before", {"content": '"@"',
                                         "font-weight": "normal"}),
        (".{source_code}", {":+classes": "pre"}),
        (".lineno", {"padding-right": "5px",
                     "margin-right": "10px",
                     "display": "inline-block",
                     "text-align": "right"}),

        # Quoting
        (".quote.class_set > *", {"margin-left": "3px", "margin-right": "3px"}),
        (".quote.class_set", {"padding": "3px", "margin": "3px"}),
        (".quote.description", {"padding": "3px", "margin": "3px"}),

        # Tables
        (".table", {"display": "table",
                    "border-collapse": "collapse"}),
        (".table > *", {"display": "table-row"}),
        (".table > * > *", {"display": "table-cell",
                            "padding": "0px 10px 0px 10px"}),
        (".table > .header", {"font-weight": "bold"}),
    ),

    close = HTMLRuleBuilder(
        (".{bold}", {"font-weight": "bold"}),
        (".{black}", {"color": "#000"}),
        (".{red}", {"color": "#f88"}),
        (".{green}", {"color": "#8f8"}),
        (".{yellow}", {"color": "#ff8"}),
        (".{blue}", {"color": "#88f"}),
        (".{magenta}", {"color": "#f8f"}),
        (".{cyan}", {"color": "#8ff"}),
        (".{white}", {"color": "#fff"}),
        (".{hl}", {"font-weight": "bold"}),
        (".{hl1}", {"font-weight": "bold"}),
        (".{hl2}", {"font-weight": "bold"}),
        (".{hl3}", {"font-weight": "bold"}),
        (".{hlE}", {"font-weight": "bold"}),
        # FIX: ``tag = p`` referenced the undefined name ``p`` and raised
        # NameError whenever this :wrap callback ran; the intent is to
        # wrap paragraph nodes in an HTML <p> element, i.e. the string
        # tag name "p".
        (".{par}", {":wrap": lambda x: HTMLNode({}, [x], tag="p")}),
        (".{line}", {":after": lambda a, b: [[{"raw"}, "<br/>"]]}),
        (".{raw}", {":raw": True}),
    ))
# Rules for a "dark" theme (a black, or nearly black background is assumed).
html_boxy.styles["dark"] = dict(
open = HTMLRuleBuilder(
(".{scalar}", {"background-color": "#222"}),
(".{@True}", {"color": "#5f5"}),
(".{@False}", {"color": "#f55"}),
(".{@None}", {"color": "#a88"}),
(".{@int}", {"color": "#88f"}),
(".{@float}", {"color": "#88f"}),
(".{@complex}", {"color": "#88f"}),
(".{@str}", {"color": "#f88"}),
(".{empty}", {"color": "#888"}),
(".{sequence}", {"border": "2px solid #222"}),
(".{empty}.{sequence}", {"border": "2px solid #000"}),
(".{@tuple}:hover", {"border": "2px solid #888"}),
(".{@list}:hover", {"border": "2px solid #a44"}),
(".{@dict}:hover", {"border": "2px solid #484"}),
(".{@set}:hover", {"border": "2px solid #44a"}),
(".{fieldlist}", {"border-bottom": "2px solid #88f"}),
(".assoc:hover .assoc_separator", {"border": "2px solid #fff"}),
(".{assoc_separator}", { "border": "2px solid #888"}),
(".{objectlabel}", {"color": "#88f", "font-weight": "bold", ":+classes": "scalar"}),
(".{objectlabel} + .{assoc_separator}", { "border": "2px solid #88f"}),
(".{fieldlabel}", {"color": "#f88", ":+classes": "scalar"}),
# Traceback
(".{@traceback}", {"border": "1px dashed #888"}),
(".traceback_separator", {"border": "2px solid #888"}),
(".source_header", {"background-color": "#222"}),
(".{+fname} + .path::before", {"color": "#aaa"}),
(".path + .source_loc::before", {"color": "#aaa"}),
(".lineno", {"border-right": "4px solid #88f"}),
(".{@Exception} .{objectlabel}", {"color": "#f00", "text-align": "left"}),
(".{@Exception} .{objectlabel} + .{assoc_separator}", {"border": "2px solid #f88"}),
(".{@Exception} .{fieldlist}", {"border-bottom": "4px solid #f88"}),
(".exception_message", {"display": "block"}),
# Quoting
(".quote.class_set > *", {"background-color": "#222", "color": "#88f"}),
(".quote.class_set", {"border": "2px solid #88f"}),
(".quote.description", {"border": "2px solid #888"}),
(".quote.description:hover", {"border": "2px solid #fff"}),
# Table
(".table", {"border": "1px solid #fff"}),
(".table > .header", {"border-bottom": "2px solid #fff"}),
# HTMLNode
(".{@HTMLNode} > .{+classes}", {"background-color": "#222"}),
(".{@HTMLNode} > .{+classes} > *", {"color": "#88f", "padding": "3px"}),
),
close = HTMLRuleBuilder(
(".{hl}", {"font-weight": "bold"}),
(".{hl1}", {"color": "#ff8", "background-color": "#220"}),
(".{hl2}", {"color": "#8f8", "background-color": "#020"}),
(".{hl3}", {"color": "#88f", "background-color": "#003"}),
(".{hlE}", {"color": "#f88", "background-color": "#300"}),
(".hl.empty::before, .hl1.empty::before, .hl2.empty::before, .hl3.empty::before, .hlE.empty::before", {
"content": '"\\25B6"',
}),
))
# Rules for a "light" theme (a white, or nearly white background is assumed).
html_boxy.styles["light"] = dict(
open = HTMLRuleBuilder(
(".{scalar}", {"background-color": "#eee"}),
(".{@True}", {"color": "#080"}),
(".{@False}", {"color": "#f00"}),
(".{@None}", {"color": "#555"}),
(".{@int}", {"color": "#00a"}),
(".{@float}", {"color": "#00a"}),
(".{@complex}", {"color": "#00a"}),
(".{@str}", {"color": "#a00"}),
(".{empty}", {"color": "#888"}),
(".{sequence}", {"border": "2px solid #eee"}),
(".{empty}.{sequence}", {"border": "2px solid #fff"}),
(".{@tuple}:hover", {"border": "2px solid #bbb"}),
(".{@list}:hover", {"border": "2px solid #f88"}),
(".{@dict}:hover", {"border": "2px solid #6a6"}),
(".{@set}:hover", {"border": "2px solid #88f"}),
(".{fieldlist}", {"border-bottom": "2px solid #00f"}),
(".assoc:hover > .assoc_separator", {"border": "2px solid #000"}),
(".{assoc_separator}", {"border": "2px solid #888"}),
(".{objectlabel}", {"color": "#00f", "font-weight": "bold", ":+classes": "scalar"}),
(".{objectlabel} + .{assoc_separator}", { "border": "2px solid #00f"}),
(".{fieldlabel}", {"color": "#a00", ":+classes": "scalar"}),
# Traceback
(".{@traceback}", {"border": "1px dashed #888"}),
(".traceback_separator", {"border": "2px solid #888"}),
(".source_header", {"background-color": "#eee"}),
(".{+fname} + .path::before", {"color": "#666"}),
(".path + .source_loc::before", {"color": "#666"}),
(".lineno", {"border-right": "4px solid #00f"}),
(".{@Exception} .{objectlabel}", {"color": "#f00", "text-align": "left"}),
(".{@Exception} .{objectlabel} + .{assoc_separator}", {"border": "2px solid #800"}),
(".{@Exception} .{fieldlist}", {"border-bottom": "4px solid #800"}),
(".exception_message", {"display": "block"}),
# Quoting
(".quote.class_set > *", {"background-color": "#eee", "color": "blue"}),
(".quote.class_set", {"border": "2px solid blue"}),
(".quote.description", {"border": "2px solid #aaa"}),
(".quote.description:hover", {"border": "2px solid #000"}),
# Table
(".table", {"border": "1px solid #000"}),
(".table > .header", {"border-bottom": "2px solid #000"}),
# HTMLNode
(".{@HTMLNode} > .{+classes}", {"background-color": "#eee"}),
(".{@HTMLNode} > .{+classes} > *", {"color": "#00f", "padding": "3px"}),
),
close = HTMLRuleBuilder(
(".{hl}", {"font-weight": "bold"}),
(".{hl1}", {"color": "#00f", "background-color": "#eef"}),
(".{hl2}", {"color": "#0a0", "background-color": "#efe"}),
(".{hl3}", {"color": "#a60", "background-color": "#efc"}),
(".{hlE}", {"color": "#f00", "background-color": "#fee"}),
)
)
|
|
'''
synbiochem (c) University of Manchester 2015
synbiochem is licensed under the MIT License.
To view a copy of this license, visit <http://opensource.org/licenses/MIT/>.
@author: neilswainston
'''
# pylint: disable=no-member
# pylint: disable=redundant-unittest-assert
# pylint: disable=too-many-public-methods
import getpass
import os
import unittest
from synbiochem.utils import ice_utils, sbol_utils
from synbiochem.utils.ice_utils import ICEClient, ICEEntry
class TestICEEntry(unittest.TestCase):
'''Test class for ICEEntry.'''
@classmethod
def setUpClass(cls):
try:
std_input = raw_input
except NameError:
std_input = input
ice_url = std_input('ICE url: ')
username = std_input('ICE username: ')
password = getpass.getpass(prompt='ICE password: ')
cls.__ice_client = ICEClient(ice_url, username, password)
def test_get_ice_number(self):
'''Tests get_ice_number method.'''
ice_entry1 = ICEEntry(typ='PLASMID')
self.__ice_client.set_ice_entry(ice_entry1)
self.assertNotEqual(ice_entry1.get_ice_number(), None)
ice_entry2 = ICEEntry(metadata={'type': 'PLASMID'})
self.__ice_client.set_ice_entry(ice_entry2)
self.assertNotEqual(ice_entry2.get_ice_number(), None)
def test_get_ice_number_none(self):
'''Tests get_ice_number method.'''
ice_entry1 = ICEEntry(typ='PLASMID')
self.assertEqual(ice_entry1.get_ice_number(), None)
ice_entry2 = ICEEntry(metadata={'type': 'PLASMID'})
self.assertEqual(ice_entry2.get_ice_number(), None)
def test_get_record_id(self):
'''Tests get_record_id method.'''
ice_entry1 = ICEEntry(typ='PLASMID')
self.__ice_client.set_ice_entry(ice_entry1)
self.assertNotEqual(ice_entry1.get_record_id(), None)
ice_entry2 = ICEEntry(metadata={'type': 'PLASMID'})
self.__ice_client.set_ice_entry(ice_entry2)
self.assertNotEqual(ice_entry2.get_record_id(), None)
def test_get_record_id_none(self):
'''Tests get_record_id method.'''
ice_entry1 = ICEEntry(typ='PLASMID')
self.assertEqual(ice_entry1.get_record_id(), None)
ice_entry2 = ICEEntry(metadata={'type': 'PLASMID'})
self.assertEqual(ice_entry2.get_record_id(), None)
def test_get_type(self):
'''Tests get_type method.'''
ice_entry1 = ICEEntry(typ='PLASMID')
self.assertEqual(ice_entry1.get_type(), 'PLASMID')
ice_entry2 = ICEEntry(metadata={'type': 'PLASMID'})
self.assertEqual(ice_entry2.get_type(), 'PLASMID')
def test_get_metadata(self):
'''Tests get_metadata method.'''
ice_entry1 = ICEEntry(typ='PLASMID')
self.assertEqual(ice_entry1.get_metadata()['type'], 'PLASMID')
ice_entry2 = ICEEntry(metadata={'type': 'PLASMID'})
self.assertEqual(ice_entry2.get_metadata()['type'], 'PLASMID')
    def test_get_sbol_doc(self):
        '''Tests get_dna method on an entry built from an SBOL document.

        NOTE(review): the method name says get_sbol_doc but the body
        exercises get_dna - likely a leftover from a rename; confirm.
        '''
        # _read is a module-level helper (defined elsewhere in this file)
        # that loads a test fixture; presumably it parses the SBOL XML
        # into a dna dict - confirm against its definition.
        dna = _read('sbol.xml')
        ice_entry = ICEEntry(typ='PLASMID', dna=dna)
        self.assertEqual(ice_entry.get_dna(), dna)
    def test_set_value(self):
        '''Tests set_value method.'''
        # Setting a value adds it to the metadata without clobbering the
        # existing 'type' key.
        ice_entry1 = ICEEntry(typ='PLASMID')
        ice_entry1.set_value('creator', 'God')
        self.assertEqual(ice_entry1.get_metadata()['type'], 'PLASMID')
        self.assertEqual(ice_entry1.get_metadata()['creator'], 'God')
        # Round-trip through ICE: persist, re-fetch, overwrite the value,
        # persist again, and check the server kept the newest value.
        self.__ice_client.set_ice_entry(ice_entry1)
        ice_entry2 = self.__ice_client.get_ice_entry(
            ice_entry1.get_ice_number())
        ice_entry2.set_value('creator', 'Aitor Karanka')
        self.__ice_client.set_ice_entry(ice_entry2)
        ice_entry3 = self.__ice_client.get_ice_entry(
            ice_entry1.get_ice_number())
        self.assertEqual(ice_entry3.get_metadata()['creator'], 'Aitor Karanka')
        # Same local behaviour for a metadata-constructed entry.
        ice_entry4 = ICEEntry(metadata={'type': 'PLASMID'})
        ice_entry4.set_value('creator', 'God')
        self.assertEqual(ice_entry4.get_metadata()['type'], 'PLASMID')
        self.assertEqual(ice_entry4.get_metadata()['creator'], 'God')
def test_set_values(self):
'''Tests set_values method.'''
ice_entry1 = ICEEntry(typ='PLASMID')
ice_entry1.set_values({'creator': 'God', 'name': 'Test'})
self.assertEqual(ice_entry1.get_metadata()['type'], 'PLASMID')
self.assertEqual(ice_entry1.get_metadata()['creator'], 'God')
self.assertEqual(ice_entry1.get_metadata()['name'], 'Test')
ice_entry2 = ICEEntry(metadata={'type': 'PLASMID'})
ice_entry2.set_values({'creator': 'God', 'name': 'Test'})
self.assertEqual(ice_entry2.get_metadata()['type'], 'PLASMID')
self.assertEqual(ice_entry2.get_metadata()['creator'], 'God')
self.assertEqual(ice_entry2.get_metadata()['name'], 'Test')
def test_set_dna(self):
'''Tests set_dna method.'''
dna1 = _read('sbol.xml')
dna2 = _read('sbol2.xml')
ice_entry = ICEEntry(typ='PLASMID', dna=dna1)
self.__ice_client.set_ice_entry(ice_entry)
ice_entry.set_dna(dna2)
self.__ice_client.set_ice_entry(ice_entry)
self.assertEqual(ice_entry.get_seq(), dna2['seq'])
def test_get_seq(self):
'''Tests get_seq method.'''
dna = _read('sbol.xml')
ice_entry = ICEEntry(typ='PLASMID', dna=dna)
self.__ice_client.set_ice_entry(ice_entry)
ice_entry = self.__ice_client.get_ice_entry(ice_entry.get_ice_id())
self.assertEqual(ice_entry.get_seq(), dna['seq'])
def test_set_get_parameter(self):
'''Tests set/get_parameter method.'''
ice_entry = ICEEntry(typ='PLASMID')
ice_entry.set_parameter('Cheese', 'Brie')
self.__ice_client.set_ice_entry(ice_entry)
ice_entry = self.__ice_client.get_ice_entry(ice_entry.get_ice_id())
self.assertEqual(ice_entry.get_parameter('Cheese'), 'Brie')
class TestICEClient(unittest.TestCase):
    '''Test class for ICEClient.'''

    @classmethod
    def setUpClass(cls):
        # raw_input only exists on Python 2; fall back to input on Python 3.
        try:
            std_input = raw_input
        except NameError:
            std_input = input
        # Credentials are requested interactively, so this suite needs a
        # live ICE instance and a human at the console.
        ice_url = std_input('ICE url: ')
        username = std_input('ICE username: ')
        password = getpass.getpass(prompt='ICE password: ')
        cls.__ice_client = ICEClient(ice_url, username, password)

    def test_get_ice_entry(self):
        '''Tests get_ice_entry method.'''
        dna = _read('sbol.xml')
        ice_entry_in = ICEEntry(typ='PART', dna=dna)
        self.__ice_client.set_ice_entry(ice_entry_in)
        # Fetch back by ICE number and compare the stored sequence.
        ice_entry_out = self.__ice_client.get_ice_entry(
            ice_entry_in.get_ice_number())
        self.assertEqual(ice_entry_out.get_seq(), dna['seq'])

    def test_set_ice_entry(self):
        '''Tests set_ice_entry method.'''
        dna_in = _read('sbol.xml')
        ice_entry_in = ICEEntry(typ='PLASMID', dna=dna_in)
        self.__ice_client.set_ice_entry(ice_entry_in)
        # A saved entry must be retrievable by its assigned number.
        ice_entry_out = self.__ice_client.get_ice_entry(
            ice_entry_in.get_ice_number())
        self.assertNotEqual(ice_entry_out, None)

    def test_do_blast(self):
        '''Tests do_blast method.'''
        # BLAST against a sequence expected to exist in the test registry.
        result = self.__ice_client.do_blast('tcgagaattcaaaagatctgagataggtaga' +
                                            'agctagacgagaaaccttcccaatcttatca' +
                                            'ttacgaaaggacgtccctatgagcctgatta')
        self.assertTrue(result['resultCount'] > 0)

    def test_do_blast_2(self):
        '''Tests do_blast method.'''
        result = self.__ice_client.do_blast('aggcaaattcagtgaggctgacttctcatct' +
                                            'taaatagttcccttcacgatagccgcctga')
        self.assertTrue(result['resultCount'] > 0)

    def test_get_ice_entries_by_seq(self):
        '''Tests get_ice_entries_by_seq method.'''
        dna = _read('sbol.xml')
        # dna.set_seq(sequence_utils.get_random_dna(4096))
        ice_entry = ICEEntry(typ='PLASMID', dna=dna)
        self.__ice_client.set_ice_entry(ice_entry)
        # Reconnect so the search sees the freshly committed entry.
        self.__ice_client.reconnect()
        result = self.__ice_client.get_ice_entries_by_seq(dna['seq'])
        self.assertTrue(len(result) > 0)

    def test_get_ice_entries_by_seq_2(self):
        '''Tests get_ice_entries_by_seq method.'''
        # Assumes this sequence is already present in the registry -- see
        # test_do_blast_2 above, which uses the same sequence.
        seq = 'aggcaaattcagtgaggctgacttctcatcttaaatagttcccttcacgatagccgcctga'
        result = self.__ice_client.get_ice_entries_by_seq(seq)
        self.assertTrue(len(result) > 0)

    def test_add_link(self):
        '''Tests add_link method.'''
        dna1 = _read('sbol.xml')
        dna2 = _read('sbol2.xml')
        ice_entry1 = ICEEntry(typ='PLASMID', dna=dna1)
        self.__ice_client.set_ice_entry(ice_entry1)
        ice_entry2 = ICEEntry(typ='PLASMID', dna=dna2)
        self.__ice_client.set_ice_entry(ice_entry2)
        self.__ice_client.add_link(
            ice_entry1.get_ice_number(), ice_entry2.get_ice_number())
        # "Refresh" (update metadata)
        ice_entry1 = self.__ice_client.get_ice_entry(ice_entry1.get_ice_id())
        ice_entry2 = self.__ice_client.get_ice_entry(ice_entry2.get_ice_id())
        # The link must be visible from both sides: entry1 as a parent of
        # entry2, entry2 as a linked part of entry1.
        self.assertTrue(ice_entry1.get_ice_number() in
                        [par['id']
                         for par in ice_entry2.get_metadata()['parents']])
        self.assertTrue(ice_entry2.get_ice_number() in
                        [link['id']
                         for link in ice_entry1.get_metadata()['linkedParts']])

    def test_search_groups(self):
        '''Tests get_group_id and search_groups method.'''
        groups = self.__ice_client.get_groups()
        # Searching by a name prefix must return a group with that label.
        for name in groups:
            groups = self.__ice_client.search_groups(name[:-1])
            self.assertTrue(name in [grp['label'] for grp in groups])

    def test_add_permission(self):
        '''Tests add_permission method.'''
        dna = _read('sbol.xml')
        ice_entry = ICEEntry(typ='PLASMID', dna=dna)
        self.__ice_client.set_ice_entry(ice_entry)
        groups = self.__ice_client.get_groups()
        for group_num in groups.values():
            self.__ice_client.add_permission(ice_entry.get_ice_id(), group_num)
        # If test progresses to here, it has succeeded:
        self.assertTrue(True)

    def test_search(self):
        '''Tests search method.'''
        resp = self.__ice_client.search('PLASMID')
        # If test progresses to here, it has succeeded:
        self.assertTrue(resp['resultCount'] > 0)

    def test_search_name(self):
        '''Tests advanced search by name method.'''
        # NOTE(review): relies on the named plasmid pre-existing on the
        # target ICE server -- environment-specific fixture.
        resp = self.__ice_client.search_name('SBC_DE15_PL01', 'PLASMID')
        # If test progresses to here, it has succeeded:
        self.assertTrue(resp)

    def test_search_design(self):
        '''Tests advanced search by name method.'''
        # NOTE(review): the expected count of 46 is pinned to a specific
        # server's contents -- confirm before running elsewhere.
        resp = self.__ice_client.search_design(15)
        # If test progresses to here, it has succeeded:
        self.assertTrue(len(resp) == 46)

    def test_advanced_search(self):
        '''Tests search method.'''
        typ = 'PLASMID'
        # NOTE(review): the expected result count of 5 is environment
        # specific -- confirm against the target server.
        resp = self.__ice_client.advanced_search('*', typ)
        self.assertTrue(len(resp['results']) == 5)
        for result in resp['results']:
            self.assertTrue(result['entryInfo']['type'] == typ)

    def test_get_genbank(self):
        '''Tests get_genbank method.'''
        # NOTE(review): entry 6592 must exist on the target server.
        resp = self.__ice_client.get_genbank(6592)
        self.assertTrue(resp)
class Test(unittest.TestCase):
    '''Test class for ice_utils.'''

    def test_get_ice_number(self):
        '''Tests get_ice_number method.'''
        # Prefix and zero-padding are stripped; strings and plain ints all
        # normalise to the same bare number string.
        for ice_id in ('TEST00063', 'TEST63', '63', 63):
            self.assertEqual(ice_utils.get_ice_number(ice_id, 'TEST'), '63')
def _read(filename):
    '''Reads sbol file.'''
    # Resolve relative to this test module so tests work from any cwd.
    here = os.path.dirname(os.path.realpath(__file__))
    return sbol_utils.read(os.path.join(here, filename))


if __name__ == "__main__":
    unittest.main()
|
|
# Copyright (c) 2012, Daniel Zerbino
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# (1) Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# (2) Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# (3) The name of the author may not be used to
# endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
# IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#!/usr/bin/env python
"""Representing the molecular tree underlying the clonal tree"""
import math

import cnavg.history.debug

from numpy import array
from numpy import ones
# `scipy.stats.poisson` is a frozen distribution object, not a module, so
# the original `import scipy.stats.poisson as poisson` raised ImportError
# at import time; the name has to be pulled in with a from-import.
from scipy.stats import poisson

# Rate constant of the exponential likelihood used for segregating SNPs.
PARAMETER = float(50)
###################################################
## Copy number change
###################################################
def computeCopyNumberChange(event, overlaps):
    """Signed copy-number change contributed by *event* on this edge.

    One unit of ceil(|cycle value|) per overlap; the sign opposes the sign
    of the cycle value at the first overlapping position. Events with no
    overlaps contribute nothing.
    """
    if event not in overlaps:
        return 0
    magnitude = len(overlaps[event]) * math.ceil(abs(event.cycle[0].value))
    if event.cycle[overlaps[event][0]].value > 0:
        return -magnitude
    return magnitude
###################################################
## Enumerating CNV SliceTrees
###################################################
class CNVSliceTree(list):
    """A list of CNVSlices forming one possible molecular history, together
    with its multiplicity (count) and the event that created it."""

    def __init__(self, iter, count, event, prior=None):
        super(CNVSliceTree, self).__init__(iter)
        self.count = count
        self.event = event
        self.prior = prior

    # Lists are unhashable, so fall back to identity hashing.
    def __hash__(self):
        return id(self)

    def __or__(self, other):
        # Concatenate the slices; multiplicities multiply.
        return CNVSliceTree(list(self) + list(other), self.count * other.count, self.event)

    def nodes(self):
        """All CNV nodes appearing anywhere in this tree's slices."""
        # Careful, nodes can be carried over across slices, so set() removes doubles
        flattened = []
        for slice_ in self:
            flattened.extend(slice_)
        return set(flattened)

    def originators(self):
        """Nodes with no parent, i.e. the roots of the molecular tree."""
        return filter(lambda X: X.parent is None, self.nodes())
def organizeByEvents(eventSliceTrees, sliceTrees):
    """Reduce step: group lists of slice trees by their creator event.

    All trees in *sliceTrees* share the same event, so the first one's
    event is used as the grouping key.
    """
    key = sliceTrees[0].event
    if key in eventSliceTrees:
        eventSliceTrees[key].extend(sliceTrees)
    else:
        eventSliceTrees[key] = sliceTrees
    return eventSliceTrees
def crossProduct(accumulated, newList):
    """Reduce step: pairwise `|` product of two lists of slice trees.

    An empty accumulator is the seed of the reduction, so the new list is
    returned unchanged in that case.

    Note: the first parameter was named ``list``, shadowing the builtin;
    renamed here -- every call site invokes this positionally via reduce().
    """
    if not accumulated:
        return newList
    return [X | Y for X in accumulated for Y in newList]
class CNVSlice(set):
    """A horizontal cut of the molecular tree: the set of CNVNodes alive at
    one point of the history, weighted by their total copy number."""

    def __init__(self, iter, parent, count, event):
        super(CNVSlice, self).__init__(iter)
        self.count = count
        # Total copy number in the slice; kept in sync by add()/remove().
        self.weight = float(sum(X.copynumber for X in self))
        if parent is not None:
            parent.children.append(self)
        self.children = []
        self.event = event

    # Sets are unhashable, so fall back to identity semantics.
    def __hash__(self):
        return id(self)

    def __eq__(self, other):
        return self is other

    def extractSliceTrees(self):
        """Enumerate every CNVSliceTree rooted at this slice (recursive)."""
        if not self.children:
            return [CNVSliceTree([self], self.count, self.event)]
        if len(self.children) == 1:
            return [CNVSliceTree(list(tree) + [self], tree.count * self.count, self.event)
                    for tree in self.children[0].extractSliceTrees()]
        # Recursion over every child slice.
        childSliceTrees = [child.extractSliceTrees() for child in self.children]
        # Organize by creator event to avoid self-products.
        childEventSliceTrees = reduce(organizeByEvents, childSliceTrees, dict())
        # Cardinal product of child slice trees, then the iterative step.
        sliceTrees = reduce(crossProduct, childEventSliceTrees.values(), [])
        return [CNVSliceTree(list(tree) + [self], tree.count * self.count, self.event)
                for tree in sliceTrees]

    def cnvNodeWeight(self, cnvNode):
        """Fraction of the slice's total copy number carried by cnvNode."""
        if self.weight == 0:
            return 1
        return cnvNode.copynumber / self.weight

    def add(self, element):
        super(CNVSlice, self).add(element)
        self.weight += element.copynumber

    def remove(self, element):
        super(CNVSlice, self).remove(element)
        self.weight -= element.copynumber
class CNVNode(object):
    """One molecular lineage carrying *copynumber* copies, created by
    *creator* (an event, or None for the root) from *parent*."""

    def __init__(self, copynumber, creator, parent):
        self.copynumber = copynumber
        self.creator = creator
        self.parent = parent

    def shadow(self):
        """The residual lineage left behind with one copy fewer."""
        return CNVNode(self.copynumber - 1, None, self)

    def __str__(self):
        fields = [self.copynumber, self.creator, self.parent.event]
        return "\t".join(str(field) for field in fields)
def newHistorySlice_Node(previousSlice, event, change, cnvNode):
    """Slices obtained when *event* acts on *cnvNode* in *previousSlice*.

    Returns a one-element list (or [] if the node has no copies left to
    act on). The acted-on node is replaced by its shadow (if any copies
    remain) plus a new child node carrying the changed copy number.
    """
    if cnvNode.copynumber == 0:
        return []
    successor = CNVSlice(previousSlice, previousSlice, cnvNode.copynumber, event)
    successor.remove(cnvNode)
    if cnvNode.copynumber - 1 > 0:
        successor.add(cnvNode.shadow())
    # Amplifications yield change+1 copies; deletions drop to zero.
    newCopyNumber = change + 1 if change > 0 else 0
    successor.add(CNVNode(newCopyNumber, event, cnvNode))
    return [successor]
def newHistorySlice(previousSlice, event, change):
    """All slices reachable by letting *event* act on any one node of
    *previousSlice*."""
    successors = []
    for cnvNode in previousSlice:
        successors.extend(newHistorySlice_Node(previousSlice, event, change, cnvNode))
    return successors
def virginBirth(event, change):
    """Parentless node used to patch up an inconsistent history.

    Only defined for non-zero changes: deletions spawn a dead (0-copy)
    node, amplifications spawn change+1 copies.
    """
    assert change != 0
    if change < 0:
        return CNVNode(0, event, None)
    return CNVNode(change + 1, event, None)
def newHistorySlices(previousSlices, event, change):
    """Advance every slice in *previousSlices* through *event*."""
    if change == 0:
        # Copy-neutral event: just clone each slice.
        return [CNVSlice(X, X, 1, event) for X in previousSlices]
    candidates = []
    for parentSlice in previousSlices:
        candidates.extend(newHistorySlice(parentSlice, event, change))
    if candidates:
        return candidates
    # Every node had copy number 0, so no node could absorb the change:
    # the history is inconsistent, but we pick up the pieces as best we
    # can by spawning a fresh parentless node in each slice.
    return [CNVSlice(list(parentSlice) + [virginBirth(event, change)], parentSlice, 1, event)
            for parentSlice in previousSlices]
def getChildren(children, node):
    """Reduce step: register *node* under its parent in the children map."""
    parent = node.parent
    if parent is not None:
        children[parent].append(node)
    return children
def updateLikelihoods(likelihoods, sliceTree, data):
    # Reduce-style accumulator building likelihoods[data[0]][sliceTree][...].
    # NOTE(review): `event` is not defined anywhere in this function's scope,
    # so calling this raises NameError. Presumably the event should come in
    # through `data` or from sliceTree.event -- confirm against callers
    # (none are visible in this file).
    if data[0] not in likelihoods:
        likelihoods[data[0]] = dict()
    if sliceTree not in likelihoods[data[0]]:
        likelihoods[data[0]][sliceTree] = dict()
    likelihoods[data[0]][sliceTree][event] = data[1]
    return likelihoods
###################################################
## CNVTree
###################################################
class CNVTree(object):
    """All possible molecular histories (slice trees) of one copy-number
    edge, given a cactus history and the CNV overlaps on that edge.

    NOTE(review): several names used below (FloatVector, IntVector, dpois)
    are never imported in this module -- they look like rpy2 leftovers --
    so the likelihood paths that reach them raise NameError. Confirm
    whether the rpy2 dependency was dropped intentionally.
    """

    ###################################################
    ## Overall construction
    ###################################################
    def __init__(self, cactusHistory, overlaps, originalCopyNumber):
        # Recoding atomic info
        events = cactusHistory.parent.keys()
        self.cactusHistory = cactusHistory
        if overlaps is None:
            # No overlap data: likelihoods() falls back to a default model.
            self.copyNumberChanges = None
        else:
            self.copyNumberChanges = dict((event, computeCopyNumberChange(event, overlaps)) for event in events)
            self.impactChanges = dict((event, event.ratio * self.copyNumberChanges[event]) for event in events)
            # Reconstructing possible molecular histories
            self.rootNode = CNVNode(originalCopyNumber, None, None)
            self.rootSlice = CNVSlice([self.rootNode], None, 1, None)
            # NOTE(review): relies on Python 2 map() being eager; under
            # Python 3 this map is lazy and would do nothing.
            map(lambda X: self.computeHistorySlices(X,[self.rootSlice]), cactusHistory.roots)
            preSliceTrees = self.rootSlice.extractSliceTrees()
            totalSliceTreeCount = float(sum(X.count for X in preSliceTrees))
            self.sliceTrees = [CNVSliceTree(list(X), X.count, X.event, X.count/totalSliceTreeCount) for X in preSliceTrees]
            # Computing expected impacts
            self.children = dict((X, self.getSliceTreeChildren(X)) for X in self.sliceTrees)
            self.cumulativeImpactChanges = dict((sliceTree, self.cumulativeImpactChanges_SliceTree(sliceTree)) for sliceTree in self.sliceTrees)
            self.totalImpacts = dict((sliceTree, self.totalImpact(sliceTree)) for sliceTree in self.sliceTrees)

    ###################################################
    ## Building the damn thing
    ###################################################
    def computeHistorySlices(self, event, previousSlices):
        # Depth-first walk of the event tree, extending every slice.
        # NOTE(review): same Python-2 eager-map reliance as in __init__.
        eventSlices = newHistorySlices(previousSlices, event, self.copyNumberChanges[event])
        map(lambda X: self.computeHistorySlices(X, eventSlices), self.cactusHistory.children[event])

    def getSliceTreeChildren(self, sliceTree):
        # parent -> [children] map over every CNV node of the slice tree.
        return reduce(getChildren, sliceTree.nodes(), dict((node, []) for node in sliceTree.nodes()))

    def cumulativeImpactChanges_CNVNode(self, cumulativeImpactChanges, cnvNode, sliceTree):
        # Post-order accumulation: children first, then this node's total.
        cumulativeImpactChanges = reduce(lambda X, Y: self.cumulativeImpactChanges_CNVNode(X,Y,sliceTree), self.children[sliceTree][cnvNode], cumulativeImpactChanges)
        cumulativeImpactChanges[cnvNode] = sum(cumulativeImpactChanges[X] for X in self.children[sliceTree][cnvNode]) + sum(C.creator.ratio * self.copyNumberChanges[C.creator] for C in self.children[sliceTree][cnvNode] if C.creator is not None)
        return cumulativeImpactChanges

    def cumulativeImpactChanges_SliceTree(self, sliceTree):
        # node -> cumulative impact change of its whole subtree.
        return reduce(lambda X, Y: self.cumulativeImpactChanges_CNVNode(X,Y,sliceTree), sliceTree.originators(), dict())

    def totalImpact_Node(self, sliceTree, cnvNode):
        # Impact of one originator lineage, floored at zero.
        if cnvNode.creator is not None:
            return max(cnvNode.creator.ratio + self.cumulativeImpactChanges[sliceTree][cnvNode], 0)
        else:
            return max(1 + self.cumulativeImpactChanges[sliceTree][self.rootNode], 0)

    def totalImpact(self, sliceTree):
        # Summed over originators; floored at a small epsilon so later
        # ratios never divide by zero.
        return max(sum(self.totalImpact_Node(sliceTree, cnvNode) for cnvNode in sliceTree.originators()), 0.00001)

    ###################################################
    ## Likelihoods
    ###################################################
    def expectedImpact(self, slice, sliceTree, cnvNode):
        # Expected impact of cnvNode at the time of slice's event.
        if slice.event is not None:
            return max(slice.event.ratio + self.cumulativeImpactChanges[sliceTree][cnvNode], 0)
        else:
            return max(1 + self.cumulativeImpactChanges[sliceTree][self.rootNode], 0)

    def expectedRatio(self, slice, sliceTree, cnvNode):
        # Impact normalised by the slice tree's total impact.
        return self.expectedImpact(slice, sliceTree, cnvNode) / float(self.totalImpacts[sliceTree])

    def expectedCoverages(self, cnvNode, slice, sliceTree, totals):
        # NOTE(review): FloatVector is undefined here (rpy2 leftover).
        return FloatVector(self.expectedRatio(slice, sliceTree, cnvNode) * totals)

    # Poisson likelihoods (R function then numpy array)
    def conditionedLikelihoods(self, cnvNode, slice, sliceTree, counts, totals):
        return poisson.cdf(counts, self.expectedCoverages(cnvNode, slice, sliceTree, totals))

    def segregatingLikelihood(self, vals):
        # TODO The likelihood function of a segregating SNP could be refined
        if vals[1] == 0:
            return 0
        else:
            return PARAMETER * math.exp(-(vals[0]/float(vals[1])) * PARAMETER)

    def segregatingLikelihoodArray(self, counts, totals):
        # Element-wise over paired (count, total) observations.
        return map(self.segregatingLikelihood, zip(counts, totals))

    # Weighted sum
    def likelihoodArray(self, slice, sliceTree, counts, totals):
        if slice is self.rootSlice:
            return self.segregatingLikelihoodArray(counts,totals)
        else:
            return sum(slice.cnvNodeWeight(cnvNode) * self.conditionedLikelihoods(cnvNode, slice, sliceTree, counts, totals) for cnvNode in slice)

    # Going through events
    # Returns dictionary: snv -> likelihood
    def sliceLikelihoods(self, slice, sliceTree, snvs, counts, totals):
        return dict(zip(snvs, self.likelihoodArray(slice, sliceTree, counts, totals)))

    # Turns event -> snv -> likelihood
    # Into array -> likelihood
    def snvLikelihoods(self, snv, timings, eventLikelihoods):
        return array([eventLikelihoods[timing][snv] for timing in timings])

    # Going through slices
    # Returns dictionary: snv -> array -> likelihood
    def sliceTreeLikelihoods(self, sliceTree, snvs, timings, counts, totals):
        # Computing sliceTree stats
        eventLikelihoods = dict((slice.event, self.sliceLikelihoods(slice, sliceTree, snvs, counts, totals)) for slice in sliceTree)
        return dict((snv, self.snvLikelihoods(snv, timings, eventLikelihoods)) for snv in snvs)

    def defaultLikelihoods_SNV(self, timing, counts, totals):
        if timing is None:
            return poisson.cdf(counts, totals)
        else:
            # NOTE(review): dpois and FloatVector are undefined here (rpy2
            # leftovers), so this branch raises NameError if reached.
            return array(dpois(counts, FloatVector(timing.ratio * totals)))

    # Returns dictionary: snv -> array -> likelihood
    def defaultLikelihoods_Event(self, snvs, timing, counts, totals):
        return dict(zip(snvs, self.defaultLikelihoods_SNV(timing, counts, totals)))

    # Returns dictionary: snv -> array -> likelihood
    def defaultLikelihoods(self, snvs, timings, counts, totals):
        eventLikelihoods = dict((timing, self.defaultLikelihoods_Event(snvs, timing, counts, totals)) for timing in timings)
        return dict((snv, self.snvLikelihoods(snv, timings, eventLikelihoods)) for snv in snvs)

    # Returns dictionary: sliceTree -> snv -> array -> likelihood
    def likelihoods(self, counts_list, totals_list, snvs, events, phasedRatio):
        timings = events + [None]
        # Creating objects to avoid multiple redundant creations
        # NOTE(review): IntVector is undefined here (rpy2 leftover).
        counts = IntVector(counts_list)
        totals = array(totals_list) * phasedRatio
        if self.copyNumberChanges is None:
            return dict([(CNVSliceTree([], 1, None, 1), self.defaultLikelihoods(snvs, timings, counts, totals))])
        else:
            return dict((sliceTree, self.sliceTreeLikelihoods(sliceTree, snvs, timings, counts, totals)) for sliceTree in self.sliceTrees)

    ###################################################
    ## Convenience
    ###################################################
    def eventString(self, event):
        # NOTE(review): the format string has six placeholders but only five
        # arguments, and cumulativeImpactChanges is keyed by slice tree (not
        # by event), so this raises if called -- stale debugging helper.
        string = '%i [label="%i,%f,%f,%f,%f"]' % (id(event), self.copyNumberChanges[event], event.ratio, self.impactChanges[event], self.cumulativeImpactChanges[event])
        string += "\n" + "\n".join("%i -> %i" % (id(event), id(X)) for X in self.cactusHistory.children[event])
        return string

    def __str__(self):
        # GraphViz dump of the event tree.
        string = "digraph G {\n"
        string += "\n".join(map(self.eventString, self.cactusHistory.parent.keys())) + "\n"
        string += "}\n"
        return string
##############################################
## Unit testing
##############################################
def main():
    """Exercise CNVTree construction on either a small random history (no
    CLI argument) or on the pickled history file given as argv[1]."""
    sys.setrecursionlimit(5000)
    if len(sys.argv) == 1:
        # No input file: build a small random history to test on.
        G = avg.randomNearEulerianGraph(10)
        C = cactus.Cactus(G)
        N = normalizedCactus.NormalizedCactus(C)
        O = oriented.OrientedCactus(N)
        H = cycleCover.initialHistory(O)
    else:
        # Read pickles until the stream is exhausted, keeping the last
        # non-None history. A context manager closes the handle even when
        # loading raises (the original leaked it on error) and avoids
        # shadowing the `file` builtin.
        H = None
        with open(sys.argv[1]) as handle:
            while True:
                try:
                    tmp = pickle.load(handle)
                except Exception:
                    # Deliberate best effort: stop at EOF or at the first
                    # unreadable record. (Was a bare `except:`, which also
                    # swallowed KeyboardInterrupt/SystemExit.)
                    break
                if tmp is not None:
                    H = tmp
    FH = flattened.flattenGraph(H)
    S = FH.simplifyStubsAndTrivials()
    # The top-level `import cnavg.history.debug` binds the name `cnavg`,
    # not `debug`, so the original `debug.RATIO_CUTOFF` raised NameError;
    # reference the module through its full dotted path instead.
    F = S.removeLowRatioEvents(cnavg.history.debug.RATIO_CUTOFF)
    if len(sys.argv) == 1:
        print(F)
    overlapTable = dict()
    for e in F.parent:
        overlapTable = overlap.addEventToOverlapTable(overlapTable, e)
    counts = []
    totalCounts = []
    for edgeIndex in overlapTable.keys():
        # Only consider phased edges (third index component > -1).
        if edgeIndex[2] > -1:
            t = CNVTree(F, overlapTable[edgeIndex], 1)
            counts.append(len(t.sliceTrees))
            totalCounts.append(sum(X.count for X in t.sliceTrees))
            print([len(X.originators()) for X in t.sliceTrees])
            print([len(X.nodes()) for X in t.sliceTrees])
    print("\n".join(["\t".join(X) for X in zip(map(str, counts), map(str, totalCounts))]))
if __name__ == "__main__":
    # Imports are deferred to script invocation so that importing this
    # module for its classes does not pull in the whole cnavg stack.
    import sys
    import cPickle as pickle  # Python 2 accelerated pickle
    import cnavg.avg.graph as avg
    import cnavg.cactus.graph as cactus
    import cnavg.cactus.oriented as oriented
    import cnavg.cycleSampling.cycleCover as cycleCover
    import cnavg.cactusSampling.sampling as normalizedCactus
    import cnavg.history.flattened as flattened
    import cnavg.history.overlap as overlap
    main()
|
|
# Copyright 2013 Google, Inc. All Rights Reserved.
#
# Google Author(s): Behdad Esfahbod
"""GUI font inspector.
"""
from __future__ import print_function, division, absolute_import
from fontTools.misc.py23 import *
from fontTools import misc, ttLib, cffLib
import pygtk
pygtk.require('2.0')
import gtk
import sys
class Row(object):
    """One row of the inspector tree: a (key, value) pair plus lazily built
    child rows exposing the value's contents.

    Children are materialised on demand: __init__ stores raw (key, value)
    pairs in self._items, and __getitem__/_ensure_children convert them to
    Row objects (cached in self._children) only when the UI asks.
    """

    def __init__(self, parent, index, key, value, font):
        self._parent = parent
        self._index = index
        self._key = key
        self._value = value
        self._font = font
        # Dispatch on the value's shape; each _add_* branch fills
        # self._items (lazy) or self._children (final) and returns early.
        if isinstance(value, ttLib.TTFont):
            self._add_font(value)
            return
        if not isinstance(value, basestring):
            # Try sequences
            is_sequence = True
            try:
                len(value)
                iter(value)
                # It's hard to differentiate list-type sequences
                # from dict-type ones. Try fetching item 0.
                value[0]
            except (TypeError, AttributeError, KeyError, IndexError):
                is_sequence = False
            if is_sequence:
                self._add_list(key, value)
                return
            if hasattr(value, '__dict__'):
                self._add_object(key, value)
                return
            if hasattr(value, 'items'):
                self._add_dict(key, value)
                return
        if isinstance(value, basestring):
            # Leaf: show strings quoted, with no children.
            self._value_str = '"'+value+'"'
            self._children = []
            return
        # Everything else
        self._children = []

    def _filter_items(self):
        # Drop children that would clutter the tree: back-references to the
        # font, bookkeeping attributes, and private (underscore) names.
        items = []
        for k,v in self._items:
            if isinstance(v, ttLib.TTFont):
                continue
            if k in ['reader', 'file', 'tableTag', 'compileStatus', 'recurse']:
                continue
            if isinstance(k, basestring) and k[0] == '_':
                continue
            items.append((k,v))
        self._items = items

    def _add_font(self, font):
        # One child per table tag.
        self._items = [(tag,font[tag]) for tag in font.keys()]

    def _add_object(self, key, value):
        # Make sure item is decompiled
        try:
            value["asdf"]
        except (AttributeError, KeyError, TypeError, ttLib.TTLibError):
            pass
        if isinstance(value, ttLib.getTableModule('glyf').Glyph):
            # Glyph type needs explicit expanding to be useful
            value.expand(self._font['glyf'])
        if isinstance(value, misc.psCharStrings.T2CharString):
            try:
                value.decompile()
            except TypeError: # Subroutines can't be decompiled
                pass
        if isinstance(value, cffLib.BaseDict):
            # Touch every raw key so lazy attributes get materialised.
            for k in value.rawDict.keys():
                getattr(value, k)
        if isinstance(value, cffLib.Index):
            # Load all items
            for i in range(len(value)):
                value[i]
            # Discard offsets as should not be needed anymore
            if hasattr(value, 'offsets'):
                del value.offsets
        self._value_str = value.__class__.__name__
        if isinstance(value, ttLib.tables.DefaultTable.DefaultTable):
            self._value_str += ' (%d Bytes)' % self._font.reader.tables[key].length
        self._items = sorted(value.__dict__.items())
        self._filter_items()

    def _add_dict(self, key, value):
        self._value_str = '%s of %d items' % (value.__class__.__name__, len(value))
        self._items = sorted(value.items())

    def _add_list(self, key, value):
        # Short lists are shown inline; long ones just report their size.
        if len(value) and len(value) <= 32:
            self._value_str = str(value)
        else:
            self._value_str = '%s of %d items' % (value.__class__.__name__,
                                                  len(value))
        self._items = list(enumerate(value))

    def __len__(self):
        if hasattr(self, '_children'):
            return len(self._children)
        if hasattr(self, '_items'):
            return len(self._items)
        assert False

    def _ensure_children(self):
        # Materialise every child Row at once (then drop the raw items).
        if hasattr(self, '_children'):
            return
        children = []
        for i,(k,v) in enumerate(self._items):
            children.append(Row(self, i, k, v, self._font))
        self._children = children
        del self._items

    def __getitem__(self, n):
        # Materialise just child n, caching it; the raw item slot is
        # cleared so each pair is converted at most once.
        if n >= len(self):
            return None
        if not hasattr(self, '_children'):
            self._children = [None] * len(self)
        c = self._children[n]
        if c is None:
            k,v = self._items[n]
            c = self._children[n] = Row(self, n, k, v, self._font)
            self._items[n] = None
        return c

    def get_parent(self):
        return self._parent

    def get_index(self):
        return self._index

    def get_key(self):
        return self._key

    def get_value(self):
        return self._value

    def get_value_str(self):
        if hasattr(self,'_value_str'):
            return self._value_str
        return str(self._value)
class FontTreeModel(gtk.GenericTreeModel):
    """Adapts the Row tree to GTK's TreeModel interface, exposing two text
    columns (key and value)."""

    __gtype_name__ = 'FontTreeModel'

    def __init__(self, font):
        super(FontTreeModel, self).__init__()
        # Both columns are rendered as plain text.
        self._columns = (str, str)
        self.font = font
        self._root = Row(None, 0, "font", font, font)

    def on_get_flags(self):
        # No special capabilities (iters are not persistent).
        return 0

    def on_get_n_columns(self):
        return len(self._columns)

    def on_get_column_type(self, index):
        return self._columns[index]

    def on_get_iter(self, path):
        # Walk the Row tree following the integer path components.
        rowref = self._root
        while path:
            rowref = rowref[path[0]]
            path = path[1:]
        return rowref

    def on_get_path(self, rowref):
        # Rebuild the path by climbing from the row to the root.
        path = []
        while rowref != self._root:
            path.append(rowref.get_index())
            rowref = rowref.get_parent()
        path.reverse()
        return tuple(path)

    def on_get_value(self, rowref, column):
        if column == 0:
            return rowref.get_key()
        else:
            return rowref.get_value_str()

    def on_iter_next(self, rowref):
        # NOTE(review): for the root row get_parent() is None, so this would
        # raise; presumably GTK never asks for the root's sibling -- confirm.
        return rowref.get_parent()[rowref.get_index() + 1]

    def on_iter_children(self, rowref):
        return rowref[0]

    def on_iter_has_child(self, rowref):
        return bool(len(rowref))

    def on_iter_n_children(self, rowref):
        return len(rowref)

    def on_iter_nth_child(self, rowref, n):
        # GTK passes None for the (invisible) root.
        if not rowref: rowref = self._root
        return rowref[n]

    def on_iter_parent(self, rowref):
        return rowref.get_parent()
class Inspect(object):
    """Top-level inspector window: a scrolled two-column tree view over one
    font file, loaded lazily."""

    def _delete_event(self, widget, event, data=None):
        # Closing any window quits the whole application.
        gtk.main_quit()
        return False

    def __init__(self, fontfile):
        self.window = gtk.Window(gtk.WINDOW_TOPLEVEL)
        self.window.set_title("%s - pyftinspect" % fontfile)
        self.window.connect("delete_event", self._delete_event)
        self.window.set_size_request(400, 600)

        self.scrolled_window = gtk.ScrolledWindow()
        self.window.add(self.scrolled_window)

        # lazy=True defers table decompilation until a row is expanded.
        self.font = ttLib.TTFont(fontfile, lazy=True)
        self.treemodel = FontTreeModel(self.font)
        self.treeview = gtk.TreeView(self.treemodel)
        #self.treeview.set_reorderable(True)

        # Two text columns: Key and Value, neither sortable.
        for i in range(2):
            col_name = ('Key', 'Value')[i]
            col = gtk.TreeViewColumn(col_name)
            col.set_sort_column_id(-1)
            self.treeview.append_column(col)

            cell = gtk.CellRendererText()
            col.pack_start(cell, True)
            col.add_attribute(cell, 'text', i)

        # Interactive search matches on the Value column.
        self.treeview.set_search_column(1)
        self.scrolled_window.add(self.treeview)
        self.window.show_all()
def main(args):
    """Open one inspector window per font file, then run the GTK loop."""
    if not args:
        print("usage: pyftinspect font...", file=sys.stderr)
        sys.exit(1)
    for fontfile in args:
        Inspect(fontfile)
    gtk.main()


if __name__ == "__main__":
    main(sys.argv[1:])
|
|
import os
from arcproject.waterquality import classes
from arcproject.scripts.load_data_bulk import slurp
# Path to the folder holding the raw monthly Arc data.
# NOTE(review): hard-coded to a local desktop checkout -- point this at the
# X-drive copy when running on another machine.
data = r"C:\Users\Andy\Desktop\ArcData"  # or location on x drive
def jan():
    """Load the January 2014 sampling runs into the database."""
    print("January 2014")
    # Run of 2014-01-13: site name in filename part 3, gain in part 4.
    slurper = slurp.Slurper()
    slurper.add_new_sites = True
    slurper.dst = True
    slurper.site_function_params = {"site_part": 3, "gain_part": 4}
    slurper.exclude = ['StatePlaneCAII', 'SummaryFiles']
    folder = os.path.join(data, "Jan_2014", "Arc_011314")
    print("Adding gain files to database")
    slurper.slurp_gains(folder)
    print("Adding water quality transects to database")
    slurper.slurp_trans(folder)
    # Run of 2014-01-17: site name moves to filename part 2.
    slurper = slurp.Slurper()
    slurper.add_new_sites = True
    slurper.dst = True
    slurper.site_function_params = {"site_part": 2, "gain_part": 4}
    slurper.exclude = ['StatePlaneCAII', 'SummaryFiles']
    folder = os.path.join(data, "Jan_2014", "Arc_011714")
    print("Adding gain files to database")
    slurper.slurp_gains(folder)
    print("Adding water quality transects to database")
    slurper.slurp_trans(folder)
def feb():
    """Load the February 2014 sampling runs into the database."""
    print("Feburary 2014")
    # Month root, excluding the 2014-02-28 run (handled separately below).
    slurper = slurp.Slurper()
    slurper.add_new_sites = True
    slurper.dst = True
    slurper.site_function_params = {"site_part": 3, "gain_part": 4}
    slurper.exclude = ['StatePlaneCAII', 'SummaryFiles', 'Arc_022814']
    folder = os.path.join(data, "Feb_2014")
    print("Adding gain files to database")
    slurper.slurp_gains(folder)
    print("Adding water quality transects to database")
    slurper.slurp_trans(folder)
    # Run of 2014-02-28: different site-name position, no gain part.
    slurper = slurp.Slurper()
    slurper.add_new_sites = True
    slurper.dst = True
    slurper.site_function_params = {"site_part": 2}
    slurper.exclude = ['StatePlaneCAII', 'SummaryFiles', ]
    folder = os.path.join(data, "Feb_2014", "Arc_022814")
    print("Adding gain files to database")
    slurper.slurp_gains(folder)
    print("Adding water quality transects to database")
    slurper.slurp_trans(folder)
def mar():
    """Load the March 2014 sampling runs into the database."""
    print("March 2014")
    # Run of 2014-03-17 (no DST flag for this one).
    slurper = slurp.Slurper()
    slurper.add_new_sites = True
    slurper.site_function_params = {"site_part": 2, "gain_part": 4}
    slurper.exclude = ['StatePlaneCAII', 'SummaryFiles']
    folder = os.path.join(data, "Mar_2014", "Arc_031714")
    print("Adding gain files to database")
    slurper.slurp_gains(folder)
    print("Adding water quality transects to database")
    slurper.slurp_trans(folder)
    # Month root, excluding the two runs handled individually.
    slurper = slurp.Slurper()
    slurper.add_new_sites = True
    slurper.transect_gps_pattern = '*PosnPnt*.shp'
    slurper.site_function_params = {"site_part": 3, "gain_part": 4}
    slurper.exclude = ['StatePlaneCAII', 'SummaryFiles', 'Arc_031714', 'Arc_032014']
    folder = os.path.join(data, "Mar_2014")
    print("Adding gain files to database")
    slurper.slurp_gains(folder)
    print("Adding water quality transects to database")
    slurper.slurp_trans(folder)
    # Run of 2014-03-20: only the second GPS position file applies.
    slurper = slurp.Slurper()
    slurper.add_new_sites = True
    slurper.transect_gps_pattern = '*PosnPnt2.shp'
    slurper.site_function_params = {"site_part": 3, "gain_part": 4}
    slurper.exclude = ['StatePlaneCAII', 'SummaryFiles', 'Arc_031714']
    folder = os.path.join(data, "Mar_2014", "Arc_032014")
    print("Adding gain files to database")
    slurper.slurp_gains(folder)
    print("Adding water quality transects to database")
    slurper.slurp_trans(folder)
def apr():
    """Load the April 2014 sampling runs into the database."""
    print("April 2014")
    # Run of 2014-04-22.
    slurper = slurp.Slurper()
    slurper.add_new_sites = True
    slurper.dst = True
    slurper.site_function_params = {"site_part": 2, "gain_part": 4}
    slurper.exclude = ['StatePlaneCAII', 'SummaryFiles', 'StatePlaneII']
    folder = os.path.join(data, "Apr_2014", "Arc_042214")
    print("Adding gain files to database")
    slurper.slurp_gains(folder)
    print("Adding water quality transects to database")
    slurper.slurp_trans(folder)
    print("April 2014")
    # Run of 2014-04-21.
    slurper = slurp.Slurper()
    slurper.add_new_sites = True
    slurper.dst = True
    slurper.site_function_params = {"site_part": 2, "gain_part": 4}
    slurper.exclude = ['StatePlaneCAII', 'SummaryFiles', 'StatePlaneII']
    folder = os.path.join(data, "Apr_2014", "Arc_042114")
    print("Adding gain files to database")
    slurper.slurp_gains(folder)
    print("Adding water quality transects to database")
    slurper.slurp_trans(folder)
    # Month root, excluding the two runs handled individually above.
    slurper = slurp.Slurper()
    slurper.add_new_sites = True
    slurper.dst = True
    slurper.site_function_params = {"site_part": 3, "gain_part": 4}
    slurper.exclude = ['StatePlaneCAII', 'SummaryFiles', 'StatePlaneII', 'Arc_042214', "Arc_042114"]
    folder = os.path.join(data, "Apr_2014")
    print("Adding gain files to database")
    slurper.slurp_gains(folder)
    print("Adding water quality transects to database")
    slurper.slurp_trans(folder)
def may():
    """Import the May 2014 gain files and water-quality transects."""
    print("May 2014")
    path = os.path.join(data, "May_2014")
    slurper = slurp.Slurper()
    slurper.dst = True
    slurper.add_new_sites = True
    slurper.site_function_params = {"site_part": 2, "gain_part": 4}
    slurper.exclude = ['StatePlaneCAII', 'SummaryFiles', 'StatePlaneII']
    # Restrict the GPS pattern: including PosnPnt2.shp caused an
    # integrity error.
    slurper.transect_gps_pattern = "*PosnPnt.shp"
    print("Adding gain files to database")
    slurper.slurp_gains(path)
    print("Adding water quality transects to database")
    slurper.slurp_trans(path)
def jun():
    """Import the June 2014 gain files and water-quality transects.

    The month folder is processed first (excluding the Arc_062314 day
    folder), then Arc_062314 is processed on its own.
    """
    print("June 2014")
    path = os.path.join(data, "Jun_2014")
    slurper = slurp.Slurper()
    slurper.dst = True
    slurper.add_new_sites = True
    slurper.site_function_params = {"site_part": 2, "gain_part": 4}
    slurper.exclude = ['StatePlaneCAII', 'SummaryFiles', 'StatePlaneII', "Arc_062314"]
    print("Adding gain files to database")
    slurper.slurp_gains(path)
    print("Adding water quality transects to database")
    slurper.slurp_trans(path)
    print("June 2014")
    path = os.path.join(data, "Jun_2014", "Arc_062314")
    slurper = slurp.Slurper()
    slurper.dst = True
    slurper.add_new_sites = True
    slurper.site_function_params = {"site_part": 2, "gain_part": 4}
    slurper.exclude = ['StatePlaneCAII', 'SummaryFiles', 'StatePlaneII']
    print("Adding gain files to database")
    slurper.slurp_gains(path)
    print("Adding water quality transects to database")
    slurper.slurp_trans(path)
def jul():
    """Import the July 2014 gain files and water-quality transects."""
    print("July 2014")
    path = os.path.join(data, "Jul_2014")
    slurper = slurp.Slurper()
    slurper.dst = True
    slurper.add_new_sites = True
    slurper.site_function_params = {"site_part": 2, "gain_part": 4}
    slurper.exclude = ['StatePlaneCAII', 'SummaryFiles', 'StatePlaneII']
    print("Adding gain files to database")
    slurper.slurp_gains(path)
    print("Adding water quality transects to database")
    slurper.slurp_trans(path)
def aug():
    """Import the August 2014 gain files and water-quality transects.

    Three passes: the month folder (excluding the two day folders),
    then Arc_082614 and Arc_082814 individually.
    """
    print("Aug 2014")
    path = os.path.join(data, "Aug_2014")
    slurper = slurp.Slurper()
    slurper.dst = True
    slurper.add_new_sites = True
    slurper.site_function_params = {"site_part": 3, "gain_part": 4}
    slurper.exclude = ['StatePlaneCAII', 'SummaryFiles', 'StatePlaneII', "Arc_082614", "Arc_082814"]
    print("Adding gain files to database")
    slurper.slurp_gains(path)
    print("Adding water quality transects to database")
    slurper.slurp_trans(path)
    print("Aug 2014")
    path = os.path.join(data, "Aug_2014", "Arc_082614")
    slurper = slurp.Slurper()
    slurper.dst = True
    slurper.add_new_sites = True
    slurper.site_function_params = {"site_part": 2, "gain_part": 4}
    slurper.exclude = ['StatePlaneCAII', 'SummaryFiles', 'StatePlaneII']
    print("Adding gain files to database")
    slurper.slurp_gains(path)
    print("Adding water quality transects to database")
    slurper.slurp_trans(path)
    print("Aug 2014")
    path = os.path.join(data, "Aug_2014", "Arc_082814")
    slurper = slurp.Slurper()
    slurper.dst = True
    slurper.add_new_sites = True
    slurper.site_function_params = {"site_part": 2, "gain_part": 4}
    slurper.exclude = ['StatePlaneCAII', 'SummaryFiles', 'StatePlaneII']
    print("Adding gain files to database")
    slurper.slurp_gains(path)
    print("Adding water quality transects to database")
    slurper.slurp_trans(path)
def sep():
    """Import the September 2014 gain files and water-quality transects.

    The Arc_091514 day folder is processed first, then the rest of
    Sep_2014 with that folder excluded.
    """
    print("Sep 2014")
    path = os.path.join(data, "Sep_2014", "Arc_091514")
    slurper = slurp.Slurper()
    slurper.dst = True
    slurper.add_new_sites = True
    slurper.site_function_params = {"site_part": 3, "gain_part": 4}
    slurper.exclude = ['StatePlaneCAII', 'SummaryFiles', 'StatePlaneII']
    print("Adding gain files to database")
    slurper.slurp_gains(path)
    print("Adding water quality transects to database")
    slurper.slurp_trans(path)
    path = os.path.join(data, "Sep_2014")
    slurper = slurp.Slurper()
    slurper.dst = True
    slurper.add_new_sites = True
    slurper.site_function_params = {"site_part": 2, "gain_part": 4}
    slurper.exclude = ['StatePlaneCAII', 'SummaryFiles', 'StatePlaneII', "Arc_091514"]
    print("Adding gain files to database")
    slurper.slurp_gains(path)
    print("Adding water quality transects to database")
    slurper.slurp_trans(path)
def oct():
    # NOTE(review): shadows the builtin ``oct``; kept because callers
    # (``main``) reference this name.
    """Import the October 2014 gain files and water-quality transects.

    Three day folders are processed; note Arc_101714 uses a different
    gain_part (3) than the other passes.
    """
    print("Oct 2014")
    path = os.path.join(data, "Oct_2014", "Arc_101314")
    slurper = slurp.Slurper()
    slurper.dst = True
    slurper.add_new_sites = True
    slurper.site_function_params = {"site_part": 2, "gain_part": 4}
    slurper.exclude = ['StatePlaneCAII', 'SummaryFiles', 'StatePlaneII']
    print("Adding gain files to database")
    slurper.slurp_gains(path)
    print("Adding water quality transects to database")
    slurper.slurp_trans(path)
    path = os.path.join(data, "Oct_2014", "Arc_101414")
    slurper = slurp.Slurper()
    slurper.dst = True
    slurper.add_new_sites = True
    slurper.site_function_params = {"site_part": 3, "gain_part": 4}
    slurper.exclude = ['StatePlaneCAII', 'SummaryFiles', 'StatePlaneII']
    print("Adding gain files to database")
    slurper.slurp_gains(path)
    print("Adding water quality transects to database")
    slurper.slurp_trans(path)
    path = os.path.join(data, "Oct_2014", "Arc_101714")
    slurper = slurp.Slurper()
    slurper.dst = True
    slurper.add_new_sites = True
    slurper.site_function_params = {"site_part": 2, "gain_part": 3}
    slurper.exclude = ['StatePlaneCAII', 'SummaryFiles', 'StatePlaneII']
    print("Adding gain files to database")
    slurper.slurp_gains(path)
    print("Adding water quality transects to database")
    slurper.slurp_trans(path)
def nov():
    """Import the November 2014 gain files and water-quality transects."""
    print("Nov 2014")
    path = os.path.join(data, "Nov_2014")
    slurper = slurp.Slurper()
    slurper.dst = True
    slurper.add_new_sites = True
    slurper.site_function_params = {"site_part": 2, "gain_part": 4}
    slurper.exclude = ['StatePlaneCAII', 'SummaryFiles', 'StatePlaneII']
    print("Adding gain files to database")
    slurper.slurp_gains(path)
    print("Adding water quality transects to database")
    slurper.slurp_trans(path)
def dec():
    """Import the December 2014 gain files and water-quality transects.

    The month folder is processed first (excluding the Arc_121614 day
    folder), then Arc_121614 on its own.
    """
    print("Dec 2014")
    path = os.path.join(data, "Dec_2014")
    slurper = slurp.Slurper()
    slurper.dst = True
    slurper.add_new_sites = True
    slurper.site_function_params = {"site_part": 2, "gain_part": 4}
    slurper.exclude = ['StatePlaneCAII', 'SummaryFiles', 'StatePlaneII', 'Arc_121614']
    print("Adding gain files to database")
    slurper.slurp_gains(path)
    print("Adding water quality transects to database")
    slurper.slurp_trans(path)
    path = os.path.join(data, "Dec_2014", "Arc_121614")
    slurper = slurp.Slurper()
    slurper.dst = True
    slurper.add_new_sites = True
    slurper.site_function_params = {"site_part": 2, "gain_part": 4}
    slurper.exclude = ['StatePlaneCAII', 'SummaryFiles', 'StatePlaneII']
    print("Adding gain files to database")
    slurper.slurp_gains(path)
    print("Adding water quality transects to database")
    slurper.slurp_trans(path)
def main(month="ALL"):
    """Run the slurp import for a single month or the whole year.

    :param month: ``"ALL"`` (default) runs every month in order.  Any
        other value selects one month: pass a month function itself
        (e.g. ``apr``) or its name as a string (e.g. ``"apr"``).
    :raises KeyError: if ``month`` is a string that does not name one
        of the month functions.
    """
    months = (jan, feb, mar, apr, may, jun, jul, aug, sep, oct, nov, dec)
    if month == "ALL":
        for run_month in months:
            run_month()
    elif callable(month):
        # Bug fix: the original ``else`` branch evaluated ``month``
        # without calling it, so selecting a single month did nothing.
        month()
    else:
        {fn.__name__: fn for fn in months}[month]()


if __name__ == '__main__':
    main()
|
|
# No shebang line, this module is meant to be imported
#
# Copyright 2013 Oliver Palmer
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Agent Models
============
Models and interface classes related to the agent.
"""
import re
import uuid
from datetime import datetime
from sqlalchemy import or_
from sqlalchemy.schema import UniqueConstraint
from sqlalchemy.orm import validates
from netaddr import AddrFormatError, IPAddress
from pyfarm.core.enums import (
AgentState, STRING_TYPES, UseAgentAddress, INTEGER_TYPES, WorkState)
from pyfarm.master.config import config
from pyfarm.master.application import db
from pyfarm.models.core.functions import repr_ip
from pyfarm.models.core.mixins import (
ValidatePriorityMixin, UtilityMixins, ReprMixin, ValidateWorkStateMixin)
from pyfarm.models.core.types import (
id_column, IPv4Address, IDTypeAgent, IDTypeWork, UseAgentAddressEnum,
OperatingSystemEnum, AgentStateEnum, MACAddress)
from pyfarm.models.jobtype import JobTypeVersion
from pyfarm.models.job import Job
# Public API of this module.
__all__ = ("Agent", )
# When true, agents reporting loopback addresses are accepted by
# Agent.validate_ipv4_address -- useful for development setups.
ALLOW_AGENT_LOOPBACK = config.get("allow_agents_from_loopback")
# Hostname validation: one or more labels of 1-63 alphanumeric/hyphen
# characters that neither start nor end with a hyphen, joined by dots,
# optionally with a trailing dot.  Raw string literals avoid the
# invalid "\d"/"\." escape-sequence warnings of the original non-raw
# strings (a DeprecationWarning since Python 3.6).
REGEX_HOSTNAME = re.compile(r"^(?!-)[A-Z\d-]{1,63}(?<!-)"
                            r"(\.(?!-)[A-Z\d-]{1,63}(?<!-))*\.?$",
                            re.IGNORECASE)
# Many-to-many association: which software versions each agent has.
AgentSoftwareVersionAssociation = db.Table(
    config.get("table_agent_software_version_assoc"), db.metadata,
    db.Column(
        "agent_id", IDTypeAgent,
        db.ForeignKey("%s.id" % config.get("table_agent")),
        primary_key=True),
    db.Column(
        "software_version_id", db.Integer,
        db.ForeignKey("%s.id" % config.get("table_software_version")),
        primary_key=True))
# Many-to-many association: which tags are applied to each agent.
AgentTagAssociation = db.Table(
    config.get("table_agent_tag_assoc"), db.metadata,
    db.Column(
        "agent_id", IDTypeAgent,
        db.ForeignKey("%s.id" % config.get("table_agent")),
        primary_key=True),
    db.Column(
        "tag_id", db.Integer,
        db.ForeignKey("%s.id" % config.get("table_tag")),
        primary_key=True))
# Many-to-many association: tasks an agent has failed to execute.
FailedTaskInAgent = db.Table(
    config.get("table_failed_task_in_agent"), db.metadata,
    db.Column(
        "agent_id", IDTypeAgent,
        db.ForeignKey("%s.id" % config.get("table_agent")),
        primary_key=True),
    db.Column(
        "task_id", IDTypeWork,
        db.ForeignKey("%s.id" % config.get("table_task")),
        primary_key=True))
# Many-to-many association: GPUs installed in each agent.
GPUInAgent = db.Table(
    config.get("table_gpu_in_agent"), db.metadata,
    db.Column(
        "agent_id", IDTypeAgent,
        db.ForeignKey("%s.id" % config.get("table_agent")),
        primary_key=True),
    db.Column(
        "gpu_id", db.Integer,
        db.ForeignKey("%s.id" % config.get("table_gpu")),
        primary_key=True))
class AgentTaggingMixin(object):
    """
    Mixin providing the shared string-validation behavior used by
    :class:`.AgentTag` and :class:`.AgentSoftware`
    """
    @validates("tag", "software")
    def validate_string_column(self, key, value):
        """
        Coerce integer input to a string and reject any other
        non-string value for `key`.
        """
        if isinstance(value, STRING_TYPES):
            return value
        if isinstance(value, INTEGER_TYPES):
            return str(value)
        raise ValueError("expected a string for `%s`" % key)
class AgentMacAddress(db.Model):
    """
    Maps an :class:`Agent` to one of its MAC addresses.  An agent may
    own several rows here; the unique constraint prevents duplicate
    (agent, address) pairs.
    """
    __tablename__ = config.get("table_agent_mac_address")
    __table_args__ = (UniqueConstraint("agent_id", "mac_address"), )
    # Owning agent -- half of the composite primary key.
    agent_id = db.Column(
        IDTypeAgent,
        db.ForeignKey("%s.id" % config.get("table_agent")),
        primary_key=True, nullable=False)
    # The MAC address itself -- other half of the composite primary key.
    mac_address = db.Column(
        MACAddress,
        primary_key=True, nullable=False, autoincrement=False)
class Agent(db.Model, ValidatePriorityMixin, ValidateWorkStateMixin,
            UtilityMixins, ReprMixin):
    """
    Stores information about an agent include its network address,
    state, allocation configuration, etc.

    .. note::
        This table enforces two forms of uniqueness. The :attr:`id` column
        must be unique and the combination of these columns must also be
        unique to limit the frequency of duplicate data:

            * :attr:`hostname`
            * :attr:`port`
            * :attr:`id`
    """
    __tablename__ = config.get("table_agent")
    __table_args__ = (UniqueConstraint("hostname", "port", "id"), )
    STATE_ENUM = AgentState
    STATE_DEFAULT = "online"
    REPR_COLUMNS = (
        "id", "hostname", "port", "state", "remote_ip",
        "cpus", "ram", "free_ram")
    REPR_CONVERT_COLUMN = {"remote_ip": repr_ip}
    URL_TEMPLATE = config.get("agent_api_url_template")
    # Configured bounds consumed by validate_resource() below.
    MIN_PORT = config.get("agent_min_port")
    MAX_PORT = config.get("agent_max_port")
    MIN_CPUS = config.get("agent_min_cpus")
    MAX_CPUS = config.get("agent_max_cpus")
    MIN_RAM = config.get("agent_min_ram")
    MAX_RAM = config.get("agent_max_ram")

    # quick check of the configured data
    assert MIN_PORT >= 1, "`agent_min_port` must be > 0"
    assert MAX_PORT >= 1, "`agent_max_port` must be > 0"
    assert MAX_PORT >= MIN_PORT, "MIN_PORT must be <= MAX_PORT"
    assert MIN_CPUS >= 1, "`agent_min_cpus` must be > 0"
    assert MAX_CPUS >= 1, "`agent_max_cpus` must be > 0"
    assert MAX_CPUS >= MIN_CPUS, "MIN_CPUS must be <= MAX_CPUS"
    assert MIN_RAM >= 1, "`agent_min_ram` must be > 0"
    assert MAX_RAM >= 1, "`agent_max_ram` must be > 0"
    assert MAX_RAM >= MIN_RAM, "`agent_min_ram` must be <= `agent_max_ram`"

    id = id_column(IDTypeAgent, default=uuid.uuid4, autoincrement=False)

    # basic host attribute information
    hostname = db.Column(
        db.String(config.get("max_hostname_length")),
        nullable=False,
        doc="The hostname we should use to talk to this host. "
            "Preferably this value will be the fully qualified "
            "name instead of the base hostname alone.")
    notes = db.Column(
        db.Text,
        default="",
        doc="Free form notes about this agent")
    remote_ip = db.Column(
        IPv4Address, nullable=True,
        doc="the remote address which came in with the request")
    use_address = db.Column(
        UseAgentAddressEnum,
        nullable=False, default=UseAgentAddress.REMOTE,
        doc="The address we should use when communicating with the agent")
    # TODO Make non-nullable later
    os_class = db.Column(
        OperatingSystemEnum,
        doc="The type of operating system running on the "
            "agent; 'linux', 'windows', or 'mac'.")
    os_fullname = db.Column(
        db.String(config.get("max_osname_length")),
        doc="The full human-readable name of the agent's OS, as returned "
            "by platform.platform()")
    ram = db.Column(
        db.Integer,
        nullable=False,
        doc="The amount of ram installed on the agent in megabytes")
    free_ram = db.Column(
        db.Integer,
        nullable=False,
        doc="The amount of ram which was last considered free")
    cpus = db.Column(
        db.Integer,
        nullable=False,
        doc="The number of logical CPU cores installed on the agent")
    cpu_name = db.Column(
        db.String(config.get("max_cpuname_length")),
        doc="The make and model of CPUs in this agents")
    port = db.Column(
        db.Integer,
        nullable=False,
        doc="The port the agent is currently running on")
    time_offset = db.Column(
        db.Integer,
        nullable=False, default=0,
        doc="The offset in seconds the agent is from an official time server")
    version = db.Column(
        db.String(16),
        nullable=True,
        doc="The pyfarm version number this agent is running.")
    upgrade_to = db.Column(
        db.String(16),
        nullable=True,
        doc="The version this agent should upgrade to.")
    restart_requested = db.Column(
        db.Boolean,
        default=False, nullable=False,
        doc="If True, the agent will be restarted")

    # host state
    state = db.Column(
        AgentStateEnum,
        default=AgentState.ONLINE, nullable=False,
        doc="Stores the current state of the host. This value can be "
            "changed either by a master telling the host to do "
            "something with a task or from the host via REST api.")
    last_heard_from = db.Column(
        db.DateTime,
        default=datetime.utcnow,
        doc="Time we last had contact with this agent")
    last_success_on = db.Column(
        db.DateTime,
        nullable=True,
        doc="The last time this agent has set a task to `done`")
    last_polled = db.Column(
        db.DateTime,
        doc="Time we last tried to contact the agent")

    # Max allocation of the two primary resources which `1.0` is 100%
    # allocation.  For `cpu_allocation` 100% allocation typically means
    # one task per cpu.
    ram_allocation = db.Column(
        db.Float,
        default=config.get("agent_ram_allocation"),
        doc="The amount of ram the agent is allowed to allocate "
            "towards work. A value of 1.0 would mean to let the "
            "agent use all of the memory installed on the system "
            "when assigning work.")
    cpu_allocation = db.Column(
        db.Float,
        default=config.get("agent_cpu_allocation"),
        doc="The total amount of cpu space an agent is allowed to "
            "process work in. A value of 1.0 would mean an agent "
            "can handle as much work as the system could handle "
            "given the requirements of a task. For example if "
            "an agent has 8 cpus, cpu_allocation is .5, and a "
            "task requires 4 cpus then only that task will "
            "run on the system.")

    #
    # Relationships
    #
    tasks = db.relationship(
        "Task",
        backref="agent", lazy="dynamic",
        doc="Relationship between an :class:`Agent` and any "
            ":class:`pyfarm.models.Task` objects")
    tags = db.relationship(
        "Tag",
        secondary=AgentTagAssociation,
        backref=db.backref("agents", lazy="dynamic"),
        lazy="dynamic",
        doc="Tags associated with this agent")
    software_versions = db.relationship(
        "SoftwareVersion",
        secondary=AgentSoftwareVersionAssociation,
        backref=db.backref("agents", lazy="dynamic"),
        lazy="dynamic",
        doc="software this agent has installed or is configured for")
    mac_addresses = db.relationship(
        "AgentMacAddress", backref="agent",
        lazy="dynamic",
        doc="The MAC addresses this agent has",
        cascade="save-update, merge, delete, delete-orphan")
    gpus = db.relationship(
        "GPU",
        secondary=GPUInAgent,
        backref=db.backref("agents", lazy="dynamic"),
        lazy="dynamic",
        doc="The graphics cards that are installed in this agent")
    disks = db.relationship(
        "AgentDisk",
        backref=db.backref("agent"),
        lazy="dynamic",
        doc="The known disks available to this agent",
        cascade="save-update, merge, delete, delete-orphan")
    failed_tasks = db.relationship(
        "Task",
        secondary=FailedTaskInAgent,
        backref=db.backref("failed_in_agents", lazy="dynamic"),
        lazy="dynamic",
        doc="The tasks this agents failed to execute")

    def is_offline(self):
        """Return True if this agent's state is ``offline``."""
        return self.state == AgentState.OFFLINE

    def is_disabled(self):
        """Return True if this agent's state is ``disabled``."""
        return self.state == AgentState.DISABLED

    def get_supported_types(self):
        """
        Return the ids of the job type versions whose software
        requirements this agent satisfies, considering only job types
        with jobs that are queued (state ``None``) or running.

        The result is cached on the instance as
        ``support_jobtype_versions``; the EAFP ``try`` below returns
        the cached list when it already exists.
        """
        try:
            return self.support_jobtype_versions
        except AttributeError:
            # NOTE: ``== None`` is intentional here -- this is a
            # SQLAlchemy filter expression, not a Python comparison.
            jobtype_versions_query = JobTypeVersion.query.filter(
                JobTypeVersion.jobs.any(
                    or_(Job.state == None, Job.state == WorkState.RUNNING)))
            self.support_jobtype_versions = []
            for jobtype_version in jobtype_versions_query:
                if self.satisfies_jobtype_requirements(jobtype_version):
                    self.support_jobtype_versions.append(jobtype_version.id)
            return self.support_jobtype_versions

    def satisfies_jobtype_requirements(self, jobtype_version):
        """
        Return True if every software requirement of ``jobtype_version``
        is met by at least one of this agent's software versions,
        respecting each requirement's optional min/max version ranks.
        """
        requirements_to_satisfy = list(jobtype_version.software_requirements)
        for software_version in self.software_versions:
            # Iterate over a copy so satisfied requirements can be
            # removed from the list while looping.
            for requirement in list(requirements_to_satisfy):
                if (software_version.software == requirement.software and
                    (requirement.min_version == None or
                     requirement.min_version.rank <= software_version.rank) and
                    (requirement.max_version == None or
                     requirement.max_version.rank >= software_version.rank)):
                    requirements_to_satisfy.remove(requirement)
        return len(requirements_to_satisfy) == 0

    def satisfies_job_requirements(self, job):
        """
        Return True if this agent can currently run ``job``: its job
        type's software requirements, the job's cpu and ram needs, and
        all (possibly negated) tag requirements must be satisfied.
        """
        if not self.satisfies_jobtype_requirements(job.jobtype_version):
            return False
        if self.cpus < job.cpus:
            return False
        if self.free_ram < job.ram:
            return False
        for tag_requirement in job.tag_requirements:
            if (not tag_requirement.negate and
                tag_requirement.tag not in self.tags):
                return False
            if (tag_requirement.negate and
                tag_requirement.tag in self.tags):
                return False
        return True

    @classmethod
    def validate_hostname(cls, key, value):
        """
        Ensures that the hostname provided by `value` matches a regular
        expression that expresses what a valid hostname is.

        :raises ValueError: if the hostname does not match
        """
        # ensure hostname does not contain characters we can't use
        if not REGEX_HOSTNAME.match(value):
            raise ValueError("%s is not valid for %s" % (value, key))
        return value

    @classmethod
    def validate_resource(cls, key, value):
        """
        Ensure the ``value`` provided for ``key`` is within an expected
        range.  This classmethod retrieves the min and max values from
        the :class:`Agent` class directory using:

            >>> min_value = getattr(Agent, "MIN_%s" % key.upper())
            >>> max_value = getattr(Agent, "MAX_%s" % key.upper())

        :raises ValueError: if the value is outside the configured range
        """
        min_value = getattr(cls, "MIN_%s" % key.upper())
        max_value = getattr(cls, "MAX_%s" % key.upper())

        # check the provided input
        if not min_value <= value <= max_value:
            msg = "value for `%s` must be between " % key
            msg += "%s and %s" % (min_value, max_value)
            raise ValueError(msg)
        return value

    @classmethod
    def validate_ipv4_address(cls, _, value):
        """
        Ensures the :attr:`ip` address is valid.  This checks to ensure
        that the value provided is:

            * not a hostmask
            * not link local (:rfc:`3927`)
            * not used for multicast (:rfc:`1112`)
            * not a netmask (:rfc:`4632`)
            * not reserved (:rfc:`6052`)
            * a private address (:rfc:`1918`)

        ``None`` is passed through untouched.
        """
        if value is None:
            return value
        try:
            address = IPAddress(value)
        except (AddrFormatError, ValueError) as e:
            raise ValueError(
                "%s is not a valid address format: %s" % (value, e))
        if ALLOW_AGENT_LOOPBACK:
            # Substitute a callable that always reports "not loopback"
            # so loopback addresses pass the check below.
            loopback = lambda: False
        else:
            loopback = address.is_loopback
        if any([address.is_hostmask(), address.is_link_local(),
                loopback(), address.is_multicast(),
                address.is_netmask(), address.is_reserved()]):
            raise ValueError("%s is not a valid address type" % value)
        return value

    def api_url(self):
        """
        Returns the base url which should be used to access the api
        of this specific agent.

        :except ValueError:
            Raised if this function is called while the agent's
            :attr:`use_address` column is set to ``PASSIVE``
        """
        if self.use_address == UseAgentAddress.REMOTE:
            return self.URL_TEMPLATE.format(
                host=self.remote_ip,
                port=self.port
            )
        elif self.use_address == UseAgentAddress.HOSTNAME:
            return self.URL_TEMPLATE.format(
                host=self.hostname,
                port=self.port
            )
        else:
            raise ValueError(
                "Cannot construct an agent API url using mode %r "
                "`use_address`" % self.use_address)

    @validates("hostname")
    def validate_hostname_column(self, key, value):
        """Validates the hostname column"""
        return self.validate_hostname(key, value)

    @validates("ram", "cpus", "port")
    def validate_numeric_column(self, key, value):
        """
        Validates several numerical columns.  Columns such as ram, cpus
        and port a are validated with this method.
        """
        return self.validate_resource(key, value)

    @validates("remote_ip")
    def validate_remote_ip(self, key, value):
        """Validates the remote_ip column"""
        return self.validate_ipv4_address(key, value)
|
|
# Copyright 2021, Kay Hayen, mailto:kay.hayen@gmail.com
#
# Part of "Nuitka", an optimizing Python compiler that is compatible and
# integrates with CPython, but also works on its own.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Reformulation of dictionary creations.
Dictionary creations might be directly translated to constants, or they might
become nodes that build dictionaries.
For Python3.5, unpacking can happen while creating dictionaries, these are
being re-formulated to an internal function.
Consult the developer manual for information. TODO: Add ability to sync
source code comments with developer manual sections.
"""
from nuitka.nodes.AssignNodes import (
StatementAssignmentVariable,
StatementReleaseVariable,
)
from nuitka.nodes.AttributeNodes import ExpressionAttributeLookup
from nuitka.nodes.BuiltinIteratorNodes import ExpressionBuiltinIter1
from nuitka.nodes.BuiltinNextNodes import ExpressionBuiltinNext1
from nuitka.nodes.ConstantRefNodes import makeConstantRefNode
from nuitka.nodes.ContainerMakingNodes import makeExpressionMakeTuple
from nuitka.nodes.DictionaryNodes import (
ExpressionKeyValuePair,
StatementDictOperationUpdate,
makeExpressionMakeDict,
makeExpressionMakeDictOrConstant,
makeExpressionPairs,
)
from nuitka.nodes.ExceptionNodes import (
ExpressionBuiltinMakeException,
StatementRaiseException,
)
from nuitka.nodes.FunctionNodes import (
ExpressionFunctionCall,
ExpressionFunctionCreation,
ExpressionFunctionRef,
)
from nuitka.nodes.LoopNodes import StatementLoop, StatementLoopBreak
from nuitka.nodes.OperatorNodes import makeBinaryOperationNode
from nuitka.nodes.ReturnNodes import StatementReturn
from nuitka.nodes.TypeNodes import ExpressionBuiltinType1
from nuitka.nodes.VariableRefNodes import (
ExpressionTempVariableRef,
ExpressionVariableRef,
)
from nuitka.PythonVersions import python_version
from nuitka.specs.ParameterSpecs import ParameterSpec
from .InternalModule import (
internal_source_ref,
makeInternalHelperFunctionBody,
once_decorator,
)
from .ReformulationTryExceptStatements import makeTryExceptSingleHandlerNode
from .ReformulationTryFinallyStatements import makeTryFinallyStatement
from .TreeHelpers import (
buildNode,
buildNodeList,
makeStatementsSequenceFromStatement,
makeStatementsSequenceFromStatements,
)
def buildDictionaryNode(provider, node, source_ref):
    """Build the node for a dictionary creation expression.

    On Python 3.5+ a ``None`` key marks a ``**`` unpacking entry, which
    requires the special unpacking re-formulation instead of a plain
    dict-making node.
    """
    if python_version >= 0x350 and any(key is None for key in node.keys):
        return buildDictionaryUnpacking(
            provider=provider, node=node, source_ref=source_ref
        )

    return makeExpressionMakeDictOrConstant(
        pairs=makeExpressionPairs(
            keys=buildNodeList(provider, node.keys, source_ref),
            values=buildNodeList(provider, node.values, source_ref),
        ),
        user_provided=True,
        source_ref=source_ref,
    )
@once_decorator
def getDictUnpackingHelper():
    """
    Build (exactly once, via ``once_decorator``) the internal helper
    function body ``_unpack_dict``.  The helper accepts ``*args``,
    iterates them, merging each into a fresh dictionary with
    ``dict.update`` and returns the result.  A non-mapping argument
    raises the ``TypeError`` message CPython uses for bad ``**``
    unpacking.
    """
    helper_name = "_unpack_dict"
    result = makeInternalHelperFunctionBody(
        name=helper_name,
        parameters=ParameterSpec(
            ps_name=helper_name,
            ps_normal_args=(),
            ps_list_star_arg="args",
            ps_dict_star_arg=None,
            ps_default_count=0,
            ps_kw_only_args=(),
            ps_pos_only_args=(),
        ),
    )

    temp_scope = None

    # Temporaries: the dictionary being built, the iterator over the
    # arguments, and the current item taken from the iterator.
    tmp_result_variable = result.allocateTempVariable(temp_scope, "dict")
    tmp_iter_variable = result.allocateTempVariable(temp_scope, "iter")
    tmp_item_variable = result.allocateTempVariable(temp_scope, "keys")

    # Loop body: fetch the next argument (break on StopIteration), then
    # dict.update the result with it, converting the AttributeError of
    # a non-mapping into the proper TypeError.
    loop_body = makeStatementsSequenceFromStatements(
        makeTryExceptSingleHandlerNode(
            tried=StatementAssignmentVariable(
                variable=tmp_item_variable,
                source=ExpressionBuiltinNext1(
                    value=ExpressionTempVariableRef(
                        variable=tmp_iter_variable, source_ref=internal_source_ref
                    ),
                    source_ref=internal_source_ref,
                ),
                source_ref=internal_source_ref,
            ),
            exception_name="StopIteration",
            handler_body=StatementLoopBreak(source_ref=internal_source_ref),
            source_ref=internal_source_ref,
        ),
        makeTryExceptSingleHandlerNode(
            tried=StatementDictOperationUpdate(
                dict_arg=ExpressionTempVariableRef(
                    variable=tmp_result_variable, source_ref=internal_source_ref
                ),
                value=ExpressionTempVariableRef(
                    variable=tmp_item_variable, source_ref=internal_source_ref
                ),
                source_ref=internal_source_ref,
            ),
            exception_name="AttributeError",
            handler_body=StatementRaiseException(
                exception_type=ExpressionBuiltinMakeException(
                    exception_name="TypeError",
                    args=(
                        # "'%s' object is not a mapping" % type(item).__name__
                        makeBinaryOperationNode(
                            operator="Mod",
                            left=makeConstantRefNode(
                                constant="""\
'%s' object is not a mapping""",
                                source_ref=internal_source_ref,
                                user_provided=True,
                            ),
                            right=makeExpressionMakeTuple(
                                elements=(
                                    ExpressionAttributeLookup(
                                        expression=ExpressionBuiltinType1(
                                            value=ExpressionTempVariableRef(
                                                variable=tmp_item_variable,
                                                source_ref=internal_source_ref,
                                            ),
                                            source_ref=internal_source_ref,
                                        ),
                                        attribute_name="__name__",
                                        source_ref=internal_source_ref,
                                    ),
                                ),
                                source_ref=internal_source_ref,
                            ),
                            source_ref=internal_source_ref,
                        ),
                    ),
                    source_ref=internal_source_ref,
                ),
                exception_value=None,
                exception_trace=None,
                exception_cause=None,
                source_ref=internal_source_ref,
            ),
            source_ref=internal_source_ref,
        ),
    )

    args_variable = result.getVariableForAssignment(variable_name="args")

    # Temporaries are released regardless of how the body exits.
    final = (
        StatementReleaseVariable(
            variable=tmp_result_variable, source_ref=internal_source_ref
        ),
        StatementReleaseVariable(
            variable=tmp_iter_variable, source_ref=internal_source_ref
        ),
        StatementReleaseVariable(
            variable=tmp_item_variable, source_ref=internal_source_ref
        ),
    )

    # iter(args); result = {}; loop; return result
    tried = makeStatementsSequenceFromStatements(
        StatementAssignmentVariable(
            variable=tmp_iter_variable,
            source=ExpressionBuiltinIter1(
                value=ExpressionVariableRef(
                    variable=args_variable, source_ref=internal_source_ref
                ),
                source_ref=internal_source_ref,
            ),
            source_ref=internal_source_ref,
        ),
        StatementAssignmentVariable(
            variable=tmp_result_variable,
            source=makeConstantRefNode(constant={}, source_ref=internal_source_ref),
            source_ref=internal_source_ref,
        ),
        StatementLoop(loop_body=loop_body, source_ref=internal_source_ref),
        StatementReturn(
            expression=ExpressionTempVariableRef(
                variable=tmp_result_variable, source_ref=internal_source_ref
            ),
            source_ref=internal_source_ref,
        ),
    )

    result.setChild(
        "body",
        makeStatementsSequenceFromStatement(
            makeTryFinallyStatement(
                provider=result,
                tried=tried,
                final=final,
                source_ref=internal_source_ref,
            )
        ),
    )

    return result
def buildDictionaryUnpackingArgs(provider, keys, values, source_ref):
    """Build the argument expressions for the dict-unpacking helper.

    A ``None`` key means a ``**`` entry: the value is passed through
    directly.  Every other key/value pair becomes a one-entry dict so
    the helper can merge everything uniformly.
    """
    result = []

    for key, value in zip(keys, values):
        # TODO: We could be a lot cleverer about the dictionaries for
        # non-starred arguments, but lets get this to work first.
        if key is None:
            result.append(buildNode(provider, value, source_ref))
            continue

        if type(key) is str:
            key_node = makeConstantRefNode(constant=key, source_ref=source_ref)
        else:
            key_node = buildNode(provider, key, source_ref)

        result.append(
            makeExpressionMakeDict(
                pairs=(
                    ExpressionKeyValuePair(
                        key=key_node,
                        value=buildNode(provider, value, source_ref),
                        source_ref=source_ref,
                    ),
                ),
                source_ref=source_ref,
            )
        )

    return result
def buildDictionaryUnpacking(provider, node, source_ref):
    """Reformulate a dict literal containing ``**`` unpacking into a
    call of the internal ``_unpack_dict`` helper with the pieces as a
    tuple argument."""
    helper_args = buildDictionaryUnpackingArgs(
        provider, node.keys, node.values, source_ref
    )

    helper_creation = ExpressionFunctionCreation(
        function_ref=ExpressionFunctionRef(
            function_body=getDictUnpackingHelper(), source_ref=source_ref
        ),
        defaults=(),
        kw_defaults=None,
        annotations=None,
        source_ref=source_ref,
    )
    args_tuple = makeExpressionMakeTuple(helper_args, source_ref)

    result = ExpressionFunctionCall(
        function=helper_creation,
        values=(args_tuple,),
        source_ref=source_ref,
    )

    # Point the call at the last argument's source for compatibility.
    result.setCompatibleSourceReference(helper_args[-1].getCompatibleSourceReference())

    return result
|
|
#!/usr/bin/env python3
from netcdfTools import *
import sys
import argparse
import numpy as np
from utilities import filesFromList, writeLog
'''
Description:
Author: Mikko Auvinen
mikko.auvinen@helsinki.fi
University of Helsinki &
Finnish Meteorological Institute
'''
#==========================================================#
# Command-line interface.
parser = argparse.ArgumentParser(prog='extractReynoldsStressNetCdf.py')
parser.add_argument("fileKey", default=None,\
  help="Search string for collecting files.")
parser.add_argument("-o", "--outstr",type=str, default="RS",\
  help="Prefix for the output NETCDF file. Default=RS.")
parser.add_argument("-vn", "--vnames",type=str, nargs=3, default=['up','vp','wp'],\
  help="Names of the V or V^prime comps in (x,y,z)-order. Default = ['up','vp','wp'].")
parser.add_argument("-np", "--notPrimes", action="store_true", default=False,\
  help="Input data not as V^prime. They should be computed.")
parser.add_argument("-sn", "--sname",type=str, default=None,\
  help="Name of scalar s^prime. Default = None.")
parser.add_argument("-nt", "--ntimeskip", type=int, default=0,\
  help="Skip <nt> number of time steps. Default = 0.")
parser.add_argument("-c", "--coarse", type=int, default=1,\
  help="Coarsening level. Int > 1. Default = 1.")
args = parser.parse_args()
writeLog( parser, args )
#==========================================================#

# Initial renaming operations and variable declarations
fileKey   = args.fileKey
outstr    = args.outstr
vnames    = args.vnames      # velocity component names, (x,y,z) order
sname     = args.sname       # optional scalar name
notPrimes = args.notPrimes   # True => fluctuations must be computed here
nt        = args.ntimeskip   # NOTE(review): read but not applied below — confirm intent
cl        = abs(int(args.coarse))

'''
Establish two boolean variables which indicate whether the created variable is an
independent or dependent variable in function createNetcdfVariable().
'''
parameter = True; variable = False

# Obtain a list of files to include.
fileNos, fileList = filesFromList( fileKey+'*' )
# Process each matched file independently; one output NETCDF per input.
for fn in fileNos:
  fileout = outstr+fileList[fn].split('_')[-1]

  # = = = = = = = = = = = = = = = = = = = = = = = = = = = = = #
  # Read in data.
  dataDict = read3dDataFromNetCDF( fileList[fn] , vnames[0], cl )
  up = dataDict['v']

  # Commented out temporarily by Mikko #
  # NOTE(review): vp is still referenced below (rotation branch and scalar
  # flux vs); with this read disabled those paths raise NameError — confirm.
  '''
  dataDict = read3dDataFromNetCDF( fileList[fn] , vnames[1], cl )
  vp = dataDict['v']
  '''

  dataDict = read3dDataFromNetCDF( fileList[fn] , vnames[2], cl )
  wp = dataDict['v']

  if( notPrimes ):
    # Perform coord. rotation for horizontal components
    um = np.mean( up, axis=(0) ); vm = np.mean( vp , axis=(0) )
    a = np.arctan( vm/(um+1.e-5) )   # rotation angle; 1.e-5 guards divide-by-zero
    u1 = up * np.cos(a) + vp * np.sin(a) # Streamwise comp.
    v1 =-up * np.sin(a) + vp * np.cos(a) # Spanwise comp.
    up = u1; vp = v1
    # Subtract temporal means to obtain the fluctuating (prime) components.
    up -= um
    vp -= vm
    wp -= np.mean( wp, axis=(0) )

  # Coords and time:
  x = dataDict['x']; y = dataDict['y']; z = dataDict['z']
  time = dataDict['time']; time_dim = len(time)
  dataDict = None   # release memory before allocating output arrays

  # = = = = = = = = = = = = = = = = = = = = = = = = = = = = = #
  # Create a NETCDF output dataset (dso) for writing out the data.
  dso = netcdfOutputDataset( fileout )

  # Create the output independent variables right away and empty memory.
  tv = createNetcdfVariable( dso, time,'time', time_dim,'s','f4',('time',), parameter )
  time = None

  xv = createNetcdfVariable( dso, x , 'x' , len(x) , 'm', 'f4', ('x',) , parameter )
  x = None

  yv = createNetcdfVariable( dso, y , 'y' , len(y) , 'm', 'f4', ('y',) , parameter )
  y = None

  zv = createNetcdfVariable( dso, z , 'z' , len(z) , 'm', 'f4', ('z',) , parameter )
  z = None

  if( notPrimes ):
    # Rotated instantaneous and temporal-mean velocity components.
    u1o = createNetcdfVariable(\
      dso, up, 'u1', time_dim, 'm s^(-1)', 'f4',('time','z','y','x',) , variable )
    u2o = createNetcdfVariable(\
      dso, vp, 'u2', time_dim, 'm s^(-1)', 'f4',('time','z','y','x',) , variable )
    u1mo = createNetcdfVariable(\
      dso, um, 'um1', time_dim, 'm s^(-1)', 'f4',('z','y','x',) , variable )
    u2mo = createNetcdfVariable(\
      dso, vm, 'um2', time_dim, 'm s^(-1)', 'f4',('z','y','x',) , variable )

  # First resolved TKE:
  # NOTE(review): spanwise (vp) contribution omitted while the vp read is disabled.
  tns = 0.5*( up**2 + wp**2 ) ## 0.5*( up**2 + vp**2 + wp**2 )
  tkeo = createNetcdfVariable(\
    dso, tns, 'tns', time_dim, 'm^2 s^(-2)', 'f4',('time','z','y','x',) , variable )
  tns = None

  tke = 0.5*( np.mean(up**2, axis=0) + np.mean(wp**2, axis=0) ) #0.5*( np.mean(up**2, axis=0) + np.mean(vp**2, axis=0) + np.mean(wp**2, axis=0) )
  rtke = createNetcdfVariable(\
    dso, tke, 'tke', time_dim, 'm^2 s^(-2)', 'f4',('z','y','x',) , variable )
  tke = None

  # uu: instantaneous covariance and its temporal mean.
  uu = up * up
  uuo = createNetcdfVariable(\
    dso, uu, 'cov_uu', time_dim, 'm^2 s^(-2)', 'f4',('time','z','y','x',) , variable )
  ruu = createNetcdfVariable(\
    dso, np.mean(uu, axis=0), 'r_uu', time_dim, 'm^2 s^(-2)', 'f4',('z','y','x',) , variable )
  uu = None

  '''
  # uv
  uv = up * vp
  uvo = createNetcdfVariable(\
    dso, uv, 'cov_uv', time_dim, 'm^2 s^(-2)', 'f4',('time','z','y','x',) , variable )
  ruv = createNetcdfVariable(\
    dso, np.mean(uv, axis=0), 'r_uv', time_dim, 'm^2 s^(-2)', 'f4',('z','y','x',) , variable )
  uv = None
  '''

  # uw: vertical momentum flux.
  uw = up * wp
  uwo = createNetcdfVariable(\
    dso, uw, 'cov_uw', time_dim, 'm^2 s^(-2)', 'f4',('time','z','y','x',) , variable )
  ruw = createNetcdfVariable(\
    dso, np.mean(uw, axis=0), 'r_uw', time_dim, 'm^2 s^(-2)', 'f4',('z','y','x',) , variable )
  uw = None

  '''
  # vv
  vv = vp * vp
  vvo = createNetcdfVariable(\
    dso, vv, 'cov_vv', time_dim, 'm^2 s^(-2)', 'f4',('time','z','y','x',) , variable )
  rvv = createNetcdfVariable(\
    dso, np.mean(vv, axis=0), 'r_vv', time_dim, 'm^2 s^(-2)', 'f4',('z','y','x',) , variable )
  vv = None

  vw = vp * wp
  vwo = createNetcdfVariable(\
    dso, vw, 'cov_vw', time_dim, 'm^2 s^(-2)', 'f4',('time','z','y','x',) , variable )
  rvw = createNetcdfVariable(\
    dso, np.mean(vw, axis=0), 'r_vw', time_dim, 'm^2 s^(-2)', 'f4',('z','y','x',) , variable )
  vw = None
  '''

  # ww: vertical velocity variance.
  ww = wp * wp
  wwo = createNetcdfVariable(\
    dso, ww, 'cov_ww', time_dim, 'm^2 s^(-2)', 'f4',('time','z','y','x',) , variable )
  rww = createNetcdfVariable(\
    dso, np.mean(ww, axis=0), 'r_ww', time_dim, 'm^2 s^(-2)', 'f4',('z','y','x',) , variable )
  ww = None

  if( sname ):
    # Optional scalar fluxes (u's', v's', w's').
    dataDict = read3dDataFromNetCDF( fileList[fn] , sname, cl )
    sp = dataDict['v']
    if( notPrimes ):
      sp -= np.mean( sp, axis=(0) )
    dataDict = None

    sstr = sname[:-1] # Remove the 'p' (or similar) from the end.

    us = up * sp
    uso = createNetcdfVariable(\
      dso, us, 'cov_u'+sstr, time_dim, 'm s^(-1) []', 'f4',('time','z','y','x',), variable)
    rus = createNetcdfVariable(\
      dso, np.mean(us, axis=0), 'r_u'+sstr, time_dim, 'm s^(-1) []', 'f4',('z','y','x',), variable)
    us = None

    vs = vp * sp
    vso = createNetcdfVariable(\
      dso, vs, 'cov_v'+sstr, time_dim, 'm s^(-1) []', 'f4',('time','z','y','x',), variable)
    rvs = createNetcdfVariable(\
      dso, np.mean(vs, axis=0), 'r_v'+sstr, time_dim, 'm s^(-1) []', 'f4',('z','y','x',), variable)
    vs = None

    ws = wp * sp
    wso = createNetcdfVariable(\
      dso, ws, 'cov_w'+sstr , time_dim, 'm s^(-1) []', 'f4',('time','z','y','x',), variable)
    rws = createNetcdfVariable(\
      dso, np.mean(ws, axis=0), 'r_w'+sstr, time_dim, 'm s^(-1) []', 'f4',('z','y','x',), variable)
    ws = None

  # - - - - Done , finalize the output - - - - - - - - - -
  netcdfWriteAndClose( dso )
|
|
import xmlrpc.client
import ssl
import socket # Required for network/socket connections
import os # Required for Forking/child processes
import time # Required for sleep call
import threading # Required for communication sub-threads
import pymysql
import server_monitor as myServer
import certs.gencert as gencert
import config
import logging
from logging.config import fileConfig
# Load logging config
fileConfig('setup/logging.conf')
log = logging.getLogger(__name__)
# Global Variables -- Don't change. [No need to change.]
CERTFILE = "certs/domains/local.cert" # Placeholder; updated when executed
KEYFILE = "certs/domains/local.key" # Default; updated when executed
hostIP = "localhost" # Default; updated when executed
admin_selected = False
# Return ip address of local host where server is running
def getMyIP():
    """Return the primary IP address of this host.

    Connects a UDP socket toward a public resolver (no packets are sent
    by a UDP connect) and reads back the local address the OS chose for
    that route.
    """
    log.info('Getting Host ip address.')
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        s.connect(("8.8.8.8", 53))
        ipAdd = s.getsockname()[0]
    finally:
        # Fix: release the socket even if connect()/getsockname() raises.
        s.close()
    log.debug('Socket closed: ipAdd=%s' % ipAdd)
    return ipAdd
# Return host name/fqdn of based on give ip address
def findHostName(ipAddress):
log.info('Finding Host Name based on ip address')
try:
log.debug('Trying now...')
name, alias, addresslist = socket.gethostbyaddr(ipAddress)
log.debug('Returning name: %s' % name)
return name
except socket.herror:
log.exception("Hostname/FQDN not found: Hostname/FQDN Required. "
"Correct by adding record in DNS server or within local"
"hosts file (/etc/hosts) and then restart controller.")
return "None"
# Create SSL certs for current ip address if not already present
def verifyCerts():
global CERTFILE
global KEYFILE
# Determine file path based on current ip address
CERTFILE = ''.join([config.certPath, config.rootDomain, ".cert"])
KEYFILE = ''.join([config.certPath, config.rootDomain, ".key"])
log.debug("CERTFILE: %s" % (CERTFILE))
log.debug("KEYFILE: %s" % (KEYFILE))
# If cert or key file not present, create new certs
if not os.path.isfile(CERTFILE) or not os.path.isfile(KEYFILE):
gencert.gencert(config.rootDomain)
log.info("Certfile(s) NOT present; new certs created.")
print("Certfile(s) NOT present; new certs created.")
else:
log.info("Certfiles Verified Present")
print("Certfiles Verified Present.")
# Start a thread child to run server connection as a daemon
def startServer():
log.info("Starting Server...")
# Now, start thread
log.debug("Starting new thread...")
t = threading.Thread(name="Monitor_ServerDaemon",
target=myServer.runServer,
args=(hostIP,
config.mntrServerPort,
CERTFILE,
KEYFILE
)
)
t.daemon = True
t.start()
log.debug("Thread started; end of startServer fn.")
# Check and Display the status of all child processes
def checkStatus():
log.debug("Checking Status of Threads...")
totalThreads = threading.active_count()
subThreads = totalThreads - 1
print("\nSub-Thread(s): %d" % (subThreads))
main_thread = threading.currentThread()
k = 1
for t in threading.enumerate():
if t is main_thread:
continue
print("Thread #%d:" % (k))
print("Name: %s" % (t.name))
print("Ident: %d" % (t.ident))
ans = "unknown"
if t.is_alive():
ans = "YES"
else:
ans = "NO"
print("Alive? %s\n" % (ans))
k = k+1
log.debug("End of checkStatus fn.")
# Display Status of all Hosts currently connected
def displayStatus():
log.debug("Displaying agents now")
print("Displaying agents currently connected...")
# Connect to database to query agents
log.debug("Connecting to database")
db = pymysql.connect(host=config.mysqlHost, port=config.mysqlPort,
user=config.mntrMysqlUser, passwd=config.mntrMysqlPwd,
db=config.mysqlDB)
cursor = db.cursor()
# Query to retrieve id/time of registration
sql = "SELECT distinct agent FROM status;"
agents = []
# Get agents
try:
# Execute the SQL command
cursor.execute(sql)
# Fetch all the rows in a list of lists
results = cursor.fetchall()
for row in results:
thisAgent = row[0]
agents.append(thisAgent)
log.debug("Agent Received as: %s" % (thisAgent))
except:
log.exception("ERROR in db query>> %s" % sql)
print("FOUND %d agent(s) monitored.\n" % len(agents))
# Query to retrieve each agents's data
for k in range(len(agents)):
sql = "SELECT agent, status, timestamp, alias, id FROM "\
"status where agent = '%s' ORDER BY id "\
"DESC LIMIT 1" % (agents[k])
# Get host info
try:
# Execute the SQL command
cursor.execute(sql)
# Fetch all the rows in a list of lists
results = cursor.fetchall()
print("Agent #%d" % (k + 1))
for row in results:
thisAgent = row[0]
thisStatus = row[1]
thisTime = row[2]
thisAlias = row[3]
thisID = row[4]
print("Agent: %s" % thisAgent)
print("Alias: %s" % thisAlias)
print("Status: %s" % thisStatus)
print("Time Connected: %s" % thisTime)
print("ID Number: %s\n" % thisID)
log.debug("Host %d Displayed" % (k + 1))
except:
log.exception("ERROR in db query>> %s" % sql)
# Disconnect from database
db.close()
# Simple test function to ensure communication is working
def mathTest():
log.debug("Start of Math Test Function...")
myContext = ssl.create_default_context()
myContext.load_verify_locations(config.CACERTFILE)
myurl = ''.join(['https://', config.agntHostName, ':',
str(config.agntServerPort)])
with xmlrpc.client.ServerProxy(myurl,
context=myContext) as proxy:
try:
print("5 + 9 is %d" % (proxy.add(5, 9)))
print("21 x 3 is: %d" % (proxy.multiply(21, 3)))
except ConnectionRefusedError:
log.warning("Connection to Agent FAILED")
print("Connection to Agent Server FAILED:\n",
"Is Agent listening? Confirm connection",
"settings and try again.")
print("Settings used: '%s'" % myurl)
except:
log.warning("Connection to Agent FAILED")
print("Connection Failed. Suspected incorrect URL.")
print("Settings used: '%s'" % myurl)
# Quit gracefully after terminting all child processes
def myQuit():
log.info("Monitor Exiting. Goodbye.")
print("Monitor Exiting. Goodbye.\n")
raise SystemExit
# Stop Controller Server
def stopServer():
log.debug("Stopping Monitor Server.")
# TODO Determine if it is possible to stop a daemon thread
# without stopping the whole program; for now, this just
# ends the entire program
print("Monitor Server Stopping.")
myQuit()
def invalid(choice):
    """Report an unrecognized menu selection."""
    log.debug("Invalid choice: %s" % choice)
    print("INVALID CHOICE!")
def adminMenu():
    """Display the admin menu and return the user's raw choice string."""
    log.debug("Displaying admin menu")
    print("\nAdmin Menu:")
    print("a) Connection Test with Agent (simple math test)")
    # Fix: closed the unbalanced parenthesis in the menu text.
    print("b) SSL Verification (verify certificates)")
    print("c) STOP Monitor Server (program will exit)")
    print("d) START* Monitor Server (*only if not running already)")
    print("9) BACK (return to 'Menu')")
    return input("Make a Choice\n>>> ")
def adminSelection():
    """Read one admin-menu choice and dispatch the matching action."""
    global admin_selected
    adminChoice = adminMenu()

    # Simple choices map directly to zero-argument actions.
    actions = {
        "a": mathTest,
        "b": verifyCerts,
        "c": stopServer,
        "d": startServer,
    }

    if adminChoice in actions:
        actions[adminChoice]()
    elif adminChoice == "9":
        log.debug("Admin is De-selected")
        print("Back to Main Menu...")
        admin_selected = False
    elif adminChoice == "r":
        # Refresh Menu (do nothing)
        log.info("Refreshing Menu")
    elif adminChoice in ["q", ":q"]:
        myQuit()
    else:
        invalid(adminChoice)
def menu():
    """Display the main menu and return the user's raw choice string."""
    log.debug("Displaying menu")
    for line in ("\n\nMENU[Monitor]:",
                 "1) Check MONITOR server status",
                 "2) Display Current Status",
                 "9) ADMIN MENU",
                 "q) QUIT"):
        print(line)
    return input("Make a Choice\n>>> ")
def myMenu():
    """Show the appropriate menu (main or admin) and act on the selection."""
    global admin_selected

    # While the admin sub-menu is active, keep routing to the admin handler.
    choice = "9" if admin_selected else menu()

    if choice == "1":
        checkStatus()
    elif choice == "2":
        displayStatus()
    elif choice == "9":
        admin_selected = True
        log.debug("Admin is Selected")
        adminSelection()
    elif choice in ["q", ":q"]:
        myQuit()
    elif choice == "r":
        # Refresh Menu (do nothing)
        log.info("Refreshing Menu")
    else:
        invalid(choice)
# Start of Main
if __name__ == '__main__':
    log.info("Starting Monitor Main.")

    # Discover the local address/hostname and report the process id.
    hostIP = getMyIP()
    verifyHostName = findHostName(hostIP)
    pid = os.getpid()
    print("Host IP: %s" % (hostIP))
    print("Hostname: %s" % (verifyHostName))
    log.debug("PID: %d" % (pid))

    if verifyHostName == "None":
        log.debug("Hostname not found: Returned 'None'")
    elif verifyHostName in [config.ctlrHostName, config.mntrHostName]:
        # Only run when this host matches a configured controller/monitor name.
        log.debug("HostName verified.")
        log.debug("Verifying certificates.")

        # Verify certificates present prior to displaying menu
        verifyCerts()

        # Starting Server
        startServer()
        time.sleep(2)   # give the server thread a moment to come up

        # Display Menu [repeatedly] for user
        while True:
            myMenu()
            time.sleep(1)
    else:
        log.error("Hostname incorrect. "
                  "Hostname Found: %s; Hostname "
                  "Required: %s." % (verifyHostName, config.mntrHostName))
|
|
"""Functions defined in TVM."""
# pylint: disable=invalid-name,unused-import,redefined-builtin
from __future__ import absolute_import as _abs
from numbers import Integral as _Integral
from ._ffi.base import string_types
from ._ffi.node import register_node, NodeBase
from ._ffi.node import convert_to_node as _convert_to_node
from ._ffi.function import Function
from ._ffi.function import _init_api, register_func, get_global_func, extract_ext_funcs
from ._ffi.function import convert_to_tvm_func as _convert_tvm_func
from ._ffi.runtime_ctypes import TVMType
from . import _api_internal
from . import make as _make
from . import expr as _expr
from . import tensor as _tensor
from . import schedule as _schedule
from . import container as _container
from . import tag as _tag
# Shorthand dtype name strings used throughout this API.
int8 = "int8"
int32 = "int32"
float32 = "float32"
handle = "handle"
def min_value(dtype):
    """Return the minimum representable value of *dtype* as a constant expression."""
    return _api_internal._min_value(dtype)
def max_value(dtype):
    """Return the maximum representable value of *dtype* as a constant expression."""
    return _api_internal._max_value(dtype)
def const(value, dtype=None):
    """Construct a constant expression.

    When dtype is omitted it is inferred from the Python value:
    integral values map to 'int32', everything else to 'float32'.
    """
    if dtype is None:
        dtype = 'int32' if isinstance(value, _Integral) else 'float32'
    return _api_internal._const(value, dtype)
def convert(value):
    """Convert value to TVM node or function.

    Parameters
    ----------
    value : python value

    Returns
    -------
    tvm_val : Node or Function
        Converted value in TVM
    """
    # Values that are already TVM objects pass through unchanged.
    if isinstance(value, (Function, NodeBase)):
        return value

    # Callables become packed functions. This check must come after the
    # TVM-object test above, since the order of the branches matters.
    if callable(value):
        return _convert_tvm_func(value)

    return _convert_to_node(value)
def load_json(json_str):
    """Load tvm object from json_str.

    Parameters
    ----------
    json_str : str
        The json string

    Returns
    -------
    node : Node
        The loaded tvm node.
    """
    return _api_internal._load_json(json_str)
def save_json(node):
    """Save tvm object as a json string.

    Parameters
    ----------
    node : Node
        A TVM Node object to be saved.

    Returns
    -------
    json_str : str
        Saved json string.
    """
    return _api_internal._save_json(node)
def var(name="tindex", dtype=int32):
    """Create a new variable with specified name and dtype

    Parameters
    ----------
    name : str
        The name

    dtype : str
        The data type (a dtype string, e.g. "int32"; defaults to int32).

    Returns
    -------
    var : Var
        The result symbolic variable.
    """
    return _api_internal._Var(name, dtype)
def any(*args):
    """Create a new expression that is the logical OR of all boolean
    conditions given in the arguments.

    Parameters
    ----------
    args : list
        List of symbolic boolean expressions

    Returns
    -------
    expr: Expr
        Expression
    """
    if not args:
        raise ValueError("Any must take at least 1 argument")
    if len(args) == 1:
        return args[0]
    # Left-fold the remaining conditions into a chain of Or nodes.
    ret = args[0]
    for cond in args[1:]:
        ret = _make.Or(ret, cond)
    return ret
def all(*args):
    """Create a new expression that is the logical AND of all boolean
    conditions given in the arguments.

    Parameters
    ----------
    args : list
        List of symbolic boolean expressions

    Returns
    -------
    expr: Expr
        Expression
    """
    if not args:
        # Fix: the message previously said "Any" (copy/paste from any()).
        raise ValueError("All must take at least 1 argument")
    if len(args) == 1:
        return args[0]
    # Left-fold the conditions into a chain of And nodes.
    ret = _make.And(args[0], args[1])
    for i in range(2, len(args)):
        ret = _make.And(ret, args[i])
    return ret
def placeholder(shape, dtype=None, name="placeholder"):
    """Construct an empty tensor object.

    Parameters
    ----------
    shape: Tuple of Expr
        The shape of the tensor

    dtype: str, optional
        The data type of the tensor (defaults to float32)

    name: str, optional
        The name hint of the tensor

    Returns
    -------
    tensor: Tensor
        The created tensor
    """
    # Promote a single dimension expression to a one-element shape.
    if isinstance(shape, _expr.Expr):
        shape = (shape,)
    if dtype is None:
        dtype = float32
    return _api_internal._Placeholder(shape, dtype, name)
def compute(shape, fcompute, name="compute", tag=""):
    """Construct a new tensor by computing over the shape domain.

    The compute rule is result[axis] = fcompute(axis)

    Parameters
    ----------
    shape: Tuple of Expr
        The shape of the tensor

    fcompute: lambda function of indices-> value
        Specifies the input source expression

    name: str, optional
        The name hint of the tensor

    tag: str, optional
        Additional tag information about the compute.

    Returns
    -------
    tensor: Tensor
        The created tensor
    """
    # Inherit the tag from an enclosing tag scope; explicit nesting is rejected.
    if _tag.TagScope.current is not None:
        if tag != "":
            raise ValueError("nested tag is not allowed for now")
        tag = _tag.TagScope.current.tag
    shape = (shape,) if isinstance(shape, _expr.Expr) else shape
    ndim = len(shape)
    # Derive iteration-variable names from fcompute's parameter names;
    # fall back to i0, i1, ... for a zero-argument callable.
    code = fcompute.__code__
    if fcompute.__code__.co_argcount == 0:
        arg_names = ["i%d" % i for i in range(ndim)]
    else:
        arg_names = code.co_varnames[:code.co_argcount]

    if ndim != len(arg_names):
        raise ValueError("fcompute do not match dimension, ndim=%d" % ndim)

    # One IterVar (iter_type 0) per output dimension, spanning [0, extent).
    dim_var = [_IterVar((0, s), x, 0) for x, s in zip(arg_names, shape)]
    body = fcompute(*[v.var for v in dim_var])
    # A plain expression body is normalized to a one-element list.
    if not isinstance(body, (list, tuple)):
        body = [body]
    body = convert(body)
    op_node = _api_internal._ComputeOp(
        name, tag, dim_var, body)
    num = op_node.num_outputs
    outputs = tuple(op_node.output(i) for i in range(num))
    return outputs[0] if num == 1 else outputs
def scan(init, update, state_placeholder, inputs=None, name="scan", tag=""):
    """Construct new tensors by scanning over axis.

    Parameters
    ----------
    init: Tensor or list of Tensor
        The initial condition of first init.shape[0] timestamps

    update: Tensor or list of Tensor
        The update rule of the scan given by symbolic tensor.

    state_placeholder: Tensor or list of Tensor
        The placeholder variables used by update.

    inputs: Tensor or list of Tensor, optional
        The list of inputs to the scan. This is not required, but can
        be useful for the compiler to detect scan body faster.

    name: str, optional
        The name hint of the tensor

    tag: str, optional
        Additional tag information about the scan.

    Returns
    -------
    tensor: Tensor or list of Tensors
        The created tensor or tuple of tensors if it contains multiple outputs.

    Example
    -------
    .. code-block:: python

      # The following code is equivalent to numpy.cumsum
      m = tvm.var("m")
      n = tvm.var("n")
      X = tvm.placeholder((m, n), name="X")
      s_state = tvm.placeholder((m, n))
      s_init = tvm.compute((1, n), lambda _, i: X[0, i])
      s_update = tvm.compute((m, n), lambda t, i: s_state[t-1, i] + X[t, i])
      res = tvm.scan(s_init, s_update, s_state, X)
    """
    # Inherit the tag from an enclosing tag scope; explicit nesting is rejected.
    if _tag.TagScope.current is not None:
        if tag != "":
            raise ValueError("nested tag is not allowed for now")
        tag = _tag.TagScope.current.tag
    # Normalize every tensor argument to a list.
    if isinstance(init, _tensor.Tensor):
        init = [init]
    if isinstance(update, _tensor.Tensor):
        update = [update]
    if isinstance(state_placeholder, _tensor.Tensor):
        state_placeholder = [state_placeholder]
    if isinstance(inputs, _tensor.Tensor):
        inputs = [inputs]
    if inputs is None:
        inputs = []
    if len(init) != len(update) or len(init) != len(state_placeholder):
        raise ValueError("init, update, state_placeholder must have same length")
    # Time axis spans from the init extent to the update extent.
    # NOTE(review): iter_type 3 presumably denotes the ordered scan axis — confirm.
    axis = _IterVar((init[0].shape[0], update[0].shape[0]), "%s.idx" % name, 3)
    op = _api_internal._ScanOp(name, tag, axis, init, update,
                               state_placeholder, inputs)
    res = [op.output(i) for i in range(len(update))]
    return res[0] if len(res) == 1 else res
def extern(shape, inputs, fcompute, name="extern", dtype=None, tag=""):
    """Compute several tensor via extern function.

    Parameters
    ----------
    shape: tuple or list of tuples.
        The shape of the outputs.

    inputs: list of Tensor
        The inputs

    fcompute: lambda function of inputs, outputs-> stmt
        Specifies the IR statement to do the computation.
        See the following note for function signature of fcompute

        .. note::
             **Parameters**

             - **ins** (list of :any:`Buffer`) - Placeholder for each inputs
             - **outs** (list of :any:`Buffer`) - Placeholder for each outputs

             **Returns**

             - **stmt** (:any:`Stmt`) - The statement that carries out array computation.

    name: str, optional
        The name hint of the tensor

    dtype: str or list of str, optional
        The data types of outputs,
        by default dtype will be same as inputs.

    Returns
    -------
    tensor: Tensor or list of Tensors
        The created tensor or tuple of tensors if it contains multiple outputs.

    Example
    -------
    In the code below, C is generated by calling external PackedFunc
    `tvm.contrib.cblas.matmul`

    .. code-block:: python

        A = tvm.placeholder((n, l), name='A')
        B = tvm.placeholder((l, m), name='B')
        C = tvm.extern((n, m), [A, B],
                       lambda ins, outs: tvm.call_packed(
                          "tvm.contrib.cblas.matmul",
                            ins[0], ins[1], outs[0], 0, 0), name="C")
    """
    # Inherit the tag from an enclosing tag scope; explicit nesting is rejected.
    if _tag.TagScope.current is not None:
        if tag != "":
            raise ValueError("nested tag is not allowed for now")
        tag = _tag.TagScope.current.tag
    # Normalize to a list of shapes (each shape a tuple/list of dims).
    shape = (shape,) if isinstance(shape, (_expr.Expr, _Integral)) else shape
    shape = [shape] if isinstance(shape[0], (_expr.Expr, _Integral)) else shape
    input_placeholders = []
    output_placeholders = []
    types = set()
    for t in inputs:
        if not isinstance(t, _tensor.Tensor):
            raise ValueError("expect inputs to be tensor")
        # Declare a buffer placeholder matching each input tensor.
        input_placeholders.append(
            decl_buffer(t.shape, t.dtype, t.op.name))
        types.add(t.dtype)

    if dtype is None:
        # Infer a single output dtype from the inputs when unambiguous.
        if len(types) != 1:
            raise ValueError("Cannot infer output type, please provide dtype argument")
        infered_type = types.pop()
        dtype = [infered_type for _ in shape]
    if isinstance(dtype, str):
        dtype = [dtype]

    # Declare one output buffer placeholder per output shape.
    for shp, dt in zip(shape, dtype):
        output_placeholders.append(decl_buffer(shp, dt, name))
    # The user callback builds the IR statement from the buffer placeholders.
    body = fcompute(input_placeholders, output_placeholders)
    if isinstance(body, _expr.Expr):
        body = _make.Evaluate(body)

    op = _api_internal._ExternOp(name, tag, inputs, input_placeholders,
                                 output_placeholders, body)
    res = [op.output(i) for i in range(len(output_placeholders))]
    return res[0] if len(res) == 1 else res
def decl_buffer(shape,
                dtype=None,
                name="buffer",
                data=None,
                strides=None,
                elem_offset=None,
                scope="",
                data_alignment=-1,
                offset_factor=0):
    """Declare a new symbolic buffer.

    Normally buffer is created automatically during lower and build.
    This is only needed if user want to specify their own buffer layout.

    See the note below for detailed discussion on usage of buffer.

    Parameters
    ----------
    shape : tuple of Expr
        The shape of the buffer.

    dtype : str, optional
        The data type of the buffer.

    name : str, optional
        The name of the buffer.

    data : Var, optional
        The data pointer in the buffer.

    strides: array of Expr
        The stride of the buffer.

    elem_offset: Expr, optional
        The beginning offset of the array to data.
        In terms of number of elements of dtype.

    scope: str, optional
        The storage scope of the buffer, if not global.
        If scope equals empty string, it means it is global memory.

    data_alignment: int, optional
        The alignment of data pointer in bytes.
        If -1 is passed, the alignment will be set to TVM's internal default.

    offset_factor: int, optional
        The factor of elem_offset field, when set,
        elem_offset is required to be multiple of offset_factor.
        If 0 is passed, the alignment will be set to 1.
        if non-zero is passed, we will created a Var for elem_offset if elem_offset is not None.

    Returns
    -------
    buffer : Buffer
        The created buffer

    Note
    ----
    Buffer data structure reflects the DLTensor structure in dlpack.
    While DLTensor data structure is very general, it is usually helpful
    to create function that only handles specific case of data structure
    and make compiled function benefit from it.

    If user pass strides and elem_offset is passed as None
    when constructing the function, then the function will be specialized
    for the DLTensor that is compact and aligned.
    If user pass a fully generic symbolic array to the strides,
    then the resulting function becomes fully generic.
    """
    # Normalize shape to a tuple and fill in defaults.
    shape = (shape,) if isinstance(shape, (_expr.Expr, _Integral)) else shape
    dtype = float32 if dtype is None else dtype
    strides = () if strides is None else strides
    # A non-zero offset_factor implies a symbolic element offset variable.
    if offset_factor != 0 and elem_offset is None:
        elem_offset = var('%s_elem_offset' % name, shape[0].dtype)
    if data is None:
        # Default data pointer is a fresh handle-typed variable.
        data = var(name, "handle")
    return _api_internal._Buffer(
        data, dtype, shape, strides, elem_offset, name, scope,
        data_alignment, offset_factor)
def _IterVar(dom, name, iter_type, thread_tag=''):
    """Internal function to create IterVar

    Parameters
    ----------
    dom : Range
        The domain of iteration.

    name : str
        The name of iteration variable.

    iter_type : int
        The type of iteration.

    thread_tag : str
        The thread tag of the iteration variable.

    Returns
    -------
    iter_var : IterVar
        The result itervar
    """
    if dom is not None:
        # Accept (begin, end) pairs and convert them to a Range.
        if isinstance(dom, (list, tuple)):
            if len(dom) != 2:
                raise TypeError("need to be list of ranges")
            # NOTE(review): Range is not imported explicitly here; presumably it
            # is injected at module level by _init_api("tvm.api") — confirm.
            dom = Range(dom[0], dom[1])

        if not isinstance(dom, _container.Range):
            raise TypeError("dom need to be Range")
    name = name if name else 'iter'
    v = var(name)
    return _api_internal._IterVar(dom, v, iter_type, thread_tag)
def thread_axis(dom=None, tag='', name=''):
    """Create a new IterVar to represent thread index.

    Parameters
    ----------
    dom : Range or str
        The domain of iteration
        When str is passed, dom is set to None and str is used as tag

    tag : str, optional
        The thread tag

    name : str, optional
        The name of the var.

    Returns
    -------
    axis : IterVar
        The thread itervar.
    """
    # Support the single-string call form, e.g. thread_axis("threadIdx.x").
    if isinstance(dom, string_types):
        dom, tag = None, dom
    if not tag:
        raise ValueError("tag must be given as Positional or keyword argument")
    return _IterVar(dom, name or tag, 1, tag)
def reduce_axis(dom, name="rv"):
    """Create a new IterVar for reduction.

    Parameters
    ----------
    dom : Range
        The domain of iteration.

    name : str
        The name of the variable.

    Returns
    -------
    axis : IterVar
        An iteration variable representing the value.
    """
    # iter_type 2 marks a reduction ("commutative reduce") axis.
    return _IterVar(dom, name, 2)
def select(cond, t, f):
    """Construct a select branch

    Parameters
    ----------
    cond : Expr
        The condition

    t : Expr
        The result expression if cond is true.

    f : Expr
        The result expression if cond is false.

    Returns
    -------
    node : Node
        The tvm.expr.Select node
    """
    # All three operands are converted so plain Python values are accepted.
    return _make.Select(convert(cond), convert(t), convert(f))
def comm_reducer(fcombine, fidentity, name="reduce"):
    """Create a commutative reducer for reduction.

    Parameters
    ----------
    fcombine : function(Expr -> Expr -> Expr)
        A binary function which takes two Expr as input to return a Expr.

    fidentity : function(str -> Expr)
        A function which takes a type string as input to return a const Expr.

    Returns
    -------
    reducer : function
        A function which creates a reduce expression over axis.
        There are two ways to use it:

        1. accept (expr, axis, where) to produce an Reduce Expr on
           specified axis;
        2. simply use it with multiple Exprs.

    Example
    -------
    .. code-block:: python

        n = tvm.var('n')
        m = tvm.var('m')
        mysum = tvm.comm_reducer(lambda x, y: x+y,
            lambda t: tvm.const(0, dtype=t), name="mysum")
        A = tvm.placeholder((n, m), name='A')
        k = tvm.reduce_axis((0, m), name='k')
        B = tvm.compute((n,), lambda i: mysum(A[i, k], axis=k), name='B')
    """
    def _reduce_directly(*args):
        # Mode 2: fold fcombine over plain expressions, no Reduce node.
        num = len(args)
        # process `where` is None
        if num == 3 and args[2] is None:
            num = 2
        res = args[0]
        for i in range(num-1):
            res = fcombine(res, args[i+1])
        return res

    def _make_reduce(expr, axis, where=None):
        # Mode 1: build Reduce node(s) over the given axis.
        code = fcombine.__code__
        assert fcombine.__code__.co_argcount == 2
        expr = convert(expr)
        if isinstance(expr, _container.Array):
            # Tuple reduction: per-element lhs/rhs vars named after
            # fcombine's parameter names, suffixed by the element index.
            size = len(expr)
            larr = []
            rarr = []
            dtypes = []
            for i in range(size):
                dtype = expr[i].dtype
                dtypes.append(dtype)
                lname = code.co_varnames[0] + '_' + str(i)
                larr.append(var(lname, dtype))
                rname = code.co_varnames[1] + '_' + str(i)
                rarr.append(var(rname, dtype))
            lhs = convert(larr)
            rhs = convert(rarr)
            result = fcombine(lhs, rhs)
            id_elem = fidentity(*dtypes)
        else:
            # Scalar reduction: wrap everything into length-1 arrays so both
            # cases share the Reduce construction below.
            assert isinstance(expr, _expr.Expr)
            size = 1
            dtype = expr.dtype
            lvar = var(code.co_varnames[0], dtype)
            rvar = var(code.co_varnames[1], dtype)
            result = [fcombine(lvar, rvar)]
            id_elem = [fidentity(dtype)]
            lhs = convert([lvar])
            rhs = convert([rvar])
            expr = convert([expr])
        result = convert(result)
        id_elem = convert(id_elem)
        combiner = _make.CommReducer(lhs, rhs, result, id_elem)
        axis = convert(axis if isinstance(axis, (list, tuple)) else [axis])
        if where is None:
            where = convert(True)
        # One Reduce node per tuple element, all sharing the same combiner.
        outputs = tuple(_make.Reduce(combiner, expr, axis, where, i)
                        for i in range(size))
        return outputs[0] if size == 1 else outputs

    def reducer(expr, axis, where=None, *args):
        # Dispatch: axis-reduction (mode 1) vs direct folding (mode 2).
        if isinstance(axis, (_schedule.IterVar, list, tuple)):
            assert not args
            return _make_reduce(expr, axis, where)
        if where is None:
            assert not args
            return _reduce_directly(expr, axis)
        return _reduce_directly(expr, axis, where, *args)

    # Per-reducer docstring, formatted with the reducer's name.
    doc_str = """Create a {0} expression over axis.

    Parameters
    ----------
    expr : Expr
        The source expression.
    axis : IterVar
        The reduction IterVar axis
    where : optional, Expr
        Filtering predicate of the reduction.
    Returns
    -------
    value : Expr
        The result value.

    Example
    -------
    .. code-block:: python

        m = tvm.var("m")
        n = tvm.var("n")
        A = tvm.placeholder((m, n), name="A")
        k = tvm.reduce_axis((0, n), name="k")

        # there are two way to use this {0} reducer:
        # mode 1, accept (expr, axis, where) to produce an Reduce Expr
        B = tvm.compute((m,), lambda i: tvm.{0}(A[i, k], axis=k), name="B")

        # mode 2, simply use it with multiple Exprs:
        {0}_res = tvm.{0}(m, n)
    """
    reducer.__doc__ = doc_str.format(name)
    return reducer
# Register the C++-backed API functions into this module's namespace.
_init_api("tvm.api")

#pylint: disable=unnecessary-lambda
# Prebuilt commutative reducers. Note the identities for min/max are the
# opposite extreme values of the element dtype.
sum = comm_reducer(lambda x, y: x+y, lambda t: const(0, dtype=t), name="sum")
min = comm_reducer(lambda x, y: _make.Min(x, y), max_value, name='min')
max = comm_reducer(lambda x, y: _make.Max(x, y), min_value, name='max')
|
|
"""Disassembler of Python byte code into mnemonics."""
import sys
import types
import collections
import io
from opcode import *
from opcode import __all__ as _opcodes_all
__all__ = ["code_info", "dis", "disassemble", "distb", "disco",
"findlinestarts", "findlabels", "show_code",
"get_instructions", "Instruction", "Bytecode"] + _opcodes_all
del _opcodes_all
# Types whose __dict__ members may themselves carry disassemblable code;
# dis() walks these when given a class or module.
_have_code = (types.MethodType, types.FunctionType, types.CodeType,
              classmethod, staticmethod, type)
# Opcode number of the f-string FORMAT_VALUE instruction; its argument is
# special-cased when building Instruction tuples.
FORMAT_VALUE = opmap['FORMAT_VALUE']
def _try_compile(source, name):
"""Attempts to compile the given source, first as an expression and
then as a statement if the first approach fails.
Utility function to accept strings in functions that otherwise
expect code objects
"""
try:
c = compile(source, name, 'eval')
except SyntaxError:
c = compile(source, name, 'exec')
return c
def dis(x=None, *, file=None):
    """Disassemble classes, methods, functions, generators, or code.

    With no argument, disassemble the last traceback.  Output is written
    via print() to *file* (default stdout).  Classes and modules are
    walked member by member; members that fail to disassemble are
    reported with "Sorry:" rather than aborting the walk.
    """
    if x is None:
        distb(file=file)
        return
    # Unwrap in stages toward a code object; the order matters (a bound
    # method exposes __func__, a function exposes __code__, a generator
    # exposes gi_code).
    if hasattr(x, '__func__'):  # Method
        x = x.__func__
    if hasattr(x, '__code__'):  # Function
        x = x.__code__
    if hasattr(x, 'gi_code'):  # Generator
        x = x.gi_code
    if hasattr(x, '__dict__'):  # Class or module
        # Sorted for deterministic output order.
        items = sorted(x.__dict__.items())
        for name, x1 in items:
            if isinstance(x1, _have_code):
                print("Disassembly of %s:" % name, file=file)
                try:
                    dis(x1, file=file)
                except TypeError as msg:
                    print("Sorry:", msg, file=file)
                print(file=file)
    elif hasattr(x, 'co_code'):  # Code object
        disassemble(x, file=file)
    elif isinstance(x, (bytes, bytearray)):  # Raw bytecode
        _disassemble_bytes(x, file=file)
    elif isinstance(x, str):  # Source code
        _disassemble_str(x, file=file)
    else:
        raise TypeError("don't know how to disassemble %s objects" %
                        type(x).__name__)
def distb(tb=None, *, file=None):
    """Disassemble a traceback (default: last traceback).

    Walks *tb* to its innermost frame and disassembles that frame's
    code, marking the instruction that was executing (tb_lasti).
    Raises RuntimeError when no traceback is given and the interpreter
    has not recorded one.
    """
    if tb is None:
        try:
            tb = sys.last_traceback
        except AttributeError:
            # `from None` suppresses the internal AttributeError context;
            # it is an implementation detail, not useful to the caller.
            raise RuntimeError("no last traceback to disassemble") from None
    while tb.tb_next: tb = tb.tb_next
    disassemble(tb.tb_frame.f_code, tb.tb_lasti, file=file)
# The inspect module interrogates this dictionary to build its
# list of CO_* constants. It is also used by pretty_flags to
# turn the co_flags field into a human readable list.
COMPILER_FLAG_NAMES = {
     1: "OPTIMIZED",
     2: "NEWLOCALS",
     4: "VARARGS",
     8: "VARKEYWORDS",
    16: "NESTED",
    32: "GENERATOR",
    64: "NOFREE",
   128: "COROUTINE",
   256: "ITERABLE_COROUTINE",
   512: "ASYNC_GENERATOR",
}
def pretty_flags(flags):
    """Return pretty representation of code flags.

    Known flag bits render as their CO_* name, unknown low bits as hex;
    any bits above bit 31 are lumped into one trailing hex value.
    Returns "" for flags == 0.
    """
    parts = []
    for bit in range(32):
        mask = 1 << bit
        if flags & mask:
            parts.append(COMPILER_FLAG_NAMES.get(mask, hex(mask)))
            flags ^= mask
        if not flags:
            # All bits accounted for (or flags was 0 to begin with).
            break
    else:
        # Bits remain beyond the 32 we inspected.
        parts.append(hex(flags))
    return ", ".join(parts)
def _get_code_object(x):
"""Helper to handle methods, functions, generators, strings and raw code objects"""
if hasattr(x, '__func__'): # Method
x = x.__func__
if hasattr(x, '__code__'): # Function
x = x.__code__
if hasattr(x, 'gi_code'): # Generator
x = x.gi_code
if isinstance(x, str): # Source code
x = _try_compile(x, "<disassembly>")
if hasattr(x, 'co_code'): # Code object
return x
raise TypeError("don't know how to disassemble %s objects" %
type(x).__name__)
def code_info(x):
    """Formatted details of methods, functions, or code.

    Accepts anything _get_code_object understands (method, function,
    generator, source string, or code object) and returns the
    multi-line summary built by _format_code_info.
    """
    return _format_code_info(_get_code_object(x))
def _format_code_info(co):
    """Render the standard multi-line textual summary of code object *co*."""
    lines = [
        "Name: %s" % co.co_name,
        "Filename: %s" % co.co_filename,
        "Argument count: %s" % co.co_argcount,
        "Kw-only arguments: %s" % co.co_kwonlyargcount,
        "Number of locals: %s" % co.co_nlocals,
        "Stack size: %s" % co.co_stacksize,
        "Flags: %s" % pretty_flags(co.co_flags),
    ]

    def _add_listing(header, fmt, items):
        # Append an indexed listing section only when *items* is non-empty.
        if items:
            lines.append(header)
            lines.extend(fmt % pair for pair in enumerate(items))

    _add_listing("Constants:", "%4d: %r", co.co_consts)
    _add_listing("Names:", "%4d: %s", co.co_names)
    _add_listing("Variable names:", "%4d: %s", co.co_varnames)
    _add_listing("Free variables:", "%4d: %s", co.co_freevars)
    _add_listing("Cell variables:", "%4d: %s", co.co_cellvars)
    return "\n".join(lines)
def show_code(co, *, file=None):
    """Print details of methods, functions, or code to *file*.

    If *file* is not provided, the output is printed on stdout.
    Thin convenience wrapper over code_info().
    """
    print(code_info(co), file=file)
# Instruction details are stored in an immutable namedtuple; per-field
# docstrings are attached below so help() documents each field.
_Instruction = collections.namedtuple("_Instruction",
     "opname opcode arg argval argrepr offset starts_line is_jump_target")
_Instruction.opname.__doc__ = "Human readable name for operation"
_Instruction.opcode.__doc__ = "Numeric code for operation"
_Instruction.arg.__doc__ = "Numeric argument to operation (if any), otherwise None"
_Instruction.argval.__doc__ = "Resolved arg value (if known), otherwise same as arg"
_Instruction.argrepr.__doc__ = "Human readable description of operation argument"
_Instruction.offset.__doc__ = "Start index of operation within bytecode sequence"
_Instruction.starts_line.__doc__ = "Line started by this opcode (if any), otherwise None"
_Instruction.is_jump_target.__doc__ = "True if other code jumps to here, otherwise False"
class Instruction(_Instruction):
    """Details for a bytecode operation

       Defined fields:
         opname - human readable name for operation
         opcode - numeric code for operation
         arg - numeric argument to operation (if any), otherwise None
         argval - resolved arg value (if known), otherwise same as arg
         argrepr - human readable description of operation argument
         offset - start index of operation within bytecode sequence
         starts_line - line started by this opcode (if any), otherwise None
         is_jump_target - True if other code jumps to here, otherwise False
    """
    def _disassemble(self, lineno_width=3, mark_as_current=False):
        """Format instruction details for inclusion in disassembly output

        *lineno_width* sets the width of the line number field (0 omits it)
        *mark_as_current* inserts a '-->' marker arrow as part of the line

        Columns are emitted left to right: line number, current-instruction
        arrow, jump-target marker, offset, opcode name, raw argument, and
        the human-readable argument description.
        """
        fields = []
        # Column: Source code line number (blank-padded when this opcode
        # does not start a new source line)
        if lineno_width:
            if self.starts_line is not None:
                lineno_fmt = "%%%dd" % lineno_width
                fields.append(lineno_fmt % self.starts_line)
            else:
                fields.append(' ' * lineno_width)
        # Column: Current instruction indicator
        if mark_as_current:
            fields.append('-->')
        else:
            fields.append('   ')
        # Column: Jump target marker
        if self.is_jump_target:
            fields.append('>>')
        else:
            fields.append('  ')
        # Column: Instruction offset from start of code sequence
        fields.append(repr(self.offset).rjust(4))
        # Column: Opcode name
        fields.append(self.opname.ljust(20))
        # Column: Opcode argument
        if self.arg is not None:
            fields.append(repr(self.arg).rjust(5))
        # Column: Opcode argument details
        if self.argrepr:
            fields.append('(' + self.argrepr + ')')
        return ' '.join(fields).rstrip()
def get_instructions(x, *, first_line=None):
    """Iterator for the opcodes in methods, functions or code.

    Generates a series of Instruction named tuples giving the details of
    each operation in the supplied code.

    If *first_line* is not None, it indicates the line number that should
    be reported for the first source line in the disassembled code.
    Otherwise, the source line information (if any) is taken directly
    from the disassembled code object.
    """
    co = _get_code_object(x)
    shift = 0 if first_line is None else first_line - co.co_firstlineno
    return _get_instructions_bytes(co.co_code, co.co_varnames, co.co_names,
                                   co.co_consts,
                                   co.co_cellvars + co.co_freevars,
                                   dict(findlinestarts(co)),
                                   shift)
def _get_const_info(const_index, const_list):
"""Helper to get optional details about const references
Returns the dereferenced constant and its repr if the constant
list is defined.
Otherwise returns the constant index and its repr().
"""
argval = const_index
if const_list is not None:
argval = const_list[const_index]
return argval, repr(argval)
def _get_name_info(name_index, name_list):
"""Helper to get optional details about named references
Returns the dereferenced name as both value and repr if the name
list is defined.
Otherwise returns the name index and its repr().
"""
argval = name_index
if name_list is not None:
argval = name_list[name_index]
argrepr = argval
else:
argrepr = repr(argval)
return argval, argrepr
def _get_instructions_bytes(code, varnames=None, names=None, constants=None,
                      cells=None, linestarts=None, line_offset=0):
    """Iterate over the instructions in a bytecode string.

    Generates a sequence of Instruction namedtuples giving the details of each
    opcode.  Additional information about the code's runtime environment
    (e.g. variable names, constants) can be specified using optional
    arguments; when omitted, raw indices are reported instead of resolved
    values.
    """
    labels = findlabels(code)
    starts_line = None
    for offset, op, arg in _unpack_opargs(code):
        if linestarts is not None:
            starts_line = linestarts.get(offset, None)
            if starts_line is not None:
                starts_line += line_offset
        is_jump_target = offset in labels
        argval = None
        argrepr = ''
        if arg is not None:
            #  Set argval to the dereferenced value of the argument when
            #  available, and argrepr to the string representation of argval.
            #    _disassemble_bytes needs the string repr of the
            #    raw name index for LOAD_GLOBAL, LOAD_CONST, etc.
            argval = arg
            # Dispatch on the opcode's membership in the opcode-module
            # category sets; each category resolves arg differently.
            if op in hasconst:
                argval, argrepr = _get_const_info(arg, constants)
            elif op in hasname:
                argval, argrepr = _get_name_info(arg, names)
            elif op in hasjrel:
                # Relative jump: target = next instruction (+2) + delta.
                argval = offset + 2 + arg
                argrepr = "to " + repr(argval)
            elif op in haslocal:
                argval, argrepr = _get_name_info(arg, varnames)
            elif op in hascompare:
                argval = cmp_op[arg]
                argrepr = argval
            elif op in hasfree:
                argval, argrepr = _get_name_info(arg, cells)
            elif op == FORMAT_VALUE:
                # Low two bits select the conversion; bit 2 flags a
                # trailing format spec on the stack.
                argval = ((None, str, repr, ascii)[arg & 0x3], bool(arg & 0x4))
                argrepr = ('', 'str', 'repr', 'ascii')[arg & 0x3]
                if argval[1]:
                    if argrepr:
                        argrepr += ', '
                    argrepr += 'with format'
        yield Instruction(opname[op], op,
                          arg, argval, argrepr,
                          offset, starts_line, is_jump_target)
def disassemble(co, lasti=-1, *, file=None):
    """Disassemble a code object.

    When *lasti* is a valid offset, that instruction is marked as the
    current one ('-->') in the output, as used by distb().
    """
    cell_names = co.co_cellvars + co.co_freevars
    linestarts = dict(findlinestarts(co))
    _disassemble_bytes(co.co_code, lasti, co.co_varnames, co.co_names,
                       co.co_consts, cell_names, linestarts, file=file)
def _disassemble_bytes(code, lasti=-1, varnames=None, names=None,
                       constants=None, cells=None, linestarts=None,
                       *, file=None, line_offset=0):
    """Print a formatted disassembly of raw bytecode to *file*.

    All name/constant tables are optional; without them raw indices are
    shown.  *lasti* marks the current instruction, as for disassemble().
    """
    # Omit the line number column entirely if we have no line number info
    show_lineno = linestarts is not None
    # TODO?: Adjust width upwards if max(linestarts.values()) >= 1000?
    lineno_width = 3 if show_lineno else 0
    for instr in _get_instructions_bytes(code, varnames, names,
                                         constants, cells, linestarts,
                                         line_offset=line_offset):
        # Blank separator line before each new source line (but not at
        # the very top of the listing).
        new_source_line = (show_lineno and
                           instr.starts_line is not None and
                           instr.offset > 0)
        if new_source_line:
            print(file=file)
        is_current_instr = instr.offset == lasti
        print(instr._disassemble(lineno_width, is_current_instr), file=file)
def _disassemble_str(source, *, file=None):
    """Compile the source string, then disassemble the code object."""
    disassemble(_try_compile(source, '<dis>'), file=file)

disco = disassemble  # XXX For backwards compatibility
def _unpack_opargs(code):
extended_arg = 0
for i in range(0, len(code), 2):
op = code[i]
if op >= HAVE_ARGUMENT:
arg = code[i+1] | extended_arg
extended_arg = (arg << 8) if op == EXTENDED_ARG else 0
else:
arg = None
yield (i, op, arg)
def findlabels(code):
    """Detect all offsets in a byte code which are jump targets.

    Return the list of offsets.
    """
    labels = []
    for offset, op, arg in _unpack_opargs(code):
        if arg is None:
            continue
        if op in hasjrel:
            # Relative jumps are measured from the following instruction.
            target = offset + 2 + arg
        elif op in hasjabs:
            target = arg
        else:
            continue
        if target not in labels:
            labels.append(target)
    return labels
def findlinestarts(code):
    """Find the offsets in a byte code which are start of lines in the source.

    Generate pairs (offset, lineno) as described in Python/compile.c,
    decoding the code object's co_lnotab delta table.
    """
    lnotab = code.co_lnotab
    lastlineno = None
    lineno = code.co_firstlineno
    addr = 0
    for byte_incr, line_incr in zip(lnotab[0::2], lnotab[1::2]):
        if byte_incr:
            if lineno != lastlineno:
                yield (addr, lineno)
                lastlineno = lineno
            addr += byte_incr
        if line_incr >= 0x80:
            # line increments are 8-bit *signed* values
            line_incr -= 0x100
        lineno += line_incr
    if lineno != lastlineno:
        yield (addr, lineno)
class Bytecode:
    """The bytecode operations of a piece of code

    Instantiate this with a function, method, string of code, or a code object
    (as returned by compile()).

    Iterating over this yields the bytecode operations as Instruction instances.
    """
    def __init__(self, x, *, first_line=None, current_offset=None):
        # Resolve the input down to a raw code object once, up front.
        self.codeobj = co = _get_code_object(x)
        if first_line is None:
            self.first_line = co.co_firstlineno
            self._line_offset = 0
        else:
            # Shift reported line numbers so the first line shows as
            # *first_line*.
            self.first_line = first_line
            self._line_offset = first_line - co.co_firstlineno
        self._cell_names = co.co_cellvars + co.co_freevars
        self._linestarts = dict(findlinestarts(co))
        self._original_object = x
        # Offset of the "current" instruction, marked by dis().
        self.current_offset = current_offset
    def __iter__(self):
        co = self.codeobj
        return _get_instructions_bytes(co.co_code, co.co_varnames, co.co_names,
                                       co.co_consts, self._cell_names,
                                       self._linestarts,
                                       line_offset=self._line_offset)
    def __repr__(self):
        return "{}({!r})".format(self.__class__.__name__,
                                 self._original_object)
    @classmethod
    def from_traceback(cls, tb):
        """ Construct a Bytecode from the given traceback """
        # Walk to the innermost frame; tb_lasti becomes the current offset.
        while tb.tb_next:
            tb = tb.tb_next
        return cls(tb.tb_frame.f_code, current_offset=tb.tb_lasti)
    def info(self):
        """Return formatted information about the code object."""
        return _format_code_info(self.codeobj)
    def dis(self):
        """Return a formatted view of the bytecode operations."""
        co = self.codeobj
        if self.current_offset is not None:
            offset = self.current_offset
        else:
            offset = -1
        # Collect the printed disassembly into a string instead of stdout.
        with io.StringIO() as output:
            _disassemble_bytes(co.co_code, varnames=co.co_varnames,
                               names=co.co_names, constants=co.co_consts,
                               cells=self._cell_names,
                               linestarts=self._linestarts,
                               line_offset=self._line_offset,
                               file=output,
                               lasti=offset)
            return output.getvalue()
def _test():
    """Simple test program to disassemble a file.

    Reads Python source from the given file (or stdin with '-'),
    compiles it, and prints the disassembly.
    """
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument('infile', type=argparse.FileType(), nargs='?', default='-')
    args = parser.parse_args()
    with args.infile as infile:
        source = infile.read()
    code = compile(source, args.infile.name, "exec")
    dis(code)

if __name__ == "__main__":
    _test()
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Cloud Controller: Implementation of EC2 REST API calls, which are
dispatched to other nodes via AMQP RPC. State is via distributed
datastore.
"""
import base64
import time
from oslo.config import cfg
from nova.api.ec2 import ec2utils
from nova.api.ec2 import inst_state
from nova.api.metadata import password
from nova.api import validator
from nova import availability_zones
from nova import block_device
from nova.cloudpipe import pipelib
from nova import compute
from nova.compute import api as compute_api
from nova.compute import instance_types
from nova.compute import vm_states
from nova import db
from nova import exception
from nova.image import s3
from nova import network
from nova.network.security_group import quantum_driver
from nova.openstack.common import log as logging
from nova.openstack.common import timeutils
from nova import quota
from nova import servicegroup
from nova import utils
from nova import volume
# Options controlling how the EC2 endpoint advertises itself (host, port,
# scheme, URL prefix) and the optional multi-region catalogue.
ec2_opts = [
    cfg.StrOpt('ec2_host',
               default='$my_ip',
               help='the ip of the ec2 api server'),
    cfg.StrOpt('ec2_dmz_host',
               default='$my_ip',
               help='the internal ip of the ec2 api server'),
    cfg.IntOpt('ec2_port',
               default=8773,
               help='the port of the ec2 api server'),
    cfg.StrOpt('ec2_scheme',
               default='http',
               help='the protocol to use when connecting to the ec2 api '
                    'server (http, https)'),
    cfg.StrOpt('ec2_path',
               default='/services/Cloud',
               help='the path prefix used to call the ec2 api server'),
    cfg.ListOpt('region_list',
                default=[],
                help='list of region=fqdn pairs separated by commas'),
]

CONF = cfg.CONF
CONF.register_opts(ec2_opts)
# Options owned by other nova modules that this module reads.
CONF.import_opt('my_ip', 'nova.netconf')
CONF.import_opt('vpn_key_suffix', 'nova.cloudpipe.pipelib')
CONF.import_opt('internal_service_availability_zone',
                'nova.availability_zones')

LOG = logging.getLogger(__name__)

QUOTAS = quota.QUOTAS
def validate_ec2_id(val):
    """Validate an EC2-style resource id string.

    Raises InvalidInstanceIDMalformed when *val* is not a string or
    cannot be converted to an internal id by ec2utils.
    """
    if not validator.validate_str()(val):
        raise exception.InvalidInstanceIDMalformed(val=val)
    try:
        ec2utils.ec2_id_to_id(val)
    except exception.InvalidEc2Id:
        raise exception.InvalidInstanceIDMalformed(val=val)
# EC2 API can return the following values as documented in the EC2 API
# http://docs.amazonwebservices.com/AWSEC2/latest/APIReference/
#    ApiReference-ItemType-InstanceStateType.html
# pending 0 | running 16 | shutting-down 32 | terminated 48 | stopping 64 |
# stopped 80
# Maps nova vm_states to the EC2 instance-state names above; states nova
# has but EC2 does not (paused, rescued, ...) use nova-specific names.
_STATE_DESCRIPTION_MAP = {
    None: inst_state.PENDING,
    vm_states.ACTIVE: inst_state.RUNNING,
    vm_states.BUILDING: inst_state.PENDING,
    vm_states.DELETED: inst_state.TERMINATED,
    vm_states.SOFT_DELETED: inst_state.TERMINATED,
    vm_states.STOPPED: inst_state.STOPPED,
    vm_states.PAUSED: inst_state.PAUSE,
    vm_states.SUSPENDED: inst_state.SUSPEND,
    vm_states.RESCUED: inst_state.RESCUE,
    vm_states.RESIZED: inst_state.RESIZE,
}
def _state_description(vm_state, _shutdown_terminate):
    """Map the vm state to the server status string.

    Returns a dict with the numeric EC2 state 'code' and its 'name';
    unknown vm states pass through unchanged as the name.
    """
    # Note(maoy): We do not provide EC2 compatibility
    # in shutdown_terminate flag behavior. So we ignore
    # it here.
    name = _STATE_DESCRIPTION_MAP.get(vm_state, vm_state)

    return {'code': inst_state.name_to_code(name),
            'name': name}
def _parse_block_device_mapping(bdm):
"""Parse BlockDeviceMappingItemType into flat hash
BlockDevicedMapping.<N>.DeviceName
BlockDevicedMapping.<N>.Ebs.SnapshotId
BlockDevicedMapping.<N>.Ebs.VolumeSize
BlockDevicedMapping.<N>.Ebs.DeleteOnTermination
BlockDevicedMapping.<N>.Ebs.NoDevice
BlockDevicedMapping.<N>.VirtualName
=> remove .Ebs and allow volume id in SnapshotId
"""
ebs = bdm.pop('ebs', None)
if ebs:
ec2_id = ebs.pop('snapshot_id', None)
if ec2_id:
if ec2_id.startswith('snap-'):
bdm['snapshot_id'] = ec2utils.ec2_snap_id_to_uuid(ec2_id)
elif ec2_id.startswith('vol-'):
bdm['volume_id'] = ec2utils.ec2_vol_id_to_uuid(ec2_id)
ebs.setdefault('delete_on_termination', True)
bdm.update(ebs)
return bdm
def _properties_get_mappings(properties):
    """Return the image's 'mappings' list with device names /dev-prefixed."""
    return block_device.mappings_prepend_dev(properties.get('mappings', []))
def _format_block_device_mapping(bdm):
"""Construct BlockDeviceMappingItemType
{'device_name': '...', 'snapshot_id': , ...}
=> BlockDeviceMappingItemType
"""
keys = (('deviceName', 'device_name'),
('virtualName', 'virtual_name'))
item = {}
for name, k in keys:
if k in bdm:
item[name] = bdm[k]
if bdm.get('no_device'):
item['noDevice'] = True
if ('snapshot_id' in bdm) or ('volume_id' in bdm):
ebs_keys = (('snapshotId', 'snapshot_id'),
('snapshotId', 'volume_id'), # snapshotId is abused
('volumeSize', 'volume_size'),
('deleteOnTermination', 'delete_on_termination'))
ebs = {}
for name, k in ebs_keys:
if k in bdm:
if k == 'snapshot_id':
ebs[name] = ec2utils.id_to_ec2_snap_id(bdm[k])
elif k == 'volume_id':
ebs[name] = ec2utils.id_to_ec2_vol_id(bdm[k])
else:
ebs[name] = bdm[k]
assert 'snapshotId' in ebs
item['ebs'] = ebs
return item
def _format_mappings(properties, result):
    """Format multiple BlockDeviceMappingItemType.

    Combines the image's swap/ephemeral mappings with its explicit
    block_device_mapping entries (the latter win on device-name clashes)
    and stores the merged list in result['blockDeviceMapping'].
    """
    mappings = [{'virtualName': m['virtual'], 'deviceName': m['device']}
                for m in _properties_get_mappings(properties)
                if block_device.is_swap_or_ephemeral(m['virtual'])]

    block_device_mapping = [_format_block_device_mapping(bdm) for bdm in
                            properties.get('block_device_mapping', [])]

    # NOTE(yamahata): overwrite mappings with block_device_mapping
    # (deleting inside the index loop is safe: we break immediately).
    for bdm in block_device_mapping:
        for i in range(len(mappings)):
            if bdm['deviceName'] == mappings[i]['deviceName']:
                del mappings[i]
                break
        mappings.append(bdm)

    # NOTE(yamahata): trim ebs.no_device == true. Is this necessary?
    mappings = [bdm for bdm in mappings if not (bdm.get('noDevice', False))]

    if mappings:
        result['blockDeviceMapping'] = mappings
class CloudController(object):
"""CloudController provides the critical dispatch between
inbound API calls through the endpoint and messages
sent to the other nodes.
"""
    def __init__(self):
        """Wire up the nova service APIs that EC2 calls dispatch to."""
        self.image_service = s3.S3ImageService()
        self.network_api = network.API()
        self.volume_api = volume.API()
        # Security-group backend depends on the configured network driver.
        self.security_group_api = get_cloud_security_group_api()
        self.compute_api = compute.API(network_api=self.network_api,
                                   volume_api=self.volume_api,
                                   security_group_api=self.security_group_api)
        self.keypair_api = compute_api.KeypairAPI()
        self.servicegroup_api = servicegroup.API()
    def __str__(self):
        """Short identifier used when the controller appears in logs."""
        return 'CloudController'
    def _enforce_valid_instance_ids(self, context, instance_ids):
        """Resolve every EC2 instance id to an instance, failing fast.

        Returns a dict mapping each ec2 id to its instance; any invalid
        id raises from the lookup helpers before partial work is used.
        """
        # NOTE(mikal): Amazon's implementation of the EC2 API requires that
        # _all_ instance ids passed in be valid.
        instances = {}
        if instance_ids:
            for ec2_id in instance_ids:
                instance_uuid = ec2utils.ec2_inst_id_to_uuid(context, ec2_id)
                instance = self.compute_api.get(context, instance_uuid)
                instances[ec2_id] = instance
        return instances
def _get_image_state(self, image):
# NOTE(vish): fallback status if image_state isn't set
state = image.get('status')
if state == 'active':
state = 'available'
return image['properties'].get('image_state', state)
    def describe_availability_zones(self, context, **kwargs):
        """EC2 DescribeAvailabilityZones entry point.

        Admins get the verbose host/service listing when 'verbose'
        appears in the zone_name filter; everyone else gets the plain
        zone list.
        """
        # NOTE(review): zone_name is presumably the list of requested zone
        # names, with 'verbose' acting as a sentinel entry — confirm
        # against the EC2 request parser.
        if ('zone_name' in kwargs and
            'verbose' in kwargs['zone_name'] and
            context.is_admin):
            return self._describe_availability_zones_verbose(context,
                                                             **kwargs)
        else:
            return self._describe_availability_zones(context, **kwargs)
    def _describe_availability_zones(self, context, **kwargs):
        """Return the plain zone list: each zone name with its state."""
        ctxt = context.elevated()
        available_zones, not_available_zones = \
            availability_zones.get_availability_zones(ctxt)

        result = []
        for zone in available_zones:
            # Hide internal_service_availability_zone
            if zone == CONF.internal_service_availability_zone:
                continue
            result.append({'zoneName': zone,
                           'zoneState': "available"})
        for zone in not_available_zones:
            result.append({'zoneName': zone,
                           'zoneState': "not available"})

        return {'availabilityZoneInfo': result}
    def _describe_availability_zones_verbose(self, context, **kwargs):
        """Admin variant: zones plus their hosts and per-host services.

        The host/service hierarchy is rendered as an ASCII tree encoded
        in the zoneName fields ('|- host', '| |- binary').
        """
        ctxt = context.elevated()
        available_zones, not_available_zones = \
            availability_zones.get_availability_zones(ctxt)

        # Available services
        enabled_services = db.service_get_all(context, False)
        enabled_services = availability_zones.set_availability_zones(context,
                enabled_services)
        # Index hosts by zone and services by zone+host for the tree below.
        zone_hosts = {}
        host_services = {}
        for service in enabled_services:
            zone_hosts.setdefault(service['availability_zone'], [])
            if service['host'] not in zone_hosts[service['availability_zone']]:
                zone_hosts[service['availability_zone']].append(
                    service['host'])

            host_services.setdefault(service['availability_zone'] +
                    service['host'], [])
            host_services[service['availability_zone'] + service['host']].\
                    append(service)

        result = []
        for zone in available_zones:
            result.append({'zoneName': zone,
                           'zoneState': "available"})
            for host in zone_hosts[zone]:
                result.append({'zoneName': '|- %s' % host,
                               'zoneState': ''})

                for service in host_services[zone + host]:
                    # ":-)" for a live heartbeat, "XXX" for a dead one.
                    alive = self.servicegroup_api.service_is_up(service)
                    art = (alive and ":-)") or "XXX"
                    active = 'enabled'
                    if service['disabled']:
                        active = 'disabled'
                    result.append({'zoneName': '| |- %s' % service['binary'],
                                   'zoneState': ('%s %s %s'
                                                 % (active, art,
                                                    service['updated_at']))})

        for zone in not_available_zones:
            result.append({'zoneName': zone,
                           'zoneState': "not available"})

        return {'availabilityZoneInfo': result}
def describe_regions(self, context, region_name=None, **kwargs):
if CONF.region_list:
regions = []
for region in CONF.region_list:
name, _sep, host = region.partition('=')
endpoint = '%s://%s:%s%s' % (CONF.ec2_scheme,
host,
CONF.ec2_port,
CONF.ec2_path)
regions.append({'regionName': name,
'regionEndpoint': endpoint})
else:
regions = [{'regionName': 'nova',
'regionEndpoint': '%s://%s:%s%s' % (CONF.ec2_scheme,
CONF.ec2_host,
CONF.ec2_port,
CONF.ec2_path)}]
return {'regionInfo': regions}
def describe_snapshots(self,
context,
snapshot_id=None,
owner=None,
restorable_by=None,
**kwargs):
if snapshot_id:
snapshots = []
for ec2_id in snapshot_id:
internal_id = ec2utils.ec2_snap_id_to_uuid(ec2_id)
snapshot = self.volume_api.get_snapshot(
context,
snapshot_id=internal_id)
snapshots.append(snapshot)
else:
snapshots = self.volume_api.get_all_snapshots(context)
formatted_snapshots = []
for s in snapshots:
formatted = self._format_snapshot(context, s)
if formatted:
formatted_snapshots.append(formatted)
return {'snapshotSet': formatted_snapshots}
def _format_snapshot(self, context, snapshot):
# NOTE(mikal): this is just a set of strings in cinder. If they
# implement an enum, then we should move this code to use it. The
# valid ec2 statuses are "pending", "completed", and "error".
status_map = {'new': 'pending',
'creating': 'pending',
'available': 'completed',
'active': 'completed',
'deleting': 'pending',
'deleted': None,
'error': 'error'}
mapped_status = status_map.get(snapshot['status'], snapshot['status'])
if not mapped_status:
return None
s = {}
s['snapshotId'] = ec2utils.id_to_ec2_snap_id(snapshot['id'])
s['volumeId'] = ec2utils.id_to_ec2_vol_id(snapshot['volume_id'])
s['status'] = mapped_status
s['startTime'] = snapshot['created_at']
s['progress'] = snapshot['progress']
s['ownerId'] = snapshot['project_id']
s['volumeSize'] = snapshot['volume_size']
s['description'] = snapshot['display_description']
return s
    def create_snapshot(self, context, volume_id, **kwargs):
        """EC2 CreateSnapshot: snapshot the given volume.

        'force' snapshots an attached (in-use) volume.  The new snapshot
        id is registered in the ec2 id-mapping table before returning.
        """
        validate_ec2_id(volume_id)
        LOG.audit(_("Create snapshot of volume %s"), volume_id,
                  context=context)
        volume_id = ec2utils.ec2_vol_id_to_uuid(volume_id)
        volume = self.volume_api.get(context, volume_id)
        args = (context, volume, kwargs.get('name'), kwargs.get('description'))
        if kwargs.get('force', False):
            snapshot = self.volume_api.create_snapshot_force(*args)
        else:
            snapshot = self.volume_api.create_snapshot(*args)

        db.ec2_snapshot_create(context, snapshot['id'])
        return self._format_snapshot(context, snapshot)
    def delete_snapshot(self, context, snapshot_id, **kwargs):
        """EC2 DeleteSnapshot: delete a snapshot by its EC2 id."""
        snapshot_id = ec2utils.ec2_snap_id_to_uuid(snapshot_id)
        snapshot = self.volume_api.get_snapshot(context, snapshot_id)
        self.volume_api.delete_snapshot(context, snapshot)
        return True
    def describe_key_pairs(self, context, key_name=None, **kwargs):
        """EC2 DescribeKeyPairs.

        *key_name* is an optional collection of names to filter by; a
        filter that matches nothing is an error.  Cloudpipe VPN keys are
        hidden from non-admin users.
        """
        key_pairs = self.keypair_api.get_key_pairs(context, context.user_id)
        if key_name is not None:
            key_pairs = [x for x in key_pairs if x['name'] in key_name]

        #If looking for non existent key pair
        if key_name is not None and not key_pairs:
            msg = _('Could not find key pair(s): %s') % ','.join(key_name)
            raise exception.KeypairNotFound(msg,
                                            code="InvalidKeyPair.Duplicate")

        result = []
        for key_pair in key_pairs:
            # filter out the vpn keys
            suffix = CONF.vpn_key_suffix
            if context.is_admin or not key_pair['name'].endswith(suffix):
                result.append({
                    'keyName': key_pair['name'],
                    'keyFingerprint': key_pair['fingerprint'],
                })

        return {'keySet': result}
    def create_key_pair(self, context, key_name, **kwargs):
        """EC2 CreateKeyPair: generate a keypair and return its secret.

        The private key material is only returned here, at creation time.
        Quota exhaustion maps to EC2's ResourceLimitExceeded error.
        """
        LOG.audit(_("Create key pair %s"), key_name, context=context)

        try:
            keypair = self.keypair_api.create_key_pair(context,
                                                       context.user_id,
                                                       key_name)
        except exception.KeypairLimitExceeded:
            msg = _("Quota exceeded, too many key pairs.")
            raise exception.EC2APIError(msg, code='ResourceLimitExceeded')

        return {'keyName': key_name,
                'keyFingerprint': keypair['fingerprint'],
                'keyMaterial': keypair['private_key']}
    # TODO(vish): when context is no longer an object, pass it here
    def import_key_pair(self, context, key_name, public_key_material,
                        **kwargs):
        """EC2 ImportKeyPair: register a user-supplied public key.

        *public_key_material* is base64-encoded per the EC2 API; quota
        and malformed-key failures map to EC2APIError.
        """
        LOG.audit(_("Import key %s"), key_name, context=context)

        public_key = base64.b64decode(public_key_material)

        try:
            keypair = self.keypair_api.import_key_pair(context,
                                                       context.user_id,
                                                       key_name,
                                                       public_key)
        except exception.KeypairLimitExceeded:
            msg = _("Quota exceeded, too many key pairs.")
            raise exception.EC2APIError(msg)
        except exception.InvalidKeypair:
            msg = _("Keypair data is invalid")
            raise exception.EC2APIError(msg)

        return {'keyName': key_name,
                'keyFingerprint': keypair['fingerprint']}
    def delete_key_pair(self, context, key_name, **kwargs):
        """EC2 DeleteKeyPair: remove a keypair, succeeding even if absent."""
        LOG.audit(_("Delete key pair %s"), key_name, context=context)
        try:
            self.keypair_api.delete_key_pair(context, context.user_id,
                                             key_name)
        except exception.NotFound:
            # aws returns true even if the key doesn't exist
            pass
        return True
    def describe_security_groups(self, context, group_name=None, group_id=None,
                                 **kwargs):
        """EC2 DescribeSecurityGroups.

        Optional name/id filters and EC2 'filter' kwargs are forwarded to
        the security-group backend; results are sorted for stable output.
        """
        search_opts = ec2utils.search_opts_from_filters(kwargs.get('filter'))

        raw_groups = self.security_group_api.list(context,
                                                  group_name,
                                                  group_id,
                                                  context.project_id,
                                                  search_opts=search_opts)

        groups = [self._format_security_group(context, g) for g in raw_groups]

        return {'securityGroupInfo':
                list(sorted(groups,
                            key=lambda k: (k['ownerId'], k['groupName'])))}
    def _format_security_group(self, context, group):
        """Map an internal security group to an EC2 securityGroupInfo item.

        Each rule becomes one or more ipPermissions entries; group-based
        rules with no protocol are expanded to icmp/tcp/udp over the
        full port range, matching EC2's "all traffic" semantics.
        """
        g = {}
        g['groupDescription'] = group['description']
        g['groupName'] = group['name']
        g['ownerId'] = group['project_id']
        g['ipPermissions'] = []
        for rule in group['rules']:
            r = {}
            r['groups'] = []
            r['ipRanges'] = []
            if rule['group_id']:
                if rule.get('grantee_group'):
                    source_group = rule['grantee_group']
                    r['groups'] += [{'groupName': source_group['name'],
                                     'userId': source_group['project_id']}]
                else:
                    # rule is not always joined with grantee_group
                    # for example when using quantum driver.
                    source_group = self.security_group_api.get(
                        context, id=rule['group_id'])
                    r['groups'] += [{'groupName': source_group.get('name'),
                                     'userId': source_group.get('project_id')}]
                if rule['protocol']:
                    r['ipProtocol'] = rule['protocol'].lower()
                    r['fromPort'] = rule['from_port']
                    r['toPort'] = rule['to_port']
                    g['ipPermissions'] += [dict(r)]
                else:
                    # No protocol: emit one copy of the rule per protocol
                    # (dict(r) snapshots each variant).
                    for protocol, min_port, max_port in (('icmp', -1, -1),
                                                         ('tcp', 1, 65535),
                                                         ('udp', 1, 65535)):
                        r['ipProtocol'] = protocol
                        r['fromPort'] = min_port
                        r['toPort'] = max_port
                        g['ipPermissions'] += [dict(r)]
            else:
                r['ipProtocol'] = rule['protocol']
                r['fromPort'] = rule['from_port']
                r['toPort'] = rule['to_port']
                r['ipRanges'] += [{'cidrIp': rule['cidr']}]
                g['ipPermissions'] += [r]
        return g
def _rule_args_to_dict(self, context, kwargs):
rules = []
if 'groups' not in kwargs and 'ip_ranges' not in kwargs:
rule = self._rule_dict_last_step(context, **kwargs)
if rule:
rules.append(rule)
return rules
if 'ip_ranges' in kwargs:
rules = self._cidr_args_split(kwargs)
else:
rules = [kwargs]
finalset = []
for rule in rules:
if 'groups' in rule:
groups_values = self._groups_args_split(rule)
for groups_value in groups_values:
final = self._rule_dict_last_step(context, **groups_value)
finalset.append(final)
else:
final = self._rule_dict_last_step(context, **rule)
finalset.append(final)
return finalset
def _cidr_args_split(self, kwargs):
cidr_args_split = []
cidrs = kwargs['ip_ranges']
for key, cidr in cidrs.iteritems():
mykwargs = kwargs.copy()
del mykwargs['ip_ranges']
mykwargs['cidr_ip'] = cidr['cidr_ip']
cidr_args_split.append(mykwargs)
return cidr_args_split
def _groups_args_split(self, kwargs):
groups_args_split = []
groups = kwargs['groups']
for key, group in groups.iteritems():
mykwargs = kwargs.copy()
del mykwargs['groups']
if 'group_name' in group:
mykwargs['source_security_group_name'] = group['group_name']
if 'user_id' in group:
mykwargs['source_security_group_owner_id'] = group['user_id']
if 'group_id' in group:
mykwargs['source_security_group_id'] = group['group_id']
groups_args_split.append(mykwargs)
return groups_args_split
    def _rule_dict_last_step(self, context, to_port=None, from_port=None,
                             ip_protocol=None, cidr_ip=None, user_id=None,
                             source_security_group_name=None,
                             source_security_group_owner_id=None):
        """Build a single ingress rule from fully-split EC2 rule arguments.

        Produces a group-based rule when a source security group name is
        given, otherwise a CIDR-based rule.  Note ``user_id`` is accepted
        for API compatibility but not used here.

        :raises exception.SecurityGroupNotFound: when the named source
            group does not exist in the resolved project
        """
        if source_security_group_name:
            # Resolve which project owns the source group (may come from
            # the optional "user:project" owner id).
            source_project_id = self._get_source_project_id(context,
                source_security_group_owner_id)
            source_security_group = db.security_group_get_by_name(
                    context.elevated(),
                    source_project_id,
                    source_security_group_name)
            notfound = exception.SecurityGroupNotFound
            if not source_security_group:
                raise notfound(security_group_id=source_security_group_name)
            group_id = source_security_group['id']
            return self.security_group_api.new_group_ingress_rule(
                                group_id, ip_protocol, from_port, to_port)
        else:
            # CIDR rule; parse_cidr normalizes/validates the CIDR string.
            cidr = self.security_group_api.parse_cidr(cidr_ip)
            return self.security_group_api.new_cidr_ingress_rule(
                                        cidr, ip_protocol, from_port, to_port)
def _validate_group_identifier(self, group_name, group_id):
if not group_name and not group_id:
err = _("Not enough parameters, need group_name or group_id")
raise exception.EC2APIError(err)
def _validate_rulevalues(self, rulesvalues):
if not rulesvalues:
err = _("%s Not enough parameters to build a valid rule")
raise exception.EC2APIError(err % rulesvalues)
def _validate_security_group_protocol(self, values):
validprotocols = ['tcp', 'udp', 'icmp', '6', '17', '1']
if 'ip_protocol' in values and \
values['ip_protocol'] not in validprotocols:
protocol = values['ip_protocol']
err = _("Invalid IP protocol %(protocol)s.") % locals()
raise exception.EC2APIError(message=err, code="400")
    def revoke_security_group_ingress(self, context, group_name=None,
                                      group_id=None, **kwargs):
        """Revoke ingress rules from a security group (EC2 API).

        Accepts either the newer 'ip_permissions' list or legacy flat
        kwargs describing a single permission.

        :raises exception.EC2APIError: when no matching rule exists or
            the parameters cannot form a valid rule
        """
        self._validate_group_identifier(group_name, group_id)
        security_group = self.security_group_api.get(context, group_name,
                                                     group_id)
        # Newer API revisions send a list under 'ip_permissions'; older
        # clients pass the permission fields directly in kwargs.
        prevalues = kwargs.get('ip_permissions', [kwargs])
        rule_ids = []
        for values in prevalues:
            rulesvalues = self._rule_args_to_dict(context, values)
            self._validate_rulevalues(rulesvalues)
            for values_for_rule in rulesvalues:
                values_for_rule['parent_group_id'] = security_group['id']
                rule_ids.append(self.security_group_api.rule_exists(
                        security_group, values_for_rule))
        # rule_exists() returns a falsy value for missing rules; keep
        # only the ids of rules that actually exist.
        rule_ids = [id for id in rule_ids if id]
        if rule_ids:
            self.security_group_api.remove_rules(context, security_group,
                    rule_ids)
            return True
        raise exception.EC2APIError(_("No rule for the specified parameters."))
# TODO(soren): This has only been tested with Boto as the client.
# Unfortunately, it seems Boto is using an old API
# for these operations, so support for newer API versions
# is sketchy.
    def authorize_security_group_ingress(self, context, group_name=None,
                                         group_id=None, **kwargs):
        """Add ingress rules to a security group (EC2 API).

        :raises exception.EC2APIError: when a requested rule already
            exists, or no valid rule could be built from the parameters
        """
        self._validate_group_identifier(group_name, group_id)
        security_group = self.security_group_api.get(context, group_name,
                                                     group_id)
        # Newer API revisions send a list under 'ip_permissions'; older
        # clients pass the permission fields directly in kwargs.
        prevalues = kwargs.get('ip_permissions', [kwargs])
        postvalues = []
        for values in prevalues:
            self._validate_security_group_protocol(values)
            rulesvalues = self._rule_args_to_dict(context, values)
            self._validate_rulevalues(rulesvalues)
            for values_for_rule in rulesvalues:
                values_for_rule['parent_group_id'] = security_group['id']
                # Duplicate rules are an error; validate them all before
                # adding anything so the operation is all-or-nothing.
                if self.security_group_api.rule_exists(security_group,
                                                       values_for_rule):
                    err = _('%s - This rule already exists in group')
                    raise exception.EC2APIError(err % values_for_rule)
                postvalues.append(values_for_rule)
        if postvalues:
            self.security_group_api.add_rules(context, security_group['id'],
                                           security_group['name'], postvalues)
            return True
        raise exception.EC2APIError(_("No rule for the specified parameters."))
def _get_source_project_id(self, context, source_security_group_owner_id):
if source_security_group_owner_id:
# Parse user:project for source group.
source_parts = source_security_group_owner_id.split(':')
# If no project name specified, assume it's same as user name.
# Since we're looking up by project name, the user name is not
# used here. It's only read for EC2 API compatibility.
if len(source_parts) == 2:
source_project_id = source_parts[1]
else:
source_project_id = source_parts[0]
else:
source_project_id = context.project_id
return source_project_id
    def create_security_group(self, context, group_name, group_description):
        """Create a security group (EC2 CreateSecurityGroup).

        :returns: dict with 'securityGroupSet' holding the new group
        """
        # Normalize to bytes before regex validation (Python 2 unicode).
        if isinstance(group_name, unicode):
            group_name = group_name.encode('utf-8')
        if CONF.ec2_strict_validation:
            # EC2 specification gives constraints for name and description:
            # Accepts alphanumeric characters, spaces, dashes, and underscores
            allowed = '^[a-zA-Z0-9_\- ]+$'
            self.security_group_api.validate_property(group_name, 'name',
                                                      allowed)
            self.security_group_api.validate_property(group_description,
                                                      'description', allowed)
        else:
            # Amazon accepts more symbols.
            # So, allow POSIX [:print:] characters.
            allowed = r'^[\x20-\x7E]+$'
            self.security_group_api.validate_property(group_name, 'name',
                                                      allowed)
            # NOTE(review): this branch validates only the name;
            # presumably the description is intentionally left
            # unrestricted here -- confirm.
        group_ref = self.security_group_api.create_security_group(
            context, group_name, group_description)
        return {'securityGroupSet': [self._format_security_group(context,
                                                                 group_ref)]}
def delete_security_group(self, context, group_name=None, group_id=None,
**kwargs):
if not group_name and not group_id:
err = _("Not enough parameters, need group_name or group_id")
raise exception.EC2APIError(err)
security_group = self.security_group_api.get(context, group_name,
group_id)
self.security_group_api.destroy(context, security_group)
return True
def get_password_data(self, context, instance_id, **kwargs):
# instance_id may be passed in as a list of instances
if isinstance(instance_id, list):
ec2_id = instance_id[0]
else:
ec2_id = instance_id
validate_ec2_id(ec2_id)
instance_uuid = ec2utils.ec2_inst_id_to_uuid(context, ec2_id)
instance = self.compute_api.get(context, instance_uuid)
output = password.extract_password(instance)
# NOTE(vish): this should be timestamp from the metadata fields
# but it isn't important enough to implement properly
now = timeutils.utcnow()
return {"InstanceId": ec2_id,
"Timestamp": now,
"passwordData": output}
def get_console_output(self, context, instance_id, **kwargs):
LOG.audit(_("Get console output for instance %s"), instance_id,
context=context)
# instance_id may be passed in as a list of instances
if isinstance(instance_id, list):
ec2_id = instance_id[0]
else:
ec2_id = instance_id
validate_ec2_id(ec2_id)
instance_uuid = ec2utils.ec2_inst_id_to_uuid(context, ec2_id)
instance = self.compute_api.get(context, instance_uuid)
output = self.compute_api.get_console_output(context, instance)
now = timeutils.utcnow()
return {"InstanceId": ec2_id,
"Timestamp": now,
"output": base64.b64encode(output)}
def describe_volumes(self, context, volume_id=None, **kwargs):
if volume_id:
volumes = []
for ec2_id in volume_id:
validate_ec2_id(ec2_id)
internal_id = ec2utils.ec2_vol_id_to_uuid(ec2_id)
volume = self.volume_api.get(context, internal_id)
volumes.append(volume)
else:
volumes = self.volume_api.get_all(context)
volumes = [self._format_volume(context, v) for v in volumes]
return {'volumeSet': volumes}
    def _format_volume(self, context, volume):
        """Convert a volume dict into the EC2 DescribeVolumes format."""
        instance_ec2_id = None
        instance_data = None
        # Resolve the attached instance, if any, for the attachment set
        # and the admin status string.
        if volume.get('instance_uuid', None):
            instance_uuid = volume['instance_uuid']
            instance = db.instance_get_by_uuid(context.elevated(),
                    instance_uuid)
            instance_ec2_id = ec2utils.id_to_ec2_inst_id(instance_uuid)
            instance_data = '%s[%s]' % (instance_ec2_id,
                                        instance['host'])
        v = {}
        v['volumeId'] = ec2utils.id_to_ec2_vol_id(volume['id'])
        v['status'] = volume['status']
        v['size'] = volume['size']
        v['availabilityZone'] = volume['availability_zone']
        v['createTime'] = volume['created_at']
        if context.is_admin:
            # Admins get extra detail folded into the status string.
            # NOTE(dprince): project_id and host_id are unset w/ Cinder
            v['status'] = '%s (%s, %s, %s, %s)' % (
                volume['status'],
                volume.get('project_id', ''),
                volume.get('host', ''),
                instance_data,
                volume['mountpoint'])
        if volume['attach_status'] == 'attached':
            v['attachmentSet'] = [{'attachTime': volume['attach_time'],
                                   'deleteOnTermination': False,
                                   'device': volume['mountpoint'],
                                   'instanceId': instance_ec2_id,
                                   'status': 'attached',
                                   'volumeId': v['volumeId']}]
        else:
            # EC2 responses always carry an attachmentSet, even if empty.
            v['attachmentSet'] = [{}]
        if volume.get('snapshot_id') is not None:
            v['snapshotId'] = ec2utils.id_to_ec2_snap_id(volume['snapshot_id'])
        else:
            v['snapshotId'] = None
        return v
    def create_volume(self, context, **kwargs):
        """Create a volume, optionally from a snapshot (EC2 CreateVolume).

        :returns: the new volume in EC2 DescribeVolumes format
        """
        snapshot_ec2id = kwargs.get('snapshot_id', None)
        if snapshot_ec2id is not None:
            snapshot_id = ec2utils.ec2_snap_id_to_uuid(kwargs['snapshot_id'])
            snapshot = self.volume_api.get_snapshot(context, snapshot_id)
            LOG.audit(_("Create volume from snapshot %s"), snapshot_ec2id,
                      context=context)
        else:
            snapshot = None
            LOG.audit(_("Create volume of %s GB"),
                        kwargs.get('size'),
                        context=context)
        create_kwargs = dict(snapshot=snapshot,
                             volume_type=kwargs.get('volume_type'),
                             metadata=kwargs.get('metadata'),
                             availability_zone=kwargs.get('availability_zone'))
        volume = self.volume_api.create(context,
                                        kwargs.get('size'),
                                        kwargs.get('name'),
                                        kwargs.get('description'),
                                        **create_kwargs)
        # Record the ec2 id mapping for the new volume.
        db.ec2_volume_create(context, volume['id'])
        # TODO(vish): Instance should be None at db layer instead of
        #             trying to lazy load, but for now we turn it into
        #             a dict to avoid an error.
        return self._format_volume(context, dict(volume))
def delete_volume(self, context, volume_id, **kwargs):
validate_ec2_id(volume_id)
volume_id = ec2utils.ec2_vol_id_to_uuid(volume_id)
try:
volume = self.volume_api.get(context, volume_id)
self.volume_api.delete(context, volume)
except exception.InvalidVolume:
raise exception.EC2APIError(_('Delete Failed'))
return True
def attach_volume(self, context,
volume_id,
instance_id,
device, **kwargs):
validate_ec2_id(instance_id)
validate_ec2_id(volume_id)
volume_id = ec2utils.ec2_vol_id_to_uuid(volume_id)
instance_uuid = ec2utils.ec2_inst_id_to_uuid(context, instance_id)
instance = self.compute_api.get(context, instance_uuid)
msg = _("Attach volume %(volume_id)s to instance %(instance_id)s"
" at %(device)s") % locals()
LOG.audit(msg, context=context)
try:
self.compute_api.attach_volume(context, instance,
volume_id, device)
except exception.InvalidVolume:
raise exception.EC2APIError(_('Attach Failed.'))
volume = self.volume_api.get(context, volume_id)
return {'attachTime': volume['attach_time'],
'device': volume['mountpoint'],
'instanceId': ec2utils.id_to_ec2_inst_id(instance_uuid),
'requestId': context.request_id,
'status': volume['attach_status'],
'volumeId': ec2utils.id_to_ec2_vol_id(volume_id)}
def _get_instance_from_volume(self, context, volume):
if volume['instance_uuid']:
try:
return db.instance_get_by_uuid(context,
volume['instance_uuid'])
except exception.InstanceNotFound:
pass
raise exception.VolumeUnattached(volume_id=volume['id'])
    def detach_volume(self, context, volume_id, **kwargs):
        """Detach an EC2 volume from its instance (EC2 DetachVolume)."""
        validate_ec2_id(volume_id)
        volume_id = ec2utils.ec2_vol_id_to_uuid(volume_id)
        LOG.audit(_("Detach volume %s"), volume_id, context=context)
        volume = self.volume_api.get(context, volume_id)
        # Raises VolumeUnattached when the volume has no live instance.
        instance = self._get_instance_from_volume(context, volume)
        try:
            self.compute_api.detach_volume(context, instance, volume)
        except exception.InvalidVolume:
            raise exception.EC2APIError(_('Detach Volume Failed.'))
        # NOTE(review): the response reflects the pre-detach state
        # (attach_time/status were read before the detach call) --
        # presumably intentional to describe what was detached.
        return {'attachTime': volume['attach_time'],
                'device': volume['mountpoint'],
                'instanceId': ec2utils.id_to_ec2_inst_id(
                        volume['instance_uuid']),
                'requestId': context.request_id,
                'status': volume['attach_status'],
                'volumeId': ec2utils.id_to_ec2_vol_id(volume_id)}
def _format_kernel_id(self, context, instance_ref, result, key):
kernel_uuid = instance_ref['kernel_id']
if kernel_uuid is None or kernel_uuid == '':
return
result[key] = ec2utils.glance_id_to_ec2_id(context, kernel_uuid, 'aki')
def _format_ramdisk_id(self, context, instance_ref, result, key):
ramdisk_uuid = instance_ref['ramdisk_id']
if ramdisk_uuid is None or ramdisk_uuid == '':
return
result[key] = ec2utils.glance_id_to_ec2_id(context, ramdisk_uuid,
'ari')
    def describe_instance_attribute(self, context, instance_id, attribute,
                                    **kwargs):
        """Return a single attribute of one instance (EC2 API).

        Dispatches on ``attribute`` through a table of small formatter
        closures.

        :raises exception.EC2APIError: for unsupported attributes
        """
        def _unsupported_attribute(instance, result):
            raise exception.EC2APIError(_('attribute not supported: %s') %
                                        attribute)
        def _format_attr_block_device_mapping(instance, result):
            tmp = {}
            self._format_instance_root_device_name(instance, tmp)
            self._format_instance_bdm(context, instance['uuid'],
                                      tmp['rootDeviceName'], result)
        def _format_attr_disable_api_termination(instance, result):
            result['disableApiTermination'] = instance['disable_terminate']
        def _format_attr_group_set(instance, result):
            CloudController._format_group_set(instance, result)
        def _format_attr_instance_initiated_shutdown_behavior(instance,
                                                               result):
            if instance['shutdown_terminate']:
                result['instanceInitiatedShutdownBehavior'] = 'terminate'
            else:
                result['instanceInitiatedShutdownBehavior'] = 'stop'
        def _format_attr_instance_type(instance, result):
            self._format_instance_type(instance, result)
        def _format_attr_kernel(instance, result):
            self._format_kernel_id(context, instance, result, 'kernel')
        def _format_attr_ramdisk(instance, result):
            self._format_ramdisk_id(context, instance, result, 'ramdisk')
        def _format_attr_root_device_name(instance, result):
            self._format_instance_root_device_name(instance, result)
        def _format_attr_source_dest_check(instance, result):
            # sourceDestCheck is accepted by the dispatch table but has
            # no nova equivalent here, so it always raises.
            _unsupported_attribute(instance, result)
        def _format_attr_user_data(instance, result):
            result['userData'] = base64.b64decode(instance['user_data'])
        attribute_formatter = {
            'blockDeviceMapping': _format_attr_block_device_mapping,
            'disableApiTermination': _format_attr_disable_api_termination,
            'groupSet': _format_attr_group_set,
            'instanceInitiatedShutdownBehavior':
            _format_attr_instance_initiated_shutdown_behavior,
            'instanceType': _format_attr_instance_type,
            'kernel': _format_attr_kernel,
            'ramdisk': _format_attr_ramdisk,
            'rootDeviceName': _format_attr_root_device_name,
            'sourceDestCheck': _format_attr_source_dest_check,
            'userData': _format_attr_user_data,
        }
        fn = attribute_formatter.get(attribute)
        if fn is None:
            raise exception.EC2APIError(
                _('attribute not supported: %s') % attribute)
        validate_ec2_id(instance_id)
        instance_uuid = ec2utils.ec2_inst_id_to_uuid(context, instance_id)
        instance = self.compute_api.get(context, instance_uuid)
        result = {'instance_id': instance_id}
        fn(instance, result)
        return result
def describe_instances(self, context, **kwargs):
# Optional DescribeInstances argument
instance_id = kwargs.get('instance_id', None)
filters = kwargs.get('filter', None)
instances = self._enforce_valid_instance_ids(context, instance_id)
return self._format_describe_instances(context,
instance_id=instance_id,
instance_cache=instances,
filter=filters)
def describe_instances_v6(self, context, **kwargs):
# Optional DescribeInstancesV6 argument
instance_id = kwargs.get('instance_id', None)
filters = kwargs.get('filter', None)
instances = self._enforce_valid_instance_ids(context, instance_id)
return self._format_describe_instances(context,
instance_id=instance_id,
instance_cache=instances,
filter=filters,
use_v6=True)
def _format_describe_instances(self, context, **kwargs):
return {'reservationSet': self._format_instances(context, **kwargs)}
def _format_run_instances(self, context, reservation_id):
i = self._format_instances(context, reservation_id=reservation_id)
assert len(i) == 1
return i[0]
    def _format_terminate_instances(self, context, instance_id,
                                    previous_states):
        """Build the TerminateInstances response (previous/current states).

        :param instance_id: list of ec2 instance ids, parallel to
            previous_states
        :param previous_states: instance dicts captured before deletion
        """
        instances_set = []
        for (ec2_id, previous_state) in zip(instance_id, previous_states):
            i = {}
            i['instanceId'] = ec2_id
            i['previousState'] = _state_description(previous_state['vm_state'],
                                        previous_state['shutdown_terminate'])
            try:
                instance_uuid = ec2utils.ec2_inst_id_to_uuid(context, ec2_id)
                instance = self.compute_api.get(context, instance_uuid)
                i['currentState'] = _state_description(instance['vm_state'],
                                            instance['shutdown_terminate'])
            except exception.NotFound:
                # Already gone from the db: report it as deleted.
                i['currentState'] = _state_description(vm_states.DELETED,
                                                       True)
            instances_set.append(i)
        return {'instancesSet': instances_set}
    def _format_instance_bdm(self, context, instance_uuid, root_device_name,
                             result):
        """Format InstanceBlockDeviceMappingResponseItemType."""
        # Assume ephemeral root unless an EBS mapping for the root device
        # is found below.
        root_device_type = 'instance-store'
        mapping = []
        for bdm in db.block_device_mapping_get_all_by_instance(context,
                                                               instance_uuid):
            volume_id = bdm['volume_id']
            # Skip pure-ephemeral/no-device entries: only volume-backed
            # mappings appear in the EC2 response.
            if (volume_id is None or bdm['no_device']):
                continue
            if (bdm['device_name'] == root_device_name and
                (bdm['snapshot_id'] or bdm['volume_id'])):
                assert not bdm['virtual_name']
                root_device_type = 'ebs'
            vol = self.volume_api.get(context, volume_id)
            LOG.debug(_("vol = %s\n"), vol)
            # TODO(yamahata): volume attach time
            ebs = {'volumeId': volume_id,
                   'deleteOnTermination': bdm['delete_on_termination'],
                   'attachTime': vol['attach_time'] or '',
                   'status': vol['status'], }
            res = {'deviceName': bdm['device_name'],
                   'ebs': ebs, }
            mapping.append(res)
        if mapping:
            result['blockDeviceMapping'] = mapping
        result['rootDeviceType'] = root_device_type
@staticmethod
def _format_instance_root_device_name(instance, result):
result['rootDeviceName'] = (instance.get('root_device_name') or
block_device.DEFAULT_ROOT_DEV_NAME)
@staticmethod
def _format_instance_type(instance, result):
instance_type = instance_types.extract_instance_type(instance)
result['instanceType'] = instance_type['name']
@staticmethod
def _format_group_set(instance, result):
security_group_names = []
if instance.get('security_groups'):
for security_group in instance['security_groups']:
security_group_names.append(security_group['name'])
result['groupSet'] = utils.convert_to_list_dict(
security_group_names, 'groupId')
    def _format_instances(self, context, instance_id=None, use_v6=False,
                          instances_cache=None, **search_opts):
        """Build the EC2 reservation set for the requested instances.

        :param instance_id: optional list of ec2 instance ids to limit to
        :param use_v6: NOTE(review): currently unused; dnsNameV6 is
            always populated when fixed_ip6s exist -- confirm intended
        :param instances_cache: optional pre-fetched {ec2_id: instance}
            mapping used to avoid re-fetching
        :param search_opts: filters for compute_api.get_all when no ids
            are given
        :returns: list of reservation dicts
        """
        # TODO(termie): this method is poorly named as its name does not imply
        #               that it will be making a variety of database calls
        #               rather than simply formatting a bunch of instances that
        #               were handed to it
        reservations = {}
        if not instances_cache:
            instances_cache = {}
        # NOTE(vish): instance_id is an optional list of ids to filter by
        if instance_id:
            instances = []
            for ec2_id in instance_id:
                if ec2_id in instances_cache:
                    instances.append(instances_cache[ec2_id])
                else:
                    try:
                        instance_uuid = ec2utils.ec2_inst_id_to_uuid(context,
                                                                     ec2_id)
                        instance = self.compute_api.get(context, instance_uuid)
                    except exception.NotFound:
                        # Unknown ids are silently dropped from the result.
                        continue
                    instances.append(instance)
        else:
            try:
                # always filter out deleted instances
                search_opts['deleted'] = False
                instances = self.compute_api.get_all(context,
                                                     search_opts=search_opts,
                                                     sort_dir='asc')
            except exception.NotFound:
                instances = []
        for instance in instances:
            # Non-admins never see VPN (cloudpipe) instances.
            if not context.is_admin:
                if pipelib.is_vpn_image(instance['image_ref']):
                    continue
            i = {}
            instance_uuid = instance['uuid']
            ec2_id = ec2utils.id_to_ec2_inst_id(instance_uuid)
            i['instanceId'] = ec2_id
            image_uuid = instance['image_ref']
            i['imageId'] = ec2utils.glance_id_to_ec2_id(context, image_uuid)
            self._format_kernel_id(context, instance, i, 'kernelId')
            self._format_ramdisk_id(context, instance, i, 'ramdiskId')
            i['instanceState'] = _state_description(
                instance['vm_state'], instance['shutdown_terminate'])
            fixed_ip = None
            floating_ip = None
            ip_info = ec2utils.get_ip_info_for_instance(context, instance)
            # Only the first fixed/floating ip is reported, per EC2 form.
            if ip_info['fixed_ips']:
                fixed_ip = ip_info['fixed_ips'][0]
            if ip_info['floating_ips']:
                floating_ip = ip_info['floating_ips'][0]
            if ip_info['fixed_ip6s']:
                i['dnsNameV6'] = ip_info['fixed_ip6s'][0]
            if CONF.ec2_private_dns_show_ip:
                i['privateDnsName'] = fixed_ip
            else:
                i['privateDnsName'] = instance['hostname']
            i['privateIpAddress'] = fixed_ip
            i['publicDnsName'] = floating_ip
            i['ipAddress'] = floating_ip or fixed_ip
            i['dnsName'] = i['publicDnsName'] or i['privateDnsName']
            i['keyName'] = instance['key_name']
            if context.is_admin:
                # Admins also see the owning project and host inline.
                i['keyName'] = '%s (%s, %s)' % (i['keyName'],
                    instance['project_id'],
                    instance['host'])
            i['productCodesSet'] = utils.convert_to_list_dict([],
                                                              'product_codes')
            self._format_instance_type(instance, i)
            i['launchTime'] = instance['created_at']
            i['amiLaunchIndex'] = instance['launch_index']
            self._format_instance_root_device_name(instance, i)
            self._format_instance_bdm(context, instance['uuid'],
                                      i['rootDeviceName'], i)
            host = instance['host']
            zone = ec2utils.get_availability_zone_by_host(host)
            i['placement'] = {'availabilityZone': zone}
            # Group instances into their reservations, creating the
            # reservation record on first sight.
            if instance['reservation_id'] not in reservations:
                r = {}
                r['reservationId'] = instance['reservation_id']
                r['ownerId'] = instance['project_id']
                self._format_group_set(instance, r)
                r['instancesSet'] = []
                reservations[instance['reservation_id']] = r
            reservations[instance['reservation_id']]['instancesSet'].append(i)
        return list(reservations.values())
def describe_addresses(self, context, public_ip=None, **kwargs):
if public_ip:
floatings = []
for address in public_ip:
floating = self.network_api.get_floating_ip_by_address(context,
address)
floatings.append(floating)
else:
floatings = self.network_api.get_floating_ips_by_project(context)
addresses = [self._format_address(context, f) for f in floatings]
return {'addressesSet': addresses}
def _format_address(self, context, floating_ip):
ec2_id = None
if floating_ip['fixed_ip_id']:
fixed_id = floating_ip['fixed_ip_id']
fixed = self.network_api.get_fixed_ip(context, fixed_id)
if fixed['instance_uuid'] is not None:
ec2_id = ec2utils.id_to_ec2_inst_id(fixed['instance_uuid'])
address = {'public_ip': floating_ip['address'],
'instance_id': ec2_id}
if context.is_admin:
details = "%s (%s)" % (address['instance_id'],
floating_ip['project_id'])
address['instance_id'] = details
return address
def allocate_address(self, context, **kwargs):
LOG.audit(_("Allocate address"), context=context)
try:
public_ip = self.network_api.allocate_floating_ip(context)
except exception.FloatingIpLimitExceeded:
raise exception.EC2APIError(_('No more floating IPs available'))
return {'publicIp': public_ip}
def release_address(self, context, public_ip, **kwargs):
LOG.audit(_("Release address %s"), public_ip, context=context)
try:
self.network_api.release_floating_ip(context, address=public_ip)
return {'return': "true"}
except exception.FloatingIpNotFound:
raise exception.EC2APIError(_('Unable to release IP Address.'))
    def associate_address(self, context, instance_id, public_ip, **kwargs):
        """Associate a floating IP with an instance (EC2 AssociateAddress).

        :raises exception.EC2APIError: when the instance has no fixed ip,
            the ip is already associated, or the association fails
        """
        LOG.audit(_("Associate address %(public_ip)s to"
                    " instance %(instance_id)s") % locals(), context=context)
        instance_uuid = ec2utils.ec2_inst_id_to_uuid(context, instance_id)
        instance = self.compute_api.get(context, instance_uuid)
        cached_ipinfo = ec2utils.get_ip_info_for_instance(context, instance)
        fixed_ips = cached_ipinfo['fixed_ips'] + cached_ipinfo['fixed_ip6s']
        if not fixed_ips:
            msg = _('Unable to associate IP Address, no fixed_ips.')
            raise exception.EC2APIError(msg)
        # TODO(tr3buchet): this will associate the floating IP with the
        # first fixed_ip an instance has. This should be
        # changed to support specifying a particular fixed_ip if
        # multiple exist but this may not apply to ec2..
        if len(fixed_ips) > 1:
            msg = _('multiple fixed_ips exist, using the first: %s')
            LOG.warning(msg, fixed_ips[0])
        try:
            self.network_api.associate_floating_ip(context, instance,
                                  floating_address=public_ip,
                                  fixed_address=fixed_ips[0])
            return {'return': 'true'}
        except exception.FloatingIpAssociated:
            msg = _('Floating ip is already associated.')
            raise exception.EC2APIError(msg)
        except exception.NoFloatingIpInterface:
            msg = _('l3driver call to add floating ip failed.')
            raise exception.EC2APIError(msg)
        except Exception:
            # Catch-all: surface any other association failure as an
            # EC2 error after logging the traceback.
            msg = _('Error, unable to associate floating ip.')
            LOG.exception(msg)
            raise exception.EC2APIError(msg)
def disassociate_address(self, context, public_ip, **kwargs):
instance_id = self.network_api.get_instance_id_by_floating_address(
context, public_ip)
instance = self.compute_api.get(context, instance_id)
LOG.audit(_("Disassociate address %s"), public_ip, context=context)
try:
self.network_api.disassociate_floating_ip(context, instance,
address=public_ip)
except exception.FloatingIpNotAssociated:
msg = _('Floating ip is not associated.')
raise exception.EC2APIError(msg)
except exception.CannotDisassociateAutoAssignedFloatingIP:
msg = _('Cannot disassociate auto assigned floating ip')
raise exception.EC2APIError(msg)
return {'return': "true"}
    def run_instances(self, context, **kwargs):
        """Launch instances (EC2 RunInstances).

        Resolves kernel/ramdisk/image ec2 ids to glance ids, validates
        the image state and block device mappings, then delegates to
        compute_api.create and formats the resulting reservation.
        """
        min_count = int(kwargs.get('min_count', 1))
        # Translate optional kernel/ramdisk ec2 ids to glance ids in place.
        if kwargs.get('kernel_id'):
            kernel = self._get_image(context, kwargs['kernel_id'])
            kwargs['kernel_id'] = ec2utils.id_to_glance_id(context,
                                                           kernel['id'])
        if kwargs.get('ramdisk_id'):
            ramdisk = self._get_image(context, kwargs['ramdisk_id'])
            kwargs['ramdisk_id'] = ec2utils.id_to_glance_id(context,
                                                            ramdisk['id'])
        # Normalize/validate each block device mapping in place.
        for bdm in kwargs.get('block_device_mapping', []):
            _parse_block_device_mapping(bdm)
        image = self._get_image(context, kwargs['image_id'])
        image_uuid = ec2utils.id_to_glance_id(context, image['id'])
        if image:
            image_state = self._get_image_state(image)
        else:
            raise exception.ImageNotFoundEC2(image_id=kwargs['image_id'])
        if image_state != 'available':
            raise exception.EC2APIError(_('Image must be available'))
        (instances, resv_id) = self.compute_api.create(context,
            instance_type=instance_types.get_instance_type_by_name(
                kwargs.get('instance_type', None)),
            image_href=image_uuid,
            max_count=int(kwargs.get('max_count', min_count)),
            min_count=min_count,
            kernel_id=kwargs.get('kernel_id'),
            ramdisk_id=kwargs.get('ramdisk_id'),
            key_name=kwargs.get('key_name'),
            user_data=kwargs.get('user_data'),
            security_group=kwargs.get('security_group'),
            availability_zone=kwargs.get('placement', {}).get(
                                                  'availability_zone'),
            block_device_mapping=kwargs.get('block_device_mapping', {}))
        return self._format_run_instances(context, resv_id)
def _ec2_ids_to_instances(self, context, instance_id):
"""Get all instances first, to prevent partial executions."""
instances = []
for ec2_id in instance_id:
validate_ec2_id(ec2_id)
instance_uuid = ec2utils.ec2_inst_id_to_uuid(context, ec2_id)
instance = self.compute_api.get(context, instance_uuid)
instances.append(instance)
return instances
def terminate_instances(self, context, instance_id, **kwargs):
"""Terminate each instance in instance_id, which is a list of ec2 ids.
instance_id is a kwarg so its name cannot be modified."""
previous_states = self._ec2_ids_to_instances(context, instance_id)
LOG.debug(_("Going to start terminating instances"))
for instance in previous_states:
self.compute_api.delete(context, instance)
return self._format_terminate_instances(context,
instance_id,
previous_states)
def reboot_instances(self, context, instance_id, **kwargs):
"""instance_id is a list of instance ids."""
instances = self._ec2_ids_to_instances(context, instance_id)
LOG.audit(_("Reboot instance %r"), instance_id, context=context)
for instance in instances:
self.compute_api.reboot(context, instance, 'HARD')
return True
def stop_instances(self, context, instance_id, **kwargs):
"""Stop each instances in instance_id.
Here instance_id is a list of instance ids"""
instances = self._ec2_ids_to_instances(context, instance_id)
LOG.debug(_("Going to stop instances"))
for instance in instances:
self.compute_api.stop(context, instance)
return True
def start_instances(self, context, instance_id, **kwargs):
"""Start each instances in instance_id.
Here instance_id is a list of instance ids"""
instances = self._ec2_ids_to_instances(context, instance_id)
LOG.debug(_("Going to start instances"))
for instance in instances:
self.compute_api.start(context, instance)
return True
    def _get_image(self, context, ec2_id):
        """Look up an image by ec2 id ('ami-...'), falling back to name.

        :raises exception.ImageNotFound: when nothing matches, or when
            the ec2 id's type prefix disagrees with the image's
            container format
        """
        try:
            internal_id = ec2utils.ec2_id_to_id(ec2_id)
            image = self.image_service.show(context, internal_id)
        except (exception.InvalidEc2Id, exception.ImageNotFound):
            # Not a parsable/known ec2 id: treat ec2_id as an image name.
            filters = {'name': ec2_id}
            images = self.image_service.detail(context, filters=filters)
            try:
                return images[0]
            except IndexError:
                raise exception.ImageNotFound(image_id=ec2_id)
        # NOTE(review): the type-prefix check below only applies on the
        # id path; name-based lookups above return directly.
        image_type = ec2_id.split('-')[0]
        if ec2utils.image_type(image.get('container_format')) != image_type:
            raise exception.ImageNotFound(image_id=ec2_id)
        return image
    def _format_image(self, image):
        """Convert from format defined by GlanceImageService to S3 format."""
        i = {}
        image_type = ec2utils.image_type(image.get('container_format'))
        ec2_id = ec2utils.image_ec2_id(image.get('id'), image_type)
        name = image.get('name')
        i['imageId'] = ec2_id
        kernel_id = image['properties'].get('kernel_id')
        if kernel_id:
            i['kernelId'] = ec2utils.image_ec2_id(kernel_id, 'aki')
        ramdisk_id = image['properties'].get('ramdisk_id')
        if ramdisk_id:
            i['ramdiskId'] = ec2utils.image_ec2_id(ramdisk_id, 'ari')
        i['imageOwnerId'] = image.get('owner')
        img_loc = image['properties'].get('image_location')
        if img_loc:
            i['imageLocation'] = img_loc
        else:
            # NOTE(review): when img_loc is falsy this renders as
            # "None (<name>)" -- presumably long-standing intentional
            # behavior for locally registered images; confirm before
            # changing.
            i['imageLocation'] = "%s (%s)" % (img_loc, name)
        i['name'] = name
        if not name and img_loc:
            # This should only occur for images registered with ec2 api
            # prior to that api populating the glance name
            i['name'] = img_loc
        i['imageState'] = self._get_image_state(image)
        i['description'] = image.get('description')
        display_mapping = {'aki': 'kernel',
                           'ari': 'ramdisk',
                           'ami': 'machine'}
        i['imageType'] = display_mapping.get(image_type)
        i['isPublic'] = not not image.get('is_public')
        i['architecture'] = image['properties'].get('architecture')
        properties = image['properties']
        root_device_name = block_device.properties_root_device_name(properties)
        # Root device is 'ebs' only when a volume/snapshot mapping covers
        # the root device name; otherwise it's instance-store.
        root_device_type = 'instance-store'
        for bdm in properties.get('block_device_mapping', []):
            if (block_device.strip_dev(bdm.get('device_name')) ==
                block_device.strip_dev(root_device_name) and
                ('snapshot_id' in bdm or 'volume_id' in bdm) and
                    not bdm.get('no_device')):
                root_device_type = 'ebs'
        i['rootDeviceName'] = (root_device_name or
                               block_device.DEFAULT_ROOT_DEV_NAME)
        i['rootDeviceType'] = root_device_type
        _format_mappings(properties, i)
        return i
def describe_images(self, context, image_id=None, **kwargs):
# NOTE: image_id is a list!
if image_id:
images = []
for ec2_id in image_id:
try:
image = self._get_image(context, ec2_id)
except exception.NotFound:
raise exception.ImageNotFound(image_id=ec2_id)
images.append(image)
else:
images = self.image_service.detail(context)
images = [self._format_image(i) for i in images]
return {'imagesSet': images}
def deregister_image(self, context, image_id, **kwargs):
LOG.audit(_("De-registering image %s"), image_id, context=context)
image = self._get_image(context, image_id)
internal_id = image['id']
self.image_service.delete(context, internal_id)
return True
def _register_image(self, context, metadata):
image = self.image_service.create(context, metadata)
image_type = ec2utils.image_type(image.get('container_format'))
image_id = ec2utils.image_ec2_id(image['id'], image_type)
return image_id
def register_image(self, context, image_location=None, **kwargs):
if image_location is None and kwargs.get('name'):
image_location = kwargs['name']
if image_location is None:
raise exception.EC2APIError(_('imageLocation is required'))
metadata = {'properties': {'image_location': image_location}}
if kwargs.get('name'):
metadata['name'] = kwargs['name']
else:
metadata['name'] = image_location
if 'root_device_name' in kwargs:
metadata['properties']['root_device_name'] = kwargs.get(
'root_device_name')
mappings = [_parse_block_device_mapping(bdm) for bdm in
kwargs.get('block_device_mapping', [])]
if mappings:
metadata['properties']['block_device_mapping'] = mappings
image_id = self._register_image(context, metadata)
msg = _("Registered image %(image_location)s with"
" id %(image_id)s") % locals()
LOG.audit(msg, context=context)
return {'imageId': image_id}
    def describe_image_attribute(self, context, image_id, attribute, **kwargs):
        """Return one attribute of an image (EC2 DescribeImageAttribute).

        Supported attributes: blockDeviceMapping, launchPermission,
        rootDeviceName, kernel, ramdisk.
        """
        def _block_device_mapping_attribute(image, result):
            _format_mappings(image['properties'], result)
        def _launch_permission_attribute(image, result):
            result['launchPermission'] = []
            # Public images carry the all-group launch permission.
            if image['is_public']:
                result['launchPermission'].append({'group': 'all'})
        def _root_device_name_attribute(image, result):
            _prop_root_dev_name = block_device.properties_root_device_name
            result['rootDeviceName'] = _prop_root_dev_name(image['properties'])
            if result['rootDeviceName'] is None:
                result['rootDeviceName'] = block_device.DEFAULT_ROOT_DEV_NAME
        def _kernel_attribute(image, result):
            kernel_id = image['properties'].get('kernel_id')
            if kernel_id:
                result['kernel'] = {
                    'value': ec2utils.image_ec2_id(kernel_id, 'aki')
                }
        def _ramdisk_attribute(image, result):
            ramdisk_id = image['properties'].get('ramdisk_id')
            if ramdisk_id:
                result['ramdisk'] = {
                    'value': ec2utils.image_ec2_id(ramdisk_id, 'ari')
                }
        supported_attributes = {
            'blockDeviceMapping': _block_device_mapping_attribute,
            'launchPermission': _launch_permission_attribute,
            'rootDeviceName': _root_device_name_attribute,
            'kernel': _kernel_attribute,
            'ramdisk': _ramdisk_attribute,
        }
        fn = supported_attributes.get(attribute)
        if fn is None:
            raise exception.EC2APIError(_('attribute not supported: %s')
                                        % attribute)
        try:
            image = self._get_image(context, image_id)
        except exception.NotFound:
            raise exception.ImageNotFound(image_id=image_id)
        result = {'imageId': image_id}
        fn(image, result)
        return result
def modify_image_attribute(self, context, image_id, attribute,
                           operation_type, **kwargs):
    """Add or remove the public launch permission of an image.

    :param attribute: only 'launchPermission' is supported
    :param operation_type: 'add' makes the image public, 'remove' private
    :param kwargs: must carry ``user_group=['all']``; no other users or
        groups are supported
    :returns: the updated image as returned by the image service
    :raises EC2APIError: unsupported attribute/group/operation, or caller
        not allowed to modify the image
    :raises ImageNotFound: when *image_id* cannot be resolved
    """
    # TODO(devcamcar): Support users and groups other than 'all'.
    if attribute != 'launchPermission':
        raise exception.EC2APIError(_('attribute not supported: %s')
                                    % attribute)
    if 'user_group' not in kwargs:
        raise exception.EC2APIError(_('user or group not specified'))
    # Bug fix: the original used 'and' here, which accepted any single
    # non-'all' group (len == 1 made the first operand False) and any
    # multi-entry list whose first element was 'all'.  Exactly one entry
    # equal to 'all' is required, so reject when either condition fails.
    if len(kwargs['user_group']) != 1 or kwargs['user_group'][0] != 'all':
        raise exception.EC2APIError(_('only group "all" is supported'))
    if operation_type not in ['add', 'remove']:
        msg = _('operation_type must be add or remove')
        raise exception.EC2APIError(msg)
    LOG.audit(_("Updating image %s publicity"), image_id, context=context)

    try:
        image = self._get_image(context, image_id)
    except exception.NotFound:
        raise exception.ImageNotFound(image_id=image_id)
    # The image service expects the id to be passed separately.
    internal_id = image['id']
    del image['id']

    image['is_public'] = (operation_type == 'add')
    try:
        return self.image_service.update(context, internal_id, image)
    except exception.ImageNotAuthorized:
        msg = _('Not allowed to modify attributes for image %s')
        raise exception.EC2APIError(msg % image_id)
def update_image(self, context, image_id, **kwargs):
    """Update image metadata and return the image service's result."""
    glance_id = ec2utils.ec2_id_to_id(image_id)
    return self.image_service.update(context, glance_id, dict(kwargs))
# TODO(yamahata): race condition
# At the moment there is no way to prevent others from
# manipulating instances/volumes/snapshots.
# As other code doesn't take it into consideration, here we don't
# care of it for now. Ostrich algorithm
def create_image(self, context, instance_id, **kwargs):
    """EC2 CreateImage: snapshot a volume-backed instance into a new image.

    Unless ``no_reboot`` is passed, an ACTIVE instance is stopped first,
    polled until it reaches STOPPED (1 hour timeout), snapshotted, and
    restarted afterwards.

    :returns: ``{'imageId': ec2_id}`` for the new image
    :raises InvalidParameterValue: if the instance is not volume-backed
    :raises InstanceNotRunning: if the instance is in a transitional state
    :raises EC2APIError: if the instance fails to stop within the timeout
    """
    # NOTE(yamahata): name/description are ignored by register_image(),
    #                 do so here
    no_reboot = kwargs.get('no_reboot', False)
    name = kwargs.get('name')
    validate_ec2_id(instance_id)
    ec2_instance_id = instance_id
    instance_uuid = ec2utils.ec2_inst_id_to_uuid(context, ec2_instance_id)
    instance = self.compute_api.get(context, instance_uuid)

    bdms = self.compute_api.get_instance_bdms(context, instance)

    # CreateImage only supported for the analogue of EBS-backed instances
    if not self.compute_api.is_volume_backed_instance(context, instance,
                                                      bdms):
        root = instance['root_device_name']
        msg = _("Invalid value '%(ec2_instance_id)s' for instanceId. "
                "Instance does not have a volume attached at root "
                "(%(root)s)") % locals()
        raise exception.InvalidParameterValue(err=msg)

    # stop the instance if necessary
    restart_instance = False
    if not no_reboot:
        vm_state = instance['vm_state']

        # if the instance is in subtle state, refuse to proceed.
        if vm_state not in (vm_states.ACTIVE, vm_states.STOPPED):
            raise exception.InstanceNotRunning(instance_id=ec2_instance_id)

        if vm_state == vm_states.ACTIVE:
            restart_instance = True
            self.compute_api.stop(context, instance)

        # wait instance for really stopped (busy-poll once per second)
        start_time = time.time()
        while vm_state != vm_states.STOPPED:
            time.sleep(1)
            instance = self.compute_api.get(context, instance_uuid)
            vm_state = instance['vm_state']
            # NOTE(yamahata): timeout and error. 1 hour for now for safety.
            #                 Is it too short/long?
            #                 Or is there any better way?
            timeout = 1 * 60 * 60
            if time.time() > start_time + timeout:
                raise exception.EC2APIError(
                    _('Couldn\'t stop instance with in %d sec') % timeout)

    glance_uuid = instance['image_ref']
    ec2_image_id = ec2utils.glance_id_to_ec2_id(context, glance_uuid)
    src_image = self._get_image(context, ec2_image_id)
    image_meta = dict(src_image)

    def _unmap_id_property(properties, name):
        # kernel_id/ramdisk_id are stored as EC2 ids on the source image.
        if properties[name]:
            properties[name] = ec2utils.id_to_glance_id(context,
                                                        properties[name])

    # ensure the ID properties are unmapped back to the glance UUID
    _unmap_id_property(image_meta['properties'], 'kernel_id')
    _unmap_id_property(image_meta['properties'], 'ramdisk_id')

    # meaningful image name when the caller did not supply one
    name_map = dict(instance=instance['uuid'], now=timeutils.isotime())
    name = name or _('image of %(instance)s at %(now)s') % name_map

    new_image = self.compute_api.snapshot_volume_backed(context,
                                                        instance,
                                                        image_meta,
                                                        name)

    ec2_id = ec2utils.glance_id_to_ec2_id(context, new_image['id'])

    # Restart only if we stopped the instance ourselves above.
    if restart_instance:
        self.compute_api.start(context, instance)

    return {'imageId': ec2_id}
class EC2SecurityGroupExceptions(object):
    """Mixin translating security-group driver failures into EC2 errors.

    The security-group API backends call these hooks when validation or a
    lookup fails; each hook raises the exception type the EC2 layer expects.
    """

    @staticmethod
    def raise_invalid_property(msg):
        raise exception.InvalidParameterValue(err=msg)

    @staticmethod
    def raise_group_already_exists(msg):
        raise exception.EC2APIError(message=msg)

    @staticmethod
    def raise_invalid_group(msg):
        raise exception.InvalidGroup(reason=msg)

    @staticmethod
    def raise_invalid_cidr(cidr, decoding_exception=None):
        # Surface the original decoding error when one was supplied so the
        # caller sees the root cause; otherwise report a generic failure.
        if decoding_exception:
            raise decoding_exception
        else:
            raise exception.EC2APIError(_("Invalid CIDR"))

    @staticmethod
    def raise_over_quota(msg):
        raise exception.EC2APIError(message=msg)

    @staticmethod
    def raise_not_found(msg):
        # No-op: a missing group is silently ignored on this code path.
        pass
class CloudSecurityGroupNovaAPI(EC2SecurityGroupExceptions,
                                compute_api.SecurityGroupAPI):
    # Nova-native security-group backend with EC2-style error translation
    # mixed in; all behavior comes from the two bases.
    pass
class CloudSecurityGroupQuantumAPI(EC2SecurityGroupExceptions,
                                   quantum_driver.SecurityGroupAPI):
    # Quantum-backed security-group backend with EC2-style error
    # translation mixed in; all behavior comes from the two bases.
    pass
def get_cloud_security_group_api():
    """Return the security-group API backend selected by configuration.

    Reads ``CONF.security_group_api`` (case-insensitively) and returns the
    matching backend instance.

    :raises NotImplementedError: for any value other than 'nova'/'quantum'
    """
    # Read and normalize the option once instead of on every comparison.
    backend = cfg.CONF.security_group_api.lower()
    if backend == 'nova':
        return CloudSecurityGroupNovaAPI()
    elif backend == 'quantum':
        return CloudSecurityGroupQuantumAPI()
    else:
        raise NotImplementedError()
|
|
"""
Axislines includes modified implementation of the Axes class. The
biggest difference is that the artists responsible to draw axis line,
ticks, ticklabel and axis labels are separated out from the mpl's Axis
class, which are much more than artists in the original
mpl. Originally, this change was motivated to support curvilinear
grid. Here are a few reasons that I came up with new axes class.
* "top" and "bottom" x-axis (or "left" and "right" y-axis) can have
different ticks (tick locations and labels). This is not possible
with the current mpl, although some twin axes trick can help.
* Curvilinear grid.
* angled ticks.
In the new axes class, xaxis and yaxis are set to be invisible by
default, and a new set of artists (AxisArtist) is defined to draw axis
line, ticks, ticklabels and axis label. Axes.axis attribute serves as
a dictionary of these artists, i.e., ax.axis["left"] is a AxisArtist
instance responsible to draw left y-axis. The default Axes.axis contains
"bottom", "left", "top" and "right".
AxisArtist can be considered as a container artist and
has following children artists which will draw ticks, labels, etc.
* line
* major_ticks, major_ticklabels
* minor_ticks, minor_ticklabels
* offsetText
* label
Note that these are separate artists from the Axis class of the
original mpl, thus most tick-related commands in the original mpl
won't work, although some effort has been made to support them. For example,
color and markerwidth of the ax.axis["bottom"].major_ticks will follow
those of Axes.xaxis unless explicitly specified.
In addition to AxisArtist, the Axes will have *gridlines* attribute,
which obviously draws grid lines. The gridlines needs to be separated
from the axis as some gridlines can never pass any axis.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import matplotlib.axes as maxes
import matplotlib.artist as martist
import matplotlib.text as mtext
import matplotlib.font_manager as font_manager
from matplotlib.path import Path
from matplotlib.transforms import Affine2D, ScaledTranslation, \
IdentityTransform, TransformedPath, Bbox
from matplotlib.collections import LineCollection
from matplotlib import rcParams
from matplotlib.artist import allow_rasterization
import warnings
import numpy as np
import matplotlib.lines as mlines
from .axisline_style import AxislineStyle
from .axis_artist import AxisArtist, GridlinesCollection
class AxisArtistHelper(object):
    """
    AxisArtistHelper should define
    following method with given APIs. Note that the first axes argument
    will be axes attribute of the caller artist.

        # LINE (spinal line?)

        def get_line(self, axes):
            # path : Path
            return path

        def get_line_transform(self, axes):
            # ...
            # trans : transform
            return trans

        # LABEL

        def get_label_pos(self, axes):
            # x, y : position
            return (x, y), trans

        def get_label_offset_transform(self, \
                axes,
                pad_points, fontprops, renderer,
                bboxes,
                ):
            # va : vertical alignment
            # ha : horizontal alignment
            # a : angle
            return trans, va, ha, a

        # TICK

        def get_tick_transform(self, axes):
            return trans

        def get_tick_iterators(self, axes):
            # iter : iterable object that yields (c, angle, l) where
            # c, angle, l is position, tick angle, and label
            return iter_major, iter_minor
    """

    class _Base(object):
        """
        Base class for axis helper.
        """
        def __init__(self):
            # Tolerances used by subclasses when testing whether a tick
            # position lies inside the axes (see get_tick_iterators).
            self.delta1, self.delta2 = 0.00001, 0.00001

        def update_lim(self, axes):
            # Hook for subclasses that cache state dependent on the axes
            # limits; the default does nothing.
            pass

    class Fixed(_Base):
        """
        Helper class for a fixed (in the axes coordinate) axis.
        """

        # Axes-fraction point each fixed axis passes through.
        _default_passthru_pt = dict(left=(0, 0),
                                    right=(1, 0),
                                    bottom=(0, 0),
                                    top=(0, 1))

        def __init__(self,
                     loc, nth_coord=None,
                     ):
            """
            nth_coord = along which coordinate value varies
            in 2d, nth_coord = 0 -> x axis, nth_coord = 1 -> y axis

            *loc* must be one of "left", "right", "bottom", "top"; when
            *nth_coord* is None it is derived from *loc*.
            """
            self._loc = loc

            if loc not in ["left", "right", "bottom", "top"]:
                raise ValueError("%s" % loc)

            if nth_coord is None:
                if loc in ["left", "right"]:
                    nth_coord = 1
                elif loc in ["bottom", "top"]:
                    nth_coord = 0

            self.nth_coord = nth_coord

            super(AxisArtistHelper.Fixed, self).__init__()

            self.passthru_pt = self._default_passthru_pt[loc]

            # Build the unit segment and pin its fixed coordinate to the
            # pass-through point, yielding the axis line in axes coords.
            _verts = np.array([[0., 0.],
                               [1., 1.]])
            fixed_coord = 1-nth_coord
            _verts[:,fixed_coord] = self.passthru_pt[fixed_coord]

            # axis line in transAxes
            self._path = Path(_verts)

        def get_nth_coord(self):
            return self.nth_coord

        # LINE

        def get_line(self, axes):
            return self._path

        def get_line_transform(self, axes):
            return axes.transAxes

        # LABEL

        def get_axislabel_transform(self, axes):
            return axes.transAxes

        def get_axislabel_pos_angle(self, axes):
            """
            label reference position in transAxes.

            get_label_transform() returns a transform of (transAxes+offset)
            """
            loc = self._loc
            pos, angle_tangent = dict(left=((0., 0.5), 90),
                                      right=((1., 0.5), 90),
                                      bottom=((0.5, 0.), 0),
                                      top=((0.5, 1.), 0))[loc]

            return pos, angle_tangent

        # TICK

        def get_tick_transform(self, axes):
            # Ticks live in the blended x- or y-axis transform, selected by
            # the varying coordinate.
            trans_tick = [axes.get_xaxis_transform(),
                          axes.get_yaxis_transform()][self.nth_coord]
            return trans_tick

    class Floating(_Base):
        # Helper for an axis floating at a fixed data-coordinate value;
        # concrete line construction is left to derived classes.
        def __init__(self, nth_coord,
                     value):
            self.nth_coord = nth_coord
            self._value = value
            super(AxisArtistHelper.Floating,
                  self).__init__()

        def get_nth_coord(self):
            return self.nth_coord

        def get_line(self, axes):
            raise RuntimeError("get_line method should be defined by the derived class")
class AxisArtistHelperRectlinear(object):
    # Concrete AxisArtistHelper implementations for ordinary rectilinear
    # axes; Fixed draws a spine-like axis, Floating an axis pinned to a
    # data-coordinate value.

    class Fixed(AxisArtistHelper.Fixed):

        def __init__(self,
                     axes, loc, nth_coord=None,
                     ):
            """
            nth_coord = along which coordinate value varies
            in 2d, nth_coord = 0 -> x axis, nth_coord = 1 -> y axis
            """
            super(AxisArtistHelperRectlinear.Fixed, self).__init__( \
                loc, nth_coord)
            # Ticks/labels are sourced from the matching mpl Axis object.
            self.axis = [axes.xaxis, axes.yaxis][self.nth_coord]

        # TICK

        def get_tick_iterators(self, axes):
            """tick_loc, tick_angle, tick_label"""

            loc = self._loc

            # Tick mark normal/tangent angles depend on the side drawn.
            if loc in ["bottom", "top"]:
                angle_normal, angle_tangent = 90, 0
            else:
                angle_normal, angle_tangent = 0, 90

            # Query locators/formatters of the underlying mpl Axis.
            major = self.axis.major
            majorLocs = major.locator()
            major.formatter.set_locs(majorLocs)
            majorLabels = [major.formatter(val, i) for i, val in enumerate(majorLocs)]

            minor = self.axis.minor
            minorLocs = minor.locator()
            minor.formatter.set_locs(minorLocs)
            minorLabels = [minor.formatter(val, i) for i, val in enumerate(minorLocs)]

            trans_tick = self.get_tick_transform(axes)

            # Maps tick coords into axes-fraction space for clipping.
            tr2ax = trans_tick + axes.transAxes.inverted()

            def _f(locs, labels):
                for x, l in zip(locs, labels):

                    c = list(self.passthru_pt) # copy
                    c[self.nth_coord] = x

                    # check if the tick point is inside axes
                    c2 = tr2ax.transform_point(c)
                    #delta=0.00001
                    if 0. -self.delta1<= c2[self.nth_coord] <= 1.+self.delta2:
                        yield c, angle_normal, angle_tangent, l

            return _f(majorLocs, majorLabels), _f(minorLocs, minorLabels)

    class Floating(AxisArtistHelper.Floating):
        def __init__(self, axes, nth_coord,
                     passingthrough_point, axis_direction="bottom"):
            super(AxisArtistHelperRectlinear.Floating, self).__init__( \
                nth_coord, passingthrough_point)
            self._axis_direction = axis_direction
            self.axis = [axes.xaxis, axes.yaxis][self.nth_coord]

        def get_line(self, axes):
            # Unit segment along the varying coordinate, pinned at the
            # floating value (converted from data to axes fraction).
            _verts = np.array([[0., 0.],
                               [1., 1.]])

            fixed_coord = 1-self.nth_coord
            trans_passingthrough_point = axes.transData + axes.transAxes.inverted()
            p = trans_passingthrough_point.transform_point([self._value,
                                                            self._value])
            _verts[:,fixed_coord] = p[fixed_coord]

            return Path(_verts)

        def get_line_transform(self, axes):
            return axes.transAxes

        def get_axislabel_transform(self, axes):
            return axes.transAxes

        def get_axislabel_pos_angle(self, axes):
            """
            label reference position in transAxes.

            get_label_transform() returns a transform of (transAxes+offset)

            Returns (None, None) when the floating value falls outside the
            current view, so no label is drawn.
            """
            loc = self._axis_direction
            #angle = dict(left=0,
            #             right=0,
            #             bottom=.5*np.pi,
            #             top=.5*np.pi)[loc]

            if self.nth_coord == 0:
                angle = 0
            else:
                angle = 90

            _verts = [0.5, 0.5]

            fixed_coord = 1-self.nth_coord
            trans_passingthrough_point = axes.transData + axes.transAxes.inverted()
            p = trans_passingthrough_point.transform_point([self._value,
                                                            self._value])
            _verts[fixed_coord] = p[fixed_coord]
            if not (0. <= _verts[fixed_coord] <= 1.):
                return None, None
            else:
                return _verts, angle

        def get_tick_transform(self, axes):
            return axes.transData

        def get_tick_iterators(self, axes):
            """tick_loc, tick_angle, tick_label"""
            loc = self._axis_direction

            # NOTE(review): this loc-based assignment is immediately
            # overwritten by the nth_coord-based one below, so it appears
            # to be dead code — confirm before removing.
            if loc in ["bottom", "top"]:
                angle_normal, angle_tangent = 90, 0
            else:
                angle_normal, angle_tangent = 0, 90

            if self.nth_coord == 0:
                angle_normal, angle_tangent = 90, 0
            else:
                angle_normal, angle_tangent = 0, 90

            #angle = 90 - 90 * self.nth_coord

            major = self.axis.major
            majorLocs = major.locator()
            major.formatter.set_locs(majorLocs)
            majorLabels = [major.formatter(val, i) for i, val in enumerate(majorLocs)]

            minor = self.axis.minor
            minorLocs = minor.locator()
            minor.formatter.set_locs(minorLocs)
            minorLabels = [minor.formatter(val, i) for i, val in enumerate(minorLocs)]

            tr2ax = axes.transData + axes.transAxes.inverted()

            def _f(locs, labels):
                for x, l in zip(locs, labels):

                    c = [self._value, self._value]
                    c[self.nth_coord] = x
                    c1, c2 = tr2ax.transform_point(c)
                    # Both coordinates must fall inside the axes; the inner
                    # test re-checks the varying one with delta tolerances.
                    if 0. <= c1 <= 1. and 0. <= c2 <= 1.:
                        if 0. - self.delta1 <= [c1, c2][self.nth_coord] <= 1. + self.delta2:
                            yield c, angle_normal, angle_tangent, l

            return _f(majorLocs, majorLabels), _f(minorLocs, minorLabels)
class GridHelperBase(object):
    """Base class providing limit-change tracking shared by grid helpers."""

    def __init__(self):
        self._force_update = True
        self._old_limits = None
        super(GridHelperBase, self).__init__()

    def update_lim(self, axes):
        # Refresh cached grid info only when the view limits changed (or a
        # refresh was forced via invalidate()).
        x1, x2 = axes.get_xlim()
        y1, y2 = axes.get_ylim()

        if self._force_update or self._old_limits != (x1, x2, y1, y2):
            self._update(x1, x2, y1, y2)
            self._force_update = False
            self._old_limits = (x1, x2, y1, y2)

    def _update(self, x1, x2, y1, y2):
        # Subclass hook invoked by update_lim(); the base caches nothing.
        pass

    def invalidate(self):
        # Force the next update_lim() call to recompute.
        self._force_update = True

    def valid(self):
        return not self._force_update

    def get_gridlines(self, which, axis):
        """
        Return list of grid lines as a list of paths (list of points).

        *which* : "major" or "minor"
        *axis* : "both", "x" or "y"
        """
        return []

    def new_gridlines(self, ax):
        """
        Create and return a new GridlineCollection instance.

        *which* : "major" or "minor"
        *axis* : "both", "x" or "y"

        """
        gridlines = GridlinesCollection(None, transform=ax.transData,
                                        colors=rcParams['grid.color'],
                                        linestyles=rcParams['grid.linestyle'],
                                        linewidths=rcParams['grid.linewidth'])
        ax._set_artist_props(gridlines)
        gridlines.set_grid_helper(self)

        # NOTE(review): ax.axes is normally ax itself, so this repeats the
        # _set_artist_props call above — looks redundant; confirm.
        ax.axes._set_artist_props(gridlines)
        # gridlines.set_clip_path(self.axes.patch)
        # set_clip_path need to be deferred after Axes.cla is completed.
        # It is done inside the cla.

        return gridlines
class GridHelperRectlinear(GridHelperBase):
    """Grid helper for ordinary rectilinear axes.

    Creates fixed/floating AxisArtists for *axes* and computes grid-line
    segments in data coordinates.
    """

    def __init__(self, axes):
        super(GridHelperRectlinear, self).__init__()
        self.axes = axes

    def new_fixed_axis(self, loc,
                       nth_coord=None,
                       axis_direction=None,
                       offset=None,
                       axes=None,
                       ):
        """Return an AxisArtist fixed at *loc* ("left", "right", ...)."""
        if axes is None:
            warnings.warn("'new_fixed_axis' explicitly requires the axes keyword.")
            axes = self.axes

        _helper = AxisArtistHelperRectlinear.Fixed(axes, loc, nth_coord)

        # The label/tick direction defaults to the side being drawn.
        if axis_direction is None:
            axis_direction = loc
        axisline = AxisArtist(axes, _helper, offset=offset,
                              axis_direction=axis_direction,
                              )

        return axisline

    def new_floating_axis(self, nth_coord, value,
                          axis_direction="bottom",
                          axes=None,
                          ):
        """Return an AxisArtist floating at data-coordinate *value*."""
        if axes is None:
            warnings.warn("'new_floating_axis' explicitly requires the axes keyword.")
            axes = self.axes

        # (Removed two unused locals, passthrough_point and transform,
        # that the original computed and never read.)
        _helper = AxisArtistHelperRectlinear.Floating(
            axes, nth_coord, value, axis_direction)

        axisline = AxisArtist(axes, _helper)

        # A floating axis may wander outside the axes; clip it to the bbox.
        axisline.line.set_clip_on(True)
        axisline.line.set_clip_box(axisline.axes.bbox)
        return axisline

    def get_gridlines(self, which="major", axis="both"):
        """
        return list of gridline coordinates in data coordinates.

        *which* : "major" or "minor"
        *axis* : "both", "x" or "y"
        """
        gridlines = []

        if axis in ["both", "x"]:
            locs = []
            y1, y2 = self.axes.get_ylim()
            if which in ["both", "major"]:
                locs.extend(self.axes.xaxis.major.locator())
            if which in ["both", "minor"]:
                locs.extend(self.axes.xaxis.minor.locator())

            for x in locs:
                gridlines.append([[x, x], [y1, y2]])

        if axis in ["both", "y"]:
            x1, x2 = self.axes.get_xlim()
            locs = []
            # Bug fix: the y branch previously keyed on the private
            # _gridOnMajor/_gridOnMinor flags and ignored *which*,
            # contradicting both the docstring and the x branch above.
            if which in ["both", "major"]:
                locs.extend(self.axes.yaxis.major.locator())
            if which in ["both", "minor"]:
                locs.extend(self.axes.yaxis.minor.locator())

            for y in locs:
                gridlines.append([[x1, x2], [y, y]])

        return gridlines
class SimpleChainedObjects(object):
    """Proxy fanning attribute access and calls out to several objects.

    Attribute lookup returns a new SimpleChainedObjects wrapping the same
    attribute of every wrapped object; calling the proxy invokes each
    wrapped callable in turn (return values are discarded).
    """

    def __init__(self, objects):
        self._objects = objects

    def __getattr__(self, k):
        chained = [getattr(obj, k) for obj in self._objects]
        return SimpleChainedObjects(chained)

    def __call__(self, *kl, **kwargs):
        for member in self._objects:
            member(*kl, **kwargs)
class Axes(maxes.Axes):
    """Axes subclass whose axis lines/ticks/labels are drawn by AxisArtists.

    The stock xaxis/yaxis and spines are hidden (see toggle_axisline) and
    replaced by the AxisArtist instances stored in the ``axis`` attribute,
    an AxisDict keyed by "bottom", "left", "top" and "right".
    """

    class AxisDict(dict):
        # dict of AxisArtists with tuple/slice access and callable syntax:
        # d[("a", "b")] and d[:] return a SimpleChainedObjects proxy over
        # several artists; calling the dict forwards to the classic
        # maxes.Axes.axis(...) method for backward compatibility.
        def __init__(self, axes):
            self.axes = axes
            super(Axes.AxisDict, self).__init__()

        def __getitem__(self, k):
            if isinstance(k, tuple):
                r = SimpleChainedObjects([dict.__getitem__(self, k1) for k1 in k])
                return r
            elif isinstance(k, slice):
                # Only the full slice [:] is supported.
                if k.start == None and k.stop == None and k.step == None:
                    r = SimpleChainedObjects(list(six.itervalues(self)))
                    return r
                else:
                    raise ValueError("Unsupported slice")
            else:
                return dict.__getitem__(self, k)

        def __call__(self, *v, **kwargs):
            return maxes.Axes.axis(self.axes, *v, **kwargs)

    def __init__(self, *kl, **kw):
        # "grid_helper" is consumed here; all other args go to maxes.Axes.
        helper = kw.pop("grid_helper", None)

        self._axisline_on = True

        if helper:
            self._grid_helper = helper
        else:
            self._grid_helper = GridHelperRectlinear(self)

        super(Axes, self).__init__(*kl, **kw)

        self.toggle_axisline(True)

    def toggle_axisline(self, b=None):
        # b=True: AxisArtist mode (spines and stock x/y axis hidden);
        # b=False: standard mpl drawing; b=None toggles the current state.
        if b is None:
            b = not self._axisline_on
        if b:
            self._axisline_on = True
            for s in self.spines.values():
                s.set_visible(False)
            self.xaxis.set_visible(False)
            self.yaxis.set_visible(False)
        else:
            self._axisline_on = False
            for s in self.spines.values():
                s.set_visible(True)
            self.xaxis.set_visible(True)
            self.yaxis.set_visible(True)

    def _init_axis(self):
        super(Axes, self)._init_axis()

    def _init_axis_artists(self, axes=None):
        # Build the four fixed AxisArtists; top/right start with hidden
        # labels and ticklabels, matching the classic mpl look.
        if axes is None:
            axes = self

        self._axislines = self.AxisDict(self)
        new_fixed_axis = self.get_grid_helper().new_fixed_axis
        for loc in ["bottom", "top", "left", "right"]:
            self._axislines[loc] = new_fixed_axis(loc=loc, axes=axes,
                                                  axis_direction=loc)

        for axisline in [self._axislines["top"], self._axislines["right"]]:
            axisline.label.set_visible(False)
            axisline.major_ticklabels.set_visible(False)
            axisline.minor_ticklabels.set_visible(False)

    def _get_axislines(self):
        return self._axislines

    # Shadows the inherited Axes.axis *method*; the AxisDict is itself
    # callable and forwards calls to maxes.Axes.axis for compatibility.
    axis = property(_get_axislines)

    def new_gridlines(self, grid_helper=None):
        """
        Create and return a new GridlineCollection instance.

        *which* : "major" or "minor"
        *axis* : "both", "x" or "y"

        """
        if grid_helper is None:
            grid_helper = self.get_grid_helper()

        gridlines = grid_helper.new_gridlines(self)

        return gridlines

    def _init_gridlines(self, grid_helper=None):
        # Clipping is deferred; it is done inside cla().
        gridlines = self.new_gridlines(grid_helper)

        self.gridlines = gridlines

    def cla(self):
        # gridlines need to be created before cla() since cla calls grid()
        self._init_gridlines()
        super(Axes, self).cla()

        # the clip_path should be set after Axes.cla() since that's
        # when a patch is created.
        self.gridlines.set_clip_path(self.axes.patch)

        self._init_axis_artists()

    def get_grid_helper(self):
        return self._grid_helper

    def grid(self, b=None, which='major', axis="both", **kwargs):
        """
        Toggle the gridlines, and optionally set the properties of the lines.
        """
        # There is some discrepancy between the behavior of grid in
        # axes_grid and the original mpl's grid, because axes_grid
        # explicitly sets the visibility of the gridlines.

        super(Axes, self).grid(b, which=which, axis=axis, **kwargs)
        if not self._axisline_on:
            return

        # b=None means "reflect the current grid state of either axis".
        if b is None:

            if self.axes.xaxis._gridOnMinor or self.axes.xaxis._gridOnMajor or \
                   self.axes.yaxis._gridOnMinor or self.axes.yaxis._gridOnMajor:
                b=True
            else:
                b=False

        self.gridlines.set_which(which)
        self.gridlines.set_axis(axis)
        self.gridlines.set_visible(b)

        if len(kwargs):
            martist.setp(self.gridlines, **kwargs)

    def get_children(self):
        # Include the AxisArtists and gridlines only in axisline mode.
        if self._axisline_on:
            children = list(six.itervalues(self._axislines)) + [self.gridlines]
        else:
            children = []
        children.extend(super(Axes, self).get_children())
        return children

    def invalidate_grid_helper(self):
        self._grid_helper.invalidate()

    def new_fixed_axis(self, loc, offset=None):
        gh = self.get_grid_helper()
        axis = gh.new_fixed_axis(loc,
                                 nth_coord=None,
                                 axis_direction=None,
                                 offset=offset,
                                 axes=self,
                                 )
        return axis

    def new_floating_axis(self, nth_coord, value,
                          axis_direction="bottom",
                          ):
        gh = self.get_grid_helper()
        axis = gh.new_floating_axis(nth_coord, value,
                                    axis_direction=axis_direction,
                                    axes=self)
        return axis

    def draw(self, renderer, inframe=False):
        # Temporarily extend self.artists with the AxisArtists and the
        # gridlines so the stock Axes.draw renders them, then restore.
        if not self._axisline_on:
            super(Axes, self).draw(renderer, inframe)
            return

        orig_artists = self.artists
        self.artists = self.artists + list(self._axislines.values()) + [self.gridlines]

        super(Axes, self).draw(renderer, inframe)

        self.artists = orig_artists

    def get_tightbbox(self, renderer, call_axes_locator=True):
        # Union of the stock tight bbox and every visible AxisArtist's.
        bb0 = super(Axes, self).get_tightbbox(renderer, call_axes_locator)

        if not self._axisline_on:
            return bb0

        bb = [bb0]

        for axisline in list(six.itervalues(self._axislines)):
            if not axisline.get_visible():
                continue

            bb.append(axisline.get_tightbbox(renderer))
            # if axisline.label.get_visible():
            #     bb.append(axisline.label.get_window_extent(renderer))

            # if axisline.major_ticklabels.get_visible():
            #     bb.extend(axisline.major_ticklabels.get_window_extents(renderer))
            # if axisline.minor_ticklabels.get_visible():
            #     bb.extend(axisline.minor_ticklabels.get_window_extents(renderer))
            # if axisline.major_ticklabels.get_visible() or \
            #    axisline.minor_ticklabels.get_visible():
            #     bb.append(axisline.offsetText.get_window_extent(renderer))

        #bb.extend([c.get_window_extent(renderer) for c in artists \
        #           if c.get_visible()])

        # Degenerate (zero-area) boxes are excluded from the union.
        _bbox = Bbox.union([b for b in bb if b and (b.width!=0 or b.height!=0)])

        return _bbox
# Subplot-capable variant of the local Axes class.
Subplot = maxes.subplot_class_factory(Axes)
class AxesZero(Axes):
    """Axes subclass adding hidden floating axes through the data origin.

    Two extra AxisArtists, "xzero" and "yzero", are registered in
    ``self.axis``; both start invisible and can be enabled with e.g.
    ``ax.axis["xzero"].set_visible(True)``.
    """

    def __init__(self, *kl, **kw):
        super(AxesZero, self).__init__(*kl, **kw)

    def _init_axis_artists(self):
        super(AxesZero, self)._init_axis_artists()

        factory = self._grid_helper.new_floating_axis
        for key, coord, direction in [("xzero", 0, "bottom"),
                                      ("yzero", 1, "left")]:
            artist = factory(nth_coord=coord,
                             value=0.,
                             axis_direction=direction,
                             axes=self)
            artist.line.set_clip_path(self.patch)
            artist.set_visible(False)
            self._axislines[key] = artist
# Subplot-capable variant of AxesZero.
SubplotZero = maxes.subplot_class_factory(AxesZero)
# Disabled manual demo of SubplotZero (flip the guard to run it).
if 0:
    #if __name__ == "__main__":
    import matplotlib.pyplot as plt
    fig = plt.figure(1, (4,3))

    ax = SubplotZero(fig, 1, 1, 1)
    fig.add_subplot(ax)

    # Reveal the floating x-axis through y=0 and give it a label.
    ax.axis["xzero"].set_visible(True)
    ax.axis["xzero"].label.set_text("Axis Zero")

    for n in ["top", "right"]:
        ax.axis[n].set_visible(False)

    xx = np.arange(0, 2*np.pi, 0.01)
    ax.plot(xx, np.sin(xx))
    ax.set_ylabel("Test")

    plt.draw()
    plt.show()
# Manual demo: plain Subplot with outward top/bottom major ticks.
if __name__ == "__main__":
    #if 1:
    import matplotlib.pyplot as plt
    fig = plt.figure(1, (4,3))

    ax = Subplot(fig, 1, 1, 1)
    fig.add_subplot(ax)

    xx = np.arange(0, 2*np.pi, 0.01)
    ax.plot(xx, np.sin(xx))
    ax.set_ylabel("Test")

    ax.axis["top"].major_ticks.set_tick_out(True) #set_tick_direction("out")
    ax.axis["bottom"].major_ticks.set_tick_out(True) #set_tick_direction("out")

    #ax.axis["bottom"].set_tick_direction("in")

    ax.axis["bottom"].set_label("Tk0")

    plt.draw()
    plt.show()
|
|
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
from future import standard_library
standard_library.install_aliases()
from builtins import str
from past.builtins import basestring
from datetime import datetime
import logging
from urllib.parse import urlparse
from time import sleep
import re
import sys
import airflow
from airflow import hooks, settings
from airflow.exceptions import AirflowException, AirflowSensorTimeout, AirflowSkipException
from airflow.models import BaseOperator, TaskInstance
from airflow.hooks.base_hook import BaseHook
from airflow.hooks.hdfs_hook import HDFSHook
from airflow.utils.state import State
from airflow.utils.decorators import apply_defaults
class BaseSensorOperator(BaseOperator):
    '''
    Sensor operators are derived from this class an inherit these attributes.

    Sensor operators keep executing at a time interval and succeed when
    a criteria is met and fail if and when they time out.

    :param soft_fail: Set to true to mark the task as SKIPPED on failure
    :type soft_fail: bool
    :param poke_interval: Time in seconds that the job should wait in
        between each tries
    :type poke_interval: int
    :param timeout: Time, in seconds before the task times out and fails.
    :type timeout: int
    '''
    ui_color = '#e6f1f2'

    @apply_defaults
    def __init__(
            self,
            poke_interval=60,
            timeout=60*60*24*7,
            soft_fail=False,
            *args, **kwargs):
        super(BaseSensorOperator, self).__init__(*args, **kwargs)
        self.poke_interval = poke_interval
        self.soft_fail = soft_fail
        self.timeout = timeout

    def poke(self, context):
        '''
        Function that the sensors defined while deriving this class should
        override.
        '''
        raise AirflowException('Override me.')

    def execute(self, context):
        # Poll poke() until it succeeds; after self.timeout seconds either
        # skip (soft_fail) or fail the task.
        started_at = datetime.now()
        while True:
            if self.poke(context):
                break
            elapsed = (datetime.now() - started_at).total_seconds()
            if elapsed > self.timeout:
                exc_cls = (AirflowSkipException if self.soft_fail
                           else AirflowSensorTimeout)
                raise exc_cls('Snap. Time is OUT.')
            sleep(self.poke_interval)
        logging.info("Success criteria met. Exiting.")
class SqlSensor(BaseSensorOperator):
    """
    Runs a sql statement until a criteria is met. It will keep trying while
    the sql returns no row, or while the first cell is in (0, '0', '').

    :param conn_id: The connection to run the sensor against
    :type conn_id: string
    :param sql: The sql to run. To pass, it needs to return at least one cell
        that contains a non-zero / empty string value.
    """
    template_fields = ('sql',)
    template_ext = ('.hql', '.sql',)
    ui_color = '#7c7287'

    @apply_defaults
    def __init__(self, conn_id, sql, *args, **kwargs):
        self.sql = sql
        self.conn_id = conn_id
        super(SqlSensor, self).__init__(*args, **kwargs)

    def poke(self, context):
        """Return True once the query yields a row whose first cell is
        neither '0' nor empty (after str() conversion)."""
        hook = BaseHook.get_connection(self.conn_id).get_hook()

        logging.info('Poking: ' + self.sql)
        records = hook.get_records(self.sql)
        if not records:
            return False
        # Bug fix: removed an unreachable print(records[0][0]) that
        # followed the return statements, and collapsed the branches.
        return str(records[0][0]) not in ('0', '')
class MetastorePartitionSensor(SqlSensor):
    """
    An alternative to the HivePartitionSensor that talk directly to the
    MySQL db. This was created as a result of observing sub optimal
    queries generated by the Metastore thrift service when hitting
    subpartitioned tables. The Thrift service's queries were written in a
    way that wouldn't leverage the indexes.

    :param schema: the schema
    :type schema: str
    :param table: the table
    :type table: str
    :param partition_name: the partition name, as defined in the PARTITIONS
        table of the Metastore. Order of the fields does matter.
        Examples: ``ds=2016-01-01`` or
        ``ds=2016-01-01/sub=foo`` for a sub partitioned table
    :type partition_name: str
    :param mysql_conn_id: a reference to the MySQL conn_id for the metastore
    :type mysql_conn_id: str
    """
    template_fields = ('partition_name', 'table', 'schema')
    ui_color = '#8da7be'

    @apply_defaults
    def __init__(
            self, table, partition_name, schema="default",
            mysql_conn_id="metastore_mysql",
            *args, **kwargs):
        self.partition_name = partition_name
        self.table = table
        self.schema = schema
        # self.sql is built lazily on the first poke (see poke below).
        self.first_poke = True
        self.conn_id = mysql_conn_id
        # TODO(aoen): We shouldn't be using SqlSensor here but MetastorePartitionSensor.
        # The problem is the way apply_defaults works isn't compatible with inheritance.
        # The inheritance model needs to be reworked in order to support overriding args/
        # kwargs with arguments here, then 'conn_id' and 'sql' can be passed into the
        # constructor below and apply_defaults will no longer throw an exception.
        super(SqlSensor, self).__init__(*args, **kwargs)

    def poke(self, context):
        if self.first_poke:
            self.first_poke = False
            # A dotted table name overrides the schema argument.
            if '.' in self.table:
                self.schema, self.table = self.table.split('.')
            # NOTE(review): schema/table/partition_name are interpolated
            # straight into the SQL string; this is only safe while they
            # come from trusted DAG definitions — confirm before exposing
            # to untrusted input.
            self.sql = """
            SELECT 'X'
            FROM PARTITIONS A0
            LEFT OUTER JOIN TBLS B0 ON A0.TBL_ID = B0.TBL_ID
            LEFT OUTER JOIN DBS C0 ON B0.DB_ID = C0.DB_ID
            WHERE
                B0.TBL_NAME = '{self.table}' AND
                C0.NAME = '{self.schema}' AND
                A0.PART_NAME = '{self.partition_name}';
            """.format(self=self)
        return super(MetastorePartitionSensor, self).poke(context)
class ExternalTaskSensor(BaseSensorOperator):
    """
    Waits for a task to complete in a different DAG

    :param external_dag_id: The dag_id that contains the task you want to
        wait for
    :type external_dag_id: string
    :param external_task_id: The task_id that contains the task you want to
        wait for
    :type external_task_id: string
    :param allowed_states: list of allowed states, default is ``['success']``
    :type allowed_states: list
    :param execution_delta: time difference with the previous execution to
        look at, the default is the same execution_date as the current task.
        For yesterday, use [positive!] datetime.timedelta(days=1). Either
        execution_delta or execution_date_fn can be passed to
        ExternalTaskSensor, but not both.
    :type execution_delta: datetime.timedelta
    :param execution_date_fn: function that receives the current execution date
        and returns the desired execution date to query. Either execution_delta
        or execution_date_fn can be passed to ExternalTaskSensor, but not both.
    :type execution_date_fn: callable
    """
    ui_color = '#19647e'

    @apply_defaults
    def __init__(
            self,
            external_dag_id,
            external_task_id,
            allowed_states=None,
            execution_delta=None,
            execution_date_fn=None,
            *args, **kwargs):
        super(ExternalTaskSensor, self).__init__(*args, **kwargs)
        self.allowed_states = allowed_states or [State.SUCCESS]
        if execution_delta is not None and execution_date_fn is not None:
            # Bug fix: the implicitly concatenated message previously read
            # "...maybe provided..." because the space was missing.
            raise ValueError(
                'Only one of `execution_date` or `execution_date_fn` may '
                'be provided to ExternalTaskSensor; not both.')

        self.execution_delta = execution_delta
        self.execution_date_fn = execution_date_fn
        self.external_dag_id = external_dag_id
        self.external_task_id = external_task_id

    def poke(self, context):
        """Count matching TaskInstances; a non-zero count is truthy and
        satisfies the sensor."""
        if self.execution_delta:
            dttm = context['execution_date'] - self.execution_delta
        elif self.execution_date_fn:
            dttm = self.execution_date_fn(context['execution_date'])
        else:
            dttm = context['execution_date']

        logging.info(
            'Poking for '
            '{self.external_dag_id}.'
            '{self.external_task_id} on '
            '{dttm} ... '.format(**locals()))
        TI = TaskInstance

        session = settings.Session()
        # Robustness: close the session even when the query raises, so
        # repeated pokes do not leak connections.
        try:
            count = session.query(TI).filter(
                TI.dag_id == self.external_dag_id,
                TI.task_id == self.external_task_id,
                TI.state.in_(self.allowed_states),
                TI.execution_date == dttm,
            ).count()
            session.commit()
        finally:
            session.close()
        return count
class NamedHivePartitionSensor(BaseSensorOperator):
    """
    Waits for a set of partitions to show up in Hive.
    :param partition_names: List of fully qualified names of the
        partitions to wait for. A fully qualified name is of the
        form ``schema.table/pk1=pv1/pk2=pv2``, for example,
        default.users/ds=2016-01-01. This is passed as is to the metastore
        Thrift client ``get_partitions_by_name`` method. Note that
        you cannot use logical or comparison operators as in
        HivePartitionSensor.
    :type partition_names: list of strings
    :param metastore_conn_id: reference to the metastore thrift service
        connection id
    :type metastore_conn_id: str
    """
    template_fields = ('partition_names', )
    ui_color = '#8d99ae'

    @apply_defaults
    def __init__(
            self,
            partition_names,
            metastore_conn_id='metastore_default',
            poke_interval=60 * 3,
            *args,
            **kwargs):
        super(NamedHivePartitionSensor, self).__init__(
            poke_interval=poke_interval, *args, **kwargs)
        # A bare string would silently iterate character by character in
        # poke(), so reject it early.
        if isinstance(partition_names, basestring):
            raise TypeError('partition_names must be an array of strings')
        self.metastore_conn_id = metastore_conn_id
        self.partition_names = partition_names
        # Index of the next partition to verify; partitions already seen are
        # never re-checked, so progress is kept across poke calls.
        self.next_poke_idx = 0

    @classmethod
    def parse_partition_name(cls, partition):
        """Split ``schema.table/partition`` into its three components.

        :raises ValueError: when either separator is missing.
        """
        # CLEANUP: first parameter of a classmethod is the class itself,
        # so it is named ``cls`` (it was misleadingly called ``self``).
        try:
            schema, table_partition = partition.split('.', 1)
            table, partition = table_partition.split('/', 1)
            return schema, table, partition
        except ValueError:
            raise ValueError('Could not parse ' + partition)

    def poke(self, context):
        """Return True once every partition in partition_names exists."""
        # Lazily create the metastore hook on first poke.
        if not hasattr(self, 'hook'):
            self.hook = hooks.HiveMetastoreHook(
                metastore_conn_id=self.metastore_conn_id)

        def poke_partition(partition):
            schema, table, partition = self.parse_partition_name(partition)
            logging.info(
                'Poking for {schema}.{table}/{partition}'.format(**locals())
            )
            return self.hook.check_for_named_partition(
                schema, table, partition)

        # Advance through the list; stop at the first missing partition.
        while self.next_poke_idx < len(self.partition_names):
            if poke_partition(self.partition_names[self.next_poke_idx]):
                self.next_poke_idx += 1
            else:
                return False

        return True
class HivePartitionSensor(BaseSensorOperator):
    """
    Waits for a partition to show up in Hive.
    Note: Because ``partition`` supports general logical operators, it
    can be inefficient. Consider using NamedHivePartitionSensor instead if
    you don't need the full flexibility of HivePartitionSensor.
    :param table: The name of the table to wait for, supports the dot
        notation (my_database.my_table)
    :type table: string
    :param partition: The partition clause to wait for. This is passed as
        is to the metastore Thrift client ``get_partitions_by_filter`` method,
        and apparently supports SQL like notation as in ``ds='2015-01-01'
        AND type='value'`` and comparison operators as in ``"ds>=2015-01-01"``
    :type partition: string
    :param metastore_conn_id: reference to the metastore thrift service
        connection id
    :type metastore_conn_id: str
    """
    template_fields = ('schema', 'table', 'partition',)
    ui_color = '#2b2d42'
    @apply_defaults
    def __init__(
            self,
            table, partition="ds='{{ ds }}'",
            metastore_conn_id='metastore_default',
            schema='default',
            poke_interval=60*3,
            *args, **kwargs):
        super(HivePartitionSensor, self).__init__(
            poke_interval=poke_interval, *args, **kwargs)
        # Fall back to the default partition clause when an explicit
        # None/'' is passed.
        if not partition:
            partition = "ds='{{ ds }}'"
        self.metastore_conn_id = metastore_conn_id
        self.table = table
        self.partition = partition
        self.schema = schema
    def poke(self, context):
        # NOTE(review): when a dotted table name is used this mutates
        # self.schema/self.table in place on the first poke; later pokes see
        # the already-split values. Verify this is intended before changing.
        if '.' in self.table:
            self.schema, self.table = self.table.split('.')
        logging.info(
            'Poking for table {self.schema}.{self.table}, '
            'partition {self.partition}'.format(**locals()))
        # Lazily create and cache the metastore hook on first poke.
        if not hasattr(self, 'hook'):
            self.hook = hooks.HiveMetastoreHook(
                metastore_conn_id=self.metastore_conn_id)
        return self.hook.check_for_partition(
            self.schema, self.table, self.partition)
class HdfsSensor(BaseSensorOperator):
    """
    Waits for a file or folder to land in HDFS.

    :param filepath: path (file or directory) to look for in HDFS
    :param hdfs_conn_id: reference to the HDFS connection id
    :param ignored_ext: list of extensions to ignore; defaults to
        ``['_COPYING_']`` (transient files)
    :param ignore_copying: whether files matching ``ignored_ext`` are
        filtered out before deciding
    :param file_size: minimum file size in MB for a file to count
    :param hook: hook class used to reach the cluster (defaults to HDFSHook)
    """
    template_fields = ('filepath',)
    ui_color = settings.WEB_COLORS['LIGHTBLUE']

    @apply_defaults
    def __init__(
            self,
            filepath,
            hdfs_conn_id='hdfs_default',
            ignored_ext=None,
            ignore_copying=True,
            file_size=None,
            hook=HDFSHook,
            *args, **kwargs):
        super(HdfsSensor, self).__init__(*args, **kwargs)
        self.filepath = filepath
        self.hdfs_conn_id = hdfs_conn_id
        self.file_size = file_size
        # BUGFIX: ``ignored_ext=['_COPYING_']`` was a mutable default
        # argument shared between every instance of the operator.
        self.ignored_ext = ['_COPYING_'] if ignored_ext is None else ignored_ext
        self.ignore_copying = ignore_copying
        self.hook = hook

    @staticmethod
    def filter_for_filesize(result, size=None):
        """
        Will test the filepath result and test if its size is at least self.filesize
        :param result: a list of dicts returned by Snakebite ls
        :param size: the file size in MB a file should be at least to trigger True
        :return: (bool) depending on the matching criteria
        """
        if size:
            logging.debug('Filtering for file size >= %s in files: %s', size, map(lambda x: x['path'], result))
            size *= settings.MEGABYTE
            result = [x for x in result if x['length'] >= size]
            logging.debug('HdfsSensor.poke: after size filter result is %s', result)
        return result

    @staticmethod
    def filter_for_ignored_ext(result, ignored_ext, ignore_copying):
        """
        Will filter if instructed to do so the result to remove matching criteria
        :param result: (list) of dicts returned by Snakebite ls
        :param ignored_ext: (list) of ignored extentions
        :param ignore_copying: (bool) shall we ignore ?
        :return: the filtered list of dicts
        """
        if ignore_copying:
            # Raw string so the ``\.`` escape is not interpreted by Python.
            regex_builder = r"^.*\.(%s$)$" % '$|'.join(ignored_ext)
            ignored_extentions_regex = re.compile(regex_builder)
            logging.debug('Filtering result for ignored extentions: %s in files %s', ignored_extentions_regex.pattern,
                          map(lambda x: x['path'], result))
            result = [x for x in result if not ignored_extentions_regex.match(x['path'])]
            logging.debug('HdfsSensor.poke: after ext filter result is %s', result)
        return result

    def poke(self, context):
        """Return True when at least one matching, filtered file exists."""
        sb = self.hook(self.hdfs_conn_id).get_conn()
        logging.getLogger("snakebite").setLevel(logging.WARNING)
        logging.info('Poking for file {self.filepath} '.format(**locals()))
        try:
            # IMOO it's not right here, as there no raise of any kind.
            # if the filepath is let's say '/data/mydirectory', it's correct but if it is '/data/mydirectory/*',
            # it's not correct as the directory exists and sb does not raise any error
            # here is a quick fix
            result = [f for f in sb.ls([self.filepath], include_toplevel=False)]
            logging.debug('HdfsSensor.poke: result is %s', result)
            result = self.filter_for_ignored_ext(result, self.ignored_ext, self.ignore_copying)
            result = self.filter_for_filesize(result, self.file_size)
            return bool(result)
        except Exception:
            # BUGFIX: was a bare ``except:`` which also swallowed
            # SystemExit/KeyboardInterrupt.
            e = sys.exc_info()
            logging.debug("Caught an exception !: %s", str(e))
            return False
class WebHdfsSensor(BaseSensorOperator):
    """Waits for a file or folder to land in HDFS, polled over WebHDFS."""
    template_fields = ('filepath',)

    @apply_defaults
    def __init__(
            self,
            filepath,
            webhdfs_conn_id='webhdfs_default',
            *args, **kwargs):
        super(WebHdfsSensor, self).__init__(*args, **kwargs)
        self.filepath = filepath
        self.webhdfs_conn_id = webhdfs_conn_id

    def poke(self, context):
        """Return True once the configured path exists."""
        hook = airflow.hooks.webhdfs_hook.WebHDFSHook(self.webhdfs_conn_id)
        logging.info(
            'Poking for file {self.filepath} '.format(**locals()))
        return hook.check_for_path(hdfs_path=self.filepath)
class S3KeySensor(BaseSensorOperator):
    """
    Waits for a key (a file-like instance on S3) to be present in a S3 bucket.
    S3 being a key/value it does not support folders. The path is just a key
    a resource.
    :param bucket_key: The key being waited on. Supports full s3:// style url
        or relative path from root level.
    :type bucket_key: str
    :param bucket_name: Name of the S3 bucket
    :type bucket_name: str
    :param wildcard_match: whether the bucket_key should be interpreted as a
        Unix wildcard pattern
    :type wildcard_match: bool
    :param s3_conn_id: a reference to the s3 connection
    :type s3_conn_id: str
    """
    template_fields = ('bucket_key', 'bucket_name')

    @apply_defaults
    def __init__(
            self, bucket_key,
            bucket_name=None,
            wildcard_match=False,
            s3_conn_id='s3_default',
            *args, **kwargs):
        super(S3KeySensor, self).__init__(*args, **kwargs)
        # When no bucket_name is given, bucket_key must be a full s3:// URL;
        # split it into bucket and key parts.
        if bucket_name is None:
            parsed_url = urlparse(bucket_key)
            if parsed_url.netloc == '':
                raise AirflowException('Please provide a bucket_name')
            else:
                bucket_name = parsed_url.netloc
                # BUGFIX: ``parsed_url.path[0]`` raised IndexError for URLs
                # with an empty path such as "s3://bucket".
                if parsed_url.path.startswith('/'):
                    bucket_key = parsed_url.path[1:]
                else:
                    bucket_key = parsed_url.path
        self.bucket_name = bucket_name
        self.bucket_key = bucket_key
        self.wildcard_match = wildcard_match
        self.s3_conn_id = s3_conn_id

    def poke(self, context):
        """Return True once the key (or wildcard pattern) exists in S3."""
        import airflow.hooks.S3_hook
        hook = airflow.hooks.S3_hook.S3Hook(s3_conn_id=self.s3_conn_id)
        full_url = "s3://" + self.bucket_name + "/" + self.bucket_key
        logging.info('Poking for key : {full_url}'.format(**locals()))
        if self.wildcard_match:
            return hook.check_for_wildcard_key(self.bucket_key,
                                               self.bucket_name)
        else:
            return hook.check_for_key(self.bucket_key, self.bucket_name)
class S3PrefixSensor(BaseSensorOperator):
    """
    Waits for a prefix to exist. A prefix is the first part of a key, which
    allows matching constructs similar to glob airfl* or SQL LIKE 'airfl%'.
    A delimiter can be given to indicate the key hierarchy: the match stops
    at that delimiter. The code accepts sane delimiters only, i.e. characters
    that are NOT special characters in the Python regex engine.
    :param bucket_name: Name of the S3 bucket
    :type bucket_name: str
    :param prefix: The prefix being waited on. Relative path from bucket root level.
    :type prefix: str
    :param delimiter: The delimiter intended to show hierarchy.
        Defaults to '/'.
    :type delimiter: str
    """
    template_fields = ('prefix', 'bucket_name')

    @apply_defaults
    def __init__(
            self, bucket_name,
            prefix, delimiter='/',
            s3_conn_id='s3_default',
            *args, **kwargs):
        super(S3PrefixSensor, self).__init__(*args, **kwargs)
        # Parse
        self.bucket_name = bucket_name
        self.prefix = prefix
        self.delimiter = delimiter
        self.full_url = "s3://" + bucket_name + '/' + prefix
        self.s3_conn_id = s3_conn_id

    def poke(self, context):
        """Return True once any key with the configured prefix exists."""
        logging.info('Poking for prefix : {self.prefix}\n'
                     'in bucket s3://{self.bucket_name}'.format(**locals()))
        import airflow.hooks.S3_hook
        s3 = airflow.hooks.S3_hook.S3Hook(s3_conn_id=self.s3_conn_id)
        return s3.check_for_prefix(
            bucket_name=self.bucket_name,
            prefix=self.prefix,
            delimiter=self.delimiter)
class TimeSensor(BaseSensorOperator):
    """
    Waits until the specified time of the day.
    :param target_time: time after which the job succeeds
    :type target_time: datetime.time
    """
    template_fields = tuple()

    @apply_defaults
    def __init__(self, target_time, *args, **kwargs):
        super(TimeSensor, self).__init__(*args, **kwargs)
        self.target_time = target_time

    def poke(self, context):
        """Return True once the wall-clock time is past target_time."""
        current_time = datetime.now().time()
        logging.info(
            'Checking if the time ({0}) has come'.format(self.target_time))
        return current_time > self.target_time
class TimeDeltaSensor(BaseSensorOperator):
    """
    Waits for a timedelta after the task's execution_date + schedule_interval.
    In Airflow, the daily task stamped with ``execution_date``
    2016-01-01 can only start running on 2016-01-02. The timedelta here
    represents the time after the execution period has closed.
    :param delta: time length to wait after execution_date before succeeding
    :type delta: datetime.timedelta
    """
    template_fields = tuple()

    @apply_defaults
    def __init__(self, delta, *args, **kwargs):
        super(TimeDeltaSensor, self).__init__(*args, **kwargs)
        self.delta = delta

    def poke(self, context):
        """Return True once now() is past the schedule close plus delta."""
        dag = context['dag']
        # End of the schedule interval for this run, shifted by delta.
        target_dttm = dag.following_schedule(context['execution_date']) + self.delta
        logging.info('Checking if the time ({0}) has come'.format(target_dttm))
        return datetime.now() > target_dttm
class HttpSensor(BaseSensorOperator):
    """
    Executes a HTTP get statement and returns False on failure:
    404 not found or response_check function returned False
    :param http_conn_id: The connection to run the sensor against
    :type http_conn_id: string
    :param endpoint: The relative part of the full url
    :type endpoint: string
    :param params: The parameters to be added to the GET url
    :type params: a dictionary of string key/value pairs
    :param headers: The HTTP headers to be added to the GET request
    :type headers: a dictionary of string key/value pairs
    :param response_check: A check against the 'requests' response object.
        Returns True for 'pass' and False otherwise.
    :type response_check: A lambda or defined function.
    :param extra_options: Extra options for the 'requests' library, see the
        'requests' documentation (options to modify timeout, ssl, etc.)
    :type extra_options: A dictionary of options, where key is string and value
        depends on the option that's being modified.
    """
    template_fields = ('endpoint', 'params')

    @apply_defaults
    def __init__(self,
                 endpoint,
                 http_conn_id='http_default',
                 params=None,
                 headers=None,
                 response_check=None,
                 extra_options=None, *args, **kwargs):
        super(HttpSensor, self).__init__(*args, **kwargs)
        self.endpoint = endpoint
        self.http_conn_id = http_conn_id
        self.params = params or {}
        self.headers = headers or {}
        self.extra_options = extra_options or {}
        self.response_check = response_check
        self.hook = hooks.http_hook.HttpHook(method='GET', http_conn_id=http_conn_id)

    def poke(self, context):
        """Return True on a successful GET (and passing response_check)."""
        logging.info('Poking: ' + self.endpoint)
        try:
            response = self.hook.run(self.endpoint,
                                     data=self.params,
                                     headers=self.headers,
                                     extra_options=self.extra_options)
            if self.response_check:
                # run content check on response
                return self.response_check(response)
        except AirflowException as ae:
            # A 404 simply means "not there yet": keep poking.
            if str(ae).startswith("404"):
                return False
            # BUGFIX: bare ``raise`` re-raises with the original traceback;
            # ``raise ae`` reset it under Python 2.
            raise
        return True
|
|
from __future__ import absolute_import
import logging
from django.contrib import messages
from django.core.context_processors import csrf
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect
from django.utils.decorators import method_decorator
from django.utils.translation import ugettext_lazy as _
from django.views.decorators.csrf import csrf_protect
from django.views.generic import View
from sudo.views import redirect_to_sudo
from sentry.auth import access
from sentry.models import (
Organization, OrganizationStatus, Project, Team
)
from sentry.web.helpers import get_login_url, render_to_response
ERR_MISSING_SSO_LINK = _('You need to link your account with the SSO provider to continue.')
class OrganizationMixin(object):
    # TODO(dcramer): move the implicit organization logic into its own class
    # as it's only used in a single location and over complicates the rest of
    # the code
    def get_active_organization(self, request, organization_slug=None,
                                access=None):
        """
        Returns the currently active organization for the request or None
        if no organization.
        """
        active_organization = None
        # "implicit" means no slug was supplied by the URL; fall back to the
        # organization remembered in the session.
        is_implicit = organization_slug is None
        if is_implicit:
            organization_slug = request.session.get('activeorg')
        if organization_slug is not None:
            if request.user.is_superuser:
                try:
                    active_organization = Organization.objects.get_from_cache(
                        slug=organization_slug,
                    )
                    # Superusers may look up any org, but a non-visible org
                    # is treated as missing.
                    if active_organization.status != OrganizationStatus.VISIBLE:
                        raise Organization.DoesNotExist
                except Organization.DoesNotExist:
                    logging.info('Active organization [%s] not found',
                                 organization_slug)
                    return None
        if active_organization is None:
            organizations = Organization.objects.get_for_user(
                user=request.user,
                access=access,
            )
        if active_organization is None and organization_slug:
            try:
                # NOTE(review): generator ``.next()`` is Python 2 only;
                # ``next(...)`` would be needed for Python 3.
                active_organization = (
                    o for o in organizations
                    if o.slug == organization_slug
                ).next()
            except StopIteration:
                logging.info('Active organization [%s] not found in scope',
                             organization_slug)
                if is_implicit:
                    # The slug remembered in the session is stale; forget it
                    # and fall through to the default organization below.
                    del request.session['activeorg']
                active_organization = None
        if active_organization is None:
            if not is_implicit:
                return None
            # Implicit lookup with nothing remembered: default to the first
            # organization the user belongs to, if any.
            try:
                active_organization = organizations[0]
            except IndexError:
                logging.info('User is not a member of any organizations')
                pass
        # Keep the session in sync with whichever organization is active.
        if active_organization and active_organization.slug != request.session.get('activeorg'):
            request.session['activeorg'] = active_organization.slug
        return active_organization
    def get_active_team(self, request, organization, team_slug, access=None):
        """
        Returns the currently selected team for the request or None
        if no match.
        """
        try:
            team = Team.objects.get_from_cache(
                slug=team_slug,
                organization=organization,
            )
        except Team.DoesNotExist:
            return None
        # Superusers bypass the membership/access check.
        if not request.user.is_superuser and not team.has_access(request.user, access):
            return None
        return team
    def get_active_project(self, request, organization, project_slug, access=None):
        # Same contract as get_active_team, but for projects.
        try:
            project = Project.objects.get_from_cache(
                slug=project_slug,
                organization=organization,
            )
        except Project.DoesNotExist:
            return None
        if not request.user.is_superuser and not project.has_access(request.user, access):
            return None
        return project
class BaseView(View, OrganizationMixin):
    """Common base for Sentry views: authentication/sudo gating, permission
    checks, and template-context plumbing for subclasses."""
    auth_required = True
    # TODO(dcramer): change sudo so it can be required only on POST
    sudo_required = False

    def __init__(self, auth_required=None, sudo_required=None, *args, **kwargs):
        if auth_required is not None:
            self.auth_required = auth_required
        if sudo_required is not None:
            self.sudo_required = sudo_required
        super(BaseView, self).__init__(*args, **kwargs)

    @method_decorator(csrf_protect)
    def dispatch(self, request, *args, **kwargs):
        # Gate order: authentication -> sudo -> arg conversion -> permission.
        if self.is_auth_required(request, *args, **kwargs):
            return self.handle_auth_required(request, *args, **kwargs)
        if self.is_sudo_required(request, *args, **kwargs):
            return self.handle_sudo_required(request, *args, **kwargs)
        args, kwargs = self.convert_args(request, *args, **kwargs)
        request.access = self.get_access(request, *args, **kwargs)
        if not self.has_permission(request, *args, **kwargs):
            return self.handle_permission_required(request, *args, **kwargs)
        self.request = request
        self.default_context = self.get_context_data(request, *args, **kwargs)
        return self.handle(request, *args, **kwargs)

    def get_access(self, request, *args, **kwargs):
        return access.DEFAULT

    def convert_args(self, request, *args, **kwargs):
        return (args, kwargs)

    def handle(self, request, *args, **kwargs):
        return super(BaseView, self).dispatch(request, *args, **kwargs)

    def is_auth_required(self, request, *args, **kwargs):
        return self.auth_required and not request.user.is_authenticated()

    def handle_auth_required(self, request, *args, **kwargs):
        # Remember where the user was headed so login can redirect back.
        request.session['_next'] = request.get_full_path()
        if 'organization_slug' in kwargs:
            redirect_to = reverse('sentry-auth-organization',
                                  args=[kwargs['organization_slug']])
        else:
            redirect_to = get_login_url()
        return self.redirect(redirect_to)

    def is_sudo_required(self, request, *args, **kwargs):
        return self.sudo_required and not request.is_sudo()

    def handle_sudo_required(self, request, *args, **kwargs):
        return redirect_to_sudo(request.get_full_path())

    def has_permission(self, request, *args, **kwargs):
        return True

    def handle_permission_required(self, request, *args, **kwargs):
        redirect_uri = self.get_no_permission_url(request, *args, **kwargs)
        return self.redirect(redirect_uri)

    def get_no_permission_url(self, request, *args, **kwargs):
        # BUGFIX: ``self`` was missing from this method's signature; it only
        # worked by accident because the request was bound as ``self``.
        return reverse('sentry')

    def get_context_data(self, request, **kwargs):
        context = csrf(request)
        return context

    def respond(self, template, context=None, status=200):
        default_context = self.default_context
        if context:
            default_context.update(context)
        return render_to_response(template, default_context, self.request,
                                  status=status)

    def redirect(self, url):
        return HttpResponseRedirect(url)

    def get_team_list(self, user, organization):
        return Team.objects.get_for_user(
            organization=organization,
            user=user,
            with_projects=True,
        )
class OrganizationView(BaseView):
    """
    Any view acting on behalf of an organization should inherit from this base.
    The 'organization' keyword argument is automatically injected into the
    resulting dispatch.
    """
    required_access = None
    valid_sso_required = True

    def get_access(self, request, organization, *args, **kwargs):
        if organization is None:
            return access.DEFAULT
        return access.from_user(request.user, organization)

    def get_context_data(self, request, organization, **kwargs):
        context = super(OrganizationView, self).get_context_data(request)
        context.update({
            'organization': organization,
            'TEAM_LIST': self.get_team_list(request.user, organization),
            'ACCESS': request.access.to_django_context(),
        })
        return context

    def has_permission(self, request, organization, *args, **kwargs):
        # No org -> no permission; otherwise require a valid SSO session
        # when the view demands one.
        if organization is None:
            return False
        return not self.valid_sso_required or request.access.sso_is_valid

    def handle_permission_required(self, request, organization, *args, **kwargs):
        request.session['_next'] = request.get_full_path()
        sso_link_needed = bool(
            organization and request.user.is_authenticated()
            and self.valid_sso_required and not request.access.sso_is_valid
        )
        if sso_link_needed:
            messages.add_message(
                request, messages.ERROR,
                ERR_MISSING_SSO_LINK,
            )
            redirect_uri = reverse('sentry-auth-link-identity',
                                   args=[organization.slug])
        else:
            redirect_uri = self.get_no_permission_url(request, *args, **kwargs)
        return self.redirect(redirect_uri)

    def convert_args(self, request, organization_slug=None, *args, **kwargs):
        kwargs['organization'] = self.get_active_organization(
            request=request,
            access=self.required_access,
            organization_slug=organization_slug,
        )
        return (args, kwargs)
class TeamView(OrganizationView):
    """
    Any view acting on behalf of a team should inherit from this base and the
    matching URL pattern must pass 'team_slug'.
    Two keyword arguments are added to the resulting dispatch:
    - organization
    - team
    """
    def get_context_data(self, request, organization, team, **kwargs):
        context = super(TeamView, self).get_context_data(request, organization)
        context['team'] = team
        return context

    def has_permission(self, request, organization, team, *args, **kwargs):
        # The organization-level checks must pass first.
        if not super(TeamView, self).has_permission(request, organization):
            return False
        return team is not None

    def convert_args(self, request, organization_slug, team_slug, *args, **kwargs):
        organization = self.get_active_organization(
            request=request,
            organization_slug=organization_slug,
        )
        team = None
        if organization:
            team = self.get_active_team(
                request=request,
                team_slug=team_slug,
                organization=organization,
                access=self.required_access,
            )
        kwargs['organization'] = organization
        kwargs['team'] = team
        return (args, kwargs)
class ProjectView(TeamView):
    """
    Any view acting on behalf of a project should inherit from this base and the
    matching URL pattern must pass 'team_slug' as well as 'project_slug'.
    Three keyword arguments are added to the resulting dispatch:
    - organization
    - team
    - project
    """
    def get_context_data(self, request, organization, team, project, **kwargs):
        context = super(ProjectView, self).get_context_data(request, organization, team)
        context['project'] = project
        return context

    def has_permission(self, request, organization, team, project, *args, **kwargs):
        # Organization- and team-level checks must pass first.
        if not super(ProjectView, self).has_permission(request, organization, team):
            return False
        return project is not None

    def convert_args(self, request, organization_slug, project_slug, *args, **kwargs):
        organization = self.get_active_organization(
            request=request,
            organization_slug=organization_slug,
        )
        project = None
        if organization:
            project = self.get_active_project(
                request=request,
                organization=organization,
                project_slug=project_slug,
                access=self.required_access,
            )
        # The team is derived from the resolved project, never from the URL.
        team = project.team if project else None
        kwargs['project'] = project
        kwargs['team'] = team
        kwargs['organization'] = organization
        return (args, kwargs)
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from collections import namedtuple
from os import getcwd, fdopen, fstat as stat
from time import ctime, time
from ..fdbus_h import *
from ..exceptions.exceptions import *
# a client name/id field (i.e. a process name or identifier/pid)
# Immutable record describing one registered file descriptor: its basename,
# full path, numeric fd, open mode, owning client, and creation timestamp.
fdobj = namedtuple('File_Descriptor', ('name', 'path', 'fd', 'mode',
                                       'client', 'created'))
class FileDescriptorPool(object):
    """Registry of descriptors, indexed by file name and by owning client."""

    def __init__(self):
        # BUGFIX: ``defaultdict`` was used without being imported anywhere in
        # this module (only ``namedtuple`` is imported from collections).
        from collections import defaultdict
        # name -> [client, fdobj]
        self.fdobjs = {}
        # client -> list of fdobjs owned by that client
        self.client_fdobjs = defaultdict(list)

    def add(self, client, fdobj):
        """Register ``fdobj`` under its name and under its owning client."""
        self.fdobjs[fdobj.name] = [client, fdobj]
        self.client_fdobjs[client].append(fdobj)

    def remove(self, name):
        """Drop the descriptor registered under ``name`` from both indexes.

        Raises UnknownDescriptorError when the name is not registered.
        """
        try:
            fdobj = self.fdobjs[name]
            self.client_fdobjs[fdobj[0]].remove(fdobj[1])
            del self.fdobjs[name]
        except KeyError:
            raise UnknownDescriptorError(name)

    def __len__(self):
        # Number of registered descriptors.
        return len(self.fdobjs)

    # The lookup API below is declared but not implemented yet.
    def retrieve(self, fdobj):
        pass

    def bypath(self):
        pass

    def byfileno(self):
        pass

    def byfname(self):
        pass

    def __iter__(self):
        pass
class FileDescriptor(object):
    """Factory that validates kwargs and returns a populated _FileDescriptor.

    Required kwargs: ``path`` and ``mode``. Optional: ``fd`` (opened through
    libc when missing), ``client`` and ``created``.
    """

    def __new__(cls, **kwargs):
        # Note: intentionally returns a _FileDescriptor, not a FileDescriptor.
        # CLEANUP: __new__'s first argument is the class; renamed from the
        # misleading ``self``.
        path = kwargs.get('path')
        if path is None:
            raise FileDescriptorError(cls)
        name = path.split('/')[-1]
        mode = kwargs.get('mode')
        if mode is None:
            raise FileDescriptorError(cls)
        fd = kwargs.get('fd')
        if fd is None:
            fd = FileDescriptor.fopen(path, mode)
        client = kwargs.get('client')
        # BUGFIX: callers (e.g. FDBus.createfd) pass a ``created`` timestamp
        # for remotely received descriptors; it was silently discarded and
        # replaced with the local time.
        created = kwargs.get('created')
        if created is None:
            created = time()
        new_fdobj = _FileDescriptor()
        new_fdobj.load(name, path, fd, mode, client, created)
        return new_fdobj

    @staticmethod
    def fopen(path, mode):
        """Open ``path`` through libc, raising OpenError on failure."""
        fd = libc.open(path, mode)
        if fd == -1:
            error_msg = get_error_msg()
            raise OpenError(error_msg)
        return fd
class _FileDescriptor(object):
    """Concrete descriptor record with seek/close helpers built on libc."""

    def __init__(self):
        # Simple reference count for descriptors shared between clients.
        self.refcnt = 1

    def load(self, *args):
        """Populate attributes from (name, path, fd, mode, client, created)."""
        self.new_fdobj = fdobj(*args)
        self.name = self.new_fdobj.name
        self.path = self.new_fdobj.path
        self.fd = self.new_fdobj.fd
        # BUGFIX: ``mode`` was copied from ``.path`` (copy/paste error).
        self.mode = self.new_fdobj.mode
        self.client = self.new_fdobj.client
        self.created = self.new_fdobj.created

    def fsize(self):
        """Return the file size in bytes, or None when the fd can't be stat'ed."""
        # BUGFIX: ``file_size`` was unbound (NameError on return) whenever
        # stat failed; also catch OSError, which is what fstat raises for a
        # bad descriptor.
        file_size = None
        try:
            file_size = stat(self.fd).st_size
        except (NameError, OSError):  # handle different errors
            # raise
            print("File is not open")
        return file_size

    def fpos(self):  # handle errors below
        # Current offset relative to the start of the file.
        offset = libc.lseek(self.fd, 0, SEEK_CUR)
        return offset

    def fstart(self):
        # handle errors
        offset = libc.lseek(self.fd, 0, SEEK_SET)

    def fend(self):
        # handle errors
        offset = libc.lseek(self.fd, 0, SEEK_END)

    def fset(self, pos):
        # handle errors
        c_pos = off_t(pos)
        offset = libc.lseek(self.fd, c_pos, SEEK_SET)

    def fclose(self):
        """Close the underlying descriptor, raising CloseError on failure."""
        ret = libc.close(self.fd)
        if ret == -1:
            error_msg = get_error_msg()
            raise CloseError(error_msg)

    def __enter__(self):
        # handle errors
        return self.fd

    def __exit__(self, exc_type, exc_value, traceback):
        # handle errors
        self.fclose()
class FDBus(object):
    """Unix-domain-socket bus for passing file descriptors between processes."""
    def __init__(self, path):
        super(FDBus, self).__init__()
        # Filesystem path of the AF_UNIX socket.
        self.path = path
        self.fdpool = FileDescriptorPool()
        # Dispatch table: protocol id -> handler method.
        self.proto_funcs = {LOAD:self.ld_protomsg, PASS:self.pass_protomsg,
                            RECV:self.recv_protomsg, CLOSE:self.cls_protomsg,
                            REFERENCE:self.ref_protomsg}
    def socket(self):
        # Create a stream-oriented AF_UNIX socket through libc.
        sock = libc.socket(AF_UNIX, SOCK_STREAM, PROTO_DEFAULT)
        if sock == -1:
            error_msg = get_error_msg()
            raise SocketError(error_msg)
        return sock
    def close_pool(self):
        # Close every pooled descriptor and every raw client fd.
        # NOTE(review): ``self.clients`` is never set on FDBus in this file;
        # it presumably exists on a server subclass -- confirm.
        fdpool = self.fdpool.fdobjs
        client_fdpool = self.clients.fdpool
        for fd in fdpool:
            fdpool[fd][1].fclose()
        for fd in client_fdpool:
            libc.close(fd)
    def get_fd(self, name):
        # Return the [client, fdobj] pair registered under ``name``.
        fdobj = self.fdpool.fdobjs.get(name)
        if fdobj is None:
            raise UnknownDescriptorError(name)
        return fdobj
    def send_fd(self, name, recepient=None):
        # Announce the descriptor's metadata first, then pass the fd itself
        # via sendmsg. NOTE(review): ``recepient`` is a long-standing typo of
        # "recipient" kept for compatibility; ``self.sock`` is not assigned
        # in __init__ -- confirm where it is set.
        fdobj = self.get_fd(name)[1]
        cmd = fdobj.mode
        payload = [name, fdobj.path] + \
            list(map(str, [fdobj.fd, fdobj.mode, fdobj.created]))
        request = self.build_msg(LOAD, cmd, *payload)
        recepient = recepient if recepient else self.sock
        ret = libc.send(recepient, cast(request, c_void_p),
                        MSG_LEN, MSG_FLAGS)
        if ret == -1:
            error_msg = get_error_msg()
            raise SendError(error_msg)
        self.sendmsg(LOAD, cmd, fdobj.fd, recepient)
    def remove_fd(self, name):
        # XXX build protocol send, no need for sendmsg
        fdobj = self.get_fd(name)
        self.sendmsg(CLOSE, CLS_FD, fdobj[1])
    def recvmsg(self, sock, cmd, payload=None):
        # Receive a control message (possibly carrying an fd) from ``sock``.
        msg = pointer(msghdr(RECV, cmd, payload))
        # set up a poll timout -- client disconnects -- will this call block indefin?
        if libc.recvmsg(sock, msg, MSG_SERV) == -1:
            error_msg = get_error_msg()
            raise RecvmsgError(error_msg)
        return msg
    def sendmsg(self, protocol, cmd, payload=None, client=None):
        # Send a control message; defaults to this bus's own socket.
        receipent = client if client is not None else self.sock
        msg = pointer(msghdr(protocol, cmd, payload, client))
        if libc.sendmsg(receipent, msg, MSG_SERV) == -1:
            error_msg = get_error_msg()
            raise SendmsgError(error_msg)
    def build_msg(self, protocol, cmd, *payload):
        # Serialize a request as colon-joined fields in a fixed-size buffer.
        req_buffer = REQ_BUFFER()
        request = (PROTOCOL_NAMES[protocol], COMMAND_NAMES[cmd]) + payload
        req_buffer.value = ':'.join(request)
        return req_buffer
    def createfd(self, path, mode, fd=None, client=None, created=None):
        # Build a descriptor object and register it in the pool.
        client = client if client else self.sock
        fdobj = FileDescriptor(path=path, mode=mode, fd=fd,
                               client=client, created=created)
        self.fdpool.add(client, fdobj)
    def extract_fd(self, msg):
        # Pull the passed descriptor out of the message's control data.
        fd = CMSG_DATA(msg.contents.msg_control)
        return fd
    def ld_protomsg(self, sock, cmd, msg):
        # LOAD: a peer announced a descriptor; receive it and register it.
        path, created = msg[3], float(msg[6])
        msg = self.recvmsg(sock, cmd)
        fd = self.extract_fd(msg)
        self.createfd(path, cmd, fd, sock, created)
    def recv_protomsg(self, sock, cmd, msg):
        # RECV: a peer asks us to send something back.
        if cmd == RECV_PEER:
            self.client_peer_req(sock)
        elif cmd == RECV_FD:
            self.send_fd(msg[2], sock)
        elif cmd == RECV_CMD:
            self.recvmsg(sock, RECV_CMD, msg)
    def pass_protomsg(self, sock, cmd, msg):
        # PASS: forward peers or descriptors to another party.
        if cmd == PASS_PEER:
            self.recvpeers(msg)
        elif cmd == PASS_FD:
            self.passfd(*msg[2:])
    def cls_protomsg(self, sock, cmd, msg):
        # CLOSE: drop one (or eventually all) pooled descriptors.
        if cmd == CLS_FD:
            vector = self.unpack_vector(msg)
            self.fdpool.remove(vector.name)
        elif cmd == CLS_ALL:
            pass
        else:
            raise InvalidCmdError(cmd)
    def ref_protomsg(self, sock, cmd, msg):
        # REFERENCE: reference-count management (not implemented yet).
        if cmd == RET_FD:
            pass
        elif cmd == REFCNT_FD:
            pass
        else:
            raise InvalidCmdError(cmd)
|
|
#!/usr/bin/env python
# load libraries
from __future__ import print_function
import argparse
from openpyxl import load_workbook
import coloredlogs
import logging
import re
import couchdb
import numbers
import yaml
# global variables
# WARNINGS counts non-fatal validation issues; the lists below collect cell
# IDs of problematic entries (populated elsewhere in this script).
WARNINGS = 0
NONNUMERIC = []
EMPTY = []
BADRIN = []
OUTCONC = []
OUTVOL = []
# Set up a logger with colored output
logger = logging.getLogger(__name__)
logger.propagate = False  # Otherwise the messages appeared twice
coloredlogs.install(level='INFO', logger=logger,
                    fmt='%(asctime)s %(levelname)s %(message)s')
class ProjectSheet:
    """Validator for a user-submitted 'Sample information' Excel sheet."""
    #Class Attributes
    SHEET_NAME = 'Sample information'
    FIRST_LINE = 20 # First line where user submitted data is located
    SAMPLE_NAME_COL = 'O'
    A_RATIO_COL = 'R' # A260:A280 ratio
    CONC_COL = 'S'  # concentration column
    VOL_COL = 'T'  # volume column
    RIN_COL = 'V'  # RIN column
    SAMPLE_TYPE = 'O8'  # cell holding the sample type
    PLATE_ID = 'M6'  # cell holding the plate ID
    PROJECT_NAME_USER_SHEET = 'M3'  # cell holding the project name
    # Initializer / Instance attributes
    def __init__(self, sample_info_sheet):
        """Store the workbook path; the worksheet itself is loaded lazily."""
        self.sample_info_sheet = sample_info_sheet
        self.work_sheet = None
        self.sample_rec = None
# instance methods
def getAccessUserSheet(self):
"""loads the Excel sheet"""
if self.work_sheet is None:
wb = load_workbook(self.sample_info_sheet, read_only=True, data_only=True)
ws = wb[ProjectSheet.SHEET_NAME]
self.work_sheet = ws
def projectID(self):
"""retrieves the project and plate ID from the excel sheet and checks the
correctness of the plate ID format."""
self.getAccessUserSheet()
plate_id = self.work_sheet[ProjectSheet.PLATE_ID].value
if(len(re.findall('P\d+P\d+', plate_id))>0):
project_id_user = re.findall('P\d+', plate_id)[0]
else:
logger.error(
'Given plate ID ({}) in cell {} has the wrong format. It should be in the format'
' PxxxxxPx, where x are numbers. If your Plate ID is correct, contact your project coordinator.'\
.format(plate_id, ProjectSheet.PLATE_ID)
)
quit()
return([project_id_user, plate_id])
def getSamples(self):
""" identifies the all rows containing a sample name, discards rows without entry.
Rows containing whitespace only trigger a warning and are discarded for subsequent
tests """
cellID_withSample = []
cellID_noSample = []
for i in range(ProjectSheet.FIRST_LINE, ProjectSheet.FIRST_LINE+96):
cell_id = "{col}{row_iter}".format(col=ProjectSheet.SAMPLE_NAME_COL, row_iter=i)
cell_value = str(self.work_sheet[cell_id].value)
if(cell_value.isspace()):
logger.warning(
'Cell {} contains empty spaces only. Remove content.'.format(cell_id)
)
global WARNINGS
WARNINGS += 1
elif(self.work_sheet[cell_id].value is not None):
cellID_withSample.append(i)
else:
cellID_noSample.append(cell_id) # TODO check here that these rows do really not contain information
return(cellID_withSample)
def ProjectInfo(self, config):
"""
Retrieves the project information from couchDB, checks that the project exists in
couchDB and is unique. Returns the information and the full project plateID.
"""
with open(config) as settings_file:
server_settings = yaml.load(settings_file, Loader=yaml.FullLoader)
couch = couchdb.Server(server_settings.get("couch_server", None))
db = couch["projects"]
# check the existence of the project number in couchDB
project_plate_ID = self.projectID()
project_id_found = db.view("project/project_id", key=project_plate_ID[0])
prow = project_id_found.rows
# Project not found
if len(prow) == 0:
logger.error(
'Project not found, please check your entry for the PlateID, it should have the format'
'PxxxxxPx, where x are numbers. If your Plate ID is correct, contact your project coordinator.'
)
quit()
# more than one project found
elif len(prow) > 1:
logger.error(
'Project ID not unique, please check your entry for the PlateID, it should have the format'
'PxxxxxPx, where x are numbers. If your Plate ID is correct, contact your project coordinator.'
)
quit()
else:
# puts the Document of the identified project in a new variable "pdoc"
pdoc = db.get(prow[0].id)
return pdoc, project_plate_ID[1]
def prep_standards(self, info, config):
'''
gets the sample requirements from statusDB (json format) based
on the given sample prep type.
'''
with open(config) as settings_file:
server_settings = yaml.load(settings_file, Loader=yaml.FullLoader)
couch = couchdb.Server(server_settings.get("couch_server", None))
requirementsDB = couch["sample_requirements"]
requirements = requirementsDB.view("valid/by_date", descending=True)
recom_info = requirements.rows[0].value["requirements"]
prep = info['details']['library_construction_method']
prep_recs = [None,None,None,None,None,None,None]
if prep in recom_info:
if recom_info[prep]['Quality requirement'] is not None:
prep_recs = [\
recom_info[prep]['Concentration']['Minimum'],
recom_info[prep]['Concentration']['Maximum'],
recom_info[prep]['Volume']['Minimum'],
recom_info[prep]['Amount']['Recommended'],
recom_info[prep]['Amount']['Minimum'],
recom_info[prep]['Quality requirement']['Method'],
recom_info[prep]['QC recommendation']]
if 'RIN' in recom_info[prep]['Quality requirement']:
prep_recs.append(recom_info[prep]['Quality requirement']['RIN'])
else:
prep_recs.append(None)
else:
prep_recs = [\
recom_info[prep]['Concentration']['Minimum'],
recom_info[prep]['Concentration']['Maximum'],
recom_info[prep]['Volume']['Minimum'],
recom_info[prep]['Amount']['Recommended'],
recom_info[prep]['Amount']['Minimum'],
None,
recom_info[prep]['QC recommendation'],
None]
else:
logger.error('Preparation type \"{}\" not found'.format(prep))
quit()
return(prep_recs)
def validate_project_Name(self, info, project_plate_ID ):
"""
Prints the identified project name based on the user supplied Plate/Project ID for
control purposes by the project coordinator. Further checks that the
plate number is not already in couchDB.
"""
project_name_DB = info['project_name']
samples = list(info['samples'].keys())
plate ='P{}_{}'.format(project_plate_ID.split("P")[1],project_plate_ID.split("P")[2])
found_plate = [s for s in samples if plate in s]
if(len(found_plate)>0):
new_plate_no = int(project_plate_ID.split("P")[2])
new_plate_no += 1
new_plate_ID = 'P{}P{}'.format(project_plate_ID.split("P")[1], new_plate_no)
logger.warning(
'Plate number {} is already used. Please increase the plate number to {}.'.format(project_plate_ID, new_plate_ID))
global WARNINGS
WARNINGS += 1
logger.info('identified project name: {}'.format(project_name_DB))
def validate(self, info, config_info):
"""Validates all rows with a sample ID
Given the column letter and which rows to validate:
- Initiates the given validators for cell content (numeric), concentration,
volume and RIN (RNA samples only) with the optional attributes.
- Loops through all the given cells and validates them individually.
- prints summaries of the warnings and of the Excel file check.
"""
prep_recs = self.prep_standards(info, config_info)
passes = 0
total = 0
recom_avail = 1
for row_nr in self.getSamples():
total += 1
cell_id_conc = "{col}{row_nr}".format(col=ProjectSheet.CONC_COL, row_nr=row_nr)
cell_id_vol = "{col}{row_nr}".format(col=ProjectSheet.VOL_COL, row_nr=row_nr)
cell_id_rin = "{col}{row_nr}".format(col=ProjectSheet.RIN_COL, row_nr=row_nr)
validator = Validator(self.work_sheet,cell_id_conc,cell_id_vol, cell_id_rin)
result_numeric = validator.validate_numeric()
if any(t is not None for t in prep_recs[0:7]):
result_conc = validator.validate_conc(prep_recs[0], prep_recs[1])
result_vol = validator.validate_vol(prep_recs[2])
if prep_recs[7] is not None:
result_rin = validator.validate_rin(prep_recs[7])
if result_conc and result_vol and result_rin and result_numeric: # Test passed
passes += 1
else:
if any(t is not None for t in prep_recs):
if result_conc and result_vol and result_numeric: # Test passed
passes += 1
else:
# this means that the prep chosen has no recommendations in the DB
# i.e. ALL values for recommendations are missing
recom_avail = None
if result_numeric: # Test passed
passes += 1
# summary of QC prerequisits and recommendations
if (prep_recs[5] is not None):
logger.info(
'Sample processing prerequisit: submission of {} data'.format(prep_recs[5])
)
if (prep_recs[6] is not None):
logger.info(
'Sample QC recommendation: submission of {} data'.format(prep_recs[6])
)
# summary of all warnings
if (len(EMPTY) > 0):
logger.warning(
'Required entries in the following cells are missing: {}'\
.format(EMPTY)
)
if (len(NONNUMERIC) > 0):
logger.warning(
'Required entries in the following cells are non-numeric: {}'\
.format(NONNUMERIC)
)
if (len(OUTCONC) > 0):
logger.warning(
'Sample concentration(s) in cell(s) {} is out of specifications: {}-{}ng/ul'\
.format(OUTCONC, prep_recs[0], prep_recs[1])
)
if (len(OUTVOL) > 0):
logger.warning(
'Sample volume(s) in cell(s) {} is to low: min volume = {}ul'\
.format(OUTVOL, prep_recs[2])
)
if (len(BADRIN) > 0):
logger.warning(
'RIN value in cell(s) {} is below recommendation'\
.format(BADRIN)
)
# summary for missing sample recommendations
if (recom_avail is None):
logger.info(
'Sample submission check complete. No sample recommendations available. {}/{} pass, {} warnings(s)'\
.format(passes, total, WARNINGS)
)
# summary with sample recommendations
else:
logger.info(
'Sample submission check complete. {}/{} pass, {} warning(s).'\
.format(passes, total, WARNINGS)
)
class Validator(object):
    """Validates one sample row of the user sheet.

    Problems are recorded through the module-level counters/lists
    (WARNINGS, EMPTY, NONNUMERIC, OUTCONC, OUTVOL, BADRIN), which are
    declared elsewhere in this module.
    """
    # Initializer / Instance attributes
    def __init__(self, access_sample_info_sheet, concentrationID, volumeID, rinID):
        # Worksheet object plus the cell IDs of the row under test.
        self.access_sample_info_sheet = access_sample_info_sheet
        self.concentrationID = concentrationID
        self.volumeID = volumeID
        self.rinID = rinID
    # instance methods
    def validate_numeric(self):
        """Checks whether value is numeric or not.

        Flags string-typed cells in NONNUMERIC (even when parseable as a
        number, e.g. "1,5" — true numeric cells are required) and empty
        cells in EMPTY.  Returns True when no new warning was raised.
        """
        global WARNINGS
        global EMPTY
        global NONNUMERIC
        warnings_before = WARNINGS
        for checkNumbers in [self.concentrationID, self.volumeID, self.rinID]:
            if not isinstance(self.access_sample_info_sheet[checkNumbers].value, numbers.Number):
                try:
                    # Both outcomes of the parse attempt flag the cell;
                    # the call mainly distinguishes strings from None.
                    float(self.access_sample_info_sheet[checkNumbers].value.replace(",", "."))
                    WARNINGS += 1
                    NONNUMERIC.append(checkNumbers)
                except ValueError:
                    WARNINGS += 1
                    NONNUMERIC.append(checkNumbers)
                except AttributeError:
                    # None has no .replace(): the cell is empty.
                    if self.access_sample_info_sheet[checkNumbers].value is None:
                        WARNINGS += 1
                        EMPTY.append(checkNumbers)
                    else:
                        raise
        if (WARNINGS > warnings_before):
            return False
        else:
            return True
    def validate_conc(self, min_conc, max_conc):
        """checks entry for concentration

        NOTE: implicitly returns None (falsy) when the cell was already
        flagged as non-numeric or empty.
        """
        if not(self.concentrationID in NONNUMERIC or self.concentrationID in EMPTY):
            global WARNINGS
            global OUTCONC
            if(self.access_sample_info_sheet[self.concentrationID].value < min_conc) \
            or (self.access_sample_info_sheet[self.concentrationID].value > max_conc):
                WARNINGS += 1
                OUTCONC.append(self.concentrationID)
                return False
            else:
                return True
    def validate_vol(self, vol):
        """Checks entry for volume

        NOTE: implicitly returns None (falsy) when the cell was already
        flagged as non-numeric or empty.
        """
        if not(self.volumeID in NONNUMERIC or self.volumeID in EMPTY):
            global WARNINGS
            global OUTVOL
            if(self.access_sample_info_sheet[self.volumeID].value < vol):
                WARNINGS += 1
                OUTVOL.append(self.volumeID)
                return False
            else:
                return True
    def validate_rin(self, rin):
        """Checks entry for RIN in RNA samples only

        NOTE(review): unlike the other checks, this does not guard against
        non-numeric/empty cells first — presumably relies on the RIN cell
        being numeric; confirm for Python 3 where None < int raises.
        """
        if self.access_sample_info_sheet[self.rinID].value < rin:
            global WARNINGS
            global BADRIN
            WARNINGS += 1
            BADRIN.append(self.rinID)
            return False
        else:
            return True
def main(input_sheet, config_statusDB):
    """Run all checks on the given user sample info sheet."""
    sheet = ProjectSheet(input_sheet)
    # Project information from couchDB (also validates the plate ID).
    project_info, plate_id = sheet.ProjectInfo(config_statusDB)
    # Echo the project name so the coordinator can confirm the match.
    sheet.validate_project_Name(project_info, plate_id)
    # Validate every sample row against the prep recommendations.
    sheet.validate(project_info, config_statusDB)
if __name__ == '__main__':
    # Command-line entry point: validate a user sheet against statusDB.
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument('sampleInfoSheet',
                        help="Completed sample info sent to NGI by the user.")
    parser.add_argument('config_statusDB',
                        help="settings file in yaml format to access statusDB \
in the format \"couch_server: http://<username>:<password>@tools.scilifelab.se:5984\"")
    args = parser.parse_args()
    main(args.sampleInfoSheet, args.config_statusDB)
|
|
"""Tests for the Hyperion config flow."""
from __future__ import annotations
import asyncio
from typing import Any, Awaitable
from unittest.mock import AsyncMock, Mock, patch
from hyperion import const
from homeassistant import data_entry_flow
from homeassistant.components.hyperion.const import (
CONF_AUTH_ID,
CONF_CREATE_TOKEN,
CONF_EFFECT_HIDE_LIST,
CONF_EFFECT_SHOW_LIST,
CONF_PRIORITY,
DOMAIN,
)
from homeassistant.components.light import DOMAIN as LIGHT_DOMAIN
from homeassistant.config_entries import SOURCE_REAUTH, SOURCE_SSDP, SOURCE_USER
from homeassistant.const import (
ATTR_ENTITY_ID,
CONF_HOST,
CONF_PORT,
CONF_TOKEN,
SERVICE_TURN_ON,
)
from homeassistant.core import HomeAssistant
from . import (
TEST_AUTH_REQUIRED_RESP,
TEST_CONFIG_ENTRY_ID,
TEST_ENTITY_ID_1,
TEST_HOST,
TEST_INSTANCE,
TEST_PORT,
TEST_PORT_UI,
TEST_SYSINFO_ID,
TEST_TITLE,
TEST_TOKEN,
add_test_config_entry,
create_mock_client,
)
from tests.common import MockConfigEntry
TEST_IP_ADDRESS = "192.168.0.1"

# Minimal user input for the config flow's "user" step.
TEST_HOST_PORT: dict[str, Any] = {
    CONF_HOST: TEST_HOST,
    CONF_PORT: TEST_PORT,
}

TEST_AUTH_ID = "ABCDE"

# Canned Hyperion JSON-API responses for the token-request round trip.
TEST_REQUEST_TOKEN_SUCCESS = {
    "command": "authorize-requestToken",
    "success": True,
    "info": {"comment": const.DEFAULT_ORIGIN, "id": TEST_AUTH_ID, "token": TEST_TOKEN},
}

TEST_REQUEST_TOKEN_FAIL = {
    "command": "authorize-requestToken",
    "success": False,
    "error": "Token request timeout or denied",
}

# SSDP discovery payload shaped like a real Hyperion server announcement.
# NOTE: the nested "ports" dict is shared — tests must not mutate it in place.
TEST_SSDP_SERVICE_INFO = {
    "ssdp_location": f"http://{TEST_HOST}:{TEST_PORT_UI}/description.xml",
    "ssdp_st": "upnp:rootdevice",
    "deviceType": "urn:schemas-upnp-org:device:Basic:1",
    "friendlyName": f"Hyperion ({TEST_HOST})",
    "manufacturer": "Hyperion Open Source Ambient Lighting",
    "manufacturerURL": "https://www.hyperion-project.org",
    "modelDescription": "Hyperion Open Source Ambient Light",
    "modelName": "Hyperion",
    "modelNumber": "2.0.0-alpha.8",
    "modelURL": "https://www.hyperion-project.org",
    "serialNumber": f"{TEST_SYSINFO_ID}",
    "UDN": f"uuid:{TEST_SYSINFO_ID}",
    "ports": {
        "jsonServer": f"{TEST_PORT}",
        "sslServer": "8092",
        "protoBuffer": "19445",
        "flatBuffer": "19400",
    },
    "presentationURL": "index.html",
    "iconList": {
        "icon": {
            "mimetype": "image/png",
            "height": "100",
            "width": "100",
            "depth": "32",
            "url": "img/hyperion/ssdp_icon.png",
        }
    },
    "ssdp_usn": f"uuid:{TEST_SYSINFO_ID}",
    "ssdp_ext": "",
    "ssdp_server": "Raspbian GNU/Linux 10 (buster)/10 UPnP/1.0 Hyperion/2.0.0-alpha.8",
}
async def _create_mock_entry(hass: HomeAssistant) -> MockConfigEntry:
    """Add a test Hyperion entity to hass.

    Creates a config entry with the canonical test identifiers and sets it
    up with a mocked Hyperion client, so flows can collide with it.
    """
    entry: MockConfigEntry = MockConfigEntry(  # type: ignore[no-untyped-call]
        entry_id=TEST_CONFIG_ENTRY_ID,
        domain=DOMAIN,
        unique_id=TEST_SYSINFO_ID,
        title=TEST_TITLE,
        data={
            "host": TEST_HOST,
            "port": TEST_PORT,
            "instance": TEST_INSTANCE,
        },
    )
    entry.add_to_hass(hass)  # type: ignore[no-untyped-call]

    # Setup: patch the client so no real network connection is attempted.
    client = create_mock_client()
    with patch(
        "homeassistant.components.hyperion.client.HyperionClient", return_value=client
    ):
        await hass.config_entries.async_setup(entry.entry_id)
        await hass.async_block_till_done()
    return entry
async def _init_flow(
    hass: HomeAssistant,
    source: str = SOURCE_USER,
    data: dict[str, Any] | None = None,
) -> Any:
    """Start a new config flow from the given source."""
    if not data:
        data = {}
    return await hass.config_entries.flow.async_init(
        DOMAIN, data=data, context={"source": source}
    )
async def _configure_flow(
    hass: HomeAssistant, result: dict, user_input: dict[str, Any] | None = None
) -> Any:
    """Provide input to a flow.

    Setup entry points are patched out so finishing a flow does not
    trigger a real integration setup.
    """
    user_input = user_input or {}
    with patch(
        "homeassistant.components.hyperion.async_setup", return_value=True
    ), patch(
        "homeassistant.components.hyperion.async_setup_entry",
        return_value=True,
    ):
        result = await hass.config_entries.flow.async_configure(
            result["flow_id"], user_input=user_input
        )
        await hass.async_block_till_done()
    return result
async def test_user_if_no_configuration(hass: HomeAssistant) -> None:
    """Check flow behavior when no configuration is present."""
    flow = await _init_flow(hass)

    # A fresh flow should present the "user" form for this integration.
    assert flow["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert flow["handler"] == DOMAIN
    assert flow["step_id"] == "user"
async def test_user_existing_id_abort(hass: HomeAssistant) -> None:
    """Verify a duplicate ID results in an abort."""
    result = await _init_flow(hass)

    # Pre-create an entry so the new flow's unique id collides with it.
    await _create_mock_entry(hass)

    client = create_mock_client()
    with patch(
        "homeassistant.components.hyperion.client.HyperionClient", return_value=client
    ):
        result = await _configure_flow(hass, result, user_input=TEST_HOST_PORT)
        assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
        assert result["reason"] == "already_configured"
async def test_user_client_errors(hass: HomeAssistant) -> None:
    """Verify correct behaviour with client errors."""
    result = await _init_flow(hass)

    client = create_mock_client()

    # Fail the connection: the form is re-shown with a base error.
    client.async_client_connect = AsyncMock(return_value=False)
    with patch(
        "homeassistant.components.hyperion.client.HyperionClient", return_value=client
    ):
        result = await _configure_flow(hass, result, user_input=TEST_HOST_PORT)
        assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
        assert result["errors"]["base"] == "cannot_connect"

    # Fail the auth check call: the flow aborts outright.
    client.async_client_connect = AsyncMock(return_value=True)
    client.async_is_auth_required = AsyncMock(return_value={"success": False})
    with patch(
        "homeassistant.components.hyperion.client.HyperionClient", return_value=client
    ):
        result = await _configure_flow(hass, result, user_input=TEST_HOST_PORT)
        assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
        assert result["reason"] == "auth_required_error"
async def test_user_confirm_cannot_connect(hass: HomeAssistant) -> None:
    """Test a failure to connect during confirmation."""
    result = await _init_flow(hass)

    # First client (user step) connects; second (confirmation) does not.
    good_client = create_mock_client()
    bad_client = create_mock_client()
    bad_client.async_client_connect = AsyncMock(return_value=False)

    # Confirmation sync_client_connect fails.
    with patch(
        "homeassistant.components.hyperion.client.HyperionClient",
        side_effect=[good_client, bad_client],
    ):
        result = await _configure_flow(hass, result, user_input=TEST_HOST_PORT)
        assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
        assert result["reason"] == "cannot_connect"
async def test_user_confirm_id_error(hass: HomeAssistant) -> None:
    """Test a failure fetching the server id during confirmation."""
    flow = await _init_flow(hass)

    # A server that cannot report its sysinfo id aborts the flow.
    mock_client = create_mock_client()
    mock_client.async_sysinfo_id = AsyncMock(return_value=None)

    with patch(
        "homeassistant.components.hyperion.client.HyperionClient",
        return_value=mock_client,
    ):
        flow = await _configure_flow(hass, flow, user_input=TEST_HOST_PORT)
        assert flow["type"] == data_entry_flow.RESULT_TYPE_ABORT
        assert flow["reason"] == "no_id"
async def test_user_noauth_flow_success(hass: HomeAssistant) -> None:
    """Check a full flow without auth."""
    result = await _init_flow(hass)

    client = create_mock_client()
    with patch(
        "homeassistant.components.hyperion.client.HyperionClient", return_value=client
    ):
        result = await _configure_flow(hass, result, user_input=TEST_HOST_PORT)
        # No auth required: the entry is created with host/port only.
        assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
        assert result["handler"] == DOMAIN
        assert result["title"] == TEST_TITLE
        assert result["data"] == {
            **TEST_HOST_PORT,
        }
async def test_user_auth_required(hass: HomeAssistant) -> None:
    """Verify correct behaviour when auth is required."""
    result = await _init_flow(hass)

    # Server reports that authorization is required.
    client = create_mock_client()
    client.async_is_auth_required = AsyncMock(return_value=TEST_AUTH_REQUIRED_RESP)

    with patch(
        "homeassistant.components.hyperion.client.HyperionClient", return_value=client
    ):
        result = await _configure_flow(hass, result, user_input=TEST_HOST_PORT)
        # The flow should advance to the auth step.
        assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
        assert result["step_id"] == "auth"
async def test_auth_static_token_auth_required_fail(hass: HomeAssistant) -> None:
    """Verify correct behaviour with a failed auth required call."""
    flow = await _init_flow(hass)

    # A None response from the auth-required call is treated as an error.
    mock_client = create_mock_client()
    mock_client.async_is_auth_required = AsyncMock(return_value=None)

    with patch(
        "homeassistant.components.hyperion.client.HyperionClient",
        return_value=mock_client,
    ):
        flow = await _configure_flow(hass, flow, user_input=TEST_HOST_PORT)
        assert flow["type"] == data_entry_flow.RESULT_TYPE_ABORT
        assert flow["reason"] == "auth_required_error"
async def test_auth_static_token_success(hass: HomeAssistant) -> None:
    """Test a successful flow with a static token."""
    result = await _init_flow(hass)
    assert result["step_id"] == "user"

    client = create_mock_client()
    client.async_is_auth_required = AsyncMock(return_value=TEST_AUTH_REQUIRED_RESP)

    with patch(
        "homeassistant.components.hyperion.client.HyperionClient", return_value=client
    ):
        result = await _configure_flow(hass, result, user_input=TEST_HOST_PORT)
        # Provide a pre-existing token rather than requesting a new one.
        result = await _configure_flow(
            hass, result, user_input={CONF_CREATE_TOKEN: False, CONF_TOKEN: TEST_TOKEN}
        )
        assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
        assert result["handler"] == DOMAIN
        assert result["title"] == TEST_TITLE
        assert result["data"] == {
            **TEST_HOST_PORT,
            CONF_TOKEN: TEST_TOKEN,
        }
async def test_auth_static_token_login_connect_fail(hass: HomeAssistant) -> None:
    """Test correct behavior with a static token that cannot connect."""
    result = await _init_flow(hass)
    assert result["step_id"] == "user"

    client = create_mock_client()
    client.async_is_auth_required = AsyncMock(return_value=TEST_AUTH_REQUIRED_RESP)

    with patch(
        "homeassistant.components.hyperion.client.HyperionClient", return_value=client
    ):
        result = await _configure_flow(hass, result, user_input=TEST_HOST_PORT)
        # Break the connection before the token is submitted.
        client.async_client_connect = AsyncMock(return_value=False)
        result = await _configure_flow(
            hass, result, user_input={CONF_CREATE_TOKEN: False, CONF_TOKEN: TEST_TOKEN}
        )
        assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
        assert result["reason"] == "cannot_connect"
async def test_auth_static_token_login_fail(hass: HomeAssistant) -> None:
    """Test correct behavior with a static token that cannot login."""
    result = await _init_flow(hass)
    assert result["step_id"] == "user"

    client = create_mock_client()
    client.async_is_auth_required = AsyncMock(return_value=TEST_AUTH_REQUIRED_RESP)

    with patch(
        "homeassistant.components.hyperion.client.HyperionClient", return_value=client
    ):
        result = await _configure_flow(hass, result, user_input=TEST_HOST_PORT)
        # Connection works but the login with the token is rejected.
        client.async_login = AsyncMock(
            return_value={"command": "authorize-login", "success": False, "tan": 0}
        )
        result = await _configure_flow(
            hass, result, user_input={CONF_CREATE_TOKEN: False, CONF_TOKEN: TEST_TOKEN}
        )
        # The auth form is re-shown with an invalid-token error.
        assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
        assert result["errors"]["base"] == "invalid_access_token"
async def test_auth_create_token_approval_declined(hass: HomeAssistant) -> None:
    """Verify correct behaviour when a token request is declined."""
    result = await _init_flow(hass)

    client = create_mock_client()
    client.async_is_auth_required = AsyncMock(return_value=TEST_AUTH_REQUIRED_RESP)

    with patch(
        "homeassistant.components.hyperion.client.HyperionClient", return_value=client
    ):
        result = await _configure_flow(hass, result, user_input=TEST_HOST_PORT)
        assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
        assert result["step_id"] == "auth"

    # The Hyperion UI user declines the token request.
    client.async_request_token = AsyncMock(return_value=TEST_REQUEST_TOKEN_FAIL)
    with patch(
        "homeassistant.components.hyperion.client.HyperionClient", return_value=client
    ), patch(
        "homeassistant.components.hyperion.config_flow.client.generate_random_auth_id",
        return_value=TEST_AUTH_ID,
    ):
        result = await _configure_flow(
            hass, result, user_input={CONF_CREATE_TOKEN: True}
        )
        assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
        assert result["step_id"] == "create_token"
        # The auth id is shown to the user so they can match the request.
        assert result["description_placeholders"] == {
            CONF_AUTH_ID: TEST_AUTH_ID,
        }

        result = await _configure_flow(hass, result)
        await hass.async_block_till_done()
        assert result["type"] == data_entry_flow.RESULT_TYPE_EXTERNAL_STEP
        assert result["step_id"] == "create_token_external"

        # The flow will be automatically advanced by the auth token response.
        result = await _configure_flow(hass, result)
        assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
        assert result["reason"] == "auth_new_token_not_granted_error"
async def test_auth_create_token_approval_declined_task_canceled(
    hass: HomeAssistant,
) -> None:
    """Verify the pending token-request task is canceled when declined."""
    result = await _init_flow(hass)

    client = create_mock_client()
    client.async_is_auth_required = AsyncMock(return_value=TEST_AUTH_REQUIRED_RESP)

    with patch(
        "homeassistant.components.hyperion.client.HyperionClient", return_value=client
    ):
        result = await _configure_flow(hass, result, user_input=TEST_HOST_PORT)
        assert result["step_id"] == "auth"

    client.async_request_token = AsyncMock(return_value=TEST_REQUEST_TOKEN_FAIL)

    class CanceledAwaitableMock(AsyncMock):
        """A canceled awaitable mock."""

        def __await__(self) -> None:
            raise asyncio.CancelledError

    mock_task = CanceledAwaitableMock()
    task_coro: Awaitable | None = None

    def create_task(arg: Any) -> CanceledAwaitableMock:
        # Capture the coroutine handed to hass so it can be awaited below.
        nonlocal task_coro
        task_coro = arg
        return mock_task

    with patch(
        "homeassistant.components.hyperion.client.HyperionClient", return_value=client
    ), patch(
        "homeassistant.components.hyperion.config_flow.client.generate_random_auth_id",
        return_value=TEST_AUTH_ID,
    ), patch.object(
        hass, "async_create_task", side_effect=create_task
    ):
        result = await _configure_flow(
            hass, result, user_input={CONF_CREATE_TOKEN: True}
        )
        assert result["step_id"] == "create_token"

        result = await _configure_flow(hass, result)
        assert result["step_id"] == "create_token_external"

        # Leave the task running, to ensure it is canceled.
        mock_task.done = Mock(return_value=False)
        mock_task.cancel = Mock()
        result = await _configure_flow(hass, result)

        # This await will advance to the next step.
        assert task_coro
        await task_coro

        # Assert that cancel is called on the task.
        assert mock_task.cancel.called
async def test_auth_create_token_when_issued_token_fails(
    hass: HomeAssistant,
) -> None:
    """Verify correct behaviour when a token is granted but fails to authenticate."""
    result = await _init_flow(hass)

    client = create_mock_client()
    client.async_is_auth_required = AsyncMock(return_value=TEST_AUTH_REQUIRED_RESP)

    with patch(
        "homeassistant.components.hyperion.client.HyperionClient", return_value=client
    ):
        result = await _configure_flow(hass, result, user_input=TEST_HOST_PORT)
        assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
        assert result["step_id"] == "auth"

    # Token request succeeds ...
    client.async_request_token = AsyncMock(return_value=TEST_REQUEST_TOKEN_SUCCESS)
    with patch(
        "homeassistant.components.hyperion.client.HyperionClient", return_value=client
    ), patch(
        "homeassistant.components.hyperion.config_flow.client.generate_random_auth_id",
        return_value=TEST_AUTH_ID,
    ):
        result = await _configure_flow(
            hass, result, user_input={CONF_CREATE_TOKEN: True}
        )
        assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
        assert result["step_id"] == "create_token"
        assert result["description_placeholders"] == {
            CONF_AUTH_ID: TEST_AUTH_ID,
        }

        result = await _configure_flow(hass, result)
        assert result["type"] == data_entry_flow.RESULT_TYPE_EXTERNAL_STEP
        assert result["step_id"] == "create_token_external"

        # The flow will be automatically advanced by the auth token response.
        # ... but the final verification connection with the new token fails.
        client.async_client_connect = AsyncMock(return_value=False)
        result = await _configure_flow(hass, result)
        assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
        assert result["reason"] == "cannot_connect"
async def test_auth_create_token_success(hass: HomeAssistant) -> None:
    """Verify correct behaviour when a token is successfully created."""
    result = await _init_flow(hass)

    client = create_mock_client()
    client.async_is_auth_required = AsyncMock(return_value=TEST_AUTH_REQUIRED_RESP)

    with patch(
        "homeassistant.components.hyperion.client.HyperionClient", return_value=client
    ):
        result = await _configure_flow(hass, result, user_input=TEST_HOST_PORT)
        assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
        assert result["step_id"] == "auth"

    # The server grants the requested token.
    client.async_request_token = AsyncMock(return_value=TEST_REQUEST_TOKEN_SUCCESS)
    with patch(
        "homeassistant.components.hyperion.client.HyperionClient", return_value=client
    ), patch(
        "homeassistant.components.hyperion.config_flow.client.generate_random_auth_id",
        return_value=TEST_AUTH_ID,
    ):
        result = await _configure_flow(
            hass, result, user_input={CONF_CREATE_TOKEN: True}
        )
        assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
        assert result["step_id"] == "create_token"
        assert result["description_placeholders"] == {
            CONF_AUTH_ID: TEST_AUTH_ID,
        }

        result = await _configure_flow(hass, result)
        assert result["type"] == data_entry_flow.RESULT_TYPE_EXTERNAL_STEP
        assert result["step_id"] == "create_token_external"

        # The flow will be automatically advanced by the auth token response.
        result = await _configure_flow(hass, result)
        # The created entry stores the newly issued token.
        assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
        assert result["handler"] == DOMAIN
        assert result["title"] == TEST_TITLE
        assert result["data"] == {
            **TEST_HOST_PORT,
            CONF_TOKEN: TEST_TOKEN,
        }
async def test_auth_create_token_success_but_login_fail(
    hass: HomeAssistant,
) -> None:
    """Verify correct behaviour when a token is successfully created but the login fails."""
    result = await _init_flow(hass)

    client = create_mock_client()
    client.async_is_auth_required = AsyncMock(return_value=TEST_AUTH_REQUIRED_RESP)

    with patch(
        "homeassistant.components.hyperion.client.HyperionClient", return_value=client
    ):
        result = await _configure_flow(hass, result, user_input=TEST_HOST_PORT)
        assert result["step_id"] == "auth"

    client.async_request_token = AsyncMock(return_value=TEST_REQUEST_TOKEN_SUCCESS)
    with patch(
        "homeassistant.components.hyperion.client.HyperionClient", return_value=client
    ), patch(
        "homeassistant.components.hyperion.config_flow.client.generate_random_auth_id",
        return_value=TEST_AUTH_ID,
    ):
        result = await _configure_flow(
            hass, result, user_input={CONF_CREATE_TOKEN: True}
        )
        assert result["step_id"] == "create_token"

        result = await _configure_flow(hass, result)
        assert result["step_id"] == "create_token_external"

        # The issued token is not accepted by the login call.
        client.async_login = AsyncMock(
            return_value={"command": "authorize-login", "success": False, "tan": 0}
        )
        # The flow will be automatically advanced by the auth token response.
        result = await _configure_flow(hass, result)
        assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
        assert result["reason"] == "auth_new_token_not_work_error"
async def test_ssdp_success(hass: HomeAssistant) -> None:
    """Check an SSDP flow."""
    client = create_mock_client()
    with patch(
        "homeassistant.components.hyperion.client.HyperionClient", return_value=client
    ):
        result = await _init_flow(hass, source=SOURCE_SSDP, data=TEST_SSDP_SERVICE_INFO)
        await hass.async_block_till_done()

    # Accept the confirmation.
    with patch(
        "homeassistant.components.hyperion.client.HyperionClient", return_value=client
    ):
        result = await _configure_flow(hass, result)
        # The discovered host/port end up in the created entry.
        assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
        assert result["handler"] == DOMAIN
        assert result["title"] == TEST_TITLE
        assert result["data"] == {
            CONF_HOST: TEST_HOST,
            CONF_PORT: TEST_PORT,
        }
async def test_ssdp_cannot_connect(hass: HomeAssistant) -> None:
    """Check an SSDP flow that cannot connect."""
    # A discovered server that refuses the connection aborts the flow.
    mock_client = create_mock_client()
    mock_client.async_client_connect = AsyncMock(return_value=False)

    with patch(
        "homeassistant.components.hyperion.client.HyperionClient",
        return_value=mock_client,
    ):
        flow = await _init_flow(hass, source=SOURCE_SSDP, data=TEST_SSDP_SERVICE_INFO)
        await hass.async_block_till_done()
        assert flow["type"] == data_entry_flow.RESULT_TYPE_ABORT
        assert flow["reason"] == "cannot_connect"
async def test_ssdp_missing_serial(hass: HomeAssistant) -> None:
    """Check an SSDP flow where no id is provided."""
    mock_client = create_mock_client()

    # Strip the serial number from the announcement (top-level key only,
    # so a shallow copy is safe here).
    incomplete_data = {**TEST_SSDP_SERVICE_INFO}
    del incomplete_data["serialNumber"]

    with patch(
        "homeassistant.components.hyperion.client.HyperionClient",
        return_value=mock_client,
    ):
        flow = await _init_flow(hass, source=SOURCE_SSDP, data=incomplete_data)
        await hass.async_block_till_done()
        assert flow["type"] == data_entry_flow.RESULT_TYPE_ABORT
        assert flow["reason"] == "no_id"
async def test_ssdp_failure_bad_port_json(hass: HomeAssistant) -> None:
    """Check an SSDP flow with bad json port."""
    client = create_mock_client()

    # BUGFIX: {**TEST_SSDP_SERVICE_INFO} is a shallow copy, so mutating
    # bad_data["ports"] altered the shared nested dict and leaked the bad
    # port into every later test.  Copy the nested "ports" dict as well.
    bad_data: dict[str, Any] = {
        **TEST_SSDP_SERVICE_INFO,
        "ports": {**TEST_SSDP_SERVICE_INFO["ports"]},
    }
    bad_data["ports"]["jsonServer"] = "not_a_port"

    with patch(
        "homeassistant.components.hyperion.client.HyperionClient", return_value=client
    ):
        result = await _init_flow(hass, source=SOURCE_SSDP, data=bad_data)
        result = await _configure_flow(hass, result)
        await hass.async_block_till_done()
        # The unparsable port falls back to the Hyperion default.
        assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
        assert result["data"][CONF_PORT] == const.DEFAULT_PORT_JSON
async def test_ssdp_failure_bad_port_ui(hass: HomeAssistant) -> None:
    """Check an SSDP flow with bad ui port."""
    client = create_mock_client()
    client.async_is_auth_required = AsyncMock(return_value=TEST_AUTH_REQUIRED_RESP)

    # Corrupt the UI port inside the ssdp location URL (top-level key only).
    bad_data = {**TEST_SSDP_SERVICE_INFO}
    bad_data["ssdp_location"] = f"http://{TEST_HOST}:not_a_port/description.xml"

    with patch(
        "homeassistant.components.hyperion.client.HyperionClient", return_value=client
    ), patch(
        "homeassistant.components.hyperion.config_flow.client.generate_random_auth_id",
        return_value=TEST_AUTH_ID,
    ):
        result = await _init_flow(hass, source=SOURCE_SSDP, data=bad_data)
        assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
        assert result["step_id"] == "auth"

        client.async_request_token = AsyncMock(return_value=TEST_REQUEST_TOKEN_FAIL)
        result = await _configure_flow(
            hass, result, user_input={CONF_CREATE_TOKEN: True}
        )
        assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
        assert result["step_id"] == "create_token"

        # Verify a working URL is used despite the bad port number
        assert result["description_placeholders"] == {
            CONF_AUTH_ID: TEST_AUTH_ID,
        }
async def test_ssdp_abort_duplicates(hass: HomeAssistant) -> None:
    """Check that a second SSDP flow for the same server aborts."""
    client = create_mock_client()
    with patch(
        "homeassistant.components.hyperion.client.HyperionClient", return_value=client
    ):
        # Start two flows for the identical discovery payload.
        result_1 = await _init_flow(
            hass, source=SOURCE_SSDP, data=TEST_SSDP_SERVICE_INFO
        )
        result_2 = await _init_flow(
            hass, source=SOURCE_SSDP, data=TEST_SSDP_SERVICE_INFO
        )
        await hass.async_block_till_done()

    # Only the first flow proceeds; the duplicate is aborted.
    assert result_1["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result_2["type"] == data_entry_flow.RESULT_TYPE_ABORT
    assert result_2["reason"] == "already_in_progress"
async def test_options_priority(hass: HomeAssistant) -> None:
    """Check an options flow priority option.

    Sets a new priority via the options flow, then turns a light on and
    verifies the configured priority is passed to the client call.
    """
    config_entry = add_test_config_entry(hass)
    client = create_mock_client()
    with patch(
        "homeassistant.components.hyperion.client.HyperionClient", return_value=client
    ):
        await hass.config_entries.async_setup(config_entry.entry_id)
        await hass.async_block_till_done()
        assert hass.states.get(TEST_ENTITY_ID_1) is not None
        result = await hass.config_entries.options.async_init(config_entry.entry_id)
        assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
        assert result["step_id"] == "init"
        new_priority = 1
        result = await hass.config_entries.options.async_configure(
            result["flow_id"],
            user_input={CONF_PRIORITY: new_priority},
        )
        await hass.async_block_till_done()
        assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
        assert result["data"][CONF_PRIORITY] == new_priority
        # Turn the light on and ensure the new priority is used.
        client.async_send_set_color = AsyncMock(return_value=True)
        await hass.services.async_call(
            LIGHT_DOMAIN,
            SERVICE_TURN_ON,
            {ATTR_ENTITY_ID: TEST_ENTITY_ID_1},
            blocking=True,
        )
        # pylint: disable=unsubscriptable-object
        assert client.async_send_set_color.call_args[1][CONF_PRIORITY] == new_priority
async def test_options_effect_show_list(hass: HomeAssistant) -> None:
    """Check an options flow effect show list.

    Selecting a subset of effects to show must store the complement
    (hidden effects plus external sources) in the entry options.
    """
    config_entry = add_test_config_entry(hass)
    client = create_mock_client()
    client.effects = [
        {const.KEY_NAME: "effect1"},
        {const.KEY_NAME: "effect2"},
        {const.KEY_NAME: "effect3"},
    ]
    with patch(
        "homeassistant.components.hyperion.client.HyperionClient", return_value=client
    ):
        await hass.config_entries.async_setup(config_entry.entry_id)
        await hass.async_block_till_done()
        result = await hass.config_entries.options.async_init(config_entry.entry_id)
        assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
        result = await hass.config_entries.options.async_configure(
            result["flow_id"],
            user_input={CONF_EFFECT_SHOW_LIST: ["effect1", "effect3"]},
        )
        await hass.async_block_till_done()
        assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
        # effect1 and effect3 only, so effect2 & external sources are hidden.
        assert result["data"][CONF_EFFECT_HIDE_LIST] == sorted(
            ["effect2"] + const.KEY_COMPONENTID_EXTERNAL_SOURCES
        )
async def test_options_effect_hide_list_cannot_connect(hass: HomeAssistant) -> None:
    """Check an options flow effect hide list with a failed connection.

    If the client cannot connect when the options flow starts, the flow
    must abort with ``cannot_connect``.
    """
    config_entry = add_test_config_entry(hass)
    client = create_mock_client()
    with patch(
        "homeassistant.components.hyperion.client.HyperionClient", return_value=client
    ):
        await hass.config_entries.async_setup(config_entry.entry_id)
        await hass.async_block_till_done()
        # Break connectivity after setup so only the options flow fails.
        client.async_client_connect = AsyncMock(return_value=False)
        result = await hass.config_entries.options.async_init(config_entry.entry_id)
        assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
        assert result["reason"] == "cannot_connect"
async def test_reauth_success(hass: HomeAssistant) -> None:
    """Check a reauth flow that succeeds.

    A reauth flow completed with a valid token must abort with
    ``reauth_successful`` and persist the token in the config entry.
    """
    config_data = {
        CONF_HOST: TEST_HOST,
        CONF_PORT: TEST_PORT,
    }
    config_entry = add_test_config_entry(hass, data=config_data)
    client = create_mock_client()
    # Server now requires auth, which is what triggers the reauth form.
    client.async_is_auth_required = AsyncMock(return_value=TEST_AUTH_REQUIRED_RESP)
    with patch(
        "homeassistant.components.hyperion.client.HyperionClient", return_value=client
    ), patch("homeassistant.components.hyperion.async_setup", return_value=True), patch(
        "homeassistant.components.hyperion.async_setup_entry", return_value=True
    ):
        result = await _init_flow(
            hass,
            source=SOURCE_REAUTH,
            data=config_data,
        )
        await hass.async_block_till_done()
        assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
        result = await _configure_flow(
            hass, result, user_input={CONF_CREATE_TOKEN: False, CONF_TOKEN: TEST_TOKEN}
        )
        assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
        assert result["reason"] == "reauth_successful"
        assert CONF_TOKEN in config_entry.data
async def test_reauth_cannot_connect(hass: HomeAssistant) -> None:
    """Check a reauth flow that fails to connect.

    If the client cannot connect during reauth, the flow must abort
    with ``cannot_connect``.
    """
    config_data = {
        CONF_HOST: TEST_HOST,
        CONF_PORT: TEST_PORT,
    }
    add_test_config_entry(hass, data=config_data)
    client = create_mock_client()
    client.async_client_connect = AsyncMock(return_value=False)
    with patch(
        "homeassistant.components.hyperion.client.HyperionClient", return_value=client
    ):
        result = await _init_flow(
            hass,
            source=SOURCE_REAUTH,
            data=config_data,
        )
        await hass.async_block_till_done()
        assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
        assert result["reason"] == "cannot_connect"
|
|
'''
YARN Cluster Metrics
--------------------
yarn.metrics.appsSubmitted The number of submitted apps
yarn.metrics.appsCompleted The number of completed apps
yarn.metrics.appsPending The number of pending apps
yarn.metrics.appsRunning The number of running apps
yarn.metrics.appsFailed The number of failed apps
yarn.metrics.appsKilled The number of killed apps
yarn.metrics.reservedMB The size of reserved memory
yarn.metrics.availableMB The amount of available memory
yarn.metrics.allocatedMB The amount of allocated memory
yarn.metrics.totalMB The amount of total memory
yarn.metrics.reservedVirtualCores The number of reserved virtual cores
yarn.metrics.availableVirtualCores The number of available virtual cores
yarn.metrics.allocatedVirtualCores The number of allocated virtual cores
yarn.metrics.totalVirtualCores The total number of virtual cores
yarn.metrics.containersAllocated The number of containers allocated
yarn.metrics.containersReserved The number of containers reserved
yarn.metrics.containersPending The number of containers pending
yarn.metrics.totalNodes The total number of nodes
yarn.metrics.activeNodes The number of active nodes
yarn.metrics.lostNodes The number of lost nodes
yarn.metrics.unhealthyNodes The number of unhealthy nodes
yarn.metrics.decommissionedNodes The number of decommissioned nodes
yarn.metrics.rebootedNodes The number of rebooted nodes
YARN App Metrics
----------------
yarn.app.progress The progress of the application as a percent
yarn.app.startedTime The time in which application started (in ms since epoch)
yarn.app.finishedTime The time in which the application finished (in ms since epoch)
yarn.app.elapsedTime The elapsed time since the application started (in ms)
yarn.app.allocatedMB The sum of memory in MB allocated to the applications running containers
yarn.app.allocatedVCores The sum of virtual cores allocated to the applications running containers
yarn.app.runningContainers The number of containers currently running for the application
yarn.app.memorySeconds The amount of memory the application has allocated (megabyte-seconds)
yarn.app.vcoreSeconds The amount of CPU resources the application has allocated (virtual core-seconds)
YARN Node Metrics
-----------------
yarn.node.lastHealthUpdate The last time the node reported its health (in ms since epoch)
yarn.node.usedMemoryMB The total amount of memory currently used on the node (in MB)
yarn.node.availMemoryMB The total amount of memory currently available on the node (in MB)
yarn.node.usedVirtualCores The total number of vCores currently used on the node
yarn.node.availableVirtualCores The total number of vCores available on the node
yarn.node.numContainers The total number of containers currently running on the node
YARN Capacity Scheduler Metrics
-------------------------------
yarn.queue.root.maxCapacity The configured maximum queue capacity in percentage for root queue
yarn.queue.root.usedCapacity The used queue capacity in percentage for root queue
yarn.queue.root.capacity The configured queue capacity in percentage for root queue
yarn.queue.numPendingApplications The number of pending applications in this queue
yarn.queue.userAMResourceLimit.memory The maximum memory resources a user can use for Application Masters (in MB)
yarn.queue.userAMResourceLimit.vCores The maximum vCpus a user can use for Application Masters
yarn.queue.absoluteCapacity The absolute capacity percentage this queue can use of entire cluster
yarn.queue.userLimitFactor The user limit factor set in the configuration
yarn.queue.userLimit The minimum user limit percent set in the configuration
yarn.queue.numApplications The number of applications currently in the queue
yarn.queue.usedAMResource.memory The memory resources used for Application Masters (in MB)
yarn.queue.usedAMResource.vCores The vCpus used for Application Masters
yarn.queue.absoluteUsedCapacity The absolute used capacity percentage this queue is using of the entire cluster
yarn.queue.resourcesUsed.memory The total memory resources this queue is using (in MB)
yarn.queue.resourcesUsed.vCores The total vCpus this queue is using
yarn.queue.AMResourceLimit.vCores The maximum vCpus this queue can use for Application Masters
yarn.queue.AMResourceLimit.memory The maximum memory resources this queue can use for Application Masters (in MB)
yarn.queue.capacity The configured queue capacity in percentage relative to its parent queue
yarn.queue.numActiveApplications The number of active applications in this queue
yarn.queue.absoluteMaxCapacity The absolute maximum capacity percentage this queue can use of the entire cluster
yarn.queue.usedCapacity The used queue capacity in percentage
yarn.queue.numContainers The number of containers being used
yarn.queue.maxCapacity The configured maximum queue capacity in percentage relative to its parent queue
yarn.queue.maxApplications The maximum number of applications this queue can have
yarn.queue.maxApplicationsPerUser The maximum number of active applications per user this queue can have
'''
# stdlib
from urlparse import urljoin, urlsplit, urlunsplit
# 3rd party
from requests.exceptions import Timeout, HTTPError, InvalidURL, ConnectionError
import requests
# Project
from checks import AgentCheck
from config import _is_affirmative
# Default settings
DEFAULT_RM_URI = 'http://localhost:8088'
# NOTE(review): DEFAULT_TIMEOUT is not referenced by the visible request code
# (which uses self.default_integration_http_timeout) — presumably legacy; confirm.
DEFAULT_TIMEOUT = 5
# NOTE: the constant name has a typo ("CUSTER" instead of "CLUSTER");
# kept as-is for backward compatibility with existing references.
DEFAULT_CUSTER_NAME = 'default_cluster'
DEFAULT_COLLECT_APP_METRICS = True
# Cap on per-queue metric detail; queues beyond this count are skipped.
MAX_DETAILED_QUEUES = 100
# Path to retrieve cluster metrics
YARN_CLUSTER_METRICS_PATH = '/ws/v1/cluster/metrics'
# Path to retrieve YARN APPS
YARN_APPS_PATH = '/ws/v1/cluster/apps'
# Path to retrieve node statistics
YARN_NODES_PATH = '/ws/v1/cluster/nodes'
# Path to retrieve queue statistics
YARN_SCHEDULER_PATH = '/ws/v1/cluster/scheduler'
# Metric types
GAUGE = 'gauge'
INCREMENT = 'increment'
# Name of the service check
SERVICE_CHECK_NAME = 'yarn.can_connect'
# Application states to collect
YARN_APPLICATION_STATES = 'RUNNING'
# Cluster metrics identifier
YARN_CLUSTER_METRICS_ELEMENT = 'clusterMetrics'
# Cluster metrics for YARN.
# Each table maps a (possibly dotted) JSON field path from the ResourceManager
# response to a (datadog metric name, metric type) pair.
YARN_CLUSTER_METRICS = {
    'appsSubmitted': ('yarn.metrics.apps_submitted', GAUGE),
    'appsCompleted': ('yarn.metrics.apps_completed', GAUGE),
    'appsPending': ('yarn.metrics.apps_pending', GAUGE),
    'appsRunning': ('yarn.metrics.apps_running', GAUGE),
    'appsFailed': ('yarn.metrics.apps_failed', GAUGE),
    'appsKilled': ('yarn.metrics.apps_killed', GAUGE),
    'reservedMB': ('yarn.metrics.reserved_mb', GAUGE),
    'availableMB': ('yarn.metrics.available_mb', GAUGE),
    'allocatedMB': ('yarn.metrics.allocated_mb', GAUGE),
    'totalMB': ('yarn.metrics.total_mb', GAUGE),
    'reservedVirtualCores': ('yarn.metrics.reserved_virtual_cores', GAUGE),
    'availableVirtualCores': ('yarn.metrics.available_virtual_cores', GAUGE),
    'allocatedVirtualCores': ('yarn.metrics.allocated_virtual_cores', GAUGE),
    'totalVirtualCores': ('yarn.metrics.total_virtual_cores', GAUGE),
    'containersAllocated': ('yarn.metrics.containers_allocated', GAUGE),
    'containersReserved': ('yarn.metrics.containers_reserved', GAUGE),
    'containersPending': ('yarn.metrics.containers_pending', GAUGE),
    'totalNodes': ('yarn.metrics.total_nodes', GAUGE),
    'activeNodes': ('yarn.metrics.active_nodes', GAUGE),
    'lostNodes': ('yarn.metrics.lost_nodes', GAUGE),
    'unhealthyNodes': ('yarn.metrics.unhealthy_nodes', GAUGE),
    'decommissionedNodes': ('yarn.metrics.decommissioned_nodes', GAUGE),
    'rebootedNodes': ('yarn.metrics.rebooted_nodes', GAUGE),
}
# Application metrics for YARN.
# NOTE(review): these are submitted as INCREMENT even though several
# (progress, startedTime, ...) look gauge-like — presumably intentional
# for per-interval aggregation across apps; confirm before changing.
YARN_APP_METRICS = {
    'progress': ('yarn.apps.progress', INCREMENT),
    'startedTime': ('yarn.apps.started_time', INCREMENT),
    'finishedTime': ('yarn.apps.finished_time', INCREMENT),
    'elapsedTime': ('yarn.apps.elapsed_time', INCREMENT),
    'allocatedMB': ('yarn.apps.allocated_mb', INCREMENT),
    'allocatedVCores': ('yarn.apps.allocated_vcores', INCREMENT),
    'runningContainers': ('yarn.apps.running_containers', INCREMENT),
    'memorySeconds': ('yarn.apps.memory_seconds', INCREMENT),
    'vcoreSeconds': ('yarn.apps.vcore_seconds', INCREMENT),
}
# Node metrics for YARN
YARN_NODE_METRICS = {
    'lastHealthUpdate': ('yarn.node.last_health_update', GAUGE),
    'usedMemoryMB': ('yarn.node.used_memory_mb', GAUGE),
    'availMemoryMB': ('yarn.node.avail_memory_mb', GAUGE),
    'usedVirtualCores': ('yarn.node.used_virtual_cores', GAUGE),
    'availableVirtualCores': ('yarn.node.available_virtual_cores', GAUGE),
    'numContainers': ('yarn.node.num_containers', GAUGE),
}
# Root queue metrics for YARN
YARN_ROOT_QUEUE_METRICS = {
    'maxCapacity': ('yarn.queue.root.max_capacity', GAUGE),
    'usedCapacity': ('yarn.queue.root.used_capacity', GAUGE),
    'capacity': ('yarn.queue.root.capacity', GAUGE)
}
# Queue metrics for YARN (dotted keys descend into nested JSON objects)
YARN_QUEUE_METRICS = {
    'numPendingApplications': ('yarn.queue.num_pending_applications', GAUGE),
    'userAMResourceLimit.memory': ('yarn.queue.user_am_resource_limit.memory', GAUGE),
    'userAMResourceLimit.vCores': ('yarn.queue.user_am_resource_limit.vcores', GAUGE),
    'absoluteCapacity': ('yarn.queue.absolute_capacity', GAUGE),
    'userLimitFactor': ('yarn.queue.user_limit_factor', GAUGE),
    'userLimit': ('yarn.queue.user_limit', GAUGE),
    'numApplications': ('yarn.queue.num_applications', GAUGE),
    'usedAMResource.memory': ('yarn.queue.used_am_resource.memory', GAUGE),
    'usedAMResource.vCores': ('yarn.queue.used_am_resource.vcores', GAUGE),
    'absoluteUsedCapacity': ('yarn.queue.absolute_used_capacity', GAUGE),
    'resourcesUsed.memory': ('yarn.queue.resources_used.memory', GAUGE),
    'resourcesUsed.vCores': ('yarn.queue.resources_used.vcores', GAUGE),
    'AMResourceLimit.vCores': ('yarn.queue.am_resource_limit.vcores', GAUGE),
    'AMResourceLimit.memory': ('yarn.queue.am_resource_limit.memory', GAUGE),
    'capacity': ('yarn.queue.capacity', GAUGE),
    'numActiveApplications': ('yarn.queue.num_active_applications', GAUGE),
    'absoluteMaxCapacity': ('yarn.queue.absolute_max_capacity', GAUGE),
    'usedCapacity' : ('yarn.queue.used_capacity', GAUGE),
    'numContainers': ('yarn.queue.num_containers', GAUGE),
    'maxCapacity': ('yarn.queue.max_capacity', GAUGE),
    'maxApplications': ('yarn.queue.max_applications', GAUGE),
    'maxApplicationsPerUser': ('yarn.queue.max_applications_per_user', GAUGE)
}
class YarnCheck(AgentCheck):
    '''
    Extract statistics from YARN's ResourceManager REST API.

    Queries the cluster metrics, apps, nodes and scheduler endpoints and
    submits the values declared in the YARN_*_METRICS mapping tables.
    '''
    # YARN application fields that may be mapped to datadog tags via the
    # `application_tags` instance option; anything else is silently dropped.
    _ALLOWED_APPLICATION_TAGS = [
        'applicationTags',
        'applicationType',
        'name',
        'queue',
        'user'
    ]
    def check(self, instance):
        '''
        Entry point: read the instance configuration and collect all metrics.
        '''
        # Get properties from conf file
        rm_address = instance.get('resourcemanager_uri', DEFAULT_RM_URI)
        app_tags = instance.get('application_tags', {})
        queue_blacklist = instance.get('queue_blacklist', [])
        if type(app_tags) is not dict:
            self.log.error('application_tags is incorrect: %s is not a dictionary', app_tags)
            app_tags = {}
        # Keep only tag mappings that reference whitelisted YARN fields.
        filtered_app_tags = {}
        for dd_prefix, yarn_key in app_tags.iteritems():
            if yarn_key in self._ALLOWED_APPLICATION_TAGS:
                filtered_app_tags[dd_prefix] = yarn_key
        app_tags = filtered_app_tags
        # Collected by default
        app_tags['app_name'] = 'name'
        # Get additional tags from the conf file (deduplicated)
        tags = instance.get('tags', [])
        if tags is None:
            tags = []
        else:
            tags = list(set(tags))
        # Get the cluster name from the conf file
        cluster_name = instance.get('cluster_name')
        if cluster_name is None:
            self.warning("The cluster_name must be specified in the instance configuration, defaulting to '%s'" % (DEFAULT_CUSTER_NAME))
            cluster_name = DEFAULT_CUSTER_NAME
        tags.append('cluster_name:%s' % cluster_name)
        # Get metrics from the Resource Manager
        self._yarn_cluster_metrics(rm_address, tags)
        if _is_affirmative(instance.get('collect_app_metrics', DEFAULT_COLLECT_APP_METRICS)):
            self._yarn_app_metrics(rm_address, app_tags, tags)
        self._yarn_node_metrics(rm_address, tags)
        self._yarn_scheduler_metrics(rm_address, tags, queue_blacklist)
    def _yarn_cluster_metrics(self, rm_address, addl_tags):
        '''
        Get metrics related to YARN cluster
        '''
        metrics_json = self._rest_request_to_json(rm_address, YARN_CLUSTER_METRICS_PATH)
        if metrics_json:
            yarn_metrics = metrics_json[YARN_CLUSTER_METRICS_ELEMENT]
            if yarn_metrics is not None:
                self._set_yarn_metrics_from_json(addl_tags, yarn_metrics, YARN_CLUSTER_METRICS)
    def _yarn_app_metrics(self, rm_address, app_tags, addl_tags):
        '''
        Get metrics for running applications, tagged per `app_tags` mapping.
        '''
        metrics_json = self._rest_request_to_json(
            rm_address,
            YARN_APPS_PATH,
            states=YARN_APPLICATION_STATES
        )
        if (metrics_json and metrics_json['apps'] is not None and
                metrics_json['apps']['app'] is not None):
            for app_json in metrics_json['apps']['app']:
                tags = []
                for dd_tag, yarn_key in app_tags.iteritems():
                    try:
                        # Falsy values (empty string, 0) are deliberately skipped.
                        val = app_json[yarn_key]
                        if val:
                            tags.append("{tag}:{value}".format(
                                tag=dd_tag, value=val
                            ))
                    except KeyError:
                        self.log.error("Invalid value %s for application_tag", yarn_key)
                tags.extend(addl_tags)
                self._set_yarn_metrics_from_json(tags, app_json, YARN_APP_METRICS)
    def _yarn_node_metrics(self, rm_address, addl_tags):
        '''
        Get metrics related to YARN nodes, tagged with node_id.
        '''
        metrics_json = self._rest_request_to_json(rm_address, YARN_NODES_PATH)
        if (metrics_json and metrics_json['nodes'] is not None and
                metrics_json['nodes']['node'] is not None):
            for node_json in metrics_json['nodes']['node']:
                node_id = node_json['id']
                tags = ['node_id:%s' % str(node_id)]
                tags.extend(addl_tags)
                self._set_yarn_metrics_from_json(tags, node_json, YARN_NODE_METRICS)
    def _yarn_scheduler_metrics(self, rm_address, addl_tags, queue_blacklist):
        '''
        Get metrics from YARN scheduler.

        Only the capacity scheduler is supported; any missing key in the
        response silently disables scheduler metrics.
        '''
        metrics_json = self._rest_request_to_json(rm_address, YARN_SCHEDULER_PATH)
        try:
            metrics_json = metrics_json['scheduler']['schedulerInfo']
            if metrics_json['type'] == 'capacityScheduler':
                self._yarn_capacity_scheduler_metrics(metrics_json, addl_tags, queue_blacklist)
        except KeyError:
            pass
    def _yarn_capacity_scheduler_metrics(self, metrics_json, addl_tags, queue_blacklist):
        '''
        Get metrics from YARN scheduler if it's type is capacityScheduler.

        Emits root-queue metrics, then per-queue metrics for up to
        MAX_DETAILED_QUEUES non-blacklisted queues.
        '''
        tags = ['queue_name:%s' % metrics_json['queueName']]
        tags.extend(addl_tags)
        self._set_yarn_metrics_from_json(tags, metrics_json, YARN_ROOT_QUEUE_METRICS)
        if metrics_json['queues'] is not None and metrics_json['queues']['queue'] is not None:
            queues_count = 0
            for queue_json in metrics_json['queues']['queue']:
                queue_name = queue_json['queueName']
                if queue_name in queue_blacklist:
                    self.log.debug('Queue "%s" is blacklisted. Ignoring it' % queue_name)
                    continue
                queues_count += 1
                if queues_count > MAX_DETAILED_QUEUES:
                    self.warning("Found more than 100 queues, will only send metrics on first 100 queues. " +
                                 " Please filter the queues with the check's `queue_blacklist` parameter")
                    break
                tags = ['queue_name:%s' % str(queue_name)]
                tags.extend(addl_tags)
                self._set_yarn_metrics_from_json(tags, queue_json, YARN_QUEUE_METRICS)
    def _set_yarn_metrics_from_json(self, tags, metrics_json, yarn_metrics):
        '''
        Parse the JSON response and set the metrics declared in `yarn_metrics`.
        '''
        for dict_path, metric in yarn_metrics.iteritems():
            metric_name, metric_type = metric
            metric_value = self._get_value_from_json(dict_path, metrics_json)
            if metric_value is not None:
                self._set_metric(metric_name,
                                 metric_type,
                                 metric_value,
                                 tags)
    def _get_value_from_json(self, dict_path, metrics_json):
        '''
        Get a value from a dictionary under N keys, represented as str("key1.key2...key{n}")

        Returns None when any key along the path is missing.
        '''
        for key in dict_path.split('.'):
            if key in metrics_json:
                metrics_json = metrics_json.get(key)
            else:
                return None
        return metrics_json
    def _set_metric(self, metric_name, metric_type, value, tags=None, device_name=None):
        '''
        Set a metric, dispatching on the GAUGE/INCREMENT type marker.
        '''
        if metric_type == GAUGE:
            self.gauge(metric_name, value, tags=tags, device_name=device_name)
        elif metric_type == INCREMENT:
            self.increment(metric_name, value, tags=tags, device_name=device_name)
        else:
            self.log.error('Metric type "%s" unknown', metric_type)
    def _rest_request_to_json(self, address, object_path, *args, **kwargs):
        '''
        Query the given URL and return the JSON response.

        Positional args are appended as path segments, keyword args become
        the query string. Emits the yarn.can_connect service check (CRITICAL
        on failure, OK on success) and re-raises any request/parse error.
        '''
        response_json = None
        service_check_tags = ['url:%s' % self._get_url_base(address)]
        url = address
        if object_path:
            url = self._join_url_dir(url, object_path)
        # Add args to the url
        if args:
            for directory in args:
                url = self._join_url_dir(url, directory)
        self.log.debug('Attempting to connect to "%s"' % url)
        # Add kwargs as arguments
        if kwargs:
            query = '&'.join(['{0}={1}'.format(key, value) for key, value in kwargs.iteritems()])
            url = urljoin(url, '?' + query)
        try:
            response = requests.get(url, timeout=self.default_integration_http_timeout)
            response.raise_for_status()
            response_json = response.json()
        except Timeout as e:
            self.service_check(SERVICE_CHECK_NAME,
                               AgentCheck.CRITICAL,
                               tags=service_check_tags,
                               message="Request timeout: {0}, {1}".format(url, e))
            raise
        except (HTTPError,
                InvalidURL,
                ConnectionError) as e:
            self.service_check(SERVICE_CHECK_NAME,
                               AgentCheck.CRITICAL,
                               tags=service_check_tags,
                               message="Request failed: {0}, {1}".format(url, e))
            raise
        except ValueError as e:
            # ValueError comes from response.json() on a non-JSON body.
            self.service_check(SERVICE_CHECK_NAME,
                               AgentCheck.CRITICAL,
                               tags=service_check_tags,
                               message=str(e))
            raise
        else:
            self.service_check(SERVICE_CHECK_NAME,
                               AgentCheck.OK,
                               tags=service_check_tags,
                               message='Connection to %s was successful' % url)
        return response_json
    def _join_url_dir(self, url, *args):
        '''
        Join a URL with multiple directories, normalizing slashes.
        '''
        for path in args:
            url = url.rstrip('/') + '/'
            url = urljoin(url, path.lstrip('/'))
        return url
    def _get_url_base(self, url):
        '''
        Return the base (scheme + host:port) of a URL.
        '''
        s = urlsplit(url)
        return urlunsplit([s.scheme, s.netloc, '', '', ''])
|
|
r"""An implementation of the Web Site Process Bus.
This module is completely standalone, depending only on the stdlib.
Web Site Process Bus
--------------------
A Bus object is used to contain and manage site-wide behavior:
daemonization, HTTP server start/stop, process reload, signal handling,
drop privileges, PID file management, logging for all of these,
and many more.
In addition, a Bus object provides a place for each web framework
to register code that runs in response to site-wide events (like
process start and stop), or which controls or otherwise interacts with
the site-wide components mentioned above. For example, a framework which
uses file-based templates would add known template filenames to an
autoreload component.
Ideally, a Bus object will be flexible enough to be useful in a variety
of invocation scenarios:
1. The deployer starts a site from the command line via a
framework-neutral deployment script; applications from multiple frameworks
are mixed in a single site. Command-line arguments and configuration
files are used to define site-wide components such as the HTTP server,
WSGI component graph, autoreload behavior, signal handling, etc.
2. The deployer starts a site via some other process, such as Apache;
applications from multiple frameworks are mixed in a single site.
Autoreload and signal handling (from Python at least) are disabled.
3. The deployer starts a site via a framework-specific mechanism;
for example, when running tests, exploring tutorials, or deploying
single applications from a single framework. The framework controls
which site-wide components are enabled as it sees fit.
The Bus object in this package uses topic-based publish-subscribe
messaging to accomplish all this. A few topic channels are built in
('start', 'stop', 'exit', 'graceful', 'log', and 'main'). Frameworks and
site containers are free to define their own. If a message is sent to a
channel that has not been defined or has no listeners, there is no effect.
In general, there should only ever be a single Bus object per process.
Frameworks and site containers share a single Bus object by publishing
messages and subscribing listeners.
The Bus object works as a finite state machine which models the current
state of the process. Bus methods move it from one state to another;
those methods then publish to subscribed listeners on the channel for
the new state.::
O
|
V
STOPPING --> STOPPED --> EXITING -> X
A A |
| \___ |
| \ |
| V V
STARTED <-- STARTING
"""
import atexit
try:
import ctypes
except ImportError:
"""Google AppEngine is shipped without ctypes
:seealso: http://stackoverflow.com/a/6523777/70170
"""
ctypes = None
import operator
import os
import sys
import threading
import time
import traceback as _traceback
import warnings
import subprocess
import functools
from more_itertools import always_iterable
# Here I save the value of os.getcwd(), which, if I am imported early enough,
# will be the directory from which the startup script was run. This is needed
# by _do_execv(), to change back to the original directory before execv()ing a
# new process. This is a defense against the application having changed the
# current working directory (which could make sys.executable "not found" if
# sys.executable is a relative-path, and/or cause other problems).
# Captured once at import time; _do_execv() chdirs back here before exec.
_startup_cwd = os.getcwd()
class ChannelFailures(Exception):
    """Aggregate exception collecting errors raised by Bus.publish() listeners."""

    delimiter = '\n'

    def __init__(self, *args, **kwargs):
        """Create an empty failure collection."""
        super(ChannelFailures, self).__init__(*args, **kwargs)
        self._exceptions = []

    def handle_exception(self):
        """Record the exception instance currently being handled."""
        current = sys.exc_info()[1]
        self._exceptions.append(current)

    def get_instances(self):
        """Return a shallow copy of the recorded exception instances."""
        return list(self._exceptions)

    def __str__(self):
        """Join the repr of every recorded error with ``delimiter``."""
        rendered = [repr(exc) for exc in self.get_instances()]
        return self.delimiter.join(rendered)

    __repr__ = __str__

    def __bool__(self):
        """Return True when at least one error has been recorded."""
        return bool(self._exceptions)

    __nonzero__ = __bool__
# Use a flag to indicate the state of the bus.
class _StateEnum(object):
class State(object):
name = None
def __repr__(self):
return 'states.%s' % self.name
def __setattr__(self, key, value):
if isinstance(value, self.State):
value.name = key
object.__setattr__(self, key, value)
# Module-level singleton of bus states; _StateEnum.__setattr__ names each
# State instance after the attribute it is assigned to.
states = _StateEnum()
states.STOPPED = states.State()
states.STARTING = states.State()
states.STARTED = states.State()
states.STOPPING = states.State()
states.EXITING = states.State()
# Determine the file-descriptor ceiling used for CLOEXEC handling on exec.
# No fcntl (e.g. Windows) -> 0 disables it; no SC_OPEN_MAX -> assume 1024.
try:
    import fcntl
except ImportError:
    max_files = 0
else:
    try:
        max_files = os.sysconf('SC_OPEN_MAX')
    except AttributeError:
        max_files = 1024
class Bus(object):
"""Process state-machine and messenger for HTTP site deployment.
All listeners for a given channel are guaranteed to be called even
if others at the same channel fail. Each failure is logged, but
execution proceeds on to the next listener. The only way to stop all
processing from inside a listener is to raise SystemExit and stop the
whole server.
"""
states = states
state = states.STOPPED
execv = False
max_cloexec_files = max_files
def __init__(self):
"""Initialize pub/sub bus."""
self.execv = False
self.state = states.STOPPED
channels = 'start', 'stop', 'exit', 'graceful', 'log', 'main'
self.listeners = dict(
(channel, set())
for channel in channels
)
self._priorities = {}
def subscribe(self, channel, callback=None, priority=None):
"""Add the given callback at the given channel (if not present).
If callback is None, return a partial suitable for decorating
the callback.
"""
if callback is None:
return functools.partial(
self.subscribe,
channel,
priority=priority,
)
ch_listeners = self.listeners.setdefault(channel, set())
ch_listeners.add(callback)
if priority is None:
priority = getattr(callback, 'priority', 50)
self._priorities[(channel, callback)] = priority
def unsubscribe(self, channel, callback):
"""Discard the given callback (if present)."""
listeners = self.listeners.get(channel)
if listeners and callback in listeners:
listeners.discard(callback)
del self._priorities[(channel, callback)]
    def publish(self, channel, *args, **kwargs):
        """Return output of all subscribers for the given channel.

        Listeners run in ascending priority order. Ordinary exceptions are
        collected and re-raised together as ChannelFailures after all
        listeners have run; KeyboardInterrupt and SystemExit propagate
        immediately.
        """
        if channel not in self.listeners:
            return []
        exc = ChannelFailures()
        output = []
        raw_items = (
            (self._priorities[(channel, listener)], listener)
            for listener in self.listeners[channel]
        )
        items = sorted(raw_items, key=operator.itemgetter(0))
        for priority, listener in items:
            try:
                output.append(listener(*args, **kwargs))
            except KeyboardInterrupt:
                raise
            except SystemExit:
                e = sys.exc_info()[1]
                # If we have previous errors ensure the exit code is non-zero
                if exc and e.code == 0:
                    e.code = 1
                raise
            except Exception:
                exc.handle_exception()
                if channel == 'log':
                    # Assume any further messages to 'log' will fail.
                    # Avoids infinite recursion: logging the failure would
                    # publish to 'log' again.
                    pass
                else:
                    self.log('Error in %r listener %r' % (channel, listener),
                             level=40, traceback=True)
        if exc:
            raise exc
        return output
    def _clean_exit(self):
        """Assert that the Bus is not running in atexit handler callback.

        Registered via atexit in start(); if the process exits while the bus
        is still live, warn and shut it down automatically.
        """
        if self.state != states.EXITING:
            warnings.warn(
                'The main thread is exiting, but the Bus is in the %r state; '
                'shutting it down automatically now. You must either call '
                'bus.block() after start(), or call bus.exit() before the '
                'main thread exits.' % self.state, RuntimeWarning)
            self.exit()
    def start(self):
        """Start all services.

        Publishes to the 'start' channel; if any start listener fails, the
        bus is shut down and the original error re-raised.
        """
        atexit.register(self._clean_exit)
        self.state = states.STARTING
        self.log('Bus STARTING')
        try:
            self.publish('start')
            self.state = states.STARTED
            self.log('Bus STARTED')
        except (KeyboardInterrupt, SystemExit):
            raise
        except Exception:
            self.log('Shutting down due to error in start listener:',
                     level=40, traceback=True)
            # Capture the instance before exit() can clobber sys.exc_info.
            e_info = sys.exc_info()[1]
            try:
                self.exit()
            except Exception:
                # Any stop/exit errors will be logged inside publish().
                pass
            # Re-raise the original error
            raise e_info
    def exit(self):
        """Stop all services and prepare to exit the process.

        Falls back to os._exit(70) (EX_SOFTWARE) if shutdown itself raises,
        or if exit() was invoked while start() was still in progress.
        """
        exitstate = self.state
        EX_SOFTWARE = 70
        try:
            self.stop()
            self.state = states.EXITING
            self.log('Bus EXITING')
            self.publish('exit')
            # This isn't strictly necessary, but it's better than seeing
            # "Waiting for child threads to terminate..." and then nothing.
            self.log('Bus EXITED')
        except Exception:
            # This method is often called asynchronously (whether thread,
            # signal handler, console handler, or atexit handler), so we
            # can't just let exceptions propagate out unhandled.
            # Assume it's been logged and just die.
            os._exit(EX_SOFTWARE)
        if exitstate == states.STARTING:
            # exit() was called before start() finished, possibly due to
            # Ctrl-C because a start listener got stuck. In this case,
            # we could get stuck in a loop where Ctrl-C never exits the
            # process, so we just call os.exit here.
            os._exit(EX_SOFTWARE)
    def restart(self):
        """Restart the process (may close connections).

        This method does not restart the process from the calling thread;
        instead, it stops the bus and asks the main thread to call execv.
        """
        # block() in the main thread checks self.execv after shutdown
        # and performs the actual _do_execv() there.
        self.execv = True
        self.exit()
    def graceful(self):
        """Advise all services to reload by publishing to 'graceful'."""
        self.log('Bus graceful')
        self.publish('graceful')
    def block(self, interval=0.1):
        """Wait for the EXITING state, KeyboardInterrupt or SystemExit.

        This function is intended to be called only by the main thread.
        After waiting for the EXITING state, it also waits for all threads
        to terminate, and then calls os.execv if self.execv is True. This
        design allows another thread to call bus.restart, yet have the main
        thread perform the actual execv call (required on some platforms).
        """
        try:
            self.wait(states.EXITING, interval=interval, channel='main')
        except (KeyboardInterrupt, IOError):
            # The time.sleep call might raise
            # "IOError: [Errno 4] Interrupted function call" on KBInt.
            self.log('Keyboard Interrupt: shutting down bus')
            self.exit()
        except SystemExit:
            self.log('SystemExit raised: shutting down bus')
            self.exit()
            raise
        # Waiting for ALL child threads to finish is necessary on OS X.
        # See https://github.com/cherrypy/cherrypy/issues/581.
        # It's also good to let them all shut down before allowing
        # the main thread to call atexit handlers.
        # See https://github.com/cherrypy/cherrypy/issues/751.
        self.log('Waiting for child threads to terminate...')
        for t in threading.enumerate():
            # Validate the we're not trying to join the MainThread
            # that will cause a deadlock and the case exist when
            # implemented as a windows service and in any other case
            # that another thread executes cherrypy.engine.exit()
            if (
                    t != threading.currentThread() and
                    not isinstance(t, threading._MainThread) and
                    # Note that any dummy (external) threads are
                    # always daemonic.
                    not t.daemon
            ):
                self.log('Waiting for thread %s.' % t.getName())
                t.join()
        if self.execv:
            self._do_execv()
def wait(self, state, interval=0.1, channel=None):
"""Poll for the given state(s) at intervals; publish to channel."""
states = set(always_iterable(state))
while self.state not in states:
time.sleep(interval)
self.publish(channel)
    def _do_execv(self):
        """Re-execute the current process.
        This must be called from the main thread, because certain platforms
        (OS X) don't allow execv to be called in a child thread very well.
        """
        try:
            args = self._get_true_argv()
        except NotImplementedError:
            """It's probably win32 or GAE"""
            args = [sys.executable] + self._get_interpreter_argv() + sys.argv
        self.log('Re-spawning %s' % ' '.join(args))
        self._extend_pythonpath(os.environ)
        if sys.platform[:4] == 'java':
            # Jython cannot execv; delegate to its SystemRestart mechanism.
            from _systemrestart import SystemRestart
            raise SystemRestart
        else:
            if sys.platform == 'win32':
                # Windows requires each argument to be quoted explicitly.
                args = ['"%s"' % arg for arg in args]
            # Re-exec from the directory the process was started in.
            os.chdir(_startup_cwd)
            if self.max_cloexec_files:
                # Prevent open descriptors leaking into the new process.
                self._set_cloexec()
            os.execv(sys.executable, args)
@staticmethod
def _get_interpreter_argv():
"""Retrieve current Python interpreter's arguments.
Returns empty tuple in case of frozen mode, uses built-in arguments
reproduction function otherwise.
Frozen mode is possible for the app has been packaged into a binary
executable using py2exe. In this case the interpreter's arguments are
already built-in into that executable.
:seealso: https://github.com/cherrypy/cherrypy/issues/1526
Ref: https://pythonhosted.org/PyInstaller/runtime-information.html
"""
return ([]
if getattr(sys, 'frozen', False)
else subprocess._args_from_interpreter_flags())
    @staticmethod
    def _get_true_argv():
        """Retrieve all real arguments of the python interpreter.
        ...even those not listed in ``sys.argv``
        :seealso: http://stackoverflow.com/a/28338254/595220
        :seealso: http://stackoverflow.com/a/6683222/595220
        :seealso: http://stackoverflow.com/a/28414807/595220
        """
        try:
            # Ask CPython for the raw argc/argv it was launched with.
            char_p = ctypes.c_wchar_p
            argv = ctypes.POINTER(char_p)()
            argc = ctypes.c_int()
            ctypes.pythonapi.Py_GetArgcArgv(
                ctypes.byref(argc),
                ctypes.byref(argv),
            )
            _argv = argv[:argc.value]
            # The code below is trying to correctly handle special cases.
            # `-c`'s argument interpreted by Python itself becomes `-c` as
            # well. Same applies to `-m`. This snippet is trying to survive
            # at least the case with `-m`
            # Ref: https://github.com/cherrypy/cherrypy/issues/1545
            # Ref: python/cpython@418baf9
            argv_len, is_command, is_module = len(_argv), False, False
            try:
                m_ind = _argv.index('-m')
                if m_ind < argv_len - 1 and _argv[m_ind + 1] in ('-c', '-m'):
                    """
                    In some older Python versions `-m`'s argument may be
                    substituted with `-c`, not `-m`
                    """
                    is_module = True
            except (IndexError, ValueError):
                m_ind = None
            try:
                c_ind = _argv.index('-c')
                if c_ind < argv_len - 1 and _argv[c_ind + 1] == '-c':
                    is_command = True
            except (IndexError, ValueError):
                c_ind = None
            if is_module:
                """It's containing `-m -m` sequence of arguments"""
                if is_command and c_ind < m_ind:
                    """There's `-c -c` before `-m`"""
                    raise RuntimeError(
                        "Cannot reconstruct command from '-c'. Ref: "
                        'https://github.com/cherrypy/cherrypy/issues/1545')
                # Survive module argument here
                original_module = sys.argv[0]
                if not os.access(original_module, os.R_OK):
                    """There's no such module exist"""
                    raise AttributeError(
                        "{} doesn't seem to be a module "
                        'accessible by current user'.format(original_module))
                del _argv[m_ind:m_ind + 2]  # remove `-m -m`
                # ... and substitute it with the original module path:
                _argv.insert(m_ind, original_module)
            elif is_command:
                """It's containing just `-c -c` sequence of arguments"""
                raise RuntimeError(
                    "Cannot reconstruct command from '-c'. "
                    'Ref: https://github.com/cherrypy/cherrypy/issues/1545')
        except AttributeError:
            """It looks Py_GetArgcArgv is completely absent in some environments
            It is known, that there's no Py_GetArgcArgv in MS Windows and
            ``ctypes`` module is completely absent in Google AppEngine
            :seealso: https://github.com/cherrypy/cherrypy/issues/1506
            :seealso: https://github.com/cherrypy/cherrypy/issues/1512
            :ref: http://bit.ly/2gK6bXK
            """
            raise NotImplementedError
        else:
            # Only reached when no exception occurred above.
            return _argv
@staticmethod
def _extend_pythonpath(env):
"""Prepend current working dir to PATH environment variable if needed.
If sys.path[0] is an empty string, the interpreter was likely
invoked with -m and the effective path is about to change on
re-exec. Add the current directory to $PYTHONPATH to ensure
that the new process sees the same path.
This issue cannot be addressed in the general case because
Python cannot reliably reconstruct the
original command line (http://bugs.python.org/issue14208).
(This idea filched from tornado.autoreload)
"""
path_prefix = '.' + os.pathsep
existing_path = env.get('PYTHONPATH', '')
needs_patch = (
sys.path[0] == '' and
not existing_path.startswith(path_prefix)
)
if needs_patch:
env['PYTHONPATH'] = path_prefix + existing_path
def _set_cloexec(self):
"""Set the CLOEXEC flag on all open files (except stdin/out/err).
If self.max_cloexec_files is an integer (the default), then on
platforms which support it, it represents the max open files setting
for the operating system. This function will be called just before
the process is restarted via os.execv() to prevent open files
from persisting into the new process.
Set self.max_cloexec_files to 0 to disable this behavior.
"""
for fd in range(3, self.max_cloexec_files): # skip stdin/out/err
try:
flags = fcntl.fcntl(fd, fcntl.F_GETFD)
except IOError:
continue
fcntl.fcntl(fd, fcntl.F_SETFD, flags | fcntl.FD_CLOEXEC)
def stop(self):
"""Stop all services."""
self.state = states.STOPPING
self.log('Bus STOPPING')
self.publish('stop')
self.state = states.STOPPED
self.log('Bus STOPPED')
def start_with_callback(self, func, args=None, kwargs=None):
"""Start 'func' in a new thread T, then start self (and return T)."""
if args is None:
args = ()
if kwargs is None:
kwargs = {}
args = (func,) + args
def _callback(func, *a, **kw):
self.wait(states.STARTED)
func(*a, **kw)
t = threading.Thread(target=_callback, args=args, kwargs=kwargs)
t.setName('Bus Callback ' + t.getName())
t.start()
self.start()
return t
def log(self, msg='', level=20, traceback=False):
"""Log the given message. Append the last traceback if requested."""
if traceback:
msg += '\n' + ''.join(_traceback.format_exception(*sys.exc_info()))
self.publish('log', msg, level)
bus = Bus()
|
|
"""
These MathApp-based digital logic classes are experimental.
"""
from abc import ABCMeta, abstractmethod
from ggame.mathapp import _MathDynamic
# decorator for _getvalue or any value handler that may experience recursion
def _recursiontrap(handler):
    """Guard a value handler against runaway recursion.

    Wraps *handler* so that a re-entrant call (the handler directly or
    indirectly triggering itself while it is still executing) returns the
    previously computed value (``self.lastget``) instead of recursing.

    The decorated object must provide two attributes: ``ingetvalue``
    (boolean re-entry flag) and ``lastget`` (cached result).
    """
    def trapmagic(self):
        """Return handler(self), or the cached value when re-entered."""
        if self.ingetvalue:
            # Re-entered while a computation is in flight: break the
            # cycle by handing back the cached value.  (Do NOT clear the
            # flag here -- the outermost frame owns it.)
            return self.lastget
        self.ingetvalue = True
        try:
            self.lastget = handler(self)
        finally:
            # Clear the flag even when the handler raises, so one failed
            # evaluation cannot permanently poison later calls.  (The
            # original left the flag stuck at True on error, and reset it
            # to False on re-entry, which allowed deeper recursion.)
            self.ingetvalue = False
        return self.lastget
    return trapmagic
class _BoolDevice(_MathDynamic, metaclass=ABCMeta):
    """
    Base class for boolean objects.
    :Required Arguments:
    :param int mininputqty: The minimum number of inputs possible.
    :Optional Keyword Arguments:
    * **namedinputs** (*list[str]*) List of input names.
    """

    def __init__(self, mininputqty, **kwargs):
        # Backing attributes must exist BEFORE the property assignments
        # below.  (The original assigned them to None AFTER ``self.inp``
        # and ``self.enable``, silently clobbering the values the
        # property setters had just stored.)
        self._enable = None
        self._input = None
        self.inp = [None] * mininputqty
        self.enable = True
        namedinputs = kwargs.get("namedinputs", [])
        self._indict = {name: self.eval(None) for name in namedinputs}
        self.ingetvalue = False   # re-entry flag used by _recursiontrap
        self.lastget = False      # cached result used by _recursiontrap
        self.resetval = False
        self.firsttime = True
        super().__init__()

    @property
    def inp(self):
        """
        Report the list of input references.
        """
        return self._input

    @inp.setter
    def inp(self, val):
        # Accept either an iterable of inputs or a single scalar input.
        try:
            self._input = [self.eval(v) for v in list(val)]
        except TypeError:
            self._input = [self.eval(val)]

    # Enable attribute controls the "tri-state" of output
    @property
    def enable(self):
        """
        Report the enable state of the object.
        """
        return self._enable

    @enable.setter
    def enable(self, val):
        self._enable = self.eval(val)

    @abstractmethod
    @_recursiontrap  # MUST use with any implementation that may recurse!
    def _getvalue(self):
        return None

    @staticmethod
    def _inputState(value):
        """
        Interpret a value that could be a single input or a list of inputs.

        :return: True/False when the inputs agree, None when every input
            is floating (open).
        :raise ValueError: when inputs conflict (both 1s and 0s present).
        """
        try:
            # list() copies an iterable of inputs; a bare (non-iterable)
            # input raises TypeError and is wrapped in a list instead.
            # (The original used ``[].extend(value)``, which returns None
            # and crashed on iterable input.)
            inputs = list(value)
        except TypeError:
            inputs = [value]
        scalars = [v() for v in inputs]
        ones = scalars.count(True) + scalars.count(1)
        zeros = scalars.count(False) + scalars.count(0)
        if ones > 0 and zeros > 0:
            raise ValueError("Conflicting inputs found")
        if ones > 0:
            return True
        if zeros > 0:
            return False
        return None

    def __call__(self):
        # Honor tri-state: a disabled device reports None (disconnected).
        if self.enable:
            return self._getvalue()
        return None

    def getinput(self, inputname):
        """
        Retrieve input by name.
        :param str inputname: Name to look up.
        """
        return self._inputState(self._indict[inputname])

    def setinput(self, inputname, reference):
        """
        Set an input connection.
        :param str inputname: Name to assign.
        :param function reference: Callable object or function connected to input.
        """
        self._indict[inputname] = self.eval(reference)
class _BoolOneInput(_BoolDevice):
    """
    Base class for one-input boolean objects. No required inputs.
    """
    def __init__(self, *args, **kwargs):
        # Reserve exactly one input slot (mininputqty=1).
        super().__init__(1, *args, **kwargs)
    @abstractmethod
    @_recursiontrap  # MUST use with any implementation that may recurse!
    def _getvalue(self):
        return None
class _BoolMultiInput(_BoolDevice):
    """
    Base class for multiple-input boolean objects. No required inputs.
    """
    def __init__(self, *args, **kwargs):
        # Reserve two input slots by default (mininputqty=2).
        super().__init__(2, *args, **kwargs)
    @abstractmethod
    @_recursiontrap  # MUST use with any implementation that may recurse!
    def _getvalue(self):
        return None
class BoolNOT(_BoolOneInput):
    """
    Logical NOT boolean gate (inverter).
    """

    @_recursiontrap
    def _getvalue(self):
        state = self._inputState(self.inp[0])
        # An unconnected ("open") input reads None and floats high.
        return True if state is None else not state
class BoolAND(_BoolMultiInput):
    """
    Logical AND boolean gate. Multiple inputs.
    """

    @_recursiontrap
    def _getvalue(self):
        # True only when every input reads truthy; None (open) counts as
        # falsy, matching the original element-by-element short circuit.
        return all(self._inputState(term) for term in self._input)
class BoolNOR(_BoolMultiInput):
    """
    Logical NOR boolean gate. Multiple inputs.
    """

    @_recursiontrap
    def _getvalue(self):
        # True only when no input reads truthy.
        return not any(self._inputState(term) for term in self._input)
class BoolNAND(_BoolMultiInput):
    """
    Logical NAND boolean gate. Multiple inputs.
    """

    @_recursiontrap
    def _getvalue(self):
        # False only when every input reads truthy.
        return not all(self._inputState(term) for term in self._input)
class BoolSRFF(_BoolOneInput):
    """
    Logical Set/Reset (SR) FlipFlop built from two cross-coupled gates.
    :Optional Keyword Arguments:
    :param class gateclass: One of BoolNAND or BoolNOR (default).
    """

    def __init__(self, *args, **kwargs):
        kwargs["namedinputs"] = ["R", "S"]
        super().__init__(*args, **kwargs)
        gate = kwargs.get("gateclass", BoolNOR)
        self.ic1 = gate()
        self.ic2 = gate()

    # we can only assign IC1 and IC2 inputs when this device's inputs are set
    def setinput(self, inputname, reference):
        """Connect the R or S input and cross-couple the internal gates."""
        super().setinput(inputname, reference)
        # Wire through the ``inp`` property defined on _BoolDevice.
        # (The original assigned to a nonexistent ``In`` attribute, which
        # silently left the internal gates disconnected.)
        if inputname == "R":
            self.ic1.inp = reference, self.ic2
        elif inputname == "S":
            self.ic2.inp = reference, self.ic1

    def _getvalue(self):
        return self.ic1()

    # pylint: disable=invalid-name
    def q_(self):
        """
        Report value of Q_ output.
        """
        return self.ic2()

    # pylint: disable=invalid-name
    def q(self):
        """
        Report value of Q output.
        """
        return self._getvalue()
|
|
# -*- coding: utf-8 -*-
#
# This file is part of GetTor, a Tor Browser distribution system.
#
# :authors: Israel Leiva <ilv@riseup.net>
# see also AUTHORS file
#
# :copyright: (c) 2008-2015, The Tor Project, Inc.
# (c) 2015, Israel Leiva
#
# :license: This is Free Software. See LICENSE for license information.
import os
import re
import sys
import time
import gettext
import hashlib
import logging
import ConfigParser
from sleekxmpp import ClientXMPP
from sleekxmpp.xmlstream.stanzabase import JID
from sleekxmpp.exceptions import IqError, IqTimeout
import core
import utils
import blacklist
"""XMPP module for processing requests."""
# Map of OS keyword (as parsed from user requests) to the human-readable
# name used when formatting the 'links' reply.
OS = {
    'osx': 'Mac OS X',
    'linux': 'Linux',
    'windows': 'Windows'
}
class ConfigError(Exception):
    """Raised when the configuration file is missing or malformed."""
    pass
class InternalError(Exception):
    """Raised when a dependent subsystem (core, blacklist) fails."""
    pass
class Bot(ClientXMPP):
    """XMPP bot.
    Handle messages and pass them to XMPP module for parsing.
    """

    def __init__(self, jid, password, xmpp_obj):
        ClientXMPP.__init__(self, jid, password)
        # Back-reference to the XMPP controller that parses requests.
        self.xmpp = xmpp_obj
        self.add_event_handler("session_start", self.session_start)
        self.add_event_handler("message", self.message)

    def session_start(self, event):
        """Announce presence and fetch the roster when the session starts.

        The original called get_roster() twice -- once unguarded before
        the try block, which defeated the IqError/IqTimeout handling; the
        duplicate unguarded call has been removed.
        """
        self.send_presence()
        try:
            self.get_roster()
        except IqError as err:
            # error getting the roster
            self.xmpp.log.error(err.iq['error']['condition'])
            self.disconnect()
        except IqTimeout:
            # server is taking too long to respond
            self.xmpp.log.error("Server is taking too long to respond")
            self.disconnect()

    def message(self, msg):
        """Answer chat messages via the controller's parse_request()."""
        if msg['type'] in ('chat', 'normal'):
            msg_to_send = self.xmpp.parse_request(msg['from'], msg['body'])
            # An empty reply (e.g. blacklisted sender) is silently dropped.
            if msg_to_send:
                msg.reply(msg_to_send).send()
class XMPP(object):
    """Receive and reply requests by XMPP.
    Public methods:
    parse_request(): parses a message and tries to figure out what the user
    is asking for.
    Exceptions:
    ConfigError: Bad configuration.
    InternalError: Something went wrong internally.
    """

    def __init__(self, cfg=None):
        """Create new object by reading a configuration file.
        :param: cfg (string) the path of the configuration file.
        :raise: ConfigError if the configuration file is missing/invalid.
        :raise: InternalError if the blacklist or core modules fail.
        """
        # define a set of default values
        default_cfg = 'xmpp.cfg'
        config = ConfigParser.ConfigParser()
        if cfg is None or not os.path.isfile(cfg):
            cfg = default_cfg
        try:
            with open(cfg) as f:
                config.readfp(f)
        except IOError:
            raise ConfigError("File %s not found!" % cfg)
        try:
            self.user = config.get('account', 'user')
            self.password = config.get('account', 'password')
            self.mirrors = config.get('general', 'mirrors')
            self.max_words = config.get('general', 'max_words')
            self.max_words = int(self.max_words)
            core_cfg = config.get('general', 'core_cfg')
            self.core = core.Core(core_cfg)
            self.i18ndir = config.get('i18n', 'dir')
            blacklist_cfg = config.get('blacklist', 'cfg')
            self.bl = blacklist.Blacklist(blacklist_cfg)
            self.bl_max_req = config.get('blacklist', 'max_requests')
            self.bl_max_req = int(self.bl_max_req)
            self.bl_wait_time = config.get('blacklist', 'wait_time')
            self.bl_wait_time = int(self.bl_wait_time)
            logdir = config.get('log', 'dir')
            logfile = os.path.join(logdir, 'xmpp.log')
            loglevel = config.get('log', 'level')
        except ConfigParser.Error as e:
            raise ConfigError("Configuration error: %s" % str(e))
        except blacklist.ConfigError as e:
            raise InternalError("Blacklist error: %s" % str(e))
        except core.ConfigError as e:
            raise InternalError("Core error: %s" % str(e))
        # logging
        log = logging.getLogger(__name__)
        logging_format = utils.get_logging_format()
        date_format = utils.get_date_format()
        formatter = logging.Formatter(logging_format, date_format)
        log.info('Redirecting XMPP logging to %s' % logfile)
        logfileh = logging.FileHandler(logfile, mode='a+')
        logfileh.setFormatter(formatter)
        logfileh.setLevel(logging.getLevelName(loglevel))
        log.addHandler(logfileh)
        # stop logging on stdout from now on
        log.propagate = False
        self.log = log

    def start_bot(self):
        """Start the bot for handling requests.
        Start a new sleekxmpp bot.
        """
        self.log.info("Starting the bot with account %s" % self.user)
        xmpp = Bot(self.user, self.password, self)
        xmpp.connect()
        xmpp.process(block=True)

    def _is_blacklisted(self, account):
        """Check if a user is blacklisted.
        :param: account (string) the account that made the request.
        :return: (bool) True if the address is blacklisted, False otherwise.
        """
        anon_acc = utils.get_sha256(account)
        try:
            self.bl.is_blacklisted(
                anon_acc, 'XMPP', self.bl_max_req, self.bl_wait_time
            )
            return False
        except blacklist.BlacklistError:
            return True

    def _get_msg(self, msgid, lc):
        """Get message identified by msgid in a specific locale.
        :param: msgid (string) the identifier of a string.
        :param: lc (string) the locale.
        :return: (string) the message from the .po file.
        """
        # obtain the content in the proper language
        self.log.debug("Trying to get translated text")
        try:
            t = gettext.translation(lc, self.i18ndir, languages=[lc])
            _ = t.ugettext
            msgstr = _(msgid)
            return msgstr
        except IOError as e:
            raise ConfigError("%s" % str(e))

    def _parse_text(self, msg):
        """Parse the text part of a message.
        Split the message in words and look for patterns for locale,
        operating system and built-in pluggable transport info.
        :param: msg (string) the message received.
        :return: req (dict) with keys 'lc', 'os' and 'type'.
        """
        # core knows what OS are supported
        supported_os = self.core.get_supported_os()
        supported_lc = self.core.get_supported_lc()
        self.log.debug("Parsing text")
        # default values
        req = {}
        req['lc'] = 'en'
        req['os'] = None
        req['type'] = 'help'
        found_lc = False
        found_os = False
        found_mirrors = False
        # analyze every word
        for word in msg.split(' '):
            # look for lc and os
            if not found_lc:
                for lc in supported_lc:
                    if re.match(lc, word, re.IGNORECASE):
                        found_lc = True
                        req['lc'] = lc
            if not found_os:
                # loop variable renamed from ``os`` so it no longer
                # shadows the ``os`` module inside this function
                for os_name in supported_os:
                    if re.match(os_name, word, re.IGNORECASE):
                        found_os = True
                        req['os'] = os_name
                        req['type'] = 'links'
            # mirrors
            if not found_mirrors:
                if re.match("mirrors?", word, re.IGNORECASE):
                    found_mirrors = True
                    req['type'] = 'mirrors'
            if (found_lc and found_os) or (found_lc and found_mirrors):
                break
        return req

    def parse_request(self, account, msg):
        """Process the request received.
        Check if the user is not blacklisted and then check the body of
        the message to find out what is asking.
        :param: account (string) the account that did the request.
        :param: msg (string) the body of the message sent to us.
        :return: (string/None) the message to be sent to the user via the
                 bot, or None if the user is blacklisted.
        """
        bogus_request = False
        reply = ''
        req = None
        self.log.debug("Parsing request")
        try:
            if self._is_blacklisted(str(account)):
                self.log.info('blacklist; none; none')
                # reply stays '' so the bot silently ignores the sender
                bogus_request = True
            # first let's find out how many words are in the message
            # request shouldn't be longer than 3 words, but just in case
            words = re.split(r'\s+', msg.strip())
            if len(words) > self.max_words:
                bogus_request = True
                self.log.info("Message way too long")
                self.log.info('invalid; none; none')
                reply = self._get_msg('message_error', 'en')
            if not bogus_request:
                self.log.debug("Request seems legit, let's parse it")
                # let's try to guess what the user is asking
                req = self._parse_text(str(msg))
                if req['type'] == 'help':
                    self.log.info('help; none; %s' % req['lc'])
                    reply = self._get_msg('help', 'en')
                elif req['type'] == 'mirrors':
                    self.log.info('mirrors; none; %s' % req['lc'])
                    reply = self._get_msg('mirrors', 'en')
                    try:
                        with open(self.mirrors, "r") as list_mirrors:
                            mirrors = list_mirrors.read()
                        reply = reply % mirrors
                    except IOError as e:
                        reply = self._get_msg('mirrors_unavailable', 'en')
                elif req['type'] == 'links':
                    self.log.info('links; %s; %s' % (req['os'], req['lc']))
                    links = self.core.get_links(
                        "XMPP",
                        req['os'],
                        req['lc']
                    )
                    reply = self._get_msg('links', 'en')
                    reply = reply % (OS[req['os']], links)
        except (core.ConfigError, core.InternalError) as e:
            # if core fails, send the user an error message, but keep going
            self.log.error("Something went wrong internally: %s" % str(e))
            reply = self._get_msg('internal_error', req['lc'])
        finally:
            # NOTE(review): returning from ``finally`` masks any exception
            # raised above (including ConfigError from _get_msg); kept as-is
            # because the bot must always produce a (possibly empty) reply.
            return reply
|
|
"""Tests for the Formatters."""
import warnings
from math import pi
try:
    import numpy
except ImportError:
    # numpy is optional; tests guard numpy-specific checks with ``if numpy``.
    # (The original used a bare ``except:``, which would also swallow
    # KeyboardInterrupt/SystemExit.)
    numpy = None
import nose.tools as nt
from IPython import get_ipython
from traitlets.config import Config
from IPython.core.formatters import (
PlainTextFormatter, HTMLFormatter, PDFFormatter, _mod_name_key,
DisplayFormatter, JSONFormatter,
)
from IPython.utils.io import capture_output
class A(object):
    """Fixture with a deterministic ``__repr__``."""
    def __repr__(self):
        return 'A()'
class B(A):
    """Subclass fixture used to check printer inheritance behavior."""
    def __repr__(self):
        return 'B()'
class C:
    """Bare fixture class used for type-registration tests."""
    pass
class BadRepr(object):
    """Fixture whose ``__repr__`` raises, to exercise error handling."""
    def __repr__(self):
        raise ValueError("bad repr")
class BadPretty(object):
    """Fixture with a non-callable ``_repr_pretty_`` attribute."""
    _repr_pretty_ = None
class GoodPretty(object):
    """Fixture implementing the ``_repr_pretty_`` protocol correctly."""
    def _repr_pretty_(self, pp, cycle):
        pp.text('foo')
    def __repr__(self):
        return 'GoodPretty()'
def foo_printer(obj, pp, cycle):
    """Custom type printer used throughout the registry tests."""
    pp.text('foo')
def test_pretty():
    """Custom printers and _repr_pretty_ win over __repr__ while pprint is on."""
    f = PlainTextFormatter()
    f.for_type(A, foo_printer)
    nt.assert_equal(f(A()), 'foo')
    nt.assert_equal(f(B()), 'B()')
    nt.assert_equal(f(GoodPretty()), 'foo')
    # Just don't raise an exception for the following:
    f(BadPretty())
    # With pretty-printing disabled, plain repr() is used everywhere.
    f.pprint = False
    nt.assert_equal(f(A()), 'A()')
    nt.assert_equal(f(B()), 'B()')
    nt.assert_equal(f(GoodPretty()), 'GoodPretty()')
def test_deferred():
    """Smoke test: constructing the formatter must not raise.

    NOTE(review): body is otherwise empty; deferred-printer behavior is
    actually covered by test_for_type_string/test_lookup_string below.
    """
    f = PlainTextFormatter()
def test_precision():
    """test various values for float_precision."""
    f = PlainTextFormatter()
    nt.assert_equal(f(pi), repr(pi))
    # Integer precision also propagates to numpy's printoptions.
    f.float_precision = 0
    if numpy:
        po = numpy.get_printoptions()
        nt.assert_equal(po['precision'], 0)
    nt.assert_equal(f(pi), '3')
    f.float_precision = 2
    if numpy:
        po = numpy.get_printoptions()
        nt.assert_equal(po['precision'], 2)
    nt.assert_equal(f(pi), '3.14')
    # A %-format string is used directly; numpy precision stays unchanged.
    f.float_precision = '%g'
    if numpy:
        po = numpy.get_printoptions()
        nt.assert_equal(po['precision'], 2)
    nt.assert_equal(f(pi), '3.14159')
    f.float_precision = '%e'
    nt.assert_equal(f(pi), '3.141593e+00')
    # Empty string restores the defaults (numpy precision 8, plain repr).
    f.float_precision = ''
    if numpy:
        po = numpy.get_printoptions()
        nt.assert_equal(po['precision'], 8)
    nt.assert_equal(f(pi), repr(pi))
def test_bad_precision():
    """test various invalid values for float_precision."""
    f = PlainTextFormatter()
    def set_fp(p):
        # helper so assert_raises can exercise the property setter
        f.float_precision=p
    nt.assert_raises(ValueError, set_fp, '%')
    nt.assert_raises(ValueError, set_fp, '%.3f%i')
    nt.assert_raises(ValueError, set_fp, 'foo')
    nt.assert_raises(ValueError, set_fp, -1)
def test_for_type():
    """for_type registers a printer and returns the previous one."""
    f = PlainTextFormatter()
    # initial return, None
    nt.assert_is(f.for_type(C, foo_printer), None)
    # no func queries
    nt.assert_is(f.for_type(C), foo_printer)
    # shouldn't change anything
    nt.assert_is(f.for_type(C), foo_printer)
    # None should do the same
    nt.assert_is(f.for_type(C, None), foo_printer)
    nt.assert_is(f.for_type(C, None), foo_printer)
def test_for_type_string():
    """Registering by dotted type name defers until the type is seen."""
    f = PlainTextFormatter()
    type_str = '%s.%s' % (C.__module__, 'C')
    # initial return, None
    nt.assert_is(f.for_type(type_str, foo_printer), None)
    # no func queries
    nt.assert_is(f.for_type(type_str), foo_printer)
    nt.assert_in(_mod_name_key(C), f.deferred_printers)
    # Looking up by the real type promotes the deferred entry.
    nt.assert_is(f.for_type(C), foo_printer)
    nt.assert_not_in(_mod_name_key(C), f.deferred_printers)
    nt.assert_in(C, f.type_printers)
def test_for_type_by_name():
    """for_type_by_name mirrors for_type semantics for (module, name) keys."""
    f = PlainTextFormatter()
    mod = C.__module__
    # initial return, None
    nt.assert_is(f.for_type_by_name(mod, 'C', foo_printer), None)
    # no func queries
    nt.assert_is(f.for_type_by_name(mod, 'C'), foo_printer)
    # shouldn't change anything
    nt.assert_is(f.for_type_by_name(mod, 'C'), foo_printer)
    # None should do the same
    nt.assert_is(f.for_type_by_name(mod, 'C', None), foo_printer)
    nt.assert_is(f.for_type_by_name(mod, 'C', None), foo_printer)
def test_lookup():
    """lookup finds a registered printer by instance, KeyError otherwise."""
    f = PlainTextFormatter()
    f.for_type(C, foo_printer)
    nt.assert_is(f.lookup(C()), foo_printer)
    with nt.assert_raises(KeyError):
        f.lookup(A())
def test_lookup_string():
    """Instance lookup promotes a string-registered printer to the type dict."""
    f = PlainTextFormatter()
    type_str = '%s.%s' % (C.__module__, 'C')
    f.for_type(type_str, foo_printer)
    nt.assert_is(f.lookup(C()), foo_printer)
    # should move from deferred to imported dict
    nt.assert_not_in(_mod_name_key(C), f.deferred_printers)
    nt.assert_in(C, f.type_printers)
def test_lookup_by_type():
    """lookup_by_type finds a registered printer by type, KeyError otherwise."""
    f = PlainTextFormatter()
    f.for_type(C, foo_printer)
    nt.assert_is(f.lookup_by_type(C), foo_printer)
    with nt.assert_raises(KeyError):
        f.lookup_by_type(A)
def test_lookup_by_type_string():
    """String lookups stay deferred; type lookups import and promote."""
    f = PlainTextFormatter()
    type_str = '%s.%s' % (C.__module__, 'C')
    f.for_type(type_str, foo_printer)
    # verify insertion
    nt.assert_in(_mod_name_key(C), f.deferred_printers)
    nt.assert_not_in(C, f.type_printers)
    nt.assert_is(f.lookup_by_type(type_str), foo_printer)
    # lookup by string doesn't cause import
    nt.assert_in(_mod_name_key(C), f.deferred_printers)
    nt.assert_not_in(C, f.type_printers)
    nt.assert_is(f.lookup_by_type(C), foo_printer)
    # should move from deferred to imported dict
    nt.assert_not_in(_mod_name_key(C), f.deferred_printers)
    nt.assert_in(C, f.type_printers)
def test_in_formatter():
    """``in`` works for both the type and its dotted-name string."""
    f = PlainTextFormatter()
    f.for_type(C, foo_printer)
    type_str = '%s.%s' % (C.__module__, 'C')
    nt.assert_in(C, f)
    nt.assert_in(type_str, f)
def test_string_in_formatter():
    """``in`` also works when registration was done via the string form."""
    f = PlainTextFormatter()
    type_str = '%s.%s' % (C.__module__, 'C')
    f.for_type(type_str, foo_printer)
    nt.assert_in(type_str, f)
    nt.assert_in(C, f)
def test_pop():
    """pop removes a type printer, returning it or the supplied default."""
    f = PlainTextFormatter()
    f.for_type(C, foo_printer)
    nt.assert_is(f.lookup_by_type(C), foo_printer)
    nt.assert_is(f.pop(C, None), foo_printer)
    f.for_type(C, foo_printer)
    nt.assert_is(f.pop(C), foo_printer)
    # After popping, lookups and further pops must fail...
    with nt.assert_raises(KeyError):
        f.lookup_by_type(C)
    with nt.assert_raises(KeyError):
        f.pop(C)
    with nt.assert_raises(KeyError):
        f.pop(A)
    # ...unless a default is provided.
    nt.assert_is(f.pop(A, None), None)
def test_pop_string():
    """pop accepts the dotted-name string form as well as the type."""
    f = PlainTextFormatter()
    type_str = '%s.%s' % (C.__module__, 'C')
    with nt.assert_raises(KeyError):
        f.pop(type_str)
    f.for_type(type_str, foo_printer)
    f.pop(type_str)
    with nt.assert_raises(KeyError):
        f.lookup_by_type(C)
    with nt.assert_raises(KeyError):
        f.pop(type_str)
    # A string pop also removes a printer registered under the real type.
    f.for_type(C, foo_printer)
    nt.assert_is(f.pop(type_str, None), foo_printer)
    with nt.assert_raises(KeyError):
        f.lookup_by_type(C)
    with nt.assert_raises(KeyError):
        f.pop(type_str)
    nt.assert_is(f.pop(type_str, None), None)
def test_error_method():
    """An exception inside _repr_html_ yields None and prints a traceback."""
    f = HTMLFormatter()
    class BadHTML(object):
        def _repr_html_(self):
            raise ValueError("Bad HTML")
    bad = BadHTML()
    with capture_output() as captured:
        result = f(bad)
    nt.assert_is(result, None)
    nt.assert_in("Traceback", captured.stdout)
    nt.assert_in("Bad HTML", captured.stdout)
    nt.assert_in("_repr_html_", captured.stdout)
def test_nowarn_notimplemented():
    """NotImplementedError from a repr method is silenced (no output at all)."""
    f = HTMLFormatter()
    class HTMLNotImplemented(object):
        def _repr_html_(self):
            raise NotImplementedError
    h = HTMLNotImplemented()
    with capture_output() as captured:
        result = f(h)
    nt.assert_is(result, None)
    nt.assert_equal("", captured.stderr)
    nt.assert_equal("", captured.stdout)
def test_warn_error_for_type():
    """An exception inside a registered printer is reported as a traceback."""
    f = HTMLFormatter()
    # name_error is deliberately undefined, triggering NameError at call time.
    f.for_type(int, lambda i: name_error)
    with capture_output() as captured:
        result = f(5)
    nt.assert_is(result, None)
    nt.assert_in("Traceback", captured.stdout)
    nt.assert_in("NameError", captured.stdout)
    nt.assert_in("name_error", captured.stdout)
def test_error_pretty_method():
    """A _repr_pretty_ with the wrong signature produces a traceback, not a crash."""
    f = PlainTextFormatter()
    class BadPretty(object):
        def _repr_pretty_(self):
            return "hello"
    bad = BadPretty()
    with capture_output() as captured:
        result = f(bad)
    nt.assert_is(result, None)
    nt.assert_in("Traceback", captured.stdout)
    nt.assert_in("_repr_pretty_", captured.stdout)
    nt.assert_in("given", captured.stdout)
    nt.assert_in("argument", captured.stdout)
def test_bad_repr_traceback():
    """A raising __repr__ is caught and reported as a traceback."""
    f = PlainTextFormatter()
    bad = BadRepr()
    with capture_output() as captured:
        result = f(bad)
    # catches error, returns None
    nt.assert_is(result, None)
    nt.assert_in("Traceback", captured.stdout)
    nt.assert_in("__repr__", captured.stdout)
    nt.assert_in("ValueError", captured.stdout)
class MakePDF(object):
    """Fixture exposing a minimal ``_repr_pdf_``."""
    def _repr_pdf_(self):
        return 'PDF'
def test_pdf_formatter():
    """PDFFormatter returns the raw value of _repr_pdf_."""
    pdf = MakePDF()
    f = PDFFormatter()
    nt.assert_equal(f(pdf), 'PDF')
def test_print_method_bound():
    """_repr_html_ only fires on instances, never on the class itself."""
    f = HTMLFormatter()
    class MyHTML(object):
        def _repr_html_(self):
            return "hello"
    # Formatting the class object must not call the unbound method.
    with capture_output() as captured:
        result = f(MyHTML)
    nt.assert_is(result, None)
    nt.assert_not_in("FormatterWarning", captured.stderr)
    with capture_output() as captured:
        result = f(MyHTML())
    nt.assert_equal(result, "hello")
    nt.assert_equal(captured.stderr, "")
def test_print_method_weird():
    """Objects with odd __getattr__ behavior must not break the formatter."""
    class TextMagicHat(object):
        def __getattr__(self, key):
            return key
    f = HTMLFormatter()
    # _repr_html_ resolves to a plain string -> not callable -> ignored.
    text_hat = TextMagicHat()
    nt.assert_equal(text_hat._repr_html_, '_repr_html_')
    with capture_output() as captured:
        result = f(text_hat)
    nt.assert_is(result, None)
    nt.assert_not_in("FormatterWarning", captured.stderr)
    class CallableMagicHat(object):
        def __getattr__(self, key):
            return lambda : key
    # _repr_html_ resolves to a zero-arg lambda returning a non-HTML value.
    call_hat = CallableMagicHat()
    with capture_output() as captured:
        result = f(call_hat)
    nt.assert_equal(result, None)
    class BadReprArgs(object):
        def _repr_html_(self, extra, args):
            return "html"
    # Wrong arity is silently skipped without a warning.
    bad = BadReprArgs()
    with capture_output() as captured:
        result = f(bad)
    nt.assert_is(result, None)
    nt.assert_not_in("FormatterWarning", captured.stderr)
def test_format_config():
    """config objects don't pretend to support fancy reprs with lazy attrs"""
    f = HTMLFormatter()
    cfg = Config()
    with capture_output() as captured:
        result = f(cfg)
    nt.assert_is(result, None)
    nt.assert_equal(captured.stderr, "")
    # The Config class itself must also format silently.
    with capture_output() as captured:
        result = f(Config)
    nt.assert_is(result, None)
    nt.assert_equal(captured.stderr, "")
def test_pretty_max_seq_length():
    """max_seq_length truncates sequences; 0 means unlimited."""
    f = PlainTextFormatter(max_seq_length=1)
    lis = list(range(3))
    text = f(lis)
    nt.assert_equal(text, '[0, ...]')
    f.max_seq_length = 0
    text = f(lis)
    nt.assert_equal(text, '[0, 1, 2]')
    # With no limit, a long list is fully rendered (one element per line).
    text = f(list(range(1024)))
    lines = text.splitlines()
    nt.assert_equal(len(lines), 1024)
def test_ipython_display_formatter():
    """Objects with _ipython_display_ defined bypass other formatters"""
    f = get_ipython().display_formatter
    catcher = []
    class SelfDisplaying(object):
        def _ipython_display_(self):
            catcher.append(self)
    class NotSelfDisplaying(object):
        def __repr__(self):
            return "NotSelfDisplaying"
        def _ipython_display_(self):
            raise NotImplementedError
    # Save and restore the global formatter flag around the test.
    save_enabled = f.ipython_display_formatter.enabled
    f.ipython_display_formatter.enabled = True
    yes = SelfDisplaying()
    no = NotSelfDisplaying()
    d, md = f.format(no)
    nt.assert_equal(d, {'text/plain': repr(no)})
    nt.assert_equal(md, {})
    nt.assert_equal(catcher, [])
    # Self-displaying objects produce no data/metadata; they display themselves.
    d, md = f.format(yes)
    nt.assert_equal(d, {})
    nt.assert_equal(md, {})
    nt.assert_equal(catcher, [yes])
    f.ipython_display_formatter.enabled = save_enabled
def test_json_as_string_deprecated():
    """Returning a JSON *string* from _repr_json_ is accepted but warns."""
    class JSONString(object):
        def _repr_json_(self):
            return '{}'
    f = JSONFormatter()
    with warnings.catch_warnings(record=True) as w:
        d = f(JSONString())
    nt.assert_equal(d, {})
    nt.assert_equal(len(w), 1)
def test_repr_mime():
    """_repr_mimebundle_ merges with (and overrides) single-mime repr methods."""
    class HasReprMime(object):
        def _repr_mimebundle_(self, include=None, exclude=None):
            return {
                'application/json+test.v2': {
                    'x': 'y'
                },
                'plain/text' : '<HasReprMime>',
                'image/png' : 'i-overwrite'
            }
        def _repr_png_(self):
            return 'should-be-overwritten'
        def _repr_html_(self):
            return '<b>hi!</b>'
    f = get_ipython().display_formatter
    html_f = f.formatters['text/html']
    save_enabled = html_f.enabled
    html_f.enabled = True
    obj = HasReprMime()
    d, md = f.format(obj)
    html_f.enabled = save_enabled
    nt.assert_equal(sorted(d), ['application/json+test.v2',
                                'image/png',
                                'plain/text',
                                'text/html',
                                'text/plain'])
    nt.assert_equal(md, {})
    d, md = f.format(obj, include={'image/png'})
    nt.assert_equal(list(d.keys()), ['image/png'],
                    'Include should filter out even things from repr_mimebundle')
    nt.assert_equal(d['image/png'], 'i-overwrite', '_repr_mimebundle_ take precedence')
def test_pass_correct_include_exclude():
    """include/exclude sets must be forwarded to _repr_mimebundle_ unmodified."""
    class Tester(object):
        def __init__(self, include=None, exclude=None):
            self.include = include
            self.exclude = exclude
        def _repr_mimebundle_(self, include, exclude, **kwargs):
            # The fixture raises if the formatter altered either set.
            if include and (include != self.include):
                raise ValueError('include got modified: display() may be broken.')
            if exclude and (exclude != self.exclude):
                raise ValueError('exclude got modified: display() may be broken.')
            return None
    include = {'a', 'b', 'c'}
    exclude = {'c', 'e' , 'f'}
    f = get_ipython().display_formatter
    f.format(Tester(include=include, exclude=exclude), include=include, exclude=exclude)
    f.format(Tester(exclude=exclude), exclude=exclude)
    f.format(Tester(include=include), include=include)
def test_repr_mime_meta():
    """A (data, metadata) tuple from _repr_mimebundle_ populates both dicts."""
    class HasReprMimeMeta(object):
        def _repr_mimebundle_(self, include=None, exclude=None):
            data = {
                'image/png': 'base64-image-data',
            }
            metadata = {
                'image/png': {
                    'width': 5,
                    'height': 10,
                }
            }
            return (data, metadata)
    f = get_ipython().display_formatter
    obj = HasReprMimeMeta()
    d, md = f.format(obj)
    nt.assert_equal(sorted(d), ['image/png', 'text/plain'])
    nt.assert_equal(md, {
        'image/png': {
            'width': 5,
            'height': 10,
        }
    })
def test_repr_mime_failure():
    """A raising _repr_mimebundle_ must not prevent the text/plain fallback."""
    class BadReprMime(object):
        def _repr_mimebundle_(self, include=None, exclude=None):
            raise RuntimeError
    f = get_ipython().display_formatter
    obj = BadReprMime()
    d, md = f.format(obj)
    nt.assert_in('text/plain', d)
|
|
import socket
import json
from django.contrib.auth.models import Group
from django.core.urlresolvers import reverse
from rest_framework.test import APIClient
from rest_framework import status
from rest_framework.test import APITestCase
from hs_core.hydroshare import users
from hs_core.hydroshare import resource
class HSRESTTestCase(APITestCase):
    """Base class for HydroShare REST API tests.

    Creates an authenticated test user/client in setUp and deletes any
    resources registered in ``self.resources_to_delete`` (plus the user)
    in tearDown.
    """
    def setUp(self):
        """Create the test user, API client, and bookkeeping state."""
        self.hostname = socket.gethostname()
        self.resource_url = "http://example.com/resource/{res_id}/"
        # Show full diffs on assertion failures.
        self.maxDiff = None
        self.client = APIClient()
        self.group, _ = Group.objects.get_or_create(name='Resource Author')
        # create a user
        self.user = users.create_account(
            'test_user@email.com',
            username='testuser',
            first_name='some_first_name',
            last_name='some_last_name',
            superuser=False)
        self.client.force_authenticate(user=self.user)
        # Resource IDs appended here are cleaned up in tearDown().
        self.resources_to_delete = []
    def tearDown(self):
        """Delete all registered resources, then the test user."""
        for r in self.resources_to_delete:
            resource.delete_resource(r)
        self.user.delete()
    def getResourceBag(self, res_id, exhaust_stream=True):
        """Get resource bag from iRODS, following redirects.
        :param res_id: ID of resource whose bag should be fetched
        :param exhaust_stream: If True, the response returned
        will have its stream_content exhausted. This prevents
        an error that causes the Docker container to exit when tests
        are run with an external web server.
        :return: Django test client response object
        """
        url = "/hydroshare/hsapi/resource/{res_id}".format(res_id=res_id)
        return self._get_file_irods(url, exhaust_stream)
    def getDownloadTaskStatus(self, task_id):
        """Check download celery task status.
        :param task_id: ID of download celery task
        :return: Django test client response object
        """
        url = reverse('get_task_status', kwargs={'task_id': task_id})
        return self.client.get(url, follow=True)
    def getResourceFile(self, res_id, file_name, exhaust_stream=True):
        """Get resource file from iRODS, following redirects
        :param res_id: ID of resource whose resource file should be fetched
        :param file_name: Name of the file to fetch (just the filename, not the full path)
        :param exhaust_stream: If True, the response returned
        will have its stream_content exhausted. This prevents
        an error that causes the Docker container to exit when tests
        are run with an external web server.
        :return: Django test client response object
        """
        url = "/hydroshare/hsapi/resource/{res_id}/files/{file_name}".format(res_id=res_id,
                                                                             file_name=file_name)
        return self._get_file_irods(url, exhaust_stream)
    def _get_file_irods(self, url, exhaust_stream=True):
        """GET *url*, assert HTTP 200, and optionally drain the stream.

        :param url: URL to fetch (redirects are followed)
        :param exhaust_stream: If True, consume ``streaming_content`` fully.
        :return: Django test client response object
        """
        response = self.client.get(url, follow=True)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        # Exhaust the file stream so that WSGI doesn't get upset (this causes the Docker container to exit)
        if exhaust_stream and hasattr(response, 'streaming_content'):
            for l in response.streaming_content:
                pass
        return response
    def getScienceMetadata(self, res_id, exhaust_stream=True):
        """Get sciencematadata.xml from iRODS, following redirects
        :param res_id: ID of resource whose science metadata should be fetched
        :param exhaust_stream: If True, the response returned
        will have its stream_content exhausted. This prevents
        an error that causes the Docker container to exit when tests
        are run with an external web server.
        :return: Django test client response object
        """
        url = "/hydroshare/hsapi/scimeta/{res_id}/".format(res_id=res_id)
        response = self._get_file_irods(url, exhaust_stream)
        self.assertEqual(response['Content-Type'], 'application/xml')
        self.assertGreater(int(response['Content-Length']), 0)
        return response
class SciMetaTestCase(HSRESTTestCase):
    """Base class for tests exercising the science metadata (/scimeta/) API.

    Provides XPath helpers for reading title/abstract/keywords from a parsed
    science metadata document, plus helpers for pushing updated metadata back
    to the server.
    """
    # XML namespace prefixes used by the XPath queries below.
    NS = {'rdf': "http://www.w3.org/1999/02/22-rdf-syntax-ns#",
          'rdfs1': "http://www.w3.org/2001/01/rdf-schema#",
          'dc': "http://purl.org/dc/elements/1.1/",
          'dcterms': "http://purl.org/dc/terms/",
          'hsterms': "http://hydroshare.org/terms/"}
    RESOURCE_URL_TEMPLATE = "http://example.com/resource/{0}"
    RESOURCE_METADATA = 'resourcemetadata.xml'
    RESOURCE_METADATA_OLD = 'resourcemetadata_old.xml'
    RESOURCE_METADATA_UPDATED = 'resourcemetadata_updated.xml'
    def getTitle(self, scimeta, should_exist=True):
        """ Get title from parsed ElementTree representation of science metadata.
        :param scimeta: ElementTree representing science metadata
        :param should_exist: If True, the title is expected to exist in the DOM.
        :return: String representing title text, if should_exist == True, else None.
        """
        title = scimeta.xpath('/rdf:RDF/rdf:Description[1]/dc:title', namespaces=self.NS)
        if should_exist:
            self.assertEqual(len(title), 1)
            return title[0].text
        else:
            self.assertEqual(len(title), 0)
            return None
    def getAbstract(self, scimeta, should_exist=True):
        """ Get abstract from parsed ElementTree representation of science metadata.
        :param scimeta: ElementTree representing science metadata
        :param should_exist: If True, the abstract is expected to exist in the DOM.
        :return: String representing abstract text, if should_exist == True, else None.
        """
        abstract = scimeta.xpath('/rdf:RDF/rdf:Description[1]/dc:description/rdf:Description/dcterms:abstract',
                                 namespaces=self.NS)
        if should_exist:
            self.assertEqual(len(abstract), 1)
            return abstract[0].text
        else:
            self.assertEqual(len(abstract), 0)
            return None
    def getKeywords(self, scimeta):
        """ Get keywords from parsed ElementTree representation of science metadata.
        :param scimeta: ElementTree representing science metadata
        :return: Tuple of Strings representing keyword metadata elements
        """
        keywords = scimeta.xpath('/rdf:RDF/rdf:Description[1]/dc:subject',
                                 namespaces=self.NS)
        return tuple(k.text for k in keywords)
    def updateScimetaResourceID(self, scimeta, new_id):
        """ Update resource ID of the science metadata to http://example.com/resource/$new_id
        :param scimeta: ElementTree representing science metadata
        :param new_id: String representing the new ID of the resource.
        :return: ElementTree representing science metadata
        """
        desc = scimeta.xpath('/rdf:RDF/rdf:Description[1]', namespaces=self.NS)[0]
        desc.set('{http://www.w3.org/1999/02/22-rdf-syntax-ns#}about',
                 self.RESOURCE_URL_TEMPLATE.format(new_id))
        return scimeta
    def updateScimeta(self, pk, scimeta_path, should_succeed=True):
        """ Use the test client to perform a PUT /scimeta
        using scimeta_path as the resourcemetadata.xml
        :param pk: The ID of the resource whose metadata is being updated
        :param scimeta_path: Path to a file named resourcemetadata.xml
        :param should_succeed: If True, will check for HTTP 202 status in the
        response, else will check for HTTP 400.
        :return: Test client HTTP response.
        """
        url = "/hydroshare/hsapi/scimeta/{pid}/".format(pid=pk)
        # Open the metadata file in a context manager so the handle is
        # always closed (the previous version leaked it).
        with open(scimeta_path) as scimeta_file:
            params = {'file': (self.RESOURCE_METADATA,
                               scimeta_file,
                               'application/xml')}
            response = self.client.put(url, params)
        if should_succeed:
            self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED,
                             msg=str(json.loads(response.content)))
        else:
            self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST,
                             msg=str(json.loads(response.content)))
        return response
class ModelInstanceSciMetaTestCase(SciMetaTestCase):
    """Base class for model-instance metadata tests.

    Creates a ModelProgramResource in setUp so tests can link model
    instances to it via the ExecutedBy metadata element.
    """
    # XPath to the includesModelOutput element of a model instance.
    MOD_OUT_PATH = ('/rdf:RDF/rdf:Description[1]/hsterms:ModelOutput/'
                    'rdf:Description/hsterms:includesModelOutput')
    # XPath to the ExecutedBy description node and its child elements.
    EXECUTED_BY_PATH = ('/rdf:RDF/rdf:Description[1]/hsterms:ExecutedBY/'
                        'rdf:Description')
    EXECUTED_BY_NAME_PATH = "{exec_by_path}/hsterms:modelProgramName".format(exec_by_path=EXECUTED_BY_PATH)
    EXECUTED_BY_ID_PATH = "{exec_by_path}/hsterms:modelProgramIdentifier".format(exec_by_path=EXECUTED_BY_PATH)
    def setUp(self):
        """Create a model program resource; it is deleted in tearDown."""
        super(ModelInstanceSciMetaTestCase, self).setUp()
        self.rtype_prog = 'ModelProgramResource'
        self.title_prog = 'Some program'
        res = resource.create_resource(self.rtype_prog,
                                       self.user,
                                       self.title_prog)
        self.pid_prog = res.short_id
        self.resources_to_delete.append(self.pid_prog)
    def updateExecutedBy(self, scimeta, name, id):
        """ Update ExecutedBy
        :param scimeta: ElementTree representing science metadata
        :param name: String representing the title of the program resource.
        :param id: String representing the ID of the program resource.
        :return: ElementTree representing science metadata
        """
        name_elem = scimeta.xpath(self.EXECUTED_BY_NAME_PATH, namespaces=self.NS)[0]
        name_elem.text = name
        id_elem = scimeta.xpath(self.EXECUTED_BY_ID_PATH, namespaces=self.NS)[0]
        # The identifier is the resource URL with a trailing slash.
        id_elem.text = self.RESOURCE_URL_TEMPLATE.format(id) + '/'
        return scimeta
class ResMapTestCase(HSRESTTestCase):
    """Base class for resource-map (/resmap/) REST API tests.

    NOTE(review): despite the RESOURCE_METADATA* names (mirroring
    SciMetaTestCase), the values here refer to resourcemap.xml files.
    """
    # XML namespace prefixes used in XPath queries against resource maps.
    NS = {'rdf': "http://www.w3.org/1999/02/22-rdf-syntax-ns#",
          'rdfs1': "http://www.w3.org/2001/01/rdf-schema#",
          'dc': "http://purl.org/dc/elements/1.1/",
          'dcterms': "http://purl.org/dc/terms/",
          'hsterms': "http://hydroshare.org/terms/"}
    RESOURCE_URL_TEMPLATE = "http://example.com/resource/{0}"
    RESOURCE_METADATA = 'resourcemap.xml'
    RESOURCE_METADATA_OLD = 'resourcemap_old.xml'
    RESOURCE_METADATA_UPDATED = 'resourcemap_updated.xml'
|
|
# Copyright 2012 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Top-level presubmit script for V8.
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details about the presubmit API built into gcl.
"""
import sys
# Paths excluded from all checks below: tests, test infrastructure,
# vendored third-party code, and tooling.
_EXCLUDED_PATHS = (
    r"^test[\\\/].*",
    r"^testing[\\\/].*",
    r"^third_party[\\\/].*",
    r"^tools[\\\/].*",
)
# Regular expression that matches code only used for test binaries
# (best effort).
_TEST_CODE_EXCLUDED_PATHS = (
    r'.+-unittest\.cc',
    # Has a method VisitForTest().
    r'src[\\\/]compiler[\\\/]ast-graph-builder\.cc',
    # Test extension.
    r'src[\\\/]extensions[\\\/]gc-extension\.cc',
)
# Warning shown when production code appears to call test-only helpers.
_TEST_ONLY_WARNING = (
    'You might be calling functions intended only for testing from\n'
    'production code. It is OK to ignore this warning if you know what\n'
    'you are doing, as the heuristics used to detect the situation are\n'
    'not perfect. The commit queue will not block on this warning.')
def _V8PresubmitChecks(input_api, output_api):
  """Runs the V8 presubmit checks.

  Imports the checkers from tools/presubmit.py and returns a list of
  PresubmitError results, one per failed check.
  """
  import sys
  # tools/ is not on sys.path when gcl evals this file, so add it here.
  sys.path.append(input_api.os_path.join(
        input_api.PresubmitLocalPath(), 'tools'))
  from presubmit import CppLintProcessor
  from presubmit import SourceProcessor
  from presubmit import CheckRuntimeVsNativesNameClashes
  from presubmit import CheckExternalReferenceRegistration
  results = []
  if not CppLintProcessor().Run(input_api.PresubmitLocalPath()):
    results.append(output_api.PresubmitError("C++ lint check failed"))
  if not SourceProcessor().Run(input_api.PresubmitLocalPath()):
    results.append(output_api.PresubmitError(
        "Copyright header, trailing whitespaces and two empty lines " \
        "between declarations check failed"))
  if not CheckRuntimeVsNativesNameClashes(input_api.PresubmitLocalPath()):
    results.append(output_api.PresubmitError(
        "Runtime/natives name clash check failed"))
  if not CheckExternalReferenceRegistration(input_api.PresubmitLocalPath()):
    results.append(output_api.PresubmitError(
        "External references registration check failed"))
  return results
def _CheckUnwantedDependencies(input_api, output_api):
  """Runs checkdeps on #include statements added in this
  change. Breaking - rules is an error, breaking ! rules is a
  warning.
  """
  # We need to wait until we have an input_api object and use this
  # roundabout construct to import checkdeps because this file is
  # eval-ed and thus doesn't have __file__.
  original_sys_path = sys.path
  try:
    sys.path = sys.path + [input_api.os_path.join(
        input_api.PresubmitLocalPath(), 'buildtools', 'checkdeps')]
    import checkdeps
    from cpp_checker import CppChecker
    from rules import Rule
  finally:
    # Restore sys.path to what it was before.
    sys.path = original_sys_path
  # Collect the changed lines of every affected C++ file; checkdeps scans
  # them for newly-added #include directives.
  added_includes = []
  for f in input_api.AffectedFiles():
    if not CppChecker.IsCppFile(f.LocalPath()):
      continue
    changed_lines = [line for line_num, line in f.ChangedContents()]
    added_includes.append([f.LocalPath(), changed_lines])
  deps_checker = checkdeps.DepsChecker(input_api.PresubmitLocalPath())
  # Partition violations: DISALLOW rules are hard errors, everything else
  # (temp-allowed "!" rules) only warns.
  error_descriptions = []
  warning_descriptions = []
  for path, rule_type, rule_description in deps_checker.CheckAddedCppIncludes(
      added_includes):
    description_with_path = '%s\n %s' % (path, rule_description)
    if rule_type == Rule.DISALLOW:
      error_descriptions.append(description_with_path)
    else:
      warning_descriptions.append(description_with_path)
  results = []
  if error_descriptions:
    results.append(output_api.PresubmitError(
        'You added one or more #includes that violate checkdeps rules.',
        error_descriptions))
  if warning_descriptions:
    results.append(output_api.PresubmitPromptOrNotify(
        'You added one or more #includes of files that are temporarily\n'
        'allowed but being removed. Can you avoid introducing the\n'
        '#include? See relevant DEPS file(s) for details and contacts.',
        warning_descriptions))
  return results
def _CheckNoProductionCodeUsingTestOnlyFunctions(input_api, output_api):
  """Attempts to prevent use of functions intended only for testing in
  non-testing code. For now this is just a best-effort implementation
  that ignores header files and may have some false positives. A
  better implementation would probably need a proper C++ parser.
  """
  # Only .cc files are scanned: declarations of for-testing functions in
  # headers cannot be told apart from calls without a real C++ parser.
  file_inclusion_pattern = r'.+\.cc'
  base_function_pattern = r'[ :]test::[^\s]+|ForTest(ing)?|for_test(ing)?'
  inclusion_pattern = input_api.re.compile(r'(%s)\s*\(' % base_function_pattern)
  comment_pattern = input_api.re.compile(r'//.*(%s)' % base_function_pattern)
  exclusion_pattern = input_api.re.compile(
      r'::[A-Za-z0-9_]+(%s)|(%s)[^;]+\{' % (
          base_function_pattern, base_function_pattern))
  def FilterFile(affected_file):
    # Restrict the scan to .cc files that are not test/third-party code.
    black_list = (_EXCLUDED_PATHS +
                  _TEST_CODE_EXCLUDED_PATHS +
                  input_api.DEFAULT_BLACK_LIST)
    return input_api.FilterSourceFile(
        affected_file,
        white_list=(file_inclusion_pattern, ),
        black_list=black_list)
  problems = []
  for affected in input_api.AffectedSourceFiles(FilterFile):
    path = affected.LocalPath()
    for line_number, line in affected.ChangedContents():
      # A hit counts only when it is neither commented out nor a
      # definition/qualified name (exclusion pattern).
      looks_test_only = (inclusion_pattern.search(line) and
                         not comment_pattern.search(line) and
                         not exclusion_pattern.search(line))
      if looks_test_only:
        problems.append(
            '%s:%d\n %s' % (path, line_number, line.strip()))
  if not problems:
    return []
  return [output_api.PresubmitPromptOrNotify(_TEST_ONLY_WARNING, problems)]
def _CommonChecks(input_api, output_api):
  """Checks common to both upload and commit."""
  # Run every check in order and flatten the per-check result lists.
  check_results = [
      input_api.canned_checks.CheckOwners(
          input_api, output_api, source_file_filter=None),
      input_api.canned_checks.CheckPatchFormatted(
          input_api, output_api),
      _V8PresubmitChecks(input_api, output_api),
      _CheckUnwantedDependencies(input_api, output_api),
      _CheckNoProductionCodeUsingTestOnlyFunctions(input_api, output_api),
  ]
  return [item for sublist in check_results for item in sublist]
def _SkipTreeCheck(input_api, output_api):
"""Check the env var whether we want to skip tree check.
Only skip if include/v8-version.h has been updated."""
src_version = 'include/v8-version.h'
FilterFile = lambda file: file.LocalPath() == src_version
if not input_api.AffectedSourceFiles(
lambda file: file.LocalPath() == src_version):
return False
return input_api.environ.get('PRESUBMIT_TREE_CHECK') == 'skip'
def _CheckChangeLogFlag(input_api, output_api):
"""Checks usage of LOG= flag in the commit message."""
results = []
if input_api.change.BUG and not 'LOG' in input_api.change.tags:
results.append(output_api.PresubmitError(
'An issue reference (BUG=) requires a change log flag (LOG=). '
'Use LOG=Y for including this commit message in the change log. '
'Use LOG=N or leave blank otherwise.'))
return results
def CheckChangeOnUpload(input_api, output_api):
  """Presubmit entry point invoked when a change is uploaded for review."""
  return (_CommonChecks(input_api, output_api) +
          _CheckChangeLogFlag(input_api, output_api))
def CheckChangeOnCommit(input_api, output_api):
  """Presubmit entry point invoked when a change is committed.

  Runs the common checks plus description and tree-status checks; the
  tree-status check may be skipped for version bumps (see _SkipTreeCheck).
  """
  results = []
  results += _CommonChecks(input_api, output_api)
  results += _CheckChangeLogFlag(input_api, output_api)
  results += input_api.canned_checks.CheckChangeHasDescription(
      input_api, output_api)
  if not _SkipTreeCheck(input_api, output_api):
    results += input_api.canned_checks.CheckTreeIsOpen(
        input_api, output_api,
        json_url='http://v8-status.appspot.com/current?format=json')
  return results
def GetPreferredTryMasters(project, change):
  """Returns the default try bots on the v8 try server for this change."""
  default_trybots = (
      'v8_linux_rel',
      'v8_linux_dbg',
      'v8_linux_nosnap_rel',
      'v8_linux64_rel',
      'v8_linux_arm_dbg',
      'v8_linux_arm64_rel',
      'v8_linux_layout_dbg',
      'v8_linux_chromium_gn_rel',
      'v8_mac_rel',
      'v8_win_rel',
      'v8_win64_compile_rel',
  )
  # Every bot runs the default test set.
  return {
      'tryserver.v8': dict((bot, set(['defaulttests']))
                           for bot in default_trybots),
  }
|
|
from sympy import Abs, S, Symbol, symbols, I, Rational, PurePoly, Float
from sympy.matrices import \
Matrix, MutableSparseMatrix, ImmutableSparseMatrix, SparseMatrix, eye, \
ones, zeros, ShapeError
from sympy.utilities.pytest import raises
def test_sparse_matrix():
    """Broad smoke test of SparseMatrix: creation, element assignment,
    arithmetic, determinants, slicing, reshaping, decompositions (LU/QR),
    solving, inverses, eigen computations, and the sparse storage dict
    (``_smat``). Sections are marked by the ``# test_...`` comments below.
    """
    def sparse_eye(n):
        return SparseMatrix.eye(n)
    def sparse_zeros(n):
        return SparseMatrix.zeros(n)
    # creation args
    raises(TypeError, lambda: SparseMatrix(1, 2))
    a = SparseMatrix((
        (1, 0),
        (0, 1)
    ))
    assert SparseMatrix(a) == a
    from sympy.matrices import MutableSparseMatrix, MutableDenseMatrix
    a = MutableSparseMatrix([])
    b = MutableDenseMatrix([1, 2])
    assert a.row_join(b) == b
    assert a.col_join(b) == b
    assert type(a.row_join(b)) == type(a)
    assert type(a.col_join(b)) == type(a)
    # make sure 0 x n matrices get stacked correctly
    sparse_matrices = [SparseMatrix.zeros(0, n) for n in range(4)]
    assert SparseMatrix.hstack(*sparse_matrices) == Matrix(0, 6, [])
    sparse_matrices = [SparseMatrix.zeros(n, 0) for n in range(4)]
    assert SparseMatrix.vstack(*sparse_matrices) == Matrix(6, 0, [])
    # test element assignment
    a = SparseMatrix((
        (1, 0),
        (0, 1)
    ))
    a[3] = 4
    assert a[1, 1] == 4
    a[3] = 1
    a[0, 0] = 2
    assert a == SparseMatrix((
        (2, 0),
        (0, 1)
    ))
    a[1, 0] = 5
    assert a == SparseMatrix((
        (2, 0),
        (5, 1)
    ))
    a[1, 1] = 0
    assert a == SparseMatrix((
        (2, 0),
        (5, 0)
    ))
    # zeroed entries must be dropped from the sparse storage dict
    assert a._smat == {(0, 0): 2, (1, 0): 5}
    # test_multiplication
    a = SparseMatrix((
        (1, 2),
        (3, 1),
        (0, 6),
    ))
    b = SparseMatrix((
        (1, 2),
        (3, 0),
    ))
    c = a*b
    assert c[0, 0] == 7
    assert c[0, 1] == 2
    assert c[1, 0] == 6
    assert c[1, 1] == 6
    assert c[2, 0] == 18
    assert c[2, 1] == 0
    try:
        eval('c = a @ b')
    except SyntaxError:
        pass
    else:
        assert c[0, 0] == 7
        assert c[0, 1] == 2
        assert c[1, 0] == 6
        assert c[1, 1] == 6
        assert c[2, 0] == 18
        assert c[2, 1] == 0
    x = Symbol("x")
    c = b * Symbol("x")
    assert isinstance(c, SparseMatrix)
    assert c[0, 0] == x
    assert c[0, 1] == 2*x
    assert c[1, 0] == 3*x
    assert c[1, 1] == 0
    c = 5 * b
    assert isinstance(c, SparseMatrix)
    assert c[0, 0] == 5
    assert c[0, 1] == 2*5
    assert c[1, 0] == 3*5
    assert c[1, 1] == 0
    #test_power
    A = SparseMatrix([[2, 3], [4, 5]])
    assert (A**5)[:] == [6140, 8097, 10796, 14237]
    A = SparseMatrix([[2, 1, 3], [4, 2, 4], [6, 12, 1]])
    assert (A**3)[:] == [290, 262, 251, 448, 440, 368, 702, 954, 433]
    # test_creation
    x = Symbol("x")
    a = SparseMatrix([[x, 0], [0, 0]])
    m = a
    assert m.cols == m.rows
    assert m.cols == 2
    assert m[:] == [x, 0, 0, 0]
    b = SparseMatrix(2, 2, [x, 0, 0, 0])
    m = b
    assert m.cols == m.rows
    assert m.cols == 2
    assert m[:] == [x, 0, 0, 0]
    assert a == b
    S = sparse_eye(3)
    S.row_del(1)
    assert S == SparseMatrix([
        [1, 0, 0],
        [0, 0, 1]])
    S = sparse_eye(3)
    S.col_del(1)
    assert S == SparseMatrix([
        [1, 0],
        [0, 0],
        [0, 1]])
    S = SparseMatrix.eye(3)
    S[2, 1] = 2
    S.col_swap(1, 0)
    assert S == SparseMatrix([
        [0, 1, 0],
        [1, 0, 0],
        [2, 0, 1]])
    a = SparseMatrix(1, 2, [1, 2])
    b = a.copy()
    c = a.copy()
    assert a[0] == 1
    a.row_del(0)
    assert a == SparseMatrix(0, 2, [])
    b.col_del(1)
    assert b == SparseMatrix(1, 1, [1])
    # ragged row lists are padded with zeros
    assert SparseMatrix([[1, 2, 3], [1, 2], [1]]) == Matrix([
        [1, 2, 3],
        [1, 2, 0],
        [1, 0, 0]])
    assert SparseMatrix(4, 4, {(1, 1): sparse_eye(2)}) == Matrix([
        [0, 0, 0, 0],
        [0, 1, 0, 0],
        [0, 0, 1, 0],
        [0, 0, 0, 0]])
    raises(ValueError, lambda: SparseMatrix(1, 1, {(1, 1): 1}))
    assert SparseMatrix(1, 2, [1, 2]).tolist() == [[1, 2]]
    assert SparseMatrix(2, 2, [1, [2, 3]]).tolist() == [[1, 0], [2, 3]]
    raises(ValueError, lambda: SparseMatrix(2, 2, [1]))
    raises(ValueError, lambda: SparseMatrix(1, 1, [[1, 2]]))
    assert SparseMatrix([.1]).has(Float)
    # autosizing
    assert SparseMatrix(None, {(0, 1): 0}).shape == (0, 0)
    assert SparseMatrix(None, {(0, 1): 1}).shape == (1, 2)
    assert SparseMatrix(None, None, {(0, 1): 1}).shape == (1, 2)
    raises(ValueError, lambda: SparseMatrix(None, 1, [[1, 2]]))
    raises(ValueError, lambda: SparseMatrix(1, None, [[1, 2]]))
    raises(ValueError, lambda: SparseMatrix(3, 3, {(0, 0): ones(2), (1, 1): 2}))
    # test_determinant
    x, y = Symbol('x'), Symbol('y')
    assert SparseMatrix(1, 1, [0]).det() == 0
    assert SparseMatrix([[1]]).det() == 1
    assert SparseMatrix(((-3, 2), (8, -5))).det() == -1
    assert SparseMatrix(((x, 1), (y, 2*y))).det() == 2*x*y - y
    assert SparseMatrix(( (1, 1, 1),
                          (1, 2, 3),
                          (1, 3, 6) )).det() == 1
    assert SparseMatrix(( ( 3, -2, 0, 5),
                          (-2, 1, -2, 2),
                          ( 0, -2, 5, 0),
                          ( 5, 0, 3, 4) )).det() == -289
    assert SparseMatrix(( ( 1, 2, 3, 4),
                          ( 5, 6, 7, 8),
                          ( 9, 10, 11, 12),
                          (13, 14, 15, 16) )).det() == 0
    assert SparseMatrix(( (3, 2, 0, 0, 0),
                          (0, 3, 2, 0, 0),
                          (0, 0, 3, 2, 0),
                          (0, 0, 0, 3, 2),
                          (2, 0, 0, 0, 3) )).det() == 275
    assert SparseMatrix(( (1, 0, 1, 2, 12),
                          (2, 0, 1, 1, 4),
                          (2, 1, 1, -1, 3),
                          (3, 2, -1, 1, 8),
                          (1, 1, 1, 0, 6) )).det() == -55
    assert SparseMatrix(( (-5, 2, 3, 4, 5),
                          ( 1, -4, 3, 4, 5),
                          ( 1, 2, -3, 4, 5),
                          ( 1, 2, 3, -2, 5),
                          ( 1, 2, 3, 4, -1) )).det() == 11664
    assert SparseMatrix(( ( 2, 7, -1, 3, 2),
                          ( 0, 0, 1, 0, 1),
                          (-2, 0, 7, 0, 2),
                          (-3, -2, 4, 5, 3),
                          ( 1, 0, 0, 0, 1) )).det() == 123
    # test_slicing
    m0 = sparse_eye(4)
    assert m0[:3, :3] == sparse_eye(3)
    assert m0[2:4, 0:2] == sparse_zeros(2)
    m1 = SparseMatrix(3, 3, lambda i, j: i + j)
    assert m1[0, :] == SparseMatrix(1, 3, (0, 1, 2))
    assert m1[1:3, 1] == SparseMatrix(2, 1, (2, 3))
    m2 = SparseMatrix(
        [[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11], [12, 13, 14, 15]])
    assert m2[:, -1] == SparseMatrix(4, 1, [3, 7, 11, 15])
    assert m2[-2:, :] == SparseMatrix([[8, 9, 10, 11], [12, 13, 14, 15]])
    assert SparseMatrix([[1, 2], [3, 4]])[[1], [1]] == Matrix([[4]])
    # test_submatrix_assignment
    m = sparse_zeros(4)
    m[2:4, 2:4] = sparse_eye(2)
    assert m == SparseMatrix([(0, 0, 0, 0),
                              (0, 0, 0, 0),
                              (0, 0, 1, 0),
                              (0, 0, 0, 1)])
    assert len(m._smat) == 2
    m[:2, :2] = sparse_eye(2)
    assert m == sparse_eye(4)
    m[:, 0] = SparseMatrix(4, 1, (1, 2, 3, 4))
    assert m == SparseMatrix([(1, 0, 0, 0),
                              (2, 1, 0, 0),
                              (3, 0, 1, 0),
                              (4, 0, 0, 1)])
    m[:, :] = sparse_zeros(4)
    assert m == sparse_zeros(4)
    m[:, :] = ((1, 2, 3, 4), (5, 6, 7, 8), (9, 10, 11, 12), (13, 14, 15, 16))
    assert m == SparseMatrix((( 1, 2, 3, 4),
                              ( 5, 6, 7, 8),
                              ( 9, 10, 11, 12),
                              (13, 14, 15, 16)))
    m[:2, 0] = [0, 0]
    assert m == SparseMatrix((( 0, 2, 3, 4),
                              ( 0, 6, 7, 8),
                              ( 9, 10, 11, 12),
                              (13, 14, 15, 16)))
    # test_reshape
    m0 = sparse_eye(3)
    assert m0.reshape(1, 9) == SparseMatrix(1, 9, (1, 0, 0, 0, 1, 0, 0, 0, 1))
    m1 = SparseMatrix(3, 4, lambda i, j: i + j)
    assert m1.reshape(4, 3) == \
        SparseMatrix([(0, 1, 2), (3, 1, 2), (3, 4, 2), (3, 4, 5)])
    assert m1.reshape(2, 6) == \
        SparseMatrix([(0, 1, 2, 3, 1, 2), (3, 4, 2, 3, 4, 5)])
    # test_applyfunc
    m0 = sparse_eye(3)
    assert m0.applyfunc(lambda x: 2*x) == sparse_eye(3)*2
    assert m0.applyfunc(lambda x: 0 ) == sparse_zeros(3)
    # test__eval_Abs
    assert abs(SparseMatrix(((x, 1), (y, 2*y)))) == SparseMatrix(((Abs(x), 1), (Abs(y), 2*Abs(y))))
    # test_LUdecomp
    testmat = SparseMatrix([[ 0, 2, 5, 3],
                            [ 3, 3, 7, 4],
                            [ 8, 4, 0, 2],
                            [-2, 6, 3, 4]])
    L, U, p = testmat.LUdecomposition()
    assert L.is_lower
    assert U.is_upper
    assert (L*U).permute_rows(p, 'backward') - testmat == sparse_zeros(4)
    testmat = SparseMatrix([[ 6, -2, 7, 4],
                            [ 0, 3, 6, 7],
                            [ 1, -2, 7, 4],
                            [-9, 2, 6, 3]])
    L, U, p = testmat.LUdecomposition()
    assert L.is_lower
    assert U.is_upper
    assert (L*U).permute_rows(p, 'backward') - testmat == sparse_zeros(4)
    x, y, z = Symbol('x'), Symbol('y'), Symbol('z')
    M = Matrix(((1, x, 1), (2, y, 0), (y, 0, z)))
    L, U, p = M.LUdecomposition()
    assert L.is_lower
    assert U.is_upper
    assert (L*U).permute_rows(p, 'backward') - M == sparse_zeros(3)
    # test_LUsolve
    A = SparseMatrix([[2, 3, 5],
                      [3, 6, 2],
                      [8, 3, 6]])
    x = SparseMatrix(3, 1, [3, 7, 5])
    b = A*x
    soln = A.LUsolve(b)
    assert soln == x
    A = SparseMatrix([[0, -1, 2],
                      [5, 10, 7],
                      [8, 3, 4]])
    x = SparseMatrix(3, 1, [-1, 2, 5])
    b = A*x
    soln = A.LUsolve(b)
    assert soln == x
    # test_inverse
    A = sparse_eye(4)
    assert A.inv() == sparse_eye(4)
    assert A.inv(method="CH") == sparse_eye(4)
    assert A.inv(method="LDL") == sparse_eye(4)
    A = SparseMatrix([[2, 3, 5],
                      [3, 6, 2],
                      [7, 2, 6]])
    Ainv = SparseMatrix(Matrix(A).inv())
    assert A*Ainv == sparse_eye(3)
    assert A.inv(method="CH") == Ainv
    assert A.inv(method="LDL") == Ainv
    A = SparseMatrix([[2, 3, 5],
                      [3, 6, 2],
                      [5, 2, 6]])
    Ainv = SparseMatrix(Matrix(A).inv())
    assert A*Ainv == sparse_eye(3)
    assert A.inv(method="CH") == Ainv
    assert A.inv(method="LDL") == Ainv
    # test_cross
    v1 = Matrix(1, 3, [1, 2, 3])
    v2 = Matrix(1, 3, [3, 4, 5])
    assert v1.cross(v2) == Matrix(1, 3, [-2, 4, -2])
    assert v1.norm(2)**2 == 14
    # conjugate
    a = SparseMatrix(((1, 2 + I), (3, 4)))
    assert a.C == SparseMatrix([
        [1, 2 - I],
        [3, 4]
    ])
    # mul
    assert a*Matrix(2, 2, [1, 0, 0, 1]) == a
    assert a + Matrix(2, 2, [1, 1, 1, 1]) == SparseMatrix([
        [2, 3 + I],
        [4, 5]
    ])
    # col join
    assert a.col_join(sparse_eye(2)) == SparseMatrix([
        [1, 2 + I],
        [3, 4],
        [1, 0],
        [0, 1]
    ])
    # symmetric
    assert not a.is_symmetric(simplify=False)
    # test_cofactor
    assert sparse_eye(3) == sparse_eye(3).cofactor_matrix()
    test = SparseMatrix([[1, 3, 2], [2, 6, 3], [2, 3, 6]])
    assert test.cofactor_matrix() == \
        SparseMatrix([[27, -6, -6], [-12, 2, 3], [-3, 1, 0]])
    test = SparseMatrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
    assert test.cofactor_matrix() == \
        SparseMatrix([[-3, 6, -3], [6, -12, 6], [-3, 6, -3]])
    # test_jacobian
    x = Symbol('x')
    y = Symbol('y')
    L = SparseMatrix(1, 2, [x**2*y, 2*y**2 + x*y])
    syms = [x, y]
    assert L.jacobian(syms) == Matrix([[2*x*y, x**2], [y, 4*y + x]])
    L = SparseMatrix(1, 2, [x, x**2*y**3])
    assert L.jacobian(syms) == SparseMatrix([[1, 0], [2*x*y**3, x**2*3*y**2]])
    # test_QR
    A = Matrix([[1, 2], [2, 3]])
    Q, S = A.QRdecomposition()
    R = Rational
    assert Q == Matrix([
        [ 5**R(-1, 2), (R(2)/5)*(R(1)/5)**R(-1, 2)],
        [2*5**R(-1, 2), (-R(1)/5)*(R(1)/5)**R(-1, 2)]])
    assert S == Matrix([
        [5**R(1, 2), 8*5**R(-1, 2)],
        [ 0, (R(1)/5)**R(1, 2)]])
    assert Q*S == A
    assert Q.T * Q == sparse_eye(2)
    R = Rational
    # test nullspace
    # first test reduced row-ech form
    M = SparseMatrix([[5, 7, 2, 1],
                      [1, 6, 2, -1]])
    out, tmp = M.rref()
    assert out == Matrix([[1, 0, -R(2)/23, R(13)/23],
                          [0, 1, R(8)/23, R(-6)/23]])
    M = SparseMatrix([[ 1, 3, 0, 2, 6, 3, 1],
                      [-2, -6, 0, -2, -8, 3, 1],
                      [ 3, 9, 0, 0, 6, 6, 2],
                      [-1, -3, 0, 1, 0, 9, 3]])
    out, tmp = M.rref()
    assert out == Matrix([[1, 3, 0, 0, 2, 0, 0],
                          [0, 0, 0, 1, 2, 0, 0],
                          [0, 0, 0, 0, 0, 1, R(1)/3],
                          [0, 0, 0, 0, 0, 0, 0]])
    # now check the vectors
    basis = M.nullspace()
    assert basis[0] == Matrix([-3, 1, 0, 0, 0, 0, 0])
    assert basis[1] == Matrix([0, 0, 1, 0, 0, 0, 0])
    assert basis[2] == Matrix([-2, 0, 0, -2, 1, 0, 0])
    assert basis[3] == Matrix([0, 0, 0, 0, 0, R(-1)/3, 1])
    # test eigen
    x = Symbol('x')
    y = Symbol('y')
    sparse_eye3 = sparse_eye(3)
    assert sparse_eye3.charpoly(x) == PurePoly(((x - 1)**3))
    assert sparse_eye3.charpoly(y) == PurePoly(((y - 1)**3))
    # test values
    M = Matrix([( 0, 1, -1),
                ( 1, 1, 0),
                (-1, 0, 1)])
    vals = M.eigenvals()
    assert sorted(vals.keys()) == [-1, 1, 2]
    R = Rational
    M = Matrix([[1, 0, 0],
                [0, 1, 0],
                [0, 0, 1]])
    assert M.eigenvects() == [(1, 3, [
        Matrix([1, 0, 0]),
        Matrix([0, 1, 0]),
        Matrix([0, 0, 1])])]
    M = Matrix([[5, 0, 2],
                [3, 2, 0],
                [0, 0, 1]])
    assert M.eigenvects() == [(1, 1, [Matrix([R(-1)/2, R(3)/2, 1])]),
                              (2, 1, [Matrix([0, 1, 0])]),
                              (5, 1, [Matrix([1, 1, 0])])]
    assert M.zeros(3, 5) == SparseMatrix(3, 5, {})
    A = SparseMatrix(10, 10, {(0, 0): 18, (0, 9): 12, (1, 4): 18, (2, 7): 16, (3, 9): 12, (4, 2): 19, (5, 7): 16, (6, 2): 12, (9, 7): 18})
    assert A.row_list() == [(0, 0, 18), (0, 9, 12), (1, 4, 18), (2, 7, 16), (3, 9, 12), (4, 2, 19), (5, 7, 16), (6, 2, 12), (9, 7, 18)]
    assert A.col_list() == [(0, 0, 18), (4, 2, 19), (6, 2, 12), (1, 4, 18), (2, 7, 16), (5, 7, 16), (9, 7, 18), (0, 9, 12), (3, 9, 12)]
    assert SparseMatrix.eye(2).nnz() == 2
def test_transpose():
    """Transposing a 2x2 sparse matrix swaps the off-diagonal entries."""
    original = SparseMatrix(((1, 2), (3, 4)))
    flipped = SparseMatrix(((1, 3), (2, 4)))
    assert original.transpose() == flipped
def test_trace():
    """trace() sums the diagonal, including implicit zero entries."""
    cases = [
        (((1, 2), (3, 4)), 5),
        (((0, 0), (0, 4)), 4),
    ]
    for entries, expected in cases:
        assert SparseMatrix(entries).trace() == expected
def test_CL_RL():
    """row_list() is row-major and col_list() is column-major; both emit
    (row, col, value) triples."""
    M = SparseMatrix(((1, 2), (3, 4)))
    assert M.row_list() == [(0, 0, 1), (0, 1, 2), (1, 0, 3), (1, 1, 4)]
    assert M.col_list() == [(0, 0, 1), (1, 0, 3), (0, 1, 2), (1, 1, 4)]
def test_add():
    """Element-wise addition, and cancellation shrinking the storage dict."""
    lhs = SparseMatrix(((1, 0), (0, 1)))
    rhs = SparseMatrix(((0, 1), (1, 0)))
    assert lhs + rhs == SparseMatrix(((1, 1), (1, 1)))
    # Overlapping nonzero patterns: the sum must store no more entries
    # than the operands held together.
    a = SparseMatrix(100, 100, lambda i, j: int(j != 0 and i % j == 0))
    b = SparseMatrix(100, 100, lambda i, j: int(i != 0 and j % i == 0))
    assert len(a._smat) + len(b._smat) > len((a + b)._smat)
def test_errors():
    """Invalid construction arguments and out-of-range indexing must raise."""
    M = SparseMatrix([[1, 2], [3, 4]])
    # Construction errors.
    raises(ValueError, lambda: SparseMatrix(1.4, 2, lambda i, j: 0))
    raises(TypeError, lambda: SparseMatrix([1, 2, 3], [1, 2]))
    # Indexing errors.
    raises(ValueError, lambda: M[(1, 2, 3)])
    raises(IndexError, lambda: M[5])
    raises(ValueError, lambda: M[1, 2, 3])
    raises(TypeError, lambda: M.copyin_list([0, 1], set([])))
    raises(IndexError, lambda: M[1, 2])
    raises(TypeError, lambda: SparseMatrix([1, 2, 3]).cross(1))
    raises(IndexError, lambda: SparseMatrix(1, 2, [1, 2])[3])
    # Shape mismatch in addition.
    raises(ShapeError,
           lambda: SparseMatrix(1, 2, [1, 2]) + SparseMatrix(2, 1, [2, 1]))
def test_len():
    """An empty SparseMatrix is falsy and equals other empty constructions."""
    empty = SparseMatrix()
    assert not empty
    assert empty == SparseMatrix([])
    assert empty == SparseMatrix([[]])
def test_sparse_zeros_sparse_eye():
    """eye/zeros class constructors agree with the module-level factories,
    and the sparse storage dict holds only the nonzero entries."""
    identity = SparseMatrix.eye(3)
    assert identity == eye(3, cls=SparseMatrix)
    assert len(identity._smat) == 3
    zero = SparseMatrix.zeros(3)
    assert zero == zeros(3, cls=SparseMatrix)
    assert len(zero._smat) == 0
def test_copyin():
    """In-place element and slice assignment on a SparseMatrix, with scalar,
    list, Matrix and SparseMatrix right-hand sides."""
    s = SparseMatrix(3, 3, {})
    s[1, 0] = 1
    assert s[:, 0] == SparseMatrix(Matrix([0, 1, 0]))
    # Flat (row-major) indexing reaches the same element.
    assert s[3] == 1
    assert s[3: 4] == [1]
    s[1, 1] = 42
    assert s[1, 1] == 42
    assert s[1, 1:] == SparseMatrix([[42, 0]])
    # Slice assignment from a dense Matrix...
    s[1, 1:] = Matrix([[5, 6]])
    assert s[1, :] == SparseMatrix([[1, 5, 6]])
    # ...and from a nested list.
    s[1, 1:] = [[42, 43]]
    assert s[1, :] == SparseMatrix([[1, 42, 43]])
    s[0, 0] = 17
    assert s[:, :1] == SparseMatrix([17, 1, 0])
    # Assigning a column-shaped value at a single index fills the column.
    s[0, 0] = [1, 1, 1]
    assert s[:, 0] == SparseMatrix([1, 1, 1])
    s[0, 0] = Matrix([1, 1, 1])
    assert s[:, 0] == SparseMatrix([1, 1, 1])
    s[0, 0] = SparseMatrix([1, 1, 1])
    assert s[:, 0] == SparseMatrix([1, 1, 1])
def test_sparse_solve():
    """Cholesky and LDL decompositions, the inverses and solvers built on
    them, and least-squares solving on a tall (stacked) system."""
    from sympy.matrices import SparseMatrix
    A = SparseMatrix(((25, 15, -5), (15, 18, 0), (-5, 0, 11)))
    assert A.cholesky() == Matrix([
        [ 5, 0, 0],
        [ 3, 3, 0],
        [-1, 1, 3]])
    # L * L.T reconstructs the original symmetric matrix.
    assert A.cholesky() * A.cholesky().T == Matrix([
        [25, 15, -5],
        [15, 18, 0],
        [-5, 0, 11]])
    A = SparseMatrix(((25, 15, -5), (15, 18, 0), (-5, 0, 11)))
    L, D = A.LDLdecomposition()
    # Scale by 15 to compare L against exact integer entries.
    assert 15*L == Matrix([
        [15, 0, 0],
        [ 9, 15, 0],
        [-3, 5, 15]])
    assert D == Matrix([
        [25, 0, 0],
        [ 0, 9, 0],
        [ 0, 0, 9]])
    assert L * D * L.T == A
    A = SparseMatrix(((3, 0, 2), (0, 0, 1), (1, 2, 0)))
    assert A.inv() * A == SparseMatrix(eye(3))
    A = SparseMatrix([
        [ 2, -1, 0],
        [-1, 2, -1],
        [ 0, 0, 2]])
    ans = SparseMatrix([
        [Rational(2, 3), Rational(1, 3), Rational(1, 6)],
        [Rational(1, 3), Rational(2, 3), Rational(1, 3)],
        [ 0, 0, S.Half]])
    assert A.inv(method='CH') == ans
    assert A.inv(method='LDL') == ans
    assert A * ans == SparseMatrix(eye(3))
    # Solving A*s = b via both factorizations.
    s = A.solve(A[:, 0], 'LDL')
    assert A*s == A[:, 0]
    s = A.solve(A[:, 0], 'CH')
    assert A*s == A[:, 0]
    # Stack A on itself to get an overdetermined (6x3) system.
    A = A.col_join(A)
    s = A.solve_least_squares(A[:, 0], 'CH')
    assert A*s == A[:, 0]
    s = A.solve_least_squares(A[:, 0], 'LDL')
    assert A*s == A[:, 0]
def test_lower_triangular_solve():
    """Symbolic lower-triangular solve works for mutable and immutable RHS."""
    a, b, c, d = symbols('a:d')
    u, v, w, x = symbols('u:x')
    A = SparseMatrix([[a, 0], [c, d]])
    B = MutableSparseMatrix([[u, v], [w, x]])
    C = ImmutableSparseMatrix([[u, v], [w, x]])
    sol = Matrix([[u/a, v/a], [(w - c*u/a)/d, (x - c*v/a)/d]])
    assert A.lower_triangular_solve(B) == sol
    assert A.lower_triangular_solve(C) == sol
def test_upper_triangular_solve():
    """Symbolic upper-triangular solve works for mutable and immutable RHS."""
    a, b, c, d = symbols('a:d')
    u, v, w, x = symbols('u:x')
    A = SparseMatrix([[a, b], [0, d]])
    B = MutableSparseMatrix([[u, v], [w, x]])
    C = ImmutableSparseMatrix([[u, v], [w, x]])
    sol = Matrix([[(u - b*w/d)/a, (v - b*x/d)/a], [w/d, x/d]])
    assert A.upper_triangular_solve(B) == sol
    assert A.upper_triangular_solve(C) == sol
def test_diagonal_solve():
    """Diagonal solve simply divides each row by the diagonal entry."""
    a, d = symbols('a d')
    u, v, w, x = symbols('u:x')
    A = SparseMatrix([[a, 0], [0, d]])
    B = MutableSparseMatrix([[u, v], [w, x]])
    C = ImmutableSparseMatrix([[u, v], [w, x]])
    sol = Matrix([[u/a, v/a], [w/d, x/d]])
    assert A.diagonal_solve(B) == sol
    assert A.diagonal_solve(C) == sol
def test_hermitian():
    """is_hermitian is True/False for decidable cases and None when a
    symbolic entry leaves the answer undetermined."""
    x = Symbol('x')
    a = SparseMatrix([[0, I], [-I, 0]])
    assert a.is_hermitian
    a = SparseMatrix([[1, I], [-I, 1]])
    assert a.is_hermitian
    # A purely imaginary diagonal entry breaks hermiticity.
    a[0, 0] = 2*I
    assert a.is_hermitian is False
    # An unknown symbol on the diagonal makes the question undecidable.
    a[0, 0] = x
    assert a.is_hermitian is None
    a[0, 1] = a[1, 0]*I
    assert a.is_hermitian is False
|
|
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
'''
Miscellaneous algorithms
Change directory to provide relative paths for doctests
>>> import os
>>> filepath = os.path.dirname(os.path.realpath(__file__))
>>> datadir = os.path.realpath(os.path.join(filepath, '../testing/data'))
>>> os.chdir(datadir)
'''
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
from builtins import zip
from builtins import range
import os
import os.path as op
import nibabel as nb
import numpy as np
from math import floor, ceil
from scipy.ndimage.morphology import grey_dilation
from scipy.special import legendre
import scipy.io as sio
import itertools
import scipy.stats as stats
from nipype import logging
import warnings
from . import metrics as nam
from ..interfaces.base import (BaseInterface, traits, TraitedSpec, File,
InputMultiPath, OutputMultiPath,
BaseInterfaceInputSpec, isdefined,
DynamicTraitedSpec, Undefined)
from nipype.utils.filemanip import fname_presuffix, split_filename
iflogger = logging.getLogger('interface')
class PickAtlasInputSpec(BaseInterfaceInputSpec):
    """Inputs for :class:`PickAtlas`: an atlas volume, the labels to keep,
    and optional hemisphere restriction / dilation / output path."""
    atlas = File(exists=True, desc="Location of the atlas that will be used.",
                 mandatory=True)
    # A single label or a list of labels; both forms are accepted.
    labels = traits.Either(
        traits.Int, traits.List(traits.Int),
        desc=("Labels of regions that will be included in the mask. Must be\
 compatible with the atlas used."),
        mandatory=True
    )
    hemi = traits.Enum(
        'both', 'left', 'right',
        desc="Restrict the mask to only one hemisphere: left or right",
        usedefault=True
    )
    # 0 (the Int default) means no dilation.
    dilation_size = traits.Int(
        usedefault=True,
        desc="Defines how much the mask will be dilated (expanded in 3D)."
    )
    output_file = File(desc="Where to store the output mask.")
class PickAtlasOutputSpec(TraitedSpec):
    """Output of :class:`PickAtlas`: the binary ROI mask volume."""
    mask_file = File(exists=True, desc="output mask file")
class PickAtlas(BaseInterface):
    """Returns ROI masks given an atlas and a list of labels. Supports dilation
    and left right masking (assuming the atlas is properly aligned).
    """
    input_spec = PickAtlasInputSpec
    output_spec = PickAtlasOutputSpec

    def _run_interface(self, runtime):
        # Build the mask and write it to the (possibly generated) output path.
        nim = self._get_brodmann_area()
        nb.save(nim, self._gen_output_filename())
        return runtime

    def _gen_output_filename(self):
        # Default output name: "<atlas>_mask.<ext>" in the current directory.
        if not isdefined(self.inputs.output_file):
            output = fname_presuffix(fname=self.inputs.atlas, suffix="_mask",
                                     newpath=os.getcwd(), use_ext=True)
        else:
            output = os.path.realpath(self.inputs.output_file)
        return output

    def _get_brodmann_area(self):
        """Return a binary Nifti1Image with 1 at the requested labels,
        optionally restricted to one hemisphere and grey-dilated."""
        nii = nb.load(self.inputs.atlas)
        origdata = nii.get_data()
        newdata = np.zeros(origdata.shape)

        # Normalize a single label into a one-element list.
        if not isinstance(self.inputs.labels, list):
            labels = [self.inputs.labels]
        else:
            labels = self.inputs.labels
        for lab in labels:
            newdata[origdata == lab] = 1

        # Hemisphere restriction zeroes half of the first (x) axis.
        # NOTE(review): which half corresponds to 'left'/'right' depends on
        # the atlas orientation -- confirm against the atlas convention.
        if self.inputs.hemi == 'right':
            newdata[int(floor(float(origdata.shape[0]) / 2)):, :, :] = 0
        elif self.inputs.hemi == 'left':
            newdata[:int(ceil(float(origdata.shape[0]) / 2)), :, :] = 0

        # Grey dilation with a cubic structuring element of edge
        # (2 * dilation_size + 1) expands the mask isotropically.
        if self.inputs.dilation_size != 0:
            newdata = grey_dilation(
                newdata, (2 * self.inputs.dilation_size + 1,
                          2 * self.inputs.dilation_size +
                          1,
                          2 * self.inputs.dilation_size + 1))

        return nb.Nifti1Image(newdata, nii.affine, nii.header)

    def _list_outputs(self):
        outputs = self._outputs().get()
        outputs['mask_file'] = self._gen_output_filename()
        return outputs
class SimpleThresholdInputSpec(BaseInterfaceInputSpec):
    """Inputs for :class:`SimpleThreshold`: volumes plus a scalar cutoff."""
    volumes = InputMultiPath(
        File(exists=True), desc='volumes to be thresholded', mandatory=True)
    threshold = traits.Float(
        desc='volumes to be thresholdedeverything below this value will be set\
 to zero',
        mandatory=True
    )
class SimpleThresholdOutputSpec(TraitedSpec):
    """Output of :class:`SimpleThreshold`: one thresholded volume per input."""
    thresholded_volumes = OutputMultiPath(
        File(exists=True), desc="thresholded volumes")
class SimpleThreshold(BaseInterface):
    """Applies a threshold to input volumes
    """
    input_spec = SimpleThresholdInputSpec
    output_spec = SimpleThresholdOutputSpec

    def _run_interface(self, runtime):
        for fname in self.inputs.volumes:
            img = nb.load(fname)
            data = np.array(img.get_data())

            # Keep only voxels strictly above the threshold; the rest are 0.
            active_map = data > self.inputs.threshold

            thresholded_map = np.zeros(data.shape)
            thresholded_map[active_map] = data[active_map]

            # Save as "<base>_thresholded.nii" in the current directory.
            new_img = nb.Nifti1Image(thresholded_map, img.affine, img.header)
            _, base, _ = split_filename(fname)
            nb.save(new_img, base + '_thresholded.nii')

        return runtime

    def _list_outputs(self):
        outputs = self._outputs().get()
        outputs["thresholded_volumes"] = []
        for fname in self.inputs.volumes:
            _, base, _ = split_filename(fname)
            outputs["thresholded_volumes"].append(
                os.path.abspath(base + '_thresholded.nii'))
        return outputs
class ModifyAffineInputSpec(BaseInterfaceInputSpec):
    """Inputs for :class:`ModifyAffine`: volumes and a 4x4 transform that is
    left-multiplied onto each volume's affine (identity by default)."""
    volumes = InputMultiPath(
        File(exists=True),
        desc='volumes which affine matrices will be modified',
        mandatory=True
    )
    transformation_matrix = traits.Array(
        value=np.eye(4),
        shape=(4, 4),
        desc="transformation matrix that will be left multiplied by the\
 affine matrix",
        usedefault=True
    )
class ModifyAffineOutputSpec(TraitedSpec):
    """Output of :class:`ModifyAffine`: the re-headered volumes."""
    # BUGFIX: the keyword was misspelled ``exist=True``; traits treats unknown
    # keywords as inert metadata, so the existence check was silently skipped.
    transformed_volumes = OutputMultiPath(File(exists=True))
class ModifyAffine(BaseInterface):
    """Left multiplies the affine matrix with a specified values. Saves the volume
    as a nifti file.
    """
    input_spec = ModifyAffineInputSpec
    output_spec = ModifyAffineOutputSpec

    def _gen_output_filename(self, name):
        # "<base>_transformed.nii" in the current working directory.
        _, base, _ = split_filename(name)
        return os.path.abspath(base + "_transformed.nii")

    def _run_interface(self, runtime):
        for fname in self.inputs.volumes:
            img = nb.load(fname)

            # New affine = transformation_matrix @ original affine.
            affine = img.affine
            affine = np.dot(self.inputs.transformation_matrix, affine)

            nb.save(nb.Nifti1Image(img.get_data(), affine, img.header),
                    self._gen_output_filename(fname))

        return runtime

    def _list_outputs(self):
        outputs = self._outputs().get()
        outputs['transformed_volumes'] = []
        for fname in self.inputs.volumes:
            outputs['transformed_volumes'].append(
                self._gen_output_filename(fname))
        return outputs
class CreateNiftiInputSpec(BaseInterfaceInputSpec):
    """Inputs for :class:`CreateNifti`: an ANALYZE img/hdr pair and an
    optional affine to embed in the resulting NIfTI."""
    data_file = File(exists=True, mandatory=True, desc="ANALYZE img file")
    header_file = File(
        exists=True, mandatory=True, desc="corresponding ANALYZE hdr file")
    affine = traits.Array(desc="affine transformation array")
class CreateNiftiOutputSpec(TraitedSpec):
    """Output of :class:`CreateNifti`: the converted NIfTI file."""
    nifti_file = File(exists=True)
class CreateNifti(BaseInterface):
    """Creates a nifti volume
    """
    input_spec = CreateNiftiInputSpec
    output_spec = CreateNiftiOutputSpec

    def _gen_output_file_name(self):
        # Same base name as the data file, with a .nii extension.
        _, base, _ = split_filename(self.inputs.data_file)
        return os.path.abspath(base + ".nii")

    def _run_interface(self, runtime):
        # Parse the ANALYZE header, then read the raw data through it.
        hdr = nb.AnalyzeHeader.from_fileobj(
            open(self.inputs.header_file, 'rb'))

        # affine=None lets nibabel derive one from the header.
        if isdefined(self.inputs.affine):
            affine = self.inputs.affine
        else:
            affine = None

        data = hdr.data_from_fileobj(open(self.inputs.data_file, 'rb'))
        img = nb.Nifti1Image(data, affine, hdr)
        nb.save(img, self._gen_output_file_name())

        return runtime

    def _list_outputs(self):
        outputs = self._outputs().get()
        outputs['nifti_file'] = self._gen_output_file_name()
        return outputs
class TSNRInputSpec(BaseInterfaceInputSpec):
    """Inputs for :class:`TSNR`: a 4D series (or list of 3D volumes),
    optional polynomial detrending order, and output file names."""
    in_file = InputMultiPath(File(exists=True), mandatory=True,
                             desc='realigned 4D file or a list of 3D files')
    regress_poly = traits.Range(low=1, desc='Remove polynomials')
    tsnr_file = File('tsnr.nii.gz', usedefault=True, hash_files=False,
                     desc='output tSNR file')
    mean_file = File('mean.nii.gz', usedefault=True, hash_files=False,
                     desc='output mean file')
    stddev_file = File('stdev.nii.gz', usedefault=True, hash_files=False,
                       desc='output tSNR file')
    detrended_file = File('detrend.nii.gz', usedefault=True, hash_files=False,
                          desc='input file after detrending')
class TSNROutputSpec(TraitedSpec):
    """Outputs of :class:`TSNR`; detrended_file only exists when
    regress_poly was set."""
    tsnr_file = File(exists=True, desc='tsnr image file')
    mean_file = File(exists=True, desc='mean image file')
    stddev_file = File(exists=True, desc='std dev image file')
    detrended_file = File(desc='detrended input file')
class TSNR(BaseInterface):
    """Computes the time-course SNR for a time series
    Typically you want to run this on a realigned time-series.
    Example
    -------
    >>> tsnr = TSNR()
    >>> tsnr.inputs.in_file = 'functional.nii'
    >>> res = tsnr.run() # doctest: +SKIP
    """
    input_spec = TSNRInputSpec
    output_spec = TSNROutputSpec

    def _run_interface(self, runtime):
        img = nb.load(self.inputs.in_file[0])
        header = img.header.copy()
        # Stack all inputs along a 4th (time) axis; 3D volumes each
        # contribute one timepoint.
        vollist = [nb.load(filename) for filename in self.inputs.in_file]
        data = np.concatenate([vol.get_data().reshape(
            vol.get_shape()[:3] + (-1,)) for vol in vollist], axis=3)
        data = np.nan_to_num(data)

        # Integer data must be promoted so detrending/division are exact.
        if data.dtype.kind == 'i':
            header.set_data_dtype(np.float32)
            data = data.astype(np.float32)

        if isdefined(self.inputs.regress_poly):
            # Build a Legendre-polynomial design matrix (constant + orders
            # 1..regress_poly) over the time axis.
            timepoints = img.shape[-1]
            X = np.ones((timepoints, 1))
            for i in range(self.inputs.regress_poly):
                X = np.hstack((X, legendre(
                    i + 1)(np.linspace(-1, 1, timepoints))[:, None]))
            # Least-squares fit; remove only the non-constant terms so the
            # mean is preserved for the tSNR ratio below.
            betas = np.dot(np.linalg.pinv(X), np.rollaxis(data, 3, 2))
            datahat = np.rollaxis(np.dot(X[:, 1:],
                                         np.rollaxis(
                                             betas[1:, :, :, :], 0, 3)),
                                  0, 4)
            data = data - datahat
            img = nb.Nifti1Image(data, img.get_affine(), header)
            nb.save(img, op.abspath(self.inputs.detrended_file))

        # tSNR = temporal mean / temporal std, guarded against ~zero std.
        meanimg = np.mean(data, axis=3)
        stddevimg = np.std(data, axis=3)
        tsnr = np.zeros_like(meanimg)
        tsnr[stddevimg > 1.e-3] = meanimg[stddevimg > 1.e-3] / stddevimg[stddevimg > 1.e-3]
        img = nb.Nifti1Image(tsnr, img.get_affine(), header)
        nb.save(img, op.abspath(self.inputs.tsnr_file))
        img = nb.Nifti1Image(meanimg, img.get_affine(), header)
        nb.save(img, op.abspath(self.inputs.mean_file))
        img = nb.Nifti1Image(stddevimg, img.get_affine(), header)
        nb.save(img, op.abspath(self.inputs.stddev_file))
        return runtime

    def _list_outputs(self):
        outputs = self._outputs().get()
        for k in ['tsnr_file', 'mean_file', 'stddev_file']:
            outputs[k] = op.abspath(getattr(self.inputs, k))

        # The detrended file is only produced when detrending ran.
        if isdefined(self.inputs.regress_poly):
            outputs['detrended_file'] = op.abspath(self.inputs.detrended_file)
        return outputs
class GunzipInputSpec(BaseInterfaceInputSpec):
    """Input for :class:`Gunzip`: the gzipped file to decompress."""
    in_file = File(exists=True, mandatory=True)
class GunzipOutputSpec(TraitedSpec):
    """Output of :class:`Gunzip`: the decompressed file."""
    out_file = File(exists=True)
class Gunzip(BaseInterface):
    """Gunzip wrapper

    Decompresses ``in_file`` into the current working directory; the output
    name is the input name with any trailing ``.gz`` removed.
    """
    input_spec = GunzipInputSpec
    output_spec = GunzipOutputSpec

    def _gen_output_file_name(self):
        _, base, ext = split_filename(self.inputs.in_file)
        # BUGFIX: the original compared ext[-2:] (two chars) against ".gz"
        # (three chars), so the branch never fired, and then returned
        # base + ext[:-3], unconditionally chopping three characters off the
        # extension (mangling non-gz inputs such as "file.nii" -> "file.").
        if ext[-3:].lower() == ".gz":
            ext = ext[:-3]
        return os.path.abspath(base + ext)

    def _run_interface(self, runtime):
        import gzip
        # Context managers ensure both handles are closed (and the output is
        # flushed) even if the copy raises; the original leaked both.
        with gzip.open(self.inputs.in_file, 'rb') as in_file, \
                open(self._gen_output_file_name(), 'wb') as out_file:
            out_file.write(in_file.read())
        return runtime

    def _list_outputs(self):
        outputs = self._outputs().get()
        outputs['out_file'] = self._gen_output_file_name()
        return outputs
def replaceext(in_list, ext):
    """Return copies of the given file paths with their extension replaced
    by *ext* (each path is made absolute first)."""
    swapped = []
    for fname in in_list:
        folder, stem, _ = split_filename(op.abspath(fname))
        swapped.append(op.join(folder, stem) + ext)
    return swapped
def matlab2csv(in_array, name, reshape):
    """Write *in_array* to ``<name>.csv`` in the current directory and
    return the absolute path of the file written.

    When *reshape* is true, a 2D matrix is flattened into a single column
    (the layout R expects) before saving.
    """
    data = np.asarray(in_array)
    if reshape:
        if len(np.shape(data)) > 1:
            n_values = np.shape(data)[0] * np.shape(data)[1]
            data = np.reshape(data, (n_values, 1))
        iflogger.info(np.shape(data))
    out_path = op.abspath(name + '.csv')
    np.savetxt(out_path, data, delimiter=',')
    return out_path
class Matlab2CSVInputSpec(TraitedSpec):
    """Inputs for :class:`Matlab2CSV`: the .mat file and a flag controlling
    matrix flattening."""
    in_file = File(exists=True, mandatory=True, desc='Input MATLAB .mat file')
    reshape_matrix = traits.Bool(
        True, usedefault=True,
        desc='The output of this interface is meant for R, so matrices will be\
 reshaped to vectors by default.'
    )
class Matlab2CSVOutputSpec(TraitedSpec):
    """Output of :class:`Matlab2CSV`: one CSV per array-valued variable."""
    csv_files = OutputMultiPath(
        File(desc='Output CSV files for each variable saved in the input .mat\
 file')
    )
class Matlab2CSV(BaseInterface):
    """Simple interface to save the components of a MATLAB .mat file as a text
    file with comma-separated values (CSVs).
    CSV files are easily loaded in R, for use in statistical processing.
    For further information, see cran.r-project.org/doc/manuals/R-data.pdf
    Example
    -------
    >>> from nipype.algorithms import misc
    >>> mat2csv = misc.Matlab2CSV()
    >>> mat2csv.inputs.in_file = 'cmatrix.mat'
    >>> mat2csv.run() # doctest: +SKIP
    """
    input_spec = Matlab2CSVInputSpec
    output_spec = Matlab2CSVOutputSpec

    def _run_interface(self, runtime):
        in_dict = sio.loadmat(op.abspath(self.inputs.in_file))

        # Check if the file has multiple variables in it. If it does, loop
        # through them and save them as individual CSV files.
        # If not, save the variable as a single CSV file using the input file
        # name and a .csv extension.

        # Keys starting with "__" are scipy.io metadata, not user variables.
        saved_variables = list()
        for key in list(in_dict.keys()):
            if not key.startswith('__'):
                if isinstance(in_dict[key][0], np.ndarray):
                    saved_variables.append(key)
                else:
                    iflogger.info('One of the keys in the input file, {k}, is not a Numpy array'.format(k=key))

        if len(saved_variables) > 1:
            # One CSV per variable, named after the variable.
            iflogger.info(
                '{N} variables found:'.format(N=len(saved_variables)))
            iflogger.info(saved_variables)
            for variable in saved_variables:
                iflogger.info(
                    '...Converting {var} - type {ty} - to\
 CSV'.format(var=variable, ty=type(in_dict[variable]))
                )
                matlab2csv(
                    in_dict[variable], variable, self.inputs.reshape_matrix)
        elif len(saved_variables) == 1:
            # Single variable: CSV named after the input file instead.
            _, name, _ = split_filename(self.inputs.in_file)
            variable = saved_variables[0]
            iflogger.info('Single variable found {var}, type {ty}:'.format(
                var=variable, ty=type(in_dict[variable])))
            iflogger.info('...Converting {var} to CSV from {f}'.format(
                var=variable, f=self.inputs.in_file))
            matlab2csv(in_dict[variable], name, self.inputs.reshape_matrix)
        else:
            iflogger.error('No values in the MATLAB file?!')
        return runtime

    def _list_outputs(self):
        outputs = self.output_spec().get()
        in_dict = sio.loadmat(op.abspath(self.inputs.in_file))

        # Re-derive the variable list exactly as _run_interface did so the
        # reported outputs match what was written.
        saved_variables = list()
        for key in list(in_dict.keys()):
            if not key.startswith('__'):
                if isinstance(in_dict[key][0], np.ndarray):
                    saved_variables.append(key)
                else:
                    iflogger.error('One of the keys in the input file, {k}, is\
 not a Numpy array'.format(k=key))

        if len(saved_variables) > 1:
            outputs['csv_files'] = replaceext(saved_variables, '.csv')
        elif len(saved_variables) == 1:
            _, name, ext = split_filename(self.inputs.in_file)
            outputs['csv_files'] = op.abspath(name + '.csv')
        else:
            iflogger.error('No values in the MATLAB file?!')
        return outputs
def merge_csvs(in_list):
    """Load each CSV in *in_list* and stack them depth-wise into one array.

    Tries progressively more forgiving loads: plain, then skipping a header
    row, then additionally dropping the first column, then dropping the
    first and last columns.
    NOTE(review): an empty *in_list* leaves out_array unbound and raises
    UnboundLocalError; callers appear to always pass at least one file.
    """
    for idx, in_file in enumerate(in_list):
        try:
            in_array = np.loadtxt(in_file, delimiter=',')
        except ValueError as ex:
            try:
                # Retry assuming the first row is a header.
                in_array = np.loadtxt(in_file, delimiter=',', skiprows=1)
            except ValueError as ex:
                # Count the columns from the header so we can skip the
                # (presumably non-numeric) first column.
                first = open(in_file, 'r')
                header_line = first.readline()

                header_list = header_line.split(',')
                n_cols = len(header_list)
                try:
                    in_array = np.loadtxt(
                        in_file, delimiter=',', skiprows=1,
                        usecols=list(range(1, n_cols))
                    )
                except ValueError as ex:
                    # Last resort: also drop the final column.
                    in_array = np.loadtxt(
                        in_file, delimiter=',', skiprows=1, usecols=list(range(1, n_cols - 1)))
        if idx == 0:
            out_array = in_array
        else:
            out_array = np.dstack((out_array, in_array))
    out_array = np.squeeze(out_array)
    iflogger.info('Final output array shape:')
    iflogger.info(np.shape(out_array))
    return out_array
def remove_identical_paths(in_files):
    """Derive short, distinguishing names for a list of file paths.

    Strips the longest common directory prefix (and any '_subject_id_'
    marker) from each extension-less path; a single input just yields its
    base name.
    """
    import os.path as op
    from ..utils.filemanip import split_filename
    if len(in_files) > 1:
        out_names = list()
        commonprefix = op.commonprefix(in_files)
        # Cut the common prefix back to the last full directory component.
        lastslash = commonprefix.rfind('/')
        commonpath = commonprefix[0:(lastslash + 1)]
        for fileidx, in_file in enumerate(in_files):
            path, name, ext = split_filename(in_file)
            in_file = op.join(path, name)
            name = in_file.replace(commonpath, '')
            name = name.replace('_subject_id_', '')
            out_names.append(name)
    else:
        path, name, ext = split_filename(in_files[0])
        out_names = [name]
    return out_names
def maketypelist(rowheadings, shape, extraheadingBool, extraheading):
    """Build the numpy structured-dtype field list for a merged-CSV record.

    Numbered float fields are created for each data column (min(shape) of a
    2D array, or every element of a 1D array), optionally bracketed by a
    40-char 'heading' field in front and an extra string field at the end.
    """
    dtype_spec = []
    if rowheadings:
        dtype_spec.append(('heading', 'a40'))
    n_fields = min(shape) if len(shape) > 1 else shape[0]
    for col in range(1, n_fields + 1):
        dtype_spec.append((str(col), float))
    if extraheadingBool:
        dtype_spec.append((extraheading, 'a40'))
    iflogger.info(dtype_spec)
    return dtype_spec
def makefmtlist(output_array, typelist, rowheadingsBool,
                shape, extraheadingBool):
    """Pack *output_array* into a structured array and build the matching
    numpy.savetxt format string.

    Returns ``(fmt, output)`` where *fmt* is a comma-joined format list
    ('%s' for string fields, '%f' per numeric column) and *output* is the
    structured array with dtype *typelist* holding the data column-wise.
    """
    pieces = []
    if rowheadingsBool:
        pieces.append('%s')
    if len(shape) > 1:
        # 2D input: one record per row, one named field per column.
        output = np.zeros(max(shape), typelist)
        for col in range(1, min(shape) + 1):
            output[str(col)] = output_array[:, col - 1]
            pieces.append('%f')
    else:
        # 1D input: a single record with one field per element.
        output = np.zeros(1, typelist)
        for col in range(1, len(output_array) + 1):
            output[str(col)] = output_array[col - 1]
            pieces.append('%f')
    if extraheadingBool:
        pieces.append('%s')
    return ','.join(pieces), output
class MergeCSVFilesInputSpec(TraitedSpec):
    """Inputs for :class:`MergeCSVFiles`: the CSVs to merge plus optional
    column/row headings and an extra constant field."""
    in_files = InputMultiPath(File(exists=True), mandatory=True,
                              desc='Input comma-separated value (CSV) files')
    out_file = File('merged.csv', usedefault=True,
                    desc='Output filename for merged CSV file')
    column_headings = traits.List(
        traits.Str, desc='List of column headings to save in merged CSV file\
 (must be equal to number of input files). If left undefined, these\
 will be pulled from the input filenames.')
    row_headings = traits.List(
        traits.Str, desc='List of row headings to save in merged CSV file\
 (must be equal to number of rows in the input files).')
    row_heading_title = traits.Str(
        'label', usedefault=True, desc='Column heading for the row headings\
 added')
    extra_column_heading = traits.Str(
        desc='New heading to add for the added field.')
    extra_field = traits.Str(
        desc='New field to add to each row. This is useful for saving the\
 group or subject ID in the file.')
class MergeCSVFilesOutputSpec(TraitedSpec):
    """Output of :class:`MergeCSVFiles`: the merged CSV."""
    csv_file = File(desc='Output CSV file containing columns ')
class MergeCSVFiles(BaseInterface):
    """This interface is designed to facilitate data loading in the R environment.
    It takes input CSV files and merges them into a single CSV file.
    If provided, it will also incorporate column heading names into the
    resulting CSV file.
    CSV files are easily loaded in R, for use in statistical processing.
    For further information, see cran.r-project.org/doc/manuals/R-data.pdf
    Example
    -------
    >>> from nipype.algorithms import misc
    >>> mat2csv = misc.MergeCSVFiles()
    >>> mat2csv.inputs.in_files = ['degree.mat','clustering.mat']
    >>> mat2csv.inputs.column_headings = ['degree','clustering']
    >>> mat2csv.run() # doctest: +SKIP
    """
    input_spec = MergeCSVFilesInputSpec
    output_spec = MergeCSVFilesOutputSpec

    def _run_interface(self, runtime):
        extraheadingBool = False
        extraheading = ''
        rowheadingsBool = False
        """
        This block defines the column headings.
        """
        # Column headings come from the inputs, or are derived from the
        # input file names when not given.
        if isdefined(self.inputs.column_headings):
            iflogger.info('Column headings have been provided:')
            headings = self.inputs.column_headings
        else:
            iflogger.info(
                'Column headings not provided! Pulled from input filenames:')
            headings = remove_identical_paths(self.inputs.in_files)

        # An extra constant field adds one more (string) column.
        if isdefined(self.inputs.extra_field):
            if isdefined(self.inputs.extra_column_heading):
                extraheading = self.inputs.extra_column_heading
                iflogger.info('Extra column heading provided: {col}'.format(
                    col=extraheading))
            else:
                extraheading = 'type'
                iflogger.info(
                    'Extra column heading was not defined. Using "type"')
            headings.append(extraheading)
            extraheadingBool = True

        if len(self.inputs.in_files) == 1:
            iflogger.warn('Only one file input!')

        # Optional row headings get their own leading column.
        if isdefined(self.inputs.row_headings):
            iflogger.info('Row headings have been provided. Adding "labels"\
 column header.')
            prefix = '"{p}","'.format(p=self.inputs.row_heading_title)
            csv_headings = prefix + '","'.join(itertools.chain(
                headings)) + '"\n'
            rowheadingsBool = True
        else:
            iflogger.info('Row headings have not been provided.')
            csv_headings = '"' + '","'.join(itertools.chain(headings)) + '"\n'

        iflogger.info('Final Headings:')
        iflogger.info(csv_headings)
        """
        Next we merge the arrays and define the output text file
        """

        output_array = merge_csvs(self.inputs.in_files)
        _, name, ext = split_filename(self.inputs.out_file)
        if not ext == '.csv':
            ext = '.csv'

        out_file = op.abspath(name + ext)
        file_handle = open(out_file, 'w')
        file_handle.write(csv_headings)

        # Build a structured record array + format string so savetxt can
        # emit mixed string/float columns.
        shape = np.shape(output_array)
        typelist = maketypelist(
            rowheadingsBool, shape, extraheadingBool, extraheading)
        fmt, output = makefmtlist(
            output_array, typelist, rowheadingsBool, shape, extraheadingBool)

        if rowheadingsBool:
            row_heading_list = self.inputs.row_headings
            row_heading_list_with_quotes = []
            for row_heading in row_heading_list:
                row_heading_with_quotes = '"' + row_heading + '"'
                row_heading_list_with_quotes.append(row_heading_with_quotes)
            row_headings = np.array(row_heading_list_with_quotes, dtype='|S40')
            output['heading'] = row_headings

        if isdefined(self.inputs.extra_field):
            extrafieldlist = []
            # One copy of the constant per record in the output array.
            if len(shape) > 1:
                mx = shape[0]
            else:
                mx = 1
            for idx in range(0, mx):
                extrafieldlist.append(self.inputs.extra_field)
            iflogger.info(len(extrafieldlist))
            output[extraheading] = extrafieldlist
        iflogger.info(output)
        iflogger.info(fmt)
        np.savetxt(file_handle, output, fmt, delimiter=',')
        file_handle.close()
        return runtime

    def _list_outputs(self):
        outputs = self.output_spec().get()
        _, name, ext = split_filename(self.inputs.out_file)
        if not ext == '.csv':
            ext = '.csv'
        out_file = op.abspath(name + ext)
        outputs['csv_file'] = out_file
        return outputs
class AddCSVColumnInputSpec(TraitedSpec):
    """Inputs for :class:`AddCSVColumn`: the source CSV, output name, and
    the constant heading/value pair to append."""
    in_file = File(exists=True, mandatory=True,
                   desc='Input comma-separated value (CSV) files')
    out_file = File('extra_heading.csv', usedefault=True,
                    desc='Output filename for merged CSV file')
    extra_column_heading = traits.Str(
        desc='New heading to add for the added field.')
    extra_field = traits.Str(
        desc='New field to add to each row. This is useful for saving the\
 group or subject ID in the file.')
class AddCSVColumnOutputSpec(TraitedSpec):
    """Output of :class:`AddCSVColumn`: the CSV with the extra column."""
    csv_file = File(desc='Output CSV file containing columns ')
class AddCSVColumn(BaseInterface):
    """Short interface to add an extra column and field to a text file

    Example
    -------
    >>> from nipype.algorithms import misc
    >>> addcol = misc.AddCSVColumn()
    >>> addcol.inputs.in_file = 'degree.csv'
    >>> addcol.inputs.extra_column_heading = 'group'
    >>> addcol.inputs.extra_field = 'male'
    >>> addcol.run() # doctest: +SKIP
    """
    input_spec = AddCSVColumnInputSpec
    output_spec = AddCSVColumnOutputSpec

    def _run_interface(self, runtime):
        # Force a .csv extension on the output name.
        _, name, ext = split_filename(self.inputs.out_file)
        if not ext == '.csv':
            ext = '.csv'
        out_name = op.abspath(name + ext)
        # FIX: use context managers -- the original never closed either
        # handle, so the output could stay unflushed until interpreter exit.
        with open(self.inputs.in_file, 'r') as in_file, \
                open(out_name, 'w') as out_file:
            # Header row gets the quoted extra heading appended.
            firstline = in_file.readline()
            firstline = firstline.replace('\n', '')
            new_firstline = firstline + ',"' + \
                self.inputs.extra_column_heading + '"\n'
            out_file.write(new_firstline)
            # Every data row gets the (unquoted) extra field appended.
            for line in in_file:
                new_line = line.replace('\n', '')
                new_line = new_line + ',' + self.inputs.extra_field + '\n'
                out_file.write(new_line)
        return runtime

    def _list_outputs(self):
        outputs = self.output_spec().get()
        _, name, ext = split_filename(self.inputs.out_file)
        if not ext == '.csv':
            ext = '.csv'
        out_file = op.abspath(name + ext)
        outputs['csv_file'] = out_file
        return outputs
class AddCSVRowInputSpec(DynamicTraitedSpec, BaseInterfaceInputSpec):
    """Dynamic input spec for :class:`AddCSVRow`; arbitrary attributes set
    by the user are captured in ``_outputs`` and become CSV columns."""
    in_file = traits.File(mandatory=True,
                          desc='Input comma-separated value (CSV) files')
    # Maps column name -> value for the row being added.
    _outputs = traits.Dict(traits.Any, value={}, usedefault=True)

    def __setattr__(self, key, value):
        # Attributes that are not declared traits are recorded as dynamic
        # output fields instead of (only) regular attributes.
        if key not in self.copyable_trait_names():
            if not isdefined(value):
                super(AddCSVRowInputSpec, self).__setattr__(key, value)
            self._outputs[key] = value
        else:
            if key in self._outputs:
                self._outputs[key] = value
            super(AddCSVRowInputSpec, self).__setattr__(key, value)
class AddCSVRowOutputSpec(TraitedSpec):
    """Output of :class:`AddCSVRow`: the CSV file that was appended to."""
    csv_file = File(desc='Output CSV file containing rows ')
class AddCSVRow(BaseInterface):
    """Simple interface to add an extra row to a csv file
    .. note:: Requires `pandas <http://pandas.pydata.org/>`_
    .. warning:: Multi-platform thread-safe execution is possible with
        `lockfile <https://pythonhosted.org/lockfile/lockfile.html>`_. Please
        recall that (1) this module is alpha software; and (2) it should be
        installed for thread-safe writing.
        If lockfile is not installed, then the interface is not thread-safe.
    Example
    -------
    >>> from nipype.algorithms import misc
    >>> addrow = misc.AddCSVRow()
    >>> addrow.inputs.in_file = 'scores.csv'
    >>> addrow.inputs.si = 0.74
    >>> addrow.inputs.di = 0.93
    >>> addrow.inputs.subject_id = 'S400'
    >>> addrow.inputs.list_of_values = [ 0.4, 0.7, 0.3 ]
    >>> addrow.run() # doctest: +SKIP
    """
    input_spec = AddCSVRowInputSpec
    output_spec = AddCSVRowOutputSpec

    def __init__(self, infields=None, force_run=True, **kwargs):
        """Create the interface; *infields* pre-declares dynamic column
        names, *force_run* makes the node re-run on every execution."""
        super(AddCSVRow, self).__init__(**kwargs)
        undefined_traits = {}
        self._infields = infields
        self._have_lock = False
        self._lock = None

        if infields:
            # Register each column as an undefined Any trait on the inputs.
            for key in infields:
                self.inputs.add_trait(key, traits.Any)
                self.inputs._outputs[key] = Undefined
                undefined_traits[key] = Undefined
            self.inputs.trait_set(trait_change_notify=False, **undefined_traits)

        if force_run:
            self._always_run = True

    def _run_interface(self, runtime):
        try:
            import pandas as pd
        except ImportError:
            raise ImportError(('This interface requires pandas '
                               '(http://pandas.pydata.org/) to run.'))

        # lockfile is optional; without it concurrent writers may clobber
        # each other (best-effort behavior, kept deliberately).
        try:
            import lockfile as pl
            self._have_lock = True
        except ImportError:
            from warnings import warn
            warn(('Python module lockfile was not found: AddCSVRow will not be'
                  ' thread-safe in multi-processor execution'))

        input_dict = {}
        for key, val in list(self.inputs._outputs.items()):
            # expand lists to several columns
            if key == 'trait_added' and val in self.inputs.copyable_trait_names():
                continue

            if isinstance(val, list):
                for i, v in enumerate(val):
                    input_dict['%s_%d' % (key, i)] = v
            else:
                input_dict[key] = val

        df = pd.DataFrame([input_dict])

        if self._have_lock:
            self._lock = pl.FileLock(self.inputs.in_file)

            # Acquire lock
            self._lock.acquire()

        # Append to any existing rows before rewriting the whole file.
        if op.exists(self.inputs.in_file):
            formerdf = pd.read_csv(self.inputs.in_file, index_col=0)
            df = pd.concat([formerdf, df], ignore_index=True)

        with open(self.inputs.in_file, 'w') as f:
            df.to_csv(f)

        if self._have_lock:
            self._lock.release()

        # Using nipype.external.portalocker this might be something like:
        # with pl.Lock(self.inputs.in_file, timeout=1) as fh:
        #     if op.exists(fh):
        #         formerdf = pd.read_csv(fh, index_col=0)
        #         df = pd.concat([formerdf, df], ignore_index=True)
        #     df.to_csv(fh)

        return runtime

    def _list_outputs(self):
        outputs = self.output_spec().get()
        outputs['csv_file'] = self.inputs.in_file
        return outputs

    def _outputs(self):
        return self._add_output_traits(super(AddCSVRow, self)._outputs())

    def _add_output_traits(self, base):
        return base
class CalculateNormalizedMomentsInputSpec(TraitedSpec):
    """Inputs for :class:`CalculateNormalizedMoments`: a timeseries text
    file and the moment order to compute."""
    timeseries_file = File(
        exists=True, mandatory=True,
        desc='Text file with timeseries in columns and timepoints in rows,\
 whitespace separated')
    moment = traits.Int(
        mandatory=True,
        desc="Define which moment should be calculated, 3 for skewness, 4 for\
 kurtosis.")
class CalculateNormalizedMomentsOutputSpec(TraitedSpec):
    """Output of :class:`CalculateNormalizedMoments`: one value per column."""
    moments = traits.List(traits.Float(), desc='Moments')
class CalculateNormalizedMoments(BaseInterface):
    """Calculates moments of timeseries.
    Example
    -------
    >>> from nipype.algorithms import misc
    >>> skew = misc.CalculateNormalizedMoments()
    >>> skew.inputs.moment = 3
    >>> skew.inputs.timeseries_file = 'timeseries.txt'
    >>> skew.run() # doctest: +SKIP
    """
    input_spec = CalculateNormalizedMomentsInputSpec
    output_spec = CalculateNormalizedMomentsOutputSpec

    def _run_interface(self, runtime):
        # Delegate the actual computation to the module-level helper.
        self._moments = calc_moments(
            self.inputs.timeseries_file, self.inputs.moment)
        return runtime

    def _list_outputs(self):
        outputs = self.output_spec().get()
        # BUGFIX: the output trait is named 'moments' (see
        # CalculateNormalizedMomentsOutputSpec); the original wrote to
        # 'skewness', which does not exist, so the result never reached
        # downstream nodes.
        outputs['moments'] = self._moments
        return outputs
def calc_moments(timeseries_file, moment):
    """Return the n-th standardized central moment of each timeseries
    column (3 gives skewness, 4 gives kurtosis).

    Keyword arguments:
    timeseries_file -- text file with white space separated timepoints in rows
    """
    data = np.genfromtxt(timeseries_file)
    variance = stats.moment(data, 2, axis=0)
    raw_moment = stats.moment(data, moment, axis=0)
    # Columns with zero variance get 0 instead of a division-by-zero NaN.
    flat = variance == 0
    return np.where(flat, 0, raw_moment / variance ** (moment / 2.0))
class AddNoiseInputSpec(TraitedSpec):
    """Inputs for :class:`AddNoise`: the image, an optional foreground
    mask, target SNR and the noise distributions to use."""
    in_file = File(exists=True, mandatory=True,
                   desc='input image that will be corrupted with noise')
    in_mask = File(exists=True, desc=('input mask, voxels outside this mask '
                                      'will be considered background'))
    snr = traits.Float(10.0, desc='desired output SNR in dB', usedefault=True)
    dist = traits.Enum('normal', 'rician', usedefault=True, mandatory=True,
                       desc=('desired noise distribution'))
    bg_dist = traits.Enum('normal', 'rayleigh', usedefault=True, mandatory=True,
                          desc=('desired noise distribution, currently '
                                'only normal is implemented'))
    out_file = File(desc='desired output filename')
class AddNoiseOutputSpec(TraitedSpec):
    """Output of :class:`AddNoise`: the noise-corrupted image."""
    out_file = File(exists=True, desc='corrupted image')
class AddNoise(BaseInterface):
    """
    Corrupts with noise the input image
    Example
    -------
    >>> from nipype.algorithms.misc import AddNoise
    >>> noise = AddNoise()
    >>> noise.inputs.in_file = 'T1.nii'
    >>> noise.inputs.in_mask = 'mask.nii'
    >>> noise.snr = 30.0
    >>> noise.run() # doctest: +SKIP
    """
    input_spec = AddNoiseInputSpec
    output_spec = AddNoiseOutputSpec

    def _run_interface(self, runtime):
        in_image = nb.load(self.inputs.in_file)
        in_data = in_image.get_data()
        snr = self.inputs.snr

        # No mask given: treat every voxel as foreground signal.
        if isdefined(self.inputs.in_mask):
            in_mask = nb.load(self.inputs.in_mask).get_data()
        else:
            in_mask = np.ones_like(in_data)

        result = self.gen_noise(in_data, mask=in_mask, snr_db=snr,
                                dist=self.inputs.dist, bg_dist=self.inputs.bg_dist)
        res_im = nb.Nifti1Image(result, in_image.affine, in_image.header)
        res_im.to_filename(self._gen_output_filename())
        return runtime

    def _gen_output_filename(self):
        # Default output name encodes the SNR: "<base>_SNRxx.xx<ext>".
        if not isdefined(self.inputs.out_file):
            _, base, ext = split_filename(self.inputs.in_file)
            out_file = os.path.abspath('%s_SNR%03.2f%s' % (base, self.inputs.snr, ext))
        else:
            out_file = self.inputs.out_file

        return out_file

    def _list_outputs(self):
        outputs = self.output_spec().get()
        outputs['out_file'] = self._gen_output_filename()
        return outputs

    def gen_noise(self, image, mask=None, snr_db=10.0, dist='normal', bg_dist='normal'):
        """
        Generates a copy of an image with a certain amount of
        added gaussian noise (rayleigh for background in mask)
        """
        from math import sqrt
        # Convert SNR from dB to an amplitude ratio.
        snr = sqrt(np.power(10.0, snr_db / 10.0))

        if mask is None:
            mask = np.ones_like(image)
        else:
            # Binarize the mask.
            mask[mask > 0] = 1
            mask[mask < 1] = 0

            # Broadcast a 3D mask over a 4D image's time axis.
            if mask.ndim < image.ndim:
                mask = np.rollaxis(np.array([mask] * image.shape[3]), 0, 4)

        signal = image[mask > 0].reshape(-1)

        if dist == 'normal':
            # Noise sigma derived from the de-meaned signal variance.
            signal = signal - signal.mean()
            sigma_n = sqrt(signal.var() / snr)
            noise = np.random.normal(size=image.shape, scale=sigma_n)

            # Background voxels may use Rayleigh noise instead.
            if (np.any(mask == 0)) and (bg_dist == 'rayleigh'):
                bg_noise = np.random.rayleigh(size=image.shape, scale=sigma_n)
                noise[mask == 0] = bg_noise[mask == 0]

            im_noise = image + noise

        elif dist == 'rician':
            # Rician: magnitude of the signal plus two orthogonal Gaussian
            # components.
            sigma_n = signal.mean() / snr
            n_1 = np.random.normal(size=image.shape, scale=sigma_n)
            n_2 = np.random.normal(size=image.shape, scale=sigma_n)
            stde_1 = n_1 / sqrt(2.0)
            stde_2 = n_2 / sqrt(2.0)
            im_noise = np.sqrt((image + stde_1)**2 + (stde_2)**2)
        else:
            raise NotImplementedError(('Only normal and rician distributions '
                                       'are supported'))

        return im_noise
class NormalizeProbabilityMapSetInputSpec(TraitedSpec):
    """Input specification for :class:`NormalizeProbabilityMapSet`."""
    # NOTE(review): ``mandatory=True`` is set on the inner File trait rather
    # than on the InputMultiPath itself — confirm it is actually enforced at
    # the interface level.
    in_files = InputMultiPath(File(exists=True, mandatory=True,
                              desc='The tpms to be normalized'))
    in_mask = File(exists=True,
                   desc='Masked voxels must sum up 1.0, 0.0 otherwise.')
class NormalizeProbabilityMapSetOutputSpec(TraitedSpec):
    """Output specification for :class:`NormalizeProbabilityMapSet`."""
    out_files = OutputMultiPath(File(exists=True),
                                desc="normalized maps")
class NormalizeProbabilityMapSet(BaseInterface):
    """ Returns the input tissue probability maps (tpms, aka volume fractions)
    normalized to sum up 1.0 at each voxel within the mask.

    .. note:: Please recall this is not a spatial normalization algorithm

    Example
    -------
    >>> from nipype.algorithms import misc
    >>> normalize = misc.NormalizeProbabilityMapSet()
    >>> normalize.inputs.in_files = [ 'tpm_00.nii.gz', 'tpm_01.nii.gz', \
'tpm_02.nii.gz' ]
    >>> normalize.inputs.in_mask = 'tpms_msk.nii.gz'
    >>> normalize.run() # doctest: +SKIP
    """
    input_spec = NormalizeProbabilityMapSetInputSpec
    output_spec = NormalizeProbabilityMapSetOutputSpec

    def _run_interface(self, runtime):
        # Forward the mask only when the user actually set one; the heavy
        # lifting happens in the module-level ``normalize_tpms`` helper.
        mask = self.inputs.in_mask if isdefined(self.inputs.in_mask) else None
        self._out_filenames = normalize_tpms(self.inputs.in_files, mask)
        return runtime

    def _list_outputs(self):
        outputs = self.output_spec().get()
        outputs['out_files'] = self._out_filenames
        return outputs
class SplitROIsInputSpec(TraitedSpec):
    """Input specification for :class:`SplitROIs`."""
    in_file = File(exists=True, mandatory=True,
                   desc='file to be splitted')
    in_mask = File(exists=True, desc='only process files inside mask')
    roi_size = traits.Tuple(traits.Int, traits.Int, traits.Int,
                            desc='desired ROI size')
class SplitROIsOutputSpec(TraitedSpec):
    """Output specification for :class:`SplitROIs`."""
    out_files = OutputMultiPath(File(exists=True),
                                desc='the resulting ROIs')
    out_masks = OutputMultiPath(File(exists=True),
                                desc='a mask indicating valid values')
    out_index = OutputMultiPath(File(exists=True),
                                desc='arrays keeping original locations')
class SplitROIs(BaseInterface):
    """
    Splits a 3D image in small chunks to enable parallel processing.
    ROIs keep time series structure in 4D images.

    Example
    -------
    >>> from nipype.algorithms import misc
    >>> rois = misc.SplitROIs()
    >>> rois.inputs.in_file = 'diffusion.nii'
    >>> rois.inputs.in_mask = 'mask.nii'
    >>> rois.run() # doctest: +SKIP
    """
    input_spec = SplitROIsInputSpec
    output_spec = SplitROIsOutputSpec

    def _run_interface(self, runtime):
        # Optional inputs are passed as ``None`` so ``split_rois`` applies
        # its own defaults.
        mask = self.inputs.in_mask if isdefined(self.inputs.in_mask) else None
        roisize = self.inputs.roi_size if isdefined(self.inputs.roi_size) else None
        roi_files, roi_masks, roi_index = split_rois(self.inputs.in_file,
                                                     mask, roisize)
        self._outnames = {
            'out_files': roi_files,
            'out_masks': roi_masks,
            'out_index': roi_index,
        }
        return runtime

    def _list_outputs(self):
        outputs = self.output_spec().get()
        for key, value in self._outnames.items():
            outputs[key] = value
        return outputs
class MergeROIsInputSpec(TraitedSpec):
    """Input specification for :class:`MergeROIs`."""
    # NOTE(review): for ``in_files`` the ``mandatory=True`` sits on the inner
    # File trait, unlike ``in_index`` — confirm both are enforced the same way.
    in_files = InputMultiPath(File(exists=True, mandatory=True,
                              desc='files to be re-merged'))
    in_index = InputMultiPath(File(exists=True, mandatory=True),
                              desc='array keeping original locations')
    in_reference = File(exists=True, desc='reference file')
class MergeROIsOutputSpec(TraitedSpec):
    """Output specification for :class:`MergeROIs`."""
    merged_file = File(exists=True, desc='the recomposed file')
class MergeROIs(BaseInterface):
    """
    Re-builds an image from the ROI chunks produced by :class:`SplitROIs`.
    ROIs keep time series structure in 4D images.

    Example
    -------
    >>> from nipype.algorithms import misc
    >>> rois = misc.MergeROIs()
    >>> rois.inputs.in_files = ['roi%02d.nii' % i for i in range(1, 6)]
    >>> rois.inputs.in_reference = 'mask.nii'
    >>> rois.inputs.in_index = ['roi%02d_idx.npz' % i for i in range(1, 6)]
    >>> rois.run() # doctest: +SKIP
    """
    input_spec = MergeROIsInputSpec
    output_spec = MergeROIsOutputSpec
    def _run_interface(self, runtime):
        # Delegate the recomposition to the module-level ``merge_rois`` helper.
        res = merge_rois(self.inputs.in_files,
                         self.inputs.in_index,
                         self.inputs.in_reference)
        self._merged = res
        return runtime
    def _list_outputs(self):
        outputs = self.output_spec().get()
        outputs['merged_file'] = self._merged
        return outputs
def normalize_tpms(in_files, in_mask=None, out_files=None):
    """
    Returns the input tissue probability maps (tpms, aka volume fractions)
    normalized to sum up 1.0 at each voxel within the mask.

    :param in_files: one or more tpm file names.
    :param in_mask: optional mask file; voxels outside it are zeroed.
    :param out_files: optional list of output file names; generated from the
        input names when not supplied.
    :return: the single output file name (one input) or the list of output
        file names.
    """
    import nibabel as nib
    import numpy as np
    import os.path as op

    # BUG FIX: the signature previously used a mutable default
    # (``out_files=[]``), so generated names accumulated across calls.
    if out_files is None:
        out_files = []

    in_files = np.atleast_1d(in_files).tolist()
    if len(out_files) != len(in_files):
        # Derive one output name per input, preserving .nii.gz double suffix.
        for i, finname in enumerate(in_files):
            fname, fext = op.splitext(op.basename(finname))
            if fext == '.gz':
                fname, fext2 = op.splitext(fname)
                fext = fext2 + fext
            out_file = op.abspath('%s_norm_%02d%s' % (fname, i, fext))
            out_files += [out_file]

    imgs = [nib.load(fim) for fim in in_files]

    if len(in_files) == 1:
        # A single map is simply binarized.
        img_data = imgs[0].get_data()
        img_data[img_data > 0.0] = 1.0
        hdr = imgs[0].header.copy()
        hdr['data_type'] = 16
        hdr.set_data_dtype(np.float32)
        nib.save(nib.Nifti1Image(img_data.astype(np.float32), imgs[0].affine,
                                 hdr), out_files[0])
        return out_files[0]

    img_data = np.array([im.get_data() for im in imgs]).astype(np.float32)
    # img_data[img_data>1.0] = 1.0
    img_data[img_data < 0.0] = 0.0
    # Per-voxel sum across maps; voxels with zero total are masked out.
    weights = np.sum(img_data, axis=0)
    msk = np.ones_like(imgs[0].get_data())
    msk[weights <= 0] = 0
    if in_mask is not None:
        msk = nib.load(in_mask).get_data()
        msk[msk <= 0] = 0
        msk[msk > 0] = 1
    msk = np.ma.masked_equal(msk, 0)

    for i, out_file in enumerate(out_files):
        data = np.ma.masked_equal(img_data[i], 0)
        # Normalize each map by the per-voxel sum so maps add up to 1.0.
        probmap = data / weights
        hdr = imgs[i].header.copy()
        hdr['data_type'] = 16
        hdr.set_data_dtype('float32')
        nib.save(nib.Nifti1Image(probmap.astype(np.float32), imgs[i].affine,
                                 hdr), out_file)
    return out_files
def split_rois(in_file, mask=None, roishape=None):
    """
    Splits an image in ROIs for parallel processing

    :param in_file: image to split (assumes a 4D image whose last axis holds
        the volumes — TODO confirm behaviour for 3D inputs).
    :param mask: optional mask file; only voxels inside it are distributed
        into ROIs.
    :param roishape: 3-tuple with the ROI shape; defaults to (10, 10, 1).
    :return: (roi file names, roi mask file names, npz index file names),
        where each npz stores the flat voxel indices of the corresponding ROI.
    """
    import nibabel as nb
    import numpy as np
    from math import sqrt, ceil
    import os.path as op
    if roishape is None:
        roishape = (10, 10, 1)
    im = nb.load(in_file)
    imshape = im.shape
    dshape = imshape[:3]
    nvols = imshape[-1]
    roisize = roishape[0] * roishape[1] * roishape[2]
    droishape = (roishape[0], roishape[1], roishape[2], nvols)
    if mask is not None:
        # Binarize the mask (NOTE: this mutates the freshly-loaded array,
        # which is local to this function).
        mask = nb.load(mask).get_data()
        mask[mask > 0] = 1
        mask[mask < 1] = 0
    else:
        mask = np.ones(dshape)
    mask = mask.reshape(-1).astype(np.uint8)
    nzels = np.nonzero(mask)
    els = np.sum(mask)
    # Number of ROIs needed to hold all foreground voxels.
    nrois = int(ceil(els / float(roisize)))
    data = im.get_data().reshape((mask.size, -1))
    data = np.squeeze(data.take(nzels, axis=0))
    nvols = data.shape[-1]
    # Shared all-ones ROI mask, reused for every completely-filled ROI.
    roidefname = op.abspath('onesmask.nii.gz')
    nb.Nifti1Image(np.ones(roishape, dtype=np.uint8), None,
                   None).to_filename(roidefname)
    out_files = []
    out_mask = []
    out_idxs = []
    for i in range(nrois):
        first = i * roisize
        last = (i + 1) * roisize
        fill = 0
        if last > els:
            # Last ROI: pad with zeros up to the full ROI size.
            fill = last - els
            last = els
        droi = data[first:last, ...]
        iname = op.abspath('roi%010d_idx' % i)
        out_idxs.append(iname + '.npz')
        # Save the original flat indices so merge_rois can restore locations.
        np.savez(iname, (nzels[0][first:last],))
        if fill > 0:
            droi = np.vstack((droi, np.zeros((fill, nvols), dtype=np.float32)))
            partialmsk = np.ones((roisize,), dtype=np.uint8)
            partialmsk[-fill:] = 0
            partname = op.abspath('partialmask.nii.gz')
            nb.Nifti1Image(partialmsk.reshape(roishape), None,
                           None).to_filename(partname)
            out_mask.append(partname)
        else:
            out_mask.append(roidefname)
        fname = op.abspath('roi%010d.nii.gz' % i)
        nb.Nifti1Image(droi.reshape(droishape),
                       None, None).to_filename(fname)
        out_files.append(fname)
    return out_files, out_mask, out_idxs
def merge_rois(in_files, in_idxs, in_ref,
               dtype=None, out_file=None):
    """
    Re-builds an image resulting from a parallelized processing

    :param in_files: ROI chunk files produced by ``split_rois``.
    :param in_idxs: npz files with the original flat voxel indices of each
        chunk.
    :param in_ref: reference image providing geometry (affine, header, shape).
    :param dtype: output dtype; defaults to ``np.float32``.
    :param out_file: output file name; defaults to ``merged.nii.gz``.
    :return: the output file name.
    """
    import nibabel as nb
    import numpy as np
    import os.path as op
    import subprocess as sp
    if out_file is None:
        out_file = op.abspath('merged.nii.gz')
    if dtype is None:
        dtype = np.float32
    # if file is compressed, uncompress using os
    # to avoid memory errors
    if op.splitext(in_ref)[1] == '.gz':
        try:
            # BUG FIX: the message used '%i' with a string path, raising a
            # TypeError that the bare except swallowed — so decompression
            # silently never ran.
            iflogger.info('uncompress %s' % in_ref)
            # BUG FIX: dropped shell=True; combined with a list argument it
            # executed ``gunzip`` with no operand at all.
            sp.check_call(['gunzip', in_ref], stdout=sp.PIPE)
            in_ref = op.splitext(in_ref)[0]
        except Exception:
            # Best effort only: nibabel can still read the compressed file.
            pass
    ref = nb.load(in_ref)
    aff = ref.affine
    hdr = ref.header.copy()
    rsh = ref.shape
    del ref
    npix = rsh[0] * rsh[1] * rsh[2]
    fcdata = nb.load(in_files[0]).get_data()
    if fcdata.ndim == 4:
        ndirs = fcdata.shape[-1]
    else:
        ndirs = 1
    newshape = (rsh[0], rsh[1], rsh[2], ndirs)
    hdr.set_data_dtype(dtype)
    hdr.set_xyzt_units('mm', 'sec')
    if ndirs < 300:
        # Few volumes: merge fully in memory.
        data = np.zeros((npix, ndirs))
        for cname, iname in zip(in_files, in_idxs):
            f = np.load(iname)
            idxs = np.squeeze(f['arr_0'])
            cdata = nb.load(cname).get_data().reshape(-1, ndirs)
            nels = len(idxs)
            idata = (idxs, )
            try:
                data[idata, ...] = cdata[0:nels, ...]
            except Exception:
                print(('Consistency between indexes and chunks was '
                       'lost: data=%s, chunk=%s') % (str(data.shape),
                                                     str(cdata.shape)))
                raise
        hdr.set_data_shape(newshape)
        nb.Nifti1Image(data.reshape(newshape).astype(dtype),
                       aff, hdr).to_filename(out_file)
    else:
        # Many volumes: write one file per volume to bound memory use,
        # then concatenate them at the end.
        hdr.set_data_shape(rsh[:3])
        nii = []
        for d in range(ndirs):
            fname = op.abspath('vol%06d.nii' % d)
            nb.Nifti1Image(np.zeros(rsh[:3]), aff, hdr).to_filename(fname)
            nii.append(fname)
        for cname, iname in zip(in_files, in_idxs):
            f = np.load(iname)
            idxs = np.squeeze(f['arr_0'])
            for d, fname in enumerate(nii):
                data = nb.load(fname).get_data().reshape(-1)
                cdata = nb.load(cname).get_data().reshape(-1, ndirs)[:, d]
                nels = len(idxs)
                idata = (idxs, )
                data[idata] = cdata[0:nels]
                nb.Nifti1Image(data.reshape(rsh[:3]),
                               aff, hdr).to_filename(fname)
        imgs = [nb.load(im) for im in nii]
        allim = nb.concat_images(imgs)
        allim.to_filename(out_file)
    return out_file
# Deprecated interfaces ------------------------------------------------------
class Distance(nam.Distance):
    """Calculates distance between two volumes.

    .. deprecated:: 0.10.0
       Use :py:class:`nipype.algorithms.metrics.Distance` instead.
    """
    def __init__(self, **inputs):
        # BUG FIX: ``super(nam.Distance, self)`` skipped nam.Distance's own
        # __init__ in the MRO; start the chain from this subclass instead.
        super(Distance, self).__init__(**inputs)
        warnings.warn(("This interface has been deprecated since 0.10.0,"
                       " please use nipype.algorithms.metrics.Distance"),
                      DeprecationWarning)
class Overlap(nam.Overlap):
    """Calculates various overlap measures between two maps.

    .. deprecated:: 0.10.0
       Use :py:class:`nipype.algorithms.metrics.Overlap` instead.
    """
    def __init__(self, **inputs):
        # BUG FIX: ``super(nam.Overlap, self)`` skipped nam.Overlap's own
        # __init__ in the MRO; start the chain from this subclass instead.
        super(Overlap, self).__init__(**inputs)
        warnings.warn(("This interface has been deprecated since 0.10.0,"
                       " please use nipype.algorithms.metrics.Overlap"),
                      DeprecationWarning)
class FuzzyOverlap(nam.FuzzyOverlap):
    """Calculates various overlap measures between two maps, using a fuzzy
    definition.

    .. deprecated:: 0.10.0
       Use :py:class:`nipype.algorithms.metrics.FuzzyOverlap` instead.
    """
    def __init__(self, **inputs):
        # BUG FIX: ``super(nam.FuzzyOverlap, self)`` skipped the parent's own
        # __init__ in the MRO; start the chain from this subclass instead.
        super(FuzzyOverlap, self).__init__(**inputs)
        warnings.warn(("This interface has been deprecated since 0.10.0,"
                       " please use nipype.algorithms.metrics.FuzzyOverlap"),
                      DeprecationWarning)
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Converter construction support.
This module contains a base class for all converters, as well as supporting
structures. These structures are referred to as contexts.
The class hierarchy is as follows:
<your converter>
[extends] converter.Base
[extends] transformer.Base
        [extends] gast.NodeTransformer
        [uses] transformer.SourceInfo
  [uses] converter.EntityContext
    [uses] converter.ProgramContext
    [uses] transformer.SourceInfo
converter.Base is a specialization of transformer.Base for AutoGraph. It's a
very lightweight subclass that adds a `ctx` attribute holding the corresponding
EntityContext object (see below). Note that converters are not reusable, and
`visit` will raise an error if called more than once.
converter.EntityContext contains mutable state associated with an entity that
the converter processes.
converter.ProgramContext contains mutable state across related entities. For
example, when converting several functions that call one another, the
ProgramContext should be shared across these entities.
Below is the overall flow at conversion:
program_ctx = ProgramContext(<entities to convert>, <global settings>, ...)
while <program_ctx has more entities to convert>:
entity, source_info = <get next entity from program_ctx>
entity_ctx = EntityContext(program_ctx, source_info)
for <each ConverterClass>:
converter = ConverterClass(entity_ctx)
# May update entity_ctx and program_ctx
entity = converter.visit(entity)
<add entity's dependencies to program_ctx>
Note that pyct contains a small number of transformers used for static analysis.
These implement transformer.Base, rather than converter.Base, to avoid a
dependency on AutoGraph.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from enum import Enum
from tensorflow.python.autograph.core import config
from tensorflow.python.autograph.core import naming
from tensorflow.python.autograph.pyct import anno
from tensorflow.python.autograph.pyct import ast_util
from tensorflow.python.autograph.pyct import cfg
from tensorflow.python.autograph.pyct import compiler
from tensorflow.python.autograph.pyct import inspect_utils
from tensorflow.python.autograph.pyct import parser
from tensorflow.python.autograph.pyct import qual_names
from tensorflow.python.autograph.pyct import templates
from tensorflow.python.autograph.pyct import transformer
from tensorflow.python.autograph.pyct.static_analysis import activity
from tensorflow.python.autograph.pyct.static_analysis import live_values
from tensorflow.python.autograph.pyct.static_analysis import liveness
from tensorflow.python.autograph.pyct.static_analysis import reaching_definitions
from tensorflow.python.autograph.pyct.static_analysis import type_info
# TODO(mdan): These contexts can be refactored into first class objects.
# For example, we could define Program and Entity abstractions that hold on
# to the actual entity and have conversion methods.
# TODO(mdan): Add a test specific to this converter.
class Feature(Enum):
  """Constants to use when selecting AutoGraph features."""

  # The enum *values* double as user-facing descriptions; do not edit them
  # casually — they are runtime strings.
  ALL = 'Enable all features.'
  AUTO_CONTROL_DEPS = (
      'Insert of control dependencies in the generated code.')
  DECORATORS = (
      'Allow decorators in local functions. Note that special decorators, '
      ' like ag.convert or tf.function are allowed regardless of this toggle.')
  LISTS = 'Convert list idioms, like initializers, slices, append, etc.'

  def __repr__(self):
    # Compact debug form: just the feature name, not the description.
    return self.name
class ConversionOptions(object):
  """Immutable container for global conversion flags.

  Attributes:
    recursive: bool, whether to recursively convert any user functions or
      classes that the converted function may use.
    verbose: bool, whether to log the converted code.
    strip_decorators: Tuple[Callable], contains decorators that should be
      excluded from the compiled output. By default, when converting a function
      before the decorators are applied, the compiled output will include those
      decorators.
    force_conversion: bool, whether to force converting the target entity. When
      force_conversion is turned off, the converter may decide to return the
      function as-is.
    optional_features: Union[Feature, Set[Feature]], controls the use of
      optional features in the conversion process. See Feature for available
      options.
  """

  def __init__(self,
               recursive=False,
               verbose=False,
               strip_decorators=None,
               force_conversion=False,
               internal_convert_user_code=True,
               optional_features=Feature.ALL):
    self.recursive = recursive
    self.verbose = verbose
    self.strip_decorators = strip_decorators or ()
    self.force_conversion = force_conversion
    # TODO(mdan): Rename to conversion_recursion_depth?
    self.internal_convert_user_code = internal_convert_user_code

    # Normalize to a frozenset so membership tests are cheap and the options
    # object stays effectively immutable.
    if isinstance(optional_features, Feature):
      optional_features = (optional_features,)
    optional_features = frozenset(optional_features)
    self.optional_features = optional_features

  def uses(self, feature):
    """Returns True if `feature` (or Feature.ALL) is enabled."""
    return (Feature.ALL in self.optional_features or
            feature in self.optional_features)

  def to_ast(self, namespace, internal_convert_user_code=None):
    """Returns a representation of this object as an AST node.

    The AST node encodes a constructor that would create an object with the
    same contents.

    Args:
      namespace: Dict[str, Any], the namespace to use when serializing values to
        names.
      internal_convert_user_code: Optional[bool], allows overriding the
        corresponding value.

    Returns:
      ast.Node
    """
    template = """
      constructor_name(
          recursive=recursive_val,
          verbose=verbose_val,
          strip_decorators=strip_decorators_val,
          force_conversion=force_conversion_val,
          optional_features=optional_features_val,
          internal_convert_user_code=internal_convert_user_code_val)
    """

    def as_qualified_name(o):
      name = inspect_utils.getqualifiedname(namespace, o)
      if not name:
        raise ValueError('Could not locate entity {} in {}'.format(
            o, namespace))
      return name

    def list_of_names(values):
      return parser.parse_expression('({})'.format(', '.join(
          tuple(as_qualified_name(v) for v in values))))

    def list_of_features(values):
      # NOTE(review): `v` iterates member *names* (strings) while `values`
      # holds Feature members, so `v in values` looks always-False —
      # confirm the intended membership test against callers.
      return parser.parse_expression('({})'.format(', '.join(
          'ag__.Feature.{}'.format(v)
          for v in Feature.__members__
          if v in values)))

    # BUG FIX: only fall back to the stored value when the caller did *not*
    # supply an override. The condition was previously inverted
    # (`is not None`), which discarded explicit overrides and serialized
    # None otherwise.
    if internal_convert_user_code is None:
      internal_convert_user_code = self.internal_convert_user_code

    expr_ast = templates.replace(
        template,
        constructor_name=parser.parse_expression(
            as_qualified_name(ConversionOptions)),
        recursive_val=parser.parse_expression(str(self.recursive)),
        verbose_val=parser.parse_expression(str(self.verbose)),
        strip_decorators_val=list_of_names(self.strip_decorators),
        force_conversion_val=parser.parse_expression(
            str(self.force_conversion)),
        internal_convert_user_code_val=parser.parse_expression(
            str(internal_convert_user_code)),
        optional_features_val=list_of_features(self.optional_features))
    return expr_ast[0].value
class ProgramContext(object):
  """ProgramContext keeps track of converting function hierarchies.

  This object is mutable, and is updated during conversion. Not thread safe.

  Attributes:
    options: ConversionOptions
    dependency_cache: Dict[Any, ast.AST], the original entities mapped to their
      converted AST
    additional_imports: Set[Any], additional entities which for any reason
      cannot be attached after loading and need to be explicitly imported in the
      generated code
    name_map: Dict[str, str], map of original entity name to the name of their
      converted counterparts
    autograph_module: Module, a reference to the autograph module. This needs to
      be specified by the caller to avoid circular dependencies.
    uncompiled_modules: Set[Tuple[str, ...]], with each tuple representing the
      fully qualified name of a package containing functions that will not be
      compiled.
    required_imports: str, containing an import statement on each line. These
      are all the imports necessary for the compiled code to run, in addition to
      the closures of each entity, which are attached dynamically.
  """

  def __init__(self, options, partial_types, autograph_module,
               uncompiled_modules):
    # Caller-provided configuration.
    self.options = options
    self.partial_types = partial_types or ()
    self.autograph_module = autograph_module
    self.uncompiled_modules = uncompiled_modules
    # Mutable state accumulated during conversion.
    self.conversion_order = []
    self.dependency_cache = {}
    self.additional_imports = set()
    self.name_map = {}

  @property
  def required_imports(self):
    """Returns a block containing all imports required by the converted code."""
    # TODO(mdan): Check that these don't clobber one another.
    statements = config.COMPILED_IMPORT_STATEMENTS + tuple(
        self.additional_imports)
    return '\n'.join(statements)

  def new_namer(self, namespace):
    """Creates a Namer bound to this context's options and rename map."""
    return naming.Namer(namespace, self.options.recursive, self.name_map,
                        self.partial_types)

  def update_name_map(self, namer):
    """Updates renamed_calls based on the recent activity from the namer.

    Whenever we convert a new entity, any references to other entities are being
    renamed to match their soon-to-be-converted counterparts. The namer keeps
    track of these renames. When conversion is complete, we copy those renames
    so that when those referenced entities are being converted, their new name
    matches.

    Args:
      namer: naming.Namer

    Raises:
      ValueError: when an entity was renamed twice and to different names.
    """
    # TODO(mdan): Have call_trees do this directly.
    # This is done so indirectly, via the namer, for historic reasons. But
    # now we can have the converter that does the rename record the new name
    # as well and skip this step altogether.
    for entity, new_name in namer.renamed_calls.items():
      if entity not in self.name_map:
        self.name_map[entity] = new_name
      elif self.name_map[entity] != new_name:
        raise ValueError(
            'Calls to %s were converted using multiple names (%s). This is '
            'possible when an entity with one of these names already '
            'existed. To fix, avoid using any of these names.' %
            (entity, (new_name, self.name_map[entity])))

  def add_to_cache(self, original_entity, converted_ast):
    """Records a converted entity, preserving conversion order."""
    self.conversion_order.append(original_entity)
    self.dependency_cache[original_entity] = converted_ast
class EntityContext(object):
  """Tracks the conversion of a single entity.

  This object is mutable, and is updated during conversion. Not thread safe.

  Attributes:
    namer: Namer
    info: transformer.EntityInfo
    program: ProgramContext
  """

  def __init__(self, namer, entity_info, program_ctx):
    self.namer = namer
    self.info = entity_info
    self.program = program_ctx
class Base(transformer.Base):
  """All converters should inherit from this class.

  Attributes:
    ctx: EntityContext
  """

  def __init__(self, ctx):
    super(Base, self).__init__(ctx.info)
    self.ctx = ctx  # Keeping this short because it's used frequently.
    # Converters are single-use; `visit` enforces this via these flags.
    self._used = False
    self._ast_depth = 0

  def get_definition_directive(self, node, directive, arg, default):
    """Returns the unique directive argument for a symbol.

    See lang/directives.py for details on directives.

    Example:
      # Given a directive in the code:
      ag.foo_directive(bar, baz=1)

      # One can write for an AST node Name(id='bar'):
      get_definition_directive(node, ag.foo_directive, 'baz')

    Args:
      node: ast.AST, the node representing the symbol for which the directive
        argument is needed.
      directive: Callable[..., Any], the directive to search.
      arg: str, the directive argument to return.
      default: Any

    Raises:
      ValueError: if conflicting annotations have been found
    """
    defs = anno.getanno(node, anno.Static.ORIG_DEFINITIONS, ())
    if not defs:
      return default

    # Collect the directive argument from every reaching definition.
    found = [
        d.directives[directive][arg]
        for d in defs
        if directive in d.directives and arg in d.directives[directive]
    ]
    if not found:
      return default
    if len(found) == 1:
      return found[0]

    # If multiple annotations reach the symbol, they must all match. If they do,
    # return any of them.
    first = found[0]
    for candidate in found[1:]:
      if not ast_util.matches(first, candidate):
        qn = anno.getanno(node, anno.Basic.QN)
        raise ValueError('%s has ambiguous annotations for %s(%s): %s, %s' %
                         (qn, directive.__name__, arg,
                          compiler.ast_to_source(candidate).strip(),
                          compiler.ast_to_source(first).strip()))
    return first

  def visit(self, node):
    # Only the outermost visit counts as "using" the converter; recursive
    # visits share the same traversal.
    if self._ast_depth == 0:
      if self._used:
        raise ValueError('converter objects cannot be reused')
      self._used = True

    self._ast_depth += 1
    try:
      return super(Base, self).visit(node)
    finally:
      self._ast_depth -= 1
class AnnotatedDef(reaching_definitions.Definition):
  """A reaching definition that also carries user directives."""

  def __init__(self):
    super(AnnotatedDef, self).__init__()
    # Maps directive callables to their keyword-argument dicts.
    self.directives = {}
class AgAnno(Enum):
  """Annotation labels specific to AutoGraph. See anno.py."""

  DIRECTIVES = 'User directives associated with the annotated statement.'

  def __repr__(self):
    # Compact debug form: just the label name.
    return self.name
def standard_analysis(node, context, is_initial=False):
  """Performs a complete static analysis of the given code.

  Args:
    node: ast.AST
    context: converter.EntityContext
    is_initial: bool, whether this is the initial analysis done on the input
      source code

  Returns:
    ast.AST, same as node, with the static analysis annotations added
  """
  # TODO(mdan): Clear static analysis here.
  # TODO(mdan): Consider not running all analyses every time.
  # TODO(mdan): Don't return a node because it's modified by reference.
  # NOTE(review): the pass sequence below appears order-dependent (e.g.
  # reaching definitions feed liveness) — confirm before reordering.
  graphs = cfg.build(node)
  node = qual_names.resolve(node)
  node = activity.resolve(node, context.info, None)
  node = reaching_definitions.resolve(node, context.info, graphs, AnnotatedDef)
  node = liveness.resolve(node, context.info, graphs)
  node = live_values.resolve(node, context.info, config.PYTHON_LITERALS)
  node = type_info.resolve(node, context.info)
  # This second call allows resolving first-order class attributes.
  node = live_values.resolve(node, context.info, config.PYTHON_LITERALS)
  if is_initial:
    # Snapshot the definitions so later passes can still access the
    # pre-conversion analysis results.
    anno.dup(
        node,
        {
            anno.Static.DEFINITIONS: anno.Static.ORIG_DEFINITIONS,
        },
    )
  return node
def apply_(node, context, converter_module):
  """Applies a converter to an AST.

  Args:
    node: ast.AST
    context: converter.EntityContext
    converter_module: converter.Base

  Returns:
    ast.AST, the result of applying converter to node
  """
  # Run the standard analyses first, then hand the annotated tree to the
  # converter module.
  analyzed = standard_analysis(node, context)
  return converter_module.transform(analyzed, context)
|
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Core functionality for tree_math."""
import functools
import operator
import typing
from typing import Tuple
from jax import tree_util
import jax.numpy as jnp
def _flatten_together(*args):
"""Flatten a collection of pytrees with matching structure/shapes together."""
all_values, all_treedefs = zip(*map(tree_util.tree_flatten, args))
all_treedefs = typing.cast(Tuple[tree_util.PyTreeDef, ...], all_treedefs)
if not all(treedef == all_treedefs[0] for treedef in all_treedefs[1:]):
treedefs_str = " vs ".join(map(str, all_treedefs))
raise ValueError(
f"arguments have different tree structures: {treedefs_str}"
)
all_shapes = [list(map(jnp.shape, values)) for values in all_values]
if not all(shapes == all_shapes[0] for shapes in all_shapes[1:]):
shapes_str = " vs ".join(map(str, all_shapes))
raise ValueError(f"tree leaves have different array shapes: {shapes_str}")
return all_values, all_treedefs[0]
def _argnums_partial(f, args, static_argnums):
def g(*args3):
args3 = list(args3)
for i in static_argnums:
args3.insert(i, args[i])
return f(*args3)
args2 = tuple(x for i, x in enumerate(args) if i not in static_argnums)
return g, args2
def broadcasting_map(func, *args):
  """Like tree_map, but scalar arguments are broadcast to all leaves."""
  # Arguments that are not tree_math vectors are treated as static scalars.
  static_argnums = [
      index for index, arg in enumerate(args)
      if not isinstance(arg, VectorMixin)
  ]
  func2, vector_args = _argnums_partial(func, args, static_argnums)

  for arg in args:
    if isinstance(arg, VectorMixin):
      continue
    if jnp.shape(arg):
      raise TypeError(
          f"non-tree_math.VectorMixin argument is not a scalar: {arg!r}"
      )

  if not vector_args:
    return func2()  # result is a scalar

  _flatten_together(*vector_args)  # check shapes
  return tree_util.tree_map(func2, *vector_args)
def _binary_method(func, name):
"""Implement a forward binary method, e.g., __add__."""
def wrapper(self, other):
return broadcasting_map(func, self, other)
wrapper.__name__ = f"__{name}__"
return wrapper
def _reflected_binary_method(func, name):
"""Implement a reflected binary method, e.g., __radd__."""
def wrapper(self, other):
return broadcasting_map(func, other, self)
wrapper.__name__ = f"__r{name}__"
return wrapper
def _numeric_methods(func, name):
  """Implement forward and reflected methods."""
  forward = _binary_method(func, name)
  reflected = _reflected_binary_method(func, name)
  return forward, reflected
def _unary_method(func, name):
def wrapper(self):
return tree_util.tree_map(func, self)
wrapper.__name__ = f"__{name}__"
return wrapper
def dot(left, right, *, precision="highest"):
  """Dot product between tree math vectors.

  Note that unlike jax.numpy.dot, tree_math.dot defaults to full (highest)
  precision. This is more useful for numerical algorithms and will be the
  default for jax.numpy in the future:
  https://github.com/google/jax/pull/7859

  Args:
    left: left argument.
    right: right argument.
    precision: precision.

  Returns:
    Resulting dot product (scalar).
  """
  if not isinstance(left, VectorMixin) or not isinstance(right, VectorMixin):
    raise TypeError("matmul arguments must both be tree_math.VectorMixin objects")

  def _leaf_dot(a, b):
    # Flatten each leaf so arbitrary-rank leaves contribute a scalar term.
    return jnp.dot(jnp.ravel(a), jnp.ravel(b), precision=precision)

  (left_leaves, right_leaves), _ = _flatten_together(left, right)
  return functools.reduce(operator.add,
                          map(_leaf_dot, left_leaves, right_leaves))
class VectorMixin:
"""A mixin class that adds a 1D vector-like behaviour to any custom pytree class."""
@property
def size(self):
values = tree_util.tree_leaves(self)
return sum(jnp.size(value) for value in values)
def __len__(self):
return self.size
@property
def shape(self):
return (self.size,)
@property
def ndim(self):
return 1
@property
def dtype(self):
values = tree_util.tree_leaves(self)
return jnp.result_type(*values)
# comparison
__lt__ = _binary_method(operator.lt, "lt")
__le__ = _binary_method(operator.le, "le")
__eq__ = _binary_method(operator.eq, "eq")
__ne__ = _binary_method(operator.ne, "ne")
__ge__ = _binary_method(operator.ge, "ge")
__gt__ = _binary_method(operator.gt, "gt")
# arithmetic
__add__, __radd__ = _numeric_methods(operator.add, "add")
__sub__, __rsub__ = _numeric_methods(operator.sub, "sub")
__mul__, __rmul__ = _numeric_methods(operator.mul, "mul")
__truediv__, __rtruediv__ = _numeric_methods(operator.truediv, "truediv")
__floordiv__, __rfloordiv__ = _numeric_methods(operator.floordiv, "floordiv")
__mod__, __rmod__ = _numeric_methods(operator.mod, "mod")
__pow__, __rpow__ = _numeric_methods(operator.pow, "pow")
__matmul__ = __rmatmul__ = dot
# TODO(shoyer): implement this via divmod() on the leaves
def __divmod__(self, other):
return self // other, self % other
def __rdivmod__(self, other):
return other // self, other % self
# bitwise
__lshift__, __rlshift__ = _numeric_methods(operator.lshift, "lshift")
__rshift__, __rrshift__ = _numeric_methods(operator.rshift, "rshift")
__and__, __rand__ = _numeric_methods(operator.and_, "and")
__xor__, __rxor__ = _numeric_methods(operator.xor, "xor")
__or__, __ror__ = _numeric_methods(operator.or_, "or")
# unary methods
__neg__ = _unary_method(operator.neg, "neg")
__pos__ = _unary_method(operator.pos, "pos")
__abs__ = _unary_method(abs, "abs")
__invert__ = _unary_method(operator.invert, "invert")
# numpy methods
conj = _unary_method(jnp.conj, "conj")
dot = dot
real = property(_unary_method(jnp.real, "real"))
imag = property(_unary_method(jnp.imag, "imag"))
def sum(self):
    """Sum of every scalar entry across all pytree leaves."""
    leaf_sums = [jnp.sum(leaf) for leaf in tree_util.tree_leaves(self)]
    return functools.reduce(operator.add, leaf_sums)

def mean(self):
    """Arithmetic mean over all scalar entries of the flattened vector."""
    return self.sum() / len(self)

def min(self):
    """Smallest scalar entry over all pytree leaves."""
    leaf_mins = [jnp.min(leaf) for leaf in tree_util.tree_leaves(self)]
    return jnp.asarray(leaf_mins).min()

def max(self):
    """Largest scalar entry over all pytree leaves."""
    leaf_maxs = [jnp.max(leaf) for leaf in tree_util.tree_leaves(self)]
    return jnp.asarray(leaf_maxs).max()
@tree_util.register_pytree_node_class
class Vector(VectorMixin):
    """Wraps an arbitrary pytree so it can be manipulated as a 1D vector."""

    def __init__(self, tree):
        self._tree = tree

    @property
    def tree(self):
        """The wrapped pytree, unchanged."""
        return self._tree

    # TODO(shoyer): consider casting to a common dtype?

    def __repr__(self):
        return f"tree_math.Vector({self._tree!r})"

    def tree_flatten(self):
        # A single child (the wrapped tree) and no static auxiliary data.
        children = (self._tree,)
        return children, None

    @classmethod
    def tree_unflatten(cls, _, args):
        (tree,) = args
        return cls(tree)
|
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from distutils.version import LooseVersion
from functools import reduce
import operator
from cms import __version__ as CMS_VERSION
from django.core import checks
from django.db import models
from django.db.models.aggregates import Sum
from django.db.models.functions import Coalesce
from django.utils import six
from django.utils import timezone
from django.utils.encoding import force_text
from django.utils.six.moves.urllib.parse import urljoin
from django.utils.translation import ugettext_lazy as _
from polymorphic.managers import PolymorphicManager
from polymorphic.models import PolymorphicModel
from shop import deferred
from shop.conf import app_settings
from shop.exceptions import ProductNotAvailable
class Availability(object):
    """
    Describes how many items of a given product can currently be sold, and
    during which period they are purchasable.
    """
    def __init__(self, **kwargs):
        """
        :param earliest:
            Point in time from when on this product will be available.

        :param latest:
            Point in time until this product will be available.

        :param quantity:
            Number of available items. The type of this value is the same as the type of
            ``quantity`` in :class:`shop.models.cart.CartItemModel`.

        :param sell_short:
            If ``True``, sell the product even though it's not in stock. It then will be
            shipped at the point in time specified by ``earliest``.

        :param limited_offer:
            If ``True``, sell the product until the point in time specified by ``latest``.
            After that period, the product will not be available anymore.

        :param inventory:
            Coerced to a flag stating whether an inventory manages this availability.
        """
        current_tz = timezone.get_current_timezone()
        self.earliest = kwargs.get('earliest', timezone.datetime.min.replace(tzinfo=current_tz))
        self.latest = kwargs.get('latest', timezone.datetime.max.replace(tzinfo=current_tz))
        # Never report more than the configured per-purchase maximum.
        purchase_cap = app_settings.MAX_PURCHASE_QUANTITY
        self.quantity = min(kwargs.get('quantity', purchase_cap), purchase_cap)
        self.sell_short = bool(kwargs.get('sell_short', False))
        self.limited_offer = bool(kwargs.get('limited_offer', False))
        self.inventory = bool(kwargs.get('inventory', None))
class AvailableProductMixin(object):
    """
    Add this mixin class to the product models declaration, wanting to keep track on the
    current amount of products in stock. In comparison to
    :class:`shop.models.product.ReserveProductMixin`, this mixin does not reserve items in pending
    carts, with the risk for overselling. It thus is suited for products kept in the cart
    for a long period.

    The product class must implement a field named ``quantity`` accepting numerical values.
    """
    def get_availability(self, request, **kwargs):
        """
        Returns the current available quantity for this product.

        If other customers have pending carts containing this same product, the quantity
        is not adjusted. This may result in a situation, where someone adds a product
        to the cart, but then is unable to purchase, because someone else bought it in the
        meantime.
        """
        return Availability(quantity=self.quantity)

    def deduct_from_stock(self, quantity, **kwargs):
        # Refuse to deduct more items than are currently in stock.
        if quantity > self.quantity:
            raise ProductNotAvailable(self)
        self.quantity -= quantity
        self.save(update_fields=['quantity'])

    def managed_availability(self):
        # Stock is tracked through the ``quantity`` field, hence "managed".
        return True

    @classmethod
    def check(cls, **kwargs):
        """
        Django system check: verify that both this product model and
        ``CartItemModel`` declare a ``quantity`` field of the same internal type.
        """
        from shop.models.cart import CartItemModel
        errors = super(AvailableProductMixin, cls).check(**kwargs)
        # for/else: after the loop, ``cart_field`` is bound to CartItemModel's
        # ``quantity`` field; the else-branch only runs when no such field exists.
        for cart_field in CartItemModel._meta.fields:
            if cart_field.attname == 'quantity':
                break
        else:
            msg = "Class `{}` must implement a field named `quantity`."
            errors.append(checks.Error(msg.format(CartItemModel.__name__)))
        for field in cls._meta.fields:
            if field.attname == 'quantity':
                # Types must agree so cart and product quantities are comparable.
                if field.get_internal_type() != cart_field.get_internal_type():
                    msg = "Field `{}.quantity` must be of same type as `{}.quantity`."
                    errors.append(checks.Error(msg.format(cls.__name__, CartItemModel.__name__)))
                break
        else:
            msg = "Class `{}` must implement a field named `quantity`."
            errors.append(checks.Error(msg.format(cls.__name__)))
        return errors
class BaseReserveProductMixin(object):
    def get_availability(self, request, **kwargs):
        """
        Return the currently available quantity for this product, reduced by the
        amount already held in pending carts.

        Make sure that carts which never were converted into an order are
        invalidated after a determined period of time; otherwise the quantity
        reported here may be considerably lower than it could be.
        """
        from shop.models.cart import CartItemModel

        availability = super(BaseReserveProductMixin, self).get_availability(request, **kwargs)
        reserved = CartItemModel.objects.filter(product=self).values('quantity')
        availability.quantity -= reserved.aggregate(sum=Coalesce(Sum('quantity'), 0))['sum']
        return availability
# Concrete combination: availability management plus cart-based reservation.
class ReserveProductMixin(BaseReserveProductMixin, AvailableProductMixin):
    """
    Add this mixin class to the product models declaration, wanting to keep track on the
    current amount of products in stock. In comparison to
    :class:`shop.models.product.AvailableProductMixin`, this mixin reserves items in pending
    carts, without the risk for overselling. On the other hand, the shop may run out of sellable
    items, if customers keep products in the cart for a long period, without proceeding to checkout.

    Use this mixin for products kept for a short period until checking out the cart, for
    instance for ticket sales. Ensure that pending carts are flushed regularly.

    The product class must implement a field named ``quantity`` accepting numerical values.
    """
class BaseProductManager(PolymorphicManager):
    """
    Base ModelManager for all non-object manipulation needs, mostly statistics
    and querying.
    """
    def select_lookup(self, search_term):
        """
        Return a queryset of products whose declared lookup fields match the
        given search term. Each product class defines its own ``lookup_fields``
        list or tuple.
        """
        predicates = [models.Q((field_name, search_term))
                      for field_name in self.model.lookup_fields]
        return self.get_queryset().filter(reduce(operator.or_, predicates))

    def indexable(self):
        """Return a queryset of indexable (active) products."""
        return self.get_queryset().filter(active=True)
class PolymorphicProductMetaclass(deferred.PolymorphicForeignKeyBuilder):
    @classmethod
    def perform_meta_model_check(cls, Model):
        """
        Perform some safety checks on the ProductModel being created.

        :param Model: the concrete product model class to validate.
        :raises NotImplementedError: if the model lacks a suitable manager,
            a ``lookup_fields`` sequence, or a callable ``get_price``.
        """
        if not isinstance(Model.objects, BaseProductManager):
            msg = "Class `{}.objects` must provide ModelManager inheriting from BaseProductManager"
            raise NotImplementedError(msg.format(Model.__name__))
        if not isinstance(getattr(Model, 'lookup_fields', None), (list, tuple)):
            msg = "Class `{}` must provide a tuple of `lookup_fields` so that we can easily lookup for Products"
            raise NotImplementedError(msg.format(Model.__name__))
        if not callable(getattr(Model, 'get_price', None)):
            msg = "Class `{}` must provide a method implementing `get_price(request)`"
            # Bug fix: this previously formatted with `cls.__name__` (the
            # metaclass), naming the wrong class in the error message; the two
            # sibling checks above correctly use the model being checked.
            raise NotImplementedError(msg.format(Model.__name__))
class BaseProduct(six.with_metaclass(PolymorphicProductMetaclass, PolymorphicModel)):
    """
    An abstract basic product model for the shop. It is intended to be overridden by one or
    more polymorphic models, adding all the fields and relations, required to describe this
    type of product.

    Some attributes for this class are mandatory. They shall be implemented as property method.
    The following fields MUST be implemented by the inheriting class:
    ``product_name``: Return the pronounced name for this product in its localized language.

    Additionally the inheriting class MUST implement the following methods ``get_absolute_url()``
    and ``get_price()``. See below for details.

    Unless each product variant offers its own product code, it is strongly recommended to add
    a field ``product_code = models.CharField(_("Product code"), max_length=255, unique=True)``
    to the class implementing the product.
    """
    # Creation timestamp, set once when the row is first saved.
    created_at = models.DateTimeField(
        _("Created at"),
        auto_now_add=True,
    )

    # Refreshed automatically on every save.
    updated_at = models.DateTimeField(
        _("Updated at"),
        auto_now=True,
    )

    # Controls public visibility; also used by BaseProductManager.indexable().
    active = models.BooleanField(
        _("Active"),
        default=True,
        help_text=_("Is this product publicly visible."),
    )

    class Meta:
        abstract = True
        verbose_name = _("Product")
        verbose_name_plural = _("Products")

    def product_type(self):
        """
        Returns the polymorphic type of the product.
        """
        return force_text(self.polymorphic_ctype)
    product_type.short_description = _("Product type")

    @property
    def product_model(self):
        """
        Returns the polymorphic model name of the product's class.
        """
        return self.polymorphic_ctype.model

    def get_absolute_url(self):
        """
        Hook for returning the canonical Django URL of this product.
        """
        msg = "Method get_absolute_url() must be implemented by subclass: `{}`"
        raise NotImplementedError(msg.format(self.__class__.__name__))

    def get_price(self, request):
        """
        Hook for returning the current price of this product.
        The price shall be of type Money. Read the appropriate section on how to create a Money
        type for the chosen currency.
        Use the `request` object to vary the price according to the logged in user,
        its country code or the language.
        """
        msg = "Method get_price() must be implemented by subclass: `{}`"
        raise NotImplementedError(msg.format(self.__class__.__name__))

    def get_product_variant(self, **kwargs):
        """
        Hook for returning the variant of a product using parameters passed in by **kwargs.
        If the product has no variants, then return the product itself.

        :param **kwargs: A dictionary describing the product's variations.
        """
        return self

    def get_availability(self, request, **kwargs):
        """
        Hook for checking the availability of a product.

        :param request:
            Optionally used to vary the availability according to the logged in user,
            its country code or language.

        :param **kwargs:
            Extra arguments passed to the underlying method. Useful for products with
            variations.

        :return: An object of type :class:`shop.models.product.Availability`.
        """
        # Default: unlimited availability (capped by MAX_PURCHASE_QUANTITY).
        return Availability()

    def managed_availability(self):
        """
        :return True: If this product has its quantity managed by some inventory functionality.
        """
        return False

    def is_in_cart(self, cart, watched=False, **kwargs):
        """
        Checks if the current product is already in the given cart, and if so, returns the
        corresponding cart_item.

        :param watched (bool): This is used to determine if this check shall only be performed
            for the watch-list.

        :param **kwargs: Optionally one may pass arbitrary information about the product being looked
            up. This can be used to determine if a product with variations shall be considered
            equal to the same cart item, resulting in an increase of it's quantity, or if it
            shall be considered as a separate cart item, resulting in the creation of a new item.

        :returns: The cart item (of type CartItem) containing the product considered as equal to the
            current one, or ``None`` if no product matches in the cart.
        """
        # NOTE(review): the base implementation ignores ``watched`` and **kwargs;
        # presumably overriding classes are expected to honour them — confirm.
        from shop.models.cart import CartItemModel
        cart_item_qs = CartItemModel.objects.filter(cart=cart, product=self)
        return cart_item_qs.first()

    def deduct_from_stock(self, quantity, **kwargs):
        """
        Hook to deduct a number of items of the current product from the stock's inventory.

        :param quantity: Number of items to deduct.

        :param **kwargs:
            Extra arguments passed to the underlying method. Useful for products with
            variations.
        """

    def get_weight(self):
        """
        Optional hook to return the product's gross weight in kg. This information is required to
        estimate the shipping costs. The merchants product model shall override this method.
        """
        return 0

    @classmethod
    def check(cls, **kwargs):
        # Django system check: every concrete product must expose ``product_name``.
        errors = super(BaseProduct, cls).check(**kwargs)
        try:
            cls.product_name
        except AttributeError:
            msg = "Class `{}` must provide a model field implementing `product_name`"
            errors.append(checks.Error(msg.format(cls.__name__)))
        return errors
# Materialize the deferred BaseProduct into the concrete product model
# configured for this shop instance.
ProductModel = deferred.MaterializedModel(BaseProduct)
class CMSPageReferenceMixin(object):
    """
    Mixin for product models which refer to CMS pages in order to emulate
    categories, and therefore need a canonical URL; it contributes a
    ``get_absolute_url()`` method to the product model.
    """
    category_fields = ['cms_pages']  # used by ProductIndex to fill the categories

    def get_absolute_url(self):
        """
        Return the absolute URL of a product
        """
        # Sort so that the last entry is the highest-level page: the canonical
        # URL then associates with the most generic category. django-CMS 3.5
        # moved the tree depth onto the page's tree node.
        if LooseVersion(CMS_VERSION) < LooseVersion('3.5'):
            ordering = 'depth'
        else:
            ordering = 'node__path'
        cms_page = self.cms_pages.order_by(ordering).last()
        if cms_page is None:
            return urljoin('/category-not-assigned/', self.slug)
        return urljoin(cms_page.get_absolute_url(), self.slug)
|
|
###############################################################################
## fs.py
## 9te [angband.ornl.gov]
## Wed Jan 12 10:37:50 2011
###############################################################################
## Copyright (C) 2008 Oak Ridge National Laboratory, UT-Battelle, LLC.
##---------------------------------------------------------------------------##
## generated by /data/denovo/production/head/setup/bin/pygen built on 20110112
###############################################################################
import os, sys, math, string

# pyspn equation type
# NOTE(review): star-import supplies initialize/node/release/Timer/etc. used
# below — all runtime names come from the spn_fv extension module.
from spn_fv import *

# When True, build_mesh() dumps the lattice mixing tables while constructing
# the mesh (debug aid).
print_it = False

##---------------------------------------------------------------------------##
## MAIN
##---------------------------------------------------------------------------##

# Initialize the (MPI) run with the command-line arguments.
initialize(sys.argv)

# Print the banner on the master node only (Python 2 print statements).
if node() == 0:
    print "Denovo - pyspn Python Front-End"
    print "-------------------------------"
    print "Release : %16s" % (release())
    print "Release Date : %16s" % (release_date())
    print "Build Date : %16s" % (build_date())
    print

# Wall-clock timer covering the whole problem run.
timer = Timer()
timer.start()
##---------------------------------------------------------------------------##
## XS DATA
##---------------------------------------------------------------------------##
## Seven-group macroscopic cross-section data (C5G7-style benchmark).
## Scattering matrices are stored row-per-group as lists of single-moment
## lists; upscatter tables list the source groups feeding each group.

####### UO2 Fuel-Clad Macroscopic Cross Sections ##########

## Transport-corrected total cross sections
T_UO2 = [1.77949e-1, 3.29805e-1, 4.80388e-1, 5.54367e-1,
         3.11801e-1, 3.95168e-1, 5.64406e-1]

## Fission cross section
F_UO2 = [7.21206e-3, 8.19301e-4, 6.45320e-3, 1.85648e-2,
         1.78084e-2, 8.30348e-2, 2.16004e-1]

## Nu (average neutrons per fission)
N_UO2 = [2.78145, 2.47443, 2.43383, 2.43380, 2.43380, 2.43380, 2.43380]

## Chi (fission spectrum)
C_UO2 = [5.87910e-1, 4.11760e-1, 3.39060e-4, 1.17610e-7,
         0.00000000, 0.00000000, 0.00000000]

## Scattering matrix for UO2 fuel-clad (macroscopic)
S_UO2 = [
    [[1.27537e-1]],
    [[4.23780e-2], [3.24456e-1]],
    [[9.43740e-6], [1.63140e-3], [4.50940e-1]],
    [[5.51630e-9], [3.14270e-9], [2.67920e-3], [4.52565e-1], [1.25250e-4]],
    [[0.00000000], [0.00000000], [0.00000000], [5.56640e-3], [2.71401e-1], [1.29680e-3]],
    [[0.00000000], [0.00000000], [0.00000000], [0.00000000], [1.02550e-2], [2.65802e-1], [8.54580e-3]],
    [[0.00000000], [0.00000000], [0.00000000], [0.00000000], [1.00210e-8], [1.68090e-2], [2.73080e-1]],
]

## Upscattering source groups per destination group
U_UO2 = [[], [], [], [4], [5], [6], []]

######## 4.3% MOX Fuel-Clad Macroscopic Cross-Sections ############

## Transport-corrected total cross sections
T_MOX43 = [1.78731e-1, 3.30849e-1, 4.83772e-1, 5.66922e-1,
           4.26227e-1, 6.78997e-1, 6.82852e-1]

## Fission cross-sections
F_MOX43 = [7.62704e-3, 8.76898e-4, 5.69835e-3, 2.28872e-2,
           1.07635e-2, 2.32757e-1, 2.48968e-1]

## Nu cross-sections
N_MOX43 = [2.85209, 2.89099, 2.85486, 2.86073, 2.85447, 2.86415, 2.86780]

## Chi cross-sections (same spectrum as UO2)
C_MOX43 = [5.87910e-1, 4.11760e-1, 3.39060e-4, 1.17610e-7,
           0.00000000, 0.00000000, 0.00000000]

## Scattering matrix for 4.3% MOX fuel-clad (macroscopic)
S_MOX43 = [
    [[1.28876e-1]],
    [[4.14130e-2], [3.25452e-1]],
    [[8.22900e-6], [1.63950e-3], [4.53188e-1]],
    [[5.04050e-9], [1.59820e-9], [2.61420e-3], [4.57173e-1], [1.60460e-4]],
    [[0.00000000], [0.00000000], [0.00000000], [5.53940e-3], [2.76814e-1], [2.00510e-3]],
    [[0.00000000], [0.00000000], [0.00000000], [0.00000000], [9.31270e-3], [2.52962e-1], [8.49480e-3]],
    [[0.00000000], [0.00000000], [0.00000000], [0.00000000], [9.16560e-9], [1.48500e-2], [2.65007e-1]],
]

## Upscattering source groups per destination group
U_MOX43 = [[], [], [], [4], [5], [6], []]

############### Moderator 1 Macroscopic Cross-Sections ################

## Transport-corrected total cross section
T_MOD1 = [1.59206e-1, 4.12970e-1, 5.90310e-1, 5.84350e-1,
          7.18000e-1, 1.25445, 2.65038]

## Scattering matrix for moderator (macroscopic)
S_MOD1 = [
    [[4.44777e-2]],
    [[1.13400e-1], [2.82334e-1]],
    [[7.23470e-4], [1.29940e-1], [3.45256e-1]],
    [[3.74990e-6], [6.23400e-4], [2.24570e-1], [9.10284e-2], [7.14370e-5]],
    [[5.31840e-8], [4.80020e-5], [1.69990e-2], [4.15510e-1], [1.39138e-1], [2.21570e-3]],
    [[0.00000000], [7.44860e-6], [2.64430e-3], [6.37320e-2], [5.11820e-1], [6.99913e-1], [1.32440e-1]],
    [[0.00000000], [1.04550e-6], [5.03440e-4], [1.21390e-2], [6.12290e-2], [5.37320e-1], [2.48070]],
]

## Upscattering source groups per destination group
U_MOD1 = [[], [], [], [4], [5], [6], []]

################### Create nu*sigma_f vectors
NUF_UO2 = [n * f for n, f in zip(N_UO2, F_UO2)]
NUF_MOX43 = [n * f for n, f in zip(N_MOX43, F_MOX43)]
##---------------------------------------------------------------------------##
## BUILD MESH
##---------------------------------------------------------------------------##

def build_mesh(N):
    """
    Build the 3x3-assembly core mesh: a 2x2 checkerboard of UO2/MOX lattices
    surrounded by moderator, with N mesh cells per pin-cell edge.

    Returns (xycore, z, mixids, cleanids, table): the xy and z plane edges,
    the per-cell mixture ids, the clean mixture ids, and the mixing table.
    """
    # vacuum = 0
    # UO2 = 1
    # MOX = 2
    # moderator = 3
    # UO2 pins
    uo2_pin = Pincell()
    uo2_ids = [1]
    uo2_r = [0.4759]
    uo2_pin.set_shells(uo2_ids, uo2_r, 3)
    # MOX pins
    mox_pin = Pincell()
    mox_ids = [2]
    mox_r = [0.4759]
    mox_pin.set_shells(mox_ids, mox_r, 3)
    # Make a 2x2 uo2 lattice and a 2x2 mox lattice
    uo2_lat = Lattice(2)
    mox_lat = Lattice(2)
    # lattices are uniform
    layout = [0, 0, 0, 0]
    uo2_lat.set_pins(layout)
    mox_lat.set_pins(layout)
    # assign the pins in the lattices
    uo2_lat.assign_pin(uo2_pin, 0)
    mox_lat.assign_pin(mox_pin, 0)
    # build the lattice
    uo2_lat.build_lattice(N)
    mox_lat.build_lattice(N)
    # print out mixing tables (debug; Python 2 print statements)
    if print_it:
        print "UO2 Lattice"
        for m in xrange(uo2_lat.num_mixtures()):
            vf = uo2_lat.f(m)
            print "%4i" % (m),
            for f in vf:
                print "%9.6f" % (f),
            print
        print "MOX Lattice"
        for m in xrange(mox_lat.num_mixtures()):
            vf = mox_lat.f(m)
            print "%4i" % (m),
            for f in vf:
                print "%9.6f" % (f),
            print
    # make the mixtable for the combined lattices by appending the mox table
    # to the UO2 table (don't include the clean mixtures at the front of the
    # table)
    num_mixtures = uo2_lat.num_mixtures() + mox_lat.num_mixtures() - 4
    # Each mixture row holds 4 volume fractions (one per material id).
    table = Vec_Dbl(num_mixtures * 4)
    ctr = 0
    mox_offset = uo2_lat.num_mixtures()
    # add UO2 mixtures
    for m in xrange(uo2_lat.num_mixtures()):
        vf = uo2_lat.f(m)
        for f in vf:
            table[ctr] = f
            ctr = ctr + 1
    # add MOX mixtures, skipping the clean mixes
    for m in xrange(4, mox_lat.num_mixtures()):
        vf = mox_lat.f(m)
        for f in vf:
            table[ctr] = f
            ctr = ctr + 1
    # make the cleanids
    cleanids = [0, 1, 2, 3]
    # the total core is 3x3 assemblies (2x2 fuel surrounded by water)
    xylat = uo2_lat.xy_planes()
    Nr = len(xylat) - 1
    delta = Vec_Dbl(Nr, 0.0)
    for i in xrange(Nr):
        delta[i] = xylat[i+1] - xylat[i]
    if Nr % 2 != 0:
        print "Non-even lattices cells."
        sys.exit(1)
    # build the core planes: 2 full assemblies + a half-width moderator band
    xycore = Vec_Dbl(int(2.5*Nr) + 1, 0.0)
    for n in xrange(2):
        for i in xrange(Nr):
            index = i + n * Nr
            xycore[index + 1] = xycore[index] + delta[i]
    # Python 2 integer division: Nr/2 cells for the moderator band.
    for i in xrange(Nr/2):
        index = i + 2 * Nr
        xycore[index + 1] = xycore[index] + delta[i]
    # z-planes (14 in each assembly)
    height = 14.28 * 1.5
    Nz = 21
    z = [0.0] * (Nz + 1)
    dz = height / float(Nz)
    for k in xrange(Nz):
        z[k+1] = z[k] + dz
    # get matids for each lattice
    uo2ids = Vec_Int(uo2_lat.mixids())
    moxids = Vec_Int(mox_lat.mixids())
    # update the mox mixtures (leave clean zones alone)
    for m in xrange(len(moxids)):
        if moxids[m] > 3:
            moxids[m] = moxids[m] + mox_offset - 4
    # assign the matids
    Nx = len(xycore) - 1
    Ny = len(xycore) - 1
    # arrangement
    # |-----|-----|-----|
    # |     |     |     |
    # | mod | mod | mod |
    # |     |     |     |
    # |-----|-----|-----|
    # |     |     |     |
    # | mox | uo2 | mod |  y
    # |     |     |     |
    # |-----|-----|-----|
    # |     |     |     |
    # | uo2 | mox | mod |
    # |     |     |     |
    # |-----|-----|-----|
    #          x
    # Default everything to moderator (3), then paint the four fuel lattices
    # into the lower half of the axial extent (kend = Nz/2, Python 2 int div).
    mixids = Vec_Int(Nx * Ny * Nz, 3)
    kend = Nz / 2
    # NOTE(review): the x-stride below is written as `j * Ny`; this is only
    # correct because Nx == Ny for this core — confirm if the mesh ever
    # becomes non-square.
    # (0, 0) lattice
    for k in xrange(kend):
        for j in xrange(Nr):
            for i in xrange(Nr):
                lat_cell = i + j * Nr
                cell = i + j * Ny + k * Nx * Ny
                mixids[cell] = uo2ids[lat_cell]
    # (1, 0) lattice
    for k in xrange(kend):
        for j in xrange(Nr):
            for i in xrange(Nr):
                lat_cell = i + j * Nr
                cell = (i + Nr) + j * Ny + k * Nx * Ny
                mixids[cell] = moxids[lat_cell]
    # (0, 1) lattice
    for k in xrange(kend):
        for j in xrange(Nr):
            for i in xrange(Nr):
                lat_cell = i + j * Nr
                cell = i + (j + Nr) * Ny + k * Nx * Ny
                mixids[cell] = moxids[lat_cell]
    # (1, 1) lattice
    for k in xrange(kend):
        for j in xrange(Nr):
            for i in xrange(Nr):
                lat_cell = i + j * Nr
                cell = (i + Nr) + (j + Nr) * Ny + k * Nx * Ny
                mixids[cell] = uo2ids[lat_cell]
    return (xycore, z, mixids, cleanids, table)
##---------------------------------------------------------------------------##
## DB
##---------------------------------------------------------------------------##

# Problem database: fixed-source SP7 calculation, 7 groups with upscatter,
# reflecting boundaries on the -x/-y/-z faces.
entries = {
    "problem_type" : "FIXED_SOURCE",
    "num_groups" : 7,
    "downscatter" : False,
    "Pn_order" : 0,
    "tolerance" : 1.0e-3,
    "max_itr" : 400,
    "linear_solver_xml_file" : "azilu01.xml",
    "boundary" : "reflect",
    "boundary_db" : {"reflect" : [1, 0, 1, 0, 1, 0]},
    "SPN_order" : 7
}
db = DB.from_dict(entries)

# decomposition (spatial block layout per known node counts)
if nodes() == 1:
    db.insert("num_blocks_i", 1)
    db.insert("num_blocks_j", 1)
elif nodes() == 2:
    db.insert("num_blocks_i", 2)
    db.insert("num_blocks_j", 1)
elif nodes() == 16:
    db.insert("num_blocks_i", 4)
    db.insert("num_blocks_j", 4)

# Mesh: 10 cells per pin-cell edge.
(r, z, mixids, cleanids, table) = build_mesh(10)
db.insert("x_edges", r)
db.insert("y_edges", r)
db.insert("z_edges", z)

##---------------------------------------------------------------------------##
## MANAGER
##---------------------------------------------------------------------------##

# make manager, material, and angles
manager = Manager()
mat = Mat()

# partition the problem
manager.partition(db, mat)

# get mapping and mesh objects
mapp = manager.get_map()
indexer = manager.get_indexer()
mesh = manager.get_mesh()

# global and local cell numbers
Gx = indexer.num_global(X)
Gy = indexer.num_global(Y)
Gz = mesh.num_cells_dim(Z)
Nx = mesh.num_cells_dim(X)
Ny = mesh.num_cells_dim(Y)
Nz = mesh.num_cells_dim(Z)

if node() == 0:
    print ">>> Partitioned global mesh with %i x %i x %i cells" \
        % (Gx, Gy, Gz)

##---------------------------------------------------------------------------##
## MATERIAL SETUP
##---------------------------------------------------------------------------##

# vacuum = 0
# UO2 = 1
# MOX = 2
# moderator = 3

# set database: 4 materials, id 0 is void.
xsdb = XS_DB(db)
xsdb.set_num(4)
xsdb.assign_zero(0)
for g in xrange(0, xsdb.num_groups()):
    xsdb.assign_upscatter(1, g, T_UO2[g], U_UO2[g], S_UO2[g])
    xsdb.assign_upscatter(2, g, T_MOX43[g], U_MOX43[g], S_MOX43[g])
    xsdb.assign_upscatter(3, g, T_MOD1[g], U_MOD1[g], S_MOD1[g])

## Assign fission data
xsdb.assign_fission(1, NUF_UO2, C_UO2)
xsdb.assign_fission(2, NUF_MOX43, C_MOX43)

# make macro mixer
mixer = Macro_Mixer(xsdb)
mixer.set(cleanids, table)

# make the material database
mixer.mix_with_global_ids(mixids, mat)

##---------------------------------------------------------------------------##
## ENERGY PARTITIONING
##---------------------------------------------------------------------------##

manager.partition_energy(mat)

##---------------------------------------------------------------------------##
## SOURCE SETUP
##---------------------------------------------------------------------------##

# allocate source and problem state
source = Isotropic_Source()
manager.setup(source)

total = Gx * Gy * Gz
Ng = mat.num_groups()
srcids = Vec_Int(total, 0)
srcstr = Vec_Dbl(total, 0.0)
num_shapes = 2
shapes = Vec_Dbl(2 * mat.num_groups(), 0.0)
# NOTE(review): chi0 is read but never used below — presumably kept for
# symmetry with chi1; confirm before removing.
chi0 = xsdb.fission_data(1, 0, CHI)
chi1 = xsdb.fission_data(2, 0, CHI)

# source 0 spectrum -> UO2 Chi
# source 1 spectrum -> MOX Chi

# make shapes
ctr = 0
for g in xrange(Ng):
    shapes[ctr] = xsdb.fission_data(1, g, CHI)
    ctr += 1
for g in xrange(Ng):
    shapes[ctr] = xsdb.fission_data(2, g, CHI)
    ctr += 1

# assign ids and strengths: strength is the group-summed nu*sigma_f of the
# cell's mixture; cells whose chi matches MOX use source shape 1.
for cell in xrange(total):
    matid = mixids[cell]
    if mat.assigned_fission(matid):
        for g in xrange(Ng):
            srcstr[cell] += mat.fission_data(matid, g, NU_SIGMA_F)
        # NOTE(review): exact float equality used to discriminate spectra —
        # works only while the chi values are copied verbatim; confirm.
        if mat.fission_data(matid, 0, CHI) == chi1:
            srcids[cell] = 1

# set the source
source.set(num_shapes, shapes, srcids, srcstr)

##---------------------------------------------------------------------------##
## SOLVE
##---------------------------------------------------------------------------##

if node() == 0:
    print ">>> Setup complete"
    print ">>> Solving with %s differencing" % (manager.spatial_descriptor())

# solve the problem
manager.solve(source)

##---------------------------------------------------------------------------##
## OUTPUT
##---------------------------------------------------------------------------##

# make SILO output: group-summed scalar flux.
silo = SILO()
silo.add_mixer(mixer)
silo.open("fs")
phi = Vec_Dbl(mesh.num_cells(), 0.0)
for g in xrange(Ng):
    flux = manager.moments(g)
    for cell in xrange(mesh.num_cells()):
        phi[cell] = phi[cell] + flux.scalar_flux(cell)
silo.add("phi", phi)
silo.close()

##---------------------------------------------------------------------------##
## TIMING
##---------------------------------------------------------------------------##

# output final database (has class-dependent defaults)
db.output()

timer.stop()
time = timer.wall_clock()
keys = timer_keys()
if len(keys) > 0 and node() == 0:
    print "\n"
    print "TIMING : Problem ran in %16.6e seconds." % (time)
    print "------------------------------------------------------------------"
    for key in keys:
        print "%30s : %16.6e %16.6e" % (key, timer_value(key) / time, timer_value(key))
    print "------------------------------------------------------------------"

##---------------------------------------------------------------------------##
manager.close()
finalize()
###############################################################################
## end of fs.py
###############################################################################
|
|
# -*- coding: utf-8 -*-
from flask import Blueprint, jsonify, request
from pns.app import app
from pns.forms import CreateChannelForm
from pns.models import db, Channel, User, Alert
from pns.json_schemas import registration_schema
# Blueprint grouping all /channels endpoints below.
channel = Blueprint('channel', __name__)
@channel.route('/channels', methods=['POST'])
def create_channel():
    """
    @api {post} /channels Create Channel
    @apiVersion 1.0.0
    @apiName CreateChannel
    @apiGroup Channel

    @apiParam {String {..255}} name Channel name
    @apiParam {String} [description] Channel description

    @apiSuccess {Boolean} success Request status
    @apiSuccess {Object} message Respond payload
    @apiSuccess {Object} message.channel Channel object
    """
    form = CreateChannelForm()
    if not form.validate_on_submit():
        return jsonify(success=False, message=form.errors), 400
    name = request.values.get('name')
    description = request.values.get('description', None)
    # Idempotent create: an existing channel with this name is returned as-is.
    existing = Channel.query.filter_by(name=name).first()
    if existing:
        return jsonify(success=True, message={'channel': existing.to_dict()})
    new_channel = Channel()
    new_channel.name = name
    if description:
        new_channel.description = description
    db.session.add(new_channel)
    try:
        db.session.commit()
    except Exception as ex:
        db.session.rollback()
        app.logger.exception(ex)
        return jsonify(success=False), 500
    return jsonify(success=True, message={'channel': new_channel.to_dict()})
@channel.route('/channels', methods=['GET'])
def list_channels():
    """
    @api {get} /channels List Channels
    @apiVersion 1.0.0
    @apiName ListChannels
    @apiGroup Channel

    @apiParam {Number} offset=1
    @apiParam {Number} limit=20

    @apiSuccess {Boolean} success Request status
    @apiSuccess {Object} message Respond payload
    @apiSuccess {Array} message.channels Channel object array
    @apiSuccess {Number} message.total_pages Total number of available pages
    @apiSuccess {Number} message.current_page Current page number
    @apiSuccess {Boolean} message.has_next Next page available flag
    """
    # Fall back to the defaults when either parameter fails to parse.
    try:
        page = int(request.values.get('offset', 1))
        per_page = int(request.values.get('limit', 20))
    except ValueError:
        page, per_page = 1, 20
    pagination = (Channel
                  .query
                  .order_by(Channel.created_at.desc())
                  .paginate(page=page, per_page=per_page, error_out=False))
    payload = {
        'channels': [item.to_dict() for item in pagination.items],
        'total_pages': pagination.pages,
        'current_page': page,
        'has_next': pagination.has_next,
    }
    return jsonify(success=True, message=payload)
@channel.route('/channels/<int:channel_id>', methods=['GET'])
def get_channel(channel_id):
    """
    @api {get} /channels/:channel_id Get Unique Channel
    @apiVersion 1.0.0
    @apiName GetChannel
    @apiGroup Channel

    @apiSuccess {Boolean} success Request status
    @apiSuccess {Object} message Respond payload
    @apiSuccess {Array} message.channel Channel object array
    """
    found = Channel.query.get(channel_id)
    if found is None:
        return jsonify(success=False, message='not found'), 404
    return jsonify(success=True,
                   message={'channel': found.to_dict()})
@channel.route('/channels/<int:channel_id>', methods=['DELETE'])
def delete_channel(channel_id):
    """
    @api {delete} /channels/:channel_id Delete Channel
    @apiVersion 1.0.0
    @apiName DeleteChannel
    @apiGroup Channel

    @apiSuccess {Boolean} success Request status
    @apiSuccess {Object} message Respond payload
    @apiSuccess {Object} message.channel Channel object
    """
    doomed = Channel.query.get(channel_id)
    if doomed is None:
        return jsonify(success=False, message='not found'), 404
    db.session.delete(doomed)
    try:
        db.session.commit()
    except Exception as ex:
        db.session.rollback()
        app.logger.exception(ex)
        return jsonify(success=False), 500
    # The deleted row's data is still readable from the instance.
    return jsonify(success=True,
                   message={'channel': doomed.to_dict()})
@channel.route('/channels/<int:channel_id>', methods=['PUT'])
def edit_channel(channel_id):
    """
    @api {put} /channels/:channel_id Update Channel
    @apiVersion 1.0.0
    @apiName UpdateChannel
    @apiGroup Channel

    @apiParam {String {..255}} [name] Channel name
    @apiParam {String} [description] Channel description

    @apiSuccess {Boolean} success Request status
    @apiSuccess {Object} message Respond payload
    @apiSuccess {Object} message.channel Channel object
    """
    form = CreateChannelForm()
    if not form.validate_on_submit():
        return jsonify(success=False, message=form.errors), 400
    name = request.values.get('name')
    description = request.values.get('description', None)
    target = Channel.query.get(channel_id)
    if target is None:
        return jsonify(success=False, message='not found'), 404
    target.name = name
    if description:
        target.description = description
    db.session.add(target)
    try:
        db.session.commit()
    except Exception as ex:
        db.session.rollback()
        app.logger.exception(ex)
        return jsonify(success=False), 500
    return jsonify(success=True, message={'channel': target.to_dict()})
@channel.route('/channels/<int:channel_id>/members', methods=['POST'])
def register_user(channel_id):
    """
    @api {post} /channels/:channel_id/members Subscribe to Channel
    @apiVersion 1.0.0
    @apiName RegisterUserToChannel
    @apiGroup Channel
    @apiParam {Array} pns_id Recipients list. Array elements correspond to `pns_id`
    @apiParamExample {json} Request-Example:
    {
        'pns_id': ['alex@example.com', 'neil@example.com']
    }
    @apiSuccess {Boolean} success Request status
    @apiSuccess {Object} message Respond payload
    @apiSuccess {Object} message.channel Channel object
    @apiSuccess {Object} message.users Users object array
    """
    body = request.get_json(force=True)
    try:
        registration_schema.validate(body)
    except Exception as ex:
        return jsonify(success=False, message={'error': str(ex)}), 400
    wanted_ids = [raw.strip() for raw in body['pns_id']]
    target = Channel.query.get(channel_id)
    if not target:
        return jsonify(success=False, message='not found'), 404
    # Only users that actually exist are subscribed; unknown ids are ignored.
    matched = User.query.filter(User.pns_id.in_(wanted_ids)).all()
    for member in matched:
        target.subscribe_user(member)
    return jsonify(success=True,
                   message={'channel': target.to_dict(),
                            'users': [member.to_dict() for member in matched]})
@channel.route('/channels/<int:channel_id>/members', methods=['GET'])
def list_channel_members(channel_id):
    """
    @api {get} /channels/:channel_id/members List Channel Members
    @apiVersion 1.0.0
    @apiName GetChannelMembers
    @apiGroup Channel
    @apiParam {Number} offset=1
    @apiParam {Number} limit=20
    @apiSuccess {Boolean} success Request status
    @apiSuccess {Object} message Respond payload
    @apiSuccess {Array} message.users User object array
    @apiSuccess {Number} message.total_pages Total number of available pages
    @apiSuccess {Number} message.current_page Current page number
    @apiSuccess {Boolean} message.has_next Next page available flag
    """
    # If either parameter fails to parse, both fall back to the defaults.
    try:
        page = int(request.values.get('offset', 1))
        per_page = int(request.values.get('limit', 20))
    except ValueError:
        page, per_page = 1, 20
    target = Channel.query.get(channel_id)
    if not target:
        return jsonify(success=False, message='not found'), 404
    page_obj = target.subscribers.paginate(
        page=page, per_page=per_page, error_out=False)
    payload = {
        'users': [member.to_dict() for member in page_obj.items],
        'total_pages': page_obj.pages,
        'current_page': page,
        'has_next': page_obj.has_next,
    }
    return jsonify(success=True, message=payload)
@channel.route('/channels/<int:channel_id>/members/<pns_id>', methods=['DELETE'])
def unregister_user(channel_id, pns_id):
    """
    @api {delete} /channels/:channel_id/members/:pns_id Unsubscribe from Channel
    @apiVersion 1.0.0
    @apiName UnregisterUser
    @apiGroup Channel
    @apiSuccess {Boolean} success Request status
    @apiSuccess {Object} message Respond payload
    @apiSuccess {Object} message.channel Channel object
    @apiSuccess {Object} message.user User object
    """
    member = User.query.filter_by(pns_id=pns_id).first()
    target = Channel.query.get(channel_id)
    # Both the user and the channel must exist for the unsubscribe to apply.
    if not member or not target:
        return jsonify(success=False, message='not found'), 404
    if not target.unsubscribe_user(member):
        return jsonify(success=False), 500
    return jsonify(success=True, message={'channel': target.to_dict(),
                                          'user': member.to_dict()})
@channel.route('/channels/<int:channel_id>/alerts', methods=['GET'])
def list_channel_alerts(channel_id):
    """
    @api {get} /channels/:channel_id/alerts List Channel Alerts
    @apiVersion 1.0.0
    @apiName GetChannelAlerts
    @apiGroup Channel
    @apiParam {Number} offset=1
    @apiParam {Number} limit=20
    @apiSuccess {Boolean} success Request status
    @apiSuccess {Object} message Respond payload
    @apiSuccess {Array} message.users User object array
    @apiSuccess {Number} message.total_pages Total number of available pages
    @apiSuccess {Number} message.current_page Current page number
    @apiSuccess {Boolean} message.has_next Next page available flag
    """
    # If either parameter fails to parse, both fall back to the defaults.
    try:
        page = int(request.values.get('offset', 1))
        per_page = int(request.values.get('limit', 20))
    except ValueError:
        page, per_page = 1, 20
    target = Channel.query.get(channel_id)
    if not target:
        return jsonify(success=False, message='not found'), 404
    page_obj = (target.alerts
                .order_by(Alert.created_at.desc())
                .paginate(page=page, per_page=per_page, error_out=False))
    payload = {
        'alerts': [entry.to_dict() for entry in page_obj.items],
        'total_pages': page_obj.pages,
        'current_page': page,
        'has_next': page_obj.has_next,
    }
    return jsonify(success=True, message=payload)
|
|
import Queue
import threading
import time
import struct
import serial
import colorsys
import paho.mqtt.client as mqtt
# Logical strip identifiers: the device byte packed into every frame so the
# firmware on the serial side can tell which strip a command addresses.
DEVICES = {
    "OVERHEAD": 0,
    "SLEEP": 1,
    "BED_LEFT": 2,
    "BED_CENTER": 3,
    "BED_RIGHT": 4
}
# Number of addressable LEDs per strip.
NUM_LEDS = {
    "OVERHEAD": 20,
    "SLEEP": 144,
    "BED_LEFT": 18,
    "BED_CENTER": 14,
    "BED_RIGHT": 18
}
# Approximate RGB white points for named colour temperatures.
# NOTE(review): not referenced anywhere else in this script — presumably kept
# for manual use over MQTT; confirm before removing.
ColorTemperature = {
    "Candle": [0xFF, 0x93, 0x29],
    "Tungsten40W": [0xFF, 0xC5, 0x8F],
    "Tungsten100W": [0xFF, 0xD6, 0xAA],
    "Halogen": [0xFF, 0xF1, 0xE0],
    "CarbonArc": [0xFF, 0xFA, 0xF4],
    "HighNoonSun": [0xFF, 0xFF, 0xFB],
    "DirectSunlight": [0xFF, 0xFF, 0xFF],
    "OvercastSky": [0xC9, 0xE2, 0xFF],
    "ClearBlueSky": [0x40, 0x9C, 0xFF],
    "WarmFluorescent": [0xFF, 0xF4, 0xE5],
    "StandardFluorescent": [0xF4, 0xFF, 0xFA],
    "CoolWhiteFluorescent": [0xD4, 0xEB, 0xFF],
    "FullSpectrumFluorescent": [0xFF, 0xF4, 0xF2],
    "GrowLightFluorescent": [0xFF, 0xEF, 0xF7],
    "BlackLightFluorescent": [0xA7, 0x00, 0xFF],
    "MercuryVapor": [0xD8, 0xF7, 0xFF],
    "SodiumVapor": [0xFF, 0xD1, 0xB2],
    "MetalHalide": [0xF2, 0xFC, 0xFF],
    "HighPressureSodium": [0xFF, 0xB7, 0x4C],
    "UncorrectedTemperature": [0xFF, 0xFF, 0xFF]
}
# Frames produced by RGB/HSV/Fire are queued here and drained by SendThread.
sendQueue = Queue.Queue()
class ReceiveThread(threading.Thread):
    """Daemon thread that republishes each serial line to MQTT.

    Every line read from the serial port is stripped and published verbatim
    on the "ansiroom/bed/plant" topic.
    """
    def __init__(self, ser, mqclient):
        threading.Thread.__init__(self)
        # Daemon so the thread does not keep the process alive on exit.
        self.setDaemon(True)
        self._ser = ser
        self._mqclient = mqclient
    def run(self):
        # Blocking read loop; readline() waits for the next newline.
        while True:
            line = self._ser.readline().strip()
            self._mqclient.publish("ansiroom/bed/plant", line)
class SendThread(threading.Thread):
    """Daemon thread that drains queued frames to the serial port.

    Each queued item is written byte by byte with a short delay between
    writes — presumably to avoid overrunning the microcontroller's input
    buffer (TODO confirm against the firmware).
    """
    def __init__(self, queue, ser):
        threading.Thread.__init__(self)
        # Daemon so the thread does not keep the process alive on exit.
        self.setDaemon(True)
        self._queue = queue
        self._ser = ser
    def run(self):
        while True:
            # Blocks until a frame is available.
            s = self._queue.get()
            #print ":".join("{:02x}".format(ord(c)) for c in s)
            for i in s:
                self._ser.write(i)
                self._ser.flush()
                # Pace the output: one byte every 50 ms.
                time.sleep(0.05)
            self._queue.task_done()
            #print "DONE"
            # Inter-frame gap.
            time.sleep(0.5)
def on_connect(client, userdata, rc):
    """Broker-connect callback: log the result code and (re)subscribe.

    Subscribing here (rather than once at startup) means the subscription
    is re-established automatically after a reconnect.
    """
    banner = "Connected with result code " + str(rc)
    print(banner)
    client.subscribe("ansiroom/bedlight/#")
def on_message(client, userdata, msg):
print "Mq Received on channel %s -> %s" % (msg.topic, msg.payload)
parts = msg.topic.split("/")
device = parts[2]
command = parts[3]
val = msg.payload
print " Device: %s Command %s Value %s" % ( device, command, val )
if command == "fire":
if device == "sleep":
Fire(DEVICES['SLEEP'], int(val))
if device == "center":
Fire(DEVICES['BED_CENTER'], int(val))
if command == "sunrise":
if device == "sleep":
Sunrise(DEVICES['SLEEP'], 1, int(val))
if command == "colour":
values = val.split(",")
c = []
for i in range(0, len(values), 3):
a = []
a.append(int(values[i+0]))
a.append(int(values[i+1]))
a.append(int(values[i+2]))
c.append(a)
if device == "sleep":
RGB(DEVICES['SLEEP'], c)
if device == "overhead":
RGB(DEVICES['OVERHEAD'], c)
if device == "center":
RGB(DEVICES['BED_CENTER'], c)
if device == "left":
RGB(DEVICES['BED_LEFT'], c)
if device == "right":
RGB(DEVICES['BED_RIGHT'], c)
def RGB(device, values):
    """Queue a raw-RGB frame (opcode 0) for *device*.

    *values* is a list of [r, g, b] triples, one per LED; the frame is
    terminated with ';' and handed to the send queue.
    """
    if not 0 <= device <= 4:
        return
    frame = struct.pack("!BB", 0, device)
    for triple in values:
        frame += struct.pack('!{0}B'.format(len(triple)), *triple)
    frame += ";"
    sendQueue.put(frame)
def HSV(device, values):
    """Queue an HSV frame (opcode 1) for *device*.

    Identical framing to RGB() except the opcode byte; *values* is a list
    of [h, s, v] triples.
    """
    if not 0 <= device <= 4:
        return
    frame = struct.pack("!BB", 1, device)
    for triple in values:
        frame += struct.pack('!{0}B'.format(len(triple)), *triple)
    frame += ";"
    sendQueue.put(frame)
def fill(size, val):
    """Return a list containing *val* repeated *size* times.

    Note: the entries are the same object, not copies — matching the
    original append-in-a-loop behaviour.
    """
    return [val] * size
def Fire(device, intensity):
    """Queue a fire-effect frame (opcode 2) for *device*.

    An intensity below 1 turns the effect off and additionally blanks the
    strip with a 20-LED black fill.
    """
    turning_off = intensity < 1
    level = 0 if turning_off else intensity
    frame = struct.pack('!BBB', 2, device, level) + ";"
    sendQueue.put(frame)
    if turning_off:
        RGB(device, fill(20, [0, 0, 0]))
def Sunrise(device, num, percentage):
    """Render one step of a sunrise ramp as a solid fill of *num* LEDs.

    percentage < 0 blanks the strip, > 99 goes to full white; in between,
    hue drifts from deep red toward yellow while saturation eases off
    quadratically and brightness ramps linearly.
    """
    if device > 2 or device < 0:
        return
    if percentage < 0:
        RGB(device, fill(num, [0, 0, 0]))
        return
    if percentage > 99:
        RGB(device, fill(num, [255, 255, 255]))
        return
    hue = (250.0 + (85.0 / 100.0) * percentage) % 256
    sat = 255.0 - ((51.0 / 2000.0) * (percentage * percentage))
    bright = (255.0 / 100.0) * percentage
    red, green, blue = colorsys.hsv_to_rgb(
        hue / 255.0, sat / 255.0, bright / 255.0)
    RGB(device, fill(num, [int(red * 255), int(green * 255), int(blue * 255)]))
if __name__ == "__main__":
ser = serial.Serial("/dev/ttyUSB0", 9600)
mqclient = mqtt.Client("ansibed", clean_session=True)
mqclient.on_connect = on_connect
mqclient.on_message = on_message
mqclient.connect("cortex", 1883, 60)
receive = ReceiveThread(ser, mqclient)
receive.start()
send = SendThread(sendQueue, ser)
send.start()
Fire(DEVICES['SLEEP'], 0)
Fire(DEVICES['BED_CENTER'], 0)
RGB(DEVICES['OVERHEAD'], [[0,0,0]])
RGB(DEVICES['SLEEP'], [[0,0,0]])
RGB(DEVICES['BED_LEFT'], [[0,0,0]])
RGB(DEVICES['BED_CENTER'], [[0,0,0]])
RGB(DEVICES['BED_RIGHT'], [[0,0,0]])
mqclient.loop_forever()
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack Foundation.
# Copyright 2012-2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Utilities with minimum-depends for use in setup.py
"""
import email
import email.errors
import os
import re
import subprocess
import sys

from setuptools.command import sdist
def parse_mailmap(mailmap='.mailmap'):
    """Parse a git .mailmap file into an {alias: canonical} email mapping.

    Lines without two <email> tokens (comments, blanks) are skipped.
    Returns an empty dict when the file does not exist.
    """
    mapping = {}
    if not os.path.exists(mailmap):
        return mapping
    line_re = re.compile(r'[^#]*?(<.+>).*(<.+>).*')
    with open(mailmap, 'r') as fp:
        for line in fp:
            match = line_re.match(line)
            if match is None:
                continue
            canonical_email, alias = match.groups()
            mapping[alias] = canonical_email
    return mapping
def _parse_git_mailmap(git_dir, mailmap='.mailmap'):
    """Parse *mailmap* located in the repo root (the parent of .git)."""
    repo_root = os.path.dirname(git_dir)
    return parse_mailmap(os.path.join(repo_root, mailmap))
def canonicalize_emails(changelog, mapping):
    """Takes in a string and an email alias mapping and replaces all
    instances of the aliases in the string with their real email.

    :param changelog: text that may contain alias email addresses
    :param mapping: {alias: canonical_email} dict as built by parse_mailmap()
    :returns: the rewritten text
    """
    # .items() instead of the Python-2-only .iteritems(): identical
    # behaviour on Python 2, and the helper also runs under Python 3.
    for alias, email_address in mapping.items():
        changelog = changelog.replace(alias, email_address)
    return changelog
# Get requirements from the first file that exists
def get_reqs_from_files(requirements_files):
for requirements_file in requirements_files:
if os.path.exists(requirements_file):
with open(requirements_file, 'r') as fil:
return fil.read().split('\n')
return []
def parse_requirements(requirements_files=['requirements.txt',
                                           'tools/pip-requires']):
    """Build an install_requires list from the first requirements file found.

    Editable (-e) and direct-URL entries are reduced to their #egg= name,
    -f index lines are dropped, and 'argparse' is skipped on Python >= 2.7
    where it ships with the standard library.
    """
    requirements = []
    for raw in get_reqs_from_files(requirements_files):
        if re.match(r'\s*-e\s+', raw):
            # e.g. -e git://github.com/openstack/nova/master#egg=nova -> nova
            requirements.append(re.sub(r'\s*-e\s+.*#egg=(.*)$', r'\1', raw))
        elif re.match(r'\s*https?:', raw):
            # e.g. http://github.com/.../zipball/master#egg=nova -> nova
            requirements.append(re.sub(r'\s*https?:.*#egg=(.*)$', r'\1', raw))
        elif re.match(r'\s*-f\s+', raw):
            # -f lines are index locations, handled by dependency_links
            pass
        elif raw == 'argparse' and sys.version_info >= (2, 7):
            # stdlib since 2.7; listing it breaks distro installs
            pass
        else:
            requirements.append(raw)
    return requirements
def parse_dependency_links(requirements_files=['requirements.txt',
                                               'tools/pip-requires']):
    """Collect dependency_links entries (alternate package locations)."""
    links = []
    for raw in get_reqs_from_files(requirements_files):
        # skip comments and blank lines
        if re.match(r'(\s*#)|(\s*$)', raw):
            continue
        if re.match(r'\s*-[ef]\s+', raw):
            # -e/-f lines contribute the rest of the line, flag stripped
            links.append(re.sub(r'\s*-[ef]\s+', '', raw))
        elif re.match(r'\s*https?:', raw):
            # bare URLs pass through untouched
            links.append(raw)
    return links
def _run_shell_command(cmd, throw_on_error=False):
if os.name == 'nt':
output = subprocess.Popen(["cmd.exe", "/C", cmd],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
else:
output = subprocess.Popen(["/bin/sh", "-c", cmd],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
out = output.communicate()
if output.returncode and throw_on_error:
raise Exception("%s returned %d" % cmd, output.returncode)
if len(out) == 0:
return None
if len(out[0].strip()) == 0:
return None
return out[0].strip()
def _get_git_directory():
parent_dir = os.path.dirname(__file__)
while True:
git_dir = os.path.join(parent_dir, '.git')
if os.path.exists(git_dir):
return git_dir
parent_dir, child = os.path.split(parent_dir)
if not child: # reached to root dir
return None
def write_git_changelog():
    """Write a changelog based on the git changelog."""
    new_changelog = 'ChangeLog'
    git_dir = _get_git_directory()
    # Packagers can skip the (slow) git invocation by exporting
    # SKIP_WRITE_GIT_CHANGELOG in the environment.
    if not os.getenv('SKIP_WRITE_GIT_CHANGELOG'):
        if git_dir:
            git_log_cmd = 'git --git-dir=%s log' % git_dir
            changelog = _run_shell_command(git_log_cmd)
            # Rewrite alias emails to their canonical .mailmap form.
            mailmap = _parse_git_mailmap(git_dir)
            with open(new_changelog, "w") as changelog_file:
                changelog_file.write(canonicalize_emails(changelog, mailmap))
        else:
            # Outside a git checkout: still create an (empty) ChangeLog so
            # the sdist has the file it expects.
            open(new_changelog, 'w').close()
def generate_authors():
    """Create AUTHORS file using git commits."""
    jenkins_email = 'jenkins@review.(openstack|stackforge).org'
    old_authors = 'AUTHORS.in'
    new_authors = 'AUTHORS'
    git_dir = _get_git_directory()
    # Packagers can skip the git invocation via SKIP_GENERATE_AUTHORS.
    if not os.getenv('SKIP_GENERATE_AUTHORS'):
        if git_dir:
            # don't include jenkins email address in AUTHORS file
            git_log_cmd = ("git --git-dir=" + git_dir +
                           " log --format='%aN <%aE>' | sort -u | "
                           "egrep -v '" + jenkins_email + "'")
            changelog = _run_shell_command(git_log_cmd)
            # Normalize alias emails via .mailmap before writing.
            mailmap = _parse_git_mailmap(git_dir)
            with open(new_authors, 'w') as new_authors_fh:
                new_authors_fh.write(canonicalize_emails(changelog, mailmap))
                # Preserve any hand-maintained entries from AUTHORS.in.
                if os.path.exists(old_authors):
                    with open(old_authors, "r") as old_authors_fh:
                        new_authors_fh.write('\n' + old_authors_fh.read())
        else:
            # Outside a git checkout: emit an empty AUTHORS file.
            open(new_authors, 'w').close()
_rst_template = """%(heading)s
%(underline)s
.. automodule:: %(module)s
:members:
:undoc-members:
:show-inheritance:
"""
def get_cmdclass():
    """Return dict of commands to run from setup.py."""
    cmdclass = dict()
    # os.path.walk() visitor: records a dotted module name for every .py
    # file (except __init__.py) into the shared 'arg' dict.
    def _find_modules(arg, dirname, files):
        for filename in files:
            if filename.endswith('.py') and filename != '__init__.py':
                arg["%s.%s" % (dirname.replace('/', '.'),
                               filename[:-3])] = True
    class LocalSDist(sdist.sdist):
        """Builds the ChangeLog and Authors files from VC first."""
        def run(self):
            # Regenerate ChangeLog/AUTHORS before building the tarball.
            write_git_changelog()
            generate_authors()
            # sdist.sdist is an old style class, can't use super()
            sdist.sdist.run(self)
    cmdclass['sdist'] = LocalSDist
    # If Sphinx is installed on the box running setup.py,
    # enable setup.py to build the documentation, otherwise,
    # just ignore it
    try:
        from sphinx.setup_command import BuildDoc
        class LocalBuildDoc(BuildDoc):
            # Sphinx builders to run; LocalBuildLatex below overrides this.
            builders = ['html', 'man']
            def generate_autoindex(self):
                # Emit <source_dir>/api/autoindex.rst plus one stub .rst
                # page (from _rst_template) per discovered module.
                print "**Autodocumenting from %s" % os.path.abspath(os.curdir)
                modules = {}
                option_dict = self.distribution.get_option_dict('build_sphinx')
                source_dir = os.path.join(option_dict['source_dir'][1], 'api')
                if not os.path.exists(source_dir):
                    os.makedirs(source_dir)
                # Only walk top-level packages; _find_modules recurses.
                for pkg in self.distribution.packages:
                    if '.' not in pkg:
                        os.path.walk(pkg, _find_modules, modules)
                module_list = modules.keys()
                module_list.sort()
                autoindex_filename = os.path.join(source_dir, 'autoindex.rst')
                with open(autoindex_filename, 'w') as autoindex:
                    autoindex.write(""".. toctree::
   :maxdepth: 1
""")
                    for module in module_list:
                        output_filename = os.path.join(source_dir,
                                                       "%s.rst" % module)
                        heading = "The :mod:`%s` Module" % module
                        underline = "=" * len(heading)
                        values = dict(module=module, heading=heading,
                                      underline=underline)
                        print "Generating %s" % output_filename
                        with open(output_filename, 'w') as output_file:
                            output_file.write(_rst_template % values)
                        autoindex.write("   %s.rst\n" % module)
            def run(self):
                # SPHINX_DEBUG skips the (slow) autoindex generation.
                if not os.getenv('SPHINX_DEBUG'):
                    self.generate_autoindex()
                for builder in self.builders:
                    self.builder = builder
                    self.finalize_options()
                    self.project = self.distribution.get_name()
                    self.version = self.distribution.get_version()
                    self.release = self.distribution.get_version()
                    BuildDoc.run(self)
        class LocalBuildLatex(LocalBuildDoc):
            builders = ['latex']
        cmdclass['build_sphinx'] = LocalBuildDoc
        cmdclass['build_sphinx_latex'] = LocalBuildLatex
    except ImportError:
        pass
    return cmdclass
def _get_revno(git_dir):
    """Return the number of commits since the most recent tag.

    Uses git-describe when a tag exists; otherwise falls back to counting
    every commit since the beginning of history.
    """
    described = _run_shell_command(
        "git --git-dir=%s describe --always" % git_dir)
    if "-" in described:
        # e.g. "1.2.3-42-gdeadbee" -> "42"
        return described.rsplit("-", 2)[-2]
    # no tags found
    revisions = _run_shell_command(
        "git --git-dir=%s rev-list --abbrev-commit HEAD" % git_dir)
    return len(revisions.splitlines())
def _get_version_from_git(pre_version):
    """Derive a version string from git describe.

    With *pre_version*, an exact tag match is preferred; failing that, an
    alpha version "<pre>.a<revno>.g<sha>" is synthesized.  Returns None
    when not inside a git checkout.
    """
    git_dir = _get_git_directory()
    if not git_dir:
        return None
    if not pre_version:
        return _run_shell_command(
            "git --git-dir=" + git_dir + " describe --always").replace(
                '-', '.')
    try:
        return _run_shell_command(
            "git --git-dir=" + git_dir + " describe --exact-match",
            throw_on_error=True).replace('-', '.')
    except Exception:
        # Not sitting on a tag: build a pre-release version instead.
        sha = _run_shell_command(
            "git --git-dir=" + git_dir + " log -n1 --pretty=format:%h")
        return "%s.a%s.g%s" % (pre_version, _get_revno(git_dir), sha)
def _get_version_from_pkg_info(package_name):
"""Get the version from PKG-INFO file if we can."""
try:
pkg_info_file = open('PKG-INFO', 'r')
except (IOError, OSError):
return None
try:
pkg_info = email.message_from_file(pkg_info_file)
except email.MessageError:
return None
# Check to make sure we're in our own dir
if pkg_info.get('Name', None) != package_name:
return None
return pkg_info.get('Version', None)
def get_version(package_name, pre_version=None):
    """Get the version of the project. First, try getting it from PKG-INFO, if
    it exists. If it does, that means we're in a distribution tarball or that
    install has happened. Otherwise, if there is no PKG-INFO file, pull the
    version from git.
    We do not support setup.py version sanity in git archive tarballs, nor do
    we support packagers directly sucking our git repo into theirs. We expect
    that a source tarball be made from our git repo - or that if someone wants
    to make a source tarball from a fork of our repo with additional tags in it
    that they understand and desire the results of doing that.
    """
    # Explicit environment override wins over everything else.
    version = os.environ.get("OSLO_PACKAGE_VERSION", None)
    if version:
        return version
    # NOTE(review): the version is pinned to "2013.1" here and the PKG-INFO
    # lookup is commented out, which makes the git fallback below dead code.
    # This looks like a deliberate local pin -- confirm before un-pinning.
    version = "2013.1" #_get_version_from_pkg_info(package_name)
    if version:
        return version
    version = _get_version_from_git(pre_version)
    if version:
        return version
    raise Exception("Versioning for this project requires either an sdist"
                    " tarball, or access to an upstream git repository.")
|
|
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import time
from oslo_config import cfg
from oslo_serialization import jsonutils
from oslo_utils import fixture as utils_fixture
from nova import test
from nova.tests import fixtures as nova_fixtures
from nova.tests.functional.api import client as api_client
from nova.tests.functional import integrated_helpers
from nova.tests.unit.api.openstack.compute import test_services
from nova.tests.unit import fake_crypto
from nova.tests.unit import fake_notifier
import nova.tests.unit.image.fake
CONF = cfg.CONF
class NotificationSampleTestBase(test.TestCase,
                                 integrated_helpers.InstanceHelperMixin):
    """Base class for notification sample testing.
    To add tests for a versioned notification you have to store a sample file
    under doc/notification_sample directory. In the test method in the subclass
    trigger a change in the system that expected to generate the notification
    then use the _verify_notification function to assert if the stored sample
    matches with the generated one.
    If the notification has different payload content depending on the state
    change you triggered then the replacements parameter of the
    _verify_notification function can be used to override values coming from
    the sample file.
    Check nova.functional.notification_sample_tests.test_service_update as an
    example.
    """
    # Sentinel for _verify_notification replacements: "ignore the actual
    # value of this field when matching".
    ANY = object()
    REQUIRES_LOCKING = True
    # NOTE(gibi): Notification payloads always reflect the data needed
    # for every supported API microversion so we can safe to use the latest
    # API version in the tests. This helps the test to use the new API
    # features too. This can be overridden by subclasses that need to cap
    # at a specific microversion for older APIs.
    MAX_MICROVERSION = 'latest'

    def setUp(self):
        super(NotificationSampleTestBase, self).setUp()
        api_fixture = self.useFixture(nova_fixtures.OSAPIFixture(
            api_version='v2.1'))
        self.api = api_fixture.api
        self.admin_api = api_fixture.admin_api
        max_version = self.MAX_MICROVERSION
        self.api.microversion = max_version
        self.admin_api.microversion = max_version
        # Capture emitted notifications instead of sending them to the bus.
        fake_notifier.stub_notifier(self)
        self.addCleanup(fake_notifier.reset)
        self.useFixture(utils_fixture.TimeFixture(test_services.fake_utcnow()))
        self.flags(driver='chance_scheduler', group='scheduler')
        # the image fake backend needed for image discovery
        nova.tests.unit.image.fake.stub_out_image_service(self)
        self.addCleanup(nova.tests.unit.image.fake.FakeImageService_reset)
        self.useFixture(nova_fixtures.PlacementFixture())
        self.start_service('conductor')
        self.start_service('scheduler')
        self.start_service('network', manager=CONF.network_manager)
        self.compute = self.start_service('compute')

    def _get_notification_sample(self, sample):
        """Return the path of the stored sample JSON for *sample*."""
        sample_dir = os.path.dirname(os.path.abspath(__file__))
        sample_dir = os.path.normpath(os.path.join(
            sample_dir,
            "../../../../doc/notification_samples"))
        return sample_dir + '/' + sample + '.json'

    def _apply_replacements(self, replacements, sample_obj, notification):
        """Apply {dotted.key: value} overrides to the sample payload.

        The ANY sentinel removes the field from both sample and actual
        notification so it is ignored during matching.
        """
        replacements = replacements or {}
        for key, value in replacements.items():
            obj = sample_obj['payload']
            n_obj = notification['payload']
            # Walk down to the parent object of the addressed field.
            for sub_key in key.split('.')[:-1]:
                obj = obj['nova_object.data'][sub_key]
                n_obj = n_obj['nova_object.data'][sub_key]
            if value == NotificationSampleTestBase.ANY:
                del obj['nova_object.data'][key.split('.')[-1]]
                del n_obj['nova_object.data'][key.split('.')[-1]]
            else:
                obj['nova_object.data'][key.split('.')[-1]] = value

    def _verify_notification(self, sample_file_name, replacements=None,
                             actual=None):
        """Assert if the generated notification matches with the stored sample
        :param sample_file_name: The name of the sample file to match relative
                                 to doc/notification_samples
        :param replacements: A dict of key value pairs that is used to update
                             the payload field of the sample data before it is
                             matched against the generated notification.
                             The 'x.y':'new-value' key-value pair selects the
                             ["payload"]["nova_object.data"]["x"]
                             ["nova_object.data"]["y"] value from the sample
                             data and overrides it with 'new-value'. There is
                             a special value ANY that can be used to indicate
                             that the actual field value shall be ignored
                             during matching.
        :param actual: Defines the actual notification to compare with. If
                       None then it defaults to the first versioned
                       notification emitted during the test.
        """
        if not actual:
            self.assertEqual(1, len(fake_notifier.VERSIONED_NOTIFICATIONS))
            notification = fake_notifier.VERSIONED_NOTIFICATIONS[0]
        else:
            notification = actual
        with open(self._get_notification_sample(sample_file_name)) as sample:
            sample_data = sample.read()
        sample_obj = jsonutils.loads(sample_data)
        self._apply_replacements(replacements, sample_obj, notification)
        self.assertJsonEqual(sample_obj, notification)

    def _boot_a_server(self, expected_status='ACTIVE', extra_params=None):
        # We have to depend on a specific image and flavor to fix the content
        # of the notification that will be emitted
        flavor_body = {'flavor': {'name': 'test_flavor',
                                  'ram': 512,
                                  'vcpus': 1,
                                  'disk': 1,
                                  'id': 'a22d5517-147c-4147-a0d1-e698df5cd4e3'
                                  }}
        flavor_id = self.api.post_flavor(flavor_body)['id']
        extra_specs = {
            "extra_specs": {
                "hw:watchdog_action": "disabled"}}
        self.admin_api.post_extra_spec(flavor_id, extra_specs)
        # Ignore the create flavor notification
        fake_notifier.reset()
        keypair_req = {
            "keypair": {
                "name": "my-key",
                "public_key": fake_crypto.get_ssh_public_key()
            }}
        self.api.post_keypair(keypair_req)
        server = self._build_minimal_create_server_request(
            self.api, 'some-server',
            image_uuid='155d900f-4e14-4e4c-a73d-069cbf4541e6',
            flavor_id=flavor_id)
        # NOTE(gibi): from microversion 2.19 the description is not set to the
        # instance name automatically but can be provided at boot.
        server['description'] = 'some-server'
        if extra_params:
            extra_params['return_reservation_id'] = True
            extra_params['key_name'] = 'my-key'
            server.update(extra_params)
        post = {'server': server}
        created_server = self.api.post_server(post)
        reservation_id = created_server['reservation_id']
        created_server = self.api.get_servers(
            detail=False,
            search_opts={'reservation_id': reservation_id})[0]
        self.assertTrue(created_server['id'])
        # Wait for it to finish being created
        found_server = self._wait_for_state_change(self.api, created_server,
                                                   expected_status)
        found_server['reservation_id'] = reservation_id
        if found_server['status'] == 'ACTIVE':
            self.api.put_server_tags(found_server['id'], ['tag1'])
        return found_server

    def _wait_until_deleted(self, server):
        """Poll until the server 404s; fail on ERROR state or timeout."""
        try:
            for i in range(40):
                server = self.api.get_server(server['id'])
                if server['status'] == 'ERROR':
                    # BUG FIX: the implicitly-joined string literals lacked a
                    # separating space ("instead ofdisappearing.").
                    self.fail('Server went to error state instead of '
                              'disappearing.')
                time.sleep(0.5)
            self.fail('Server failed to delete.')
        except api_client.OpenStackApiNotFoundException:
            return

    def _get_notifications(self, event_type):
        return [notification for notification
                in fake_notifier.VERSIONED_NOTIFICATIONS
                if notification['event_type'] == event_type]

    def _wait_for_notification(self, event_type, timeout=1.0):
        received = fake_notifier.wait_for_versioned_notification(
            event_type, timeout)
        self.assertTrue(
            received,
            'notification %s hasn\'t been received' % event_type)

    def _wait_for_notifications(self, event_type, expected_count, timeout=1.0):
        """Collect *expected_count* notifications of *event_type*.

        Resets the fake notifier before returning so subsequent waits start
        from a clean slate.
        """
        notifications = []
        # BUG FIX: time.clock() measured CPU time on Linux (so it barely
        # advances while this thread blocks waiting) and was removed in
        # Python 3.8; use wall-clock time.time() for the timeout.
        start_time = time.time()
        while (len(notifications) < expected_count
               and time.time() - start_time < timeout):
            fake_notifier.wait_for_versioned_notification(event_type, timeout)
            notifications += self._get_notifications(event_type)
            # NOTE(gibi): reading and then resetting the fake_notifier without
            # synchronization doesn't lead to race condition as the only
            # parallelism is due to eventlet.
            fake_notifier.reset()
        self.assertEqual(expected_count, len(notifications),
                         'Unexpected number of %s notifications '
                         'within the given timeout. '
                         'Expected %d, got %d: %s' %
                         (event_type, expected_count, len(notifications),
                          notifications))
        return notifications

    def _attach_volume_to_server(self, server, volume_id):
        """Attach *volume_id* to *server* and wait for the end notification."""
        self.api.post_server_volume(
            server['id'], {"volumeAttachment": {"volumeId": volume_id}})
        self._wait_for_notification('instance.volume_attach.end')

    def _wait_and_get_migrations(self, server, max_retries=20):
        """Simple method to wait for the migrations
        Here we wait for the moment where active migration is in progress so
        we can get them and use them in the migration-related tests.
        :param server: server we'd like to use
        :param max_retries: maximum number of retries
        :returns: the migrations
        """
        retries = 0
        while retries < max_retries:
            retries += 1
            migrations = self.admin_api.get_active_migrations(server['id'])
            if (len(migrations) > 0 and
                    migrations[0]['status'] != 'preparing'):
                return migrations
            if retries == max_retries:
                self.fail('The migration table left empty.')
|
|
from decimal import Decimal
from django import forms
from django.template import loader
from django.template import RequestContext
from django.utils.translation import ugettext_lazy as _
from l10n.utils import moneyfmt
from livesettings import config_value, config_value_safe
from payment import signals
from payment.config import labelled_gateway_choices
from payment.models import CreditCardDetail
from payment.utils import get_or_create_order
from product.models import Discount, TaxClass, Price
from product.prices import PriceAdjustmentCalc, PriceAdjustment
from product.utils import find_best_auto_discount
from satchmo_store.contact.forms import ProxyContactForm, ContactInfoForm
from satchmo_store.contact.models import Contact
from satchmo_store.shop.models import Cart, Order
from satchmo_store.shop.signals import satchmo_shipping_price_query
from satchmo_utils.dynamic import lookup_template
from satchmo_utils.views import CreditCard
from shipping.config import shipping_methods, shipping_method_by_key
from shipping.signals import shipping_choices_query
from shipping.utils import update_shipping
from signals_ahoy.signals import form_init, form_initialdata, form_presave, form_postsave, form_validate
from tax.templatetags.satchmo_tax import _get_taxprocessor
from threaded_multihost import threadlocals
import calendar
import datetime
import logging
log = logging.getLogger('payment.forms')
# Credit-card expiry month choices: (1, '01') ... (12, '12').
MONTHS = [(m, '%02d' % m) for m in range(1, 13)]
def _get_cheapest_shipping(shipping_dict):
"""Use the shipping_dict as returned by _get_shipping_choices
to figure the cheapest shipping option."""
least = None
leastcost = None
for key, value in shipping_dict.items():
current = value['cost']
if leastcost is None or current < leastcost:
least = key
leastcost = current
return least
def _get_shipping_choices(request, paymentmodule, cart, contact, default_view_tax=False, order=None):
    """Iterate through legal shipping modules, building the list for display to the user.
    Returns the shipping choices list, along with a dictionary of shipping choices, useful
    for building javascript that operates on shipping choices.
    """
    shipping_options = []
    shipping_dict = {}
    rendered = {}
    # Fall back to the session's order when one wasn't passed in.
    if not order:
        try:
            order = Order.objects.from_request(request)
        except Order.DoesNotExist:
            pass
    # An order may carry a discount code that also discounts shipping.
    discount = None
    if order:
        try:
            discount = Discount.objects.by_code(order.discount_code)
        except Discount.DoesNotExist:
            pass
    # Non-shippable carts get the single 'NoShipping' pseudo-method.
    if not cart.is_shippable:
        methods = [shipping_method_by_key('NoShipping'),]
    else:
        methods = shipping_methods()
    tax_shipping = config_value_safe('TAX','TAX_SHIPPING', False)
    shipping_tax = None
    if tax_shipping:
        taxer = _get_taxprocessor(request)
        shipping_tax = TaxClass.objects.get(title=config_value('TAX', 'TAX_CLASS'))
    for method in methods:
        method.calculate(cart, contact)
        if method.valid():
            template = lookup_template(paymentmodule, 'shipping/options.html')
            t = loader.get_template(template)
            shipcost = finalcost = method.cost()
            # Apply the order's discount to shipping, when there is one.
            if discount and order:
                order.shipping_cost = shipcost
                discount.calc(order)
                shipdiscount = discount.item_discounts.get('Shipping', 0)
            else:
                shipdiscount = 0
            # set up query to determine shipping price to show
            shipprice = Price()
            shipprice.price = shipcost
            shipadjust = PriceAdjustmentCalc(shipprice)
            if shipdiscount:
                shipadjust += PriceAdjustment('discount', _('Discount'), shipdiscount)
            # Listeners may add further adjustments to the shipping price.
            satchmo_shipping_price_query.send(cart, adjustment=shipadjust)
            shipdiscount = shipadjust.total_adjustment()
            if shipdiscount:
                finalcost -= shipdiscount
            shipping_dict[method.id] = {'cost' : shipcost, 'discount' : shipdiscount, 'final' : finalcost}
            taxed_shipping_price = None
            if tax_shipping:
                taxcost = taxer.by_price(shipping_tax, finalcost)
                total = finalcost + taxcost
                taxed_shipping_price = moneyfmt(total)
                shipping_dict[method.id]['taxedcost'] = total
                shipping_dict[method.id]['tax'] = taxcost
            # Render the display snippet for this method.
            c = RequestContext(request, {
                'amount': finalcost,
                'description' : method.description(),
                'method' : method.method(),
                'expected_delivery' : method.expectedDelivery(),
                'default_view_tax' : default_view_tax,
                'shipping_tax': shipping_tax,
                'taxed_shipping_price': taxed_shipping_price})
            rendered[method.id] = t.render(c)
    #now sort by price, low to high
    sortme = [(value['cost'], key) for key, value in shipping_dict.items()]
    sortme.sort()
    shipping_options = [(key, rendered[key]) for cost, key in sortme]
    # Give listeners a last chance to inspect/modify the assembled choices.
    shipping_choices_query.send(sender=cart, cart=cart,
                                paymentmodule=paymentmodule, contact=contact,
                                default_view_tax=default_view_tax, order=order,
                                shipping_options = shipping_options,
                                shipping_dict = shipping_dict)
    return shipping_options, shipping_dict
def _find_sale(cart):
    """Return the best automatic discount for the cart's products, or None
    when the cart is empty."""
    if cart.numItems <= 0:
        return None
    products = [item.product for item in cart.cartitem_set.all()]
    return find_best_auto_discount(products)
class CustomChargeForm(forms.Form):
    """Admin form for setting a custom price/shipping adjustment on an
    order item, with signal hooks for initial data and validation."""
    orderitem = forms.IntegerField(required=True, widget=forms.HiddenInput())
    amount = forms.DecimalField(label=_('New price'), required=False)
    shipping = forms.DecimalField(label=_('Shipping adjustment'), required=False)
    # Bug fix: the label was previously passed positionally, which made it
    # CharField's first parameter (``max_length``) instead of the label.
    notes = forms.CharField(label=_("Notes"), required=False, initial="Your custom item is ready.")

    def __init__(self, *args, **kwargs):
        """Let ``form_initialdata`` listeners pre-populate ``initial``
        before standard form initialisation."""
        initial = kwargs.get('initial', {})
        # Sent with a string sender, mirroring the other form signals here.
        form_initialdata.send('CustomChargeForm', form=self, initial=initial)
        kwargs['initial'] = initial
        super(CustomChargeForm, self).__init__(*args, **kwargs)
        form_init.send(CustomChargeForm, form=self)

    def clean(self, *args, **kwargs):
        """Run standard cleaning, then let listeners add validation errors."""
        super(CustomChargeForm, self).clean(*args, **kwargs)
        form_validate.send(CustomChargeForm, form=self)
        return self.cleaned_data
class PaymentMethodForm(ProxyContactForm):
    """Form presenting the available payment gateways as a radio choice.

    The ``payment_methods_query`` signal lets listeners filter the gateway
    list (e.g. based on cart or order contents) before it is rendered.
    """
    paymentmethod = forms.ChoiceField(
        label=_('Payment method'),
        choices=labelled_gateway_choices(),
        widget=forms.RadioSelect,
        required=True
    )

    def __init__(self, cart=None, order=None, *args, **kwargs):
        super(PaymentMethodForm, self).__init__(*args, **kwargs)
        self.cart = cart
        # Send a signal to perform additional filtering of available payment
        # methods.  Receivers get cart/order so they can check the contents
        # and mutate the ``methods`` list in place if necessary.
        payment_choices = labelled_gateway_choices()
        signals.payment_methods_query.send(
            PaymentMethodForm,
            methods=payment_choices,
            cart=cart,
            order=order,
            contact=self._contact
        )
        # ``is None`` (identity) is the correct test for "no initial set";
        # ``== None`` invokes arbitrary __eq__ implementations.
        # NOTE(review): an empty ``payment_choices`` would raise IndexError
        # here — presumably at least one gateway is always configured.
        if self.fields['paymentmethod'].initial is None:
            self.fields['paymentmethod'].initial = payment_choices[0][0]
        # With a single gateway there is nothing to choose, so hide it.
        if len(payment_choices) == 1:
            self.fields['paymentmethod'].widget = forms.HiddenInput()
        else:
            self.fields['paymentmethod'].widget = forms.RadioSelect()
        self.fields['paymentmethod'].choices = payment_choices

    def clean(self):
        # Allow listeners to perform additional validation.
        form_validate.send(PaymentMethodForm, form=self)
        return self.cleaned_data
class PaymentContactInfoForm(PaymentMethodForm, ContactInfoForm):
    """Combined contact-information and payment-method form, with an
    optional discount-code field, used on the checkout contact step."""
    # Per-instance mapping of payment-method key -> list of extra contact
    # fields that method requires; mutable by form_init listeners.
    payment_required_fields = None

    def __init__(self, *args, **kwargs):
        super(PaymentContactInfoForm, self).__init__(*args, **kwargs)
        if not self.cart:
            request = threadlocals.get_current_request()
            self.cart = Cart.objects.from_request(request)
        self.fields['discount'] = forms.CharField(max_length=30, required=False)
        self.payment_required_fields = {}
        if config_value('PAYMENT', 'USE_DISCOUNTS'):
            # Pre-fill with the best automatic sale for this cart, if any.
            if not self.fields['discount'].initial:
                sale = _find_sale(self.cart)
                if sale:
                    self.fields['discount'].initial = sale.code
        else:
            # Discounts disabled: keep the field for API compatibility,
            # but do not display it.
            self.fields['discount'].widget = forms.HiddenInput()
        # Listeners of the form_init signal (below) may modify the dict of
        # payment_required_fields. For example, if your CUSTOM_PAYMENT requires
        # customer's city, put the following code in the listener:
        #
        #   form.payment_required_fields['CUSTOM_PAYMENT'] = ['city']
        #
        form_init.send(PaymentContactInfoForm, form=self)

    def save(self, request, *args, **kwargs):
        """Persist the contact, attach it to the cart, and create/update
        the order.  Returns the saved contact's primary key."""
        form_presave.send(PaymentContactInfoForm, form=self)
        contactid = super(PaymentContactInfoForm, self).save(*args, **kwargs)
        contact = Contact.objects.get(pk=contactid)
        cart = kwargs.get('cart', None)
        if not cart:
            cart = Cart.objects.from_request(request)
        if not cart.customer:
            cart.customer = contact
            cart.save()
        self.order = get_or_create_order(request, cart, contact, self.cleaned_data)
        form_postsave.send(PaymentContactInfoForm, form=self)
        return contactid

    def clean(self):
        try:
            paymentmethod = self.cleaned_data['paymentmethod']
        except KeyError:
            self._errors['paymentmethod'] = forms.util.ErrorList([_('This field is required')])
            return self.cleaned_data
        # Enforce any extra fields the selected payment method registered
        # through form_init listeners (see __init__).
        required_fields = self.payment_required_fields.get(paymentmethod, [])
        msg = _('Selected payment method requires this field to be filled')
        for fld in required_fields:
            # ``in`` replaces dict.has_key(), which was removed in Python 3;
            # behavior is identical.
            if not (fld in self.cleaned_data and self.cleaned_data[fld]):
                self._errors[fld] = forms.util.ErrorList([msg])
            elif fld == 'state':
                self.enforce_state = True
                try:
                    self._check_state(self.cleaned_data['state'], self.cleaned_data['country'])
                except forms.ValidationError as e:
                    # ``except ... as e`` works on Python 2.6+ and 3.x,
                    # unlike the old comma syntax.
                    self._errors[fld] = e.messages
        super(PaymentContactInfoForm, self).clean()
        return self.cleaned_data

    def clean_discount(self):
        """ Check if discount exists and is valid. """
        if not config_value('PAYMENT', 'USE_DISCOUNTS'):
            # Discounts disabled store-wide: ignore whatever was posted.
            return ''
        data = self.cleaned_data['discount']
        if data:
            try:
                discount = Discount.objects.get(code=data, active=True)
            except Discount.DoesNotExist:
                raise forms.ValidationError(_('Invalid discount code.'))
            request = threadlocals.get_current_request()
            try:
                contact = Contact.objects.from_request(request)
            except Contact.DoesNotExist:
                contact = None
            valid, msg = discount.isValid(self.cart, contact=contact)
            if not valid:
                raise forms.ValidationError(msg)
            # TODO: validate that it can work with these products
        return data
class SimplePayShipForm(forms.Form):
    """Shipping-method selection form; on save it creates/updates the
    order and, when a payment module is given, a pending payment."""
    shipping = forms.ChoiceField(widget=forms.RadioSelect(), required=False)
    def __init__(self, request, paymentmodule, *args, **kwargs):
        """Build the shipping choices for the cart/contact on ``request``.

        ``paymentmodule`` is the active payment configuration; it selects
        the template used to render each shipping option.
        """
        super(SimplePayShipForm, self).__init__(*args, **kwargs)
        try:
            order = Order.objects.from_request(request)
        except Order.DoesNotExist:
            order = None
        self.order = order
        self.orderpayment = None
        self.paymentmodule = paymentmodule
        try:
            self.tempCart = Cart.objects.from_request(request)
            if self.tempCart.numItems > 0:
                # NOTE(review): ``products`` is built but never used here —
                # looks like dead code from an earlier version; confirm.
                products = [item.product for item in self.tempCart.cartitem_set.all()]
        except Cart.DoesNotExist:
            self.tempCart = None
        try:
            self.tempContact = Contact.objects.from_request(request)
        except Contact.DoesNotExist:
            self.tempContact = None
        # NOTE(review): dict.has_key() is Python-2 only.
        if kwargs.has_key('default_view_tax'):
            default_view_tax = kwargs['default_view_tax']
        else:
            default_view_tax = config_value_safe('TAX', 'TAX_SHIPPING', False)
        shipping_choices, shipping_dict = _get_shipping_choices(request, paymentmodule, self.tempCart, self.tempContact, default_view_tax=default_view_tax)
        cheapshipping = _get_cheapest_shipping(shipping_dict)
        self.cheapshipping = cheapshipping
        discount = None
        if order and order.discount_code:
            try:
                discount = Discount.objects.by_code(order.discount_code)
                # A "FREECHEAP" discount restricts the available choices
                # to the cheapest shipping method only.
                if discount and discount.shipping == "FREECHEAP":
                    if cheapshipping:
                        shipping_choices = [opt for opt in shipping_choices if opt[0] == cheapshipping]
                        shipping_dict = {cheapshipping: shipping_dict[cheapshipping]}
            except Discount.DoesNotExist:
                pass
        # possibly hide the shipping based on store config
        shiphide = config_value('SHIPPING','HIDING')
        # Handle a partial payment and make sure we don't show a shipping choice after one has
        # already been chosen
        if self.order and self.order.is_partially_paid and shipping_dict.get(self.order.shipping_model, False):
            self.fields['shipping'] = forms.CharField(max_length=30, initial=self.order.shipping_model,
                widget=forms.HiddenInput(attrs={'value' : shipping_choices[0][0]}))
            self.shipping_hidden = True
        # Possibly hide if there is only 1 choice
        elif shiphide in ('YES', 'DESCRIPTION') and len(shipping_choices) == 1:
            self.fields['shipping'] = forms.CharField(max_length=30, initial=shipping_choices[0][0],
                widget=forms.HiddenInput(attrs={'value' : shipping_choices[0][0]}))
            if shiphide == 'DESCRIPTION':
                # Hidden input, but the single option's description is
                # still shown to the customer.
                self.shipping_hidden = False
                self.shipping_description = shipping_choices[0][1]
            else:
                self.shipping_hidden = True
                self.shipping_description = ""
        elif len(shipping_choices) == 0:
            self.shipping_hidden = True
        else:
            self.fields['shipping'].choices = shipping_choices
            if config_value('SHIPPING','SELECT_CHEAPEST'):
                if cheapshipping is not None:
                    self.fields['shipping'].initial = cheapshipping
            self.shipping_hidden = False
        self.shipping_dict = shipping_dict
        form_init.send(SimplePayShipForm, form=self)
    def clean_shipping(self):
        # Shipping is only mandatory when the cart actually ships.
        shipping = self.cleaned_data['shipping']
        if not shipping and self.tempCart.is_shippable:
            raise forms.ValidationError(_('This field is required.'))
        return shipping
    def is_needed(self):
        """Check to see if this form is even needed
        it is *not* needed if:
        - we have an order
        - the order balance is zero
        - No shipping needs to be selected
        """
        needed = True
        if self.order and self.tempContact and self.tempCart:
            order = self.order
            # With exactly one shipping option there is nothing to choose:
            # apply it, recalculate, and skip the step if fully paid.
            if order.is_shippable and len(self.shipping_dict) == 1:
                # NOTE(review): dict.keys()[0] is Python-2 only (dict views
                # are not indexable in Python 3).
                update_shipping(order, self.shipping_dict.keys()[0], self.tempContact, self.tempCart)
                order.recalculate_total(save=False)
                needed = not order.paid_in_full
                if not needed:
                    log.debug('%s can skip the payment step - no info needed', order)
        return needed
    def save(self, request, cart, contact, payment_module, data=None):
        """Create/update the order and, when ``payment_module`` is given,
        a pending payment for it (stored on ``self.orderpayment``)."""
        form_presave.send(SimplePayShipForm, form=self)
        if data is None:
            data = self.cleaned_data
        self.order = get_or_create_order(request, cart, contact, data)
        if payment_module:
            processor_module = payment_module.MODULE.load_module('processor')
            processor = processor_module.PaymentProcessor(payment_module)
            self.orderpayment = processor.create_pending_payment(order=self.order)
        else:
            self.orderpayment = None
        form_postsave.send(SimplePayShipForm, form=self)
class CreditPayShipForm(SimplePayShipForm):
    """SimplePayShipForm extended with credit-card fields; optionally
    performs an early authorize/release during clean()."""
    credit_type = forms.ChoiceField()
    credit_number = forms.CharField(max_length=20, widget=forms.TextInput(attrs={'autocomplete':'off'}))
    month_expires = forms.ChoiceField(choices=MONTHS)
    year_expires = forms.ChoiceField()
    ccv = forms.CharField(max_length=4, label='Sec code', widget=forms.TextInput(attrs={'autocomplete':'off'}))
    def __init__(self, request, paymentmodule, *args, **kwargs):
        """Populate card-type and expiry-year choices from the payment
        configuration."""
        creditchoices = paymentmodule.CREDITCHOICES.choice_values
        super(CreditPayShipForm, self).__init__(request, paymentmodule, *args, **kwargs)
        self.cc = None
        self.fields['credit_type'].choices = creditchoices
        num_years = config_value('PAYMENT', 'CC_NUM_YEARS')
        year_now = datetime.date.today().year
        self.fields['year_expires'].choices = [(year, year) for year in range(year_now, year_now+num_years+1)]
        # NOTE(review): tempCart was already set by the parent __init__;
        # this re-fetch (without a DoesNotExist guard) looks redundant —
        # confirm before removing.
        self.tempCart = Cart.objects.from_request(request)
        initial = kwargs.get('initial', None)
        if initial:
            # Re-posted sensitive values are masked in the rendered form.
            if initial.get('credit_number', None):
                self.fields['credit_number'].widget = forms.PasswordInput()
            if initial.get('ccv', None):
                self.fields['ccv'].widget = forms.PasswordInput()
        try:
            self.tempContact = Contact.objects.from_request(request)
        except Contact.DoesNotExist:
            self.tempContact = None
    def clean(self):
        """Standard cleaning plus an optional early authorize/release when
        the PAYMENT.AUTH_EARLY setting is enabled."""
        super(CreditPayShipForm, self).clean()
        data = self.cleaned_data
        if not self.is_valid():
            log.debug('form not valid, no early auth')
            return data
        early = config_value('PAYMENT', 'AUTH_EARLY')
        if early:
            processor_module = self.paymentmodule.MODULE.load_module('processor')
            processor = processor_module.PaymentProcessor(self.paymentmodule)
            if processor.can_authorize():
                log.debug('Processing early capture/release for: %s', self.order)
                # NOTE(review): the processor is re-created here although
                # an identical one was just built above — confirm intent.
                processor_module = self.paymentmodule.MODULE.load_module('processor')
                processor = processor_module.PaymentProcessor(self.paymentmodule)
                if self.order:
                    # we have to make a payment object and save the credit card data to
                    # make an auth/release.
                    orderpayment = processor.create_pending_payment(order=self.order,
                        amount=Decimal('0.01'))
                    op = orderpayment.capture
                    cc = CreditCardDetail(orderpayment=op,
                        expire_month=data['month_expires'],
                        expire_year=data['year_expires'],
                        credit_type=data['credit_type'])
                    cc.storeCC(data['credit_number'])
                    cc.save()
                    # set ccv into cache
                    cc.ccv = data['ccv']
                    self.cc = cc
                    results = processor.authorize_and_release(order=self.order)
                    if not results.success:
                        log.debug('Payment module error: %s', results)
                        raise forms.ValidationError(results.message)
                    else:
                        log.debug('Payment module capture/release success for %s', self.order)
            else:
                log.debug('Payment module %s cannot do credit authorizations, ignoring AUTH_EARLY setting.',
                    self.paymentmodule.MODULE.value)
        return data
    def clean_credit_number(self):
        """ Check if credit card is valid. """
        data = self.cleaned_data
        credit_number = data['credit_number']
        card = CreditCard(credit_number, data['credit_type'])
        results, msg = card.verifyCardTypeandNumber()
        if not results:
            raise forms.ValidationError(msg)
        return credit_number
    def clean_month_expires(self):
        # Normalize the posted month string to an int for the expiry check.
        return int(self.cleaned_data['month_expires'])
    def clean_year_expires(self):
        """ Check if credit card has expired. """
        # Fields clean in declaration order, so month is already an int.
        month = self.cleaned_data['month_expires']
        year = int(self.cleaned_data['year_expires'])
        # A card is valid through the last day of its expiry month.
        max_day = calendar.monthrange(year, month)[1]
        if datetime.date.today() > datetime.date(year=year, month=month, day=max_day):
            raise forms.ValidationError(_('Your card has expired.'))
        return year
    def clean_ccv(self):
        """ Validate a proper CCV is entered. Remember it can have a leading 0 so don't convert to int and return it"""
        try:
            # int() is used purely as a digits-only check; the (possibly
            # zero-padded) string itself is what gets returned.
            check = int(self.cleaned_data['ccv'])
            return self.cleaned_data['ccv'].strip()
        except ValueError:
            raise forms.ValidationError(_('Invalid ccv.'))
    def save(self, request, cart, contact, payment_module, data=None):
        """Save the order and the credit card information for this orderpayment"""
        form_presave.send(CreditPayShipForm, form=self)
        if data is None:
            data = self.cleaned_data
        assert(data)
        super(CreditPayShipForm, self).save(request, cart, contact, payment_module, data=data)
        if self.orderpayment:
            op = self.orderpayment.capture
            cc = CreditCardDetail(orderpayment=op,
                expire_month=data['month_expires'],
                expire_year=data['year_expires'],
                credit_type=data['credit_type'])
            cc.storeCC(data['credit_number'])
            cc.save()
            # set ccv into cache
            cc.ccv = data['ccv']
            self.cc = cc
        form_postsave.send(CreditPayShipForm, form=self)
|
|
"""
Algorithms for computing the skeleton of a binary image
"""
import numpy as np
from scipy import ndimage as ndi
from ._skeletonize_cy import _skeletonize_loop, _table_lookup_index
# --------- Skeletonization by morphological thinning ---------
def skeletonize(image):
    """Return the skeleton of a binary image.

    Each connected foreground component is thinned down to a
    single-pixel-wide skeleton using the Zhang-Suen parallel thinning
    algorithm [1]: repeated passes remove border pixels, classified by a
    256-entry look-up table over their 8-neighbourhoods, until the image
    stops changing.  Note that this gives different results than a medial
    axis transform, which is also often called "skeletonization".

    Parameters
    ----------
    image : numpy.ndarray
        2D binary image; 1 (or True) marks foreground, 0 (or False)
        marks background.

    Returns
    -------
    skeleton : ndarray of bool
        A matrix containing the thinned image.

    See also
    --------
    medial_axis

    References
    ----------
    .. [1] T. Y. Zhang and C. Y. Suen, "A fast parallel algorithm for
           thinning digital patterns", Communications of the ACM 27(3),
           March 1984.
    """
    # Look-up table over all 2^8 = 256 neighbour configurations.  Codes
    # 1, 2 and 3 mark removal candidates: 1's and 3's fall in the first
    # sub-iteration, 2's and 3's in the second.
    lut = [0, 0, 0, 1, 0, 0, 1, 3, 0, 0, 3, 1, 1, 0, 1, 3, 0, 0, 0, 0, 0, 0,
           0, 0, 2, 0, 2, 0, 3, 0, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0,
           0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 3, 0, 2, 2, 0, 0,
           0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
           0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 2, 0,
           0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 3, 0, 2, 0, 0, 0, 3, 1,
           0, 0, 1, 3, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
           0, 0, 0, 0, 0, 1, 3, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
           2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 3, 1, 3, 0, 0,
           1, 3, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
           0, 0, 0, 0, 2, 3, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 3, 3,
           0, 1, 0, 0, 0, 0, 2, 2, 0, 0, 2, 0, 0, 0]

    # Work on an unsigned-int copy (also handles boolean input).
    skeleton = image.astype(np.uint8)

    # Validate the input: must be 2D and strictly binary.
    if skeleton.ndim != 2:
        raise ValueError('Skeletonize requires a 2D array')
    if not np.all(np.in1d(skeleton.flat, (0, 1))):
        raise ValueError('Image contains values other than 0 and 1')

    # Correlation weights giving every 3x3 neighbour pattern a unique
    # code in [0, 255] (the centre pixel itself carries no weight).
    mask = np.array([[  1,  2,  4],
                     [128,  0,  8],
                     [ 64, 32, 16]], np.uint8)

    def _thin_once(codes_to_remove):
        """Zero out pixels whose LUT code is listed; report any removal."""
        neighbours = ndi.correlate(skeleton, mask, mode='constant')
        neighbours *= skeleton  # background pixels keep code 0
        codes = np.take(lut, neighbours)
        removed_any = False
        for code in codes_to_remove:
            hits = codes == code
            if np.any(hits):
                skeleton[hits] = 0
                removed_any = True
        return removed_any

    # Alternate the two sub-iterations until a full sweep removes nothing.
    changed = True
    while changed:
        first = _thin_once((1, 3))
        second = _thin_once((2, 3))
        changed = first or second

    return skeleton.astype(bool)
# --------- Skeletonization by medial axis transform --------
# 3x3 structuring element with full (8-connected) connectivity, shared by
# the lookup-table construction below.
_eight_connect = ndi.generate_binary_structure(2, 2)
def medial_axis(image, mask=None, return_distance=False):
    """
    Compute the medial axis transform of a binary image

    Parameters
    ----------
    image : binary ndarray, shape (M, N)
        The image of the shape to be skeletonized.
    mask : binary ndarray, shape (M, N), optional
        If a mask is given, only those elements in `image` with a true
        value in `mask` are used for computing the medial axis.
    return_distance : bool, optional
        If true, the distance transform is returned as well as the skeleton.

    Returns
    -------
    out : ndarray of bools
        Medial axis transform of the image
    dist : ndarray of ints, optional
        Distance transform of the image (only returned if `return_distance`
        is True)

    See also
    --------
    skeletonize

    Notes
    -----
    This algorithm computes the medial axis transform of an image
    as the ridges of its distance transform.

    The different steps of the algorithm are as follows
     * A lookup table is used, that assigns 0 or 1 to each configuration of
       the 3x3 binary square, whether the central pixel should be removed
       or kept. We want a point to be removed if it has more than one neighbor
       and if removing it does not change the number of connected components.
     * The distance transform to the background is computed, as well as
       the cornerness of the pixel.
     * The foreground (value of 1) points are ordered by
       the distance transform, then the cornerness.
     * A cython function is called to reduce the image to its skeleton. It
       processes pixels in the order determined at the previous step, and
       removes or maintains a pixel according to the lookup table. Because
       of the ordering, it is possible to process all pixels in only one
       pass.

    Examples
    --------
    >>> square = np.zeros((7, 7), dtype=np.uint8)
    >>> square[1:-1, 2:-2] = 1
    >>> medial_axis(square).astype(np.uint8)
    array([[0, 0, 0, 0, 0, 0, 0],
           [0, 0, 1, 0, 1, 0, 0],
           [0, 0, 0, 1, 0, 0, 0],
           [0, 0, 0, 1, 0, 0, 0],
           [0, 0, 0, 1, 0, 0, 0],
           [0, 0, 1, 0, 1, 0, 0],
           [0, 0, 0, 0, 0, 0, 0]], dtype=uint8)
    """
    global _eight_connect
    if mask is None:
        # Fix: ``np.bool`` is a deprecated alias removed in NumPy 1.24;
        # the builtin ``bool`` is the correct dtype here.
        masked_image = image.astype(bool)
    else:
        masked_image = image.astype(bool).copy()
        masked_image[~mask] = False
    #
    # Build lookup table - three conditions
    # 1. Keep only positive pixels (center_is_foreground array).
    # AND
    # 2. Keep if removing the pixel results in a different connectivity
    # (if the number of connected components is different with and
    # without the central pixel)
    # OR
    # 3. Keep if # pixels in neighbourhood is 2 or less
    # Note that table is independent of image
    center_is_foreground = (np.arange(512) & 2**4).astype(bool)
    table = (center_is_foreground  # condition 1.
             &
             (np.array([ndi.label(_pattern_of(index), _eight_connect)[1] !=
                        ndi.label(_pattern_of(index & ~ 2**4),
                                  _eight_connect)[1]
                        for index in range(512)])  # condition 2
              |
              np.array([np.sum(_pattern_of(index)) < 3 for index in range(512)]))
             # condition 3
             )
    # Build distance transform
    distance = ndi.distance_transform_edt(masked_image)
    if return_distance:
        # Preserve the full-shape transform before it is masked below.
        store_distance = distance.copy()
    # Corners
    # The processing order along the edge is critical to the shape of the
    # resulting skeleton: if you process a corner first, that corner will
    # be eroded and the skeleton will miss the arm from that corner. Pixels
    # with fewer neighbors are more "cornery" and should be processed last.
    # We use a cornerness_table lookup table where the score of a
    # configuration is the number of background (0-value) pixels in the
    # 3x3 neighbourhood
    cornerness_table = np.array([9 - np.sum(_pattern_of(index))
                                 for index in range(512)])
    corner_score = _table_lookup(masked_image, cornerness_table)
    # Define arrays for inner loop: coordinates of each foreground pixel.
    i, j = np.mgrid[0:image.shape[0], 0:image.shape[1]]
    result = masked_image.copy()
    distance = distance[result]
    i = np.ascontiguousarray(i[result], dtype=np.intp)
    j = np.ascontiguousarray(j[result], dtype=np.intp)
    result = np.ascontiguousarray(result, np.uint8)
    # Determine the order in which pixels are processed.
    # We use a random # for tiebreaking. Assign each pixel in the image a
    # predictable, random # so that masking doesn't affect arbitrary choices
    # of skeletons
    #
    generator = np.random.RandomState(0)
    tiebreaker = generator.permutation(np.arange(masked_image.sum()))
    order = np.lexsort((tiebreaker,
                        corner_score[masked_image],
                        distance))
    order = np.ascontiguousarray(order, dtype=np.int32)
    table = np.ascontiguousarray(table, dtype=np.uint8)
    # Remove pixels not belonging to the medial axis (in-place on result)
    _skeletonize_loop(result, i, j, order, table)
    result = result.astype(bool)
    # ``is not None`` idiom (was ``not mask is None``).
    if mask is not None:
        result[~mask] = image[~mask]
    if return_distance:
        return result, store_distance
    else:
        return result
def _pattern_of(index):
"""
Return the pattern represented by an index value
Byte decomposition of index
"""
return np.array([[index & 2**0, index & 2**1, index & 2**2],
[index & 2**3, index & 2**4, index & 2**5],
[index & 2**6, index & 2**7, index & 2**8]], bool)
def _table_lookup(image, table):
"""
Perform a morphological transform on an image, directed by its
neighbors
Parameters
----------
image : ndarray
A binary image
table : ndarray
A 512-element table giving the transform of each pixel given
the values of that pixel and its 8-connected neighbors.
border_value : bool
The value of pixels beyond the border of the image.
Returns
-------
result : ndarray of same shape as `image`
Transformed image
Notes
-----
The pixels are numbered like this::
0 1 2
3 4 5
6 7 8
The index at a pixel is the sum of 2**<pixel-number> for pixels
that evaluate to true.
"""
#
# We accumulate into the indexer to get the index into the table
# at each point in the image
#
if image.shape[0] < 3 or image.shape[1] < 3:
image = image.astype(bool)
indexer = np.zeros(image.shape, int)
indexer[1:, 1:] += image[:-1, :-1] * 2**0
indexer[1:, :] += image[:-1, :] * 2**1
indexer[1:, :-1] += image[:-1, 1:] * 2**2
indexer[:, 1:] += image[:, :-1] * 2**3
indexer[:, :] += image[:, :] * 2**4
indexer[:, :-1] += image[:, 1:] * 2**5
indexer[:-1, 1:] += image[1:, :-1] * 2**6
indexer[:-1, :] += image[1:, :] * 2**7
indexer[:-1, :-1] += image[1:, 1:] * 2**8
else:
indexer = _table_lookup_index(np.ascontiguousarray(image, np.uint8))
image = table[indexer]
return image
|
|
import ctypes as ct
import freetype.raw as ft
from ed2d import texture
from ed2d import mesh
from ed2d import typeutils
from ed2d.opengl import gl, pgl
# from ed2d import glmath as cyglmath
# from ed2d.glmath import cython as cyglmath
from gem import matrix, vector
# Hack to verify that freetype is properly destructed after everything
# this code was also commited to freetype-py
class _FT_Library_Wrapper(ft.FT_Library):
    '''Subclass of FT_Library to help with calling FT_Done_FreeType.

    Holding references on the class guarantees the cleanup function is
    still reachable when __del__ runs during interpreter shutdown.
    '''
    # for some reason this doesn't get carried over and ctypes complains
    _type_ = ft.FT_Library._type_
    # Store ref to FT_Done_FreeType otherwise it will be deleted before needed.
    _ft_done_freetype = ft.FT_Done_FreeType
    def __del__(self):
        # call FT_Done_FreeType to release the library handle
        self._ft_done_freetype(self)
def init_freetype():
    """Initialize FreeType and return the wrapped library handle."""
    library = _FT_Library_Wrapper()
    status = ft.FT_Init_FreeType(ct.byref(library))
    if status:
        raise Exception('FreeType failed to initialize.')
    return library

# Shared module-level FreeType handle; the wrapper's __del__ releases it
# when the object is finally garbage-collected.
freetype = init_freetype()
# These are the usable fields of FT_GlyphSlotRec
# field: data type:
# library FT_Library
# face FT_Face
# next FT_GlyphSlot
# generic FT_Generic
# metrics FT_Glyph_Metrics
# linearHoriAdvance FT_Fixed
# linearVertAdvance FT_Fixed
# advance FT_Vector
# format FT_Glyph_Format
# bitmap FT_Bitmap
# bitmap_left FT_Int
# bitmap_top FT_Int
# outline FT_Outline
# num_subglyphs FT_UInt
# subglyphs FT_SubGlyph
# control_data void*
# control_len long
# lsb_delta FT_Pos
# rsb_delta FT_Pos
class Font(object):
    """Wraps a FreeType font face and caches per-character glyph data."""
    def __init__(self, size, fontPath):
        """Load the font file at ``fontPath`` at the given em ``size``.

        The device DPI is fixed at 72 (see comment below), so one point
        corresponds to one pixel.
        """
        self.size = size
        self.path = fontPath
        self.face = ft.FT_Face()
        # Cache mapping char -> glyph-data dict as built by load_glyph()
        # (keys: 'pixelData', 'bitmap_x', 'bitmap_y', 'texWidth',
        # 'texHeight', 'advance').
        self.charDataCache = {}
        # load font face
        if ft.FT_New_Face(freetype, typeutils.to_c_str(fontPath), 0,
                          ct.byref(self.face)):
            raise Exception('Error loading font.')
        # For now the device dpi will be hard coded to 72
        # later on if we want to do mobile stuff, or have dpi scaling
        # for high-dpi monitors this will need to be changed.
        # FreeType expects the char size in 1/64th points, hence "* 64".
        if ft.FT_Set_Char_Size(self.face, 0, size * 64, 72, 72):
            raise Exception('Error setting character size.')
    def load_glyph(self, char):
        '''
        Loads glyph, and returns a dictionary containing glyph data.

        Results are cached per character, so repeated calls are cheap.
        Keys: 'pixelData' (flat grayscale buffer copied from FreeType),
        'bitmap_x'/'bitmap_y' (bitmap_left/bitmap_top bearings),
        'texWidth'/'texHeight' (bitmap dimensions) and 'advance'
        (horizontal pen advance in pixels).
        '''
        try:
            return self.charDataCache[char]
        except KeyError:
            # Not cached yet: render the glyph via FreeType.
            index = ft.FT_Get_Char_Index(self.face, ord(char))
            if ft.FT_Load_Glyph(self.face, index, ft.FT_LOAD_RENDER):
                raise Exception('Error loading glyph')
            glyphSlot = self.face.contents.glyph
            charData = {}
            bitmapStruct = glyphSlot.contents.bitmap
            texWidth = bitmapStruct.width
            texHeight = bitmapStruct.rows
            # Copy the glyph bitmap out of the C buffer, one value/pixel.
            pixelData = [0.0 for x in range(texWidth * texHeight)]
            for item in range(texWidth * texHeight):
                pixelData[item] = bitmapStruct.buffer[item]
            # Zero-area glyphs (e.g. the space character) still need at
            # least one texel to go into the texture atlas.
            if not pixelData:
                pixelData = [0]
            charData['pixelData'] = pixelData
            charData['bitmap_x'] = glyphSlot.contents.bitmap_left
            charData['bitmap_y'] = glyphSlot.contents.bitmap_top
            charData['texWidth'] = texWidth
            charData['texHeight'] = texHeight
            # advance.x is in 26.6 fixed point; ">> 6" converts to pixels.
            charData['advance'] = glyphSlot.contents.advance.x >> 6
            self.charDataCache[char] = charData
            return charData
    def delete(self):
        '''Delete the freetype face'''
        ft.FT_Done_Face(self.face)
class Text(object):
    """Renders strings with a Font, caching one Glyph per printable ASCII
    character in a shared texture atlas."""

    def __init__(self, program, font):
        self.program = program
        self.texAtlas = texture.TextureAtlas(self.program, texFormat=gl.GL_RED)
        self.font = font
        self.vertLoc = self.program.get_attribute(b'position')
        self.UVLoc = self.program.get_attribute(b'vertexUV')
        # Unit quad (triangle strip) shared by every glyph's vertex buffer.
        self.data = [[0.0, 1.0], [1.0, 1.0], [0.0, 0.0], [1.0, 0.0]]
        self.chrMap = {}
        self.basePos = 0.0
        self.lineSpacing = 3
        # Pre-build glyphs for the printable ASCII range (32..127).
        for texVal in range(32, 128):
            char = chr(texVal)
            fontData = self.font.load_glyph(char)
            # Track the farthest bitmap extent above the baseline so every
            # line can be offset uniformly.
            if fontData['bitmap_y'] > self.basePos:
                self.basePos = fontData['bitmap_y']
            self.chrMap[char] = Glyph(self.program, self.texAtlas, fontData,
                                      char, self)
        # (A stray debug print of self.basePos was removed here.)
        self.texAtlas.gen_atlas()
        self.vbo = mesh.buffer_object(self.data, gl.GLfloat)
        # VAOs can only be built once the atlas has assigned UV regions.
        for glyph in self.chrMap.values():
            glyph.init_gl()

    def draw_text(self, text, xPos, yPos):
        """Draw ``text`` starting at (xPos, yPos); '\\n' begins a new line."""
        self.program.use()
        self.texAtlas.bind()
        # When textures can be added to the atlas dynamically, this is
        # where glyph objects for unseen characters would be created,
        # instead of pre-generating all of printable ASCII in __init__.
        textLines = text.split('\n')
        penPosX = xPos
        penPosY = self.basePos + yPos
        for txt in textLines:
            for c in txt:
                char = self.chrMap[c]
                char.render(penPosX, penPosY)
                penPosX += char.advance
            # Advance to the next line and reset the horizontal pen.
            penPosY += self.basePos + self.lineSpacing
            penPosX = xPos
        gl.glBindVertexArray(0)
class Glyph(object):
    """A single character's textured quad inside the shared atlas."""
    def __init__(self, program, atlas, fontData, char, parent):
        """Register the glyph bitmap with the texture atlas.

        ``parent`` is the owning Text object; its shared unit-quad vertex
        buffer is reused by every glyph.
        """
        self.atlas = atlas
        self.fontData = fontData
        self.program = program
        self.parent = parent
        # Quad rendered as a 4-vertex triangle strip.
        self.nverts = 4
        self.vertLoc = self.program.get_attribute(b'position')
        self.modelLoc = self.program.new_uniform(b'model')
        self.UVLoc = self.program.get_attribute(b'vertexUV')
        self.modelMatrix = matrix.Matrix(4)
        self.char = char
        self.pixelData = self.fontData['pixelData']
        self.textureWidth = self.fontData['texWidth']
        self.textureHeight = self.fontData['texHeight']
        # Bearing of the bitmap relative to the pen position.
        self.bitX = self.fontData['bitmap_x']
        self.bitY = self.fontData['bitmap_y']
        self.advance = self.fontData['advance']
        self.uniform = self.program.get_uniform(self.modelLoc)
        self.textureID = self.atlas.add_texture(self.textureWidth,
                                                self.textureHeight,
                                                self.pixelData)
    def init_gl(self):
        """Create the VAO and UV buffer; must run after the atlas has been
        generated (the UV coordinates only exist then)."""
        self.vao = pgl.glGenVertexArrays(1)
        gl.glBindVertexArray(self.vao)
        self._uvCoords = self.atlas.get_uvcoords(self.textureID)
        self.vertexScale = self.atlas.get_vertex_scale(self.textureID)
        # Scale the shared unit quad up to this glyph's sub-texture size.
        vecScale = vector.Vector(
            3,
            data=[self.atlas.maxSubTextureHeight * self.vertexScale[0],
                  self.atlas.maxSubTextureHeight * self.vertexScale[1], 0.0])
        self.scaleMat = matrix.Matrix(4).i_scale(vecScale)
        self.uvbo = mesh.buffer_object(self._uvCoords, gl.GLfloat)
        gl.glEnableVertexAttribArray(self.vertLoc)
        gl.glEnableVertexAttribArray(self.UVLoc)
        # Positions come from the parent's shared quad VBO; UVs are ours.
        gl.glBindBuffer(gl.GL_ARRAY_BUFFER, self.parent.vbo)
        pgl.glVertexAttribPointer(self.vertLoc, 2, gl.GL_FLOAT, gl.GL_FALSE, 0, None)
        gl.glBindBuffer(gl.GL_ARRAY_BUFFER, self.uvbo)
        pgl.glVertexAttribPointer(self.UVLoc, 2, gl.GL_FLOAT, gl.GL_FALSE, 0, None)
        gl.glBindVertexArray(0)
    def render(self, posX, posY):
        """Draw the glyph with its pen position at (posX, posY)."""
        gl.glBindVertexArray(self.vao)
        # Offset by the glyph bearing; bitY is subtracted because screen
        # y grows downward here.
        vecScale = vector.Vector(
            3,
            data=[posX + self.bitX, posY - self.bitY, 0.0])
        self.modelMatrix = self.scaleMat.translate(vecScale)
        self.program.set_uniform_matrix(self.modelLoc, self.modelMatrix,
                                        uniform=self.uniform,
                                        size=4)
        gl.glDrawArrays(gl.GL_TRIANGLE_STRIP, 0, self.nverts)
|
|
#!/usr/local/bin/python3
# mapsParser5.py
# Parsing script for multi-plate spectrophotometer in PLATE format
#
# Author: Daniel A Cuevas
# Created on 11 Nov 2017
# Updated on 11 Nov 2017
#
#
###############################################################
# ASSUMPTIONS
###############################################################
# = Tab delimited file
# = File is for a single plate
# = First two rows are machine-generated information
# = Third row are column headers, a total of 13 items with overhanging tabs
# === <TAB> Temperature <TAB> 1 <TAB> 2 <TAB> ... <12> <TAB> <NEWLINE>
# = Remaining rows are data values for row A, then B, then C, etc.
# = Time is first item in row A
# = Time format: D.HH:MM:SS
# === Regex for time format is (\d\.)?\d{2}:\d{2}:\d{2}
# === Days and period are optional
# = Plate reads are separated by a blank line
# = After entirety of reads is a blank line and an "~End" indicator
# = Finally, there is a text line with the date+timestamp at the end (not used)
# === E.g. ...Date Last Saved: 7/8/2015 5:36:31 PM
###############################################################
from __future__ import absolute_import, division, print_function
import argparse
import os
import sys
import re
import datetime as dt
from operator import itemgetter
from itertools import product
# Matches "D.HH:MM:SS" with an optional day prefix:
# group(2) = days (may be None), group(3) = HH:MM:SS clock time.
TIMESTAMP = re.compile(r"((\d)\.)?(\d{2}:\d{2}:\d{2})")
###############################################################################
# Utility methods
###############################################################################
def errOut(msg, parser=None):
    """Abort the script by raising SystemExit with an error message.

    :param msg: explanation to include in the exit message.
    :param parser: optional argparse parser whose help text is printed
                   before exiting.
    """
    if parser is not None:
        parser.print_help()
    sys.exit("ERROR in {}\n {}".format(os.path.basename(__file__), msg))
def getDataFiles(mypath):
    """Return the list of non-hidden regular files in a directory.

    Exits with an error (via errOut) when the directory does not exist.
    Each returned entry is the directory path (with trailing slash)
    concatenated with the file name.
    """
    # Normalize the path to always carry a trailing slash.
    if not mypath.endswith("/"):
        mypath += "/"
    if not os.path.isdir(mypath):
        errOut("Data directory not found: {}".format(mypath))
    return ["{}{}".format(mypath, fname)
            for fname in os.listdir(mypath)
            if os.path.isfile(os.path.join(mypath, fname))
            and not fname.startswith(".")]
def readPlate(mypath):
    """Parse a tab-delimited plate file into a dictionary.

    Each line holds: well, mainsource, compound, concentration.
    Returns {(row_letter, column_int): (mainsource, compound)}; the
    concentration column is read but not stored.
    """
    plate = {}
    with open(mypath, "r") as fh:
        for line in fh:
            well, mainsource, compound, _conc = line.rstrip("\n").split("\t")
            plate[parseWell(well)] = (mainsource, compound)
    return plate
def parseWell(well):
    """Split a well label such as "A12" into a ("A", 12) tuple."""
    row, col = re.match(r"(\w)(\d+)", well).group(1, 2)
    return (row, int(col))
def readData(data, f, t0):
    """Parse one plate-format file and merge its readings into ``data``.

    :param data: nested dict of readings, keyed sample -> replicate ->
                 well tuple -> time (hours) -> OD string. Mutated in
                 place (addOD returns the same dict).
    :param f: path to the data file. The filename must look like
              "<sample>_<replicate>..." so both parts can be extracted.
    :param t0: dict of first-seen time points per sample/replicate,
               updated by parseTime().
    """
    # Sample name and replicate are encoded in the filename itself.
    fname = os.path.basename(f)
    fname = os.path.splitext(fname)[0]
    m = re.match(r"^([A-Za-z0-9-.]+)_([A-Za-z0-9]+)", fname)
    if m is None:
        errOut("Could not extract name and replicate from filename: "
               "{}".format(f))
    name, rep = m.group(1, 2)
    data_found = False  # True once the header lines have been consumed
    new_time = True  # True when the next data row starts a new time point
    od_reads = []  # OD readings for the plate read currently being parsed
    with open(f, "r") as fh:
        for ln, l in enumerate(fh, start=1):
            # Strip off any blank or newlines on both sides
            l = l.strip()
            # "~End" marks the end of all reads in the file.
            if re.match(r"~End", l):
                break
            # The "Temperature" row is the last header line.
            elif re.match(r"Temperature", l):
                ll = l.split("\t")
                # Header must have 13 items (1 Temp + 12 column labels).
                if len(ll) != 13:
                    errOut("There are not 13 items in header line " + str(ln)
                           + " for file " + f)
                data_found = True
            elif data_found and len(l) > 0:
                ll = l.split("\t")
                if new_time:
                    # First row of a read: Time, Temp, then 12 OD values.
                    if len(ll) != 14:
                        errOut("There are not 14 items in data line " + str(ln)
                               + " for file " + f)
                    timeObj, t0 = parseTime(ll[0], t0, name, rep)
                    od_reads.extend(ll[2:])
                    new_time = False
                else:
                    # Subsequent rows carry exactly 12 OD values.
                    if len(ll) != 12:
                        errOut("There are not 12 items in data line " + str(ln)
                               + " for file " + f)
                    od_reads.extend(ll)
            elif data_found and od_reads and len(l) == 0:
                # A blank line terminates one complete plate read.
                # Guarding on data_found/od_reads fixes a latent crash: the
                # original branch also fired for blank lines seen before any
                # data row (e.g. in the header region), referencing timeObj
                # before assignment. Skipping empty reads is otherwise a
                # no-op in addOD.
                data = addOD(data, od_reads, timeObj, t0, name, rep)
                new_time = True
                od_reads = []
def parseTime(time_str, t0, name, rep):
    """Parse a "D.HH:MM:SS" timestamp and record the first-seen time.

    Returns the parsed datetime object and the (updated) ``t0`` dict,
    which stores the earliest time point per sample/replicate.
    """
    match = TIMESTAMP.match(time_str)
    timeObj = dt.datetime.strptime(match.group(3), "%H:%M:%S")
    # The optional day prefix shifts the clock time forward.
    days = match.group(2)
    if days:
        timeObj += dt.timedelta(days=int(days))
    # Remember the first time point seen for this sample/replicate pair.
    t0.setdefault(name, {}).setdefault(rep, timeObj)
    return timeObj, t0
def addOD(data, od_reads, timeObj, t0, name, rep):
    """Merge one plate read's OD values into the nested data dict.

    ``od_reads`` is assumed to be in row-major well order (A1..A12,
    B1..B12, ...). The time key is the elapsed time in hours since the
    first read of this sample/replicate. Returns ``data`` (mutated in
    place).
    """
    # Elapsed time since the replicate's first read, in hours.
    delta = timeObj - t0[name][rep]
    tDelta = delta.days * 24 + delta.seconds / 3600
    if tDelta < 0:
        errOut("ERROR: tDelta is negative for " + name + " " + rep)
    wells = ["{}{}".format(*w) for w in product("ABCDEFGH", range(1, 13))]
    for idx, od_read in enumerate(od_reads):
        well = parseWell(wells[idx])
        # Create any missing nesting levels, then store the reading.
        data.setdefault(name, {}) \
            .setdefault(rep, {}) \
            .setdefault(well, {})[tDelta] = od_read
    return data
def printData(data, plate=None, pn=None):
    """Print parsed data as tab-delimited rows on stdout.

    Without a plate the columns are: sample, replicate, well, time, OD.
    With a plate (and plate name ``pn``) two extra columns are inserted:
    mainsource and compound (looked up by well), plus the plate name.
    Wells are sorted row-then-column; times ascending within a well.
    """
    for sample, reps in data.items():
        for rep, wells in reps.items():
            # Wells sorted by row letter, then column number.
            for well in sorted(wells, key=itemgetter(0, 1)):
                wId = "{}{}".format(well[0], well[1])
                for t, od in sorted(wells[well].items()):
                    if plate:
                        ms, cpd = plate[well]
                        fields = (sample, rep, ms, cpd, wId, pn,
                                  "{:.1f}".format(t), str(od))
                    else:
                        fields = (sample, rep, wId,
                                  "{:.1f}".format(t), str(od))
                    print("\t".join(fields))
###############################################################################
# Argument parsing
###############################################################################
# Command-line interface: a data directory is required; a plate file and
# verbosity flag are optional.
parser = argparse.ArgumentParser()
parser.add_argument("indir", help="Directory containing data files")
parser.add_argument("-p", "--plate", help="Plate file for wells")
parser.add_argument("-v", "--verbose", action="store_true",
                    help="Increase output for status messages")
args = parser.parse_args()
inDir = args.indir
plateFile = args.plate
verbose = args.verbose  # NOTE(review): parsed but not used below — confirm intent
###############################################################################
# Preprocessing files
###############################################################################
# Get data files
filenames = getDataFiles(inDir)
# Check that the plate file exists
# If not, check if it exists in PMAnalyzer premade plates
if plateFile and not os.path.isfile(plateFile):
    # Fall back to the user's local PMAnalyzer premade-plates directory.
    d = os.path.expanduser("~") + "/Projects/PMAnalyzer/plates/"
    plateFile = d + plateFile
    if not os.path.isfile(plateFile):
        errOut("Plate file not found: {}".format(plateFile))
if plateFile:
    plate = readPlate(plateFile)
    # Get only filename
    plateName = os.path.basename(plateFile)
    # Remove file extension
    plateName = os.path.splitext(plateName)[0]
else:
    plate = None
    plateName = None
###############################################################################
# Data extraction files
###############################################################################
# All data will be contained in a dictionary
# Keys will be ordered as follows:
# 1) Sample name - str
# 2) Replicate - str
# 3) Well - 2-tuple (str, int)
# 4) Time - float
# 5) OD value - str
data = {}
t0 = {}  # Dictionary containing starting time points for each replicate
# Read in data for each file; readData mutates `data` and `t0` in place.
for f in filenames:
    readData(data, f, t0)
# Print out header line
if plateFile:
    print("sample\trep\tmainsource\tcompound\twell\tplate\ttime\tod")
else:
    print("sample\trep\twell\ttime\tod")
printData(data, plate, plateName)
|
|
from __future__ import unicode_literals
from datetime import timedelta
from .base import (OccurrenceDataTestCase, Notification, Occurrence,
TimelineSubscription, now)
from ..handlers.base import AppointmentHandler
from ..handlers.confirm import ConfirmHandler
from ..handlers.move import MoveHandler
from ..handlers.new import NewHandler
from ..handlers.quit import QuitHandler
from ..handlers.status import StatusHandler
from ..handlers.subscribe import SubscribeHandler
class NewHandlerTestCase(OccurrenceDataTestCase):
    "Tests for the keyword handler that adds users to timelines."

    def setUp(self):
        self.timeline = self.create_timeline(name='Test', slug='foo')
        self.prefix = AppointmentHandler.prefix

    def test_help(self):
        "Prefix and keyword alone should return the NEW usage help."
        responses = NewHandler.test('APPT NEW')
        self.assertEqual(1, len(responses))
        message = responses[0]
        self.assertTrue(self.prefix + ' NEW <KEY> <NAME/ID> <DATE>' in message)

    def test_match(self):
        "A successful match creates a user timeline subscription."
        responses = NewHandler.test('APPT NEW foo bar')
        self.assertEqual(1, len(responses))
        message = responses[0]
        self.assertTrue(message.startswith('Thank you'), message)

    def test_match_with_date(self):
        "An explicit start date should be honored."
        start = (now() + timedelta(days=1)).strftime('%Y-%m-%d')
        responses = NewHandler.test('APPT NEW foo bar %s' % start)
        self.assertEqual(1, len(responses))
        message = responses[0]
        self.assertTrue(message.startswith('Thank you'), message)

    def test_no_keyword_match(self):
        "Keyword does not match any existing timelines."
        self.timeline.delete()
        responses = NewHandler.test('APPT NEW foo bar')
        self.assertEqual(1, len(responses))
        self.assertTrue(responses[0].startswith('Sorry'))

    def test_no_name_given(self):
        "Missing name should produce an error reply."
        responses = NewHandler.test('APPT NEW foo')
        self.assertEqual(1, len(responses))
        self.assertTrue(responses[0].startswith('Sorry'))

    def test_invalid_date_format_given(self):
        "An unparseable date should produce an error reply."
        responses = NewHandler.test('APPT NEW foo bar baz')
        self.assertEqual(1, len(responses))
        self.assertTrue(responses[0].startswith('Sorry'))

    def test_already_joined(self):
        "Registering an already-registered connection/name pair fails."
        connection = self.create_connection()
        NewHandler._mock_backend = connection.backend
        self.create_timeline_subscription(timeline=self.timeline,
                                          connection=connection, pin='bar')
        responses = NewHandler.test('APPT NEW foo bar',
                                    identity=connection.identity)
        self.assertEqual(1, len(responses))
        self.assertTrue(responses[0].startswith('Sorry'))
        del NewHandler._mock_backend
class ConfirmHandlerTestCase(OccurrenceDataTestCase):
    "Tests for the keyword handler that confirms upcoming occurrences."

    def setUp(self):
        self.timeline = self.create_timeline(name='Test', slug='foo')
        self.connection = self.create_connection()
        self.subscription = self.create_timeline_subscription(
            timeline=self.timeline, connection=self.connection, pin='bar')
        ConfirmHandler._mock_backend = self.connection.backend
        self.milestone = self.create_milestone(timeline=self.timeline)
        self.occurrence = self.create_occurrence(milestone=self.milestone)
        self.notification = self.create_notification(occurrence=self.occurrence)
        self.prefix = AppointmentHandler.prefix

    def test_help(self):
        "Prefix and keyword alone should return the CONFIRM usage help."
        responses = ConfirmHandler.test('APPT CONFIRM')
        self.assertEqual(1, len(responses))
        message = responses[0]
        self.assertTrue(self.prefix + ' CONFIRM <KEY> <NAME/ID>' in message)

    def test_occurrence_confirmed(self):
        "Successfully confirm an upcoming occurrence."
        responses = ConfirmHandler.test('APPT CONFIRM foo bar',
                                        identity=self.connection.identity)
        self.assertEqual(1, len(responses))
        self.assertTrue(responses[0].startswith('Thank you'))
        # The notification and the occurrence itself should be completed.
        notification = Notification.objects.get(pk=self.notification.pk)
        self.assertTrue(notification.completed)
        self.assertEqual(Notification.STATUS_COMPLETED, notification.status)
        occurrence = Occurrence.objects.get(pk=self.occurrence.pk)
        self.assertTrue(occurrence.completed)

    def test_no_upcoming_occurrence(self):
        "Matched user has no upcoming occurrence notifications."
        self.notification.delete()
        responses = ConfirmHandler.test('APPT CONFIRM foo bar',
                                        identity=self.connection.identity)
        self.assertEqual(1, len(responses))
        self.assertTrue('no uncompleted' in responses[0])

    def test_already_confirmed(self):
        "Matched user has already confirmed the upcoming occurrence."
        self.notification.confirm()
        responses = ConfirmHandler.test('APPT CONFIRM foo bar',
                                        identity=self.connection.identity)
        self.assertEqual(1, len(responses))
        self.assertTrue('no uncompleted' in responses[0])

    def test_no_subscription(self):
        "Name/ID does not match a subscription."
        self.subscription.delete()
        responses = ConfirmHandler.test('APPT CONFIRM foo bar',
                                        identity=self.connection.identity)
        self.assertEqual(1, len(responses))
        self.assertTrue('does not match an active subscription' in responses[0])

    def test_subscription_ended(self):
        "Name/ID subscription has ended."
        self.subscription.end = now()
        self.subscription.save()
        responses = ConfirmHandler.test('APPT CONFIRM foo bar',
                                        identity=self.connection.identity)
        self.assertEqual(1, len(responses))
        self.assertTrue('does not match an active subscription' in responses[0])
class StatusHandlerTestCase(OccurrenceDataTestCase):
    "Tests for the keyword handler that updates occurrence statuses."

    def setUp(self):
        self.timeline = self.create_timeline(name='Test', slug='foo')
        self.connection = self.create_connection()
        self.subscription = self.create_timeline_subscription(
            timeline=self.timeline, connection=self.connection, pin='bar')
        StatusHandler._mock_backend = self.connection.backend
        self.milestone = self.create_milestone(timeline=self.timeline)
        self.occurrence = self.create_occurrence(milestone=self.milestone)
        self.prefix = AppointmentHandler.prefix

    def test_help(self):
        "Prefix and keyword alone should return the STATUS usage help."
        responses = StatusHandler.test('APPT STATUS')
        self.assertEqual(1, len(responses))
        message = responses[0]
        self.assertTrue(
            self.prefix + ' STATUS <KEY> <NAME/ID> <SAW|MISSED>' in message)

    def test_occurrence_status_updated(self):
        "Successfully update a recent occurrence with each valid status."
        for status in Occurrence.STATUS_CHOICES[1:]:
            appt = self.create_occurrence(milestone=self.milestone)
            responses = StatusHandler.test(
                'APPT STATUS foo bar %s' % status[1].upper(),
                identity=self.connection.identity)
            self.assertEqual(1, len(responses))
            self.assertTrue(responses[0].startswith('Thank you'))
            self.assertTrue(appt.status, status[0])

    def test_occurrence_status_invalid_update(self):
        "Do not update if supplied status text is not in STATUS_CHOICES."
        responses = StatusHandler.test('APPT STATUS foo bar FOO',
                                       identity=self.connection.identity)
        self.assertEqual(1, len(responses))
        self.assertTrue(
            responses[0].startswith('Sorry, the status update must be in'))

    def test_no_recent_occurrence(self):
        "Matched user has no recent occurrence."
        self.occurrence.delete()
        responses = StatusHandler.test('APPT STATUS foo bar SAW',
                                       identity=self.connection.identity)
        self.assertEqual(1, len(responses))
        self.assertTrue('no recent occurrences' in responses[0])

    def test_no_recent_occurrence_needing_update(self):
        "Matched user has no recent occurrence that needs updating."
        self.occurrence.status = Occurrence.STATUS_MISSED
        self.occurrence.save()
        responses = StatusHandler.test('APPT STATUS foo bar MISSED',
                                       identity=self.connection.identity)
        self.assertEqual(1, len(responses))
        self.assertTrue('no recent occurrences' in responses[0])

    def test_future_occurrence(self):
        "An occurrence in the future does not count as recent."
        self.occurrence.date = self.occurrence.date + timedelta(days=1)
        self.occurrence.save()
        responses = StatusHandler.test('APPT STATUS foo bar SAW',
                                       identity=self.connection.identity)
        self.assertEqual(1, len(responses))
        self.assertTrue('no recent occurrences' in responses[0])

    def test_no_subscription(self):
        "Name/ID does not match a subscription."
        self.subscription.delete()
        responses = StatusHandler.test('APPT STATUS foo bar SAW',
                                       identity=self.connection.identity)
        self.assertEqual(1, len(responses))
        self.assertTrue('does not match an active subscription' in responses[0])

    def test_subscription_ended(self):
        "Name/ID subscription has ended."
        self.subscription.end = now()
        self.subscription.save()
        responses = StatusHandler.test('APPT STATUS foo bar MISSED',
                                       identity=self.connection.identity)
        self.assertEqual(1, len(responses))
        self.assertTrue('does not match an active subscription' in responses[0])
class MoveHandlerTestCase(OccurrenceDataTestCase):
    "Tests for the keyword handler that reschedules occurrences."

    def setUp(self):
        self.timeline = self.create_timeline(name='Test', slug='foo')
        self.connection = self.create_connection()
        self.subscription = self.create_timeline_subscription(
            timeline=self.timeline, connection=self.connection, pin='bar')
        MoveHandler._mock_backend = self.connection.backend
        self.milestone = self.create_milestone(timeline=self.timeline)
        self.occurrence = self.create_occurrence(milestone=self.milestone,
                                                 date=now() + timedelta(hours=1))
        self.tomorrow = (now() + timedelta(days=1)).strftime('%Y-%m-%d')
        self.prefix = AppointmentHandler.prefix

    def test_help(self):
        "Prefix and keyword alone should return the MOVE usage help."
        responses = MoveHandler.test('APPT MOVE')
        self.assertEqual(1, len(responses))
        message = responses[0]
        self.assertTrue(self.prefix + ' MOVE <KEY> <NAME/ID> <DATE>' in message)

    def test_occurrence_reschedule(self):
        "Successfully reschedule an upcoming occurrence."
        self.assertEqual(1, Occurrence.objects.all().count())
        responses = MoveHandler.test('APPT MOVE foo bar %s' % self.tomorrow,
                                     identity=self.connection.identity)
        self.assertEqual(1, len(responses))
        self.assertTrue(responses[0].startswith('Thank you'))
        # A second occurrence is created that points back at the original.
        self.assertEqual(2, Occurrence.objects.all().count())
        reschedule = Occurrence.objects.all()[0]
        self.assertEqual(self.occurrence, reschedule.occurrences.all()[0])

    def test_occurrence_reschedule_malformed_date(self):
        "Ensure the date is properly formatted."
        responses = MoveHandler.test('APPT MOVE foo bar tomorrow',
                                     identity=self.connection.identity)
        self.assertEqual(1, len(responses))
        self.assertTrue(responses[0].startswith(
            'Sorry, we cannot understand that date format'))

    def test_occurrence_reschedule_future_date(self):
        "Ensure the date must be in the future."
        yesterday = (now() - timedelta(days=1)).strftime('%Y-%m-%d')
        responses = MoveHandler.test('APPT MOVE foo bar %s' % yesterday,
                                     identity=self.connection.identity)
        self.assertEqual(1, len(responses))
        self.assertTrue(responses[0].startswith('Sorry, the reschedule date'))

    def test_no_future_occurrence(self):
        "Matched user has no future occurrence."
        self.occurrence.delete()
        responses = MoveHandler.test('APPT MOVE foo bar %s' % self.tomorrow,
                                     identity=self.connection.identity)
        self.assertEqual(1, len(responses))
        self.assertTrue('no future occurrences' in responses[0])

    def test_no_future_occurrence_needing_update(self):
        "Matched user has no future occurrence that needs rescheduling."
        reschedule = self.create_occurrence(subscription=self.subscription,
                                            milestone=self.milestone)
        self.occurrence.reschedule = reschedule
        self.occurrence.save()
        responses = MoveHandler.test('APPT MOVE foo bar %s' % self.tomorrow,
                                     identity=self.connection.identity)
        self.assertEqual(1, len(responses))
        self.assertTrue('no future occurrences' in responses[0])

    def test_prior_occurrence(self):
        "An occurrence in the past does not count as future."
        self.occurrence.date = self.occurrence.date - timedelta(days=1)
        self.occurrence.save()
        responses = MoveHandler.test('APPT MOVE foo bar %s' % self.tomorrow,
                                     identity=self.connection.identity)
        self.assertEqual(1, len(responses))
        self.assertTrue('no future occurrences' in responses[0])

    def test_no_subscription(self):
        "Name/ID does not match a subscription."
        self.subscription.delete()
        responses = MoveHandler.test('APPT MOVE foo bar %s' % self.tomorrow,
                                     identity=self.connection.identity)
        self.assertEqual(1, len(responses))
        self.assertTrue('does not match an active subscription' in responses[0])

    def test_subscription_ended(self):
        "Name/ID subscription has ended."
        self.subscription.end = now()
        self.subscription.save()
        responses = MoveHandler.test('APPT MOVE foo bar %s' % self.tomorrow,
                                     identity=self.connection.identity)
        self.assertEqual(1, len(responses))
        self.assertTrue('does not match an active subscription' in responses[0])
class QuitHandlerTestCase(OccurrenceDataTestCase):
    "Tests for the keyword handler that unsubscribes users from timelines."

    def setUp(self):
        self.timeline = self.create_timeline(name='Test', slug='foo')
        self.connection = self.create_connection()
        self.subscription = self.create_timeline_subscription(
            timeline=self.timeline, connection=self.connection, pin='bar')
        QuitHandler._mock_backend = self.connection.backend
        self.prefix = AppointmentHandler.prefix

    def test_help(self):
        "Prefix and keyword alone should return the QUIT usage help."
        responses = QuitHandler.test('APPT QUIT',
                                     identity=self.connection.identity)
        self.assertEqual(1, len(responses))
        message = responses[0]
        self.assertTrue(self.prefix + ' QUIT <KEY> <NAME/ID> <DATE>' in message)

    def test_match(self):
        "A successful match ends the timeline subscription."
        responses = QuitHandler.test('APPT QUIT foo bar',
                                     identity=self.connection.identity)
        self.assertEqual(1, len(responses))
        message = responses[0]
        self.assertTrue(message.startswith('Thank you'), message)

    def test_match_with_date(self):
        "An explicit end date should be honored."
        end = (now() + timedelta(hours=1)).strftime('%Y-%m-%d')
        responses = QuitHandler.test('APPT QUIT foo bar %s' % end,
                                     identity=self.connection.identity)
        self.assertEqual(1, len(responses))
        message = responses[0]
        self.assertTrue(message.startswith('Thank you'), message)
        self.assertEqual(
            end,
            TimelineSubscription.objects.all()[0].end.strftime('%Y-%m-%d'))

    def test_no_keyword_match(self):
        "Keyword does not match any existing timelines."
        self.timeline.delete()
        responses = QuitHandler.test('APPT QUIT foo bar',
                                     identity=self.connection.identity)
        self.assertEqual(1, len(responses))
        self.assertTrue(responses[0].startswith('Sorry'))

    def test_no_name_given(self):
        "Missing name should produce an error reply."
        responses = QuitHandler.test('APPT QUIT foo',
                                     identity=self.connection.identity)
        self.assertEqual(1, len(responses))
        self.assertTrue(responses[0].startswith('Sorry'))

    def test_invalid_date_format_given(self):
        "An unparseable date should produce an error reply."
        responses = QuitHandler.test('APPT QUIT foo bar baz',
                                     identity=self.connection.identity)
        self.assertEqual(1, len(responses))
        self.assertTrue(responses[0].startswith('Sorry'))

    def test_already_quit(self):
        "Unsubscribing an already unsubscribed connection/name pair fails."
        self.subscription.end = now()
        self.subscription.save()
        responses = QuitHandler.test('APPT QUIT foo bar',
                                     identity=self.connection.identity)
        self.assertEqual(1, len(responses))
        self.assertTrue(responses[0].startswith('Sorry'))
class SubscribeHandlerTestCase(OccurrenceDataTestCase):
    "Tests for the keyword handler that subscribes users to timelines."

    def setUp(self):
        self.timeline = self.create_timeline(name='Test', slug='foo')

    def test_help(self):
        "Keyword alone should return the SUBSCRIBE usage help."
        responses = SubscribeHandler.test('SUBSCRIBE')
        self.assertEqual(1, len(responses))
        self.assertTrue('SUBSCRIBE <KEY> <NAME/ID> <DATE>' in responses[0])

    def test_match(self):
        "A successful match creates a user timeline subscription."
        responses = SubscribeHandler.test('SUBSCRIBE foo bar')
        self.assertEqual(1, len(responses))
        message = responses[0]
        self.assertTrue(message.startswith('Thank you'), message)

    def test_match_with_date(self):
        "An explicit start date should be honored."
        start = (now() + timedelta(days=1)).strftime('%Y-%m-%d')
        responses = SubscribeHandler.test('SUBSCRIBE foo bar %s' % start)
        self.assertEqual(1, len(responses))
        message = responses[0]
        self.assertTrue(message.startswith('Thank you'), message)

    def test_match_with_relative_date(self):
        "A relative start date should also be accepted."
        relative = "two weeks ago"
        responses = SubscribeHandler.test('SUBSCRIBE foo bar %s' % relative)
        self.assertEqual(1, len(responses))
        message = responses[0]
        self.assertTrue(message.startswith('Thank you'), message)

    def test_no_keyword_match(self):
        "Keyword does not match any existing timelines."
        self.timeline.delete()
        responses = SubscribeHandler.test('SUBSCRIBE foo bar')
        self.assertEqual(1, len(responses))
        self.assertTrue(responses[0].startswith('Sorry'))

    def test_no_name_given(self):
        "Missing name should produce an error reply."
        responses = SubscribeHandler.test('SUBSCRIBE foo')
        self.assertEqual(1, len(responses))
        self.assertTrue(responses[0].startswith('Sorry'))

    def test_invalid_date_format_given(self):
        "An unparseable date should produce an error reply."
        responses = SubscribeHandler.test('SUBSCRIBE foo bar baz')
        self.assertEqual(1, len(responses))
        self.assertTrue(responses[0].startswith('Sorry'))

    def test_already_subscribed(self):
        "Registering an already-registered connection/name pair fails."
        connection = self.create_connection()
        SubscribeHandler._mock_backend = connection.backend
        self.create_timeline_subscription(timeline=self.timeline,
                                          connection=connection, pin='bar')
        responses = SubscribeHandler.test('SUBSCRIBE foo bar',
                                          identity=connection.identity)
        self.assertEqual(1, len(responses))
        self.assertTrue(responses[0].startswith('Sorry'))
        del SubscribeHandler._mock_backend
|
|
# Copyright 2015 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Parent client for calling the Google Cloud Bigtable API.
This is the base from which all interactions with the API occur.
In the hierarchy of API concepts
* a :class:`Client` owns an :class:`.Instance`
* a :class:`.Instance` owns a :class:`Table <gcloud.bigtable.table.Table>`
* a :class:`Table <gcloud.bigtable.table.Table>` owns a
:class:`ColumnFamily <.column_family.ColumnFamily>`
* a :class:`Table <gcloud.bigtable.table.Table>` owns a :class:`Row <.row.Row>`
(and all the cells in the row)
"""
from pkg_resources import get_distribution
from grpc.beta import implementations
from gcloud.bigtable._generated_v2 import (
bigtable_instance_admin_pb2 as instance_admin_v2_pb2)
# V2 table admin service
from gcloud.bigtable._generated_v2 import (
bigtable_table_admin_pb2 as table_admin_v2_pb2)
# V2 data service
from gcloud.bigtable._generated_v2 import (
bigtable_pb2 as data_v2_pb2)
from gcloud.bigtable._generated_v2 import (
operations_grpc_pb2 as operations_grpc_v2_pb2)
from gcloud.bigtable.cluster import DEFAULT_SERVE_NODES
from gcloud.bigtable.instance import Instance
from gcloud.bigtable.instance import _EXISTING_INSTANCE_LOCATION_ID
from gcloud.client import _ClientFactoryMixin
from gcloud.client import _ClientProjectMixin
from gcloud.credentials import get_credentials
# gRPC stub factories and endpoints for each Bigtable service surface (v2).
TABLE_STUB_FACTORY_V2 = (
    table_admin_v2_pb2.beta_create_BigtableTableAdmin_stub)
TABLE_ADMIN_HOST_V2 = 'bigtableadmin.googleapis.com'
"""Table Admin API request host."""
TABLE_ADMIN_PORT_V2 = 443
"""Table Admin API request port."""

INSTANCE_STUB_FACTORY_V2 = (
    instance_admin_v2_pb2.beta_create_BigtableInstanceAdmin_stub)
INSTANCE_ADMIN_HOST_V2 = 'bigtableadmin.googleapis.com'
"""Cluster Admin API request host."""
INSTANCE_ADMIN_PORT_V2 = 443
"""Cluster Admin API request port."""

DATA_STUB_FACTORY_V2 = data_v2_pb2.beta_create_Bigtable_stub
DATA_API_HOST_V2 = 'bigtable.googleapis.com'
"""Data API request host."""
DATA_API_PORT_V2 = 443
"""Data API request port."""

# The Operations API is served from the same endpoint as Instance Admin.
OPERATIONS_STUB_FACTORY_V2 = operations_grpc_v2_pb2.beta_create_Operations_stub
OPERATIONS_API_HOST_V2 = INSTANCE_ADMIN_HOST_V2
OPERATIONS_API_PORT_V2 = INSTANCE_ADMIN_PORT_V2

# OAuth2 scopes selected in Client.__init__ based on read_only/admin flags.
ADMIN_SCOPE = 'https://www.googleapis.com/auth/bigtable.admin'
"""Scope for interacting with the Cluster Admin and Table Admin APIs."""
DATA_SCOPE = 'https://www.googleapis.com/auth/bigtable.data'
"""Scope for reading and writing table data."""
READ_ONLY_SCOPE = 'https://www.googleapis.com/auth/bigtable.data.readonly'
"""Scope for reading table data."""

DEFAULT_TIMEOUT_SECONDS = 10
"""The default timeout to use for API requests."""

DEFAULT_USER_AGENT = 'gcloud-python/{0}'.format(
    get_distribution('gcloud').version)
"""The default user agent for API requests."""
class Client(_ClientFactoryMixin, _ClientProjectMixin):
"""Client for interacting with Google Cloud Bigtable API.
.. note::
Since the Cloud Bigtable API requires the gRPC transport, no
``http`` argument is accepted by this class.
:type project: :class:`str` or :func:`unicode <unicode>`
:param project: (Optional) The ID of the project which owns the
instances, tables and data. If not provided, will
attempt to determine from the environment.
:type credentials:
:class:`OAuth2Credentials <oauth2client.client.OAuth2Credentials>` or
:data:`NoneType <types.NoneType>`
:param credentials: (Optional) The OAuth2 Credentials to use for this
client. If not provided, defaults to the Google
Application Default Credentials.
:type read_only: bool
:param read_only: (Optional) Boolean indicating if the data scope should be
for reading only (or for writing as well). Defaults to
:data:`False`.
:type admin: bool
:param admin: (Optional) Boolean indicating if the client will be used to
interact with the Instance Admin or Table Admin APIs. This
requires the :const:`ADMIN_SCOPE`. Defaults to :data:`False`.
:type user_agent: str
:param user_agent: (Optional) The user agent to be used with API request.
Defaults to :const:`DEFAULT_USER_AGENT`.
:type timeout_seconds: int
:param timeout_seconds: Number of seconds for request time-out. If not
passed, defaults to
:const:`DEFAULT_TIMEOUT_SECONDS`.
:raises: :class:`ValueError <exceptions.ValueError>` if both ``read_only``
and ``admin`` are :data:`True`
"""
def __init__(self, project=None, credentials=None,
read_only=False, admin=False, user_agent=DEFAULT_USER_AGENT,
timeout_seconds=DEFAULT_TIMEOUT_SECONDS):
_ClientProjectMixin.__init__(self, project=project)
if credentials is None:
credentials = get_credentials()
if read_only and admin:
raise ValueError('A read-only client cannot also perform'
'administrative actions.')
scopes = []
if read_only:
scopes.append(READ_ONLY_SCOPE)
else:
scopes.append(DATA_SCOPE)
if admin:
scopes.append(ADMIN_SCOPE)
self._admin = bool(admin)
try:
credentials = credentials.create_scoped(scopes)
except AttributeError:
pass
self._credentials = credentials
self.user_agent = user_agent
self.timeout_seconds = timeout_seconds
# These will be set in start().
self._data_stub_internal = None
self._instance_stub_internal = None
self._operations_stub_internal = None
self._table_stub_internal = None
def copy(self):
"""Make a copy of this client.
Copies the local data stored as simple types but does not copy the
current state of any open connections with the Cloud Bigtable API.
:rtype: :class:`.Client`
:returns: A copy of the current client.
"""
credentials = self._credentials
copied_creds = credentials.create_scoped(credentials.scopes)
return self.__class__(
self.project,
copied_creds,
READ_ONLY_SCOPE in copied_creds.scopes,
self._admin,
self.user_agent,
self.timeout_seconds,
)
@property
def credentials(self):
    """Getter for client's credentials.

    Read-only: returns exactly the (possibly scoped) credentials object
    stored by ``__init__``.

    :rtype:
        :class:`OAuth2Credentials <oauth2client.client.OAuth2Credentials>`
    :returns: The credentials stored on the client.
    """
    return self._credentials
@property
def project_name(self):
    """Fully-qualified project name for the Instance Admin API.

    .. note::
        Recomputed on each access (not cached), although it only changes
        if ``project`` does.

    The project name is of the form ``"projects/{project}"``.

    :rtype: str
    :returns: The project name to be used with the Cloud Bigtable Admin
              API RPC service.
    """
    return 'projects/{}'.format(self.project)
@property
def _data_stub(self):
    """Getter for the gRPC stub used for the Data API.

    :rtype: :class:`grpc.beta._stub._AutoIntermediary`
    :returns: A gRPC stub object.
    :raises: :class:`ValueError <exceptions.ValueError>` if the current
             client has not been :meth:`start`-ed.
    """
    stub = self._data_stub_internal
    if stub is None:
        raise ValueError('Client has not been started.')
    return stub
@property
def _instance_stub(self):
    """Getter for the gRPC stub used for the Instance Admin API.

    :rtype: :class:`grpc.beta._stub._AutoIntermediary`
    :returns: A gRPC stub object.
    :raises: :class:`ValueError <exceptions.ValueError>` if the current
             client is not an admin client or if it has not been
             :meth:`start`-ed.
    """
    if not self._admin:
        raise ValueError('Client is not an admin client.')
    stub = self._instance_stub_internal
    if stub is None:
        raise ValueError('Client has not been started.')
    return stub
@property
def _operations_stub(self):
    """Getter for the gRPC stub used for the Operations API.

    :rtype: :class:`grpc.beta._stub._AutoIntermediary`
    :returns: A gRPC stub object.
    :raises: :class:`ValueError <exceptions.ValueError>` if the current
             client is not an admin client or if it has not been
             :meth:`start`-ed.
    """
    if not self._admin:
        raise ValueError('Client is not an admin client.')
    stub = self._operations_stub_internal
    if stub is None:
        raise ValueError('Client has not been started.')
    return stub
@property
def _table_stub(self):
    """Getter for the gRPC stub used for the Table Admin API.

    :rtype: :class:`grpc.beta._stub._AutoIntermediary`
    :returns: A gRPC stub object.
    :raises: :class:`ValueError <exceptions.ValueError>` if the current
             client is not an admin client or if it has not been
             :meth:`start`-ed.
    """
    if not self._admin:
        raise ValueError('Client is not an admin client.')
    stub = self._table_stub_internal
    if stub is None:
        raise ValueError('Client has not been started.')
    return stub
def _make_data_stub(self):
    """Creates gRPC stub to make requests to the Data API.

    :rtype: :class:`grpc.beta._stub._AutoIntermediary`
    :returns: A gRPC stub object.
    """
    # Data traffic goes to the Data API host/port, unlike the admin stubs.
    return _make_stub(self, DATA_STUB_FACTORY_V2,
                      DATA_API_HOST_V2, DATA_API_PORT_V2)
def _make_instance_stub(self):
    """Creates gRPC stub to make requests to the Instance Admin API.

    :rtype: :class:`grpc.beta._stub._AutoIntermediary`
    :returns: A gRPC stub object.
    """
    return _make_stub(self, INSTANCE_STUB_FACTORY_V2,
                      INSTANCE_ADMIN_HOST_V2, INSTANCE_ADMIN_PORT_V2)
def _make_operations_stub(self):
    """Creates gRPC stub to make requests to the Operations API.

    These are for long-running operations of the Instance Admin API,
    hence the host and port matching.

    :rtype: :class:`grpc.beta._stub._AutoIntermediary`
    :returns: A gRPC stub object.
    """
    return _make_stub(self, OPERATIONS_STUB_FACTORY_V2,
                      OPERATIONS_API_HOST_V2, OPERATIONS_API_PORT_V2)
def _make_table_stub(self):
    """Creates gRPC stub to make requests to the Table Admin API.

    :rtype: :class:`grpc.beta._stub._AutoIntermediary`
    :returns: A gRPC stub object.
    """
    return _make_stub(self, TABLE_STUB_FACTORY_V2,
                      TABLE_ADMIN_HOST_V2, TABLE_ADMIN_PORT_V2)
def is_started(self):
    """Check if the client has been started.

    The data stub is always the first one created by :meth:`start`, so
    its presence indicates "started" for both admin and non-admin
    clients.

    :rtype: bool
    :returns: Boolean indicating if the client has been started.
    """
    return self._data_stub_internal is not None
def start(self):
    """Prepare the client to make requests.

    Activates gRPC contexts for making requests to the Bigtable
    Service(s). A no-op when the client is already started.
    """
    if self.is_started():
        return
    # NOTE: We __enter__ the stubs more-or-less permanently. Only after
    #       entering the context managers is the connection created, and
    #       we keep those connections open because the client will make
    #       many requests with them over HTTP/2.
    self._data_stub_internal = self._make_data_stub()
    self._data_stub_internal.__enter__()
    if not self._admin:
        return
    # Admin clients additionally get the three admin-side stubs; create
    # them all first, then enter them (same order as creation).
    self._instance_stub_internal = self._make_instance_stub()
    self._operations_stub_internal = self._make_operations_stub()
    self._table_stub_internal = self._make_table_stub()
    for stub in (self._instance_stub_internal,
                 self._operations_stub_internal,
                 self._table_stub_internal):
        stub.__enter__()
def __enter__(self):
    """Starts the client as a context manager."""
    self.start()
    # Return the client itself so `with Client(...) as client:` works.
    return self
def stop(self):
    """Closes all the open gRPC clients.

    A no-op when the client was never started. After this call
    :meth:`is_started` returns :data:`False` again.
    """
    if not self.is_started():
        return
    # Exiting a stub's context manager closes its connection; pass None
    # for the exception type/value/traceback since this is a normal
    # shutdown, not error handling.
    self._data_stub_internal.__exit__(None, None, None)
    if self._admin:
        for stub in (self._instance_stub_internal,
                     self._operations_stub_internal,
                     self._table_stub_internal):
            stub.__exit__(None, None, None)
    self._data_stub_internal = None
    self._instance_stub_internal = None
    self._operations_stub_internal = None
    self._table_stub_internal = None
def __exit__(self, exc_type, exc_val, exc_t):
    """Stops the client as a context manager."""
    # Exception info is ignored: the client is shut down regardless of
    # whether the `with` block raised.
    self.stop()
def instance(self, instance_id, location=_EXISTING_INSTANCE_LOCATION_ID,
             display_name=None, serve_nodes=DEFAULT_SERVE_NODES):
    """Factory to create an instance associated with this client.

    :type instance_id: str
    :param instance_id: The ID of the instance.

    :type location: string
    :param location: location name, in form
                     ``projects/<project>/locations/<location>``; used to
                     set up the instance's cluster.

    :type display_name: str
    :param display_name: (Optional) The display name for the instance in
                         the Cloud Console UI. (Must be between 4 and 30
                         characters.) If this value is not set in the
                         constructor, will fall back to the instance ID.

    :type serve_nodes: int
    :param serve_nodes: (Optional) The number of nodes in the instance's
                        cluster; used to set up the instance's cluster.

    :rtype: :class:`.Instance`
    :returns: an instance owned by this client.
    """
    # Purely local construction: no API call is made here.
    return Instance(instance_id, self, location,
                    display_name=display_name, serve_nodes=serve_nodes)
def list_instances(self):
    """List instances owned by the project.

    Requires an admin client that has been :meth:`start`-ed.

    :rtype: tuple
    :returns: A pair of results: a list of :class:`.Instance` objects
              returned by the API, and a list of strings naming the
              failed locations in the request.
    """
    request_pb = instance_admin_v2_pb2.ListInstancesRequest(
        parent=self.project_name)
    response = self._instance_stub.ListInstances(
        request_pb, self.timeout_seconds)
    found = [Instance.from_pb(pb, self) for pb in response.instances]
    return found, response.failed_locations
class _MetadataPlugin(object):
    """Callable that injects auth metadata into gRPC requests.

    :type client: :class:`.client.Client`
    :param client: The client that owns the instance.
                   Provides authorization and user agent.
    """

    def __init__(self, client):
        self._credentials = client.credentials
        self._user_agent = client.user_agent

    def __call__(self, unused_context, callback):
        """Adds authorization header to request metadata."""
        token = self._credentials.get_access_token().access_token
        callback([
            ('Authorization', 'Bearer ' + token),
            ('User-agent', self._user_agent),
        ], None)
def _make_stub(client, stub_factory, host, port):
    """Makes a stub for an RPC service.

    Uses / depends on the beta implementation of gRPC.

    :type client: :class:`.client.Client`
    :param client: The client that owns the instance.
                   Provides authorization and user agent.

    :type stub_factory: callable
    :param stub_factory: A factory which will create a gRPC stub for
                         a given service.

    :type host: str
    :param host: The host for the service.

    :type port: int
    :param port: The port for the service.

    :rtype: :class:`grpc.beta._stub._AutoIntermediary`
    :returns: The stub object used to make gRPC requests to a given API.
    """
    # Passing None as the first argument of ssl_channel_credentials()
    # loads root certificates from `grpc/_adapter/credentials/roots.pem`.
    transport_creds = implementations.ssl_channel_credentials(None, None, None)
    auth_creds = implementations.metadata_call_credentials(
        _MetadataPlugin(client), name='google_creds')
    channel_creds = implementations.composite_channel_credentials(
        transport_creds, auth_creds)
    return stub_factory(
        implementations.secure_channel(host, port, channel_creds))
|
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Author: Bertrand256
# Created on: 2017-04
import base64
import os
from typing import ByteString, List, Tuple, Generator
from PyQt5.QtWidgets import QMessageBox
from cryptography.fernet import Fernet, InvalidToken
from app_defs import get_note_url
from app_utils import SHA256, write_bytes_buf, write_int_list_buf, read_bytes_from_file, read_int_list_from_file
from common import CancelException
from dash_utils import num_to_varint, read_varint_from_file, bip32_path_n_to_string
from hw_common import HWType, HWNotConnectedException
from hw_intf import hw_sign_message, get_address_and_pubkey, HwSessionInfo
from wnd_utils import WndUtils
DMT_ENCRYPTED_DATA_PREFIX = b'DMTEF'
ENC_FILE_BLOCK_SIZE = 1000000
def prepare_hw_encryption_attrs(hw_session: HwSessionInfo, label: str) -> \
        Tuple[int, int, List[int], bytes, bytes, bytes]:
    """Derive a Fernet encryption key bound to the connected hardware wallet.

    :param hw_session: active hardware wallet session
    :param label: text shown on the device while confirming the operation
    :return: 0: protocol id
             1: hw type id (1: Trezor, 2: Keepkey, 3: Ledger Nano)
             2: bip32 path to the encryption key
             3: encryption key hash
             4: encryption key binary
             5: pub key hash of the encryption key
    """
    # generate a new random password which will be used to encrypt with Trezor method + Fernet
    protocol = 1
    hw_type_bin = {
        HWType.trezor: 1,
        HWType.keepkey: 2,
        HWType.ledger_nano: 3
    }[hw_session.hw_type]
    key = Fernet.generate_key()  # encryption key (urlsafe-base64 encoded)
    key_bin = base64.urlsafe_b64decode(key)  # raw key bytes
    bip32_path_n = [10, 100, 1000]
    if hw_session.hw_type in (HWType.trezor, HWType.keepkey):
        # for trezor method, for encryption we use the raw key and the key encrypted with a device
        # will be part of a header
        encrypted_key_bin, pub_key = hw_session.hw_encrypt_value(bip32_path_n, label=label, value=key_bin)
        pub_key_hash = SHA256.new(pub_key).digest()
        return protocol, hw_type_bin, bip32_path_n, key, encrypted_key_bin, pub_key_hash
    elif hw_session.hw_type == HWType.ledger_nano:
        # Ledger Nano S does not have encryption/decryption features, so for encryption and decryption will use
        # a hash of a signed message, where the message the raw key itself;
        # The raw key will be part of the encrypted header.
        display_label = f'<b>Click the sign message confirmation button on the <br>hardware wallet to ' \
                        f'encrypt \'{label}\'.</b>'
        bip32_path_str = bip32_path_n_to_string(bip32_path_n)
        sig = hw_sign_message(hw_session, 'Dash', bip32_path_str, key_bin.hex(), display_label=display_label)
        adr_pk = get_address_and_pubkey(hw_session, 'Dash', bip32_path_str)
        pub_key_hash = SHA256.new(adr_pk.get('publicKey')).digest()
        # the Fernet key is the urlsafe-base64 encoding of the SHA256 of the signature
        enc_key_hash = SHA256.new(sig.signature).digest()
        enc_key_hash = base64.urlsafe_b64encode(enc_key_hash)
        return protocol, hw_type_bin, bip32_path_n, enc_key_hash, key_bin, pub_key_hash
def write_file_encrypted(file_name: str, hw_session: HwSessionInfo, data: bytes) -> None:
    """Encrypt `data` with a hardware-wallet-derived Fernet key and write it.

    The output layout is: DMT_ENCRYPTED_DATA_PREFIX magic, header
    (protocol, hw type, label, encrypted key, bip32 path, pubkey hash),
    then a sequence of chunks, each preceded by its 8-byte little-endian
    length.

    :param file_name: destination path; its basename is used as the label
        shown on the hardware wallet
    :param hw_session: hardware wallet session used to derive the key
    :param data: plaintext to encrypt
    """
    label = os.path.basename(file_name)
    if not hw_session.connect_hardware_wallet():
        raise Exception('Not connected to hardware wallet.')
    protocol, hw_type_bin, bip32_path_n, encryption_key, encrypted_key_bin, pub_key_hash = \
        prepare_hw_encryption_attrs(hw_session, label)
    fer = Fernet(encryption_key)
    with open(file_name, 'wb') as f_ptr:
        header = DMT_ENCRYPTED_DATA_PREFIX + \
                 num_to_varint(protocol) + num_to_varint(hw_type_bin) + \
                 write_bytes_buf(bytearray(base64.b64encode(bytearray(label, 'utf-8')))) + \
                 write_bytes_buf(encrypted_key_bin) + \
                 write_int_list_buf(bip32_path_n) + \
                 write_bytes_buf(pub_key_hash)
        f_ptr.write(header)
        # slice the input data into ENC_FILE_BLOCK_SIZE-byte chunks, encrypt them and
        # write to file; each block will be preceded with the length of the encrypted
        # data chunk size
        begin_idx = 0
        while True:
            data_left = len(data) - begin_idx
            if data_left <= 0:
                break
            cur_input_chunk_size = min(ENC_FILE_BLOCK_SIZE, data_left)
            # Fernet returns urlsafe-base64 tokens; store the decoded binary
            # form to save space.
            data_enc_base64 = fer.encrypt(data[begin_idx: begin_idx + cur_input_chunk_size])
            data_enc = base64.urlsafe_b64decode(data_enc_base64)
            cur_chunk_size_bin = len(data_enc).to_bytes(8, byteorder='little')
            f_ptr.write(cur_chunk_size_bin)  # write the size of the encrypted chunk
            f_ptr.write(data_enc)  # write the encrypted chunk itself
            begin_idx += cur_input_chunk_size
def read_file_encrypted(file_name: str, ret_attrs: dict, hw_session: HwSessionInfo) -> Generator[bytes, None, None]:
    """Yield the (decrypted) contents of *file_name* chunk by chunk.

    Files produced by :func:`write_file_encrypted` (recognized by the
    DMT_ENCRYPTED_DATA_PREFIX magic) are decrypted with a key recovered
    from the hardware wallet; any other file is yielded as raw bytes.

    :param file_name: path of the file to read
    :param ret_attrs: output dict; key 'encrypted' is set True/False
    :param hw_session: hardware wallet session used for key recovery
    :raises HWNotConnectedException: the required wallet is not connected
    :raises CancelException: user cancelled the retry dialog
    :raises ValueError: malformed header or truncated/corrupted data
    """
    ret_attrs['encrypted'] = False
    try:
        hw_session.save_state()
        with open(file_name, 'rb') as f_ptr:
            data = f_ptr.read(len(DMT_ENCRYPTED_DATA_PREFIX))
            if data == DMT_ENCRYPTED_DATA_PREFIX:
                ret_attrs['encrypted'] = True
                protocol = read_varint_from_file(f_ptr)
                if protocol == 1:  # with Trezor method + Fernet
                    hw_type_bin = read_varint_from_file(f_ptr)
                    hw_type = {
                        1: HWType.trezor,
                        2: HWType.keepkey,
                        3: HWType.ledger_nano
                    }.get(hw_type_bin)
                    if hw_type:
                        # connect hardware wallet, choosing the type compatible with
                        # the type read from the encrypted file
                        if hw_session.hw_client:
                            if (hw_type in (HWType.trezor, HWType.keepkey) and
                                hw_session.hw_type not in (HWType.trezor, HWType.keepkey)) or \
                                    (hw_type == HWType.ledger_nano and hw_type != hw_session.hw_type):
                                # if the currently connected hardware wallet type is not compatible
                                # with the type from the encrypted file, disconnect it to give a
                                # user a chance to choose the correct one in the code below
                                hw_session.disconnect_hardware_wallet()
                        if not hw_session.hw_client:
                            if hw_type in (HWType.trezor, HWType.keepkey):
                                hw_session.set_hw_types_allowed((HWType.trezor, HWType.keepkey))
                            else:
                                hw_session.set_hw_types_allowed((hw_type,))
                            if not hw_session.connect_hardware_wallet():
                                # message typo fixed: "...computer to decrypt..."
                                raise HWNotConnectedException(
                                    f'This file was encrypted with {HWType.get_desc(hw_type)} hardware wallet, '
                                    f'which has to be connected to the computer to decrypt the file.')
                        data_label_bin = read_bytes_from_file(f_ptr)
                        label = base64.urlsafe_b64decode(data_label_bin).decode('utf-8')
                        encrypted_key_bin = read_bytes_from_file(f_ptr)
                        bip32_path_n = read_int_list_from_file(f_ptr)
                        pub_key_hash_hdr = read_bytes_from_file(f_ptr)
                        while True:
                            if not hw_session.hw_client:
                                raise HWNotConnectedException(
                                    f'This file was encrypted with {HWType.get_desc(hw_type)} hardware wallet, '
                                    f'which has to be connected to the computer to decrypt the file.')
                            if hw_session.hw_type in (HWType.trezor, HWType.keepkey):
                                key_bin, pub_key = hw_session.hw_decrypt_value(
                                    bip32_path_n, label=label, value=encrypted_key_bin)
                            elif hw_session.hw_type == HWType.ledger_nano:
                                display_label = f'<b>Click the sign message confirmation button on the <br>' \
                                                f'hardware wallet to decrypt \'{label}\'.</b>'
                                bip32_path_str = bip32_path_n_to_string(bip32_path_n)
                                sig = hw_sign_message(hw_session, 'Dash', bip32_path_str, encrypted_key_bin.hex(),
                                                      display_label=display_label)
                                adr_pk = get_address_and_pubkey(hw_session, 'Dash', bip32_path_str)
                                pub_key = adr_pk.get('publicKey')
                                key_bin = SHA256.new(sig.signature).digest()
                            else:
                                raise Exception('Invalid hardware wallet type.')
                            # verify the key was derived for the same public key as at
                            # encryption time; otherwise offer the user a retry
                            pub_key_hash = SHA256.new(pub_key).digest()
                            if pub_key_hash_hdr == pub_key_hash:
                                break
                            url = get_note_url('DMT0003')
                            # BUG FIX: the message was a plain string, so the user saw the
                            # literal text "{url}" instead of the note URL; use an f-string
                            # so `url` is actually interpolated.
                            if WndUtils.query_dlg(
                                    message='Inconsistency between encryption and decryption keys.\n\n'
                                            'The reason may be using a different passphrase than it was used '
                                            'for encryption or running another application communicating with the '
                                            f'device simultaneously, like Trezor web wallet (see <a href="{url}">'
                                            'here</a>).\n\n'
                                            'Do you want to try again?',
                                    buttons=QMessageBox.Yes | QMessageBox.Cancel,
                                    default_button=QMessageBox.Cancel, icon=QMessageBox.Warning) == QMessageBox.Cancel:
                                raise CancelException('User cancelled.')
                            hw_session.disconnect_hardware_wallet()
                            hw_session.connect_hardware_wallet()
                        key = base64.urlsafe_b64encode(key_bin)
                        fer = Fernet(key)
                        while True:
                            # data is written in blocks; in front of each block there is
                            # an 8-byte little-endian block size value
                            data_bin = f_ptr.read(8)
                            if len(data_bin) == 0:
                                break  # end of file
                            elif len(data_bin) < 8:
                                raise ValueError('File end before read completed.')
                            data_chunk_size = int.from_bytes(data_bin, byteorder='little')
                            if data_chunk_size < 0 or data_chunk_size > 2000000000:
                                raise ValueError('Data corrupted: invalid data chunk size.')
                            data_bin = f_ptr.read(data_chunk_size)
                            if data_chunk_size != len(data_bin):
                                raise ValueError('File end before read completed.')
                            # chunks are stored as decoded binary; re-encode to the
                            # urlsafe-base64 token format Fernet expects
                            data_base64 = base64.urlsafe_b64encode(data_bin)
                            try:
                                data_decr = fer.decrypt(data_base64)
                            except InvalidToken:
                                raise Exception('Couldn\'t decrypt file (InvalidToken error). The file is probably '
                                                'corrupted or is encrypted with a different encryption method.')
                            yield data_decr
                    else:
                        raise ValueError('Invalid hardware wallet type value.')
                else:
                    raise ValueError('Invalid protocol value.')
            else:
                # the data inside the file isn't encrypted; the prefix bytes already
                # read are part of the payload, so `data` seeds the first yield
                while True:
                    data += f_ptr.read(ENC_FILE_BLOCK_SIZE)
                    if not len(data):
                        break
                    yield data
                    data = bytes()
    finally:
        hw_session.restore_state()
|
|
# Copyright (c) 2015, Ecole Polytechnique Federale de Lausanne, Blue Brain Project
# All rights reserved.
#
# This file is part of NeuroM <https://github.com/BlueBrain/NeuroM>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of
# its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import sys
from nose import tools as nt
from neurom.core.tree import Tree
# Reference trees shared by the iteration tests below.
#
# REF_TREE topology (add_child returns the new child node):
#
#               REF_TREE
#              /        \
#            T11        T12
#           /   \      /    \
#        T111  T112  T121   T122
#                     |
#                   T1211
#                   /    \
#              T12111   T12112
REF_TREE = Tree()
T11 = REF_TREE.add_child(Tree())
T12 = REF_TREE.add_child(Tree())
T111 = REF_TREE.children[0].add_child(Tree())
T112 = REF_TREE.children[0].add_child(Tree())
T121 = REF_TREE.children[1].add_child(Tree())
T122 = REF_TREE.children[1].add_child(Tree())
T1211 = REF_TREE.children[1].children[0].add_child(Tree())
T12111 = REF_TREE.children[1].children[0].children[0].add_child(Tree())
T12112 = REF_TREE.children[1].children[0].children[0].add_child(Tree())

# REF_TREE2: same shape as REF_TREE, plus an extra trifurcation
# (T11111_, T11112_, T11113_) hanging off T1111_ under T111_.
REF_TREE2 = Tree()
T11_ = REF_TREE2.add_child(Tree())
T12_ = REF_TREE2.add_child(Tree())
T111_ = REF_TREE2.children[0].add_child(Tree())
T112_ = REF_TREE2.children[0].add_child(Tree())
T121_ = REF_TREE2.children[1].add_child(Tree())
T122_ = REF_TREE2.children[1].add_child(Tree())
T1211_ = REF_TREE2.children[1].children[0].add_child(Tree())
T12111_ = REF_TREE2.children[1].children[0].children[0].add_child(Tree())
T12112_ = REF_TREE2.children[1].children[0].children[0].add_child(Tree())
T1111_ = REF_TREE2.children[0].children[0].add_child(Tree())
T11111_ = T1111_.add_child(Tree())
T11112_ = T1111_.add_child(Tree())
T11113_ = T1111_.add_child(Tree())
def test_instantiate_tree():
    """A freshly-created Tree has no parent and no children."""
    tree = Tree()
    nt.ok_(tree.parent is None)
    nt.ok_(len(tree.children) == 0)


def test_add_child():
    """add_child() returns the child and appends it in order."""
    parent = Tree()
    first = parent.add_child(Tree())
    second = parent.add_child(Tree())
    nt.ok_(len(parent.children) == 2)
    nt.ok_(parent.children == [first, second])


def test_parent():
    """Every added child points back at its parent."""
    parent = Tree()
    for _ in range(10):
        parent.add_child(Tree())
    nt.ok_(len(parent.children) == 10)
    for child in parent.children:
        nt.ok_(child.parent is parent)


def test_is_root_true():
    """A parentless node is a root (via class and instance call)."""
    tree = Tree()
    nt.ok_(Tree.is_root(tree))
    nt.ok_(tree.is_root())


def test_is_root_false():
    """A node with a parent is not a root."""
    parent = Tree()
    parent.add_child(Tree())
    nt.ok_(not parent.children[0].is_root())


def test_is_leaf():
    """A childless node is a leaf."""
    nt.ok_(Tree().is_leaf())


def test_is_leaf_false():
    """A node with children is not a leaf."""
    parent = Tree()
    parent.add_child(Tree())
    nt.ok_(not parent.is_leaf())
def test_is_forking_point():
    """Two or more children make a forking point."""
    tree = Tree()
    tree.add_child(Tree())
    tree.add_child(Tree())
    nt.ok_(tree.is_forking_point())
    tree.add_child(Tree())
    nt.ok_(tree.is_forking_point())


def test_is_forking_point_false():
    """Zero or one child is not a forking point."""
    tree = Tree()
    nt.ok_(not tree.is_forking_point())
    tree.add_child(Tree())
    nt.ok_(not tree.is_forking_point())


def test_is_bifurcation_point():
    """Exactly two children make a bifurcation point."""
    tree = Tree()
    tree.add_child(Tree())
    tree.add_child(Tree())
    nt.ok_(tree.is_bifurcation_point())


def test_is_bifurcation_point_false():
    """Zero, one, or three children are not a bifurcation point."""
    tree = Tree()
    nt.ok_(not tree.is_bifurcation_point())
    tree.add_child(Tree())
    nt.ok_(not tree.is_bifurcation_point())
    tree.add_child(Tree())
    tree.add_child(Tree())
    nt.ok_(not tree.is_bifurcation_point())
def test_deep_iteration():
    """Iterators must not blow the recursion limit on a deep chain."""
    root = node = Tree()
    # Build a chain one node deeper than the interpreter recursion limit.
    for _ in range(1, sys.getrecursionlimit() + 2):
        node = node.add_child(Tree())
    list(root.ipreorder())
    list(root.ipostorder())
    list(node.iupstream())
def test_preorder_iteration():
    # Depth-first, parent visited before its children.
    nt.ok_(list(REF_TREE.ipreorder()) ==
           [REF_TREE, T11, T111, T112, T12, T121, T1211, T12111, T12112, T122])
    nt.ok_(list(REF_TREE.children[0].ipreorder()) == [T11, T111, T112])
    nt.ok_(list(REF_TREE.children[1].ipreorder()) ==
           [T12, T121, T1211, T12111, T12112, T122])


def test_postorder_iteration():
    # Depth-first, children visited before their parent.
    nt.ok_(list(REF_TREE.ipostorder()) ==
           [T111, T112, T11, T12111, T12112, T1211, T121, T122, T12, REF_TREE])
    nt.ok_(list(REF_TREE.children[0].ipostorder()) == [T111, T112, T11])
    nt.ok_(list(REF_TREE.children[1].ipostorder()) ==
           [T12111, T12112, T1211, T121, T122, T12])


def test_upstream_iteration():
    # Walks from a node up through its ancestors to the root (inclusive).
    nt.ok_(list(REF_TREE.iupstream()) == [REF_TREE])
    nt.ok_(list(REF_TREE.children[0].iupstream()) == [T11, REF_TREE])
    nt.ok_(list(REF_TREE.children[0].children[0].iupstream()) ==
           [T111, T11, REF_TREE])
    nt.ok_(list(REF_TREE.children[0].children[1].iupstream()) ==
           [T112, T11, REF_TREE])
    nt.ok_(list(REF_TREE.children[1].iupstream()) == [T12, REF_TREE])
    nt.ok_(list(REF_TREE.children[1].children[0].iupstream()) ==
           [T121, T12, REF_TREE])
    nt.ok_(list(REF_TREE.children[1].children[1].iupstream()) ==
           [T122, T12, REF_TREE])
def test_leaf_iteration():
    """ileaf yields the leaves of each subtree in preorder."""
    nt.ok_(list(Tree.ileaf(REF_TREE)) == [T111, T112, T12111, T12112, T122])
    nt.ok_(list(Tree.ileaf(REF_TREE.children[0])) == [T111, T112])
    nt.ok_(list(Tree.ileaf(REF_TREE.children[1])) == [T12111, T12112, T122])
    nt.ok_(list(Tree.ileaf(REF_TREE.children[0].children[0])) == [T111])
    nt.ok_(list(Tree.ileaf(REF_TREE.children[0].children[1])) == [T112])
    nt.ok_(list(Tree.ileaf(REF_TREE.children[1].children[0])) == [T12111, T12112])
    nt.ok_(list(Tree.ileaf(REF_TREE.children[1].children[1])) == [T122])
def test_iforking_point():
    """Forking points of REF_TREE2 in default (preorder) order."""
    nt.assert_equal(list(REF_TREE2.iforking_point()),
                    [REF_TREE2, T11_, T1111_, T12_, T1211_])


def test_iforking_point_postorder():
    """Forking points of REF_TREE2 in postorder."""
    nt.assert_equal(list(REF_TREE2.iforking_point(Tree.ipostorder)),
                    [T1111_, T11_, T1211_, T12_, REF_TREE2])


def test_iforking_point_upstream():
    """Forking points seen walking upstream from each leaf."""
    leaves = list(Tree.ileaf(REF_TREE2))
    ref_paths = [
        [T1111_, T11_, REF_TREE2], [T1111_, T11_, REF_TREE2], [T1111_, T11_, REF_TREE2],
        [T11_, REF_TREE2], [T1211_, T12_, REF_TREE2], [T1211_, T12_, REF_TREE2],
        [T12_, REF_TREE2]
    ]
    for leaf, ref in zip(leaves, ref_paths):
        nt.assert_equal(list(leaf.iforking_point(Tree.iupstream)), ref)


def test_ibifurcation_point():
    """Bifurcation points exclude the trifurcation at T1111_."""
    nt.assert_equal(list(REF_TREE2.ibifurcation_point()),
                    [REF_TREE2, T11_, T12_, T1211_])


def test_ibifurcation_point_postorder():
    """Bifurcation points of REF_TREE2 in postorder."""
    nt.assert_equal(list(REF_TREE2.ibifurcation_point(Tree.ipostorder)),
                    [T11_, T1211_, T12_, REF_TREE2])


def test_ibifurcation_point_upstream():
    """Bifurcation points seen walking upstream from each leaf."""
    leaves = list(REF_TREE2.ileaf())
    ref_paths = [
        [T11_, REF_TREE2], [T11_, REF_TREE2], [T11_, REF_TREE2], [T11_, REF_TREE2],
        [T1211_, T12_, REF_TREE2], [T1211_, T12_, REF_TREE2], [T12_, REF_TREE2]
    ]
    for leaf, ref in zip(leaves, ref_paths):
        nt.assert_equal(list(leaf.ibifurcation_point(Tree.iupstream)), ref)


def test_valiter_bifurcation_point():
    """Same bifurcation set via the ok_/list comparison style."""
    nt.ok_(list(REF_TREE2.ibifurcation_point()) ==
           [REF_TREE2, T11_, T12_, T1211_])
|
|
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from tempest_lib.common.utils import data_utils
from tempest.common.utils.linux import remote_client
from tempest import config
from tempest import exceptions
from tempest import test
from tempest.thirdparty.boto import test as boto_test
from tempest.thirdparty.boto.utils import s3
from tempest.thirdparty.boto.utils import wait
CONF = config.CONF
LOG = logging.getLogger(__name__)
class InstanceRunTest(boto_test.BotoTestCase):
@classmethod
def setup_clients(cls):
    """Bind the S3 and EC2 API clients used by all tests in this class."""
    super(InstanceRunTest, cls).setup_clients()
    cls.s3_client = cls.os.s3_client
    cls.ec2_client = cls.os.ec2api_client
@classmethod
def resource_setup(cls):
    """Upload image materials to S3 and register the ami/aki/ari images.

    Skips the whole class when the image manifests are not available,
    and waits for every registered image to reach "available",
    deregistering all of them if any fails.
    """
    super(InstanceRunTest, cls).resource_setup()
    if not cls.conclusion['A_I_IMAGES_READY']:
        raise cls.skipException("".join(("EC2 ", cls.__name__,
                                         ": requires ami/aki/ari manifest")))
    cls.zone = CONF.boto.aws_zone
    cls.materials_path = CONF.boto.s3_materials_path
    ami_manifest = CONF.boto.ami_manifest
    aki_manifest = CONF.boto.aki_manifest
    ari_manifest = CONF.boto.ari_manifest
    cls.instance_type = CONF.boto.instance_type
    cls.bucket_name = data_utils.rand_name("s3bucket")
    cls.keypair_name = data_utils.rand_name("keypair")
    cls.keypair = cls.ec2_client.create_key_pair(cls.keypair_name)
    cls.addResourceCleanUp(cls.ec2_client.delete_key_pair,
                           cls.keypair_name)
    bucket = cls.s3_client.create_bucket(cls.bucket_name)
    cls.addResourceCleanUp(cls.destroy_bucket,
                           cls.s3_client.connection_data,
                           cls.bucket_name)
    s3.s3_upload_dir(bucket, cls.materials_path)
    # One entry per image flavor; "location" points inside the bucket
    # just populated above.
    cls.images = {"ami":
                  {"name": data_utils.rand_name("ami-name"),
                   "location": cls.bucket_name + "/" + ami_manifest},
                  "aki":
                  {"name": data_utils.rand_name("aki-name"),
                   "location": cls.bucket_name + "/" + aki_manifest},
                  "ari":
                  {"name": data_utils.rand_name("ari-name"),
                   "location": cls.bucket_name + "/" + ari_manifest}}
    for image in cls.images.itervalues():
        image["image_id"] = cls.ec2_client.register_image(
            name=image["name"],
            image_location=image["location"])
        cls.addResourceCleanUp(cls.ec2_client.deregister_image,
                               image["image_id"])
    for image in cls.images.itervalues():
        def _state():
            # Closure over the current `image`; only called within this
            # loop iteration, so closure late-binding is not an issue.
            retr = cls.ec2_client.get_image(image["image_id"])
            return retr.state
        state = wait.state_wait(_state, "available")
        if state != "available":
            # Roll back every registration before failing the setup.
            for _image in cls.images.itervalues():
                cls.ec2_client.deregister_image(_image["image_id"])
            raise exceptions.EC2RegisterImageException(
                image_id=image["image_id"])
def _terminate_reservation(self, reservation, rcuk):
    """Terminate all instances of *reservation* and cancel its cleanup."""
    instances = reservation.instances
    # Fire all terminations first, then wait for each to disappear.
    for instance in instances:
        instance.terminate()
    for instance in instances:
        self.assertInstanceStateWait(instance, '_GONE')
    self.cancelResourceCleanUp(rcuk)
@test.idempotent_id('c881fbb7-d56e-4054-9d76-1c3a60a207b0')
def test_run_idempotent_instances(self):
    """EC2 run-instances is idempotent per client token."""
    def _run_instance(client_token):
        # Launch one instance tagged with the idempotency token and
        # register a cleanup handle for its reservation.
        reservation = self.ec2_client.run_instances(
            image_id=self.images["ami"]["image_id"],
            kernel_id=self.images["aki"]["image_id"],
            ramdisk_id=self.images["ari"]["image_id"],
            instance_type=self.instance_type,
            client_token=client_token)
        cleanup = self.addResourceCleanUp(self.destroy_reservation,
                                          reservation)
        return (reservation, cleanup)

    reservation_1, rcuk_1 = _run_instance('token_1')
    reservation_2, rcuk_2 = _run_instance('token_2')
    reservation_1a, rcuk_1a = _run_instance('token_1')
    for reservation in (reservation_1, reservation_2, reservation_1a):
        self.assertIsNotNone(reservation)
    # token_1 was reused, so EC2 must hand back the same reservation.
    self.assertEqual(reservation_1.id, reservation_1a.id)
    # The duplicate's cleanup would double-free; rcuk_1 already covers it.
    self.cancelResourceCleanUp(rcuk_1a)
    self._terminate_reservation(reservation_1, rcuk_1)
    self._terminate_reservation(reservation_2, rcuk_2)
@test.idempotent_id('2ea26a39-f96c-48fc-8374-5c10ec184c67')
def test_run_stop_terminate_instance(self):
    """EC2 run, stop and terminate an instance."""
    def _ensure_state(instance, state):
        # Log the current state, waiting for the target state if needed.
        LOG.info("state: %s", instance.state)
        if instance.state != state:
            self.assertInstanceStateWait(instance, state)

    image_ami = self.ec2_client.get_image(self.images["ami"]["image_id"])
    reservation = image_ami.run(kernel_id=self.images["aki"]["image_id"],
                                ramdisk_id=self.images["ari"]["image_id"],
                                instance_type=self.instance_type)
    rcuk = self.addResourceCleanUp(self.destroy_reservation, reservation)
    for instance in reservation.instances:
        _ensure_state(instance, "running")
    for instance in reservation.instances:
        instance.stop()
        _ensure_state(instance, "stopped")
    self._terminate_reservation(reservation, rcuk)
@test.idempotent_id('3d77225a-5cec-4e54-a017-9ebf11a266e6')
def test_run_stop_terminate_instance_with_tags(self):
    """EC2 run, stop and terminate an instance, exercising the tag API."""
    image_ami = self.ec2_client.get_image(self.images["ami"]
                                          ["image_id"])
    reservation = image_ami.run(kernel_id=self.images["aki"]["image_id"],
                                ramdisk_id=self.images["ari"]["image_id"],
                                instance_type=self.instance_type)
    rcuk = self.addResourceCleanUp(self.destroy_reservation, reservation)
    for instance in reservation.instances:
        LOG.info("state: %s", instance.state)
        if instance.state != "running":
            self.assertInstanceStateWait(instance, "running")
        instance.add_tag('key1', value='value1')
    # The new tag must be visible both unfiltered and via key/value filters.
    tags = self.ec2_client.get_all_tags()
    td = {item.name: item.value for item in tags}
    self.assertIn('key1', td)
    self.assertEqual('value1', td['key1'])
    tags = self.ec2_client.get_all_tags(filters={'key': 'key1'})
    td = {item.name: item.value for item in tags}
    self.assertIn('key1', td)
    self.assertEqual('value1', td['key1'])
    tags = self.ec2_client.get_all_tags(filters={'value': 'value1'})
    td = {item.name: item.value for item in tags}
    self.assertIn('key1', td)
    self.assertEqual('value1', td['key1'])
    tags = self.ec2_client.get_all_tags(filters={'key': 'value2'})
    td = {item.name: item.value for item in tags}
    self.assertNotIn('key1', td)
    for instance in reservation.instances:
        instance.remove_tag('key1', value='value1')
    tags = self.ec2_client.get_all_tags()
    # NOTE: Volume-attach and detach causes metadata (tags) to be created
    # for the volume. So exclude them while asserting.
    # BUG FIX: the original asserted 'key1' not in `tags`, a list of tag
    # *objects*, which is vacuously true; compare against tag names
    # instead, as done for the positive checks above.
    td = {item.name: item.value for item in tags}
    self.assertNotIn('key1', td)
    for instance in reservation.instances:
        instance.stop()
        LOG.info("state: %s", instance.state)
        if instance.state != "stopped":
            self.assertInstanceStateWait(instance, "stopped")
    self._terminate_reservation(reservation, rcuk)
@test.idempotent_id('252945b5-3294-4fda-ae21-928a42f63f76')
def test_run_terminate_instance(self):
    """EC2 run, terminate immediately."""
    image_ami = self.ec2_client.get_image(self.images["ami"]["image_id"])
    reservation = image_ami.run(kernel_id=self.images["aki"]["image_id"],
                                ramdisk_id=self.images["ari"]["image_id"],
                                instance_type=self.instance_type)
    for instance in reservation.instances:
        instance.terminate()
    # NOTE(review): the flattened source is ambiguous, but upstream
    # tempest waits on the last instance only, after the loop — confirm.
    self.assertInstanceStateWait(instance, '_GONE')
    @test.idempotent_id('ab836c29-737b-4101-9fb9-87045eaf89e9')
    def test_compute_with_volumes(self):
        # EC2 1. integration test (not strict)
        image_ami = self.ec2_client.get_image(self.images["ami"]["image_id"])
        sec_group_name = data_utils.rand_name("securitygroup")
        group_desc = sec_group_name + " security group description "
        security_group = self.ec2_client.create_security_group(sec_group_name,
                                                               group_desc)
        self.addResourceCleanUp(self.destroy_security_group_wait,
                                security_group)
        # Open ICMP (ping) and TCP/22 (ssh) so the instance is reachable.
        self.assertTrue(
            self.ec2_client.authorize_security_group(
                sec_group_name,
                ip_protocol="icmp",
                cidr_ip="0.0.0.0/0",
                from_port=-1,
                to_port=-1))
        self.assertTrue(
            self.ec2_client.authorize_security_group(
                sec_group_name,
                ip_protocol="tcp",
                cidr_ip="0.0.0.0/0",
                from_port=22,
                to_port=22))
        reservation = image_ami.run(kernel_id=self.images["aki"]["image_id"],
                                    ramdisk_id=self.images["ari"]["image_id"],
                                    instance_type=self.instance_type,
                                    key_name=self.keypair_name,
                                    security_groups=(sec_group_name,))
        LOG.debug("Instance booted - state: %s",
                  reservation.instances[0].state)
        self.addResourceCleanUp(self.destroy_reservation,
                                reservation)
        volume = self.ec2_client.create_volume(CONF.volume.volume_size,
                                               self.zone)
        LOG.debug("Volume created - status: %s", volume.status)
        self.addResourceCleanUp(self.destroy_volume_wait, volume)
        instance = reservation.instances[0]
        if instance.state != "running":
            self.assertInstanceStateWait(instance, "running")
        LOG.debug("Instance now running - state: %s", instance.state)
        # Attach a floating address; keep the cleanup handles so they can be
        # cancelled once the address is explicitly released below.
        address = self.ec2_client.allocate_address()
        rcuk_a = self.addResourceCleanUp(address.delete)
        self.assertTrue(address.associate(instance.id))
        rcuk_da = self.addResourceCleanUp(address.disassociate)
        # TODO(afazekas): ping test. dependecy/permission ?
        self.assertVolumeStatusWait(volume, "available")
        # NOTE(afazekas): it may report available before it is available
        ssh = remote_client.RemoteClient(address.public_ip,
                                         CONF.compute.ssh_user,
                                         pkey=self.keypair.material)
        text = data_utils.rand_name("Pattern text for console output")
        resp = ssh.write_to_console(text)
        self.assertFalse(resp)
        def _output():
            output = instance.get_console_output()
            return output.output
        # Wait until the text written to the console shows up in the log.
        wait.re_search_wait(_output, text)
        part_lines = ssh.get_partitions().split('\n')
        volume.attach(instance.id, "/dev/vdh")
        def _volume_state():
            """Return volume state realizing that 'in-use' is overloaded."""
            volume.update(validate=True)
            status = volume.status
            attached = volume.attach_data.status
            LOG.debug("Volume %s is in status: %s, attach_status: %s",
                      volume.id, status, attached)
            # Nova reports 'in-use' on 'attaching' volumes because we
            # have a single volume status, and EC2 has 2. Ensure that
            # if we aren't attached yet we return something other than
            # 'in-use'
            if status == 'in-use' and attached != 'attached':
                return 'attaching'
            else:
                return status
        wait.re_search_wait(_volume_state, "in-use")
        # NOTE(afazekas): Different Hypervisor backends names
        # differently the devices,
        # now we just test is the partition number increased/decreased
        def _part_state():
            # NOTE(review): this closes over part_lines (re-read on each
            # call, so rebinding it below changes the baseline) and relies
            # on element-wise list comparison of the partition-table lines;
            # 'INCREASE'/'DECREASE' is therefore approximate, not a count.
            current = ssh.get_partitions().split('\n')
            LOG.debug("Partition map for instance: %s", current)
            if current > part_lines:
                return 'INCREASE'
            if current < part_lines:
                return 'DECREASE'
            return 'EQUAL'
        wait.state_wait(_part_state, 'INCREASE')
        # Re-baseline so the post-detach wait below looks for a DECREASE
        # relative to the attached state (the closure sees this rebinding).
        part_lines = ssh.get_partitions().split('\n')
        # TODO(afazekas): Resource compare to the flavor settings
        volume.detach()
        self.assertVolumeStatusWait(volume, "available")
        wait.state_wait(_part_state, 'DECREASE')
        instance.stop()
        address.disassociate()
        self.assertAddressDissasociatedWait(address)
        self.cancelResourceCleanUp(rcuk_da)
        address.release()
        self.assertAddressReleasedWait(address)
        self.cancelResourceCleanUp(rcuk_a)
        LOG.debug("Instance %s state: %s", instance.id, instance.state)
        if instance.state != "stopped":
            self.assertInstanceStateWait(instance, "stopped")
        # TODO(afazekas): move steps from teardown to the test case
        # TODO(afazekas): Snapshot/volume read/write test case
|
|
"""
Models the HostWgsRawSeqSet object.
"""
import json
import logging
import os
import string
from cutlass.iHMPSession import iHMPSession
from cutlass.Base import Base
from cutlass.aspera import aspera
from cutlass.Util import *
# pylint: disable=W0703, C1801
# Create a module logger named after the module
module_logger = logging.getLogger(__name__)
# Add a NullHandler for the case if no logging is configured by the application
module_logger.addHandler(logging.NullHandler())
class HostWgsRawSeqSet(Base):
    """
    The class encapsulating the HostWgsRawSeqSet data for an iHMP instance.
    This class contains all the fields required to save a HostWgsRawSeqSet
    object in the OSDF instance.
    Attributes:
        namespace (str): The namespace this class will use in the OSDF instance
        aspera_server (str): Host that sequence files are uploaded to.
    """
    namespace = "ihmp"
    # Upload target for the raw sequence files (see _upload_data()).
    aspera_server = "aspera2.ihmpdcc.org"
    def __init__(self, *args, **kwargs):
        """
        Constructor for the HostWgsRawSeqSet class. This initializes the fields
        specific to the HostWgsRawSeqSet class, and inherits from the Base
        class.
        Args:
            *args: Positional arguments forwarded to the Base constructor.
            **kwargs: Keyword arguments forwarded to the Base constructor.
        """
        self.logger = logging.getLogger(self.__module__ + '.' + self.__class__.__name__)
        self.logger.addHandler(logging.NullHandler())
        # These are common to all objects
        self._id = None
        self._version = None
        self._links = {}
        self._tags = []
        # These are particular to HostWgsRawSeqSet objects
        self._checksums = None
        self._comment = None
        self._exp_length = None
        self._format = None
        self._format_doc = None
        self._local_file = None
        self._seq_model = None
        self._sequence_type = None
        self._size = None
        self._study = None
        # Placeholder; the real fasp:// URL is filled in by _upload_data()
        # (or "<private>" by save() when private_files is set).
        self._urls = ['']
        # Optional properties
        self._private_files = None
        super(HostWgsRawSeqSet, self).__init__(*args, **kwargs)
def validate(self):
"""
Validates the current object's data/JSON against the current
schema in the OSDF instance for that specific object. All required
fields for that specific object must be present.
Args:
None
Returns:
A list of strings, where each string is the error that the
validation raised during OSDF validation
"""
self.logger.debug("In validate.")
document = self._get_raw_doc()
session = iHMPSession.get_session()
self.logger.info("Got iHMP session.")
(valid, error_message) = session.get_osdf().validate_node(document)
problems = []
if not valid:
self.logger.info("Validation did not succeed for %s.", __name__)
problems.append(error_message)
if self._private_files:
self.logger.info("User specified the files are private.")
else:
self.logger.info("Data is NOT private, so check that local_file is set.")
if self._local_file is None:
problems.append("Local file is not yet set.")
elif not os.path.isfile(self._local_file):
problems.append("Local file does not point to an actual file.")
if 'sequenced_from' not in self._links.keys():
problems.append("Must add a 'sequenced_from' link to a host_seq_prep.")
self.logger.debug("Number of validation problems: %s.", len(problems))
return problems
def is_valid(self):
"""
Validates the current object's data/JSON against the current schema
in the OSDF instance for the specific object. However, unlike
validate(), this method does not provide exact error messages,
it states if the validation was successful or not.
Args:
None
Returns:
True if the data validates, False if the current state of
fields in the instance do not validate with OSDF or
other node requirements.
"""
self.logger.debug("In is_valid.")
problems = self.validate()
valid = True
if len(problems):
self.logger.error("There were %s problems.", len(problems))
valid = False
self.logger.debug("Valid? %s", str(valid))
return valid
@property
def checksums(self):
"""
str: One or more checksums used to ensure file integrity.
"""
self.logger.debug("In 'checksums' getter.")
return self._checksums
@checksums.setter
@enforce_dict
def checksums(self, checksums):
"""
The setter for the checksum data.
Args:
checksums (dict): The checksums for the data file.
Returns:
None
"""
self.logger.debug("In 'checksums' setter.")
self._checksums = checksums
@property
def comment(self):
"""
str: Free-text comment.
"""
self.logger.debug("In 'comment' getter.")
return self._comment
@comment.setter
@enforce_string
def comment(self, comment):
"""
The setter for the comment field. The comment must be a string,
and less than 512 characters.
Args:
comment (str): The new comment to add to the string.
Returns:
None
"""
self.logger.debug("In 'comment' setter.")
self._comment = comment
@property
def exp_length(self):
"""
int: The number of raw bases or color space calls expected for the read,
includes both mate pairs and all technical portions.
"""
self.logger.debug("In 'exp_length' getter.")
return self._exp_length
@exp_length.setter
@enforce_int
def exp_length(self, exp_length):
"""
The setter for the HostWgsRawSeqSet exp_length.
Args:
exp_length (int): The new exp_length for the current instance.
Returns:
None
"""
self.logger.debug("In 'exp_length' setter.")
if exp_length < 0:
raise ValueError("The 'exp_length' must be non-negative.")
self._exp_length = exp_length
@property
def format(self):
"""
str: The file format of the sequence file
"""
self.logger.debug("In 'format' getter.")
return self._format
@format.setter
@enforce_string
def format(self, format_str):
"""
The setter for the HostWgsRawSeqSet format. This must be either fasta
or fastq.
Args:
format_str (str): The new format string for the current object.
Returns:
None
"""
self.logger.debug("In 'format' setter.")
formats = ["fasta", "fastq"]
if format_str in formats:
self._format = format_str
else:
raise Exception("Format must be either fasta or fastq.")
@property
def format_doc(self):
"""
str: URL for documentation of file format.
"""
self.logger.debug("In 'format_doc' getter.")
return self._format_doc
@format_doc.setter
@enforce_string
def format_doc(self, format_doc):
"""
The setter for the HostWgsRawSeqSet format doc.
Args:
format_doc (str): The new format_doc for the current object.
Returns:
None
"""
self.logger.debug("In 'format_doc' setter.")
self._format_doc = format_doc
@property
def local_file(self):
"""
str: URL to the local file to upload to the server.
"""
self.logger.debug("In local_file getter.")
return self._local_file
@local_file.setter
@enforce_string
def local_file(self, local_file):
"""
The setter for the HostWgsRawSeqSet local file.
Args:
local_file (str): The URL to the local file that should
be uploaded to the server.
Returns:
None
"""
self.logger.debug("In 'local_file' setter.")
self._local_file = local_file
@property
def private_files(self):
"""
bool: Whether this object describes private data that should not
be uploaded to the DCC. Defaults to false.
"""
self.logger.debug("In 'private_files' getter.")
return self._private_files
@private_files.setter
@enforce_bool
def private_files(self, private_files):
"""
The setter for the private files flag to denote this object
describes data that should not be uploaded to the DCC.
Args:
private_files (bool):
Returns:
None
"""
self.logger.debug("In 'private_files' setter.")
self._private_files = private_files
@property
def seq_model(self):
"""
str: Sequencing instrument model.
"""
self.logger.debug("In 'seq_model' getter.")
return self._seq_model
@seq_model.setter
@enforce_string
def seq_model(self, seq_model):
"""
The setter for the HostWgsRawSeqSet seq model.
Args:
seq_model (str): The new seq model.
Returns:
None
"""
self.logger.debug("In 'seq_model' setter.")
self._seq_model = seq_model
@property
def sequence_type(self):
"""
str:
Specifies whether the file contains peptide or nucleotide data.
"""
self.logger.debug("In 'sequence_type' getter.")
return self._sequence_type
@sequence_type.setter
@enforce_string
def sequence_type(self, sequence_type):
"""
The setter for the HostWgsRawSeqSet sequence type. This must be either
peptide or nucleotide.
Args:
sequence_type (str): The new sequence type.
Returns:
None
"""
self.logger.debug("In 'sequence_type' setter.")
types = ["peptide", "nucleotide"]
if sequence_type in types:
self._sequence_type = sequence_type
else:
raise Exception("Sequence type must be peptide or nucleotide")
@property
def size(self):
"""
int: The size of the file in bytes.
"""
self.logger.debug("In 'size' getter.")
return self._size
@size.setter
@enforce_int
def size(self, size):
"""
The setter for the HostWgsRawSeqSet size.
Args:
size (int): The size of the seq set in bytes.
Returns:
None
"""
self.logger.debug("In 'size' setter.")
if size < 0:
raise ValueError("The size must be non-negative.")
self._size = size
@property
def study(self):
"""
str: One of the 3 studies that are part of the iHMP.
"""
self.logger.debug("In 'study' getter.")
return self._study
@study.setter
@enforce_string
def study(self, study):
"""
The setter for the HostWgsRawSeqSet study. This is restricted to be
either preg_preterm, ibd, or prediabetes.
Args:
study (str): The study of the seq set.
Returns:
None
"""
self.logger.debug("In 'study' setter.")
studies = ["preg_preterm", "ibd", "prediabetes"]
if study in studies:
self._study = study
else:
raise Exception("Not a valid study")
    @property
    def urls(self):
        """
        array: An array of URL from where the file can be obtained,
        http, ftp, fasp, etc...
        Read-only: populated by _upload_data()/save(), not by callers.
        """
        self.logger.debug("In 'urls' getter.")
        return self._urls
    @staticmethod
    def required_fields():
        """
        A static method. The required fields for the class.
        Note that 'sequence_type' and 'private_files' are optional and
        deliberately absent here.
        Args:
            None
        Returns:
            Tuple of strings of required properties.
        """
        module_logger.debug("In required_fields.")
        return ("checksums", "comment", "exp_length", "format", "format_doc",
                "local_file", "seq_model", "size", "study", "tags", "urls")
def _get_raw_doc(self):
"""
Generates the raw JSON document for the current object. All required
fields are filled in, regardless of whether they are set or not. Any
remaining fields are included only if they are set.
Args:
None
Returns:
A dictionary representation of the JSON document.
"""
self.logger.debug("In _get_raw_doc.")
doc = {
'acl': {
'read': ['all'],
'write': [HostWgsRawSeqSet.namespace]
},
'linkage': self._links,
'ns': HostWgsRawSeqSet.namespace,
'node_type': 'host_wgs_raw_seq_set',
'meta': {
"checksums": self._checksums,
"comment": self._comment,
"exp_length": self._exp_length,
"format": self._format,
"format_doc": self._format_doc,
"seq_model": self.seq_model,
"size": self._size,
"study": self._study,
"urls": self._urls,
"subtype": "wgs",
'tags': self._tags
}
}
if self._id is not None:
self.logger.debug("%s object has the OSDF id set.", __name__)
doc['id'] = self._id
if self._version is not None:
self.logger.debug("%s object has the OSDF version set.", __name__)
doc['ver'] = self._version
# Handle optional properties
if self._sequence_type is not None:
self.logger.debug("%s object has the sequence_type set.", __name__)
doc['meta']['sequence_type'] = self._sequence_type
if self._private_files is not None:
self.logger.debug("%s object has the 'private_files' property set.",
__name__
)
doc['meta']['private_files'] = self._private_files
return doc
@staticmethod
def search(query="\"host_wgs_raw_seq_set\"[node_type]"):
"""
Searches the OSDF database through all HostWgsRawSeqSet node types. Any
criteria the user wishes to add is provided by the user in the query
language specifications provided in the OSDF documentation. A general
format is (including the quotes and brackets):
"search criteria"[field to search]
If there are any results, they are returned as a HostWgsRawSeqSet
instance, otherwise an empty list will be returned.
Args:
query (str): The query for the OSDF framework. Defaults to the
HostWgsRawSeqSet node type.
Returns:
Returns an array of HostWgsRawSeqSet objects. It returns an empty
list if there are no results.
"""
module_logger.debug("In search.")
session = iHMPSession.get_session()
module_logger.info("Got iHMP session.")
if query != '"host_wgs_raw_seq_set"[node_type]':
query = '({}) && "host_wgs_raw_seq_set"[node_type]'.format(query)
module_logger.debug("Submitting OQL query: %s", query)
seqSet_data = session.get_osdf().oql_query("ihmp", query)
all_results = seqSet_data['results']
result_list = list()
if len(all_results) > 0:
for hit in all_results:
result = HostWgsRawSeqSet.load_hostWgsRawSeqSet(hit)
result_list.append(result)
return result_list
@staticmethod
def load_hostWgsRawSeqSet(seq_set_data):
"""
Takes the provided JSON string and converts it to a HostWgsRawSeqSet
object.
Args:
seq_set_data (str): The JSON string to convert
Returns:
Returns a HostWgsRawSeqSet instance.
"""
module_logger.info("Creating a template %s.", __name__)
seq_set = HostWgsRawSeqSet()
module_logger.debug("Filling in %s details.", __name__)
# The attributes commmon to all iHMP nodes
seq_set._set_id(seq_set_data['id'])
seq_set.version = seq_set_data['ver']
seq_set.links = seq_set_data['linkage']
# Required fields
seq_set.checksums = seq_set_data['meta']['checksums']
seq_set.comment = seq_set_data['meta']['comment']
seq_set.exp_length = seq_set_data['meta']['exp_length']
seq_set.format = seq_set_data['meta']['format']
seq_set.format_doc = seq_set_data['meta']['format_doc']
seq_set.seq_model = seq_set_data['meta']['seq_model']
seq_set.size = seq_set_data['meta']['size']
seq_set.study = seq_set_data['meta']['study']
seq_set.tags = seq_set_data['meta']['tags']
seq_set._urls = seq_set_data['meta']['urls']
# Optional fields
if 'sequence_type' in seq_set_data['meta']:
seq_set.sequence_type = seq_set_data['meta']['sequence_type']
if 'private_files' in seq_set_data['meta']:
seq_set.private_files = seq_set_data['meta']['private_files']
module_logger.debug("Returning loaded " + __name__)
return seq_set
@staticmethod
def load(seq_set_id):
"""
Loads the data for the specified input ID from the OSDF instance to
this object. If the provided ID does not exist, then an error message
is provided.
Args:
seq_set_id (str): The OSDF ID for the document to load.
Returns:
A HostWgsRawSeqSet object with all the available OSDF data loaded
into it.
"""
module_logger.debug("In load. Specified ID: %s", seq_set_id)
session = iHMPSession.get_session()
module_logger.info("Got iHMP session.")
seq_set_data = session.get_osdf().get_node(seq_set_id)
seq_set = HostWgsRawSeqSet.load_hostWgsRawSeqSet(seq_set_data)
module_logger.debug("Returning loaded %s.", __name__)
return seq_set
    def _upload_data(self):
        """
        Upload the local sequence file to the iHMP aspera server and record
        the resulting fasp:// URL in self._urls.
        Raises:
            ValueError: If the study has no remote directory mapping.
            Exception: If the aspera upload fails.
        """
        self.logger.debug("In _upload_data.")
        session = iHMPSession.get_session()
        study = self._study
        # Map study name to its directory on the aspera server.
        study2dir = {
            "ibd": "ibd",
            "preg_preterm": "ptb",
            "prediabetes": "t2d"
        }
        if study not in study2dir:
            raise ValueError("Invalid study. No directory mapping for %s" % study)
        study_dir = study2dir[study]
        # Sanitize the remote file name to a conservative character set.
        remote_base = os.path.basename(self._local_file)
        valid_chars = "-_.%s%s" % (string.ascii_letters, string.digits)
        remote_base = ''.join(c for c in remote_base if c in valid_chars)
        # NOTE: spaces were already removed by the valid_chars filter above
        # (valid_chars contains no space), so this replace is a no-op kept
        # as a defensive measure.
        remote_base = remote_base.replace(' ', '_') # No spaces in filenames
        remote_path = "/".join(["/" + study_dir, "genome", "host", "wgs",
                                "raw", remote_base])
        self.logger.debug("Remote path for this file will be %s.", remote_path)
        # Upload the file to the iHMP aspera server
        upload_result = aspera.upload_file(HostWgsRawSeqSet.aspera_server,
                                           session.username,
                                           session.password,
                                           self._local_file,
                                           remote_path)
        if not upload_result:
            self.logger.error("Experienced an error uploading the sequence set. "
                              "Aborting save.")
            raise Exception("Unable to upload host WGS raw sequence set.")
        else:
            self._urls = ["fasp://" + HostWgsRawSeqSet.aspera_server + remote_path]
def save(self):
"""
Saves the data in OSDF. The JSON form of the current data for the
instance is validated in the save function. If the data is not valid,
then the data will not be saved. If the instance was saved previously,
then the node ID is assigned the alpha numeric found in the OSDF
instance. If not saved previously, then the node ID is 'None', and upon
a successful save, will be assigned the ID found in OSDF.
Also, the version is updated as the data is saved in OSDF.
Args:
None
Returns;
True if successful, False otherwise.
"""
self.logger.debug("In save.")
# If node previously saved, use edit_node instead since ID
# is given (an update in a way)
# can also use get_node to check if the node already exists
if not self.is_valid():
self.logger.error("Cannot save, data is invalid")
return False
session = iHMPSession.get_session()
self.logger.info("Got iHMP session.")
success = False
if self._private_files:
self._urls = ["<private>"]
else:
try:
self._upload_data()
except Exception as upload_exception:
self.logger.exception(upload_exception)
# Don't bother continuing...
return False
osdf = session.get_osdf()
if self.id is None:
# The document has not yet been saved
self.logger.info("About to insert a new %s OSDF node.", __name__)
# Get the JSON form of the data and load it
self.logger.debug("Converting %s to parsed JSON form.", __name__)
data = json.loads(self.to_json())
self.logger.info("Got the raw JSON document.")
try:
self.logger.info("Attempting to save a new node.")
node_id = osdf.insert_node(data)
self._set_id(node_id)
self._version = 1
self.logger.info("Save for %s %s successful.", __name__, node_id)
self.logger.info("Setting ID for %s %s.", __name__, node_id)
success = True
except Exception as save_exception:
self.logger.exception(save_exception)
self.logger.error("An error occurred while saving %s. "
"Reason: %s", __name__, save_exception)
else:
self.logger.info("%s already has an ID, so we do an update "
"(not an insert).", __name__)
try:
seq_set_data = self._get_raw_doc()
seq_set_id = self._id
self.logger.info("Attempting to update %s with ID: %s.",
__name__, seq_set_id
)
osdf.edit_node(seq_set_data)
self.logger.info("Update for %s %s successful.",
__name__, seq_set_id
)
seq_set_data = osdf.get_node(seq_set_id)
latest_version = seq_set_data['ver']
self.logger.debug(
"The version of this %s is now: %s",
__name__,
str(latest_version)
)
self._version = latest_version
success = True
except Exception as e:
self.logger.error("An error occurred while updating " +
__name__ + " %s. Reason: %s" % self._id, e)
self.logger.debug("Returning " + str(success))
return success
|
|
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for parts of our release automation system.
"""
import os
import sys
from setuptools.dist import Distribution
from twisted.trial.unittest import TestCase
from twisted.python import dist
from twisted.python.dist import (get_setup_args, ConditionalExtension,
build_scripts_twisted, _EXTRAS_REQUIRE)
from twisted.python.filepath import FilePath
class SetupTests(TestCase):
    """
    Tests for L{get_setup_args}.
    """
    def test_conditionalExtensions(self):
        """
        Passing C{conditionalExtensions} as a list of L{ConditionalExtension}
        objects to get_setup_args inserts a custom build_ext into the result
        which knows how to check whether they should be built.
        """
        good_ext = ConditionalExtension("whatever", ["whatever.c"],
                                        condition=lambda b: True)
        bad_ext = ConditionalExtension("whatever", ["whatever.c"],
                                       condition=lambda b: False)
        args = get_setup_args(conditionalExtensions=[good_ext, bad_ext])
        # ext_modules should be set even though it's not used. See comment
        # in get_setup_args
        self.assertEqual(args["ext_modules"], [good_ext, bad_ext])
        cmdclass = args["cmdclass"]
        build_ext = cmdclass["build_ext"]
        builder = build_ext(Distribution())
        builder.prepare_extensions()
        # Only the extension whose condition evaluated true survives.
        self.assertEqual(builder.extensions, [good_ext])
    def test_win32Definition(self):
        """
        When building on Windows NT, the WIN32 macro will be defined as 1.
        """
        ext = ConditionalExtension("whatever", ["whatever.c"],
                                   define_macros=[("whatever", 2)])
        args = get_setup_args(conditionalExtensions=[ext])
        builder = args["cmdclass"]["build_ext"](Distribution())
        # Simulate Windows by patching os.name; TestCase.patch undoes it.
        self.patch(os, "name", "nt")
        builder.prepare_extensions()
        self.assertEqual(ext.define_macros, [("whatever", 2), ("WIN32", 1)])
class OptionalDependenciesTests(TestCase):
    """
    Tests for L{_EXTRAS_REQUIRE}
    """
    def _assertCommonOptionalDeps(self, deps):
        """
        Assert the optional dependencies shared by every platform-spanning
        extra (C{all_non_platform}, C{osx_platform}, C{windows_platform}).
        Factored out to avoid triplicating the same assertIn list.
        """
        self.assertIn('pyopenssl >= 0.11', deps)
        self.assertIn('service_identity', deps)
        self.assertIn('idna >= 0.6', deps)
        self.assertIn('gmpy', deps)
        self.assertIn('pyasn1', deps)
        self.assertIn('pycrypto', deps)
        self.assertIn('soappy', deps)
        self.assertIn('pyserial', deps)
    def test_distributeTakesExtrasRequire(self):
        """
        Setuptools' Distribution object parses and stores its C{extras_require}
        argument as an attribute.
        """
        extras = dict(im_an_extra_dependency="thing")
        attrs = dict(extras_require=extras)
        distribution = Distribution(attrs)
        self.assertEqual(
            extras,
            distribution.extras_require
        )
    def test_extrasRequireDictContainsKeys(self):
        """
        L{_EXTRAS_REQUIRE} contains options for all documented extras: C{dev},
        C{tls}, C{conch}, C{soap}, C{serial}, C{all_non_platform},
        C{osx_platform}, and C{windows_platform}.
        """
        self.assertIn('dev', _EXTRAS_REQUIRE)
        self.assertIn('tls', _EXTRAS_REQUIRE)
        self.assertIn('conch', _EXTRAS_REQUIRE)
        self.assertIn('soap', _EXTRAS_REQUIRE)
        self.assertIn('serial', _EXTRAS_REQUIRE)
        self.assertIn('all_non_platform', _EXTRAS_REQUIRE)
        self.assertIn('osx_platform', _EXTRAS_REQUIRE)
        self.assertIn('windows_platform', _EXTRAS_REQUIRE)
    def test_extrasRequiresDevDeps(self):
        """
        L{_EXTRAS_REQUIRE}'s C{dev} extra contains setuptools requirements for
        the tools required for Twisted development.
        """
        deps = _EXTRAS_REQUIRE['dev']
        self.assertIn('twistedchecker >= 0.2.0', deps)
        self.assertIn('pyflakes >= 0.8.1', deps)
        self.assertIn('twisted-dev-tools >= 0.0.2', deps)
        self.assertIn('python-subunit', deps)
        self.assertIn('sphinx >= 1.2.2', deps)
        self.assertIn('pydoctor >= 0.5', deps)
    def test_extrasRequiresTlsDeps(self):
        """
        L{_EXTRAS_REQUIRE}'s C{tls} extra contains setuptools requirements for
        the packages required to make Twisted's transport layer security fully
        work for both clients and servers.
        """
        deps = _EXTRAS_REQUIRE['tls']
        self.assertIn('pyopenssl >= 0.11', deps)
        self.assertIn('service_identity', deps)
        self.assertIn('idna >= 0.6', deps)
    def test_extrasRequiresConchDeps(self):
        """
        L{_EXTRAS_REQUIRE}'s C{conch} extra contains setuptools requirements
        for the packages required to make Twisted Conch's secure shell server
        work.
        """
        deps = _EXTRAS_REQUIRE['conch']
        self.assertIn('gmpy', deps)
        self.assertIn('pyasn1', deps)
        self.assertIn('pycrypto', deps)
    def test_extrasRequiresSoapDeps(self):
        """
        L{_EXTRAS_REQUIRE}' C{soap} extra contains setuptools requirements for
        the packages required to make the C{twisted.web.soap} module function.
        """
        self.assertIn(
            'soappy',
            _EXTRAS_REQUIRE['soap']
        )
    def test_extrasRequiresSerialDeps(self):
        """
        L{_EXTRAS_REQUIRE}'s C{serial} extra contains setuptools requirements
        for the packages required to make Twisted's serial support work.
        """
        self.assertIn(
            'pyserial',
            _EXTRAS_REQUIRE['serial']
        )
    def test_extrasRequiresAllNonPlatformDeps(self):
        """
        L{_EXTRAS_REQUIRE}'s C{all_non_platform} extra contains setuptools
        requirements for all of Twisted's optional dependencies which work on
        all supported operating systems.
        """
        self._assertCommonOptionalDeps(_EXTRAS_REQUIRE['all_non_platform'])
    def test_extrasRequiresOsxPlatformDeps(self):
        """
        L{_EXTRAS_REQUIRE}'s C{osx_platform} extra contains setuptools
        requirements for all of Twisted's optional dependencies usable on the
        Mac OS X platform.
        """
        deps = _EXTRAS_REQUIRE['osx_platform']
        self._assertCommonOptionalDeps(deps)
        self.assertIn('pyobjc', deps)
    def test_extrasRequiresWindowsPlatformDeps(self):
        """
        L{_EXTRAS_REQUIRE}'s C{windows_platform} extra contains setuptools
        requirements for all of Twisted's optional dependencies usable on the
        Microsoft Windows platform.
        """
        deps = _EXTRAS_REQUIRE['windows_platform']
        self._assertCommonOptionalDeps(deps)
        self.assertIn('pypiwin32', deps)
class GetExtensionsTests(TestCase):
    """
    Tests for L{dist.getExtensions}.
    """
    # %s is substituted with the dotted extension name; getExtensions later
    # executes the generated setup.py to discover its 'extensions' local.
    setupTemplate = (
        "from twisted.python.dist import ConditionalExtension\n"
        "extensions = [\n"
        "    ConditionalExtension(\n"
        "        '%s', ['twisted/some/thing.c'],\n"
        "        condition=lambda builder: True)\n"
        "    ]\n")
    def setUp(self):
        self.basedir = FilePath(self.mktemp()).child("twisted")
        self.basedir.makedirs()
        # getExtensions scans relative to the working directory, so chdir
        # into the fake tree; the cleanup restores the previous cwd.
        self.addCleanup(os.chdir, os.getcwd())
        os.chdir(self.basedir.parent().path)
    def writeSetup(self, name, *path):
        """
        Write out a C{setup.py} file to a location determined by
        L{self.basedir} and L{path}. L{self.setupTemplate} is used to
        generate its contents.
        """
        outdir = self.basedir.descendant(path)
        outdir.makedirs()
        setup = outdir.child("setup.py")
        setup.setContent(self.setupTemplate % (name,))
    def writeEmptySetup(self, *path):
        """
        Write out an empty C{setup.py} file to a location determined by
        L{self.basedir} and L{path}.
        """
        outdir = self.basedir.descendant(path)
        outdir.makedirs()
        outdir.child("setup.py").setContent("")
    def assertExtensions(self, expected):
        """
        Assert that the given names match the (sorted) names of discovered
        extensions.
        """
        extensions = dist.getExtensions()
        names = [extension.name for extension in extensions]
        self.assertEqual(sorted(names), expected)
    def test_getExtensions(self):
        """
        Files named I{setup.py} in I{twisted/topfiles} and I{twisted/*/topfiles}
        are executed with L{execfile} in order to discover the extensions they
        declare.
        """
        self.writeSetup("twisted.transmutate", "topfiles")
        self.writeSetup("twisted.tele.port", "tele", "topfiles")
        self.assertExtensions(["twisted.tele.port", "twisted.transmutate"])
    def test_getExtensionsTooDeep(self):
        """
        Files named I{setup.py} in I{topfiles} directories are not considered if
        they are too deep in the directory hierarchy.
        """
        self.writeSetup("twisted.trans.mog.rify", "trans", "mog", "topfiles")
        self.assertExtensions([])
    def test_getExtensionsNotTopfiles(self):
        """
        The folder in which I{setup.py} is discovered must be called I{topfiles}
        otherwise it is ignored.
        """
        self.writeSetup("twisted.metamorphosis", "notfiles")
        self.assertExtensions([])
    def test_getExtensionsNotSupportedOnJava(self):
        """
        Extensions are not supported on Java-based platforms.
        """
        # Restore the real sys.platform once the test finishes.
        self.addCleanup(setattr, sys, "platform", sys.platform)
        sys.platform = "java"
        self.writeSetup("twisted.sorcery", "topfiles")
        self.assertExtensions([])
    def test_getExtensionsExtensionsLocalIsOptional(self):
        """
        It is acceptable for extensions to not define the C{extensions} local
        variable.
        """
        self.writeEmptySetup("twisted.necromancy", "topfiles")
        self.assertExtensions([])
class GetVersionTests(TestCase):
    """
    Tests for L{dist.getVersion}.
    """
    def setUp(self):
        self.dirname = self.mktemp()
        os.mkdir(self.dirname)
    def test_getVersionCore(self):
        """
        Test that getting the version of core reads from the
        [base]/_version.py file.
        """
        # Use a context manager so the handle is closed (and flushed) even
        # if the write fails, instead of bare open()/write()/close().
        with open(os.path.join(self.dirname, "_version.py"), "w") as f:
            f.write("""
from twisted.python import versions
version = versions.Version("twisted", 0, 1, 2)
""")
        self.assertEqual(dist.getVersion("core", base=self.dirname), "0.1.2")
    def test_getVersionOther(self):
        """
        Test that getting the version of a non-core project reads from
        the [base]/[projname]/_version.py file.
        """
        os.mkdir(os.path.join(self.dirname, "blat"))
        with open(os.path.join(self.dirname, "blat", "_version.py"), "w") as f:
            f.write("""
from twisted.python import versions
version = versions.Version("twisted.blat", 9, 8, 10)
""")
        self.assertEqual(dist.getVersion("blat", base=self.dirname), "9.8.10")
class GetScriptsTests(TestCase):
    """
    Tests for L{dist.getScripts} which returns the scripts which should be
    included in the distribution of a project.
    """
    def test_scriptsInSVN(self):
        """
        getScripts should return the scripts associated with a project
        in the context of Twisted SVN.
        """
        basedir = self.mktemp()
        # makedirs creates the whole bin/proj chain in one call instead of
        # three separate mkdir calls.
        os.makedirs(os.path.join(basedir, 'bin', 'proj'))
        # Context manager guarantees the handle is closed even on error.
        with open(os.path.join(basedir, 'bin', 'proj', 'exy'), 'w') as f:
            f.write('yay')
        scripts = dist.getScripts('proj', basedir=basedir)
        self.assertEqual(len(scripts), 1)
        self.assertEqual(os.path.basename(scripts[0]), 'exy')

    def test_excludedPreamble(self):
        """
        L{dist.getScripts} includes neither C{"_preamble.py"} nor
        C{"_preamble.pyc"}.
        """
        basedir = FilePath(self.mktemp())
        # Renamed from "bin" to avoid shadowing the builtin of that name.
        binDir = basedir.child('bin')
        binDir.makedirs()
        binDir.child('_preamble.py').setContent('some preamble code\n')
        binDir.child('_preamble.pyc').setContent('some preamble byte code\n')
        binDir.child('program').setContent('good program code\n')
        scripts = dist.getScripts("", basedir=basedir.path)
        self.assertEqual(scripts, [binDir.child('program').path])

    def test_scriptsInRelease(self):
        """
        getScripts should return the scripts associated with a project
        in the context of a released subproject tarball.
        """
        basedir = self.mktemp()
        os.makedirs(os.path.join(basedir, 'bin'))
        with open(os.path.join(basedir, 'bin', 'exy'), 'w') as f:
            f.write('yay')
        scripts = dist.getScripts('proj', basedir=basedir)
        self.assertEqual(len(scripts), 1)
        self.assertEqual(os.path.basename(scripts[0]), 'exy')

    def test_noScriptsInSVN(self):
        """
        When calling getScripts for a project which doesn't actually
        have any scripts, in the context of an SVN checkout, an
        empty list should be returned.
        """
        basedir = self.mktemp()
        os.makedirs(os.path.join(basedir, 'bin', 'otherproj'))
        scripts = dist.getScripts('noscripts', basedir=basedir)
        self.assertEqual(scripts, [])

    def test_getScriptsTopLevel(self):
        """
        Passing the empty string to getScripts returns scripts that are (only)
        in the top level bin directory.
        """
        basedir = FilePath(self.mktemp())
        basedir.createDirectory()
        bindir = basedir.child("bin")
        bindir.createDirectory()
        included = bindir.child("included")
        included.setContent("yay included")
        subdir = bindir.child("subdir")
        subdir.createDirectory()
        subdir.child("not-included").setContent("not included")
        scripts = dist.getScripts("", basedir=basedir.path)
        self.assertEqual(scripts, [included.path])

    def test_noScriptsInSubproject(self):
        """
        When calling getScripts for a project which doesn't actually
        have any scripts in the context of that project's individual
        project structure, an empty list should be returned.
        """
        basedir = self.mktemp()
        os.mkdir(basedir)
        scripts = dist.getScripts('noscripts', basedir=basedir)
        self.assertEqual(scripts, [])
class DummyCommand:
    """
    A stand-in for a distutils command: it stores every keyword argument
    given at construction time as an instance attribute and provides a
    no-op C{ensure_finalized}.
    """
    def __init__(self, **kwargs):
        # Each keyword becomes an attribute of the fake command object.
        for name in kwargs:
            setattr(self, name, kwargs[name])

    def ensure_finalized(self):
        # Real commands finalize their options here; the dummy needs nothing.
        pass
class BuildScriptsTests(TestCase):
    """
    Tests for L{dist.build_scripts_twisted}.
    """
    def setUp(self):
        # Scratch source and target directories; the command is run from
        # inside the source directory, and the original working directory
        # is restored on cleanup.
        self.source = FilePath(self.mktemp())
        self.target = FilePath(self.mktemp())
        self.source.makedirs()
        self.addCleanup(os.chdir, os.getcwd())
        os.chdir(self.source.path)

    def buildScripts(self):
        """
        Write 3 types of scripts and run the L{build_scripts_twisted}
        command.
        """
        # One Python script without extension, one with a .py extension,
        # and one shell script: together they exercise the renaming rules.
        self.writeScript(self.source, "script1",
                         ("#! /usr/bin/env python2.7\n"
                          "# bogus script w/ Python sh-bang\n"
                          "pass\n"))
        self.writeScript(self.source, "script2.py",
                         ("#!/usr/bin/python\n"
                          "# bogus script w/ Python sh-bang\n"
                          "pass\n"))
        self.writeScript(self.source, "shell.sh",
                         ("#!/bin/sh\n"
                          "# bogus shell script w/ sh-bang\n"
                          "exit 0\n"))
        expected = ['script1', 'script2.py', 'shell.sh']
        cmd = self.getBuildScriptsCmd(self.target,
                                      [self.source.child(fn).path
                                       for fn in expected])
        cmd.finalize_options()
        cmd.run()
        # Return the names actually produced in the target directory.
        return self.target.listdir()

    def getBuildScriptsCmd(self, target, scripts):
        """
        Create a distutils L{Distribution} with a L{DummyCommand} and wrap it
        in L{build_scripts_twisted}.

        @type target: L{FilePath}
        """
        dist = Distribution()
        dist.scripts = scripts
        # build_scripts_twisted reads its options from the "build" command
        # object, which we fake with a DummyCommand.
        dist.command_obj["build"] = DummyCommand(
            build_scripts = target.path,
            force = 1,
            executable = sys.executable
        )
        return build_scripts_twisted(dist)

    def writeScript(self, dir, name, text):
        """
        Write the script to disk.
        """
        with open(dir.child(name).path, "w") as f:
            f.write(text)

    def test_notWindows(self):
        """
        L{build_scripts_twisted} does not rename scripts on non-Windows
        platforms.
        """
        # Any os.name other than "nt" counts as non-Windows here.
        self.patch(os, "name", "twisted")
        built = self.buildScripts()
        for name in ['script1', 'script2.py', 'shell.sh']:
            self.assertTrue(name in built)

    def test_windows(self):
        """
        L{build_scripts_twisted} renames scripts so they end with '.py' on
        the Windows platform.
        """
        self.patch(os, "name", "nt")
        built = self.buildScripts()
        # Names already ending in .py are left alone; others get .py added.
        for name in ['script1.py', 'script2.py', 'shell.sh.py']:
            self.assertTrue(name in built)
class FakeModule(object):
    """
    A fake module, suitable for dependency injection in testing.
    """
    def __init__(self, attrs):
        """
        Initializes a fake module.

        @param attrs: The attrs that will be accessible on the module.
        @type attrs: C{dict} of C{str} (Python names) to objects
        """
        self._attrs = attrs

    def __getattr__(self, name):
        """
        Gets an attribute of this fake module from its attrs.

        @raise AttributeError: When the requested attribute is missing.
        """
        try:
            return self._attrs[name]
        except KeyError:
            # Include the attribute name so failures are diagnosable; the
            # previous bare AttributeError() gave no hint what was missing.
            raise AttributeError(name)
# Stand-in "platform" modules: each exposes only python_implementation(),
# reporting CPython and a non-CPython implementation respectively.
fakeCPythonPlatform = FakeModule({"python_implementation": lambda: "CPython"})
fakeOtherPlatform = FakeModule({"python_implementation": lambda: "lvhpy"})
class WithPlatformTests(TestCase):
    """
    Tests for L{_checkCPython} when used with a (fake) C{platform} module.
    """
    def test_cpython(self):
        """
        When the injected C{platform.python_implementation} reports CPython,
        L{_checkCPython} returns C{True}.
        """
        self.assertTrue(dist._checkCPython(platform=fakeCPythonPlatform))

    def test_other(self):
        """
        When the injected C{platform.python_implementation} reports some
        other implementation, L{_checkCPython} returns C{False}.
        """
        self.assertFalse(dist._checkCPython(platform=fakeOtherPlatform))
|
|
# coding: utf-8
from sqlalchemy import and_
from sqlalchemy import bindparam
from sqlalchemy import Computed
from sqlalchemy import exc
from sqlalchemy import except_
from sqlalchemy import ForeignKey
from sqlalchemy import func
from sqlalchemy import Index
from sqlalchemy import Integer
from sqlalchemy import literal_column
from sqlalchemy import MetaData
from sqlalchemy import or_
from sqlalchemy import outerjoin
from sqlalchemy import schema
from sqlalchemy import select
from sqlalchemy import Sequence
from sqlalchemy import sql
from sqlalchemy import String
from sqlalchemy import testing
from sqlalchemy import text
from sqlalchemy import type_coerce
from sqlalchemy import TypeDecorator
from sqlalchemy import union
from sqlalchemy.dialects.oracle import base as oracle
from sqlalchemy.dialects.oracle import cx_oracle
from sqlalchemy.engine import default
from sqlalchemy.sql import column
from sqlalchemy.sql import quoted_name
from sqlalchemy.sql import table
from sqlalchemy.testing import assert_raises_message
from sqlalchemy.testing import AssertsCompiledSQL
from sqlalchemy.testing import eq_
from sqlalchemy.testing import fixtures
from sqlalchemy.testing.schema import Column
from sqlalchemy.testing.schema import Table
class CompileTest(fixtures.TestBase, AssertsCompiledSQL):
__dialect__ = "oracle"
def test_true_false(self):
    """Oracle has no boolean literals: true/false compile to 1/0."""
    self.assert_compile(sql.true(), "1")
    self.assert_compile(sql.false(), "0")
def test_owner(self):
    """Tables given an explicit ``schema`` render schema-qualified in joins."""
    meta = MetaData()
    parent = Table(
        "parent",
        meta,
        Column("id", Integer, primary_key=True),
        Column("name", String(50)),
        schema="ed",
    )
    child = Table(
        "child",
        meta,
        Column("id", Integer, primary_key=True),
        Column("parent_id", Integer, ForeignKey("ed.parent.id")),
        schema="ed",
    )
    # Both the table names and the ON-clause columns carry the owner prefix.
    self.assert_compile(
        parent.join(child),
        "ed.parent JOIN ed.child ON ed.parent.id = " "ed.child.parent_id",
    )
def test_subquery(self):
    """Selecting from a SELECT wraps it as an anonymous FROM subquery."""
    some_table = table("sometable", column("col1"), column("col2"))
    inner = select([some_table])
    outer = select([inner.c.col1, inner.c.col2])
    self.assert_compile(
        outer,
        "SELECT col1, col2 FROM (SELECT "
        "sometable.col1 AS col1, sometable.col2 "
        "AS col2 FROM sometable)",
    )
def test_bindparam_quote(self):
    """test that bound parameters take on quoting for reserved words,
    column names quote flag enabled."""
    # note: this is only in cx_oracle at the moment. not sure
    # what other hypothetical oracle dialects might need

    # "option" is a reserved word, so the bind name itself is quoted;
    # an ordinary name is left untouched.
    self.assert_compile(bindparam("option"), ':"option"')
    self.assert_compile(bindparam("plain"), ":plain")
    # With quote=True on the column, the generated INSERT/UPDATE binds
    # are quoted as well.
    t = Table("s", MetaData(), Column("plain", Integer, quote=True))
    self.assert_compile(
        t.insert().values(plain=5),
        'INSERT INTO s ("plain") VALUES (:"plain")',
    )
    self.assert_compile(
        t.update().values(plain=5), 'UPDATE s SET "plain"=:"plain"'
    )
def test_bindparam_quote_raise_on_expanding(self):
    """An expanding (IN) bindparam whose name would need quoting raises
    CompileError rather than emitting invalid SQL."""
    assert_raises_message(
        exc.CompileError,
        "Can't use expanding feature with parameter name 'uid' on "
        "Oracle; it requires quoting which is not supported in this "
        "context",
        bindparam("uid", expanding=True).compile,
        dialect=cx_oracle.dialect(),
    )
def test_cte(self):
    """A recursive CTE with Oracle-only SEARCH/CYCLE suffixes compiles."""
    part = table(
        "part", column("part"), column("sub_part"), column("quantity")
    )
    # Anchor member; the suffixes are gated to dialect="oracle" so they
    # only render for this dialect.
    included_parts = (
        select([part.c.sub_part, part.c.part, part.c.quantity])
        .where(part.c.part == "p1")
        .cte(name="included_parts", recursive=True)
        .suffix_with(
            "search depth first by part set ord1",
            "cycle part set y_cycle to 1 default 0",
            dialect="oracle",
        )
    )
    incl_alias = included_parts.alias("pr1")
    parts_alias = part.alias("p")
    # Recursive member, UNION ALL'ed onto the anchor.
    included_parts = included_parts.union_all(
        select(
            [
                parts_alias.c.sub_part,
                parts_alias.c.part,
                parts_alias.c.quantity,
            ]
        ).where(parts_alias.c.part == incl_alias.c.sub_part)
    )
    q = select(
        [
            included_parts.c.sub_part,
            func.sum(included_parts.c.quantity).label("total_quantity"),
        ]
    ).group_by(included_parts.c.sub_part)
    self.assert_compile(
        q,
        "WITH included_parts(sub_part, part, quantity) AS "
        "(SELECT part.sub_part AS sub_part, part.part AS part, "
        "part.quantity AS quantity FROM part WHERE part.part = :part_1 "
        "UNION ALL SELECT p.sub_part AS sub_part, p.part AS part, "
        "p.quantity AS quantity FROM part p, included_parts pr1 "
        "WHERE p.part = pr1.sub_part) "
        "search depth first by part set ord1 cycle part set "
        "y_cycle to 1 default 0 "
        "SELECT included_parts.sub_part, sum(included_parts.quantity) "
        "AS total_quantity FROM included_parts "
        "GROUP BY included_parts.sub_part",
    )
def test_limit(self):
    """LIMIT/OFFSET are emulated via nested ROWNUM subqueries."""
    t = table("sometable", column("col1"), column("col2"))
    s = select([t])
    c = s.compile(dialect=oracle.OracleDialect())
    assert t.c.col1 in set(c._create_result_map()["col1"][1])
    # limit + offset: inner ROWNUM cap plus outer ora_rn filter.
    s = select([t]).limit(10).offset(20)
    self.assert_compile(
        s,
        "SELECT col1, col2 FROM (SELECT col1, "
        "col2, ROWNUM AS ora_rn FROM (SELECT "
        "sometable.col1 AS col1, sometable.col2 AS "
        "col2 FROM sometable) WHERE ROWNUM <= "
        ":param_1 + :param_2) WHERE ora_rn > :param_2",
        checkparams={"param_1": 10, "param_2": 20},
    )
    c = s.compile(dialect=oracle.OracleDialect())
    eq_(len(c._result_columns), 2)
    assert t.c.col1 in set(c._create_result_map()["col1"][1])
    # Selecting from the limited select adds one more wrapping level.
    s2 = select([s.c.col1, s.c.col2])
    self.assert_compile(
        s2,
        "SELECT col1, col2 FROM (SELECT col1, col2 "
        "FROM (SELECT col1, col2, ROWNUM AS ora_rn "
        "FROM (SELECT sometable.col1 AS col1, "
        "sometable.col2 AS col2 FROM sometable) "
        "WHERE ROWNUM <= :param_1 + :param_2) "
        "WHERE ora_rn > :param_2)",
        checkparams={"param_1": 10, "param_2": 20},
    )
    self.assert_compile(
        s2,
        "SELECT col1, col2 FROM (SELECT col1, col2 "
        "FROM (SELECT col1, col2, ROWNUM AS ora_rn "
        "FROM (SELECT sometable.col1 AS col1, "
        "sometable.col2 AS col2 FROM sometable) "
        "WHERE ROWNUM <= :param_1 + :param_2) "
        "WHERE ora_rn > :param_2)",
    )
    c = s2.compile(dialect=oracle.OracleDialect())
    eq_(len(c._result_columns), 2)
    assert s.c.col1 in set(c._create_result_map()["col1"][1])
    # ORDER BY stays inside the innermost subquery.
    s = select([t]).limit(10).offset(20).order_by(t.c.col2)
    self.assert_compile(
        s,
        "SELECT col1, col2 FROM (SELECT col1, "
        "col2, ROWNUM AS ora_rn FROM (SELECT "
        "sometable.col1 AS col1, sometable.col2 AS "
        "col2 FROM sometable ORDER BY "
        "sometable.col2) WHERE ROWNUM <= "
        ":param_1 + :param_2) WHERE ora_rn > :param_2",
        checkparams={"param_1": 10, "param_2": 20},
    )
    c = s.compile(dialect=oracle.OracleDialect())
    eq_(len(c._result_columns), 2)
    assert t.c.col1 in set(c._create_result_map()["col1"][1])
    # FOR UPDATE renders outside the ROWNUM wrapper.
    s = select([t]).with_for_update().limit(10).order_by(t.c.col2)
    self.assert_compile(
        s,
        "SELECT col1, col2 FROM (SELECT "
        "sometable.col1 AS col1, sometable.col2 AS "
        "col2 FROM sometable ORDER BY "
        "sometable.col2) WHERE ROWNUM <= :param_1 "
        "FOR UPDATE",
    )
    s = (
        select([t])
        .with_for_update()
        .limit(10)
        .offset(20)
        .order_by(t.c.col2)
    )
    self.assert_compile(
        s,
        "SELECT col1, col2 FROM (SELECT col1, "
        "col2, ROWNUM AS ora_rn FROM (SELECT "
        "sometable.col1 AS col1, sometable.col2 AS "
        "col2 FROM sometable ORDER BY "
        "sometable.col2) WHERE ROWNUM <= "
        ":param_1 + :param_2) WHERE ora_rn > :param_2 FOR "
        "UPDATE",
    )
def test_limit_special_quoting(self):
    """Oracle-specific test for #4730.

    Even though this issue is generic, test the originally reported Oracle
    use case.
    """
    # Label text that requires quoting is quoted in the outer SELECT.
    col = literal_column("SUM(ABC)").label("SUM(ABC)")
    tbl = table("my_table")
    query = select([col]).select_from(tbl).order_by(col).limit(100)
    self.assert_compile(
        query,
        'SELECT "SUM(ABC)" FROM '
        '(SELECT SUM(ABC) AS "SUM(ABC)" '
        "FROM my_table ORDER BY SUM(ABC)) "
        "WHERE ROWNUM <= :param_1",
    )
    # Same with an explicitly force-quoted name.
    col = literal_column("SUM(ABC)").label(quoted_name("SUM(ABC)", True))
    tbl = table("my_table")
    query = select([col]).select_from(tbl).order_by(col).limit(100)
    self.assert_compile(
        query,
        'SELECT "SUM(ABC)" FROM '
        '(SELECT SUM(ABC) AS "SUM(ABC)" '
        "FROM my_table ORDER BY SUM(ABC)) "
        "WHERE ROWNUM <= :param_1",
    )
    # A trailing underscore in the label text behaves the same way.
    col = literal_column("SUM(ABC)").label("SUM(ABC)_")
    tbl = table("my_table")
    query = select([col]).select_from(tbl).order_by(col).limit(100)
    self.assert_compile(
        query,
        'SELECT "SUM(ABC)_" FROM '
        '(SELECT SUM(ABC) AS "SUM(ABC)_" '
        "FROM my_table ORDER BY SUM(ABC)) "
        "WHERE ROWNUM <= :param_1",
    )
    col = literal_column("SUM(ABC)").label(quoted_name("SUM(ABC)_", True))
    tbl = table("my_table")
    query = select([col]).select_from(tbl).order_by(col).limit(100)
    self.assert_compile(
        query,
        'SELECT "SUM(ABC)_" FROM '
        '(SELECT SUM(ABC) AS "SUM(ABC)_" '
        "FROM my_table ORDER BY SUM(ABC)) "
        "WHERE ROWNUM <= :param_1",
    )
def test_for_update(self):
    """FOR UPDATE variants: plain, OF <cols>, NOWAIT, SKIP LOCKED;
    PostgreSQL-style read/key_share flags are ignored on Oracle."""
    table1 = table(
        "mytable", column("myid"), column("name"), column("description")
    )
    self.assert_compile(
        table1.select(table1.c.myid == 7).with_for_update(),
        "SELECT mytable.myid, mytable.name, mytable.description "
        "FROM mytable WHERE mytable.myid = :myid_1 FOR UPDATE",
    )
    self.assert_compile(
        table1.select(table1.c.myid == 7).with_for_update(
            of=table1.c.myid
        ),
        "SELECT mytable.myid, mytable.name, mytable.description "
        "FROM mytable WHERE mytable.myid = :myid_1 "
        "FOR UPDATE OF mytable.myid",
    )
    self.assert_compile(
        table1.select(table1.c.myid == 7).with_for_update(nowait=True),
        "SELECT mytable.myid, mytable.name, mytable.description "
        "FROM mytable WHERE mytable.myid = :myid_1 FOR UPDATE NOWAIT",
    )
    self.assert_compile(
        table1.select(table1.c.myid == 7).with_for_update(
            nowait=True, of=table1.c.myid
        ),
        "SELECT mytable.myid, mytable.name, mytable.description "
        "FROM mytable WHERE mytable.myid = :myid_1 "
        "FOR UPDATE OF mytable.myid NOWAIT",
    )
    self.assert_compile(
        table1.select(table1.c.myid == 7).with_for_update(
            nowait=True, of=[table1.c.myid, table1.c.name]
        ),
        "SELECT mytable.myid, mytable.name, mytable.description "
        "FROM mytable WHERE mytable.myid = :myid_1 FOR UPDATE OF "
        "mytable.myid, mytable.name NOWAIT",
    )
    self.assert_compile(
        table1.select(table1.c.myid == 7).with_for_update(
            skip_locked=True, of=[table1.c.myid, table1.c.name]
        ),
        "SELECT mytable.myid, mytable.name, mytable.description "
        "FROM mytable WHERE mytable.myid = :myid_1 FOR UPDATE OF "
        "mytable.myid, mytable.name SKIP LOCKED",
    )
    # key_share has no effect
    self.assert_compile(
        table1.select(table1.c.myid == 7).with_for_update(key_share=True),
        "SELECT mytable.myid, mytable.name, mytable.description "
        "FROM mytable WHERE mytable.myid = :myid_1 FOR UPDATE",
    )
    # read has no effect
    self.assert_compile(
        table1.select(table1.c.myid == 7).with_for_update(
            read=True, key_share=True
        ),
        "SELECT mytable.myid, mytable.name, mytable.description "
        "FROM mytable WHERE mytable.myid = :myid_1 FOR UPDATE",
    )
    # OF columns from an alias render against the alias name.
    ta = table1.alias()
    self.assert_compile(
        ta.select(ta.c.myid == 7).with_for_update(
            of=[ta.c.myid, ta.c.name]
        ),
        "SELECT mytable_1.myid, mytable_1.name, mytable_1.description "
        "FROM mytable mytable_1 "
        "WHERE mytable_1.myid = :myid_1 FOR UPDATE OF "
        "mytable_1.myid, mytable_1.name",
    )
def test_for_update_of_w_limit_adaption_col_present(self):
    """FOR UPDATE OF a selected column adapts to the ROWNUM subquery label."""
    table1 = table("mytable", column("myid"), column("name"))
    self.assert_compile(
        select([table1.c.myid, table1.c.name])
        .where(table1.c.myid == 7)
        .with_for_update(nowait=True, of=table1.c.name)
        .limit(10),
        "SELECT myid, name FROM "
        "(SELECT mytable.myid AS myid, mytable.name AS name "
        "FROM mytable WHERE mytable.myid = :myid_1) "
        "WHERE ROWNUM <= :param_1 FOR UPDATE OF name NOWAIT",
    )
def test_for_update_of_w_limit_adaption_col_unpresent(self):
    """A FOR UPDATE OF column not in the select list is added to the
    inner subquery so it can be referenced."""
    table1 = table("mytable", column("myid"), column("name"))
    self.assert_compile(
        select([table1.c.myid])
        .where(table1.c.myid == 7)
        .with_for_update(nowait=True, of=table1.c.name)
        .limit(10),
        "SELECT myid FROM "
        "(SELECT mytable.myid AS myid, mytable.name AS name "
        "FROM mytable WHERE mytable.myid = :myid_1) "
        "WHERE ROWNUM <= :param_1 FOR UPDATE OF name NOWAIT",
    )
def test_for_update_of_w_limit_offset_adaption_col_present(self):
    """FOR UPDATE OF adapts through both ROWNUM wrapping levels when
    the column is part of the select list."""
    table1 = table("mytable", column("myid"), column("name"))
    self.assert_compile(
        select([table1.c.myid, table1.c.name])
        .where(table1.c.myid == 7)
        .with_for_update(nowait=True, of=table1.c.name)
        .limit(10)
        .offset(50),
        "SELECT myid, name FROM (SELECT myid, name, ROWNUM AS ora_rn "
        "FROM (SELECT mytable.myid AS myid, mytable.name AS name "
        "FROM mytable WHERE mytable.myid = :myid_1) "
        "WHERE ROWNUM <= :param_1 + :param_2) WHERE ora_rn > :param_2 "
        "FOR UPDATE OF name NOWAIT",
    )
def test_for_update_of_w_limit_offset_adaption_col_unpresent(self):
    """An OF column missing from the select list is carried through the
    intermediate ROWNUM subquery (after ora_rn) so FOR UPDATE can see it."""
    table1 = table("mytable", column("myid"), column("name"))
    self.assert_compile(
        select([table1.c.myid])
        .where(table1.c.myid == 7)
        .with_for_update(nowait=True, of=table1.c.name)
        .limit(10)
        .offset(50),
        "SELECT myid FROM (SELECT myid, ROWNUM AS ora_rn, name "
        "FROM (SELECT mytable.myid AS myid, mytable.name AS name "
        "FROM mytable WHERE mytable.myid = :myid_1) "
        "WHERE ROWNUM <= :param_1 + :param_2) WHERE ora_rn > :param_2 "
        "FOR UPDATE OF name NOWAIT",
    )
def test_for_update_of_w_limit_offset_adaption_partial_col_unpresent(self):
    """Mixed case: one OF column selected, one not; only the missing one
    is appended to the intermediate subquery's column list."""
    table1 = table("mytable", column("myid"), column("foo"), column("bar"))
    self.assert_compile(
        select([table1.c.myid, table1.c.bar])
        .where(table1.c.myid == 7)
        .with_for_update(nowait=True, of=[table1.c.foo, table1.c.bar])
        .limit(10)
        .offset(50),
        "SELECT myid, bar FROM (SELECT myid, bar, ROWNUM AS ora_rn, "
        "foo FROM (SELECT mytable.myid AS myid, mytable.bar AS bar, "
        "mytable.foo AS foo FROM mytable WHERE mytable.myid = :myid_1) "
        "WHERE ROWNUM <= :param_1 + :param_2) WHERE ora_rn > :param_2 "
        "FOR UPDATE OF foo, bar NOWAIT",
    )
def test_limit_preserves_typing_information(self):
    """The ROWNUM wrapping keeps the custom column type in the result map."""
    class MyType(TypeDecorator):
        impl = Integer

    stmt = select([type_coerce(column("x"), MyType).label("foo")]).limit(1)
    dialect = oracle.dialect()
    compiled = stmt.compile(dialect=dialect)
    # The last element of the result-map entry is the column's type object.
    assert isinstance(compiled._create_result_map()["foo"][-1], MyType)
def test_use_binds_for_limits_disabled(self):
    """With use_binds_for_limits=False, limit/offset render as literals."""
    t = table("sometable", column("col1"), column("col2"))
    dialect = oracle.OracleDialect(use_binds_for_limits=False)
    self.assert_compile(
        select([t]).limit(10),
        "SELECT col1, col2 FROM (SELECT sometable.col1 AS col1, "
        "sometable.col2 AS col2 FROM sometable) WHERE ROWNUM <= 10",
        dialect=dialect,
    )
    self.assert_compile(
        select([t]).offset(10),
        "SELECT col1, col2 FROM (SELECT col1, col2, ROWNUM AS ora_rn "
        "FROM (SELECT sometable.col1 AS col1, sometable.col2 AS col2 "
        "FROM sometable)) WHERE ora_rn > 10",
        dialect=dialect,
    )
    # limit + offset: the inner cap is the pre-added sum (20).
    self.assert_compile(
        select([t]).limit(10).offset(10),
        "SELECT col1, col2 FROM (SELECT col1, col2, ROWNUM AS ora_rn "
        "FROM (SELECT sometable.col1 AS col1, sometable.col2 AS col2 "
        "FROM sometable) WHERE ROWNUM <= 20) WHERE ora_rn > 10",
        dialect=dialect,
    )
def test_use_binds_for_limits_enabled(self):
    """With use_binds_for_limits=True, limit/offset render as bind params."""
    t = table("sometable", column("col1"), column("col2"))
    dialect = oracle.OracleDialect(use_binds_for_limits=True)
    self.assert_compile(
        select([t]).limit(10),
        "SELECT col1, col2 FROM (SELECT sometable.col1 AS col1, "
        "sometable.col2 AS col2 FROM sometable) WHERE ROWNUM "
        "<= :param_1",
        dialect=dialect,
    )
    self.assert_compile(
        select([t]).offset(10),
        "SELECT col1, col2 FROM (SELECT col1, col2, ROWNUM AS ora_rn "
        "FROM (SELECT sometable.col1 AS col1, sometable.col2 AS col2 "
        "FROM sometable)) WHERE ora_rn > :param_1",
        dialect=dialect,
    )
    # limit + offset: the sum is computed in SQL, not pre-added in Python.
    self.assert_compile(
        select([t]).limit(10).offset(10),
        "SELECT col1, col2 FROM (SELECT col1, col2, ROWNUM AS ora_rn "
        "FROM (SELECT sometable.col1 AS col1, sometable.col2 AS col2 "
        "FROM sometable) WHERE ROWNUM <= :param_1 + :param_2) "
        "WHERE ora_rn > :param_2",
        dialect=dialect,
        checkparams={"param_1": 10, "param_2": 10},
    )
def test_long_labels(self):
    """Labels beyond max_identifier_length are truncated/anonymized;
    the Oracle dialect additionally omits the AS keyword for table aliases."""
    dialect = default.DefaultDialect()
    dialect.max_identifier_length = 30
    ora_dialect = oracle.dialect()
    m = MetaData()
    a_table = Table(
        "thirty_characters_table_xxxxxx",
        m,
        Column("id", Integer, primary_key=True),
    )
    other_table = Table(
        "other_thirty_characters_table_",
        m,
        Column("id", Integer, primary_key=True),
        Column(
            "thirty_characters_table_id",
            Integer,
            ForeignKey("thirty_characters_table_xxxxxx.id"),
            primary_key=True,
        ),
    )
    anon = a_table.alias()
    # Generic dialect: "... JOIN tbl AS alias ..."
    self.assert_compile(
        select([other_table, anon])
        .select_from(other_table.outerjoin(anon))
        .apply_labels(),
        "SELECT other_thirty_characters_table_.id "
        "AS other_thirty_characters__1, "
        "other_thirty_characters_table_.thirty_char"
        "acters_table_id AS other_thirty_characters"
        "__2, thirty_characters_table__1.id AS "
        "thirty_characters_table__3 FROM "
        "other_thirty_characters_table_ LEFT OUTER "
        "JOIN thirty_characters_table_xxxxxx AS "
        "thirty_characters_table__1 ON "
        "thirty_characters_table__1.id = "
        "other_thirty_characters_table_.thirty_char"
        "acters_table_id",
        dialect=dialect,
    )
    # Oracle dialect: same SQL but no AS before the table alias.
    self.assert_compile(
        select([other_table, anon])
        .select_from(other_table.outerjoin(anon))
        .apply_labels(),
        "SELECT other_thirty_characters_table_.id "
        "AS other_thirty_characters__1, "
        "other_thirty_characters_table_.thirty_char"
        "acters_table_id AS other_thirty_characters"
        "__2, thirty_characters_table__1.id AS "
        "thirty_characters_table__3 FROM "
        "other_thirty_characters_table_ LEFT OUTER "
        "JOIN thirty_characters_table_xxxxxx "
        "thirty_characters_table__1 ON "
        "thirty_characters_table__1.id = "
        "other_thirty_characters_table_.thirty_char"
        "acters_table_id",
        dialect=ora_dialect,
    )
def test_outer_join(self):
    """Outer joins: ANSI form vs. legacy Oracle (+) form under
    use_ansi=False, including chained joins, limits, and subqueries."""
    table1 = table(
        "mytable",
        column("myid", Integer),
        column("name", String),
        column("description", String),
    )
    table2 = table(
        "myothertable",
        column("otherid", Integer),
        column("othername", String),
    )
    table3 = table(
        "thirdtable",
        column("userid", Integer),
        column("otherstuff", String),
    )
    # use_ansi=False: the join moves into the WHERE clause with (+).
    query = select(
        [table1, table2],
        or_(
            table1.c.name == "fred",
            table1.c.myid == 10,
            table2.c.othername != "jack",
            text("EXISTS (select yay from foo where boo = lar)"),
        ),
        from_obj=[
            outerjoin(table1, table2, table1.c.myid == table2.c.otherid)
        ],
    )
    self.assert_compile(
        query,
        "SELECT mytable.myid, mytable.name, "
        "mytable.description, myothertable.otherid,"
        " myothertable.othername FROM mytable, "
        "myothertable WHERE (mytable.name = "
        ":name_1 OR mytable.myid = :myid_1 OR "
        "myothertable.othername != :othername_1 OR "
        "EXISTS (select yay from foo where boo = "
        "lar)) AND mytable.myid = "
        "myothertable.otherid(+)",
        dialect=oracle.OracleDialect(use_ansi=False),
    )
    # Chained outer joins, ANSI form.
    query = table1.outerjoin(
        table2, table1.c.myid == table2.c.otherid
    ).outerjoin(table3, table3.c.userid == table2.c.otherid)
    self.assert_compile(
        query.select(),
        "SELECT mytable.myid, mytable.name, "
        "mytable.description, myothertable.otherid,"
        " myothertable.othername, "
        "thirdtable.userid, thirdtable.otherstuff "
        "FROM mytable LEFT OUTER JOIN myothertable "
        "ON mytable.myid = myothertable.otherid "
        "LEFT OUTER JOIN thirdtable ON "
        "thirdtable.userid = myothertable.otherid",
    )
    # Same query, legacy (+) form.
    self.assert_compile(
        query.select(),
        "SELECT mytable.myid, mytable.name, "
        "mytable.description, myothertable.otherid,"
        " myothertable.othername, "
        "thirdtable.userid, thirdtable.otherstuff "
        "FROM mytable, myothertable, thirdtable "
        "WHERE thirdtable.userid(+) = "
        "myothertable.otherid AND mytable.myid = "
        "myothertable.otherid(+)",
        dialect=oracle.dialect(use_ansi=False),
    )
    # Plain inner joins get no (+) markers.
    query = table1.join(table2, table1.c.myid == table2.c.otherid).join(
        table3, table3.c.userid == table2.c.otherid
    )
    self.assert_compile(
        query.select(),
        "SELECT mytable.myid, mytable.name, "
        "mytable.description, myothertable.otherid,"
        " myothertable.othername, "
        "thirdtable.userid, thirdtable.otherstuff "
        "FROM mytable, myothertable, thirdtable "
        "WHERE thirdtable.userid = "
        "myothertable.otherid AND mytable.myid = "
        "myothertable.otherid",
        dialect=oracle.dialect(use_ansi=False),
    )
    # (+) joins combined with ROWNUM limit/offset wrapping.
    query = table1.join(
        table2, table1.c.myid == table2.c.otherid
    ).outerjoin(table3, table3.c.userid == table2.c.otherid)
    self.assert_compile(
        query.select().order_by(table1.c.name).limit(10).offset(5),
        "SELECT myid, name, description, otherid, "
        "othername, userid, otherstuff FROM "
        "(SELECT myid, name, description, otherid, "
        "othername, userid, otherstuff, ROWNUM AS "
        "ora_rn FROM (SELECT mytable.myid AS myid, "
        "mytable.name AS name, mytable.description "
        "AS description, myothertable.otherid AS "
        "otherid, myothertable.othername AS "
        "othername, thirdtable.userid AS userid, "
        "thirdtable.otherstuff AS otherstuff FROM "
        "mytable, myothertable, thirdtable WHERE "
        "thirdtable.userid(+) = "
        "myothertable.otherid AND mytable.myid = "
        "myothertable.otherid ORDER BY mytable.name) "
        "WHERE ROWNUM <= :param_1 + :param_2) "
        "WHERE ora_rn > :param_2",
        checkparams={"param_1": 10, "param_2": 5},
        dialect=oracle.dialect(use_ansi=False),
    )
    # Outer join against an aliased subquery, both forms.
    subq = (
        select([table1])
        .select_from(
            table1.outerjoin(table2, table1.c.myid == table2.c.otherid)
        )
        .alias()
    )
    q = select([table3]).select_from(
        table3.outerjoin(subq, table3.c.userid == subq.c.myid)
    )
    self.assert_compile(
        q,
        "SELECT thirdtable.userid, "
        "thirdtable.otherstuff FROM thirdtable "
        "LEFT OUTER JOIN (SELECT mytable.myid AS "
        "myid, mytable.name AS name, "
        "mytable.description AS description FROM "
        "mytable LEFT OUTER JOIN myothertable ON "
        "mytable.myid = myothertable.otherid) "
        "anon_1 ON thirdtable.userid = anon_1.myid",
        dialect=oracle.dialect(use_ansi=True),
    )
    self.assert_compile(
        q,
        "SELECT thirdtable.userid, "
        "thirdtable.otherstuff FROM thirdtable, "
        "(SELECT mytable.myid AS myid, "
        "mytable.name AS name, mytable.description "
        "AS description FROM mytable, myothertable "
        "WHERE mytable.myid = myothertable.otherid("
        "+)) anon_1 WHERE thirdtable.userid = "
        "anon_1.myid(+)",
        dialect=oracle.dialect(use_ansi=False),
    )
    # No join at all: use_ansi=False changes nothing.
    q = select([table1.c.name]).where(table1.c.name == "foo")
    self.assert_compile(
        q,
        "SELECT mytable.name FROM mytable WHERE " "mytable.name = :name_1",
        dialect=oracle.dialect(use_ansi=False),
    )
    # Correlated scalar subquery as a column expression.
    subq = (
        select([table3.c.otherstuff])
        .where(table3.c.otherstuff == table1.c.name)
        .label("bar")
    )
    q = select([table1.c.name, subq])
    self.assert_compile(
        q,
        "SELECT mytable.name, (SELECT "
        "thirdtable.otherstuff FROM thirdtable "
        "WHERE thirdtable.otherstuff = "
        "mytable.name) AS bar FROM mytable",
        dialect=oracle.dialect(use_ansi=False),
    )
def test_nonansi_plusses_everthing_in_the_condition(self):
    """Under use_ansi=False, (+) is applied to every reference to the
    outer side's columns within the ON condition, including comparisons
    against literals and IS NULL, but not to the inner side's columns."""
    table1 = table(
        "mytable",
        column("myid", Integer),
        column("name", String),
        column("description", String),
    )
    table2 = table(
        "myothertable",
        column("otherid", Integer),
        column("othername", String),
    )
    stmt = select([table1]).select_from(
        table1.outerjoin(
            table2,
            and_(
                table1.c.myid == table2.c.otherid,
                table2.c.othername > 5,
                table1.c.name == "foo",
            ),
        )
    )
    self.assert_compile(
        stmt,
        "SELECT mytable.myid, mytable.name, mytable.description "
        "FROM mytable, myothertable WHERE mytable.myid = "
        "myothertable.otherid(+) AND myothertable.othername(+) > "
        ":othername_1 AND mytable.name = :name_1",
        dialect=oracle.dialect(use_ansi=False),
    )
    stmt = select([table1]).select_from(
        table1.outerjoin(
            table2,
            and_(
                table1.c.myid == table2.c.otherid,
                table2.c.othername == None,
                table1.c.name == None,
            ),
        )
    )
    self.assert_compile(
        stmt,
        "SELECT mytable.myid, mytable.name, mytable.description "
        "FROM mytable, myothertable WHERE mytable.myid = "
        "myothertable.otherid(+) AND myothertable.othername(+) IS NULL "
        "AND mytable.name IS NULL",
        dialect=oracle.dialect(use_ansi=False),
    )
def test_nonansi_nested_right_join(self):
    """Nested right-hand joins flatten into WHERE conditions under
    use_ansi=False, with (+) marking only the outer-joined relation."""
    a = table("a", column("a"))
    b = table("b", column("b"))
    c = table("c", column("c"))
    # inner join onto an inner join: no (+) markers.
    j = a.join(b.join(c, b.c.b == c.c.c), a.c.a == b.c.b)
    self.assert_compile(
        select([j]),
        "SELECT a.a, b.b, c.c FROM a, b, c "
        "WHERE a.a = b.b AND b.b = c.c",
        dialect=oracle.OracleDialect(use_ansi=False),
    )
    # outer join onto an inner join: (+) only on the outer condition.
    j = a.outerjoin(b.join(c, b.c.b == c.c.c), a.c.a == b.c.b)
    self.assert_compile(
        select([j]),
        "SELECT a.a, b.b, c.c FROM a, b, c "
        "WHERE a.a = b.b(+) AND b.b = c.c",
        dialect=oracle.OracleDialect(use_ansi=False),
    )
    # inner join onto an outer join: (+) only on the nested condition.
    j = a.join(b.outerjoin(c, b.c.b == c.c.c), a.c.a == b.c.b)
    self.assert_compile(
        select([j]),
        "SELECT a.a, b.b, c.c FROM a, b, c "
        "WHERE a.a = b.b AND b.b = c.c(+)",
        dialect=oracle.OracleDialect(use_ansi=False),
    )
def test_alias_outer_join(self):
    """An aliased table in a LEFT OUTER JOIN renders its alias without
    the AS keyword (Oracle syntax)."""
    address_types = table("address_types", column("id"), column("name"))
    addresses = table(
        "addresses",
        column("id"),
        column("user_id"),
        column("address_type_id"),
        column("email_address"),
    )
    at_alias = address_types.alias()
    s = (
        select([at_alias, addresses])
        .select_from(
            addresses.outerjoin(
                at_alias, addresses.c.address_type_id == at_alias.c.id
            )
        )
        .where(addresses.c.user_id == 7)
        # note: the ORDER BY deliberately references the unaliased table.
        .order_by(addresses.c.id, address_types.c.id)
    )
    self.assert_compile(
        s,
        "SELECT address_types_1.id, "
        "address_types_1.name, addresses.id, "
        "addresses.user_id, addresses.address_type_"
        "id, addresses.email_address FROM "
        "addresses LEFT OUTER JOIN address_types "
        "address_types_1 ON addresses.address_type_"
        "id = address_types_1.id WHERE "
        "addresses.user_id = :user_id_1 ORDER BY "
        "addresses.id, address_types.id",
    )
def test_returning_insert(self):
    """INSERT..RETURNING renders Oracle's RETURNING..INTO out-bind form."""
    t1 = table("t1", column("c1"), column("c2"), column("c3"))
    stmt = t1.insert().values(c1=1).returning(t1.c.c2, t1.c.c3)
    self.assert_compile(
        stmt,
        "INSERT INTO t1 (c1) VALUES (:c1) RETURNING "
        "t1.c2, t1.c3 INTO :ret_0, :ret_1",
    )
def test_returning_insert_functional(self):
    """A SQL function in RETURNING keeps its type in the result map and
    renders inline before the INTO out-binds."""
    t1 = table(
        "t1", column("c1"), column("c2", String()), column("c3", String())
    )
    fn = func.lower(t1.c.c2, type_=String())
    stmt = t1.insert().values(c1=1).returning(fn, t1.c.c3)
    compiled = stmt.compile(dialect=oracle.dialect())
    eq_(
        compiled._create_result_map(),
        {
            "c3": ("c3", (t1.c.c3, "c3", "c3"), t1.c.c3.type),
            "lower": ("lower", (fn, "lower", None), fn.type),
        },
    )
    self.assert_compile(
        stmt,
        "INSERT INTO t1 (c1) VALUES (:c1) RETURNING "
        "lower(t1.c2), t1.c3 INTO :ret_0, :ret_1",
    )
def test_returning_insert_labeled(self):
    """Labels applied to RETURNING columns are discarded; the raw
    column expressions render."""
    t1 = table("t1", column("c1"), column("c2"), column("c3"))
    stmt = t1.insert().values(c1=1).returning(
        t1.c.c2.label("c2_l"), t1.c.c3.label("c3_l")
    )
    self.assert_compile(
        stmt,
        "INSERT INTO t1 (c1) VALUES (:c1) RETURNING "
        "t1.c2, t1.c3 INTO :ret_0, :ret_1",
    )
def test_returning_insert_computed(self):
    """A Computed (generated) column may be returned from an INSERT."""
    m = MetaData()
    t1 = Table(
        "t1",
        m,
        Column("id", Integer, primary_key=True),
        Column("foo", Integer),
        Column("bar", Integer, Computed("foo + 42")),
    )
    self.assert_compile(
        t1.insert().values(id=1, foo=5).returning(t1.c.bar),
        "INSERT INTO t1 (id, foo) VALUES (:id, :foo) "
        "RETURNING t1.bar INTO :ret_0",
    )
def test_returning_update_computed_warning(self):
    """Returning a Computed column from an UPDATE compiles, but emits a
    warning because Oracle does not support it at execution time."""
    m = MetaData()
    t1 = Table(
        "t1",
        m,
        Column("id", Integer, primary_key=True),
        Column("foo", Integer),
        Column("bar", Integer, Computed("foo + 42")),
    )
    with testing.expect_warnings(
        "Computed columns don't work with Oracle UPDATE"
    ):
        self.assert_compile(
            t1.update().values(id=1, foo=5).returning(t1.c.bar),
            "UPDATE t1 SET id=:id, foo=:foo RETURNING t1.bar INTO :ret_0",
        )
def test_compound(self):
t1 = table("t1", column("c1"), column("c2"), column("c3"))
t2 = table("t2", column("c1"), column("c2"), column("c3"))
self.assert_compile(
union(t1.select(), t2.select()),
"SELECT t1.c1, t1.c2, t1.c3 FROM t1 UNION "
"SELECT t2.c1, t2.c2, t2.c3 FROM t2",
)
self.assert_compile(
except_(t1.select(), t2.select()),
"SELECT t1.c1, t1.c2, t1.c3 FROM t1 MINUS "
"SELECT t2.c1, t2.c2, t2.c3 FROM t2",
)
def test_no_paren_fns(self):
for fn, expected in [
(func.uid(), "uid"),
(func.UID(), "UID"),
(func.sysdate(), "sysdate"),
(func.row_number(), "row_number()"),
(func.rank(), "rank()"),
(func.now(), "CURRENT_TIMESTAMP"),
(func.current_timestamp(), "CURRENT_TIMESTAMP"),
(func.user(), "USER"),
]:
self.assert_compile(fn, expected)
def test_create_index_alt_schema(self):
m = MetaData()
t1 = Table("foo", m, Column("x", Integer), schema="alt_schema")
self.assert_compile(
schema.CreateIndex(Index("bar", t1.c.x)),
"CREATE INDEX alt_schema.bar ON alt_schema.foo (x)",
)
def test_create_index_expr(self):
m = MetaData()
t1 = Table("foo", m, Column("x", Integer))
self.assert_compile(
schema.CreateIndex(Index("bar", t1.c.x > 5)),
"CREATE INDEX bar ON foo (x > 5)",
)
    def test_table_options(self):
        """prefixes= and oracle_on_commit= wrap the CREATE TABLE statement."""
        m = MetaData()
        t = Table(
            "foo",
            m,
            Column("x", Integer),
            prefixes=["GLOBAL TEMPORARY"],
            oracle_on_commit="PRESERVE ROWS",
        )
        self.assert_compile(
            schema.CreateTable(t),
            "CREATE GLOBAL TEMPORARY TABLE "
            "foo (x INTEGER) ON COMMIT PRESERVE ROWS",
        )
def test_create_table_compress(self):
m = MetaData()
tbl1 = Table(
"testtbl1", m, Column("data", Integer), oracle_compress=True
)
tbl2 = Table(
"testtbl2", m, Column("data", Integer), oracle_compress="OLTP"
)
self.assert_compile(
schema.CreateTable(tbl1),
"CREATE TABLE testtbl1 (data INTEGER) COMPRESS",
)
self.assert_compile(
schema.CreateTable(tbl2),
"CREATE TABLE testtbl2 (data INTEGER) " "COMPRESS FOR OLTP",
)
def test_create_index_bitmap_compress(self):
m = MetaData()
tbl = Table("testtbl", m, Column("data", Integer))
idx1 = Index("idx1", tbl.c.data, oracle_compress=True)
idx2 = Index("idx2", tbl.c.data, oracle_compress=1)
idx3 = Index("idx3", tbl.c.data, oracle_bitmap=True)
self.assert_compile(
schema.CreateIndex(idx1),
"CREATE INDEX idx1 ON testtbl (data) COMPRESS",
)
self.assert_compile(
schema.CreateIndex(idx2),
"CREATE INDEX idx2 ON testtbl (data) COMPRESS 1",
)
self.assert_compile(
schema.CreateIndex(idx3),
"CREATE BITMAP INDEX idx3 ON testtbl (data)",
)
    @testing.combinations(
        ("no_persisted", "", "ignore"),
        ("persisted_none", "", None),
        ("persisted_false", " VIRTUAL", False),
        id_="iaa",
    )
    def test_column_computed(self, text, persisted):
        """GENERATED ALWAYS AS rendering for the 'persisted' variants that
        Oracle accepts (persisted=True is rejected; see next test)."""
        m = MetaData()
        # "ignore" means: do not pass the 'persisted' keyword at all.
        kwargs = {"persisted": persisted} if persisted != "ignore" else {}
        t = Table(
            "t",
            m,
            Column("x", Integer),
            Column("y", Integer, Computed("x + 2", **kwargs)),
        )
        self.assert_compile(
            schema.CreateTable(t),
            "CREATE TABLE t (x INTEGER, y INTEGER GENERATED "
            "ALWAYS AS (x + 2)%s)" % text,
        )
    def test_column_computed_persisted_true(self):
        """persisted=True (STORED) is unsupported on Oracle and must raise
        CompileError at DDL compile time."""
        m = MetaData()
        t = Table(
            "t",
            m,
            Column("x", Integer),
            Column("y", Integer, Computed("x + 2", persisted=True)),
        )
        assert_raises_message(
            exc.CompileError,
            r".*Oracle computed columns do not support 'stored' ",
            schema.CreateTable(t).compile,
            dialect=oracle.dialect(),
        )
class SequenceTest(fixtures.TestBase, AssertsCompiledSQL):
    """Identifier formatting for Oracle sequences."""

    def test_basic(self):
        """Schema qualification and case-sensitive quoting of sequence names."""
        dialect = oracle.OracleDialect()
        prep = dialect.identifier_preparer
        # No schema: the bare name is returned unquoted.
        assert (
            prep.format_sequence(Sequence("my_seq_no_schema"))
            == "my_seq_no_schema"
        )
        # Lower-case schema-qualified name: dotted, still unquoted.
        assert (
            prep.format_sequence(Sequence("my_seq", schema="some_schema"))
            == "some_schema.my_seq"
        )
        # Mixed case forces quoting of both identifiers.
        assert (
            prep.format_sequence(Sequence("My_Seq", schema="Some_Schema"))
            == '"Some_Schema"."My_Seq"'
        )
|
|
# Copyright 2010 OpenStack Foundation
# Copyright 2012 University Of Minho
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import os
import time
import fixtures
import mock
from oslo.config import cfg
from nova import exception
from nova.openstack.common import processutils
from nova.storage import linuxscsi
from nova import test
from nova.tests.virt.libvirt import fake_libvirt_utils
from nova import utils
from nova.virt import fake
from nova.virt.libvirt import utils as libvirt_utils
from nova.virt.libvirt import volume
CONF = cfg.CONF
class LibvirtVolumeTestCase(test.NoDBTestCase):
    def setUp(self):
        """Capture every utils.execute() call and build a minimal fake
        libvirt connection plus common volume fixtures."""
        super(LibvirtVolumeTestCase, self).setUp()
        # Every shelled-out command is recorded here as an argv tuple.
        self.executes = []
        def fake_execute(*cmd, **kwargs):
            self.executes.append(cmd)
            return None, None
        self.stubs.Set(utils, 'execute', fake_execute)
        # Minimal stand-in for the libvirt driver queried by volume drivers.
        class FakeLibvirtDriver(object):
            def __init__(self, hyperv="QEMU", version=1005001):
                self.hyperv = hyperv
                self.version = version
            def _get_hypervisor_version(self):
                return self.version
            def _get_hypervisor_type(self):
                return self.hyperv
            def _get_all_block_devices(self):
                return []
        # NOTE(review): the FakeVirtAPI instance is passed positionally into
        # the 'hyperv' parameter — presumably harmless since these tests
        # never assert on the hypervisor type; confirm intent.
        self.fake_conn = FakeLibvirtDriver(fake.FakeVirtAPI())
        self.connr = {
            'ip': '127.0.0.1',
            'initiator': 'fake_initiator',
            'host': 'fake_host'
        }
        # Default disk mapping reused by most tests below.
        self.disk_info = {
            "bus": "virtio",
            "dev": "vde",
            "type": "disk",
        }
        self.name = 'volume-00000001'
        self.location = '10.0.2.15:3260'
        self.iqn = 'iqn.2010-10.org.openstack:%s' % self.name
        self.vol = {'id': 1, 'name': self.name}
        self.uuid = '875a8070-d0b9-4949-8b31-104d125c9a64'
        self.user = 'foo'
def _assertNetworkAndProtocolEquals(self, tree):
self.assertEqual(tree.get('type'), 'network')
self.assertEqual(tree.find('./source').get('protocol'), 'rbd')
rbd_name = '%s/%s' % ('rbd', self.name)
self.assertEqual(tree.find('./source').get('name'), rbd_name)
def _assertFileTypeEquals(self, tree, file_path):
self.assertEqual(tree.get('type'), 'file')
self.assertEqual(tree.find('./source').get('file'), file_path)
def _assertDiskInfoEquals(self, tree, disk_info):
self.assertEqual(tree.get('device'), disk_info['type'])
self.assertEqual(tree.find('./target').get('bus'),
disk_info['bus'])
self.assertEqual(tree.find('./target').get('dev'),
disk_info['dev'])
def _test_libvirt_volume_driver_disk_info(self):
libvirt_driver = volume.LibvirtVolumeDriver(self.fake_conn)
connection_info = {
'driver_volume_type': 'fake',
'data': {
'device_path': '/foo',
},
'serial': 'fake_serial',
}
conf = libvirt_driver.connect_volume(connection_info, self.disk_info)
tree = conf.format_dom()
self._assertDiskInfoEquals(tree, self.disk_info)
    def test_libvirt_volume_disk_info_type(self):
        # A non-default device type ('cdrom') must carry through to the XML.
        self.disk_info['type'] = 'cdrom'
        self._test_libvirt_volume_driver_disk_info()
    def test_libvirt_volume_disk_info_dev(self):
        # A non-default target device name must carry through to the XML.
        self.disk_info['dev'] = 'hdc'
        self._test_libvirt_volume_driver_disk_info()
    def test_libvirt_volume_disk_info_bus(self):
        # A non-default bus ('scsi') must carry through to the XML.
        self.disk_info['bus'] = 'scsi'
        self._test_libvirt_volume_driver_disk_info()
def test_libvirt_volume_driver_serial(self):
libvirt_driver = volume.LibvirtVolumeDriver(self.fake_conn)
connection_info = {
'driver_volume_type': 'fake',
'data': {
'device_path': '/foo',
},
'serial': 'fake_serial',
}
conf = libvirt_driver.connect_volume(connection_info, self.disk_info)
tree = conf.format_dom()
self.assertEqual('block', tree.get('type'))
self.assertEqual('fake_serial', tree.find('./serial').text)
self.assertIsNone(tree.find('./blockio'))
def test_libvirt_volume_driver_blockio(self):
libvirt_driver = volume.LibvirtVolumeDriver(self.fake_conn)
connection_info = {
'driver_volume_type': 'fake',
'data': {
'device_path': '/foo',
'logical_block_size': '4096',
'physical_block_size': '4096',
},
'serial': 'fake_serial',
}
disk_info = {
"bus": "virtio",
"dev": "vde",
"type": "disk",
}
conf = libvirt_driver.connect_volume(connection_info, disk_info)
tree = conf.format_dom()
blockio = tree.find('./blockio')
self.assertEqual('4096', blockio.get('logical_block_size'))
self.assertEqual('4096', blockio.get('physical_block_size'))
    def test_libvirt_volume_driver_iotune(self):
        """Valid qos_specs map to <iotune> children; an invalid (non-dict)
        qos_specs value is silently ignored."""
        libvirt_driver = volume.LibvirtVolumeDriver(self.fake_conn)
        connection_info = {
            'driver_volume_type': 'fake',
            'data': {
                "device_path": "/foo",
                'qos_specs': 'bar',
            },
        }
        disk_info = {
            "bus": "virtio",
            "dev": "vde",
            "type": "disk",
        }
        conf = libvirt_driver.connect_volume(connection_info, disk_info)
        tree = conf.format_dom()
        iotune = tree.find('./iotune')
        # ensure invalid qos_specs is ignored
        self.assertIsNone(iotune)
        specs = {
            'total_bytes_sec': '102400',
            'read_bytes_sec': '51200',
            'write_bytes_sec': '0',
            'total_iops_sec': '0',
            'read_iops_sec': '200',
            'write_iops_sec': '200',
        }
        del connection_info['data']['qos_specs']
        connection_info['data'].update(dict(qos_specs=specs))
        # Each spec becomes a same-named child of <iotune>.
        conf = libvirt_driver.connect_volume(connection_info, disk_info)
        tree = conf.format_dom()
        self.assertEqual('102400', tree.find('./iotune/total_bytes_sec').text)
        self.assertEqual('51200', tree.find('./iotune/read_bytes_sec').text)
        self.assertEqual('0', tree.find('./iotune/write_bytes_sec').text)
        self.assertEqual('0', tree.find('./iotune/total_iops_sec').text)
        self.assertEqual('200', tree.find('./iotune/read_iops_sec').text)
        self.assertEqual('200', tree.find('./iotune/write_iops_sec').text)
def test_libvirt_volume_driver_readonly(self):
libvirt_driver = volume.LibvirtVolumeDriver(self.fake_conn)
connection_info = {
'driver_volume_type': 'fake',
'data': {
"device_path": "/foo",
'access_mode': 'bar',
},
}
disk_info = {
"bus": "virtio",
"dev": "vde",
"type": "disk",
}
self.assertRaises(exception.InvalidVolumeAccessMode,
libvirt_driver.connect_volume,
connection_info, self.disk_info)
connection_info['data']['access_mode'] = 'rw'
conf = libvirt_driver.connect_volume(connection_info, disk_info)
tree = conf.format_dom()
readonly = tree.find('./readonly')
self.assertIsNone(readonly)
connection_info['data']['access_mode'] = 'ro'
conf = libvirt_driver.connect_volume(connection_info, disk_info)
tree = conf.format_dom()
readonly = tree.find('./readonly')
self.assertIsNotNone(readonly)
def iscsi_connection(self, volume, location, iqn):
return {
'driver_volume_type': 'iscsi',
'data': {
'volume_id': volume['id'],
'target_portal': location,
'target_iqn': iqn,
'target_lun': 1,
'qos_specs': {
'total_bytes_sec': '102400',
'read_iops_sec': '200',
}
}
}
def test_rescan_multipath(self):
libvirt_driver = volume.LibvirtISCSIVolumeDriver(self.fake_conn)
libvirt_driver._rescan_multipath()
expected_multipath_cmd = ('multipath', '-r')
self.assertIn(expected_multipath_cmd, self.executes)
    def test_libvirt_iscsi_driver(self):
        """Full iSCSI attach/detach: by-path device, XML shape, iotune, and
        the exact iscsiadm command sequence through login and logout."""
        # NOTE(vish) exists is to make driver assume connecting worked
        self.stubs.Set(os.path, 'exists', lambda x: True)
        libvirt_driver = volume.LibvirtISCSIVolumeDriver(self.fake_conn)
        connection_info = self.iscsi_connection(self.vol, self.location,
                                                self.iqn)
        conf = libvirt_driver.connect_volume(connection_info, self.disk_info)
        self.assertEqual('qemu', conf.driver_name)
        tree = conf.format_dom()
        dev_str = '/dev/disk/by-path/ip-%s-iscsi-%s-lun-1' % (self.location,
                                                              self.iqn)
        self.assertEqual(tree.get('type'), 'block')
        self.assertEqual(tree.find('./source').get('dev'), dev_str)
        libvirt_driver.disconnect_volume(connection_info, "vde")
        # Order matters: compared against the recorded sequence verbatim.
        expected_commands = [('iscsiadm', '-m', 'node', '-T', self.iqn,
                              '-p', self.location),
                             ('iscsiadm', '-m', 'session'),
                             ('iscsiadm', '-m', 'node', '-T', self.iqn,
                              '-p', self.location, '--login'),
                             ('iscsiadm', '-m', 'node', '-T', self.iqn,
                              '-p', self.location, '--op', 'update',
                              '-n', 'node.startup', '-v', 'automatic'),
                             ('iscsiadm', '-m', 'node', '-T', self.iqn,
                              '-p', self.location, '--rescan'),
                             ('iscsiadm', '-m', 'node', '-T', self.iqn,
                              '-p', self.location, '--op', 'update',
                              '-n', 'node.startup', '-v', 'manual'),
                             ('iscsiadm', '-m', 'node', '-T', self.iqn,
                              '-p', self.location, '--logout'),
                             ('iscsiadm', '-m', 'node', '-T', self.iqn,
                              '-p', self.location, '--op', 'delete')]
        self.assertEqual('102400', tree.find('./iotune/total_bytes_sec').text)
        self.assertEqual(self.executes, expected_commands)
    def test_libvirt_iscsi_driver_still_in_use(self):
        """When another lun on the same target is still attached, detach
        deletes only this lun's device node instead of logging out."""
        # NOTE(vish) exists is to make driver assume connecting worked
        self.stubs.Set(os.path, 'exists', lambda x: True)
        libvirt_driver = volume.LibvirtISCSIVolumeDriver(self.fake_conn)
        devs = ['/dev/disk/by-path/ip-%s-iscsi-%s-lun-2' % (self.location,
                                                            self.iqn)]
        self.stubs.Set(self.fake_conn, '_get_all_block_devices', lambda: devs)
        vol = {'id': 1, 'name': self.name}
        connection_info = self.iscsi_connection(vol, self.location, self.iqn)
        conf = libvirt_driver.connect_volume(connection_info, self.disk_info)
        tree = conf.format_dom()
        dev_name = 'ip-%s-iscsi-%s-lun-1' % (self.location, self.iqn)
        dev_str = '/dev/disk/by-path/%s' % dev_name
        self.assertEqual(tree.get('type'), 'block')
        self.assertEqual(tree.find('./source').get('dev'), dev_str)
        libvirt_driver.disconnect_volume(connection_info, "vde")
        # No --logout/--op delete here: lun-2 still uses the session, so
        # only the lun-1 scsi device is removed via sysfs.
        expected_commands = [('iscsiadm', '-m', 'node', '-T', self.iqn,
                              '-p', self.location),
                             ('iscsiadm', '-m', 'session'),
                             ('iscsiadm', '-m', 'node', '-T', self.iqn,
                              '-p', self.location, '--login'),
                             ('iscsiadm', '-m', 'node', '-T', self.iqn,
                              '-p', self.location, '--op', 'update',
                              '-n', 'node.startup', '-v', 'automatic'),
                             ('iscsiadm', '-m', 'node', '-T', self.iqn,
                              '-p', self.location, '--rescan'),
                             ('cp', '/dev/stdin',
                              '/sys/block/%s/device/delete' % dev_name)]
        self.assertEqual(self.executes, expected_commands)
    def test_libvirt_iscsi_driver_disconnect_multipath_error(self):
        """'multipath -f' failing with ProcessExecutionError must not
        propagate out of disconnect_volume."""
        libvirt_driver = volume.LibvirtISCSIVolumeDriver(self.fake_conn)
        devs = ['/dev/disk/by-path/ip-%s-iscsi-%s-lun-2' % (self.location,
                                                            self.iqn)]
        # The 'as' tuple unpacks in the same order as the patches below.
        with contextlib.nested(
            mock.patch.object(os.path, 'exists', return_value=True),
            mock.patch.object(self.fake_conn, '_get_all_block_devices',
                              return_value=devs),
            mock.patch.object(libvirt_driver, '_rescan_multipath'),
            mock.patch.object(libvirt_driver, '_run_multipath'),
            mock.patch.object(libvirt_driver, '_get_multipath_device_name',
                              return_value='/dev/mapper/fake-multipath-devname'),
            mock.patch.object(libvirt_driver,
                              '_get_target_portals_from_iscsiadm_output',
                              return_value=[('fake-ip', 'fake-portal')]),
            mock.patch.object(libvirt_driver, '_get_multipath_iqn',
                              return_value='fake-portal'),
        ) as (mock_exists, mock_devices, mock_rescan_multipath,
              mock_run_multipath, mock_device_name, mock_get_portals,
              mock_get_iqn):
            mock_run_multipath.side_effect = processutils.ProcessExecutionError
            vol = {'id': 1, 'name': self.name}
            connection_info = self.iscsi_connection(vol, self.location,
                                                    self.iqn)
            conf = libvirt_driver.connect_volume(connection_info,
                                                 self.disk_info)
            tree = conf.format_dom()
            dev_name = 'ip-%s-iscsi-%s-lun-1' % (self.location, self.iqn)
            dev_str = '/dev/disk/by-path/%s' % dev_name
            self.assertEqual('block', tree.get('type'))
            self.assertEqual(dev_str, tree.find('./source').get('dev'))
            libvirt_driver.use_multipath = True
            # Must swallow the error yet still attempt the flush.
            libvirt_driver.disconnect_volume(connection_info, "vde")
            mock_run_multipath.assert_called_once_with(
                ['-f', 'fake-multipath-devname'],
                check_exit_code=[0, 1])
    def test_sanitize_log_run_iscsiadm(self):
        """Passwords handed to iscsiadm must be scrubbed before logging."""
        # Tests that the parameters to the _run_iscsiadm function are sanitized
        # for passwords when logged.
        def fake_debug(*args, **kwargs):
            self.assertIn('node.session.auth.password', args[0])
            self.assertNotIn('scrubme', args[0])
        libvirt_driver = volume.LibvirtISCSIVolumeDriver(self.fake_conn)
        connection_info = self.iscsi_connection(self.vol, self.location,
                                                self.iqn)
        iscsi_properties = connection_info['data']
        with mock.patch.object(volume.LOG, 'debug',
                               side_effect=fake_debug) as debug_mock:
            libvirt_driver._iscsiadm_update(iscsi_properties,
                                            'node.session.auth.password',
                                            'scrubme')
            # we don't care what the log message is, we just want to make sure
            # our stub method is called which asserts the password is scrubbed
            self.assertTrue(debug_mock.called)
def iser_connection(self, volume, location, iqn):
return {
'driver_volume_type': 'iser',
'data': {
'volume_id': volume['id'],
'target_portal': location,
'target_iqn': iqn,
'target_lun': 1,
}
}
def sheepdog_connection(self, volume):
return {
'driver_volume_type': 'sheepdog',
'data': {
'name': volume['name']
}
}
def test_libvirt_sheepdog_driver(self):
libvirt_driver = volume.LibvirtNetVolumeDriver(self.fake_conn)
connection_info = self.sheepdog_connection(self.vol)
conf = libvirt_driver.connect_volume(connection_info, self.disk_info)
tree = conf.format_dom()
self.assertEqual(tree.get('type'), 'network')
self.assertEqual(tree.find('./source').get('protocol'), 'sheepdog')
self.assertEqual(tree.find('./source').get('name'), self.name)
libvirt_driver.disconnect_volume(connection_info, "vde")
def rbd_connection(self, volume):
return {
'driver_volume_type': 'rbd',
'data': {
'name': '%s/%s' % ('rbd', volume['name']),
'auth_enabled': CONF.libvirt.rbd_secret_uuid is not None,
'auth_username': CONF.libvirt.rbd_user,
'secret_type': 'ceph',
'secret_uuid': CONF.libvirt.rbd_secret_uuid,
'qos_specs': {
'total_bytes_sec': '1048576',
'read_iops_sec': '500',
}
}
}
def test_libvirt_rbd_driver(self):
libvirt_driver = volume.LibvirtNetVolumeDriver(self.fake_conn)
connection_info = self.rbd_connection(self.vol)
conf = libvirt_driver.connect_volume(connection_info, self.disk_info)
tree = conf.format_dom()
self._assertNetworkAndProtocolEquals(tree)
self.assertIsNone(tree.find('./source/auth'))
self.assertEqual('1048576', tree.find('./iotune/total_bytes_sec').text)
self.assertEqual('500', tree.find('./iotune/read_iops_sec').text)
libvirt_driver.disconnect_volume(connection_info, "vde")
def test_libvirt_rbd_driver_hosts(self):
libvirt_driver = volume.LibvirtNetVolumeDriver(self.fake_conn)
connection_info = self.rbd_connection(self.vol)
hosts = ['example.com', '1.2.3.4', '::1']
ports = [None, '6790', '6791']
connection_info['data']['hosts'] = hosts
connection_info['data']['ports'] = ports
conf = libvirt_driver.connect_volume(connection_info, self.disk_info)
tree = conf.format_dom()
self._assertNetworkAndProtocolEquals(tree)
self.assertIsNone(tree.find('./source/auth'))
found_hosts = tree.findall('./source/host')
self.assertEqual([host.get('name') for host in found_hosts], hosts)
self.assertEqual([host.get('port') for host in found_hosts], ports)
libvirt_driver.disconnect_volume(connection_info, "vde")
def test_libvirt_rbd_driver_auth_enabled(self):
libvirt_driver = volume.LibvirtNetVolumeDriver(self.fake_conn)
connection_info = self.rbd_connection(self.vol)
secret_type = 'ceph'
connection_info['data']['auth_enabled'] = True
connection_info['data']['auth_username'] = self.user
connection_info['data']['secret_type'] = secret_type
connection_info['data']['secret_uuid'] = self.uuid
conf = libvirt_driver.connect_volume(connection_info, self.disk_info)
tree = conf.format_dom()
self._assertNetworkAndProtocolEquals(tree)
self.assertEqual(tree.find('./auth').get('username'), self.user)
self.assertEqual(tree.find('./auth/secret').get('type'), secret_type)
self.assertEqual(tree.find('./auth/secret').get('uuid'), self.uuid)
libvirt_driver.disconnect_volume(connection_info, "vde")
    def test_libvirt_rbd_driver_auth_enabled_flags_override(self):
        """CONF.libvirt rbd_user/rbd_secret_uuid override the values in
        connection_info when auth is enabled."""
        libvirt_driver = volume.LibvirtNetVolumeDriver(self.fake_conn)
        connection_info = self.rbd_connection(self.vol)
        secret_type = 'ceph'
        connection_info['data']['auth_enabled'] = True
        connection_info['data']['auth_username'] = self.user
        connection_info['data']['secret_type'] = secret_type
        connection_info['data']['secret_uuid'] = self.uuid
        flags_uuid = '37152720-1785-11e2-a740-af0c1d8b8e4b'
        flags_user = 'bar'
        self.flags(rbd_user=flags_user,
                   rbd_secret_uuid=flags_uuid,
                   group='libvirt')
        conf = libvirt_driver.connect_volume(connection_info, self.disk_info)
        tree = conf.format_dom()
        self._assertNetworkAndProtocolEquals(tree)
        # The configured flag values win over the connection_info values.
        self.assertEqual(tree.find('./auth').get('username'), flags_user)
        self.assertEqual(tree.find('./auth/secret').get('type'), secret_type)
        self.assertEqual(tree.find('./auth/secret').get('uuid'), flags_uuid)
        libvirt_driver.disconnect_volume(connection_info, "vde")
def test_libvirt_rbd_driver_auth_disabled(self):
libvirt_driver = volume.LibvirtNetVolumeDriver(self.fake_conn)
connection_info = self.rbd_connection(self.vol)
secret_type = 'ceph'
connection_info['data']['auth_enabled'] = False
connection_info['data']['auth_username'] = self.user
connection_info['data']['secret_type'] = secret_type
connection_info['data']['secret_uuid'] = self.uuid
conf = libvirt_driver.connect_volume(connection_info, self.disk_info)
tree = conf.format_dom()
self._assertNetworkAndProtocolEquals(tree)
self.assertIsNone(tree.find('./auth'))
libvirt_driver.disconnect_volume(connection_info, "vde")
    def test_libvirt_rbd_driver_auth_disabled_flags_override(self):
        """Setting rbd_secret_uuid in CONF re-enables auth locally even
        though connection_info said auth_enabled=False."""
        libvirt_driver = volume.LibvirtNetVolumeDriver(self.fake_conn)
        connection_info = self.rbd_connection(self.vol)
        secret_type = 'ceph'
        connection_info['data']['auth_enabled'] = False
        connection_info['data']['auth_username'] = self.user
        connection_info['data']['secret_type'] = secret_type
        connection_info['data']['secret_uuid'] = self.uuid
        # NOTE: Supplying the rbd_secret_uuid will enable authentication
        # locally in nova-compute even if not enabled in nova-volume/cinder
        flags_uuid = '37152720-1785-11e2-a740-af0c1d8b8e4b'
        flags_user = 'bar'
        self.flags(rbd_user=flags_user,
                   rbd_secret_uuid=flags_uuid,
                   group='libvirt')
        conf = libvirt_driver.connect_volume(connection_info, self.disk_info)
        tree = conf.format_dom()
        self._assertNetworkAndProtocolEquals(tree)
        self.assertEqual(tree.find('./auth').get('username'), flags_user)
        self.assertEqual(tree.find('./auth/secret').get('type'), secret_type)
        self.assertEqual(tree.find('./auth/secret').get('uuid'), flags_uuid)
        libvirt_driver.disconnect_volume(connection_info, "vde")
def test_libvirt_kvm_volume(self):
self.stubs.Set(os.path, 'exists', lambda x: True)
libvirt_driver = volume.LibvirtISCSIVolumeDriver(self.fake_conn)
connection_info = self.iscsi_connection(self.vol, self.location,
self.iqn)
conf = libvirt_driver.connect_volume(connection_info, self.disk_info)
tree = conf.format_dom()
dev_str = '/dev/disk/by-path/ip-%s-iscsi-%s-lun-1' % (self.location,
self.iqn)
self.assertEqual(tree.get('type'), 'block')
self.assertEqual(tree.find('./source').get('dev'), dev_str)
libvirt_driver.disconnect_volume(connection_info, 'vde')
    def test_libvirt_kvm_volume_with_multipath(self):
        """With iscsi_use_multipath the mapper device is attached and
        'multipath -f' is issued on disconnect."""
        self.flags(iscsi_use_multipath=True, group='libvirt')
        self.stubs.Set(os.path, 'exists', lambda x: True)
        devs = ['/dev/mapper/sda', '/dev/mapper/sdb']
        self.stubs.Set(self.fake_conn, '_get_all_block_devices', lambda: devs)
        libvirt_driver = volume.LibvirtISCSIVolumeDriver(self.fake_conn)
        connection_info = self.iscsi_connection(self.vol, self.location,
                                                self.iqn)
        mpdev_filepath = '/dev/mapper/foo'
        connection_info['data']['device_path'] = mpdev_filepath
        libvirt_driver._get_multipath_device_name = lambda x: mpdev_filepath
        self.stubs.Set(libvirt_driver,
                       '_get_target_portals_from_iscsiadm_output',
                       lambda x: [[self.location, self.iqn]])
        conf = libvirt_driver.connect_volume(connection_info, self.disk_info)
        tree = conf.format_dom()
        # The multipath mapper device, not the by-path device, is used.
        self.assertEqual(tree.find('./source').get('dev'), mpdev_filepath)
        libvirt_driver._get_multipath_iqn = lambda x: self.iqn
        libvirt_driver.disconnect_volume(connection_info, 'vde')
        expected_multipath_cmd = ('multipath', '-f', 'foo')
        self.assertIn(expected_multipath_cmd, self.executes)
    def test_libvirt_kvm_volume_with_multipath_still_in_use(self):
        """Disconnect must not flush a multipath device that still backs
        another attached lun; only this lun's path devices are deleted."""
        name = 'volume-00000001'
        location = '10.0.2.15:3260'
        iqn = 'iqn.2010-10.org.openstack:%s' % name
        mpdev_filepath = '/dev/mapper/foo'
        def _get_multipath_device_name(path):
            # lun-1 resolves to the device under test; anything else maps
            # to a device that must NOT be disconnected.
            if '%s-lun-1' % iqn in path:
                return mpdev_filepath
            return '/dev/mapper/donotdisconnect'
        self.flags(iscsi_use_multipath=True, group='libvirt')
        self.stubs.Set(os.path, 'exists', lambda x: True)
        libvirt_driver = volume.LibvirtISCSIVolumeDriver(self.fake_conn)
        libvirt_driver._get_multipath_device_name =\
            lambda x: _get_multipath_device_name(x)
        block_devs = ['/dev/disks/by-path/%s-iscsi-%s-lun-2' % (location, iqn)]
        self.stubs.Set(self.fake_conn, '_get_all_block_devices',
                       lambda: block_devs)
        vol = {'id': 1, 'name': name}
        connection_info = self.iscsi_connection(vol, location, iqn)
        connection_info['data']['device_path'] = mpdev_filepath
        libvirt_driver._get_multipath_iqn = lambda x: iqn
        iscsi_devs = ['1.2.3.4-iscsi-%s-lun-1' % iqn,
                      '%s-iscsi-%s-lun-1' % (location, iqn),
                      '%s-iscsi-%s-lun-2' % (location, iqn)]
        libvirt_driver._get_iscsi_devices = lambda: iscsi_devs
        self.stubs.Set(libvirt_driver,
                       '_get_target_portals_from_iscsiadm_output',
                       lambda x: [[location, iqn]])
        # Set up disconnect volume mock expectations
        # (mox record phase: calls below define the expected sequence).
        self.mox.StubOutWithMock(libvirt_driver, '_delete_device')
        self.mox.StubOutWithMock(libvirt_driver, '_rescan_multipath')
        libvirt_driver._rescan_multipath()
        libvirt_driver._delete_device('/dev/disk/by-path/%s' % iscsi_devs[0])
        libvirt_driver._delete_device('/dev/disk/by-path/%s' % iscsi_devs[1])
        libvirt_driver._rescan_multipath()
        # Ensure that the mpath devices are deleted
        self.mox.ReplayAll()
        libvirt_driver.disconnect_volume(connection_info, 'vde')
    def test_libvirt_kvm_volume_with_multipath_getmpdev(self):
        """connect_volume resolves the multipath mapper device even when
        other block devices are already attached."""
        self.flags(iscsi_use_multipath=True, group='libvirt')
        self.stubs.Set(os.path, 'exists', lambda x: True)
        libvirt_driver = volume.LibvirtISCSIVolumeDriver(self.fake_conn)
        name0 = 'volume-00000000'
        iqn0 = 'iqn.2010-10.org.openstack:%s' % name0
        dev0 = '/dev/disk/by-path/ip-%s-iscsi-%s-lun-0' % (self.location, iqn0)
        dev = '/dev/disk/by-path/ip-%s-iscsi-%s-lun-1' % (self.location,
                                                          self.iqn)
        devs = [dev0, dev]
        self.stubs.Set(self.fake_conn, '_get_all_block_devices', lambda: devs)
        connection_info = self.iscsi_connection(self.vol, self.location,
                                                self.iqn)
        mpdev_filepath = '/dev/mapper/foo'
        libvirt_driver._get_multipath_device_name = lambda x: mpdev_filepath
        self.stubs.Set(libvirt_driver,
                       '_get_target_portals_from_iscsiadm_output',
                       lambda x: [['fake_portal1', 'fake_iqn1']])
        conf = libvirt_driver.connect_volume(connection_info, self.disk_info)
        tree = conf.format_dom()
        self.assertEqual(tree.find('./source').get('dev'), mpdev_filepath)
        libvirt_driver.disconnect_volume(connection_info, 'vde')
    def test_libvirt_kvm_iser_volume_with_multipath(self):
        """iSER attach with multipath uses the mapper device and flushes
        it ('multipath -f') on disconnect."""
        self.flags(iser_use_multipath=True, group='libvirt')
        self.stubs.Set(os.path, 'exists', lambda x: True)
        self.stubs.Set(time, 'sleep', lambda x: None)
        devs = ['/dev/mapper/sda', '/dev/mapper/sdb']
        self.stubs.Set(self.fake_conn, '_get_all_block_devices', lambda: devs)
        libvirt_driver = volume.LibvirtISERVolumeDriver(self.fake_conn)
        name = 'volume-00000001'
        location = '10.0.2.15:3260'
        iqn = 'iqn.2010-10.org.iser.openstack:%s' % name
        vol = {'id': 1, 'name': name}
        connection_info = self.iser_connection(vol, location, iqn)
        mpdev_filepath = '/dev/mapper/foo'
        connection_info['data']['device_path'] = mpdev_filepath
        disk_info = {
            "bus": "virtio",
            "dev": "vde",
            "type": "disk",
        }
        libvirt_driver._get_multipath_device_name = lambda x: mpdev_filepath
        self.stubs.Set(libvirt_driver,
                       '_get_target_portals_from_iscsiadm_output',
                       lambda x: [[location, iqn]])
        conf = libvirt_driver.connect_volume(connection_info, disk_info)
        tree = conf.format_dom()
        self.assertEqual(tree.find('./source').get('dev'), mpdev_filepath)
        libvirt_driver._get_multipath_iqn = lambda x: iqn
        libvirt_driver.disconnect_volume(connection_info, 'vde')
        expected_multipath_cmd = ('multipath', '-f', 'foo')
        self.assertIn(expected_multipath_cmd, self.executes)
    def test_libvirt_kvm_iser_volume_with_multipath_getmpdev(self):
        """iSER connect resolves the multipath mapper device when other
        block devices already exist."""
        self.flags(iser_use_multipath=True, group='libvirt')
        self.stubs.Set(os.path, 'exists', lambda x: True)
        self.stubs.Set(time, 'sleep', lambda x: None)
        libvirt_driver = volume.LibvirtISERVolumeDriver(self.fake_conn)
        name0 = 'volume-00000000'
        location0 = '10.0.2.15:3260'
        iqn0 = 'iqn.2010-10.org.iser.openstack:%s' % name0
        dev0 = '/dev/disk/by-path/ip-%s-iscsi-%s-lun-0' % (location0, iqn0)
        name = 'volume-00000001'
        location = '10.0.2.15:3260'
        iqn = 'iqn.2010-10.org.iser.openstack:%s' % name
        vol = {'id': 1, 'name': name}
        dev = '/dev/disk/by-path/ip-%s-iscsi-%s-lun-1' % (location, iqn)
        devs = [dev0, dev]
        self.stubs.Set(self.fake_conn, '_get_all_block_devices', lambda: devs)
        self.stubs.Set(libvirt_driver, '_get_iscsi_devices', lambda: [])
        connection_info = self.iser_connection(vol, location, iqn)
        mpdev_filepath = '/dev/mapper/foo'
        disk_info = {
            "bus": "virtio",
            "dev": "vde",
            "type": "disk",
        }
        libvirt_driver._get_multipath_device_name = lambda x: mpdev_filepath
        self.stubs.Set(libvirt_driver,
                       '_get_target_portals_from_iscsiadm_output',
                       lambda x: [['fake_portal1', 'fake_iqn1']])
        conf = libvirt_driver.connect_volume(connection_info, disk_info)
        tree = conf.format_dom()
        self.assertEqual(tree.find('./source').get('dev'), mpdev_filepath)
        libvirt_driver.disconnect_volume(connection_info, 'vde')
    def test_libvirt_nfs_driver(self):
        """NFS connect mounts the export under its hashed mount point;
        disconnect unmounts it (command order is asserted verbatim)."""
        # NOTE(vish) exists is to make driver assume connecting worked
        mnt_base = '/mnt'
        self.flags(nfs_mount_point_base=mnt_base, group='libvirt')
        libvirt_driver = volume.LibvirtNFSVolumeDriver(self.fake_conn)
        self.stubs.Set(libvirt_utils, 'is_mounted', lambda x, d: False)
        export_string = '192.168.1.1:/nfs/share1'
        export_mnt_base = os.path.join(mnt_base,
                                       utils.get_hash_str(export_string))
        file_path = os.path.join(export_mnt_base, self.name)
        connection_info = {'data': {'export': export_string,
                                    'name': self.name}}
        conf = libvirt_driver.connect_volume(connection_info, self.disk_info)
        tree = conf.format_dom()
        self._assertFileTypeEquals(tree, file_path)
        libvirt_driver.disconnect_volume(connection_info, "vde")
        expected_commands = [
            ('mkdir', '-p', export_mnt_base),
            ('mount', '-t', 'nfs', export_string, export_mnt_base),
            ('umount', export_mnt_base)]
        self.assertEqual(expected_commands, self.executes)
    def test_libvirt_nfs_driver_already_mounted(self):
        """If the export is already mounted, only findmnt/umount are run —
        no mkdir/mount."""
        # NOTE(vish) exists is to make driver assume connecting worked
        mnt_base = '/mnt'
        self.flags(nfs_mount_point_base=mnt_base, group='libvirt')
        libvirt_driver = volume.LibvirtNFSVolumeDriver(self.fake_conn)
        export_string = '192.168.1.1:/nfs/share1'
        export_mnt_base = os.path.join(mnt_base,
                                       utils.get_hash_str(export_string))
        file_path = os.path.join(export_mnt_base, self.name)
        connection_info = {'data': {'export': export_string,
                                    'name': self.name}}
        conf = libvirt_driver.connect_volume(connection_info, self.disk_info)
        tree = conf.format_dom()
        self._assertFileTypeEquals(tree, file_path)
        libvirt_driver.disconnect_volume(connection_info, "vde")
        expected_commands = [
            ('findmnt', '--target', export_mnt_base, '--source',
             export_string),
            ('umount', export_mnt_base)]
        self.assertEqual(self.executes, expected_commands)
    def test_libvirt_nfs_driver_with_opts(self):
        """Mount options from connection_info are forwarded to mount."""
        mnt_base = '/mnt'
        self.flags(nfs_mount_point_base=mnt_base, group='libvirt')
        libvirt_driver = volume.LibvirtNFSVolumeDriver(self.fake_conn)
        self.stubs.Set(libvirt_utils, 'is_mounted', lambda x, d: False)
        export_string = '192.168.1.1:/nfs/share1'
        options = '-o intr,nfsvers=3'
        export_mnt_base = os.path.join(mnt_base,
                                       utils.get_hash_str(export_string))
        file_path = os.path.join(export_mnt_base, self.name)
        connection_info = {'data': {'export': export_string,
                                    'name': self.name,
                                    'options': options}}
        conf = libvirt_driver.connect_volume(connection_info, self.disk_info)
        tree = conf.format_dom()
        self._assertFileTypeEquals(tree, file_path)
        libvirt_driver.disconnect_volume(connection_info, "vde")
        expected_commands = [
            ('mkdir', '-p', export_mnt_base),
            ('mount', '-t', 'nfs', '-o', 'intr,nfsvers=3',
             export_string, export_mnt_base),
            ('umount', export_mnt_base),
        ]
        self.assertEqual(expected_commands, self.executes)
def aoe_connection(self, shelf, lun):
return {
'driver_volume_type': 'aoe',
'data': {
'target_shelf': shelf,
'target_lun': lun,
}
}
def test_libvirt_aoe_driver(self):
# NOTE(jbr_) exists is to make driver assume connecting worked
self.stubs.Set(os.path, 'exists', lambda x: True)
libvirt_driver = volume.LibvirtAOEVolumeDriver(self.fake_conn)
shelf = '100'
lun = '1'
connection_info = self.aoe_connection(shelf, lun)
conf = libvirt_driver.connect_volume(connection_info, self.disk_info)
tree = conf.format_dom()
aoedevpath = '/dev/etherd/e%s.%s' % (shelf, lun)
self.assertEqual(tree.get('type'), 'block')
self.assertEqual(tree.find('./source').get('dev'), aoedevpath)
libvirt_driver.disconnect_volume(connection_info, "vde")
def test_libvirt_glusterfs_driver(self):
mnt_base = '/mnt'
self.flags(glusterfs_mount_point_base=mnt_base, group='libvirt')
libvirt_driver = volume.LibvirtGlusterfsVolumeDriver(self.fake_conn)
self.stubs.Set(libvirt_utils, 'is_mounted', lambda x, d: False)
export_string = '192.168.1.1:/volume-00001'
export_mnt_base = os.path.join(mnt_base,
utils.get_hash_str(export_string))
file_path = os.path.join(export_mnt_base, self.name)
connection_info = {'data': {'export': export_string,
'name': self.name}}
conf = libvirt_driver.connect_volume(connection_info, self.disk_info)
tree = conf.format_dom()
self._assertFileTypeEquals(tree, file_path)
libvirt_driver.disconnect_volume(connection_info, "vde")
expected_commands = [
('mkdir', '-p', export_mnt_base),
('mount', '-t', 'glusterfs', export_string, export_mnt_base),
('umount', export_mnt_base)]
self.assertEqual(expected_commands, self.executes)
def test_libvirt_glusterfs_driver_already_mounted(self):
mnt_base = '/mnt'
self.flags(glusterfs_mount_point_base=mnt_base, group='libvirt')
libvirt_driver = volume.LibvirtGlusterfsVolumeDriver(self.fake_conn)
export_string = '192.168.1.1:/volume-00001'
export_mnt_base = os.path.join(mnt_base,
utils.get_hash_str(export_string))
file_path = os.path.join(export_mnt_base, self.name)
connection_info = {'data': {'export': export_string,
'name': self.name}}
conf = libvirt_driver.connect_volume(connection_info, self.disk_info)
tree = conf.format_dom()
self._assertFileTypeEquals(tree, file_path)
libvirt_driver.disconnect_volume(connection_info, "vde")
expected_commands = [
('findmnt', '--target', export_mnt_base,
'--source', export_string),
('umount', export_mnt_base)]
self.assertEqual(self.executes, expected_commands)
def test_libvirt_glusterfs_driver_qcow2(self):
libvirt_driver = volume.LibvirtGlusterfsVolumeDriver(self.fake_conn)
self.stubs.Set(libvirt_utils, 'is_mounted', lambda x, d: False)
export_string = '192.168.1.1:/volume-00001'
name = 'volume-00001'
format = 'qcow2'
connection_info = {'data': {'export': export_string,
'name': name,
'format': format}}
disk_info = {
"bus": "virtio",
"dev": "vde",
"type": "disk",
}
conf = libvirt_driver.connect_volume(connection_info, disk_info)
tree = conf.format_dom()
self.assertEqual(tree.get('type'), 'file')
self.assertEqual(tree.find('./driver').get('type'), 'qcow2')
libvirt_driver.disconnect_volume(connection_info, "vde")
def test_libvirt_glusterfs_driver_with_opts(self):
mnt_base = '/mnt'
self.flags(glusterfs_mount_point_base=mnt_base, group='libvirt')
libvirt_driver = volume.LibvirtGlusterfsVolumeDriver(self.fake_conn)
self.stubs.Set(libvirt_utils, 'is_mounted', lambda x, d: False)
export_string = '192.168.1.1:/volume-00001'
options = '-o backupvolfile-server=192.168.1.2'
export_mnt_base = os.path.join(mnt_base,
utils.get_hash_str(export_string))
file_path = os.path.join(export_mnt_base, self.name)
connection_info = {'data': {'export': export_string,
'name': self.name,
'options': options}}
conf = libvirt_driver.connect_volume(connection_info, self.disk_info)
tree = conf.format_dom()
self._assertFileTypeEquals(tree, file_path)
libvirt_driver.disconnect_volume(connection_info, "vde")
expected_commands = [
('mkdir', '-p', export_mnt_base),
('mount', '-t', 'glusterfs',
'-o', 'backupvolfile-server=192.168.1.2',
export_string, export_mnt_base),
('umount', export_mnt_base),
]
self.assertEqual(self.executes, expected_commands)
def test_libvirt_glusterfs_libgfapi(self):
self.flags(qemu_allowed_storage_drivers=['gluster'], group='libvirt')
libvirt_driver = volume.LibvirtGlusterfsVolumeDriver(self.fake_conn)
self.stubs.Set(libvirt_utils, 'is_mounted', lambda x, d: False)
export_string = '192.168.1.1:/volume-00001'
name = 'volume-00001'
connection_info = {'data': {'export': export_string, 'name': name}}
disk_info = {
"dev": "vde",
"type": "disk",
"bus": "virtio",
}
conf = libvirt_driver.connect_volume(connection_info, disk_info)
tree = conf.format_dom()
self.assertEqual(tree.get('type'), 'network')
self.assertEqual(tree.find('./driver').get('type'), 'raw')
source = tree.find('./source')
self.assertEqual(source.get('protocol'), 'gluster')
self.assertEqual(source.get('name'), 'volume-00001/volume-00001')
self.assertEqual(source.find('./host').get('name'), '192.168.1.1')
self.assertEqual(source.find('./host').get('port'), '24007')
libvirt_driver.disconnect_volume(connection_info, "vde")
def fibrechan_connection(self, volume, location, wwn):
return {
'driver_volume_type': 'fibrechan',
'data': {
'volume_id': volume['id'],
'target_portal': location,
'target_wwn': wwn,
'target_lun': 1,
}
}
    def test_libvirt_fibrechan_driver(self):
        """End-to-end check of the fibre-channel volume driver.

        Connects with wwns given as str, unicode and list (all must be
        accepted), verifies the multipath device is used, exercises
        disconnect both with and without a stored multipath_id, and
        finally checks that an invalid wwn type and an absent HBA both
        raise NovaException.
        """
        self.stubs.Set(libvirt_utils, 'get_fc_hbas',
                       fake_libvirt_utils.get_fc_hbas)
        self.stubs.Set(libvirt_utils, 'get_fc_hbas_info',
                       fake_libvirt_utils.get_fc_hbas_info)
        # NOTE(vish) exists is to make driver assume connecting worked
        self.stubs.Set(os.path, 'exists', lambda x: True)
        self.stubs.Set(os.path, 'realpath', lambda x: '/dev/sdb')
        libvirt_driver = volume.LibvirtFibreChannelVolumeDriver(self.fake_conn)
        multipath_devname = '/dev/md-1'
        devices = {"device": multipath_devname,
                   "id": "1234567890",
                   "devices": [{'device': '/dev/sdb',
                                'address': '1:0:0:1',
                                'host': 1, 'channel': 0,
                                'id': 0, 'lun': 1}]}
        self.stubs.Set(linuxscsi, 'find_multipath_device', lambda x: devices)
        self.stubs.Set(linuxscsi, 'remove_device', lambda x: None)
        # Should work for string, unicode, and list
        wwns = ['1234567890123456', unicode('1234567890123456'),
                ['1234567890123456', '1234567890123457']]
        for wwn in wwns:
            connection_info = self.fibrechan_connection(self.vol,
                                                        self.location, wwn)
            mount_device = "vde"
            conf = libvirt_driver.connect_volume(connection_info,
                                                 self.disk_info)
            # connect_volume must record the multipath id for later cleanup.
            self.assertEqual('1234567890',
                             connection_info['data']['multipath_id'])
            tree = conf.format_dom()
            self.assertEqual('block', tree.get('type'))
            self.assertEqual(multipath_devname,
                             tree.find('./source').get('dev'))
            # Test the scenario where multipath_id is returned
            libvirt_driver.disconnect_volume(connection_info, mount_device)
            expected_commands = []
            self.assertEqual(expected_commands, self.executes)
            # Test the scenario where multipath_id is not returned
            connection_info["data"]["devices"] = devices["devices"]
            del connection_info["data"]["multipath_id"]
            libvirt_driver.disconnect_volume(connection_info, mount_device)
            expected_commands = []
            self.assertEqual(expected_commands, self.executes)
        # Should not work for anything other than string, unicode, and list
        connection_info = self.fibrechan_connection(self.vol,
                                                    self.location, 123)
        self.assertRaises(exception.NovaException,
                          libvirt_driver.connect_volume,
                          connection_info, self.disk_info)
        # With no HBAs present connect_volume must also fail.
        self.stubs.Set(libvirt_utils, 'get_fc_hbas', lambda: [])
        self.stubs.Set(libvirt_utils, 'get_fc_hbas_info', lambda: [])
        self.assertRaises(exception.NovaException,
                          libvirt_driver.connect_volume,
                          connection_info, self.disk_info)
def test_libvirt_fibrechan_getpci_num(self):
libvirt_driver = volume.LibvirtFibreChannelVolumeDriver(self.fake_conn)
hba = {'device_path': "/sys/devices/pci0000:00/0000:00:03.0"
"/0000:05:00.3/host2/fc_host/host2"}
pci_num = libvirt_driver._get_pci_num(hba)
self.assertEqual("0000:05:00.3", pci_num)
hba = {'device_path': "/sys/devices/pci0000:00/0000:00:03.0"
"/0000:05:00.3/0000:06:00.6/host2/fc_host/host2"}
pci_num = libvirt_driver._get_pci_num(hba)
self.assertEqual("0000:06:00.6", pci_num)
    def test_libvirt_scality_driver(self):
        """Scality SOFS volumes should appear as plain files under the mount.

        Builds a fake config file and mount tree in a temp dir and stubs
        os.access so the driver believes /sbin/mount.sofs is installed.
        """
        tempdir = self.useFixture(fixtures.TempDir()).path
        TEST_MOUNT = os.path.join(tempdir, 'fake_mount')
        TEST_CONFIG = os.path.join(tempdir, 'fake_config')
        TEST_VOLDIR = 'volumes'
        TEST_VOLNAME = 'volume_name'
        TEST_CONN_INFO = {
            'data': {
                'sofs_path': os.path.join(TEST_VOLDIR, TEST_VOLNAME)
            }
        }
        TEST_VOLPATH = os.path.join(TEST_MOUNT,
                                    TEST_VOLDIR,
                                    TEST_VOLNAME)
        # The driver expects the config file and a mounted 'sys' directory.
        open(TEST_CONFIG, "w+").close()
        os.makedirs(os.path.join(TEST_MOUNT, 'sys'))
        def _access_wrapper(path, flags):
            # Pretend the sofs mount helper exists; defer everything else.
            if path == '/sbin/mount.sofs':
                return True
            else:
                return os.access(path, flags)
        self.stubs.Set(os, 'access', _access_wrapper)
        self.flags(scality_sofs_config=TEST_CONFIG,
                   scality_sofs_mount_point=TEST_MOUNT,
                   group='libvirt')
        driver = volume.LibvirtScalityVolumeDriver(self.fake_conn)
        conf = driver.connect_volume(TEST_CONN_INFO, self.disk_info)
        tree = conf.format_dom()
        self._assertFileTypeEquals(tree, TEST_VOLPATH)
|
|
"""Builds the eye network.
Summary of available functions:
# Compute input images and labels for training. If you would like to run
# evaluations, use inputs() instead.
inputs, labels = distorted_inputs()
# Compute inference on the model inputs to make a prediction.
predictions = inference(inputs)
# Compute the total loss of the prediction with respect to the labels.
loss = loss(predictions, labels)
# Create a graph to run one step of training with respect to the loss.
train_op = train(loss, global_step)
"""
# pylint: disable=missing-docstring
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
import tensorflow as tf
import eye_input
FLAGS = tf.app.flags.FLAGS
# Basic model parameters.
tf.app.flags.DEFINE_integer('batch_size', 1,
                            """Number of images to process in a batch.""")
tf.app.flags.DEFINE_string('data_dir', '/home/hp/Documents/DeepLearning/MyProjects/Data/eye/new96',
                           """Path to the eye data directory.""")
tf.app.flags.DEFINE_boolean('use_fp16', False,
                            """Train the model using fp16.""")
# Global constants describing the eye data set.
IMAGE_SIZE = eye_input.IMAGE_SIZE
NUM_CLASSES = eye_input.NUM_CLASSES
NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN = eye_input.NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN
NUM_EXAMPLES_PER_EPOCH_FOR_EVAL = eye_input.NUM_EXAMPLES_PER_EPOCH_FOR_EVAL
# Constants describing the training process.
MOVING_AVERAGE_DECAY = 0.9999 # The decay to use for the moving average.
# NOTE(review): with NUM_EPOCHS_PER_DECAY=2000 and batch_size=1 the learning
# rate decays extremely slowly — confirm this is intended for this data set.
NUM_EPOCHS_PER_DECAY = 2000.0 # Epochs after which learning rate decays.
LEARNING_RATE_DECAY_FACTOR = 0.1 # Learning rate decay factor.
INITIAL_LEARNING_RATE = 0.1 # Initial learning rate.
# If a model is trained with multiple GPUs, prefix all Op names with tower_name
# to differentiate the operations. Note that this prefix is removed from the
# names of the summaries when visualizing a model.
TOWER_NAME = 'tower'
def _activation_summary(x):
  """Attach histogram and sparsity summaries to an activation tensor.

  Any 'tower_N/' prefix is stripped from the op name so that multi-GPU
  runs share the same summary names on TensorBoard.

  Args:
    x: Tensor to summarize.
  Returns:
    nothing
  """
  tensor_name = re.sub('%s_[0-9]*/' % TOWER_NAME, '', x.op.name)
  tf.histogram_summary(tensor_name + '/activations', x)
  sparsity = tf.nn.zero_fraction(x)
  tf.scalar_summary(tensor_name + '/sparsity', sparsity)
def _variable_on_cpu(name, shape, initializer):
  """Create (or fetch) a variable pinned to CPU memory.

  Args:
    name: name of the variable
    shape: list of ints
    initializer: initializer for Variable
  Returns:
    Variable Tensor
  """
  if FLAGS.use_fp16:
    dtype = tf.float16
  else:
    dtype = tf.float32
  with tf.device('/cpu:0'):
    return tf.get_variable(name, shape, initializer=initializer, dtype=dtype)
def _variable_with_weight_decay(name, shape, stddev, wd):
  """Create a truncated-normal-initialized variable with optional L2 decay.

  When wd is not None an L2 penalty (wd * l2_loss(var)) is registered in
  the 'losses' collection, which loss() later sums into the total loss.

  Args:
    name: name of the variable
    shape: list of ints
    stddev: standard deviation of a truncated Gaussian
    wd: add L2Loss weight decay multiplied by this float. If None, weight
      decay is not added for this Variable.
  Returns:
    Variable Tensor
  """
  dtype = tf.float16 if FLAGS.use_fp16 else tf.float32
  init = tf.truncated_normal_initializer(stddev=stddev, dtype=dtype)
  var = _variable_on_cpu(name, shape, init)
  if wd is not None:
    penalty = tf.mul(tf.nn.l2_loss(var), wd, name='weight_loss')
    tf.add_to_collection('losses', penalty)
  return var
def distorted_inputs():
  """Construct distorted input for eye training using the Reader ops.

  Returns:
    images: Images. 4D tensor of [batch_size, IMAGE_SIZE, IMAGE_SIZE, 3] size.
    labels: Labels. 1D tensor of [batch_size] size.
  Raises:
    ValueError: If no data_dir
  """
  if not FLAGS.data_dir:
    raise ValueError('Please supply a data_dir')
  images, labels = eye_input.distorted_inputs(FLAGS.data_dir,
                                              batch_size=FLAGS.batch_size)
  if FLAGS.use_fp16:
    return tf.cast(images, tf.float16), tf.cast(labels, tf.float16)
  return images, labels
def inputs(eval_data):
  """Construct input for eye evaluation using the Reader ops.

  Args:
    eval_data: bool, indicating if one should use the train or eval data set.
  Returns:
    images: Images. 4D tensor of [batch_size, IMAGE_SIZE, IMAGE_SIZE, 3] size.
    labels: Labels. 1D tensor of [batch_size] size.
  Raises:
    ValueError: If no data_dir
  """
  if not FLAGS.data_dir:
    raise ValueError('Please supply a data_dir')
  images, labels = eye_input.inputs(eval_data=eval_data,
                                    data_dir=FLAGS.data_dir,
                                    batch_size=FLAGS.batch_size)
  if FLAGS.use_fp16:
    return tf.cast(images, tf.float16), tf.cast(labels, tf.float16)
  return images, labels
def inference(images):
  """Build the eye model.

  Four conv/pool/lrn stages followed by two fully-connected layers and a
  linear softmax read-out (softmax itself is applied inside the loss).

  Args:
    images: Images returned from distorted_inputs() or inputs().
  Returns:
    Logits.
  """
  # We instantiate all variables using tf.get_variable() instead of
  # tf.Variable() in order to share variables across multiple GPU training runs.
  # If we only ran this model on a single GPU, we could simplify this function
  # by replacing all instances of tf.get_variable() with tf.Variable().
  #
  # conv1
  with tf.variable_scope('conv1') as scope:
    kernel = _variable_with_weight_decay('weights',
                                         shape=[5, 5, 3, 64],
                                         stddev=5e-2,
                                         wd=0.0)
    conv = tf.nn.conv2d(images, kernel, [1, 1, 1, 1], padding='SAME')
    biases = _variable_on_cpu('biases', [64], tf.constant_initializer(0.0))
    bias = tf.nn.bias_add(conv, biases)
    conv1 = tf.nn.relu(bias, name=scope.name)
    _activation_summary(conv1)
  # pool1
  pool1 = tf.nn.max_pool(conv1, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1],
                         padding='SAME', name='pool1')
  # norm1
  norm1 = tf.nn.lrn(pool1, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75,
                    name='norm1')
  # conv2
  with tf.variable_scope('conv2') as scope:
    kernel = _variable_with_weight_decay('weights',
                                         shape=[5, 5, 64, 64],
                                         stddev=5e-2,
                                         wd=0.0)
    conv = tf.nn.conv2d(norm1, kernel, [1, 1, 1, 1], padding='SAME')
    biases = _variable_on_cpu('biases', [64], tf.constant_initializer(0.1))
    bias = tf.nn.bias_add(conv, biases)
    conv2 = tf.nn.relu(bias, name=scope.name)
    _activation_summary(conv2)
  # norm2 (note: stages 2 and 4 normalize before pooling, 1 and 3 after)
  norm2 = tf.nn.lrn(conv2, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75,
                    name='norm2')
  # pool2
  pool2 = tf.nn.max_pool(norm2, ksize=[1, 3, 3, 1],
                         strides=[1, 2, 2, 1], padding='SAME', name='pool2')
  # More conv and pooling
  # conv3
  with tf.variable_scope('conv3') as scope:
    kernel = _variable_with_weight_decay('weights',
                                         shape=[5, 5, 64, 64],
                                         stddev=5e-2,
                                         wd=0.0)
    conv = tf.nn.conv2d(pool2, kernel, [1, 1, 1, 1], padding='SAME')
    biases = _variable_on_cpu('biases', [64], tf.constant_initializer(0.0))
    bias = tf.nn.bias_add(conv, biases)
    conv3 = tf.nn.relu(bias, name=scope.name)
    _activation_summary(conv3)
  # pool3
  pool3 = tf.nn.max_pool(conv3, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1],
                         padding='SAME', name='pool3')
  # norm3
  norm3 = tf.nn.lrn(pool3, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75,
                    name='norm3')
  # conv4
  with tf.variable_scope('conv4') as scope:
    kernel = _variable_with_weight_decay('weights',
                                         shape=[5, 5, 64, 128],
                                         stddev=5e-2,
                                         wd=0.0)
    conv = tf.nn.conv2d(norm3, kernel, [1, 1, 1, 1], padding='SAME')
    biases = _variable_on_cpu('biases', [128], tf.constant_initializer(0.1))
    bias = tf.nn.bias_add(conv, biases)
    conv4 = tf.nn.relu(bias, name=scope.name)
    _activation_summary(conv4)
  # norm4
  norm4 = tf.nn.lrn(conv4, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75,
                    name='norm4')
  # pool4
  pool4 = tf.nn.max_pool(norm4, ksize=[1, 3, 3, 1],
                         strides=[1, 2, 2, 1], padding='SAME', name='pool4')
  ###
  # local5
  with tf.variable_scope('local5') as scope:
    # Move everything into depth so we can perform a single matrix multiply.
    # NOTE: the reshape hard-wires the graph to FLAGS.batch_size.
    reshape = tf.reshape(pool4, [FLAGS.batch_size, -1])
    dim = reshape.get_shape()[1].value
    weights = _variable_with_weight_decay('weights', shape=[dim, 384],
                                          stddev=0.04, wd=0.004)
    biases = _variable_on_cpu('biases', [384], tf.constant_initializer(0.1))
    local5 = tf.nn.relu(tf.matmul(reshape, weights) + biases, name=scope.name)
    _activation_summary(local5)
  # local6
  with tf.variable_scope('local6') as scope:
    weights = _variable_with_weight_decay('weights', shape=[384, 192],
                                          stddev=0.04, wd=0.004)
    biases = _variable_on_cpu('biases', [192], tf.constant_initializer(0.1))
    local6 = tf.nn.relu(tf.matmul(local5, weights) + biases, name=scope.name)
    _activation_summary(local6)
  # softmax, i.e. softmax(WX + b)
  with tf.variable_scope('softmax_linear') as scope:
    weights = _variable_with_weight_decay('weights', [192, NUM_CLASSES],
                                          stddev=1 / 192.0, wd=0.0)
    biases = _variable_on_cpu('biases', [NUM_CLASSES],
                              tf.constant_initializer(0.0))
    softmax_linear = tf.add(tf.matmul(local6, weights), biases, name=scope.name)
    _activation_summary(softmax_linear)
  return softmax_linear
def loss(logits, labels):
  """Add L2Loss to all the trainable variables.

  Adds the mean cross-entropy to the 'losses' collection and returns the
  sum of that collection (cross entropy plus all weight-decay terms).

  Args:
    logits: Logits from inference().
    labels: Labels from distorted_inputs or inputs(). 1-D tensor
      of shape [batch_size]
  Returns:
    Loss tensor of type float.
  """
  int_labels = tf.cast(labels, tf.int64)
  per_example = tf.nn.sparse_softmax_cross_entropy_with_logits(
      logits, int_labels, name='cross_entropy_per_example')
  mean_xent = tf.reduce_mean(per_example, name='cross_entropy')
  tf.add_to_collection('losses', mean_xent)
  return tf.add_n(tf.get_collection('losses'), name='total_loss')
def _add_loss_summaries(total_loss):
  """Add summaries for losses in eye model.

  Keeps an exponential moving average of every individual loss and of the
  total loss, and attaches raw/averaged scalar summaries for each.

  Args:
    total_loss: Total loss from loss().
  Returns:
    loss_averages_op: op for generating moving averages of losses.
  """
  loss_averages = tf.train.ExponentialMovingAverage(0.9, name='avg')
  tracked = tf.get_collection('losses') + [total_loss]
  loss_averages_op = loss_averages.apply(tracked)
  for l in tracked:
    # The raw value is tagged '(raw)'; the moving average keeps the
    # original loss name so dashboards show the smoothed curve by default.
    tf.scalar_summary(l.op.name + ' (raw)', l)
    tf.scalar_summary(l.op.name, loss_averages.average(l))
  return loss_averages_op
def train(total_loss, global_step):
  """Train eye model.

  Create an optimizer and apply to all trainable variables. Add moving
  average for all trainable variables.

  Args:
    total_loss: Total loss from loss().
    global_step: Integer Variable counting the number of training steps
      processed.
  Returns:
    train_op: op for training.
  """
  # Variables that affect learning rate.
  num_batches_per_epoch = NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN / FLAGS.batch_size
  decay_steps = int(num_batches_per_epoch * NUM_EPOCHS_PER_DECAY)
  # Decay the learning rate exponentially based on the number of steps.
  lr = tf.train.exponential_decay(INITIAL_LEARNING_RATE,
                                  global_step,
                                  decay_steps,
                                  LEARNING_RATE_DECAY_FACTOR,
                                  staircase=True)
  tf.scalar_summary('learning_rate', lr)
  # Generate moving averages of all losses and associated summaries.
  loss_averages_op = _add_loss_summaries(total_loss)
  # Compute gradients. The control dependency forces the loss-average
  # update to run before each training step.
  with tf.control_dependencies([loss_averages_op]):
    opt = tf.train.GradientDescentOptimizer(lr)
    grads = opt.compute_gradients(total_loss)
  # Apply gradients.
  apply_gradient_op = opt.apply_gradients(grads, global_step=global_step)
  # Add histograms for trainable variables.
  for var in tf.trainable_variables():
    tf.histogram_summary(var.op.name, var)
  # Add histograms for gradients.
  for grad, var in grads:
    if grad is not None:
      tf.histogram_summary(var.op.name + '/gradients', grad)
  # Track the moving averages of all trainable variables.
  variable_averages = tf.train.ExponentialMovingAverage(
      MOVING_AVERAGE_DECAY, global_step)
  variables_averages_op = variable_averages.apply(tf.trainable_variables())
  # train_op itself is a no-op; it merely depends on the gradient update
  # and the EMA update so running it performs both.
  with tf.control_dependencies([apply_gradient_op, variables_averages_op]):
    train_op = tf.no_op(name='train')
  return train_op
|
|
#----------------------------------------------------------------------
# Copyright (c) 2013-2016 Raytheon BBN Technologies
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and/or hardware specification (the "Work") to
# deal in the Work without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Work, and to permit persons to whom the Work
# is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Work.
#
# THE WORK IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE WORK OR THE USE OR OTHER DEALINGS
# IN THE WORK.
#----------------------------------------------------------------------
# Routine to check the health of a gram rack
# 1. Are the essential openstack services up? [keystone, nova, quntum, glance]?
# 2. Are the compute nodes up?
# 3. Is gram up?
# 4. Can we allocate/release a VM with gram?
# Eventually this output should report to NAGIOS or some
# other health-check system. For now it prints a report periodically
import config
import platform
import subprocess
import time
import netaddr
import logging
import fileinput
import sys
import os
import random
import open_stack_interface as osi
GRAM_HEALTHCHECK_INTERVAL = 600 # Every 10 minutes
def get_keystone_status():
    """Return True if keystone answers a basic tenant-list query.

    Best-effort probe: any failure simply reports keystone as unhealthy.
    """
    try:
        osi._execCommand("keystone tenant-list")
        return True
    except Exception:
        # Narrowed from a bare 'except:' so KeyboardInterrupt/SystemExit
        # still propagate instead of being reported as a service failure.
        return False
def get_nova_status():
    """Return True if nova can list its compute hosts.

    Best-effort probe: any failure simply reports nova as unhealthy.
    """
    try:
        osi._listHosts('compute')
        return True
    except Exception:
        # Narrowed from a bare 'except:' so interrupts still propagate.
        return False
def get_glance_status():
    """Return True if glance can list images.

    Best-effort probe: any failure simply reports glance as unhealthy.
    """
    try:
        osi._listImages()
        return True
    except Exception:
        # Narrowed from a bare 'except:' so interrupts still propagate.
        return False
def get_network_status():
    """Return True if the network service (quantum/neutron) answers net-list.

    Best-effort probe: any failure reports the network service as unhealthy.
    """
    try:
        osi._execCommand('%s net-list' % config.network_type)
        return True
    except Exception:
        # Narrowed from a bare 'except:' so interrupts still propagate;
        # also dropped an unused read of config.network_host_addr.
        return False
def compute_host_ping_status(host):
    """Return True if *host* answers a single ICMP echo request."""
    # Pass an argument list with the default shell=False so the host name
    # cannot be interpreted as shell syntax, and close /dev/null when done
    # (the previous open() leaked a file descriptor per call).
    with open(os.devnull, 'w') as devnull:
        ret = subprocess.call(["ping", "-c", "1", host],
                              stdout=devnull,
                              stderr=subprocess.STDOUT)
    return ret == 0
def get_host_status():
    """Map each compute host name to ':-)' (service up) or 'xxx' (down).

    Parses `nova-manage service list`, whose rows contain the host name and
    a smiley/xxx state column.
    """
    compute_hosts = osi._listHosts('compute').keys()
    listing = subprocess.check_output("nova-manage service list", shell=True)
    lines = listing.split('\n')
    status = {}
    for host in compute_hosts:
        # Default to down; flip to up if any row for this host shows ':-)'.
        # BUG FIX: the old code used `if line.find(host):`, which is truthy
        # whenever the host is ABSENT (find returns -1) or not at column 0,
        # and its `found` flag was never set to 1 — so results were bogus.
        status[host] = 'xxx'
        for line in lines:
            if host in line and ':-)' in line:
                status[host] = ':-)'
                break
    return status
def get_compute_status():
    """Print the nova-compute rows of `nova-manage service list`, warning on xxx."""
    # Single-argument print() works identically under Python 2 and 3.
    print("Checking the status of the compute hosts: \n")
    listing = subprocess.check_output("nova-manage service list", shell=True)
    lines = listing.split('\n')
    print(lines[0])  # header row
    for line in lines:
        # find() >= 0 means the substring is present in the row.
        if not line.find('nova-compute') < 0:
            if not line.find('xxx') < 0:
                print('WARNING: compute host is down or not properly configured: \n')
                print(line)
    print("\n")
def get_network_agent_status():
    """Print the networking agent list, flagging any agent marked 'xxx'."""
    # Single-argument print() works identically under Python 2 and 3;
    # also dropped an unused read of config.network_host_addr.
    cmd = '%s agent-list' % config.network_type
    print("Checking status of Openstack networking software modules: \n")
    listing = subprocess.check_output(cmd, shell=True)
    lines = listing.split('\n')
    print(lines[0])  # header row
    for line in lines:
        if not line.find('xxx') < 0:
            print('WARNING: the followng agent is down or not properly configured (ignore if it is a duplicate entry): \n')
            print(line)
    print("\n")
def compute_gram_port():
    """Return the GRAM aggregate manager port.

    Greps port= out of /home/gram/.gcf/gcf_config and parses the last
    match — but see the NOTE below: the parsed value is then overridden.
    """
    cmd = "grep port= /home/gram/.gcf/gcf_config"
    ret = subprocess.check_output(cmd, shell=True)
    # print "RET = " + str(ret) + " " + str(type(str(ret)))
    lines = ret.split('\n');
    # print "LINES = " + str(lines) + " " + str(len(lines))
    # split('\n') leaves a trailing empty entry, so -2 is the last real line.
    line = lines[len(lines)-2]
    # print "LINE = " + str(line)
    parts = line.split('=')
    gram_port = int(parts[1])
    # NOTE(review): this unconditional override makes the grep/parse above
    # dead code; it looks like debug residue — confirm whether the port
    # should really be hard-coded to 5001.
    gram_port = 5001
    return gram_port
def compute_am_status(hostname):
    """Return True if the GRAM aggregate manager answers omni getversion."""
    port = compute_gram_port()
    cmd = "/opt/gcf/src/omni.py -V3 -a https://%s:%d getversion" % (hostname,
                                                                    port)
    output = subprocess.check_output(cmd, shell=True,
                                     stderr=subprocess.STDOUT)
    # omni reports errors by printing 'Failed' somewhere in its output.
    return 'Failed' not in output
def compute_gram_status(hostname):
    """Exercise a full GRAM allocate/provision/delete cycle against *hostname*.

    Writes a throwaway one-node request rspec, creates a randomly named
    slice, then allocates, provisions and deletes it via omni, printing
    success/failure at each stage. Returns True only if all three stages
    succeeded.
    """
    gram_port = compute_gram_port()
    # Random suffix avoids collisions with slices left over from prior runs.
    slice_name = "DUMMY" + str(random.randint(1,100000))
    rspec_name = "/tmp/dummy.rspec"
    f = open(rspec_name, 'w')
    f.write('<rspec type="request" xmlns="http://www.geni.net/resources/rspec/3" \
            xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" \
            xsi:schemaLocation="http://www.geni.net/resources/rspec/3 \
            http://www.geni.net/resources/rspec/3/request.xsd"><node client_id="foo"/></rspec>\n');
    f.close()
    create_tmpl = "/opt/gcf/src/omni.py -V3 -a https://%s:%d createslice %s %s"
    create_cmd = create_tmpl % (hostname, gram_port, slice_name, rspec_name)
    #print "Creating slice: " + create_cmd
    ret = subprocess.check_output(create_cmd, shell=True, stderr=subprocess.STDOUT)
    create_tmpl = "/opt/gcf/src/omni.py -V3 -a https://%s:%d allocate %s %s"
    create_cmd = create_tmpl % (hostname, gram_port, slice_name, rspec_name)
    #print "Allocating: " + create_cmd
    ret = subprocess.check_output(create_cmd, shell=True, stderr=subprocess.STDOUT)
    # print "RET = " + ret
    # omni reports success by phrase; grep the output for each stage.
    create_success = ret.find('Allocated slivers') >= 0
    if create_success:
        print "Allocate - success"
    else:
        print "Allocate - failure"
        print ret
    provision_tmpl = "/opt/gcf/src/omni.py -V3 -a https://%s:%d provision %s"
    provision_cmd = provision_tmpl % (hostname, gram_port, slice_name)
    #print "Provisioning slice: " + create_cmd
    ret = subprocess.check_output(provision_cmd, shell=True, stderr=subprocess.STDOUT)
    # print "RET = " + ret
    provision_success = ret.find('Provisioned slivers') >= 0
    if provision_success:
        print "Provision - success"
    else:
        print "Provision - failure"
        print ret
    delete_tmpl = "/opt/gcf/src/omni.py -V3 -a https://%s:%d delete %s"
    delete_cmd = delete_tmpl % (hostname, gram_port, slice_name)
    #print "Deleting slice: " + delete_cmd
    ret = subprocess.check_output(delete_cmd, shell=True, stderr=subprocess.STDOUT)
    # print "RET = " + ret
    delete_success = ret.find('Result Summary: Deleted') >= 0
    if delete_success:
        print 'Delete - success'
    else:
        print 'Delete - failure'
    return create_success and provision_success and delete_success
def _getMgmtNamespace() :
    """
    Return the name of the network namespace that carries both the
    management network and the external (public) network, or None.

    Lists namespaces (locally on grizzly, over ssh on the network node
    otherwise) and inspects each one's ifconfig output for the management
    broadcast address and the first public subnet address.
    """
    mgmt_addr = (netaddr.IPNetwork(config.management_network_cidr)).broadcast
    public_addr = config.public_subnet_start_ip
    net_node_addr = config.network_host_addr
    ssh_prefix = 'ssh gram@' + net_node_addr + ' sudo '
    # get a list of the namespaces
    if config.openstack_type == 'grizzly':
        command = 'ip netns list'
    else:
        command = ssh_prefix + 'ip netns list'
    print "Checking for management namespace.\n"
    output = osi._execCommand(command)
    output_lines = output.split('\n')
    # check for both public and mgmt address in each namespace
    has_mgmt = 0
    has_public = 0
    for line in output_lines:
        # NOTE(review): relies on split('\n') yielding a trailing empty
        # string to terminate; an empty line mid-list aborts the scan early.
        if not line:
            return None
        try:
            if config.openstack_type == 'grizzly':
                command = 'ip netns exec ' + line + ' ifconfig'
            else:
                command = ssh_prefix + 'ip netns exec ' + line + ' ifconfig'
            print "Checking through ifconfig return.\n"
            ifconfig = osi._execCommand(command)
        except subprocess.CalledProcessError as e:
            # Namespace may have vanished between list and exec; skip it.
            continue
        ifconfig_lines = ifconfig.split('\n')
        for ifconfig_line in ifconfig_lines:
            if str(mgmt_addr) in ifconfig_line:
                has_mgmt = 1
            if public_addr in ifconfig_line:
                has_public = 1
        if has_mgmt and has_public:
            return line
        else:
            # Reset so hits in one namespace don't leak into the next.
            has_mgmt = 0
            has_public = 0
    return None
def _setField(field,value):
    """Rewrite /etc/gram/config.json in place, setting *field* to *value*.

    fileinput with inplace=1 redirects stdout into the file, so every line
    must be written back whether or not it was modified. A matching line
    is replaced wholesale; 'mgmt_ns' is assumed to be the last entry in
    the JSON object (no trailing comma).
    """
    for line in fileinput.input('/etc/gram/config.json', inplace=1):
        if field in line:
            if field == 'mgmt_ns':
                line = line.replace(line,'    "' + field + '": "' + value + '"\n' )
            else:
                line = line.replace(line,'    "' + field + '": "' + value + '",\n' )
        sys.stdout.write(line)
def check_mgmt_ns(recreate=False):
    """Verify (and interactively repair) the management network namespace.

    If the namespace is missing (or recreate=True), repeatedly restarts the
    L3 agent to recover it; failing that, offers to rebuild the management
    and public networks from scratch via quantum/neutron commands. Finally
    syncs the discovered namespace name into /etc/gram/config.json and
    restarts gram-am if it changed.
    """
    mgmt_ns = _getMgmtNamespace()
    conf_mgmt_ns = config.mgmt_ns
    mgmt_net_name = config.management_network_name
    mgmt_net_cidr = config.management_network_cidr
    mgmt_net_vlan = config.management_network_vlan
    public_subnet_start_ip = config.public_subnet_start_ip
    public_subnet_end_ip = config.public_subnet_end_ip
    public_gateway_ip = config.public_gateway_ip
    public_subnet_cidr = config.public_subnet_cidr
    net_node_addr = config.network_host_addr
    network_conf = "/etc/%s/l3_agent.ini" % config.network_type
    if not mgmt_ns or recreate:
        print "WARNING: Management namespace NOT found"
        # Restart the L3 agent locally (quantum) or on the network node.
        if config.network_type == 'quantum':
            nscmd = 'sudo quantum-l3-agent restart'
        else:
            nscmd = 'ssh gram@' + net_node_addr + \
                ' sudo service neutron-l3-agent restart'
        for x in range(0,10):
            print "Restarting L3 service to attempt to recover the namespace - attempt " + str(x)
            osi._execCommand(nscmd)
            time.sleep(20)
            mgmt_ns = _getMgmtNamespace()
            if mgmt_ns:
                break
        if not mgmt_ns:
            print "WARNING: Unable to recover management namespace"
            # Interactive last resort: rebuild the networks from scratch.
            input_var = raw_input("Do you wish to recreate the management network? [y/N]: ")
            if input_var == 'y':
                input_var = raw_input("You must delete 'externalRouter' (router),'public' (network) and " + mgmt_net_name + " (network). Using the Horizon interface is recommended. Have you done this and are ready to proceed? [y/N] ")
                if input_var == 'y':
                    cmd = ("%s net-create " + mgmt_net_name + " --provider:network_type vlan --provider:physical_network physnet2 --provider:segmentation_id " + mgmt_net_vlan + " --shared") % config.network_type
                    osi._execCommand(cmd)
                    cmd = ("%s subnet-create " + mgmt_net_name + " " + mgmt_net_cidr) % config.network_type
                    output = osi._execCommand(cmd)
                    MGMT_SUBNET_ID = osi._getValueByPropertyName(output, 'id')
                    cmd = ("%s net-create public --router:external=True") % config.network_type
                    output = osi._execCommand(cmd)
                    PUBLIC_NET_ID = osi._getValueByPropertyName(output, 'id')
                    cmd = ("%s subnet-create --allocation_pool" + \
                               " start=" + public_subnet_start_ip + \
                               ",end=" + public_subnet_end_ip + \
                               " --gateway=" + public_gateway_ip + \
                               " " + str(PUBLIC_NET_ID) + " " + public_subnet_cidr + \
                               " -- --enable_dhcp=False") % config.network_type
                    output = osi._execCommand(cmd)
                    cmd = ("%s router-create externalRouter") % config.network_type
                    output = osi._execCommand(cmd)
                    EXTERNAL_ROUTER_ID = osi._getValueByPropertyName(output, 'id')
                    cmd = ("%s router-gateway-set externalRouter " + PUBLIC_NET_ID) % config.network_type
                    output = osi._execCommand(cmd)
                    cmd = ("%s router-interface-add externalRouter " + MGMT_SUBNET_ID) % config.network_type
                    output = osi._execCommand(cmd)
                    if config.openstack_type == "juno":
                        print "Sending public net id to the network node.\n"
                        cmd = "ssh gram@" + net_node_addr + " echo " + PUBLIC_NET_ID + " > /home/gram/neutron_public_net"
                        output = osi._execCommand(cmd)
                        print "Sending external router id to the network node.\n"
                        cmd = "ssh gram@" + net_node_addr + " echo " + EXTERNAL_ROUTER_ID + " > /home/gram/neutron_ext_router"
                        output = osi._execCommand(cmd)
                        print "Rewriting network node neutron l3 agent config files.\n"
                        cmd = "ssh gram@" + net_node_addr + " sudo /home/gram/gram/juno/install/network_files/synch_control_network.sh"
                        osi._execCommand(cmd)
                    else:
                        osi._execCommand("service neutron-l3-agent restart")
                    mgmt_ns = _getMgmtNamespace()
    if mgmt_ns:
        if conf_mgmt_ns and conf_mgmt_ns == mgmt_ns:
            print "Found management namespace and it matches config"
        elif conf_mgmt_ns:
            print "WARNING: Found management namespace but it does not match config"
            print "Rewriting config value"
            # Persist the new namespace name and restart gram-am to pick it up.
            _setField('mgmt_ns',mgmt_ns)
            osi._execCommand("service gram-am restart")
def check_openstack_services():
print 'checking OpenStack services...'
if config.openstack_type == "juno":
services = ['nova-api','nova-cert','nova-conductor','nova-consoleauth ','nova-novncproxy','nova-scheduler', 'neutron-server', 'glance-registry','glance-api','keystone']
remote_services = ['neutron-dhcp-agent','neutron-metadata-agent', 'neutron-l3-agent','neutron-plugin-openvswitch-agent']
else:
services = ['nova-api','nova-cert','nova-conductor','nova-consoleauth ','nova-novncproxy','nova-scheduler', 'glance-registry','glance-api','keystone',
'quantum-dhcp-agent','quantum-metadata-agent','quantum-server','quantum-l3-agent','quantum-plugin-openvswitch-agent']
remote_services = []
for service in services:
cmd = 'service ' + service + ' status'
result = osi._execCommand(cmd)
if not result.find('stop') < 0:
print 'Warning: the following service is not running, will attempt to restart it - ' + service
cmd = 'service ' + service + ' restart'
osi._execCommand(cmd)
cmd = 'service ' + service + ' status'
result = osi._execCommand(cmd)
if result.find('stop'):
print 'Error: the following service is still not running, check logs in /var/logs'
else:
print service + ' - running'
net_node_addr = osi._getConfigParam('/etc/gram/config.json','network_host_addr')
for service in remote_services:
print 'Network node status for ' + service + '\n'
cmd = 'ssh gram@' + net_node_addr + ' service ' + service + ' status'
result = osi._execCommand(cmd)
if not result.find('stop') < 0:
print 'Warning: the following service is not running, will attempt to restart it - ' + service
cmd = 'ssh gram@' + net_node_addr + ' service ' + service + ' restart'
cmd = 'service ' + service + ' restart'
osi._execCommand(cmd)
print 'Checking status\n'
cmd = 'ssh gram@' + net_node_addr + ' service ' + service + ' status'
result = osi._execCommand(cmd)
if result.find('stop'):
print 'Error: the following service is still not running, check logs in /var/logs'
else:
print service + ' - running'
def check_gram_services():
print 'Checking GRAM services...'
services = ['gram-am','gram-ctrl','gram-vmoc','gram-ch']
for service in services:
cmd = 'service ' + service + ' status'
result = osi._execCommand(cmd)
if not result.find('stop') < 0:
print 'Warning: the following service is not running, will attempt to restart it - ' + service
cmd = 'service ' + service + ' restart'
osi._execCommand(cmd)
cmd = 'service ' + service + ' status'
result = osi._execCommand(cmd)
if result.find('stop'):
print 'Error: the following service is still not running - ' + service + '\nCheck logs in /var/logs/upstart/'
else:
print service + ' - running'
def perform_gram_healthcheck():
print "Starting healthcheck"
check_gram_services()
check_openstack_services()
check_mgmt_ns()
platform_info = platform.uname()
hostname = platform_info[1]
get_compute_status()
get_network_agent_status()
keystone_status = get_keystone_status()
if keystone_status:
print "Keystone - pass"
else:
print "Keystone - fail"
nova_status = get_nova_status()
if nova_status:
print "Nova - pass"
else:
print "Nova - fail"
glance_status = get_glance_status()
if glance_status:
print "Glance - pass"
else:
print "Glance - fail"
network_status = get_network_status()
if network_status:
print "Network - pass"
else:
print "Network - fail"
#host_status = {}
#if nova_status:
# host_status = get_host_status()
# for state in host_status:
# if not host_status[state]:
# print "Host " + state + " is not reachable by ping"
# else:
# print "Host " + state + " is reachable by ping"
am_status = compute_am_status(hostname)
if am_status:
print "AM is up : Get-Version succeeded at AM"
else:
print "AM is down : Get-Version failed at AM"
gram_status = compute_gram_status(hostname)
if gram_status:
print "AM is functioning"
# TTD
# Create and delete a sliver
# Log it to a log file
# Turn this into a service that logs to a log file
#template = \
#"GRAM Healthcheck %s: KEY %s NOVA %s GLN %s" + \
# " QNTM %s HOST %s AM %s GRAM %s"
#print template % \
# (hostname, keystone_status, nova_status, \
# glance_status, neutron_status, str(host_status), \
# am_status, gram_status)
if __name__ == "__main__":
    # Load the GRAM configuration and run a single health-check pass.
    config.initialize('/etc/gram/config.json')
    logging.basicConfig()
    # An optional "recreate" argument forces the management namespace
    # to be rebuilt before the health check runs.
    if len(sys.argv) > 1 and sys.argv[1] == 'recreate':
        check_mgmt_ns(True)
    perform_gram_healthcheck()
    # To run continuously, loop on perform_gram_healthcheck() with
    # time.sleep(GRAM_HEALTHCHECK_INTERVAL).
|
|
#! /usr/bin/env python
# File Docstring
"""Chapter: Game
This is where the actual meat of the game
takes place."""
# Import Modules
import __init__
import sys
import random
import pygame
from pygame.locals import *
# Import Game Data
from DIR_sprite.DAT_token import SPT_token
from DIR_sprite.DAT_spotselector import SPT_spotselector
from DIR_audio.DAT_loadaudio import *
# Chapter Class
class CHP_game:
"""The game chapter."""
def __init__(self, param):
"""Initialize."""
# Token Count
self.TokenPool = ['red', 'red', 'red', 'red', 'red', 'red', 'red', 'red', 'red', 'red', 'red', 'red',
'orange', 'orange', 'orange', 'orange', 'orange', 'orange', 'orange', 'orange', 'orange', 'orange', 'orange',
'yellow', 'yellow', 'yellow', 'yellow', 'yellow', 'yellow', 'yellow', 'yellow', 'yellow', 'yellow',
'green', 'green', 'green', 'green', 'green', 'green', 'green', 'green', 'green',
'blue', 'blue', 'blue', 'blue', 'blue', 'blue', 'blue', 'blue',
'purple', 'purple', 'purple', 'purple', 'purple', 'purple', 'purple']
if param[0] == 0: pass
elif param[0] > 0:
self.TokenPool.append('cyan')
self.TokenPool.append('cyan')
self.TokenPool.append('cyan')
self.TokenPool.append('cyan')
self.TokenPool.append('cyan')
self.TokenPool.append('cyan')
if param[0] > 1:
self.TokenPool.append('magenta')
self.TokenPool.append('magenta')
self.TokenPool.append('magenta')
self.TokenPool.append('magenta')
self.TokenPool.append('magenta')
if param[0] > 2:
self.TokenPool.append('brown')
self.TokenPool.append('brown')
self.TokenPool.append('brown')
self.TokenPool.append('brown')
if param[0] > 3:
self.TokenPool.append('gray')
self.TokenPool.append('gray')
self.TokenPool.append('gray')
# Special Tokens
if param[1] == 0: pass
elif param[1] == 1:
self.TokenPool.append('black')
self.TokenPool.append('black')
elif param[1] == 2:
self.TokenPool.append('white')
elif param[1] == 3:
self.TokenPool.append('black')
self.TokenPool.append('black')
self.TokenPool.append('white')
# Timer
self.Timer = (param[2] + 3)
# Random Drop Count
self.RDC = param[3] + 1
# Height
self.Height = param[4] * 2
# Music
if param[5] == 0:
self.Music = False
elif param[5] == 1:
self.Music = MUS_Brix = Load_Music('MUS_Brix.ogg')
elif param[5] == 2:
self.Music = MUS_Blox = Load_Music('MUS_Blox.ogg')
else:
pass
    def Start(self, screen, constant):
        """Run the chapter's main loop at 60 FPS until the player quits.

        screen   -- project display wrapper exposing RECT, FRAME and Update().
        constant -- shared resource dict; this method reads 'Clock', 'Font',
                    'BigFont', 'Spritesheet' and 'HighScore', and writes
                    'HighScore' back when the round ends.

        The loop has four mutually exclusive states: game over, round clear,
        paused, and running.  Columns are capped at 11 tokens; a column
        reaching 0 tokens clears the round, all five columns reaching 11
        ends the game.
        """
        # Prepare any data before running the loop.
        pygame.event.get() # Purge the Input Queue.
        random.seed()
        random.shuffle(self.TokenPool)
        if self.Music != False:
            pygame.mixer.music.play(-1)
        elif self.Music == False:
            pygame.mixer.music.stop()
        # Per-session state flags and counters.
        CLEARING = False        # a line-clear animation is in progress
        PAUSED = False
        GAME_OVER = False
        ROUND_CLEAR = False
        BLACK_CLEAR = None      # color id scheduled for a black-token clear
        WHITE_CLEAR = False     # white token clears everything above row 2
        SCORE = 0
        HISCORE = constant['HighScore']
        TIMER = self.Timer
        DELAY = 60              # frame countdown; one TIMER tick per 60 frames
        ROUNDS = 0
        # PAUSE SCREEN
        PauseScreen = pygame.Surface(screen.RECT.size).convert()
        PauseScreen.fill((0, 0, 0))
        RCT_PauseScreen = PauseScreen.get_rect()
        # Pause Font
        PauseFont = constant['BigFont'].render("P A U S E", True, (255, 255, 255))
        RCT_PauseFont = PauseFont.get_rect()
        RCT_PauseFont.center = RCT_PauseScreen.center
        PauseScreen.blit(PauseFont, RCT_PauseFont.topleft)
        # Return to the game
        ReturnInfo = constant['Font'].render("Press ESC to unpause.", True,
                                             (255, 255, 255))
        RCT_ReturnInfo = ReturnInfo.get_rect()
        RCT_ReturnInfo.midtop = RCT_PauseFont.midbottom
        PauseScreen.blit(ReturnInfo, RCT_ReturnInfo.topleft)
        # Exit the game
        ExitInfo = constant['Font'].render("Press SPACE or ENTER to quit this round.", True,
                                           (255, 255, 255))
        RCT_ExitInfo = ExitInfo.get_rect()
        RCT_ExitInfo.midtop = RCT_ReturnInfo.midbottom
        PauseScreen.blit(ExitInfo, RCT_ExitInfo.topleft)
        # GAME OVER SCREEN
        GameOver = constant['BigFont'].render("GAME OVER", True,
                                              (255, 255, 255), (0, 0, 0))
        RCT_GameOver = GameOver.get_rect()
        RCT_GameOver.center = screen.RECT.center
        # ROUND CLEAR SCREEN
        RoundClear = constant['BigFont'].render("ROUND CLEAR", True,
                                                (255, 255, 255), (0, 0, 0))
        RCT_RoundClear = RoundClear.get_rect()
        RCT_RoundClear.center = screen.RECT.center
        # Background (random pastel tint per session)
        bg = pygame.Surface(screen.RECT.size).convert()
        bg.fill((random.randrange(100,201), random.randrange(100, 201), random.randrange(100, 201)))
        # Token Container
        TokenContainer = constant['Spritesheet'].Apply_Graphic((602, 1, 280, 480))
        RCT_TokenContainer = TokenContainer.get_rect()
        RCT_TokenContainer.center = screen.RECT.center
        bg.blit(TokenContainer, RCT_TokenContainer.topleft)
        # Playable 5x11 area (5 columns of 40px, 11 rows of 40px).
        VisualGrid = pygame.Rect(0, 0, 200, 440)
        VisualGrid.midtop = RCT_TokenContainer.midtop
        # Score
        CPT_Score = constant['BigFont'].render("SCORE:", True,
                                               (255, 255, 255))
        RCT_CPT_Score = CPT_Score.get_rect()
        RCT_CPT_Score.topleft = RCT_TokenContainer.topright
        bg.blit(CPT_Score, RCT_CPT_Score.topleft)
        Score = constant['BigFont'].render('%s' % SCORE, True,
                                           (0, 255, 0), (0, 0, 0))
        RCT_Score = Score.get_rect()
        RCT_Score.topleft = RCT_CPT_Score.bottomleft
        # High Score
        CPT_HiScore = constant['BigFont'].render("HIGH SCORE:", True,
                                                 (255, 255, 255))
        RCT_CPT_HiScore = CPT_HiScore.get_rect()
        RCT_CPT_HiScore.topleft = RCT_Score.bottomleft
        bg.blit(CPT_HiScore, RCT_CPT_HiScore.topleft)
        HiScore = constant['BigFont'].render('%s' % HISCORE, True,
                                             (255, 0, 0), (0, 0, 0))
        # NOTE(review): this takes the rect of Score, not HiScore — looks
        # like a copy/paste slip; the rects coincide only while both numbers
        # render at the same size.  Confirm before changing.
        RCT_HiScore = Score.get_rect()
        RCT_HiScore.topleft = RCT_CPT_HiScore.bottomleft
        # Timer
        CPT_Timer = constant['BigFont'].render("TIME:", True,
                                               (255, 255, 255))
        RCT_CPT_Timer = CPT_Timer.get_rect()
        RCT_CPT_Timer.topleft = RCT_HiScore.bottomleft
        bg.blit(CPT_Timer, RCT_CPT_Timer.topleft)
        Timer = constant['BigFont'].render('0', True,
                                           (255, 255, 0), (0, 0, 0))
        RCT_Timer = Timer.get_rect()
        RCT_Timer.topleft = RCT_CPT_Timer.bottomleft
        # Next Token
        CPT_NextToken = constant['BigFont'].render("NEXT:", True,
                                                   (255, 255, 255))
        RCT_CPT_NextToken = CPT_NextToken.get_rect()
        RCT_CPT_NextToken.topleft = RCT_Timer.bottomleft
        bg.blit(CPT_NextToken, RCT_CPT_NextToken.topleft)
        NT_Container = pygame.Rect(0, 0, 40, 40)
        NT_Container.midtop = RCT_CPT_NextToken.bottomright
        # Current Token
        CPT_CurrentToken = constant['BigFont'].render("CURRENT:", True,
                                                      (255, 255, 255))
        RCT_CPT_CurrentToken = CPT_CurrentToken.get_rect()
        RCT_CPT_CurrentToken.midtop = NT_Container.midbottom
        RCT_CPT_CurrentToken.left = RCT_CPT_NextToken.left
        bg.blit(CPT_CurrentToken, RCT_CPT_CurrentToken.topleft)
        CT_Container = pygame.Rect(0, 0, 40, 40)
        CT_Container.midtop = RCT_CPT_CurrentToken.bottomright
        CT_Container.centerx = NT_Container.centerx
        # RDC
        CPT_RDC = constant['BigFont'].render("Random Drop:", True,
                                             (255, 255, 255))
        RCT_CPT_RDC = CPT_RDC.get_rect()
        RCT_CPT_RDC.topright = RCT_TokenContainer.topleft
        bg.blit(CPT_RDC, RCT_CPT_RDC.topleft)
        RDC = constant['BigFont'].render('%s' % self.RDC, True,
                                         (255, 255, 255))
        RCT_RDC = RDC.get_rect()
        RCT_RDC.midtop = RCT_CPT_RDC.midbottom
        bg.blit(RDC, RCT_RDC.topleft)
        # Rounds
        CPT_Rounds = constant['BigFont'].render("Rounds:", True,
                                                (255, 255, 255))
        RCT_CPT_Rounds = CPT_Rounds.get_rect()
        RCT_CPT_Rounds.left = RCT_CPT_RDC.left
        RCT_CPT_Rounds.top = RCT_RDC.bottom
        bg.blit(CPT_Rounds, RCT_CPT_Rounds.topleft)
        Rounds = constant['BigFont'].render('%s' % ROUNDS, True,
                                            (255, 255, 255))
        RCT_Rounds = Rounds.get_rect()
        RCT_Rounds.topleft = RCT_CPT_Rounds.bottomleft
        RCT_Rounds.centerx = RCT_RDC.centerx
        # Spot Selector: five 40px slots along the bottom of the grid.
        SS = SPT_spotselector(constant['Spritesheet'])
        SpotChoice = []
        for _ in range(0, 5):
            SpotChoice.append(pygame.Rect(0, 0, 40, 40))
        SpotChoice[0].bottomleft = VisualGrid.bottomleft
        for _ in range(1,5):
            SpotChoice[_].bottomleft = SpotChoice[_-1].bottomright
        SpotSelector = pygame.sprite.RenderUpdates()
        SpotSelector.add(SS)
        Spot = 2
        # Data Grid: list of 5 columns, bottom of the stack at index 0.
        DataGrid = [[], [], [], [], []]
        AllTokens = pygame.sprite.RenderUpdates()
        # Add tokens to...
        for column in DataGrid: # Data Grid
            for heightcount in range(0, self.Height):
                newtoken = SPT_token(constant['Spritesheet'],
                                     random.choice(self.TokenPool))
                column.append(newtoken)
            for _ in column: # Group
                AllTokens.add(_)
        # Apply the tokens to the Visual Grid.
        for colcount in range(0, 5):
            DataGrid[colcount][0].RECT.center = SpotChoice[colcount].center
            for nextcount in range(1, self.Height):
                DataGrid[colcount][nextcount].RECT.midbottom = \
                    DataGrid[colcount][nextcount-1].RECT.midtop
        # Prepare for action...
        NextToken = random.choice(self.TokenPool)
        CurrentToken = random.choice(self.TokenPool)
        Ready = constant['BigFont'].render("R E A D Y", True,
                                           (255, 255, 255), (255, 0, 0))
        RCT_Ready = Ready.get_rect()
        RCT_Ready.center = screen.RECT.center
        screen.FRAME.blit(bg, (0, 0))
        screen.FRAME.blit(Ready, RCT_Ready.topleft)
        screen.Update()
        pygame.time.wait(1000)
        SFC_NextToken = constant['Spritesheet'].\
                        Apply_Graphic(SPT_token.DIC_color[NextToken][1])
        SFC_CurrentToken = constant['Spritesheet'].\
                           Apply_Graphic(SPT_token.DIC_color[CurrentToken][1])
        # Sound Effects (flags prevent a sound from re-triggering each frame)
        SND_GameOver = Load_Sound('SND_GameOver.ogg') ; SND_GameOver_Flag = False
        SND_Pause = Load_Sound('SND_Pause.ogg')
        SND_SpotSelect = Load_Sound('SND_SpotSelect.ogg')
        SND_SpotRotate = Load_Sound('SND_SpotRotate.ogg')
        SND_Place = Load_Sound('SND_Place.ogg')
        SND_NoPlace = Load_Sound('SND_NoPlace.ogg')
        SND_Clear = Load_Sound('SND_Clear.ogg') ; SND_Clear_Flag = False
        SND_RoundClear = Load_Sound('SND_RoundClear.ogg') ; SND_RoundClear_Flag = False
        SND_RandomDrop = Load_Sound('SND_RandomDrop.ogg')
        # Main Loop
        LS = True # Loop Switch
        while LS:
            constant['Clock'].tick(60) # Clock 60 Frames Per Second
            # In the event of a token-placing bug, remove any token higher than 11...
            for column in DataGrid:
                if len(column) > 11:
                    ExtraTokens = column[11:]
                    del column[11:]
                    for _ in ExtraTokens:
                        _.kill()
            # Check the grid for clear columns.
            # True = full (11), False = partial, 'CLEAR' = empty column.
            FullGrid = [None, None, None, None, None]
            for colcount in range(5):
                if len(DataGrid[colcount]) == 11:
                    FullGrid[colcount] = True
                elif len(DataGrid[colcount]) > 0 and len(DataGrid[colcount]) < 11:
                    FullGrid[colcount] = False
                elif len(DataGrid[colcount]) == 0:
                    FullGrid[colcount] = 'CLEAR'
            if FullGrid == [True, True, True, True, True]: # If the entire grid is full...
                GAME_OVER = True
            elif 'CLEAR' in FullGrid: # If at least one stack is cleared...
                ROUND_CLEAR = True
            # Random Drop Count: fires once per timer expiry (DELAY == 60
            # only on the frame the timer wraps).
            if TIMER == 0 and DELAY == 60 and GAME_OVER == False:
                SND_RandomDrop.play()
                RemainingSpace = 0
                # For every column in the data grid...
                for column in DataGrid:
                    RemainingSpace += (11 - len(column))
                TokenDrops = 0
                if RemainingSpace > self.RDC:
                    TokenDrops = self.RDC
                elif RemainingSpace <= self.RDC:
                    TokenDrops = RemainingSpace
                for _ in range(TokenDrops):
                    # Retry until a non-full column is picked; RemainingSpace
                    # above guarantees one exists.
                    StackCheck = True
                    while StackCheck == True:
                        RandomSpot = random.choice(DataGrid)
                        if len(RandomSpot) == 11:
                            pass
                        else:
                            StackCheck = False
                    RandomToken = SPT_token(constant['Spritesheet'],
                                            random.choice(self.TokenPool))
                    RandomSpot.append(RandomToken)
                    AllTokens.add(RandomSpot[-1])
            # Black Clear: clear every token of the scheduled color.
            if BLACK_CLEAR:
                for col in DataGrid:
                    for token in col:
                        if token.IDEN == BLACK_CLEAR:
                            token.CLEAR = True
                BLACK_CLEAR = None
            # White Clear: clear everything above the bottom two rows.
            if WHITE_CLEAR == True:
                for col in DataGrid:
                    for token in col[2:]:
                        token.CLEAR = True
                WHITE_CLEAR = False
            # Game Over
            if GAME_OVER == True and CLEARING == False: # If the game is over...
                if SND_GameOver_Flag == True:
                    pass
                elif SND_GameOver_Flag == False:
                    pygame.mixer.music.stop()
                    SND_GameOver.play()
                    SND_GameOver_Flag = True
                for EVT in pygame.event.get():
                    if EVT.type == QUIT: # Hard Exit
                        pygame.quit()
                        sys.exit()
                    elif EVT.type == KEYDOWN:
                        if EVT.key == K_ESCAPE or \
                           EVT.key == K_RETURN or \
                           EVT.key == K_SPACE:
                            constant['HighScore'] = HISCORE
                            LS = False
                    else:
                        pass
                screen.FRAME.blit(GameOver, RCT_GameOver.topleft)
            elif ROUND_CLEAR == True: # If one stack has been cleared...
                if SND_RoundClear_Flag == True:
                    pass
                elif SND_RoundClear_Flag == False:
                    pygame.mixer.music.stop()
                    SND_RoundClear.play()
                    SND_RoundClear_Flag = True
                for EVT in pygame.event.get():
                    if EVT.type == QUIT: # Hard Exit
                        pygame.quit()
                        sys.exit()
                    elif EVT.type == KEYDOWN:
                        if EVT.key == K_ESCAPE:
                            constant['HighScore'] = HISCORE
                            LS = False
                        elif EVT.key == K_RETURN or \
                             EVT.key == K_SPACE:
                            # ENTER/SPACE starts the next round: keep the
                            # score, rebuild the grid and background.
                            constant['HighScore'] = HISCORE
                            ROUND_CLEAR = False
                            SND_RoundClear_Flag = False
                            if self.Music != False:
                                pygame.mixer.music.play(-1)
                            # Reset All
                            ROUNDS += 1
                            TIMER = self.Timer
                            DataGrid = [[], [], [], [], []]
                            AllTokens = pygame.sprite.RenderUpdates()
                            # Reset BG
                            bg.fill((random.randrange(100,201), random.randrange(100, 201), random.randrange(100, 201)))
                            bg.blit(TokenContainer, RCT_TokenContainer.topleft)
                            bg.blit(CPT_Score, RCT_CPT_Score.topleft)
                            bg.blit(CPT_HiScore, RCT_CPT_HiScore.topleft)
                            bg.blit(CPT_Timer, RCT_CPT_Timer.topleft)
                            bg.blit(CPT_NextToken, RCT_CPT_NextToken.topleft)
                            bg.blit(CPT_CurrentToken, RCT_CPT_CurrentToken.topleft)
                            bg.blit(CPT_RDC, RCT_CPT_RDC.topleft)
                            bg.blit(RDC, RCT_RDC.topleft)
                            bg.blit(CPT_Rounds, RCT_CPT_Rounds.topleft)
                            # Add tokens to...
                            for column in DataGrid: # Data Grid
                                for heightcount in range(0, self.Height):
                                    newtoken = SPT_token(constant['Spritesheet'],
                                                         random.choice(self.TokenPool))
                                    column.append(newtoken)
                                for _ in column: # Group
                                    AllTokens.add(_)
                            # Apply the tokens to the Visual Grid.
                            for colcount in range(0, 5):
                                DataGrid[colcount][0].RECT.center = SpotChoice[colcount].center
                                for nextcount in range(1, self.Height):
                                    DataGrid[colcount][nextcount].RECT.midbottom = \
                                        DataGrid[colcount][nextcount-1].RECT.midtop
                    else:
                        pass
                screen.FRAME.blit(RoundClear, RCT_RoundClear.topleft)
            elif PAUSED == True: # If the game is paused...
                for EVT in pygame.event.get():
                    if EVT.type == QUIT: # Hard Exit
                        pygame.quit()
                        sys.exit()
                    elif EVT.type == KEYDOWN:
                        if EVT.key == K_ESCAPE:
                            PAUSED = False
                            if self.Music != False:
                                pygame.mixer.music.unpause()
                        elif EVT.key == K_RETURN or EVT.key == K_SPACE:
                            constant['HighScore'] = HISCORE
                            LS = False
                    else: pass
                screen.FRAME.blit(PauseScreen, (0, 0))
            elif PAUSED == False: # If the game is running...
                # Don't forget to pass a CLEARING flag as False.
                for EVT in pygame.event.get():
                    if EVT.type == QUIT: # Hard Exit
                        pygame.quit()
                        sys.exit()
                    elif EVT.type == KEYDOWN:
                        if EVT.key == K_ESCAPE:
                            PAUSED = True
                            pygame.mixer.music.pause()
                            SND_Pause.play()
                        elif EVT.key == K_LEFT and CLEARING == False:
                            Spot -= 1
                            if Spot < 0:
                                Spot = 0
                                SND_NoPlace.play()
                            else:
                                SND_SpotSelect.play()
                        elif EVT.key == K_RIGHT and CLEARING == False:
                            Spot += 1
                            if Spot > 4:
                                Spot = 4
                                SND_NoPlace.play()
                            else:
                                SND_SpotSelect.play()
                        # UP/DOWN rotate the selected column in place.
                        elif EVT.key == K_UP and CLEARING == False:
                            DataGrid[Spot].insert(0, DataGrid[Spot].pop())
                            SND_SpotRotate.play()
                        elif EVT.key == K_DOWN and CLEARING == False:
                            DataGrid[Spot].append(DataGrid[Spot].pop(0))
                            SND_SpotRotate.play()
                        # Number keys jump straight to a column.
                        elif EVT.key == K_1 and CLEARING == False:
                            Spot = 0
                            SND_SpotSelect.play()
                        elif EVT.key == K_2 and CLEARING == False:
                            Spot = 1
                            SND_SpotSelect.play()
                        elif EVT.key == K_3 and CLEARING == False:
                            Spot = 2
                            SND_SpotSelect.play()
                        elif EVT.key == K_4 and CLEARING == False:
                            Spot = 3
                            SND_SpotSelect.play()
                        elif EVT.key == K_5 and CLEARING == False:
                            Spot = 4
                            SND_SpotSelect.play()
                        elif (EVT.key == K_RETURN or EVT.key == K_SPACE) \
                             and CLEARING == False:
                            # Drop the current token on the selected column.
                            if FullGrid[Spot] == True:
                                SND_NoPlace.play()
                            elif FullGrid[Spot] == False:
                                SND_Place.play()
                                DataGrid[Spot].insert(0,
                                                      SPT_token(constant['Spritesheet'],
                                                                CurrentToken))
                                AllTokens.add(DataGrid[Spot][0])
                                CurrentToken = NextToken
                                NextToken = random.choice(self.TokenPool)
                                SFC_NextToken = constant['Spritesheet'].\
                                                Apply_Graphic(SPT_token.DIC_color[NextToken][1])
                                SFC_CurrentToken = constant['Spritesheet'].\
                                                   Apply_Graphic(SPT_token.DIC_color[CurrentToken][1])
                    else:
                        pass
                # Pop out any tokens that are cleared to go.
                for col in DataGrid:
                    if len(col) == 0: pass
                    elif len(col) >= 1:
                        for token in col:
                            if token.ALPHA <= 0:
                                removedtoken = col.pop(col.index(token))
                                SCORE += removedtoken.SCORE
                                removedtoken.kill()
                # Attach the token stacks on each other.
                for colcount in range(5):
                    if len(DataGrid[colcount]) == 0: pass
                    else:
                        DataGrid[colcount][0].RECT.center = SpotChoice[colcount].center
                        for nextcount in range(1, len(DataGrid[colcount])):
                            DataGrid[colcount][nextcount].RECT.midbottom = \
                                DataGrid[colcount][nextcount-1].RECT.midtop
                # Mandatory Line-Clearing Check: a row clears when every
                # column contributes the same color (black/white wildcards
                # excluded from the count).
                HeightCheck = [] # A list of each column's height.
                for col in DataGrid:
                    HeightCheck.append(len(col))
                MinimumHeight = min(HeightCheck)
                ValidCount = 0
                if MinimumHeight == 0:
                    ROUND_CLEAR = True
                elif MinimumHeight >= 1:
                    for rowcheck in range(MinimumHeight):
                        ValidCheck = []
                        for col in DataGrid:
                            ValidCheck.append(col[rowcheck])
                        ColorPool = []
                        for _ in ValidCheck:
                            if _.IDEN in ColorPool:
                                pass
                            else:
                                ColorPool.append(_.IDEN)
                        ColorCount = 0
                        for _ in ColorPool:
                            if _ == 'black' or _ == 'white':
                                pass
                            else:
                                ColorCount += 1
                        if ColorCount > 1:
                            pass
                        elif ColorCount == 1:
                            for token in ValidCheck:
                                token.CLEAR = True
                            # A black token in the row schedules a full clear
                            # of the row's (single) real color.
                            if 'black' in ColorPool:
                                for _ in ColorPool:
                                    if _ == 'black' or _ == 'white':
                                        pass
                                    else:
                                        BLACK_CLEAR = _
                            if 'white' in ColorPool:
                                WHITE_CLEAR = True
                            ValidCount += 1
                if ValidCount >= 1:
                    CLEARING = True
                    if SND_Clear_Flag == True:
                        pass
                    else:
                        SND_Clear.play()
                        SND_Clear_Flag = True
                elif ValidCount == 0:
                    CLEARING = False
                    SND_Clear_Flag = False
                # Background Info
                screen.FRAME.blit(bg, (0, 0))
                Score = constant['BigFont'].render('%s' % SCORE, True,
                                                   (0, 255, 0), (0, 0, 0))
                screen.FRAME.blit(Score, RCT_Score.topleft)
                if SCORE >= HISCORE: HISCORE = SCORE
                HiScore = constant['BigFont'].render('%s' % str(HISCORE), True,
                                                     (255, 0, 0), (0, 0, 0))
                screen.FRAME.blit(HiScore, RCT_HiScore.topleft)
                # Timer Info: the countdown is frozen while a clear animates.
                if CLEARING == False:
                    DELAY -= 1
                    if DELAY < 0:
                        DELAY = 60
                        TIMER -= 1
                        if TIMER < 0:
                            TIMER = self.Timer
                Timer = constant['BigFont'].render('%s' % (TIMER), True,
                                                   (255, 255, 0), (0, 0, 0))
                screen.FRAME.blit(Timer, RCT_Timer.topleft)
                # Next & Current Tokens
                screen.FRAME.blit(SFC_NextToken, NT_Container.topleft)
                screen.FRAME.blit(SFC_CurrentToken, CT_Container.topleft)
                # Rounds Count
                Rounds = constant['BigFont'].render('%s' % ROUNDS, True,
                                                    (255, 255, 255))
                screen.FRAME.blit(Rounds, RCT_Rounds.topleft)
                # Tokens
                AllTokens.update()
                AllTokens.draw(screen.FRAME)
                # Spot Selector
                SS.RECT.center = SpotChoice[Spot].center
                SpotSelector.update()
                SpotSelector.draw(screen.FRAME)
            # Update Screen
            screen.Update()
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from msrestazure.azure_operation import AzureOperationPoller
import uuid
from .. import models
class NetworkSecurityGroupsOperations(object):
"""NetworkSecurityGroupsOperations operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
:ivar api_version: Client API version. Constant value: "2017-03-01".
"""
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.api_version = "2017-03-01"
self.config = config
    def delete(
            self, resource_group_name, network_security_group_name, custom_headers=None, raw=False, **operation_config):
        """Deletes the specified network security group.

        This is a long-running operation: unless ``raw`` is set, it returns
        an AzureOperationPoller that issues the DELETE and then polls the
        status link until completion.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param network_security_group_name: The name of the network security
         group.
        :type network_security_group_name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :rtype:
         :class:`AzureOperationPoller<msrestazure.azure_operation.AzureOperationPoller>`
         instance that returns None, or
         :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
         if raw=true
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Construct URL (path parameters are serialized/escaped first)
        url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups/{networkSecurityGroupName}'
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'networkSecurityGroupName': self._serialize.url("network_security_group_name", network_security_group_name, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            # Per-request correlation id for service-side tracing.
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

        # Construct and send request.  The three closures below are the
        # poller's protocol: initial send, status polling, final result.
        def long_running_send():
            # Initial DELETE request.
            request = self._client.delete(url, query_parameters)
            return self._client.send(request, header_parameters, **operation_config)

        def get_long_running_status(status_link, headers=None):
            # Poll the Azure-provided status link until the LRO completes.
            request = self._client.get(status_link)
            if headers:
                request.headers.update(headers)
            return self._client.send(
                request, header_parameters, **operation_config)

        def get_long_running_output(response):
            # 200/202/204 are the accepted terminal codes; anything else
            # is surfaced as a CloudError carrying the request id.
            if response.status_code not in [202, 200, 204]:
                exp = CloudError(response)
                exp.request_id = response.headers.get('x-ms-request-id')
                raise exp
            if raw:
                client_raw_response = ClientRawResponse(None, response)
                return client_raw_response

        if raw:
            # raw mode bypasses the poller: send once, return immediately.
            response = long_running_send()
            return get_long_running_output(response)

        long_running_operation_timeout = operation_config.get(
            'long_running_operation_timeout',
            self.config.long_running_operation_timeout)
        return AzureOperationPoller(
            long_running_send, get_long_running_output,
            get_long_running_status, long_running_operation_timeout)
def get(
self, resource_group_name, network_security_group_name, expand=None, custom_headers=None, raw=False, **operation_config):
"""Gets the specified network security group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_security_group_name: The name of the network security
group.
:type network_security_group_name: str
:param expand: Expands referenced resources.
:type expand: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`NetworkSecurityGroup
<azure.mgmt.network.v2017_03_01.models.NetworkSecurityGroup>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups/{networkSecurityGroupName}'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkSecurityGroupName': self._serialize.url("network_security_group_name", network_security_group_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('NetworkSecurityGroup', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
    def create_or_update(
            self, resource_group_name, network_security_group_name, parameters, custom_headers=None, raw=False, **operation_config):
        """Creates or updates a network security group in the specified resource
        group.

        This is a long-running operation: unless ``raw`` is set, it returns
        an AzureOperationPoller that issues the PUT and then polls until the
        group is provisioned.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param network_security_group_name: The name of the network security
         group.
        :type network_security_group_name: str
        :param parameters: Parameters supplied to the create or update network
         security group operation.
        :type parameters: :class:`NetworkSecurityGroup
         <azure.mgmt.network.v2017_03_01.models.NetworkSecurityGroup>`
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :rtype:
         :class:`AzureOperationPoller<msrestazure.azure_operation.AzureOperationPoller>`
         instance that returns :class:`NetworkSecurityGroup
         <azure.mgmt.network.v2017_03_01.models.NetworkSecurityGroup>`, or
         :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
         if raw=true
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Construct URL (path parameters are serialized/escaped first)
        url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups/{networkSecurityGroupName}'
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'networkSecurityGroupName': self._serialize.url("network_security_group_name", network_security_group_name, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            # Per-request correlation id for service-side tracing.
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

        # Construct body
        body_content = self._serialize.body(parameters, 'NetworkSecurityGroup')

        # Construct and send request.  The three closures below are the
        # poller's protocol: initial send, status polling, final result.
        def long_running_send():
            # Initial PUT request carrying the serialized group definition.
            request = self._client.put(url, query_parameters)
            return self._client.send(
                request, header_parameters, body_content, **operation_config)

        def get_long_running_status(status_link, headers=None):
            # Poll the Azure-provided status link until the LRO completes.
            request = self._client.get(status_link)
            if headers:
                request.headers.update(headers)
            return self._client.send(
                request, header_parameters, **operation_config)

        def get_long_running_output(response):
            # 200 (updated) and 201 (created) are the accepted terminal
            # codes; anything else is surfaced as a CloudError.
            if response.status_code not in [201, 200]:
                exp = CloudError(response)
                exp.request_id = response.headers.get('x-ms-request-id')
                raise exp

            deserialized = None

            if response.status_code == 201:
                deserialized = self._deserialize('NetworkSecurityGroup', response)
            if response.status_code == 200:
                deserialized = self._deserialize('NetworkSecurityGroup', response)

            if raw:
                client_raw_response = ClientRawResponse(deserialized, response)
                return client_raw_response

            return deserialized

        if raw:
            # raw mode bypasses the poller: send once, return immediately.
            response = long_running_send()
            return get_long_running_output(response)

        long_running_operation_timeout = operation_config.get(
            'long_running_operation_timeout',
            self.config.long_running_operation_timeout)
        return AzureOperationPoller(
            long_running_send, get_long_running_output,
            get_long_running_status, long_running_operation_timeout)
def list_all(
self, custom_headers=None, raw=False, **operation_config):
"""Gets all network security groups in a subscription.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`NetworkSecurityGroupPaged
<azure.mgmt.network.v2017_03_01.models.NetworkSecurityGroupPaged>`
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = '/subscriptions/{subscriptionId}/providers/Microsoft.Network/networkSecurityGroups'
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(
request, header_parameters, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
deserialized = models.NetworkSecurityGroupPaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.NetworkSecurityGroupPaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
def list(
self, resource_group_name, custom_headers=None, raw=False, **operation_config):
"""Gets all network security groups in a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`NetworkSecurityGroupPaged
<azure.mgmt.network.v2017_03_01.models.NetworkSecurityGroupPaged>`
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(
request, header_parameters, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
deserialized = models.NetworkSecurityGroupPaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.NetworkSecurityGroupPaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
|
|
import logging
import warnings
from typing import Optional, Union
from uuid import UUID
from dateutil.parser import parse
from great_expectations.core.data_context_key import DataContextKey
from great_expectations.core.id_dict import BatchKwargs, IDDict
from great_expectations.core.run_identifier import RunIdentifier, RunIdentifierSchema
from great_expectations.exceptions import DataContextError, InvalidDataContextKeyError
from great_expectations.marshmallow__shade import Schema, fields, post_load
logger = logging.getLogger(__name__)
class ExpectationSuiteIdentifier(DataContextKey):
    """Key identifying an expectation suite by its (possibly dotted) name."""

    def __init__(self, expectation_suite_name: str):
        super().__init__()
        if isinstance(expectation_suite_name, str):
            self._expectation_suite_name = expectation_suite_name
        else:
            raise InvalidDataContextKeyError(
                f"expectation_suite_name must be a string, not {type(expectation_suite_name).__name__}"
            )

    @property
    def expectation_suite_name(self):
        return self._expectation_suite_name

    def to_tuple(self):
        # Dotted names map to nested tuple components.
        return tuple(self.expectation_suite_name.split("."))

    def to_fixed_length_tuple(self):
        return (self.expectation_suite_name,)

    @classmethod
    def from_tuple(cls, tuple_):
        return cls(".".join(tuple_))

    @classmethod
    def from_fixed_length_tuple(cls, tuple_):
        return cls(expectation_suite_name=tuple_[0])

    def __repr__(self):
        return "{}::{}".format(type(self).__name__, self._expectation_suite_name)
class ExpectationSuiteIdentifierSchema(Schema):
    # Marshmallow schema for (de)serializing an ExpectationSuiteIdentifier.
    expectation_suite_name = fields.Str()

    # noinspection PyUnusedLocal
    @post_load
    def make_expectation_suite_identifier(self, data, **kwargs):
        # Rebuild the identifier object from the loaded fields.
        return ExpectationSuiteIdentifier(**data)
class BatchIdentifier(DataContextKey):
    """A BatchIdentifier tracks a batch of data by its identifier (batch
    kwargs fingerprint or plain string) and, optionally, the name of the
    data asset the batch came from.
    """

    def __init__(
        self,
        batch_identifier: Union[BatchKwargs, dict, str],
        data_asset_name: Optional[str] = None,
    ):
        super().__init__()
        # NOTE(review): an earlier revision derived the identifier from the
        # batch kwargs fingerprint; kept here for reference.
        # if isinstance(batch_identifier, (BatchKwargs, dict)):
        #     self._batch_identifier = batch_identifier.batch_fingerprint
        self._batch_identifier = batch_identifier
        self._data_asset_name = data_asset_name

    @property
    def batch_identifier(self):
        return self._batch_identifier

    @property
    def data_asset_name(self):
        return self._data_asset_name

    def to_tuple(self):
        # data_asset_name is not part of the key tuple, so it is lost on a
        # to_tuple()/from_tuple() round trip.
        return (self.batch_identifier,)

    @classmethod
    def from_tuple(cls, tuple_):
        return cls(batch_identifier=tuple_[0])
class BatchIdentifierSchema(Schema):
    # Marshmallow schema for (de)serializing a BatchIdentifier.
    batch_identifier = fields.Str()
    data_asset_name = fields.Str()

    # noinspection PyUnusedLocal
    @post_load
    def make_batch_identifier(self, data, **kwargs):
        # Rebuild the identifier object from the loaded fields.
        return BatchIdentifier(**data)
class ValidationResultIdentifier(DataContextKey):
    """A ValidationResultIdentifier identifies a validation result by the fully-qualified expectation_suite_identifier
    and run_id.
    """

    def __init__(self, expectation_suite_identifier, run_id, batch_identifier):
        """Constructs a ValidationResultIdentifier

        Args:
            expectation_suite_identifier (ExpectationSuiteIdentifier, list, tuple, or dict):
                identifying information for the fully-qualified expectation suite used to validate
            run_id (RunIdentifier): The run_id for which validation occurred
        """
        super().__init__()
        self._expectation_suite_identifier = expectation_suite_identifier
        # Coerce legacy run_id forms (str / dict / None / other) into a
        # RunIdentifier. The str branch is checked first so the deprecation
        # warning fires for string run_ids.
        if isinstance(run_id, str):
            warnings.warn(
                "String run_ids will be deprecated in the future. Please provide a run_id of type "
                "RunIdentifier(run_name=None, run_time=None), or a dictionary containing run_name "
                "and run_time (both optional).",
                DeprecationWarning,
            )
            try:
                # A string that parses as a datetime doubles as the run_time.
                run_time = parse(run_id)
            except (ValueError, TypeError):
                run_time = None
            run_id = RunIdentifier(run_name=run_id, run_time=run_time)
        elif isinstance(run_id, dict):
            run_id = RunIdentifier(**run_id)
        elif run_id is None:
            run_id = RunIdentifier()
        elif not isinstance(run_id, RunIdentifier):
            # Last resort: stringify whatever was provided.
            run_id = RunIdentifier(run_name=str(run_id))
        self._run_id = run_id
        self._batch_identifier = batch_identifier

    @property
    def expectation_suite_identifier(self) -> ExpectationSuiteIdentifier:
        return self._expectation_suite_identifier

    @property
    def run_id(self):
        return self._run_id

    @property
    def batch_identifier(self):
        return self._batch_identifier

    def to_tuple(self):
        # "__none__" stands in for a missing batch identifier so the tuple
        # never contains None.
        return tuple(
            list(self.expectation_suite_identifier.to_tuple())
            + list(self.run_id.to_tuple())
            + [self.batch_identifier or "__none__"]
        )

    def to_fixed_length_tuple(self):
        return tuple(
            [self.expectation_suite_identifier.expectation_suite_name]
            + list(self.run_id.to_tuple())
            + [self.batch_identifier or "__none__"]
        )

    @classmethod
    def from_tuple(cls, tuple_):
        # Elements -3 and -2 form the run identifier, -1 is the batch
        # identifier; everything before belongs to the suite identifier.
        return cls(
            ExpectationSuiteIdentifier.from_tuple(tuple_[0:-3]),
            RunIdentifier.from_tuple((tuple_[-3], tuple_[-2])),
            tuple_[-1],
        )

    @classmethod
    def from_fixed_length_tuple(cls, tuple_):
        return cls(
            ExpectationSuiteIdentifier(tuple_[0]),
            RunIdentifier.from_tuple((tuple_[1], tuple_[2])),
            tuple_[3],
        )

    @classmethod
    def from_object(cls, validation_result):
        # Derive the batch identifier from the validation result's
        # batch_kwargs fingerprint.
        batch_kwargs = validation_result.meta.get("batch_kwargs", {})
        if isinstance(batch_kwargs, IDDict):
            batch_identifier = batch_kwargs.to_id()
        elif isinstance(batch_kwargs, dict):
            batch_identifier = IDDict(batch_kwargs).to_id()
        else:
            raise DataContextError(
                "Unable to construct ValidationResultIdentifier from provided object."
            )
        return cls(
            expectation_suite_identifier=ExpectationSuiteIdentifier(
                validation_result.meta["expectation_suite_name"]
            ),
            run_id=validation_result.meta.get("run_id"),
            batch_identifier=batch_identifier,
        )
class GeCloudIdentifier(DataContextKey):
    """Key for a GE Cloud resource: a resource type plus a cloud id."""

    def __init__(self, resource_type: str, ge_cloud_id: Optional[str] = None):
        super().__init__()
        self._resource_type = resource_type
        # A missing id is normalized to the empty string.
        self._ge_cloud_id = "" if ge_cloud_id is None else ge_cloud_id

    @property
    def resource_type(self):
        return self._resource_type

    @resource_type.setter
    def resource_type(self, value):
        self._resource_type = value

    @property
    def ge_cloud_id(self):
        return self._ge_cloud_id

    @ge_cloud_id.setter
    def ge_cloud_id(self, value):
        self._ge_cloud_id = value

    def to_tuple(self):
        return (self.resource_type, self.ge_cloud_id)

    def to_fixed_length_tuple(self):
        # Already fixed-length: identical to to_tuple().
        return self.to_tuple()

    @classmethod
    def from_tuple(cls, tuple_):
        return cls(resource_type=tuple_[0], ge_cloud_id=tuple_[1])

    @classmethod
    def from_fixed_length_tuple(cls, tuple_):
        return cls.from_tuple(tuple_)

    def __repr__(self):
        return "{}::{}::{}".format(
            self.__class__.__name__, self.resource_type, self.ge_cloud_id
        )
class ValidationResultIdentifierSchema(Schema):
    # Nested schemas mirror the three components of a
    # ValidationResultIdentifier.
    expectation_suite_identifier = fields.Nested(
        ExpectationSuiteIdentifierSchema,
        required=True,
        error_messages={
            "required": "expectation_suite_identifier is required for a ValidationResultIdentifier"
        },
    )
    run_id = fields.Nested(
        RunIdentifierSchema,
        required=True,
        error_messages={
            "required": "run_id is required for a " "ValidationResultIdentifier"
        },
    )
    batch_identifier = fields.Nested(BatchIdentifierSchema, required=True)

    # noinspection PyUnusedLocal
    @post_load
    def make_validation_result_identifier(self, data, **kwargs):
        # Rebuild the identifier object from the loaded fields.
        return ValidationResultIdentifier(**data)
class SiteSectionIdentifier(DataContextKey):
    """Identifies one resource within a section of a rendered data-docs site.

    Supported sections are "validations" and "profiling" (both wrapping a
    ValidationResultIdentifier) and "expectations" (wrapping an
    ExpectationSuiteIdentifier).  ``resource_identifier`` may be the
    identifier object itself, a tuple/list of positional constructor
    arguments, or a dict of keyword arguments.
    """

    def __init__(self, site_section_name, resource_identifier):
        # Fix: the base-class initializer was never invoked here, unlike
        # every other DataContextKey subclass in this module.
        super().__init__()
        self._site_section_name = site_section_name
        if site_section_name in ["validations", "profiling"]:
            if isinstance(resource_identifier, ValidationResultIdentifier):
                self._resource_identifier = resource_identifier
            elif isinstance(resource_identifier, (tuple, list)):
                self._resource_identifier = ValidationResultIdentifier(
                    *resource_identifier
                )
            else:
                self._resource_identifier = ValidationResultIdentifier(
                    **resource_identifier
                )
        elif site_section_name == "expectations":
            if isinstance(resource_identifier, ExpectationSuiteIdentifier):
                self._resource_identifier = resource_identifier
            elif isinstance(resource_identifier, (tuple, list)):
                self._resource_identifier = ExpectationSuiteIdentifier(
                    *resource_identifier
                )
            else:
                self._resource_identifier = ExpectationSuiteIdentifier(
                    **resource_identifier
                )
        else:
            raise InvalidDataContextKeyError(
                "SiteSectionIdentifier only supports 'validations' and 'expectations' as site section names"
            )

    @property
    def site_section_name(self):
        return self._site_section_name

    @property
    def resource_identifier(self):
        return self._resource_identifier

    def to_tuple(self):
        # The section name is prepended to the wrapped identifier's tuple.
        site_section_identifier_tuple_list = [self.site_section_name] + list(
            self.resource_identifier.to_tuple()
        )
        return tuple(site_section_identifier_tuple_list)

    @classmethod
    def from_tuple(cls, tuple_):
        """Rebuild an identifier from a tuple produced by to_tuple()."""
        # NOTE(review): "profiling" is accepted by __init__ but not handled
        # here — confirm whether that is intentional.
        if tuple_[0] == "validations":
            return cls(
                site_section_name=tuple_[0],
                resource_identifier=ValidationResultIdentifier.from_tuple(tuple_[1:]),
            )
        elif tuple_[0] == "expectations":
            return cls(
                site_section_name=tuple_[0],
                resource_identifier=ExpectationSuiteIdentifier.from_tuple(tuple_[1:]),
            )
        else:
            raise InvalidDataContextKeyError(
                "SiteSectionIdentifier only supports 'validations' and 'expectations' as site section names"
            )
class ConfigurationIdentifier(DataContextKey):
    """Key for a stored configuration, addressed by a dotted string key."""

    def __init__(self, configuration_key: Union[str, UUID]):
        super().__init__()
        if isinstance(configuration_key, UUID):
            # UUID keys are stored in their canonical string form.
            configuration_key = str(configuration_key)
        elif not isinstance(configuration_key, str):
            raise InvalidDataContextKeyError(
                f"configuration_key must be a string, not {type(configuration_key).__name__}"
            )
        self._configuration_key = configuration_key

    @property
    def configuration_key(self) -> str:
        return self._configuration_key

    def to_tuple(self):
        # Dotted keys map to nested tuple components.
        return tuple(self.configuration_key.split("."))

    def to_fixed_length_tuple(self):
        return (self.configuration_key,)

    @classmethod
    def from_tuple(cls, tuple_):
        return cls(".".join(tuple_))

    @classmethod
    def from_fixed_length_tuple(cls, tuple_):
        return cls(configuration_key=tuple_[0])

    def __repr__(self):
        return "{}::{}".format(type(self).__name__, self._configuration_key)
class ConfigurationIdentifierSchema(Schema):
    # Marshmallow schema for (de)serializing a ConfigurationIdentifier.
    configuration_key = fields.Str()

    # noinspection PyUnusedLocal
    @post_load
    def make_configuration_identifier(self, data, **kwargs):
        # Rebuild the identifier object from the loaded fields.
        return ConfigurationIdentifier(**data)
# Module-level schema singletons shared by callers that (de)serialize keys.
expectationSuiteIdentifierSchema = ExpectationSuiteIdentifierSchema()
validationResultIdentifierSchema = ValidationResultIdentifierSchema()
runIdentifierSchema = RunIdentifierSchema()
batchIdentifierSchema = BatchIdentifierSchema()
configurationIdentifierSchema = ConfigurationIdentifierSchema()
|
|
# Copyright 2014 - Mirantis, Inc.
# Copyright 2015 - StackStorm, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pkg_resources as pkg
from six.moves.urllib import parse
from six.moves.urllib import request
from mistralclient.api import base as api_base
from mistralclient.api.v2 import workbooks
from mistralclient.tests.unit.v2 import base
# TODO(everyone): later we need additional tests verifying all the errors etc.
WB_DEF = """
---
version: 2.0
name: wb
workflows:
wf1:
type: direct
input:
- param1
- param2
tasks:
task1:
action: std.http url="localhost:8989"
on-success:
- test_subsequent
test_subsequent:
action: std.http url="http://some_url" server_id=1
"""
INVALID_WB_DEF = """
version: 2.0
name: wb
workflows:
wf1:
type: direct
tasks:
task1:
action: std.http url="localhost:8989"
workflow: wf2
"""
WORKBOOK = {'definition': WB_DEF}
URL_TEMPLATE = '/workbooks'
URL_TEMPLATE_NAME = '/workbooks/%s'
URL_TEMPLATE_VALIDATE = '/workbooks/validate'
class TestWorkbooksV2(base.BaseClientV2Test):
    """Unit tests for the v2 workbooks client.

    All HTTP traffic is intercepted by the requests_mock fixture provided
    by BaseClientV2Test; self.workbooks is the client under test.
    """

    def test_create(self):
        self.requests_mock.post(self.TEST_URL + URL_TEMPLATE,
                                json=WORKBOOK,
                                status_code=201)

        wb = self.workbooks.create(WB_DEF)

        self.assertIsNotNone(wb)
        self.assertEqual(WB_DEF, wb.definition)

        # The definition must be posted verbatim as text/plain.
        last_request = self.requests_mock.last_request
        self.assertEqual(WB_DEF, last_request.text)
        self.assertEqual('text/plain', last_request.headers['content-type'])

    def test_create_with_file_uri(self):
        self.requests_mock.post(self.TEST_URL + URL_TEMPLATE,
                                json=WORKBOOK,
                                status_code=201)

        # The contents of wb_v2.yaml must be identical to WB_DEF
        path = pkg.resource_filename(
            'mistralclient',
            'tests/unit/resources/wb_v2.yaml'
        )

        # Convert the file path to file URI
        uri = parse.urljoin('file:', request.pathname2url(path))

        wb = self.workbooks.create(uri)

        self.assertIsNotNone(wb)
        self.assertEqual(WB_DEF, wb.definition)

        # The client must read the file behind the URI and send its text.
        last_request = self.requests_mock.last_request
        self.assertEqual(WB_DEF, last_request.text)
        self.assertEqual('text/plain', last_request.headers['content-type'])

    def test_update(self):
        self.requests_mock.put(self.TEST_URL + URL_TEMPLATE, json=WORKBOOK)

        wb = self.workbooks.update(WB_DEF)

        self.assertIsNotNone(wb)
        self.assertEqual(WB_DEF, wb.definition)

        last_request = self.requests_mock.last_request
        self.assertEqual(WB_DEF, last_request.text)
        self.assertEqual('text/plain', last_request.headers['content-type'])

    def test_update_with_file(self):
        self.requests_mock.put(self.TEST_URL + URL_TEMPLATE, json=WORKBOOK)

        # The contents of wb_v2.yaml must be identical to WB_DEF
        path = pkg.resource_filename(
            'mistralclient',
            'tests/unit/resources/wb_v2.yaml'
        )

        wb = self.workbooks.update(path)

        self.assertIsNotNone(wb)
        self.assertEqual(WB_DEF, wb.definition)

        last_request = self.requests_mock.last_request
        self.assertEqual(WB_DEF, last_request.text)
        self.assertEqual('text/plain', last_request.headers['content-type'])

    def test_list(self):
        self.requests_mock.get(self.TEST_URL + URL_TEMPLATE,
                               json={'workbooks': [WORKBOOK]})

        workbook_list = self.workbooks.list()

        self.assertEqual(1, len(workbook_list))

        wb = workbook_list[0]

        self.assertEqual(
            workbooks.Workbook(self.workbooks, WORKBOOK).to_dict(),
            wb.to_dict()
        )

    def test_get(self):
        url = self.TEST_URL + URL_TEMPLATE_NAME % 'wb'
        self.requests_mock.get(url, json=WORKBOOK)

        wb = self.workbooks.get('wb')

        self.assertIsNotNone(wb)
        self.assertEqual(
            workbooks.Workbook(self.workbooks, WORKBOOK).to_dict(),
            wb.to_dict()
        )

    def test_delete(self):
        url = self.TEST_URL + URL_TEMPLATE_NAME % 'wb'
        self.requests_mock.delete(url, status_code=204)

        # Should not raise; a 204 response means successful deletion.
        self.workbooks.delete('wb')

    def test_validate(self):
        self.requests_mock.post(self.TEST_URL + URL_TEMPLATE_VALIDATE,
                                json={'valid': True})

        result = self.workbooks.validate(WB_DEF)

        self.assertIsNotNone(result)
        self.assertIn('valid', result)
        self.assertTrue(result['valid'])

        last_request = self.requests_mock.last_request
        self.assertEqual(WB_DEF, last_request.text)
        self.assertEqual('text/plain', last_request.headers['content-type'])

    def test_validate_with_file(self):
        self.requests_mock.post(self.TEST_URL + URL_TEMPLATE_VALIDATE,
                                json={'valid': True})

        # The contents of wb_v2.yaml must be identical to WB_DEF
        path = pkg.resource_filename(
            'mistralclient',
            'tests/unit/resources/wb_v2.yaml'
        )

        result = self.workbooks.validate(path)

        self.assertIsNotNone(result)
        self.assertIn('valid', result)
        self.assertTrue(result['valid'])

        last_request = self.requests_mock.last_request
        self.assertEqual(WB_DEF, last_request.text)
        self.assertEqual('text/plain', last_request.headers['content-type'])

    def test_validate_failed(self):
        # Server-side validation failure is reported in the JSON body, not
        # via an HTTP error status.
        mock_result = {
            "valid": False,
            "error": "Task properties 'action' and 'workflow' "
                     "can't be specified both"
        }

        self.requests_mock.post(self.TEST_URL + URL_TEMPLATE_VALIDATE,
                                json=mock_result)

        result = self.workbooks.validate(INVALID_WB_DEF)

        self.assertIsNotNone(result)
        self.assertIn('valid', result)
        self.assertFalse(result['valid'])
        self.assertIn('error', result)
        self.assertIn(
            "Task properties 'action' and 'workflow' "
            "can't be specified both", result['error']
        )

        last_request = self.requests_mock.last_request
        self.assertEqual(INVALID_WB_DEF, last_request.text)
        self.assertEqual('text/plain', last_request.headers['content-type'])

    def test_validate_api_failed(self):
        # An HTTP 500 from the validate endpoint surfaces as APIException.
        self.requests_mock.post(self.TEST_URL + URL_TEMPLATE_VALIDATE,
                                status_code=500)

        self.assertRaises(
            api_base.APIException,
            self.workbooks.validate,
            WB_DEF
        )

        last_request = self.requests_mock.last_request
        self.assertEqual(WB_DEF, last_request.text)
        self.assertEqual('text/plain', last_request.headers['content-type'])
|
|
"""
Classes and functions for templates.
"""
from __future__ import absolute_import, division, print_function
import sys
from glob import glob
import os
import traceback
import numpy as np
from astropy.io import fits
from .utils import native_endian, elapsed, transmission_Lyman
from .rebin import rebin_template, trapz_rebin
class Template(object):
    """A spectral Template PCA object.

    The template data is read from a redrock-format template file.
    Alternatively, the data can be specified in the constructor.

    Args:
        filename (str): the path to the template file, either absolute or
            relative to the RR_TEMPLATE_DIR environment variable.
        spectype (str): template type, used only when filename is None.
        redshifts (array): redshift grid, used only when filename is None.
        wave (array): wavelength grid, used only when filename is None.
        flux (array): 2D basis-vector fluxes, used only when filename is None.
        subtype (str): template subtype, used only when filename is None.
    """
    def __init__(self, filename=None, spectype=None, redshifts=None,
                 wave=None, flux=None, subtype=None):

        if filename is not None:
            fx = None
            if os.path.exists(filename):
                fx = fits.open(filename, memmap=False)
            else:
                # Fall back to resolving relative to $RR_TEMPLATE_DIR.
                # NOTE(review): if RR_TEMPLATE_DIR is unset, os.path.join
                # raises TypeError here rather than the IOError below.
                xfilename = os.path.join(os.getenv('RR_TEMPLATE_DIR'), filename)
                if os.path.exists(xfilename):
                    fx = fits.open(xfilename, memmap=False)
                else:
                    raise IOError('unable to find '+filename)

            hdr = fx['BASIS_VECTORS'].header
            if 'VERSION' in hdr:
                self._version = hdr['VERSION']
            else:
                self._version = 'unknown'

            # Build the wavelength grid from the FITS WCS keywords; a
            # non-zero LOGLAM means the grid is stored as log10(wavelength).
            self.wave = np.asarray(hdr['CRVAL1'] + \
                hdr['CDELT1']*np.arange(hdr['NAXIS1']), dtype=np.float64)
            if 'LOGLAM' in hdr and hdr['LOGLAM'] != 0:
                self.wave = 10**self.wave

            self.flux = np.asarray(native_endian(fx['BASIS_VECTORS'].data),
                dtype=np.float64)

            self._redshifts = None

            ## find out if redshift info is present in the file
            old_style_templates = True
            try:
                self._redshifts = native_endian(fx['REDSHIFTS'].data)
                old_style_templates = False
            except KeyError:
                pass

            fx.close()

            self._rrtype = hdr['RRTYPE'].strip().upper()
            if old_style_templates:
                # No REDSHIFTS HDU: fall back to hard-coded per-type ranges.
                if self._rrtype == 'GALAXY':
                    # redshifts = 10**np.arange(np.log10(1+0.005),
                    # np.log10(1+2.0), 1.5e-4) - 1
                    self._redshifts = 10**np.arange(np.log10(1-0.005),
                        np.log10(1+1.7), 3e-4) - 1
                elif self._rrtype == 'STAR':
                    self._redshifts = np.arange(-0.002, 0.00201, 4e-5)
                elif self._rrtype == 'QSO':
                    self._redshifts = 10**np.arange(np.log10(1+0.05),
                        np.log10(1+6.0), 5e-4) - 1
                else:
                    raise ValueError("Unknown redshift range to use for "
                        "template type {}".format(self._rrtype))
                zmin = self._redshifts[0]
                zmax = self._redshifts[-1]
                print("DEBUG: Using default redshift range {:.4f}-{:.4f} for "
                    "{}".format(zmin, zmax, os.path.basename(filename)))
            else:
                zmin = self._redshifts[0]
                zmax = self._redshifts[-1]
                print("DEBUG: Using redshift range {:.4f}-{:.4f} for "
                    "{}".format(zmin, zmax, os.path.basename(filename)))

            self._subtype = None
            if 'RRSUBTYP' in hdr:
                self._subtype = hdr['RRSUBTYP'].strip().upper()
            else:
                self._subtype = ''

        else:
            # Data supplied directly through the constructor arguments.
            self._rrtype = spectype
            self._redshifts = redshifts
            self.wave = wave
            self.flux = flux
            self._subtype = subtype

        # flux is (nbasis, nwave): one row per PCA basis vector.
        self._nbasis = self.flux.shape[0]
        self._nwave = self.flux.shape[1]

    @property
    def nbasis(self):
        # Number of PCA basis vectors.
        return self._nbasis

    @property
    def nwave(self):
        # Number of wavelength samples per basis vector.
        return self._nwave

    @property
    def template_type(self):
        return self._rrtype

    @property
    def sub_type(self):
        return self._subtype

    @property
    def full_type(self):
        """Return formatted type:subtype string.
        """
        if self._subtype != '':
            return '{}:::{}'.format(self._rrtype, self._subtype)
        else:
            return self._rrtype

    @property
    def redshifts(self):
        return self._redshifts

    def eval(self, coeff, wave, z):
        """Return template for given coefficients, wavelengths, and redshift

        Args:
            coeff : array of coefficients length self.nbasis
            wave : wavelengths at which to evaluate template flux
            z : redshift at which to evaluate template flux

        Returns:
            template flux array

        Notes:
            A single factor of (1+z)^-1 is applied to the resampled flux
            to conserve integrated flux after redshifting.
        """
        assert len(coeff) == self.nbasis
        flux = self.flux.T.dot(coeff).T / (1+z)
        return trapz_rebin(self.wave*(1+z), flux, wave)
def find_templates(template_dir=None):
    """Return list of redrock-\*.fits template files

    Search directories in this order, returning results from first one found:
        - template_dir
        - $RR_TEMPLATE_DIR
        - <redrock_code>/templates/

    Args:
        template_dir (str): optional directory containing the templates.

    Returns:
        list: a list of template files.
    """
    if template_dir is None:
        # The environment variable takes precedence over the in-package dir.
        template_dir = os.environ.get('RR_TEMPLATE_DIR')
        if template_dir is None:
            pkgdir = os.path.join(
                os.path.abspath(os.path.dirname(__file__)), 'templates')
            if os.path.exists(pkgdir):
                template_dir = pkgdir

    if template_dir is None:
        raise IOError("ERROR: can't find template_dir, $RR_TEMPLATE_DIR, or {rrcode}/templates/")
    else:
        print('DEBUG: Read templates from {}'.format(template_dir))

    pattern = os.path.join(template_dir, 'rrtemplate-*.fits')
    return sorted(glob(pattern))
class DistTemplatePiece(object):
    """Container for one rank's slice of interpolated template data.

    Holds the interpolated templates for a contiguous set of redshift
    values; it is the unit of data exchanged between processes in the MPI
    case (each process stores at most two pieces at a time).

    Args:
        index (int): the chunk index of this piece- this corresponds to
            the process rank that originally computed this piece.
        redshifts (array): the redshift range contained in this piece.
        data (list): a list of dictionaries, one for each redshift, and
            each containing the 2D interpolated template values for all
            "wavehash" keys.
    """
    def __init__(self, index, redshifts, data):
        self.data = data
        self.redshifts = redshifts
        self.index = index
def _mp_rebin_template(template, dwave, zlist, qout):
    """Multiprocessing worker: rebin *template* at each redshift in *zlist*.

    Puts a single dict mapping z -> rebinned data onto the output queue.
    Any exception is formatted and printed instead of propagating, so the
    worker process never dies with an unreported traceback.
    """
    try:
        rebinned = {z: rebin_template(template, z, dwave) for z in zlist}
        qout.put(rebinned)
    except:
        exc_type, exc_value, exc_traceback = sys.exc_info()
        lines = traceback.format_exception(exc_type, exc_value, exc_traceback)
        lines = [ "MP rebin: {}".format(x) for x in lines ]
        print("".join(lines))
        sys.stdout.flush()

    return
class DistTemplate(object):
    """Distributed template data interpolated to all redshifts.

    For a given template, the redshifts are distributed among the
    processes in the communicator.  Then each process will rebin the
    template to those redshifts for the wavelength grids specified by
    dwave.

    Args:
        template (Template): the template to distribute
        dwave (dict): the keys are the "wavehash" and the values
            are a 1D array containing the wavelength grid.
        mp_procs (int): if not using MPI, restrict the number of
            multiprocesses to this.
        comm (mpi4py.MPI.Comm): (optional) the MPI communicator.
    """
    def __init__(self, template, dwave, mp_procs=1, comm=None):
        self._comm = comm
        self._template = template
        self._dwave = dwave

        self._comm_rank = 0
        self._comm_size = 1
        if self._comm is not None:
            self._comm_rank = self._comm.rank
            self._comm_size = self._comm.size

        # Split the full redshift grid across ranks; each rank rebins only
        # its own slice.
        self._distredshifts = np.array_split(self._template.redshifts,
            self._comm_size)

        myz = self._distredshifts[self._comm_rank]
        nz = len(myz)

        data = list()

        # In the case of not using MPI (comm == None), one process is rebinning
        # all the templates.  In that scenario, use multiprocessing
        # workers to do the rebinning.

        if self._comm is not None:
            # MPI case- compute our local redshifts
            for z in myz:
                binned = rebin_template(self._template, z, self._dwave)
                data.append(binned)
        else:
            # We don't have MPI, so use multiprocessing
            import multiprocessing as mp

            qout = mp.Queue()
            work = np.array_split(myz, mp_procs)
            procs = list()
            for i in range(mp_procs):
                p = mp.Process(target=_mp_rebin_template,
                    args=(self._template, self._dwave, work[i], qout))
                procs.append(p)
                p.start()

            # Extract the output into a single list
            results = dict()
            for i in range(mp_procs):
                res = qout.get()
                results.update(res)

            # Reassemble in redshift order; queue results arrive unordered.
            for z in myz:
                data.append(results[z])

        # Correct spectra for Lyman-series
        for i, z in enumerate(myz):
            for k in list(self._dwave.keys()):
                T = transmission_Lyman(z,self._dwave[k])
                for vect in range(data[i][k].shape[1]):
                    data[i][k][:,vect] *= T

        self._piece = DistTemplatePiece(self._comm_rank, myz, data)

    @property
    def comm(self):
        return self._comm

    @property
    def template(self):
        return self._template

    @property
    def local(self):
        # This rank's current piece; it changes as cycle() rotates data.
        return self._piece

    def cycle(self):
        """Pass our piece of data to the next process.

        If we have returned to our original data, then return True, otherwise
        return False.

        Args:
            Nothing

        Returns (bool):
            Whether we have finished (True) else False.
        """
        # If we are not using MPI, this function is a no-op, so just return.
        if self._comm is None:
            return True

        rank = self._comm_rank
        nproc = self._comm_size

        # Ring exchange: send to rank+1, receive from rank-1 (with wraparound).
        to_proc = rank + 1
        if to_proc >= nproc:
            to_proc = 0

        from_proc = rank - 1
        if from_proc < 0:
            from_proc = nproc - 1

        # Send our data and get a request handle for later checking.
        req = self._comm.isend(self._piece, to_proc)

        # Receive our data
        incoming = self._comm.recv(source=from_proc)

        # Wait for send to finish
        req.wait()

        # Now replace our local piece with the new one
        self._piece = incoming

        # Are we done?  Yes, once our original piece has come full circle.
        done = False
        if self._piece.index == rank:
            done = True
        return done
def load_dist_templates(dwave, templates=None, comm=None, mp_procs=1):
    """Read and distribute templates from disk.

    This reads one or more template files from disk and distributes them among
    an MPI communicator.  Each process will locally store interpolated data
    for a redshift slice of each template.  For a single redshift, the template
    is interpolated to the wavelength grids specified by "dwave".

    As an example, imagine 3 templates with independent redshift ranges.  Also
    imagine that the communicator has 2 processes.  This function would return
    a list of 3 DistTemplate objects.  Within each of those objects, the 2
    processes store the interpolated data for a subset of the redshift range:

    DistTemplate #1:  zmin1 <---- p0 ----> | <---- p1 ----> zmax1
    DistTemplate #2:  zmin2 <-- p0 --> | <-- p1 --> zmax2
    DistTemplate #3:  zmin3 <--- p0 ---> | <--- p1 ---> zmax3

    Args:
        dwave (dict): the dictionary of wavelength grids.  Keys are the
            "wavehash" and values are an array of wavelengths.
        templates (str or None): if None, find all templates from the
            redrock template directory.  If a path to a file is specified,
            load that single template.  If a path to a directory is given,
            load all templates in that directory.
        comm (mpi4py.MPI.Comm): (optional) the MPI communicator.
        mp_procs (int): if not using MPI, restrict the number of
            multiprocesses to this.

    Returns:
        list: a list of DistTemplate objects.
    """
    timer = elapsed(None, "", comm=comm)

    # Rank 0 resolves the list of template files, then broadcasts it.
    template_files = None
    if (comm is None) or (comm.rank == 0):
        # Only one process needs to do this
        if templates is not None:
            if os.path.isfile(templates):
                # we are using just a single file
                template_files = [ templates ]
            elif os.path.isdir(templates):
                # this is a template dir
                template_files = find_templates(template_dir=templates)
            else:
                print("{} is neither a file nor a directory"\
                    .format(templates))
                sys.stdout.flush()
                if comm is not None:
                    comm.Abort()
        else:
            template_files = find_templates()

    if comm is not None:
        template_files = comm.bcast(template_files, root=0)

    # Rank 0 reads the files; the parsed Template objects are broadcast.
    template_data = list()
    if (comm is None) or (comm.rank == 0):
        for t in template_files:
            template_data.append(Template(filename=t))

    if comm is not None:
        template_data = comm.bcast(template_data, root=0)

    timer = elapsed(timer, "Read and broadcast of {} templates"\
        .format(len(template_files)), comm=comm)

    # Compute the interpolated templates in a distributed way with every
    # process generating a slice of the redshift range.
    dtemplates = list()
    for t in template_data:
        dtemplates.append(DistTemplate(t, dwave, mp_procs=mp_procs, comm=comm))

    timer = elapsed(timer, "Rebinning templates", comm=comm)

    return dtemplates
|
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """Auto-generated South schema migration.

    Adds an integer ``status`` column (default 0) to the
    ``BookBorrowRequest`` and ``EBookRequest`` tables.  The ``models``
    dict below is South's frozen ORM snapshot taken at generation time;
    it must not be edited by hand.
    """

    def forwards(self, orm):
        """Apply the migration: add the two ``status`` columns."""
        # Adding field 'BookBorrowRequest.status'
        db.add_column('cabinet_bookborrowrequest', 'status',
                      self.gf('django.db.models.fields.IntegerField')(default=0),
                      keep_default=False)

        # Adding field 'EBookRequest.status'
        db.add_column('cabinet_ebookrequest', 'status',
                      self.gf('django.db.models.fields.IntegerField')(default=0),
                      keep_default=False)

    def backwards(self, orm):
        """Reverse the migration: drop the two ``status`` columns."""
        # Deleting field 'BookBorrowRequest.status'
        db.delete_column('cabinet_bookborrowrequest', 'status')

        # Deleting field 'EBookRequest.status'
        db.delete_column('cabinet_ebookrequest', 'status')

    # Frozen ORM snapshot (generated by South) — do not edit by hand.
    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'cabinet.book': {
            'Meta': {'object_name': 'Book'},
            'author': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'douban_id': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'isbn': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'cabinet.bookborrowrecord': {
            'Meta': {'object_name': 'BookBorrowRecord'},
            'borrow_date': ('django.db.models.fields.DateTimeField', [], {}),
            'borrower': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'ownership': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cabinet.BookOwnership']"}),
            'planed_return_date': ('django.db.models.fields.DateField', [], {'blank': 'True'}),
            'returned_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
        },
        'cabinet.bookborrowrequest': {
            'Meta': {'object_name': 'BookBorrowRequest'},
            'bo_ship': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cabinet.BookOwnership']"}),
            'datetime': ('django.db.models.fields.DateTimeField', [], {}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'planed_return_date': ('django.db.models.fields.DateField', [], {}),
            'remark': ('django.db.models.fields.CharField', [], {'max_length': '256', 'blank': 'True'}),
            'requester': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
            'status': ('django.db.models.fields.IntegerField', [], {'default': '0'})
        },
        'cabinet.bookcabinet': {
            'Meta': {'object_name': 'BookCabinet'},
            'books': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['cabinet.BookOwnership']", 'null': 'True', 'blank': 'True'}),
            'create_datetime': ('django.db.models.fields.DateTimeField', [], {}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
            'remark': ('django.db.models.fields.CharField', [], {'max_length': '256', 'blank': 'True'})
        },
        'cabinet.bookcomment': {
            'Meta': {'object_name': 'BookComment'},
            'book': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cabinet.Book']"}),
            'content': ('django.db.models.fields.CharField', [], {'max_length': '2048'}),
            'datetime': ('django.db.models.fields.DateTimeField', [], {}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'status': ('django.db.models.fields.IntegerField', [], {}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
        },
        'cabinet.bookownership': {
            'Meta': {'object_name': 'BookOwnership'},
            'book': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cabinet.Book']"}),
            'has_ebook': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
            'remark': ('django.db.models.fields.CharField', [], {'max_length': '256', 'blank': 'True'}),
            'status': ('django.db.models.fields.CharField', [], {'default': "'1'", 'max_length': '16'})
        },
        'cabinet.bookownershiptaguse': {
            'Meta': {'object_name': 'BookOwnershipTagUse'},
            'bookown': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cabinet.BookOwnership']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'tag': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cabinet.Tag']"}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
        },
        'cabinet.booktaguse': {
            'Meta': {'unique_together': "(('tag', 'user', 'book'),)", 'object_name': 'BookTagUse'},
            'book': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cabinet.Book']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'tag': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cabinet.Tag']"}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
        },
        'cabinet.cabinetnews': {
            'Meta': {'ordering': "['-datetime']", 'object_name': 'CabinetNews'},
            'datetime': ('django.db.models.fields.DateTimeField', [], {}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'lead': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
            'news': ('django.db.models.fields.CharField', [], {'max_length': '256'})
        },
        'cabinet.ebookrequest': {
            'Meta': {'object_name': 'EBookRequest'},
            'bo_ship': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cabinet.BookOwnership']"}),
            'datetime': ('django.db.models.fields.DateTimeField', [], {}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'requester': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
            'status': ('django.db.models.fields.IntegerField', [], {'default': '0'})
        },
        'cabinet.feedback': {
            'Meta': {'object_name': 'Feedback'},
            'content': ('django.db.models.fields.CharField', [], {'max_length': '1024'}),
            'datetime': ('django.db.models.fields.DateTimeField', [], {}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'status': ('django.db.models.fields.IntegerField', [], {}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
        },
        'cabinet.follow': {
            'Meta': {'unique_together': "(('following', 'user'),)", 'object_name': 'Follow'},
            'following': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'follower_set'", 'to': "orm['auth.User']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'remark': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
        },
        'cabinet.joinrepositoryrequest': {
            'Meta': {'object_name': 'JoinRepositoryRequest'},
            'datetime': ('django.db.models.fields.DateTimeField', [], {}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'remark': ('django.db.models.fields.CharField', [], {'max_length': '256', 'blank': 'True'}),
            'repo': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cabinet.Repository']"}),
            'requester': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
        },
        'cabinet.repository': {
            'Meta': {'object_name': 'Repository'},
            'admin': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'managed_repos'", 'symmetrical': 'False', 'to': "orm['auth.User']"}),
            'create_time': ('django.db.models.fields.DateTimeField', [], {}),
            'description': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'members': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'joined_repos'", 'symmetrical': 'False', 'to': "orm['auth.User']"}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '256'})
        },
        'cabinet.sysbooktaguse': {
            'Meta': {'object_name': 'SysBookTagUse'},
            'book': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cabinet.Book']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'tag': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cabinet.Tag']"})
        },
        'cabinet.tag': {
            'Meta': {'object_name': 'Tag'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'})
        },
        'cabinet.weibouser': {
            'Meta': {'object_name': 'WeiboUser'},
            'avatar': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
            'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'expires_in': ('django.db.models.fields.IntegerField', [], {}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'screen_name': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
            'token': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
            'uid': ('django.db.models.fields.CharField', [], {'max_length': '16'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        }
    }

    complete_apps = ['cabinet']
|
|
#!/usr/bin/env python
import json
import math
import re
class PManager(object):
    """Generate Anaconda kickstart partitioning commands.

    Consumes a provisioning-manager data dict (``ks_spaces`` volume layout
    plus ``kernel_params``) and accumulates three ordered shell-command
    streams: ``pre`` (run before kickstart), ``kick`` (kickstart partition
    directives) and ``post`` (run in %post).  ``expose()`` renders them.
    Python 2 code (``unicode``, ``iteritems``).
    """

    def __init__(self, pm_data):
        """Parse provisioning data and reset all accumulator state.

        pm_data may be a JSON string or an already-parsed dict containing
        'ks_spaces' and 'kernel_params'.
        """
        if isinstance(pm_data, (str, unicode)):
            self.pm_data = json.loads(pm_data)
        else:
            self.pm_data = pm_data
        self.data = self.pm_data['ks_spaces']
        self.kernel_params = self.pm_data['kernel_params']
        self.factor = 1          # size multiplier for self.unit
        self.unit = "MiB"
        self._pre = []           # shell commands run before kickstart
        self._kick = []          # kickstart directives
        self._post = []          # %post shell commands
        self.raid_count = 0      # next /dev/mdN index
        self._pcount = {}        # partitions created so far, per disk id
        self._pend = {}          # running partition end offset, per disk id
        self._rcount = 0         # raid.NNN name counter
        self._pvcount = 0        # pv.NNN name counter

    def _pseparator(self, devname):
        """Return 'p' for cciss-style devices that need a partition separator."""
        pseparator = ''
        if devname.find('cciss') >= 0:
            pseparator = 'p'
        return pseparator

    def pcount(self, disk_id, increment=0):
        """Bump (by increment) and return the partition counter for a disk."""
        self._pcount[disk_id] = self._pcount.get(disk_id, 0) + increment
        return self._pcount.get(disk_id, 0)

    def psize(self, disk_id, increment=0):
        """Bump (by increment) and return the running end offset for a disk."""
        self._pend[disk_id] = self._pend.get(disk_id, 0) + increment
        return self._pend.get(disk_id, 0)

    def rcount(self, increment=0):
        """Bump and return the global raid member counter."""
        self._rcount += increment
        return self._rcount

    def pvcount(self, increment=0):
        """Bump and return the global physical-volume counter."""
        self._pvcount += increment
        return self._pvcount

    def pre(self, command=None):
        """Append a pre-script command, or return the list when called bare."""
        if command:
            return self._pre.append(command)
        return self._pre

    def kick(self, command=None):
        """Append a kickstart directive, or return the list when called bare."""
        if command:
            return self._kick.append(command)
        return self._kick

    def post(self, command=None):
        """Append a %post command, or return the list when called bare."""
        if command:
            return self._post.append(command)
        return self._post

    def _disk_dev(self, disk):
        """Build a shell expression resolving a disk to its real device node.

        Tries each "extra" by-id/by-path link first, falling back to the
        plain device id; readlink canonicalises the first one that exists.
        """
        command = "$(readlink -f $( ("
        command += " || ".join(["ls /dev/{0}".format(d)
                                for d in disk.get("extra", [])])
        if disk["extra"]:
            command += " || "
        command += "ls /dev/{0}".format(disk["id"])
        command += ") 2>/dev/null) )"
        return command

    def iterdisks(self):
        """Yield every disk entry with a positive size."""
        for item in self.data:
            if item["type"] == "disk" and item["size"] > 0:
                yield item

    def get_partition_count(self, name):
        """Count non-empty volumes named `name` across all disks."""
        count = 0
        for disk in self.iterdisks():
            count += len([v for v in disk["volumes"]
                          if v.get('name') == name and v['size'] > 0])
        return count

    def num_ceph_journals(self):
        """Number of ceph journal partitions in the layout."""
        return self.get_partition_count('cephjournal')

    def num_ceph_osds(self):
        """Number of ceph OSD partitions in the layout."""
        return self.get_partition_count('ceph')

    def _gettabfstype(self, vol):
        """Filesystem type for fstab: explicit value, else mount-based default."""
        if vol.get("file_system"):
            return vol["file_system"]
        elif vol["mount"] == "/":
            return "ext4"
        elif vol["mount"] == "/boot":
            return "ext3"
        elif vol["mount"] == "swap":
            return "swap"
        return "xfs"

    def _gettabfsoptions(self, vol):
        """mkfs options: force + 512-byte sectors for xfs, nothing otherwise."""
        if self._gettabfstype(vol) == "xfs":
            return "-f -s size=512"
        return ""

    def _getfstype(self, vol):
        """Kickstart --fstype flag for a volume (empty for swap)."""
        fstype = self._gettabfstype(vol)
        if fstype == "swap":
            return ""
        return "--fstype=%s" % fstype

    def _getlabel(self, label):
        """mkfs label flag, truncated to 12 characters (empty if no label)."""
        if not label:
            return ""
        # XFS will refuse to format a partition if the
        # disk label is > 12 characters.
        return " -L {0} ".format(label[:12])

    def _parttype(self, n):
        """Partition type for parted mkpart; always primary on GPT."""
        return "primary"

    def _getsize(self, vol):
        """Get volume size.

        Anaconda has hard coded limitation in 16TB
        for ext3/4 and xfs filesystems (the only filesystems
        we are supposed to use). Besides there is no stable
        64-bit ext4 implementation at the moment, so the
        limitation in 16TB for ext4 is not only
        anaconda limitation.

        Root partition can not be located on xfs file system
        therefore we check if root filesystem is larger
        than 16TB and set it size into 16TB if it is larger.

        It is necessary to note that to format 16TB
        volume on ext4 it is needed about 1G memory.
        """
        if vol["size"] > 16777216 and vol["mount"] == "/":
            return 16777216
        return vol["size"]

    def erase_lvm_metadata(self):
        """Queue pre-commands removing any leftover LVM VGs and PVs."""
        self.pre("for v in $(vgs | awk '{print $1}'); do "
                 "vgreduce -f --removemissing $v; vgremove -f $v; done")
        self.pre("for p in $(pvs | grep '\/dev' | awk '{print $1}'); do "
                 "pvremove -ff -y $p ; done")

    def erase_raid_metadata(self):
        """Queue pre-commands zeroing mdadm superblocks on every disk."""
        for disk in self.iterdisks():
            self.pre("mdadm --zero-superblock --force {0}*"
                     "".format(self._disk_dev(disk)))

    def clean(self, disk):
        """Queue pre-commands wiping the start of a disk and rereading geometry."""
        self.pre("hdparm -z {0}".format(self._disk_dev(disk)))
        self.pre("test -e {0} && dd if=/dev/zero "
                 "of={0} bs=1M count=10".format(
                     self._disk_dev(disk)))
        # sleep lets the kernel/udev settle after the wipe
        self.pre("sleep 10")
        self.pre("hdparm -z {0}".format(self._disk_dev(disk)))

    def gpt(self, disk):
        """Queue a pre-command creating a fresh GPT label on the disk."""
        self.pre("parted -s {0} mklabel gpt".format(self._disk_dev(disk)))

    def bootable(self, disk):
        """Create and mark Bios Boot partition

        Grub will embed its code later, useable for legacy boot.
        May be way smaller, but be aware that the parted may
        shrink 1M partition to zero at some disks and versions.
        """
        self.pre("parted -a optimal -s {0} "
                 "unit {3} mkpart primary {1} {2}".format(
                     self._disk_dev(disk),
                     self.psize(disk["id"]),
                     self.psize(disk["id"], 24 * self.factor),
                     self.unit))
        self.pre("parted -s {0} set {1} bios_grub on".format(
            self._disk_dev(disk),
            self.pcount(disk["id"], 1)))

        """Create partition for the EFI boot, minimum
        size is 100M, recommended is 200M, with fat32 and
        future mountpoint in the /boot/efi. There is also
        '/usr/sbin/parted -s /dev/sda set 2 boot on'
        which is strictly needed for EFI boot."""
        self.pre("parted -a optimal -s {0} "
                 "unit {3} mkpart primary fat32 {1} {2}".format(
                     self._disk_dev(disk),
                     self.psize(disk["id"]),
                     self.psize(disk["id"], 200 * self.factor),
                     self.unit))
        self.pre("parted -s {0} set {1} boot on".format(
            self._disk_dev(disk),
            self.pcount(disk["id"], 1)))

    def boot(self):
        """Emit /boot partitions and raids first so they get low numbers."""
        self.plains(volume_filter=lambda x: x["mount"] == "/boot")
        self.raids(volume_filter=lambda x: x["mount"] == "/boot")

    def notboot(self):
        """Emit all remaining (non-/boot) partitions and raids."""
        self.plains(volume_filter=lambda x: x["mount"] != "/boot")
        self.raids(volume_filter=lambda x: x["mount"] != "/boot")

    def plains(self, volume_filter=None):
        """Emit commands creating every plain partition accepted by the filter.

        NOTE(review): `default_volume_filter` is expected to be defined
        elsewhere in this module — confirm.
        """
        if not volume_filter:
            volume_filter = default_volume_filter
        ceph_osds = self.num_ceph_osds()
        journals_left = ceph_osds
        ceph_journals = self.num_ceph_journals()
        for disk in self.iterdisks():
            for part in filter(lambda p: p["type"] == "partition" and
                               volume_filter(p), disk["volumes"]):
                if part["size"] <= 0:
                    continue
                if part.get('name') == 'cephjournal':
                    # We need to allocate a journal partition for each ceph OSD
                    # Determine the number of journal partitions we need on
                    # each device
                    ratio = math.ceil(float(ceph_osds) / ceph_journals)
                    # No more than 10GB will be allocated to a single
                    # journal partition
                    size = part["size"] / ratio
                    if size > 10240:
                        size = 10240
                    # This will attempt to evenly spread partitions across
                    # multiple devices e.g. 5 osds with 2 journal devices will
                    # create 3 partitions on the first device and 2 on the
                    # second
                    if ratio < journals_left:
                        end = ratio
                    else:
                        end = journals_left
                    # NOTE(review): `end` is a float (math.ceil on py2);
                    # range() only accepts it with a DeprecationWarning —
                    # confirm intended.
                    for i in range(0, end):
                        journals_left -= 1
                        pcount = self.pcount(disk["id"], 1)
                        # NOTE(review): uses /dev/{id} directly instead of
                        # self._disk_dev(disk) as the other branches do —
                        # confirm this is intentional.
                        self.pre("parted -a optimal -s /dev/{0} "
                                 "unit {4} mkpart {1} {2} {3}".format(
                                     disk["id"],
                                     self._parttype(pcount),
                                     self.psize(disk["id"]),
                                     self.psize(disk["id"],
                                                size * self.factor),
                                     self.unit))
                        self.post("chroot /mnt/sysimage sgdisk "
                                  "--typecode={0}:{1} /dev/{2}".format(
                                      pcount, part["partition_guid"],
                                      disk["id"]))
                    continue
                pcount = self.pcount(disk["id"], 1)
                self.pre("parted -a optimal -s {0} "
                         "unit {4} mkpart {1} {2} {3}".format(
                             self._disk_dev(disk),
                             self._parttype(pcount),
                             self.psize(disk["id"]),
                             self.psize(disk["id"],
                                        part["size"] * self.factor),
                             self.unit))
                size = self._getsize(part)
                tabmount = part["mount"] if part["mount"] != "swap" else "none"
                tabfstype = self._gettabfstype(part)
                tabfsoptions = self._gettabfsoptions(part)
                if part.get("partition_guid"):
                    self.post("chroot /mnt/sysimage sgdisk "
                              "--typecode={0}:{1} {2}".format(
                                  pcount, part["partition_guid"],
                                  self._disk_dev(disk)))
                # Small non-xfs mounted partitions are handed to anaconda;
                # everything else is formatted/mounted manually in %post.
                # NOTE(review): format placeholder {1} (size) is unused in
                # the kick string below — presumably leftover from a
                # pre---onpart version; confirm.
                if size > 0 and size <= 16777216 and part["mount"] != "none" \
                        and tabfstype != "xfs":
                    self.kick("partition {0} "
                              "--onpart={2}"
                              "{3}{4}".format(part["mount"], size,
                                              self._disk_dev(disk),
                                              self._pseparator(disk["id"]),
                                              pcount))
                else:
                    if part["mount"] != "swap" and tabfstype != "none":
                        disk_label = self._getlabel(part.get('disk_label'))
                        self.post("mkfs.{0} {1} {2}"
                                  "{3}{4} {5}".format(
                                      tabfstype,
                                      tabfsoptions,
                                      self._disk_dev(disk),
                                      self._pseparator(disk["id"]),
                                      pcount, disk_label))
                    if part["mount"] != "none":
                        self.post("mkdir -p /mnt/sysimage{0}".format(
                            part["mount"]))
                        if tabfstype != "none":
                            self.post("echo 'UUID=$(blkid -s UUID -o value "
                                      "{0}{1}{2}) "
                                      "{3} {4} defaults 0 0'"
                                      " >> /mnt/sysimage/etc/fstab".format(
                                          self._disk_dev(disk),
                                          self._pseparator(disk["id"]),
                                          pcount, tabmount, tabfstype))

    def raids(self, volume_filter=None):
        """Emit commands creating software RAID1 arrays accepted by the filter."""
        if not volume_filter:
            volume_filter = default_volume_filter
        raids = {}       # mount -> list of raid.NNN member names
        raid_info = {}   # mount -> volume dict (last one wins)
        phys = {}        # mount -> list of member device paths
        for disk in self.iterdisks():
            for raid in filter(lambda p: p["type"] == "raid" and
                               volume_filter(p), disk["volumes"]):
                if raid["size"] <= 0:
                    continue
                raid_info[raid["mount"]] = raid
                pcount = self.pcount(disk["id"], 1)
                if not phys.get(raid["mount"]):
                    phys[raid["mount"]] = []
                phys[raid["mount"]].append(
                    "{0}{1}{2}".format(
                        self._disk_dev(disk),
                        self._pseparator(disk["id"]), pcount))
                rname = "raid.{0:03d}".format(self.rcount(1))
                begin_size = self.psize(disk["id"])
                end_size = self.psize(disk["id"], raid["size"] * self.factor)
                self.pre("parted -a optimal -s {0} "
                         "unit {4} mkpart {1} {2} {3}".format(
                             self._disk_dev(disk), self._parttype(pcount),
                             begin_size, end_size, self.unit))
                self.kick("partition {0} "
                          "--onpart={2}{3}{4}"
                          "".format(rname, raid["size"], self._disk_dev(disk),
                                    self._pseparator(disk["id"]), pcount))
                if not raids.get(raid["mount"]):
                    raids[raid["mount"]] = []
                raids[raid["mount"]].append(rname)
        for (num, (mount, rnames)) in enumerate(raids.iteritems()):
            raid = raid_info[mount]
            fstype = self._gettabfstype(raid)
            fsoptions = self._gettabfsoptions(raid)
            label = raid.get('disk_label')
            # Anaconda won't label a RAID array. It also can't create
            # a single-drive RAID1 array, but mdadm can.
            if label or len(rnames) == 1:
                if len(rnames) == 1:
                    # pad the single member so mdadm builds a degraded mirror
                    phys[mount].append('missing')
                self.post("mdadm --create /dev/md{0} --run --level=1 "
                          "--raid-devices={1} {2}".format(
                              self.raid_count, len(phys[mount]),
                              ' '.join(phys[mount])))
                self.post("mkfs.{0} {1} {2} /dev/md{3}".format(
                    fstype, fsoptions,
                    self._getlabel(label), self.raid_count))
                self.post("mdadm --detail --scan | grep '\/dev\/md{0}'"
                          ">> /mnt/sysimage/etc/mdadm.conf".format(
                              self.raid_count))
                self.post("mkdir -p /mnt/sysimage{0}".format(mount))
                self.post("echo \\\"UUID=\$(blkid -s UUID -o value "
                          "/dev/md{0}) "
                          "{1} {2} defaults 0 0\\\""
                          " >> /mnt/sysimage/etc/fstab".format(
                              self.raid_count, mount, fstype))
            else:
                self.kick("raid {0} --device md{1} --fstype {3} "
                          "--level=RAID1 {2}".format(
                              mount, self.raid_count,
                              " ".join(rnames), fstype))
            self.raid_count += 1

    def pvs(self):
        """Emit partitions for every LVM physical volume and the volgroups."""
        pvs = {}  # vg name -> list of pv.NNN member names
        for disk in self.iterdisks():
            for pv in [p for p in disk["volumes"] if p["type"] == "pv"]:
                if pv["size"] <= 0:
                    continue
                pcount = self.pcount(disk["id"], 1)
                pvname = "pv.{0:03d}".format(self.pvcount(1))
                begin_size = self.psize(disk["id"])
                end_size = self.psize(disk["id"], pv["size"] * self.factor)
                self.pre("parted -a optimal -s {0} "
                         "unit {4} mkpart {1} {2} {3}".format(
                             self._disk_dev(disk), self._parttype(pcount),
                             begin_size, end_size, self.unit))
                self.kick("partition {0} "
                          "--onpart={2}{3}{4}"
                          "".format(pvname, pv["size"], self._disk_dev(disk),
                                    self._pseparator(disk["id"]), pcount))
                if not pvs.get(pv["vg"]):
                    pvs[pv["vg"]] = []
                pvs[pv["vg"]].append(pvname)
        for vg, pvnames in pvs.iteritems():
            self.kick("volgroup {0} {1}".format(vg, " ".join(pvnames)))

    def lvs(self):
        """Emit logical volumes: small non-xfs ones via kickstart, rest in %post."""
        for vg in [g for g in self.data if g["type"] == "vg"]:
            for lv in vg["volumes"]:
                if lv["size"] <= 0:
                    continue
                fstype = self._getfstype(lv)
                size = self._getsize(lv)
                tabmount = lv["mount"] if lv["mount"] != "swap" else "none"
                tabfstype = self._gettabfstype(lv)
                tabfsoptions = self._gettabfsoptions(lv)
                if size > 0 and size <= 16777216 and tabfstype != "xfs":
                    self.kick("logvol {0} --vgname={1} --size={2} "
                              "--name={3} {4}".format(
                                  lv["mount"], vg["id"], size,
                                  lv["name"], fstype))
                else:
                    self.post("lvcreate --size {0} --name {1} {2}".format(
                        size, lv["name"], vg["id"]))
                    if lv["mount"] != "swap" and tabfstype != "none":
                        self.post("mkfs.{0} {1} /dev/mapper/{2}-{3}".format(
                            tabfstype, tabfsoptions, vg["id"], lv["name"]))
                        self.post("mkdir -p /mnt/sysimage{0}"
                                  "".format(lv["mount"]))
                    if tabfstype != "none":
                        """
                        The name of the device. An LVM device is
                        expressed as the volume group name and the logical
                        volume name separated by a hyphen. A hyphen in
                        the original name is translated to two hyphens.
                        """
                        self.post("echo '/dev/mapper/{0}-{1} {2} {3} "
                                  "defaults 0 0'"
                                  " >> /mnt/sysimage/etc/fstab".format(
                                      vg["id"].replace("-", "--"),
                                      lv["name"].replace("-", "--"),
                                      tabmount, tabfstype))

    def bootloader(self):
        """Emit the kickstart bootloader directive and %post grub installation."""
        devs = []
        for disk in self.iterdisks():
            devs.append("$(basename {0})"
                        "".format(self._disk_dev(disk)))
        if devs:
            self.kick("bootloader --location=mbr --driveorder={0} "
                      "--append=' {1} '".format(
                          ",".join(devs),
                          self.kernel_params))
        for dev in devs:
            # Build a batch grub script per device and run it in the chroot.
            self.post("echo -n > /tmp/grub.script")
            self.post("echo \\\"device (hd0) /dev/{0}\\\" >> "
                      "/tmp/grub.script".format(dev))
            """
            This means that we set drive geometry manually into to
            avoid grub register overlapping. We set it so that grub
            thinks disk size is equal to 1G.
            130 cylinders * (16065 * 512 = 8225280 bytes) = 1G
            """
            self.post("echo \\\"geometry (hd0) 130 255 63\\\" >> "
                      "/tmp/grub.script")
            self.post("echo \\\"root (hd0,2)\\\" >> /tmp/grub.script")
            self.post("echo \\\"install /grub/stage1 (hd0) /grub/stage2 p "
                      "/grub/grub.conf\\\" >> /tmp/grub.script")
            self.post("echo quit >> /tmp/grub.script")
            self.post("cat /tmp/grub.script | chroot /mnt/sysimage "
                      "/sbin/grub --no-floppy --batch")

    def expose(self,
               kickfile="/tmp/partition.ks",
               postfile="/tmp/post_partition.ks"):
        """Render the accumulated pre/kick/post streams into one shell script.

        The script runs the pre-commands directly and writes the kickstart
        directives and the %post section into kickfile/postfile.
        """
        result = ""
        for pre in self.pre():
            result += "{0}\n".format(pre)
        result += "echo > {0}\n".format(kickfile)
        for kick in self.kick():
            result += "echo \"{0}\" >> {1}\n".format(kick, kickfile)
        result += "echo \"%post --nochroot\" > {0}\n".format(postfile)
        result += "echo \"set -x -v\" >> {0}\n".format(postfile)
        result += ("echo \"exec 1>/mnt/sysimage/root/post-partition.log "
                   "2>&1\" >> {0}\n".format(postfile))
        for post in self.post():
            result += "echo \"{0}\" >> {1}\n".format(post, postfile)
        result += "echo \"%end\" >> {0}\n".format(postfile)
        return result

    def eval(self):
        """Build the full command sequence for every disk, in fixed order."""
        for disk in self.iterdisks():
            self.clean(disk)
            self.gpt(disk)
            self.bootable(disk)
        self.boot()
        self.notboot()
        self.pvs()
        self.lvs()
        self.bootloader()
        # Re-read partition tables after everything is queued, then wipe
        # stale LVM/RAID metadata left from previous installations.
        self.pre("sleep 10")
        for disk in self.iterdisks():
            self.pre("hdparm -z {0}".format(self._disk_dev(disk)))
        self.erase_lvm_metadata()
        self.erase_raid_metadata()
class PreseedPManager(object):
def __init__(self, pm_data):
    """Parse provisioning data and set up preseed-generation state.

    pm_data may be a JSON string or an already-parsed dict; it must
    contain 'ks_spaces' (volume layout) and 'kernel_params'.
    Raises via validate() if the 'os' VG spans more than one disk.
    """
    # NOTE(review): `unicode` makes this Python-2-only code.
    if isinstance(pm_data, (str, unicode)):
        self.pm_data = json.loads(pm_data)
    else:
        self.pm_data = pm_data
    self.data = self.pm_data['ks_spaces']
    self.kernel_params = self.pm_data['kernel_params']
    # enumerating disks
    for num, item in enumerate(
            [d for d in self.data if d["type"] == "disk"]):
        item["enum"] = num
    self.validate()
    self.factor = 1
    self.unit = "MiB"
    self.disks = sorted([self._disk_dev(d) for d in self.iterdisks()])
    self.os_disk = self.os_disks()[0]
    self._pcount = {}   # partitions created so far, per disk id
    self._pend = {}     # running partition end offset, per disk id
    self._recipe = []   # partman recipe fragments
    self._late = []     # (command, in_target) pairs for late_command
    self._early = []    # early_command shell fragments
def os_disks(self):
    """Return device expressions of disks carrying an 'os' volume-group PV."""
    # NOTE(review): relies on Python 2 `filter()` returning a list; under
    # Python 3 the filter object is always truthy and every disk would
    # pass — confirm the py2-only assumption.
    return [self._disk_dev(d) for d in self.iterdisks() if
            filter(lambda x: x.get("vg") == "os" and
                   x.get("size") > 0, d["volumes"])]
def validate(self):
# os volume group can not be distributed over more than one disk.
# it is because we use plain partition for / and swap on ubuntu.
if len(self.os_disks()) > 1:
raise Exception("OS volume group must be located on one disk")
def _disk_by_links(self, disk):
command = "$(readlink -f $( ("
command += " || ".join(["ls /dev/{0}".format(d)
for d in disk.get("extra", [])])
if disk["extra"]:
command += " || "
command += "ls /dev/{0}".format(disk["id"])
command += ") 2>/dev/null) )"
return command
def _disk_dev(self, disk):
return "${{DISK_{0}}}".format(disk["enum"])
def disks_map(self):
command = ""
for disk in self.iterdisks():
command += "export DISK_{0}={1}; ".format(
disk["enum"], self._disk_by_links(disk))
return command
def iterdisks(self):
for item in self.data:
if item["type"] == "disk" and item["size"] > 0:
yield item
def recipe(self, command=None):
if command:
return self._recipe.append(command)
return self._recipe
def late(self, command=None, in_target=False, udev_settle=False):
if command:
if udev_settle:
# this gonna wait until udev event queue is handled
# and avoid appearing udev race condition.
# for example http://permalink.gmane.org/gmane.linux.raid/34027
self._late.append(("/sbin/udevadm settle", False))
return self._late.append((command, in_target))
return self._late
def early(self, command=None):
if command:
return self._early.append(command)
return self._early
def _pseparator(self, devname):
pseparator = ''
if devname.find('cciss') >= 0:
pseparator = 'p'
return pseparator
def _getlabel(self, label):
if not label:
return ""
# XFS will refuse to format a partition if the
# disk label is > 12 characters.
return " -L {0} ".format(label[:12])
def _parttype(self, n):
    """Partition type passed to parted mkpart; always 'primary' here."""
    return "primary"
def _fsoptions(self, fstype):
if fstype == "xfs":
return "-f -s size=512"
return ""
def _umount_target(self):
    """Queue late-commands unmounting /target in reverse mount order.

    Partition numbering: 3 is the root partition, 4 is swap on the OS
    disk (see _mount_target).
    """
    self.late("umount /target/dev")
    self.late("umount /target/sys")
    self.late("umount /target/proc")
    self.late("umount /target/boot")
    self.late("umount /target")
    self.late("umount {0}{1}3".format(
        self.os_disk, self._pseparator(self.os_disk)))
    self.late("swapoff {0}{1}4".format(
        self.os_disk, self._pseparator(self.os_disk)))
def _mount_target(self):
    """Queue late-commands mounting the installed system under /target.

    Uses fixed partition numbers on the OS disk: 2 = /boot, 3 = root,
    4 = swap; then binds proc/dev/sys for chroot use.
    """
    self.late("mount {0}{1}3 /target".format(
        self.os_disk, self._pseparator(self.os_disk)))
    self.late("mount {0}{1}2 /target/boot".format(
        self.os_disk, self._pseparator(self.os_disk)))
    self.late("mount -t proc none /target/proc")
    self.late("mount -o bind /dev /target/dev")
    self.late("mount -o bind /sys /target/sys")
    self.late("swapon {0}{1}4".format(
        self.os_disk, self._pseparator(self.os_disk)))
def _long_logger(self):
    """Create script for logging long lines.

    This method puts script which splits
    long line and sends them to logger
    #!/bin/sh
    chunk=80
    while read string; do
    iterations=`expr ${#string} / $chunk + 1`; i=0;
    while [ $i -le $(( iterations - 1)) ]; do
    start_sym=$(( $i * ${chunk} + 1 ))
    end_sym=$(( $(( $i + 1 )) * ${chunk}))
    echo $string | cut -c ${start_sym}-${end_sym} | logger
    i=$(( i + 1 )); done; done;

    Returns a tuple of shell fragments that, when executed, write the
    above script to /tmp/long_logger.sh and make it executable.
    """
    return (
        "echo '#!/bin/sh' > /tmp/long_logger.sh;",
        "echo 'chunk=80;' >> /tmp/long_logger.sh;",
        "echo 'while read string; do' >> /tmp/long_logger.sh;",
        ("echo 'iterations=`expr ${#string} / $chunk + 1`; i=0;' "
         ">> /tmp/long_logger.sh;"),
        ("echo 'while [ $i -le $(( iterations - 1)) ]; do' "
         ">> /tmp/long_logger.sh;"),
        ("echo 'start_sym=$(( $i * ${chunk} + 1 ))' "
         ">> /tmp/long_logger.sh;"),
        ("echo 'end_sym=$(( $(( $i + 1 )) * ${chunk}))' "
         ">> /tmp/long_logger.sh;"),
        ("echo 'echo $string | cut -c ${start_sym}-${end_sym} | logger' "
         ">> /tmp/long_logger.sh;"),
        "echo 'i=$(( i + 1 )); done; done;' >> /tmp/long_logger.sh;",
        "chmod +x /tmp/long_logger.sh;"
    )
def non_boot_partitions(self, volumes):
for part in filter(lambda p: p["type"] == "partition" and
p["mount"] != "/boot", volumes):
if part["size"] > 0:
yield part
def pcount(self, disk_id, increment=0):
self._pcount[disk_id] = self._pcount.get(disk_id, 0) + increment
return self._pcount.get(disk_id, 0)
def psize(self, disk_id, increment=0):
self._pend[disk_id] = self._pend.get(disk_id, 0) + increment
return self._pend.get(disk_id, 0)
def get_partition_count(self, name):
count = 0
for disk in self.iterdisks():
count += len([v for v in disk["volumes"]
if v.get('name') == name and v['size'] > 0])
return count
    def num_ceph_journals(self):
        # Number of non-empty 'cephjournal' partitions across all disks.
        return self.get_partition_count('cephjournal')
    def num_ceph_osds(self):
        # Number of non-empty 'ceph' (OSD data) partitions across all disks.
        return self.get_partition_count('ceph')
def erase_partition_table(self):
for disk in self.iterdisks():
self.early("test -e {0} && "
"dd if=/dev/zero of={0} "
"bs=1M count=10".format(self._disk_dev(disk)))
self.early("sleep 10")
self.early("hdparm -z {0}".format(self._disk_dev(disk)))
self.early("parted -s {0} print free".format(self._disk_dev(disk)))
def log_lvm(self, line, early=True):
func = self.early
if not early:
func = self.late
func("echo \"=== {0} ===\"".format(line))
func("vgs -a --noheadings")
func("pvs --noheadings")
    def erase_lvm_metadata(self, early=True):
        """Queue commands that force-remove every LVM VG and PV.

        :param early: queue via ``self.early`` when True, otherwise via
            ``self.late``.
        """
        func = self.early
        if not early:
            func = self.late
        # Drop every volume group first (vgreduce copes with missing PVs),
        # then wipe the physical volume labels themselves.  The sed strips
        # the leading whitespace from the first column of vgs/pvs output.
        func("for v in $(vgs -a --noheadings 2>/dev/null | "
             "sed 's/^\([ ]*\)\([^ ]\+\)\(.*\)/\\2/g'); do "
             "vgreduce --force --removemissing $v; "
             "vgremove --force $v; done")
        func("for p in $(pvs --noheadings 2>/dev/null | "
             "sed 's/^\([ ]*\)\([^ ]\+\)\(.*\)/\\2/g'); do "
             "pvremove -ff -y $p; done")
    def _blacklist_udev_rules(self):
        """Disable all udev rules so partitioning runs undisturbed.

        Every rule file mirrored in /etc/udev/rules.d is moved aside to
        *.bak (restored later by _unblacklist_udev_rules) and replaced by
        an empty '#' placeholder, then udev is told to reload.
        """
        self.late(
            "for rules in $(ls -1 /lib/udev/rules.d/*.rules); do "
            "test -e /etc/udev/rules.d/$(basename $rules) && "
            "mv /etc/udev/rules.d/$(basename $rules) "
            "/etc/udev/rules.d/$(basename $rules).bak; "
            "echo '#' > /etc/udev/rules.d/$(basename $rules); "
            "done")
        self.late("udevadm control --reload")
    def _unblacklist_udev_rules(self):
        """Restore the udev rules disabled by _blacklist_udev_rules.

        Moves *.bak files back into place (or removes the placeholder if
        no backup exists), reloads udev, re-triggers events and waits for
        the event queue to settle.
        """
        self.late(
            "for rules in $(ls -1 /lib/udev/rules.d/*.rules); do "
            "if test -e /etc/udev/rules.d/$(basename $rules).bak; "
            "then mv /etc/udev/rules.d/$(basename $rules).bak "
            "/etc/udev/rules.d/$(basename $rules); "
            "else rm -f /etc/udev/rules.d/$(basename $rules); "
            "fi; done")
        self.late("udevadm control --reload")
        self.late("udevadm trigger")
        self.late("udevadm settle --quiet")
    def boot(self):
        """Add partman recipe entries for the bios_grub and /boot partitions.

        Creates a 24MB bios_grub partition plus a 200MB ext3 /boot on the
        OS disk, keeping the local partition counter (pcount) and offset
        (psize) in sync with what debian-installer will create.
        """
        self.recipe("24 24 24 ext3 "
                    "$gptonly{ } "
                    "$bios_boot{ } "
                    "method{ biosgrub } .")
        # Track the space and partition number consumed by the recipe line.
        self.psize(self.os_disk, 24 * self.factor)
        self.pcount(self.os_disk, 1)
        self.late("parted -s $(readlink -f {0}) set {1} bios_grub on".format(
            self.os_disk,
            self.pcount(self.os_disk)
        )
        )
        self.recipe(
            "200 200 200 ext3 $primary{ } "
            "$gptonly{ } "
            "$bootable{ } method{ format } format{ } use_filesystem{ } "
            "filesystem{ ext3 } mountpoint{ /boot } .")
        self.pcount(self.os_disk, 1)
        self.psize(self.os_disk, 200 * self.factor)
    def os(self):
        """Add partman recipe entries for the 'os' volume group (/ and swap).

        Also appends a throw-away filler partition (see the inline note
        below) which is deleted again during the late stage so custom
        partitions can be allocated afterwards.
        """
        for vg in [v for v in self.data
                   if v["type"] == "vg" and v["id"] == "os"]:
            for vol in vg["volumes"]:
                if vol["mount"] == "swap":
                    swap_size = vol["size"]
                elif vol["mount"] == "/":
                    root_size = vol["size"]
        # NOTE(review): root_size/swap_size are only bound if the 'os' vg
        # defines both '/' and 'swap' volumes -- confirm the data schema
        # guarantees this, otherwise a NameError follows.
        self.recipe("{0} {0} {0} ext4 "
                    "$gptonly{{ }} "
                    "method{{ format }} format{{ }} use_filesystem{{ }} "
                    "filesystem{{ ext4 }} mountpoint{{ / }} ."
                    "".format(root_size))
        self.pcount(self.os_disk, 1)
        self.psize(self.os_disk, root_size * self.factor)
        self.recipe("{0} {0} {0} linux-swap "
                    "$gptonly{{ }} "
                    "method{{ swap }} format{{ }} .".format(swap_size))
        self.pcount(self.os_disk, 1)
        self.psize(self.os_disk, swap_size * self.factor)
        """
        We need this line because debian-installer takes total disk space
        for the last partition. So to be able to allocate custom partitions
        during the late stage we need to create fake swap partition that
        we then destroy.
        """
        self.recipe("1 1 -1 ext3 $gptonly{ } method{ keep } .")
        self.late("parted -s {0} print free".format(self.os_disk))
        self._umount_target()
        self.late("parted {0} rm 5".format(self.os_disk))
        self.late("sleep 10")
        self.late("hdparm -z {0}".format(self.os_disk))
        self.late("parted -s {0} print free".format(self.os_disk))
        self.late("find /dev \( -type l -o -type b \) -exec ls -l {} \;")
        self._mount_target()
def partitions(self):
ceph_osds = self.num_ceph_osds()
journals_left = ceph_osds
ceph_journals = self.num_ceph_journals()
self._umount_target()
self._blacklist_udev_rules()
cephjournal_guid_commands = []
for disk in self.iterdisks():
for part in self.non_boot_partitions(disk["volumes"]):
if self.pcount(self._disk_dev(disk)) == 0:
self.late("parted -s {0} mklabel gpt".format(
self._disk_dev(disk)), udev_settle=True)
self.late("parted -a optimal -s {0} "
"unit {3} mkpart primary {1} {2}".format(
self._disk_dev(disk),
self.psize(self._disk_dev(disk)),
self.psize(self._disk_dev(disk),
24 * self.factor),
self.unit),
udev_settle=True)
self.late("parted -s {0} set {1} "
"bios_grub on".format(
self._disk_dev(disk),
self.pcount(self._disk_dev(disk), 1)),
udev_settle=True)
self.late("parted -s {0} print free".format(
self._disk_dev(disk)))
if part.get('name') == 'cephjournal':
# We need to allocate a journal partition for each ceph OSD
# Determine the number of journal partitions we need on
# each device
ratio = math.ceil(float(ceph_osds) / ceph_journals)
# No more than 10GB will be allocated to a single
# journal partition
size = part["size"] / ratio
if size > 10240:
size = 10240
# This will attempt to evenly spread partitions across
# multiple devices e.g. 5 osds with 2 journal devices will
# create 3 partitions on the first device and 2 on the
# second
if ratio < journals_left:
end = ratio
else:
end = journals_left
for i in range(0, end):
journals_left -= 1
pcount = self.pcount(self._disk_dev(disk), 1)
part["pcount"] = pcount
self.late(
"parted -a optimal -s {0} "
"unit {4} mkpart {1} {2} {3}".format(
self._disk_dev(disk),
self._parttype(pcount),
self.psize(self._disk_dev(disk)),
self.psize(self._disk_dev(disk),
size * self.factor),
self.unit),
udev_settle=True
)
# We don't want to append late command right here
# because we need sgdisk to be run in-target so
# the target must be mounted. Instead of additional
# mounting and unmounting we just collect all those
# commands and them run them all at once.
cephjournal_guid_commands.append(
"sgdisk --typecode={0}:{1} {2}".format(
pcount,
part["partition_guid"],
self._disk_dev(disk)
)
)
self.late("parted -s {0} print free".format(
self._disk_dev(disk)))
continue
pcount = self.pcount(self._disk_dev(disk), 1)
part["pcount"] = pcount
tabmount = part["mount"] if part["mount"] != "swap" else "none"
self.late("parted -a optimal -s {0} "
"unit {4} mkpart {1} {2} {3}".format(
self._disk_dev(disk),
self._parttype(pcount),
self.psize(self._disk_dev(disk)),
self.psize(self._disk_dev(disk),
part["size"] * self.factor),
self.unit),
udev_settle=True)
self.late("sleep 10")
self.late("hdparm -z {0}"
"".format(self._disk_dev(disk)))
self.late("parted -s {0} print free".format(
self._disk_dev(disk)))
self.late(
"find /dev \( -type l -o -type b \) -exec ls -l {} \;")
self.late("mount")
self.late("cat /proc/swaps")
self.late("cat /proc/mdstat")
self.late("cat /proc/partitions")
# clear any fs info that may remain on newly created partition
self.late("dd if=/dev/zero of={0}{1}{2} bs=1M count=10"
"".format(self._disk_dev(disk),
self._pseparator(disk["id"]),
pcount))
fs = part.get("file_system", "xfs")
if fs not in ("swap", None, "none"):
disk_label = self._getlabel(part.get("disk_label"))
self.late("mkfs.{0} {1} {2}{3}{4} {5}".format(
part.get("file_system", "xfs"),
self._fsoptions(part.get("file_system", "xfs")),
self._disk_dev(disk),
self._pseparator(disk["id"]),
pcount, disk_label))
self._unblacklist_udev_rules()
self._mount_target()
# Partition guids must be set in-target, which requires target to be
# mounted. But for cephjournal we have a separate collection of
# late commands.
for disk in self.iterdisks():
for part in self.non_boot_partitions(disk["volumes"]):
if (part.get("partition_guid") and
part.get("name") != "cephjournal"):
self.late("sgdisk --typecode={0}:{1} {2}"
"".format(part["pcount"], part["partition_guid"],
self._disk_dev(disk)), True)
# This loop appends commands which set cephjournal guids.
for command in cephjournal_guid_commands:
self.late(command, True)
for disk in self.iterdisks():
for part in filter(lambda p: p["type"] == "partition" and
p["mount"] != "/boot" and p["size"] > 0 and
p.get('name') != 'cephjournal',
disk["volumes"]):
if not part["mount"] in (None, "none", "swap"):
self.late("mkdir -p /target{0}".format(part["mount"]))
if not part["mount"] in (None, "none"):
self.late("echo 'UUID=$(blkid -s UUID -o value "
"{0}{1}{2}) "
"{3} {4} {5} 0 0'"
" >> /target/etc/fstab"
"".format(
self._disk_dev(disk),
self._pseparator(disk["id"]),
part["pcount"], tabmount,
part.get("file_system", "xfs"),
("defaults" if part["mount"] != "swap"
else "sw")))
    def lv(self):
        """Create non-'os' LVM PVs, VGs and logical volumes.

        Pass 1 creates a GPT partition for every non-'os' pv entry (with
        udev rules blacklisted), collecting pvcreate commands and the
        per-vg device lists.  Pass 2 runs pvcreate/vgremove/vgcreate,
        then lvcreate, mkfs and /etc/fstab lines for every logical
        volume (except swap and /).
        """
        self.log_lvm("before creating lvm", False)
        devices_dict = {}
        pvlist = []
        self._umount_target()
        self._blacklist_udev_rules()
        for disk in self.iterdisks():
            self.late("parted -s {0} print free".format(self._disk_dev(disk)))
            for pv in [p for p in disk["volumes"]
                       if p["type"] == "pv" and p["vg"] != "os"]:
                if pv["size"] <= 0:
                    continue
                # First partition on a pristine disk: GPT label plus a
                # 24MB bios_grub partition, same as in partitions().
                if self.pcount(self._disk_dev(disk)) == 0:
                    # this gonna wait until udev event queue is handled
                    self.late("parted -s {0} mklabel gpt".format(
                        self._disk_dev(disk)), udev_settle=True)
                    self.late("parted -a optimal -s {0} "
                              "unit {3} mkpart primary {1} {2}".format(
                                  self._disk_dev(disk),
                                  self.psize(self._disk_dev(disk)),
                                  self.psize(self._disk_dev(disk),
                                             24 * self.factor),
                                  self.unit),
                              udev_settle=True)
                    self.late("parted -s {0} set {1} "
                              "bios_grub on".format(
                                  self._disk_dev(disk),
                                  self.pcount(self._disk_dev(disk), 1)),
                              udev_settle=True)
                    self.late("parted -s {0} print free".format(
                        self._disk_dev(disk)))
                pcount = self.pcount(self._disk_dev(disk), 1)
                begin_size = self.psize(self._disk_dev(disk))
                end_size = self.psize(self._disk_dev(disk),
                                      pv["size"] * self.factor)
                self.late("parted -a optimal -s {0} "
                          "unit {4} mkpart {1} {2} {3}".format(
                              self._disk_dev(disk),
                              self._parttype(pcount),
                              begin_size,
                              end_size,
                              self.unit),
                          udev_settle=True)
                self.late("sleep 10")
                self.log_lvm("after creating partition", False)
                self.erase_lvm_metadata(False)
                self.late("hdparm -z {0}"
                          "".format(self._disk_dev(disk)))
                self.late("parted -s {0} print free".format(
                    self._disk_dev(disk)))
                self.late(
                    "find /dev \( -type l -o -type b \) -exec ls -l {} \;")
                self.late("mount")
                self.late("cat /proc/swaps")
                self.late("cat /proc/mdstat")
                self.late("cat /proc/partitions")
                # pvcreate is deferred so all partitions exist first.
                pvlist.append("pvcreate -ff {0}{1}{2}"
                              "".format(self._disk_dev(disk),
                                        self._pseparator(disk["id"]),
                                        pcount))
                if not devices_dict.get(pv["vg"]):
                    devices_dict[pv["vg"]] = []
                devices_dict[pv["vg"]].append(
                    "{0}{1}{2}"
                    "".format(self._disk_dev(disk),
                              self._pseparator(disk["id"]), pcount)
                )
        self._unblacklist_udev_rules()
        self.log_lvm("before additional cleaning", False)
        self.erase_lvm_metadata(False)
        self.log_lvm("before pvcreate", False)
        for pvcommand in pvlist:
            self.late(pvcommand)
        self.log_lvm("before vgcreate", False)
        for vg, devs in devices_dict.iteritems():
            self.late("vgremove -f {0}".format(vg), udev_settle=True)
            self.late("vgcreate -s 32m {0} {1}".format(vg, " ".join(devs)),
                      udev_settle=True)
        self.log_lvm("after vgcreate", False)
        self._mount_target()
        for vg in [v for v in self.data
                   if v["type"] == "vg" and v["id"] != "os"]:
            for lv in vg["volumes"]:
                if lv["size"] <= 0:
                    continue
                self.late("lvcreate -L {0}m -Z n -n {1} {2}".format(
                    lv["size"], lv["name"], vg["id"]), udev_settle=True)
                self.late("sleep 10")
                self.late("lvscan")
                tabmount = lv["mount"] if lv["mount"] != "swap" else "none"
                if (
                    (lv.get("file_system", "xfs") not in
                     ("swap", None, "none"))
                    and
                    (lv["mount"] not in ("swap", "/"))
                ):
                    # Dashes must be doubled in device-mapper names.
                    self.late("mkfs.{0} {1} /dev/mapper/{2}-{3}".format(
                        lv.get("file_system", "xfs"),
                        self._fsoptions(lv.get("file_system", "xfs")),
                        vg["id"].replace("-", "--"),
                        lv["name"].replace("-", "--")))
                if not lv["mount"] in (None, "none", "swap", "/"):
                    self.late("mkdir -p /target{0}".format(lv["mount"]))
                if not lv["mount"] in (None, "none", "swap", "/"):
                    self.late("echo '/dev/mapper/{0}-{1} "
                              "{2} {3} {4} 0 0' >> /target/etc/fstab"
                              "".format(
                                  vg["id"].replace("-", "--"),
                                  lv["name"].replace("-", "--"),
                                  tabmount,
                                  lv.get("file_system", "xfs"),
                                  ("defaults" if lv["mount"] != "swap"
                                   else "sw")))
    def eval(self):
        """Assemble the full provisioning plan.

        The call order is the contract: map disks, clean old LVM and
        partition-table state, lay out /boot and the OS volumes, create
        data partitions and LVM, then install and configure grub
        in-target.
        """
        self.early(self.disks_map())
        self.late(self.disks_map())
        self.log_lvm("before early lvm cleaning")
        self.erase_lvm_metadata()
        self.log_lvm("after early lvm cleaning")
        self.erase_partition_table()
        self.boot()
        self.os()
        self.partitions()
        self.erase_lvm_metadata()
        self.lv()
        self.late("apt-get install -y grub-pc", True)
        # Disable grub's interactive recordfail wait on headless nodes.
        self.late(
            "sed -i "
            "-e '$a\ ' "
            "-e '$a\#Disable the waiting "
            "for user input if the last boot failed' "
            "-e '$a\GRUB_RECORDFAIL_TIMEOUT=$GRUB_TIMEOUT' /etc/default/grub",
            True)
        # Force console output and inject the node's kernel parameters.
        self.late("sed -i "
                  "-e 's/.*GRUB_TERMINAL.*/GRUB_TERMINAL=console/g' "
                  "-e 's/.*GRUB_GFXMODE.*/#GRUB_GFXMODE=640x480/g' "
                  "-e 's/.*GRUB_CMDLINE_LINUX.*/"
                  "GRUB_CMDLINE_LINUX=\" {0} \"/g' /etc/default/grub".format(
                      self.kernel_params),
                  True)
        self._umount_target()
        self._mount_target()
        self.late("grub-mkconfig", True)
        self.late("grub-mkdevicemap", True)
        for disk in self.iterdisks():
            self.late("grub-install {0}"
                      "".format(self._disk_dev(disk)), True)
        self.late("update-grub", True)
        self.late("find /dev \( -type l -o -type b \) -exec ls -l {} \;")
def expose_recipe(self):
return " \\\n".join(self.recipe())
def expose_late(self, gzip=False):
result = ""
for line, in_target in self.late():
line_to_append = "{0}{1}".format(
("in-target " if in_target else ""), line)
result += ("echo '{0}' | /tmp/long_logger.sh;\\\n"
"".format(re.sub("'", "'\"'\"'", line_to_append)))
result += line_to_append + " 2>&1 | /tmp/long_logger.sh;\\\n"
return result.rstrip()
def expose_early(self):
result = ""
for line in self._long_logger():
result += "{0}\\\n".format(line)
for line in self.early():
line_to_append = "{0}".format(line)
result += ("echo '{0}' | /tmp/long_logger.sh;\\\n"
"".format(re.sub("'", "'\"'\"'", line_to_append)))
result += line_to_append + " 2>&1 | /tmp/long_logger.sh;\\\n"
return result.rstrip()
    def expose_disks(self):
        # Expose the device chosen as the OS disk (used by the preseed).
        return self.os_disk
def pm(data):
    """Build a PManager for *data*, evaluate the plan and return its output."""
    manager = PManager(data)
    manager.eval()
    return manager.expose()
example = """
[
{
"name": "sda",
"free_space": 101772,
"volumes": [
{
"type": "boot",
"size": 300
},
{
"mount": "/boot",
"type": "raid",
"size": 200
},
{
"type": "lvm_meta_pool",
"size": 0
},
{
"size": 12352,
"type": "pv",
"lvm_meta_size": 64,
"vg": "os"
},
{
"size": 89548,
"type": "pv",
"lvm_meta_size": 64,
"vg": "image"
}
],
"type": "disk",
"id": "disk/by-path/pci-0000:00:06.0-scsi-0:0:0:0",
"size": 102400
},
{
"name": "sdb",
"free_space": 101772,
"volumes": [
{
"type": "boot",
"size": 300
},
{
"mount": "/boot",
"type": "raid",
"size": 200
},
{
"type": "lvm_meta_pool",
"size": 64
},
{
"size": 0,
"type": "pv",
"lvm_meta_size": 0,
"vg": "os"
},
{
"size": 101836,
"type": "pv",
"lvm_meta_size": 64,
"vg": "image"
}
],
"type": "disk",
"id": "disk/by-path/pci-0000:00:06.0-scsi-0:0:1:0",
"size": 102400
},
{
"min_size": 12288,
"type": "vg",
"id": "os",
"volumes": [
{
"mount": "/",
"type": "lv",
"name": "root",
"size": 10240
},
{
"mount": "swap",
"type": "lv",
"name": "swap",
"size": 2048
}
],
"label": "Base System"
},
{
"min_size": 5120,
"type": "vg",
"id": "image",
"volumes": [
{
"mount": "/var/lib/glance",
"type": "lv",
"name": "glance",
"size": 191256
}
],
"label": "Image Storage"
}
]
"""
# Example usage (manual smoke test against the sample layout above):
#   pmanager = PreseedPManager(example)
#   pmanager.eval()
#   print pmanager.expose_late()
def default_volume_filter(volume):
    """Default volume filter. Always return True.
    Used as a no-op predicate when the caller does not want to exclude
    any volume; *volume* is accepted only for interface compatibility.
    """
    return True
|
|
# Copyright 2011-2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_log import log as logging
from oslo_serialization import jsonutils
import webob.exc
from glance.api import policy
from glance.common import wsgi
import glance.context
from glance.i18n import _, _LW
context_opts = [
cfg.BoolOpt('owner_is_tenant', default=True,
help=_("""
Set the image owner to tenant or the authenticated user.
Assign a boolean value to determine the owner of an image. When set to
True, the owner of the image is the tenant. When set to False, the
owner of the image will be the authenticated user issuing the request.
Setting it to False makes the image private to the associated user and
sharing with other users within the same tenant (or "project")
requires explicit image sharing via image membership.
Possible values:
* True
* False
Related options:
* None
""")),
cfg.StrOpt('admin_role', default='admin',
help=_("""
Role used to identify an authenticated user as administrator.
Provide a string value representing a Keystone role to identify an
administrative user. Users with this role will be granted
administrative privileges. The default value for this option is
'admin'.
Possible values:
* A string value which is a valid Keystone role
Related options:
* None
""")),
cfg.BoolOpt('allow_anonymous_access', default=False,
help=_("""
Allow limited access to unauthenticated users.
Assign a boolean to determine API access for unathenticated
users. When set to False, the API cannot be accessed by
unauthenticated users. When set to True, unauthenticated users can
access the API with read-only privileges. This however only applies
when using ContextMiddleware.
Possible values:
* True
* False
Related options:
* None
""")),
cfg.IntOpt('max_request_id_length', default=64, min=0,
help=_("""
Limit the request ID length.
Provide an integer value to limit the length of the request ID to
the specified length. The default value is 64. Users can change this
to any ineteger value between 0 and 16384 however keeping in mind that
a larger value may flood the logs.
Possible values:
* Integer value between 0 and 16384
Related options:
* None
""")),
]
CONF = cfg.CONF
CONF.register_opts(context_opts)
LOG = logging.getLogger(__name__)
class BaseContextMiddleware(wsgi.Middleware):
    """Shared response handling for the context middlewares."""

    def process_response(self, resp):
        """Echo the request id back on the x-openstack-request-id header.

        If the response's request carries no context (or the context
        has no request id) a warning is logged and the response is
        returned untouched.
        """
        try:
            request_id = resp.request.context.request_id
        except AttributeError:
            LOG.warn(_LW('Unable to retrieve request id from context'))
            return resp
        # For python 3 compatibility need to use bytes type
        prefix = b'req-' if isinstance(request_id, bytes) else 'req-'
        if not request_id.startswith(prefix):
            request_id = prefix + request_id
        resp.headers['x-openstack-request-id'] = request_id
        return resp
class ContextMiddleware(BaseContextMiddleware):
    """Builds a glance RequestContext from keystone auth headers."""

    def __init__(self, app):
        self.policy_enforcer = policy.Enforcer()
        super(ContextMiddleware, self).__init__(app)

    def process_request(self, req):
        """Convert authentication information into a request context

        Generate a glance.context.RequestContext object from the available
        authentication headers and store on the 'context' attribute
        of the req object.

        :param req: wsgi request object that will be given the context object
        :raises: webob.exc.HTTPUnauthorized: when value of the
                                            X-Identity-Status header is not
                                            'Confirmed' and anonymous access
                                            is disallowed
        """
        if req.headers.get('X-Identity-Status') == 'Confirmed':
            req.context = self._get_authenticated_context(req)
        elif CONF.allow_anonymous_access:
            req.context = self._get_anonymous_context()
        else:
            raise webob.exc.HTTPUnauthorized()

    def _get_anonymous_context(self):
        # Read-only context without identity, used when anonymous
        # access is explicitly enabled via CONF.allow_anonymous_access.
        kwargs = {
            'user': None,
            'tenant': None,
            'roles': [],
            'is_admin': False,
            'read_only': True,
            'policy_enforcer': self.policy_enforcer,
        }
        return glance.context.RequestContext(**kwargs)

    def _get_authenticated_context(self, req):
        """Build a RequestContext from the request's auth headers.

        :raises: webob.exc.HTTPInternalServerError for a malformed
            service catalog, HTTPRequestHeaderFieldsTooLarge for an
            over-long request id.
        """
        service_catalog = None
        if req.headers.get('X-Service-Catalog') is not None:
            try:
                catalog_header = req.headers.get('X-Service-Catalog')
                service_catalog = jsonutils.loads(catalog_header)
            except ValueError:
                raise webob.exc.HTTPInternalServerError(
                    _('Invalid service catalog json.'))
        request_id = req.headers.get('X-Openstack-Request-ID')
        if request_id and (0 < CONF.max_request_id_length <
                           len(request_id)):
            msg = (_('x-openstack-request-id is too long, max size %s') %
                   CONF.max_request_id_length)
            # BUG FIX: this used to *return* the exception object, which
            # made process_request store it as req.context instead of
            # rejecting the request.  Raise it so webob converts it into
            # the proper error response.
            raise webob.exc.HTTPRequestHeaderFieldsTooLarge(comment=msg)
        kwargs = {
            'owner_is_tenant': CONF.owner_is_tenant,
            'service_catalog': service_catalog,
            'policy_enforcer': self.policy_enforcer,
            'request_id': request_id,
        }
        ctxt = glance.context.RequestContext.from_environ(req.environ,
                                                          **kwargs)
        # FIXME(jamielennox): glance has traditionally lowercased its roles.
        # This was related to bug #1010519 where at least the admin role was
        # case insensitive. This seems to no longer be the case and should be
        # fixed.
        ctxt.roles = [r.lower() for r in ctxt.roles]
        if CONF.admin_role.strip().lower() in ctxt.roles:
            ctxt.is_admin = True
        return ctxt
class UnauthenticatedContextMiddleware(BaseContextMiddleware):
    """Middleware that installs an anonymous admin context on requests."""

    def process_request(self, req):
        """Create a context without an authorized user."""
        req.context = glance.context.RequestContext(
            user=None,
            tenant=None,
            roles=[],
            is_admin=True,
        )
|
|
# -*- coding: utf-8 -*-
import json
import yaml
import pytest
from unittest import TestCase
from lain_sdk.yaml.parser import (
LainConf, ProcType, Proc,
just_simple_scale,
render_resource_instance_meta, DEFAULT_SYSTEM_VOLUMES,
DOMAIN,
MIN_SETUP_TIME, MAX_SETUP_TIME, MIN_KILL_TIMEOUT, MAX_KILL_TIMEOUT
)
FIXTURES_EXTRA_DOMAINS = ['extra.domain1.com', 'extra.domain2.org']
class LainConfUtilsTests(TestCase):
    """Tests for standalone helpers exported by lain_sdk.yaml.parser."""
    def test_just_simple_scale(self):
        # 'cpu' is a simple-scale attribute of Proc; 'cmd' is not.
        assert just_simple_scale('cpu', Proc)
        assert not just_simple_scale('cmd', Proc)
class LainConfTests(TestCase):
    def test_lain_conf_without_appname(self):
        """load() must reject a config that lacks an appname."""
        meta_yaml = '''
        build:
            base: golang
            prepare:
                - echo prepare1
                - echo prepare2
            script:
                - echo buildscript1
                - echo buildscript2
        release:
            dest_base: ubuntu
            copy:
                - src: hello
                  dest: /usr/bin/hello
                - src: entry.sh
                  dest: /entry.sh
        test:
            script:
                - go test
        web:
            cmd: hello
            port: 80
            env:
                - ENV_A=enva
                - ENV_B=envb
            volumes:
                - /data
                - /var/lib/mysql
        notify:
            slack: "#hello"
        '''
        meta_version = '1428553798.443334-7142797e64bb7b4d057455ef13de6be156ae81cc'
        hello_conf = LainConf()
        with pytest.raises(Exception) as e:
            hello_conf.load(meta_yaml, meta_version, None)
        assert 'invalid lain conf: no appname' in str(e.value)
    def test_lain_conf_smoke(self):
        """Happy path: procs, env, volumes, ports and types all parse."""
        meta_yaml = '''
        appname: hello
        build:
            base: golang
            prepare:
                - echo prepare1
                - echo prepare2
            script:
                - echo buildscript1
                - echo buildscript2
        release:
            dest_base: ubuntu
            copy:
                - src: hello
                  dest: /usr/bin/hello
                - src: entry.sh
                  dest: /entry.sh
        test:
            script:
                - go test
        web:
            cmd: hello
            port: 80
            memory: 64m
            env:
                - ENV_A=enva
                - ENV_B=envb
            volumes:
                - /data
                - /var/lib/mysql
        web.bar:
            cmd: bar
            port: 8080
            mountpoint:
                - a.com
                - b.cn/xyz
            https_only: false
        worker.foo:
            cmd: worker
            memory: 128m
        notify:
            slack: "#hello"
        '''
        meta_version = '1428553798.443334-7142797e64bb7b4d057455ef13de6be156ae81cc'
        hello_conf = LainConf()
        hello_conf.load(meta_yaml, meta_version, None)
        assert hello_conf.appname == 'hello'
        assert hello_conf.procs['web'].env == ['ENV_A=enva', 'ENV_B=envb']
        assert hello_conf.procs['web'].memory == '64m'
        assert hello_conf.procs['web'].user == ''
        assert hello_conf.procs['web'].working_dir == ''
        assert hello_conf.procs['web'].dns_search == ['hello.lain']
        assert hello_conf.procs['web'].volumes == ['/data', '/var/lib/mysql']
        assert hello_conf.procs['web'].port[80].port == 80
        assert hello_conf.procs['web'].stateful is False
        assert hello_conf.procs['foo'].memory == '128m'
        assert hello_conf.procs['foo'].cmd == ['worker']
        assert hello_conf.procs['foo'].type == ProcType.worker
        assert hello_conf.procs['bar'].cmd == ['bar']
        assert hello_conf.procs['bar'].type == ProcType.web
        assert hello_conf.procs['bar'].mountpoint == ['a.com', 'b.cn/xyz']
        assert hello_conf.procs['bar'].https_only is False
    def test_lain_conf_notify_slack(self):
        """A notify.slack entry is exposed via LainConf.notify."""
        meta_yaml = '''
        appname: hello
        build:
            base: golang
            script:
                - echo buildscript1
                - echo buildscript2
        release:
            dest_base: ubuntu
            copy:
                - src: hello
                  dest: /usr/bin/hello
                - src: entry.sh
                  dest: /entry.sh
        web:
            cmd: hello
            port: 80
        notify:
            slack: "#hello"
        '''
        repo_name = 'lain/hello'
        meta_version = '1428553798.443334-7142797e64bb7b4d057455ef13de6be156ae81cc'
        hello_conf = LainConf()
        # NOTE(review): arguments here are (yaml, repo_name, meta_version)
        # while other tests in this class pass (yaml, meta_version, None) --
        # confirm LainConf.load's actual signature.
        hello_conf.load(meta_yaml, repo_name, meta_version)
        assert hello_conf.appname == 'hello'
        assert hello_conf.notify == {'slack': '#hello'}
    def test_lain_conf_notify_missing(self):
        """Without a notify section, LainConf.notify defaults to {}."""
        meta_yaml = '''
        appname: hello
        build:
            base: golang
            script:
                - echo buildscript1
                - echo buildscript2
        release:
            dest_base: ubuntu
            copy:
                - src: hello
                  dest: /usr/bin/hello
                - src: entry.sh
                  dest: /entry.sh
        web:
            cmd: hello
            port: 80
        '''
        repo_name = 'lain/hello'
        meta_version = '1428553798.443334-7142797e64bb7b4d057455ef13de6be156ae81cc'
        hello_conf = LainConf()
        hello_conf.load(meta_yaml, repo_name, meta_version)
        assert hello_conf.appname == 'hello'
        assert hello_conf.notify == {}
    def test_lain_conf_empty_cmd(self):
        """A null cmd parses to [] and web still gets the default port 80."""
        meta_yaml = '''
        appname: hello
        build:
            base: golang
            prepare:
                - echo prepare1
                - echo prepare2
            script:
                - echo buildscript1
                - echo buildscript2
        release:
            dest_base: ubuntu
            copy:
                - src: hello
                  dest: /usr/bin/hello
                - src: entry.sh
                  dest: /entry.sh
        test:
            script:
                - go test
        web:
            cmd:
        notify:
            slack: "#hello"
        '''
        meta_version = '1428553798.443334-7142797e64bb7b4d057455ef13de6be156ae81cc'
        hello_conf = LainConf()
        hello_conf.load(meta_yaml, meta_version, None)
        assert hello_conf.appname == 'hello'
        assert hello_conf.procs['web'].port[80].port == 80
        assert hello_conf.procs['web'].cmd == []
    def test_lain_conf_port_with_type(self):
        """A 'port: 80:tcp' spec parses the numeric port correctly."""
        meta_yaml = '''
        appname: hello
        build:
            base: golang
            prepare:
                - echo prepare1
                - echo prepare2
            script:
                - echo buildscript1
                - echo buildscript2
        release:
            dest_base: ubuntu
            copy:
                - src: hello
                  dest: /usr/bin/hello
                - src: entry.sh
                  dest: /entry.sh
        test:
            script:
                - go test
        web:
            cmd: hello
            port: 80:tcp
            env:
                - ENV_A=enva
                - ENV_B=envb
            volumes:
                - /data
                - /var/lib/mysql
        notify:
            slack: "#hello"
        '''
        meta_version = '1428553798.443334-7142797e64bb7b4d057455ef13de6be156ae81cc'
        hello_conf = LainConf()
        hello_conf.load(meta_yaml, meta_version, None)
        assert hello_conf.appname == 'hello'
        assert hello_conf.procs['web'].env == ['ENV_A=enva', 'ENV_B=envb']
        assert hello_conf.procs['web'].volumes == ['/data', '/var/lib/mysql']
        assert hello_conf.procs['web'].logs == []
        assert hello_conf.procs['web'].port[80].port == 80
    def test_lain_conf_without_logs(self):
        """Without a logs section, Proc.logs defaults to []."""
        meta_yaml = '''
        appname: hello
        build:
            base: golang
        web:
            volumes:
                - /data
                - /var/lib/mysql
        '''
        repo_name = 'lain/hello'
        meta_version = '1428553798.443334-7142797e64bb7b4d057455ef13de6be156ae81cc'
        hello_conf = LainConf()
        hello_conf.load(meta_yaml, repo_name, meta_version)
        assert hello_conf.appname == 'hello'
        assert hello_conf.procs['web'].volumes == ['/data', '/var/lib/mysql']
        assert hello_conf.procs['web'].logs == []
    def test_lain_conf_logs(self):
        """Logs are deduplicated, /lain/logs is added to volumes and the
        log list is recorded in the proc annotation."""
        meta_yaml = '''
        appname: hello
        build:
            base: golang
        web:
            volumes:
                - /data
                - /var/lib/mysql
            logs:
                - a.log
                - b.log
                - a.log
        '''
        repo_name = 'lain/hello'
        meta_version = '1428553798.443334-7142797e64bb7b4d057455ef13de6be156ae81cc'
        hello_conf = LainConf()
        hello_conf.load(meta_yaml, repo_name, meta_version)
        assert hello_conf.appname == 'hello'
        assert hello_conf.procs['web'].volumes == [
            '/data', '/var/lib/mysql', '/lain/logs']
        assert hello_conf.procs['web'].logs == ['a.log', 'b.log']
        annotation = json.loads(hello_conf.procs['web'].annotation)
        assert annotation['logs'] == ['a.log', 'b.log']
    def test_lain_conf_port_with_type_but_toomuch(self):
        """A port spec with more than two ':'-separated fields is rejected."""
        meta_yaml = '''
        appname: hello
        build:
            base: golang
            prepare:
                - echo prepare1
                - echo prepare2
            script:
                - echo buildscript1
                - echo buildscript2
        release:
            dest_base: ubuntu
            copy:
                - src: hello
                  dest: /usr/bin/hello
                - src: entry.sh
                  dest: /entry.sh
        test:
            script:
                - go test
        web:
            cmd: hello
            port: 80:tcp:foo
            env:
                - ENV_A=enva
                - ENV_B=envb
            volumes:
                - /data
                - /var/lib/mysql
        notify:
            slack: "#hello"
        '''
        meta_version = '1428553798.443334-7142797e64bb7b4d057455ef13de6be156ae81cc'
        hello_conf = LainConf()
        with pytest.raises(Exception) as e:
            hello_conf.load(meta_yaml, meta_version, None)
        assert 'not supported port desc 80:tcp:foo' in str(e.value)
    def test_lain_conf_port_with_type_in_property_list(self):
        """A mapping-style port spec ({80: ['type:tcp']}) also parses."""
        meta_yaml = '''
        appname: hello
        build:
            base: golang
            prepare:
                - echo prepare1
                - echo prepare2
            script:
                - echo buildscript1
                - echo buildscript2
        release:
            dest_base: ubuntu
            copy:
                - src: hello
                  dest: /usr/bin/hello
                - src: entry.sh
                  dest: /entry.sh
        test:
            script:
                - go test
        web:
            cmd: hello
            port: {80: ['type:tcp']}
            env:
                - ENV_A=enva
                - ENV_B=envb
            volumes:
                - /data
                - /var/lib/mysql
        notify:
            slack: "#hello"
        '''
        meta_version = '1428553798.443334-7142797e64bb7b4d057455ef13de6be156ae81cc'
        hello_conf = LainConf()
        hello_conf.load(meta_yaml, meta_version, None)
        assert hello_conf.appname == 'hello'
        assert hello_conf.procs['web'].env == ['ENV_A=enva', 'ENV_B=envb']
        assert hello_conf.procs['web'].volumes == ['/data', '/var/lib/mysql']
        assert hello_conf.procs['web'].port[80].port == 80
    def test_lain_conf_port_webtype_without_port_meta(self):
        """A web proc with no port key still defaults to port 80."""
        meta_yaml = '''
        appname: hello
        build:
            base: golang
            prepare:
                - echo prepare1
                - echo prepare2
            script:
                - echo buildscript1
                - echo buildscript2
        release:
            dest_base: ubuntu
            copy:
                - src: hello
                  dest: /usr/bin/hello
                - src: entry.sh
                  dest: /entry.sh
        test:
            script:
                - go test
        web:
            cmd: hello
            env:
                - ENV_A=enva
                - ENV_B=envb
            volumes:
                - /data
                - /var/lib/mysql
        notify:
            slack: "#hello"
        '''
        meta_version = '1428553798.443334-7142797e64bb7b4d057455ef13de6be156ae81cc'
        hello_conf = LainConf()
        hello_conf.load(meta_yaml, meta_version, None)
        assert hello_conf.appname == 'hello'
        assert hello_conf.procs['web'].env == ['ENV_A=enva', 'ENV_B=envb']
        assert hello_conf.procs['web'].volumes == ['/data', '/var/lib/mysql']
        assert hello_conf.procs['web'].port[80].port == 80
def test_lain_conf_proc_name(self):
meta_yaml = '''
appname: hello
build:
base: golang
script: [go build -o hello]
release:
dest_base: ubuntu
copy:
- {dest: /usr/bin/hello, src: hello}
test:
script: [go test]
web.web1:
cmd: hello
port: 80
cpu: 1
mountpoint:
- a.foo
notify: {slack: '#hello'}
'''
meta_version = '1428553798.443334-7142797e64bb7b4d057455ef13de6be156ae81cc'
hello_conf = LainConf()
hello_conf.load(meta_yaml, meta_version, None)
self.assertEquals(hello_conf.appname, 'hello')
self.assertEquals(hello_conf.procs['web1'].cpu, 1)
self.assertEquals(hello_conf.procs['web1'].port[80].port, 80)
    def test_lain_conf_dup_proc_name(self):
        """Declaring the same proc name twice (web + proc.web) must fail."""
        meta_yaml = '''
        appname: hello
        build:
            base: golang
            prepare:
                - echo prepare1
                - echo prepare2
            script:
                - echo buildscript1
                - echo buildscript2
        release:
            dest_base: ubuntu
            copy:
                - src: hello
                  dest: /usr/bin/hello
                - src: entry.sh
                  dest: /entry.sh
        test:
            script:
                - go test
        web:
            cmd: hello
            port: 80
            env:
                - ENV_A=enva
                - ENV_B=envb
            volumes:
                - /data
                - /var/lib/mysql
        proc.web:
            type: web
            cmd: hello
            port: 80
        notify:
            slack: "#hello"
        '''
        meta_version = '1428553798.443334-7142797e64bb7b4d057455ef13de6be156ae81cc'
        hello_conf = LainConf()
        with pytest.raises(Exception) as e:
            hello_conf.load(meta_yaml, meta_version, None)
        assert 'duplicated proc name web' in str(e.value)
def test_lain_conf_proc_type(self):
meta_yaml = '''
appname: hello
build:
base: golang
script: [go build -o hello]
release:
dest_base: ubuntu
copy:
- {dest: /usr/bin/hello, src: hello}
test:
script: [go test]
proc.mailer: {type: worker, cmd: hello, port: 80, memory: 128m}
notify: {slack: '#hello'}
'''
meta_version = '1428553798.443334-7142797e64bb7b4d057455ef13de6be156ae81cc'
hello_conf = LainConf()
hello_conf.load(meta_yaml, meta_version, None)
self.assertEquals(hello_conf.appname, 'hello')
self.assertEquals(hello_conf.procs['mailer'].type, ProcType.worker)
self.assertEquals(hello_conf.procs['mailer'].memory, '128m')
self.assertEquals(hello_conf.procs['mailer'].port[80].port, 80)
def test_lain_conf_proc_env_and_volumes_null(self):
meta_yaml = '''
appname: hello
build:
base: golang
prepare:
- echo prepare1
- echo prepare2
script:
- echo buildscript1
- echo buildscript2
release:
dest_base: ubuntu
copy:
- src: hello
dest: /usr/bin/hello
- src: entry.sh
dest: /entry.sh
test:
script:
- go test
web:
cmd: hello
port: 80
env:
volumes:
notify:
slack: "#hello"
'''
meta_version = '1428553798.443334-7142797e64bb7b4d057455ef13de6be156ae81cc'
hello_conf = LainConf()
hello_conf.load(meta_yaml, meta_version, None)
assert hello_conf.appname == 'hello'
assert hello_conf.procs['web'].env == []
assert hello_conf.procs['web'].volumes == []
assert hello_conf.procs['web'].port[80].port == 80
    def test_lain_conf_proc_secret_files(self):
        """Relative secret_files paths are prefixed with /lain/app/; absolute ones kept as-is."""
        meta_yaml = '''
appname: hello
build:
    base: golang
    prepare:
        - echo prepare1
        - echo prepare2
    script:
        - echo buildscript1
        - echo buildscript2
release:
    dest_base: ubuntu
    copy:
        - src: hello
          dest: /usr/bin/hello
        - src: entry.sh
          dest: /entry.sh
test:
    script:
        - go test
web:
    cmd: hello
    port: 80
    secret_files:
        - "hello/hello.tex"
        - " /secret"
        - /hello
notify:
    slack: "#hello"
'''
        repo_name = 'lain/hello'
        meta_version = '1428553798.443334-7142797e64bb7b4d057455ef13de6be156ae81cc'
        hello_conf = LainConf()
        # NOTE(review): most tests in this class call load(meta_yaml,
        # meta_version, None); here repo_name is passed in the second
        # position -- confirm the intended load() argument order.
        hello_conf.load(meta_yaml, repo_name, meta_version)
        assert hello_conf.appname == 'hello'
        assert hello_conf.procs['web'].env == []
        assert hello_conf.procs['web'].volumes == []
        assert hello_conf.procs['web'].port[80].port == 80
        # bypass defaults to False when not declared in the yaml
        assert hello_conf.procs['web'].secret_files_bypass == False
        # Relative entries (note " /secret" starts with a space, so it is not
        # absolute) gain the /lain/app/ prefix; absolute /hello is untouched.
        assert hello_conf.procs['web'].secret_files == [
            '/lain/app/hello/hello.tex', '/lain/app/ /secret', '/hello']
    def test_lain_conf_proc_secret_files_bypass(self):
        """secret_files_bypass: True is preserved; path normalization is unchanged."""
        meta_yaml = '''
appname: hello
build:
    base: golang
    prepare:
        - echo prepare1
        - echo prepare2
    script:
        - echo buildscript1
        - echo buildscript2
release:
    dest_base: ubuntu
    copy:
        - src: hello
          dest: /usr/bin/hello
        - src: entry.sh
          dest: /entry.sh
test:
    script:
        - go test
web:
    cmd: hello
    port: 80
    secret_files_bypass: True
    secret_files:
        - "hello/hello.tex"
        - " /secret"
        - /hello
notify:
    slack: "#hello"
'''
        repo_name = 'lain/hello'
        meta_version = '1428553798.443334-7142797e64bb7b4d057455ef13de6be156ae81cc'
        hello_conf = LainConf()
        # NOTE(review): repo_name passed as second argument here; sibling
        # tests use (meta_yaml, meta_version, None) -- verify order.
        hello_conf.load(meta_yaml, repo_name, meta_version)
        assert hello_conf.appname == 'hello'
        assert hello_conf.procs['web'].env == []
        assert hello_conf.procs['web'].volumes == []
        assert hello_conf.procs['web'].port[80].port == 80
        # The explicit bypass flag from the yaml is carried through.
        assert hello_conf.procs['web'].secret_files_bypass == True
        # Same prefixing behavior as the non-bypass test above.
        assert hello_conf.procs['web'].secret_files == [
            '/lain/app/hello/hello.tex', '/lain/app/ /secret', '/hello']
def test_lain_conf_proc_env_notexists(self):
meta_yaml = '''
appname: hello
build:
base: golang
prepare:
- echo prepare1
- echo prepare2
script:
- echo buildscript1
- echo buildscript2
release:
dest_base: ubuntu
copy:
- src: hello
dest: /usr/bin/hello
- src: entry.sh
dest: /entry.sh
test:
script:
- go test
web:
cmd: hello
port: 80
notify:
slack: "#hello"
'''
meta_version = '1428553798.443334-7142797e64bb7b4d057455ef13de6be156ae81cc'
hello_conf = LainConf()
hello_conf.load(meta_yaml, meta_version, None)
assert hello_conf.appname == 'hello'
assert hello_conf.procs['web'].env == []
assert hello_conf.procs['web'].volumes == []
assert hello_conf.procs['web'].port[80].port == 80
def test_lain_conf_proc_patch(self):
meta_yaml = '''
appname: hello
build:
base: golang
script: [go build -o hello]
release:
dest_base: ubuntu
copy:
- {dest: /usr/bin/hello, src: hello}
test:
script: [go test]
proc.mailer: {type: worker, cmd: hello, port: 80, memory: 128m}
notify: {slack: '#hello'}
'''
payload = {
"cpu": 2,
"memory": "64m",
"num_instances": 2,
"cmd": "hello world",
"port": 8080
}
meta_version = '1428553798.443334-7142797e64bb7b4d057455ef13de6be156ae81cc'
hello_conf = LainConf()
hello_conf.load(meta_yaml, meta_version, None)
mailer = hello_conf.procs['mailer']
assert mailer.cpu == 0
assert mailer.memory == "128m"
assert mailer.num_instances == 1
assert mailer.cmd == ["hello"]
assert mailer.port[80].port == 80
mailer.patch(payload)
mailer = hello_conf.procs['mailer']
assert mailer.cpu == 2
assert mailer.memory == "64m"
assert mailer.num_instances == 2
assert mailer.cmd == ["hello", "world"]
assert mailer.port[8080].port == 8080
def test_lain_conf_proc_patch_only_simple_scale_meta(self):
meta_old = {
"memory": "32m",
"num_instances": 1,
"cmd": "hello",
"port": 80
}
meta_new = {
"cpu": 2,
"memory": "64m",
"num_instances": 2,
"cmd": "hello world",
"port": 8080
}
proc = Proc()
proc.load('web', meta_old, 'hello',
'111111111-aaaaaaaaaaaaaaaaa', None)
proc1 = Proc()
proc1.load('web', meta_new, 'hello',
'22222222-bbbbbbbbbbbbbbbbb', None)
assert proc.name == 'web'
assert proc.type.name == 'web'
assert proc.cpu == 0
assert proc.memory == "32m"
assert proc.cmd == ["hello"]
assert proc.num_instances == 1
assert proc.port[80].port == 80
proc.patch_only_simple_scale_meta(proc1)
assert proc.name == 'web'
assert proc.type.name == 'web'
assert proc.cpu == 2
assert proc.memory == "64m"
assert proc.cmd == ["hello"]
assert proc.num_instances == 2
assert proc.port[80].port == 80
    def test_lain_conf_auto_insert_default_mountpoint_for_procname_web(self):
        """A proc literally named ``web`` gets default mountpoints inserted automatically."""
        meta_yaml = '''
appname: hello
build:
    base: golang
    prepare:
        - echo prepare1
        - echo prepare2
    script:
        - echo buildscript1
        - echo buildscript2
release:
    dest_base: ubuntu
    copy:
        - src: hello
          dest: /usr/bin/hello
        - src: entry.sh
          dest: /entry.sh
test:
    script:
        - go test
web:
    cmd: hello
    port: 80
    memory: 64m
    env:
        - ENV_A=enva
        - ENV_B=envb
    volumes:
        - /data
        - /var/lib/mysql
notify:
    slack: "#hello"
'''
        meta_version = '1428553798.443334-7142797e64bb7b4d057455ef13de6be156ae81cc'
        hello_conf = LainConf()
        hello_conf.load(meta_yaml, meta_version, None)
        assert hello_conf.appname == 'hello'
        assert hello_conf.procs['web'].env == ['ENV_A=enva', 'ENV_B=envb']
        assert hello_conf.procs['web'].memory == '64m'
        assert hello_conf.procs['web'].volumes == ['/data', '/var/lib/mysql']
        assert hello_conf.procs['web'].port[80].port == 80
        my_mountpoint = hello_conf.procs['web'].mountpoint
        expect_mountpoint = [
            "%s.%s" % (hello_conf.appname, DOMAIN),
            "%s.lain" % hello_conf.appname,
        ]
        # NOTE(review): my_mountpoint and expect_mountpoint are built here
        # but never compared -- the assertion appears to be missing.
        # resource instance test
        resource_instance_meta_yaml = '''
appname: resource.demo-service.hello
build:
    base: golang
    prepare:
        - echo prepare1
        - echo prepare2
    script:
        - echo buildscript1
        - echo buildscript2
release:
    dest_base: ubuntu
    copy:
        - src: hello
          dest: /usr/bin/hello
        - src: entry.sh
          dest: /entry.sh
test:
    script:
        - go test
service.echo:
    image: regsitry.lain.local/demo-service:release-1428553798-7142797e64bb7b4d057455ef13de6be156ae81cc
    cmd: ./echo -p 1234
    port: 1234
    num_instances: 3
    portal:
        allow_clients: "hello"
        image: regsitry.lain.local/demo-service:release-1428553798-7142797e64bb7b4d057455ef13de6be156ae81cc
        cmd: ./proxy
        port: 4321
web:
    cmd: hello
    port: 80
    memory: 64m
    env:
        - ENV_A=enva
        - ENV_B=envb
    volumes:
        - /data
        - /var/lib/mysql
notify:
    slack: "#demo-service"
'''
        meta_version = '1428553798-7142797e64bb7b4d057455ef13de6be156ae81cc'
        domains = [DOMAIN] + FIXTURES_EXTRA_DOMAINS
        r_conf = LainConf()
        r_conf.load(resource_instance_meta_yaml,
                    meta_version, None, domains=domains)
        assert r_conf.appname == 'resource.demo-service.hello'
        assert r_conf.procs['web'].env == ['ENV_A=enva', 'ENV_B=envb']
        assert r_conf.procs['web'].memory == '64m'
        assert r_conf.procs['web'].volumes == ['/data', '/var/lib/mysql']
        assert r_conf.procs['web'].port[80].port == 80
        # Resource instance appname segments are reversed to form the domain.
        app_domain = 'hello.demo-service.resource'
        assert r_conf.procs['web'].mountpoint == ["%s.%s" % (app_domain, DOMAIN)] + \
            ["%s.%s" % (app_domain, d) for d in FIXTURES_EXTRA_DOMAINS] + \
            ["%s.lain" % app_domain]
    def test_lain_conf_no_mountpoint_for_not_web_type_proc(self):
        """Non-web procs (worker, service) must end up with an empty mountpoint list."""
        meta_yaml = '''
appname: hello
build:
    base: golang
    prepare:
        - echo prepare1
        - echo prepare2
    script:
        - echo buildscript1
        - echo buildscript2
release:
    dest_base: ubuntu
    copy:
        - src: hello
          dest: /usr/bin/hello
        - src: entry.sh
          dest: /entry.sh
test:
    script:
        - go test
worker:
    cmd: worker
    memory: 64m
notify:
    slack: "#hello"
'''
        meta_version = '1428553798.443334-7142797e64bb7b4d057455ef13de6be156ae81cc'
        hello_conf = LainConf()
        hello_conf.load(meta_yaml, meta_version, None)
        assert hello_conf.appname == 'hello'
        assert hello_conf.procs['worker'].memory == '64m'
        # Worker procs never get auto-inserted mountpoints.
        assert hello_conf.procs['worker'].mountpoint == []
        # resource instance test
        resource_instance_meta_yaml = '''
appname: resource.demo-service.hello
build:
    base: golang
    prepare:
        - echo prepare1
        - echo prepare2
    script:
        - echo buildscript1
        - echo buildscript2
release:
    dest_base: ubuntu
    copy:
        - src: hello
          dest: /usr/bin/hello
        - src: entry.sh
          dest: /entry.sh
test:
    script:
        - go test
service.echo:
    image: regsitry.lain.local/demo-service:release-1428553798-7142797e64bb7b4d057455ef13de6be156ae81cc
    cmd: ./echo -p 1234
    port: 1234
    num_instances: 3
    portal:
        allow_clients: "hello"
        image: regsitry.lain.local/demo-service:release-1428553798-7142797e64bb7b4d057455ef13de6be156ae81cc
        cmd: ./proxy
        port: 4321
worker:
    cmd: worker
    memory: 64m
notify:
    slack: "#demo-service"
'''
        repo_name = 'resource.demo-service.hello'
        meta_version = '1428553798-7142797e64bb7b4d057455ef13de6be156ae81cc'
        r_conf = LainConf()
        # NOTE(review): repo_name passed in second position here, while the
        # first load above uses (meta_yaml, meta_version, None) -- verify.
        r_conf.load(resource_instance_meta_yaml, repo_name, meta_version)
        assert r_conf.appname == 'resource.demo-service.hello'
        # Neither the service proc nor the worker gets mountpoints.
        assert r_conf.procs['echo'].mountpoint == []
        assert r_conf.procs['worker'].memory == '64m'
        assert r_conf.procs['worker'].mountpoint == []
    def test_lain_conf_auto_prefix_default_mountpoint_for_proctype_web(self):
        """Path-only mountpoints ('/web', '/admin') are expanded with each app domain."""
        meta_yaml = '''
appname: hello
build:
    base: golang
    prepare:
        - echo prepare1
        - echo prepare2
    script:
        - echo buildscript1
        - echo buildscript2
release:
    dest_base: ubuntu
    copy:
        - src: hello
          dest: /usr/bin/hello
        - src: entry.sh
          dest: /entry.sh
test:
    script:
        - go test
web:
    cmd: hello
    port: 80
    memory: 64m
    mountpoint:
        - /web
        - a.foo
        - c.com/y/z
web.admin:
    cmd: admin
    port: 80
    mountpoint:
        - /admin
notify:
    slack: "#hello"
'''
        meta_version = '1428553798.443334-7142797e64bb7b4d057455ef13de6be156ae81cc'
        hello_conf = LainConf()
        hello_conf.load(meta_yaml, meta_version, None, domains=[
            DOMAIN] + FIXTURES_EXTRA_DOMAINS)
        assert hello_conf.appname == 'hello'
        my_mountpoint = hello_conf.procs['web'].mountpoint
        # Full-domain entries pass through; '/web' is prefixed with every
        # app domain; the bare defaults are also inserted for proc 'web'.
        expect_mountpoint = ['a.foo', 'c.com/y/z',
                             '%s.%s' % (hello_conf.appname, DOMAIN),
                             '%s.lain' % hello_conf.appname,
                             '%s.%s/web' % (hello_conf.appname, DOMAIN),
                             '%s.lain/web' % hello_conf.appname
                             ]
        for extra_domain in FIXTURES_EXTRA_DOMAINS:
            expect_mountpoint.append('%s.%s' % (
                hello_conf.appname, extra_domain))
            expect_mountpoint.append('%s.%s/web' %
                                     (hello_conf.appname, extra_domain))
        my_mountpoint.sort()
        expect_mountpoint.sort()
        assert my_mountpoint == expect_mountpoint
        my_mountpoint1 = hello_conf.procs['admin'].mountpoint
        # web.admin only gets prefixed '/admin' entries, no bare defaults.
        expect_mountpoint1 = [
            '%s.%s/admin' % (hello_conf.appname, DOMAIN),
            '%s.lain/admin' % hello_conf.appname
        ]
        for extra_domain in FIXTURES_EXTRA_DOMAINS:
            expect_mountpoint1.append('%s.%s/admin' %
                                      (hello_conf.appname, extra_domain))
        my_mountpoint1.sort()
        expect_mountpoint1.sort()
        assert my_mountpoint1 == expect_mountpoint1
        # resource instance test
        resource_instance_meta_yaml = '''
appname: resource.demo-service.hello
build:
    base: golang
    prepare:
        - echo prepare1
        - echo prepare2
    script:
        - echo buildscript1
        - echo buildscript2
release:
    dest_base: ubuntu
    copy:
        - src: hello
          dest: /usr/bin/hello
        - src: entry.sh
          dest: /entry.sh
test:
    script:
        - go test
service.echo:
    image: regsitry.lain.local/demo-service:release-1428553798-7142797e64bb7b4d057455ef13de6be156ae81cc
    cmd: ./echo -p 1234
    port: 1234
    num_instances: 3
    portal:
        allow_clients: "hello"
        image: regsitry.lain.local/demo-service:release-1428553798-7142797e64bb7b4d057455ef13de6be156ae81cc
        cmd: ./proxy
        port: 4321
web:
    cmd: hello
    port: 80
    memory: 64m
    mountpoint:
        - /web
        - a.foo
        - c.com/y/z
web.admin:
    cmd: admin
    port: 80
    mountpoint:
        - /admin
notify:
    slack: "#demo-service"
'''
        repo_name = 'resource.demo-service.hello'
        meta_version = '1428553798-7142797e64bb7b4d057455ef13de6be156ae81cc'
        r_conf = LainConf()
        # Loaded without extra domains, so only DOMAIN and .lain appear below.
        r_conf.load(resource_instance_meta_yaml, repo_name, meta_version)
        assert r_conf.appname == 'resource.demo-service.hello'
        my_mountpoint = r_conf.procs['web'].mountpoint
        app_domain = 'hello.demo-service.resource'
        expect_mountpoint = ['a.foo', 'c.com/y/z',
                             '%s.%s' % (app_domain, DOMAIN),
                             '%s.lain' % app_domain,
                             '%s.%s/web' % (app_domain, DOMAIN),
                             '%s.lain/web' % app_domain
                             ]
        my_mountpoint.sort()
        expect_mountpoint.sort()
        assert my_mountpoint == expect_mountpoint
        my_mountpoint1 = r_conf.procs['admin'].mountpoint
        expect_mountpoint1 = [
            '%s.%s/admin' % (app_domain, DOMAIN),
            '%s.lain/admin' % app_domain
        ]
        my_mountpoint1.sort()
        expect_mountpoint1.sort()
        assert my_mountpoint1 == expect_mountpoint1
    def test_lain_conf_auto_append_default_mountpoint_for_procname_web(self):
        """Custom full-domain mountpoints are kept and the defaults are appended for ``web``."""
        meta_yaml = '''
appname: hello
build:
    base: golang
    prepare:
        - echo prepare1
        - echo prepare2
    script:
        - echo buildscript1
        - echo buildscript2
release:
    dest_base: ubuntu
    copy:
        - src: hello
          dest: /usr/bin/hello
        - src: entry.sh
          dest: /entry.sh
test:
    script:
        - go test
web:
    cmd: hello
    port: 80
    memory: 64m
    mountpoint:
        - a.foo
        - a.foo/search
        - b.foo.bar/x
        - c.com/y/z
notify:
    slack: "#hello"
'''
        meta_version = '1428553798.443334-7142797e64bb7b4d057455ef13de6be156ae81cc'
        hello_conf = LainConf()
        hello_conf.load(meta_yaml, meta_version, None, domains=[
            DOMAIN] + FIXTURES_EXTRA_DOMAINS)
        assert hello_conf.appname == 'hello'
        my_mountpoint = hello_conf.procs['web'].mountpoint
        # All declared entries survive, plus appname.<domain> defaults.
        expect_mountpoint = ['a.foo', 'a.foo/search', 'b.foo.bar/x', 'c.com/y/z',
                             '%s.%s' % (hello_conf.appname, DOMAIN),
                             '%s.lain' % hello_conf.appname
                             ]
        expect_mountpoint += ['%s.%s' % (hello_conf.appname, d)
                              for d in FIXTURES_EXTRA_DOMAINS]
        my_mountpoint.sort()
        expect_mountpoint.sort()
        assert my_mountpoint == expect_mountpoint
        # resource instance test
        resource_instance_meta_yaml = '''
appname: resource.demo-service.hello
build:
    base: golang
    prepare:
        - echo prepare1
        - echo prepare2
    script:
        - echo buildscript1
        - echo buildscript2
release:
    dest_base: ubuntu
    copy:
        - src: hello
          dest: /usr/bin/hello
        - src: entry.sh
          dest: /entry.sh
test:
    script:
        - go test
service.echo:
    image: regsitry.lain.local/demo-service:release-1428553798-7142797e64bb7b4d057455ef13de6be156ae81cc
    cmd: ./echo -p 1234
    port: 1234
    num_instances: 3
    portal:
        allow_clients: "hello"
        image: regsitry.lain.local/demo-service:release-1428553798-7142797e64bb7b4d057455ef13de6be156ae81cc
        cmd: ./proxy
        port: 4321
web:
    cmd: hello
    port: 80
    memory: 64m
    mountpoint:
        - a.foo
        - a.foo/search
        - b.foo.bar/x
        - c.com/y/z
notify:
    slack: "#demo-service"
'''
        repo_name = 'resource.demo-service.hello'
        meta_version = '1428553798-7142797e64bb7b4d057455ef13de6be156ae81cc'
        r_conf = LainConf()
        # Loaded without extra domains, so only DOMAIN and .lain defaults apply.
        r_conf.load(resource_instance_meta_yaml, repo_name, meta_version)
        assert r_conf.appname == 'resource.demo-service.hello'
        app_domain = 'hello.demo-service.resource'
        my_mountpoint = r_conf.procs['web'].mountpoint
        expect_mountpoint = ['a.foo', 'a.foo/search', 'b.foo.bar/x', 'c.com/y/z',
                             '%s.%s' % (app_domain, DOMAIN),
                             '%s.lain' % app_domain
                             ]
        my_mountpoint.sort()
        expect_mountpoint.sort()
        assert my_mountpoint == expect_mountpoint
def test_lain_conf_no_mountpoint_for_web_type_but_name_is_not_web_proc_should_raise_exception(self):
meta_yaml = '''
appname: hello
build:
base: golang
prepare:
- echo prepare1
- echo prepare2
script:
- echo buildscript1
- echo buildscript2
release:
dest_base: ubuntu
copy:
- src: hello
dest: /usr/bin/hello
- src: entry.sh
dest: /entry.sh
test:
script:
- go test
web.foo:
cmd: foo
memory: 64m
notify:
slack: "#hello"
'''
meta_version = '1428553798.443334-7142797e64bb7b4d057455ef13de6be156ae81cc'
hello_conf = LainConf()
with pytest.raises(Exception) as e:
hello_conf.load(meta_yaml, meta_version, None)
assert 'proc (type is web but name is not web) should have own mountpoint.' in str(
e.value)
# resource instance test
resource_instance_meta_yaml = '''
appname: resource.demo-service.hello
build:
base: golang
prepare:
- echo prepare1
- echo prepare2
script:
- echo buildscript1
- echo buildscript2
release:
dest_base: ubuntu
copy:
- src: hello
dest: /usr/bin/hello
- src: entry.sh
dest: /entry.sh
test:
script:
- go test
service.echo:
image: regsitry.lain.local/demo-service:release-1428553798-7142797e64bb7b4d057455ef13de6be156ae81cc
cmd: ./echo -p 1234
port: 1234
num_instances: 3
portal:
allow_clients: "hello"
image: regsitry.lain.local/demo-service:release-1428553798-7142797e64bb7b4d057455ef13de6be156ae81cc
cmd: ./proxy
port: 4321
web.foo:
cmd: foo
memory: 64m
notify:
slack: "#demo-service"
'''
repo_name = 'resource.demo-service.hello'
meta_version = '1428553798-7142797e64bb7b4d057455ef13de6be156ae81cc'
r_conf = LainConf()
with pytest.raises(Exception) as e:
r_conf.load(meta_yaml, repo_name, meta_version)
assert 'proc (type is web but name is not web) should have own mountpoint.' in str(
e.value)
def test_lain_conf_service_abbreviation(self):
meta_yaml = '''
appname: hello
build:
base: golang
prepare:
- echo prepare1
- echo prepare2
script:
- echo buildscript1
- echo buildscript2
release:
dest_base: ubuntu
copy:
- src: hello
dest: /usr/bin/hello
- src: entry.sh
dest: /entry.sh
test:
script:
- go test
service.echo:
cmd: ./echo -p 1234
port: 1234
num_instances: 3
portal:
allow_clients: "**"
cmd: ./proxy
port: 4321
notify:
slack: "#hello"
'''
meta_version = '1428553798.443334-7142797e64bb7b4d057455ef13de6be156ae81cc'
hello_conf = LainConf()
hello_conf.load(meta_yaml, meta_version, None)
assert hello_conf.appname == 'hello'
assert hello_conf.procs['echo'].port[1234].port == 1234
assert hello_conf.procs['echo'].type == ProcType.worker
assert hello_conf.procs['portal-echo'].port[4321].port == 4321
assert hello_conf.procs['portal-echo'].type == ProcType.portal
def test_lain_conf_service_full_definition(self):
meta_yaml = '''
appname: hello
build:
base: golang
prepare:
- echo prepare1
- echo prepare2
script:
- echo buildscript1
- echo buildscript2
release:
dest_base: ubuntu
copy:
- src: hello
dest: /usr/bin/hello
- src: entry.sh
dest: /entry.sh
test:
script:
- go test
proc.echo:
cmd: ./echo -p 1234
port: 1234
num_instances: 3
portal.portal-echo:
service_name: echo
allow_clients: "**"
cmd: ./proxy
port: 4321
notify:
slack: "#hello"
'''
meta_version = '1428553798.443334-7142797e64bb7b4d057455ef13de6be156ae81cc'
hello_conf = LainConf()
hello_conf.load(meta_yaml, meta_version, None)
assert hello_conf.appname == 'hello'
assert hello_conf.procs['echo'].port[1234].port == 1234
assert hello_conf.procs['echo'].type == ProcType.worker
assert hello_conf.procs['portal-echo'].port[4321].port == 4321
assert hello_conf.procs['portal-echo'].type == ProcType.portal
def test_lain_conf_use_services_smoke(self):
meta_yaml = '''
appname: echo-client
build:
base: golang
prepare:
- echo prepare1
- echo prepare2
script:
- echo buildscript1
- echo buildscript2
release:
dest_base: ubuntu
copy:
- src: hello
dest: /usr/bin/hello
- src: entry.sh
dest: /entry.sh
test:
script:
- go test
use_services:
echo-server:
- echo1
- echo2
bark-server:
- bark1
proc.echo-client:
cmd: ./ping echo1 echo2 bark1 -p 4321
'''
meta_version = '1428553798.443334-7142797e64bb7b4d057455ef13de6be156ae81cc'
echoclient_conf = LainConf()
echoclient_conf.load(meta_yaml, meta_version, None)
assert echoclient_conf.appname == 'echo-client'
assert echoclient_conf.use_services == {
'echo-server': ["echo1", "echo2"],
'bark-server': ["bark1"]
}
assert echoclient_conf.procs['echo-client'].type == ProcType.worker
    def test_lain_conf_resource_instance_abbreviation(self):
        """Resource-instance metas may give procs explicit images (abbreviated portal form)."""
        meta_yaml = '''
appname: resource.demo-service.hello
build:
    base: golang
    prepare:
        - echo prepare1
        - echo prepare2
    script:
        - echo buildscript1
        - echo buildscript2
release:
    dest_base: ubuntu
    copy:
        - src: hello
          dest: /usr/bin/hello
        - src: entry.sh
          dest: /entry.sh
test:
    script:
        - go test
service.echo:
    image: regsitry.lain.local/demo-service:release-1428553798-7142797e64bb7b4d057455ef13de6be156ae81cc
    cmd: ./echo -p 1234
    port: 1234
    num_instances: 3
    portal:
        allow_clients: "hello"
        image: regsitry.lain.local/demo-service:release-1428553798-7142797e64bb7b4d057455ef13de6be156ae81cc
        cmd: ./proxy
        port: 4321
notify:
    slack: "#demo-service"
'''
        meta_version = '1428553798-7142797e64bb7b4d057455ef13de6be156ae81cc'
        r_conf = LainConf()
        r_conf.load(meta_yaml, meta_version, None)
        assert r_conf.appname == 'resource.demo-service.hello'
        assert r_conf.procs['echo'].port[1234].port == 1234
        assert r_conf.procs['echo'].type == ProcType.worker
        # The per-proc image from the yaml overrides any default image.
        assert r_conf.procs[
            'echo'].image == "regsitry.lain.local/demo-service:release-1428553798-7142797e64bb7b4d057455ef13de6be156ae81cc"
        assert r_conf.procs['portal-echo'].port[4321].port == 4321
        assert r_conf.procs['portal-echo'].type == ProcType.portal
        assert r_conf.procs[
            'portal-echo'].image == "regsitry.lain.local/demo-service:release-1428553798-7142797e64bb7b4d057455ef13de6be156ae81cc"
def test_lain_conf_resource_instance_full_definition(self):
meta_yaml = '''
appname: resource/demo-service/hello
build:
base: golang
prepare:
- echo prepare1
- echo prepare2
script:
- echo buildscript1
- echo buildscript2
release:
dest_base: ubuntu
copy:
- src: hello
dest: /usr/bin/hello
- src: entry.sh
dest: /entry.sh
test:
script:
- go test
proc.echo:
image: regsitry.lain.local/demo-service:release-1428553798-7142797e64bb7b4d057455ef13de6be156ae81cc
cmd: ./echo -p 1234
port: 1234
num_instances: 3
portal.portal-echo:
service_name: echo
allow_clients: "hello"
image: regsitry.lain.local/demo-service:release-1428553798-7142797e64bb7b4d057455ef13de6be156ae81cc
cmd: ./proxy
port: 4321
notify:
slack: "#demo-service"
'''
repo_name = 'resource/demo-service/hello'
meta_version = '1428553798-7142797e64bb7b4d057455ef13de6be156ae81cc'
r_conf = LainConf()
r_conf.load(meta_yaml, meta_version, None)
assert r_conf.appname == 'resource/demo-service/hello'
assert r_conf.procs['echo'].port[1234].port == 1234
assert r_conf.procs['echo'].type == ProcType.worker
assert r_conf.procs[
'echo'].image == "regsitry.lain.local/demo-service:release-1428553798-7142797e64bb7b4d057455ef13de6be156ae81cc"
assert r_conf.procs['portal-echo'].port[4321].port == 4321
assert r_conf.procs['portal-echo'].type == ProcType.portal
assert r_conf.procs['portal-echo'].service_name == 'echo'
assert r_conf.procs['portal-echo'].allow_clients == 'hello'
assert r_conf.procs[
'portal-echo'].image == "regsitry.lain.local/demo-service:release-1428553798-7142797e64bb7b4d057455ef13de6be156ae81cc"
def test_lain_conf_use_resources_smoke(self):
meta_yaml = '''
appname: echo-client
build:
base: golang
prepare:
- echo prepare1
- echo prepare2
script:
- echo buildscript1
- echo buildscript2
release:
dest_base: ubuntu
copy:
- src: hello
dest: /usr/bin/hello
- src: entry.sh
dest: /entry.sh
test:
script:
- go test
use_resources:
echo-server:
memory: 128M
cpu: 2
services:
- echo1
- echo2
bark-server:
services:
- bark1
proc.echo-client:
cmd: ./ping echo1 echo2 bark1 -p 4321
'''
repo_name = 'echo-client'
meta_version = '1428553798.443334-7142797e64bb7b4d057455ef13de6be156ae81cc'
echoclient_conf = LainConf()
echoclient_conf.load(meta_yaml, meta_version, None)
assert echoclient_conf.appname == 'echo-client'
assert echoclient_conf.use_resources == {
'echo-server': {
'services': ["echo1", "echo2"],
'context': {
'cpu': 2,
'memory': '128M'
}
},
'bark-server': {
'services': ["bark1"],
'context': {}
}
}
assert echoclient_conf.procs['echo-client'].type == ProcType.worker
def test_lain_conf_default_system_volumes(self):
meta_yaml = '''
appname: echo-client
build:
base: golang
prepare:
- echo prepare1
- echo prepare2
script:
- echo buildscript1
- echo buildscript2
release:
dest_base: ubuntu
copy:
- src: hello
dest: /usr/bin/hello
- src: entry.sh
dest: /entry.sh
test:
script:
- go test
use_resources:
echo-server:
memory: 128M
cpu: 2
services:
- echo1
- echo2
bark-server:
services:
- bark1
proc.echo-client:
cmd: ./ping echo1 echo2 bark1 -p 4321
'''
repo_name = 'echo-client'
meta_version = '1428553798.443334-7142797e64bb7b4d057455ef13de6be156ae81cc'
echoclient_conf = LainConf()
echoclient_conf.load(meta_yaml, repo_name, meta_version)
assert echoclient_conf.appname == 'echo-client'
for proc in echoclient_conf.procs.values():
assert proc.system_volumes == DEFAULT_SYSTEM_VOLUMES
def test_lain_conf_cloud_volumes_multi_type(self):
meta_yaml = '''
appname: hello
build:
base: golang
prepare:
- echo prepare1
script:
- echo buildscript1
release:
dest_base: ubuntu
copy:
- src: hello
dest: /usr/bin/hello
- src: entry.sh
dest: /entry.sh
test:
script:
- go test
web:
cmd: hello
port: 80
memory: 64m
cloud_volumes:
dirs:
- /data
- /var/lib/mysql
'''
repo_name = 'hello'
meta_version = '1428553798.443334-7142797e64bb7b4d057455ef13de6be156ae81cc'
echoclient_conf = LainConf()
echoclient_conf.load(meta_yaml, repo_name, meta_version)
assert echoclient_conf.appname == 'hello'
assert echoclient_conf.procs['web'].cloud_volumes.get(
'multi') == ['/data', '/var/lib/mysql']
print echoclient_conf.procs['web'].cloud_volumes
def test_lain_conf_cloud_volumes_single_type(self):
meta_yaml = '''
appname: hello
build:
base: golang
prepare:
- echo prepare1
script:
- echo buildscript1
release:
dest_base: ubuntu
copy:
- src: hello
dest: /usr/bin/hello
- src: entry.sh
dest: /entry.sh
test:
script:
- go test
web:
cmd: hello
port: 80
memory: 64m
cloud_volumes:
type: single
dirs:
- /data
- /var/lib/mysql
'''
repo_name = 'hello'
meta_version = '1428553798.443334-7142797e64bb7b4d057455ef13de6be156ae81cc'
echoclient_conf = LainConf()
echoclient_conf.load(meta_yaml, repo_name, meta_version)
assert echoclient_conf.appname == 'hello'
assert echoclient_conf.procs['web'].cloud_volumes.get('multi') == None
assert echoclient_conf.procs['web'].cloud_volumes.get(
'single') == ['/data', '/var/lib/mysql']
print echoclient_conf.procs['web'].cloud_volumes
    def test_lain_conf_volume_backup(self):
        """Per-volume backup_full/backup_increment configs become flat backup records."""
        meta_yaml = '''
appname: echo-client
build:
    base: golang
    prepare:
        - echo prepare1
        - echo prepare2
    script:
        - echo buildscript1
        - echo buildscript2
release:
    dest_base: ubuntu
    copy:
        - src: hello
          dest: /usr/bin/hello
        - src: entry.sh
          dest: /entry.sh
proc.echo-client:
    cmd: ./ping echo1 echo2 bark1 -p 4321
    volumes:
        - /etc/hello:
            backup_full:
                schedule: "* 1 * * *"
                expire: 30d
                pre_run: backup.sh
                post_run: end.sh
            backup_increment:
                schedule: "0 1 * * *"
                expire: 3d
                pre_run: backup.sh
                post_run: end.sh
        - /data/backupfile:
            backup_full:
                schedule: "* 1 * * *"
                expire: 30d
                pre_run: backup.sh
                post_run: end.sh
'''
        repo_name = 'echo-client'
        meta_version = '1428553798.443334-7142797e64bb7b4d057455ef13de6be156ae81cc'
        echoclient_conf = LainConf()
        echoclient_conf.load(meta_yaml, repo_name, meta_version)
        # Two backup configs on /etc/hello + one on /data/backupfile == 3.
        assert len(echoclient_conf.procs['echo-client'].backup) == 3
        for backup_info in echoclient_conf.procs['echo-client'].backup:
            # procname is qualified as <appname>.<type>.<procname>.
            assert backup_info['procname'] == 'echo-client.worker.echo-client'
            if backup_info['volume'] == "/data/backupfile":
                assert backup_info['volume'] == "/data/backupfile"
                assert backup_info['schedule'] == "* 1 * * *"
                assert backup_info['expire'] == "30d"
                assert backup_info['preRun'] == "backup.sh"
                assert backup_info['postRun'] == "end.sh"
                assert backup_info['mode'] == "full"
            elif backup_info['volume'] == "/etc/hello":
                assert backup_info['volume'] == "/etc/hello"
                assert backup_info['preRun'] == "backup.sh"
                assert backup_info['postRun'] == "end.sh"
                # /etc/hello yields both an increment and a full record.
                if backup_info['mode'] == 'increment':
                    assert backup_info['schedule'] == "0 1 * * *"
                    assert backup_info['expire'] == "3d"
                    assert backup_info['mode'] == "increment"
                else:
                    assert backup_info['schedule'] == "* 1 * * *"
                    assert backup_info['expire'] == "30d"
                    assert backup_info['mode'] == "full"
def test_lain_conf_setuptime_and_killtimeout(self):
meta_yaml = '''
appname: hello
build:
base: golang
web:
cmd: test
'''
repo_name = 'lain/hello'
meta_version = '1428553798.443334-7142797e64bb7b4d057455ef13de6be156ae81cc'
hello_conf = LainConf()
hello_conf.load(meta_yaml, repo_name, meta_version)
assert hello_conf.procs['web'].setup_time == MIN_SETUP_TIME
assert hello_conf.procs['web'].kill_timeout == MIN_KILL_TIMEOUT
meta_yaml = '''
appname: hello
build:
base: golang
web:
cmd: test
setup_time: 2342
kill_timeout: 1
'''
hello_conf.load(meta_yaml, repo_name, meta_version)
assert hello_conf.procs['web'].setup_time == MAX_SETUP_TIME
assert hello_conf.procs['web'].kill_timeout == MIN_KILL_TIMEOUT
meta_yaml = '''
appname: hello
build:
base: golang
web:
cmd: test
setup_time: 10
kill_timeout: 20
'''
hello_conf.load(meta_yaml, repo_name, meta_version)
assert hello_conf.procs['web'].setup_time == 10
assert hello_conf.procs['web'].kill_timeout == 20
REDIS_CLIENT_META = '''
appname: hello
build:
base: golang
script:
- go build -o hello
release:
dest_base: ubuntu
copy:
- src: hello
dest: /usr/bin/hello
use_resources:
redis:
memory: 128M
services:
- redis
web:
cmd: /hello
env:
- REDIS_ADDR: redis:3333
'''
REDIS_CLIENT_META_VERSION = '1439365341-06e92b4456116ad5e6875c8c34797d22156d44a5'
REDIS_RESOURCE_META = '''
appname: redis
apptype: resource
build:
base: golang
script:
- go build -o hello
release:
dest_base: ubuntu
copy:
- src: hello
dest: /usr/bin/hello
service.redis:
cmd: redis -p 3333
port: 3333
memory: "{{ memory|default('64M') }}"
portal:
image: myregistry.lain.org/proxy:release-1234567-abc
cmd: ./proxy
'''
REDIS_RESOURCE_META_VERSION = '1439365340-06e92b4456116ad5e6875c8c34797d22156d44a5'
MYSQL_CLIENT_META = '''
appname: hello
build:
base: golang
script:
- go build -o hello
release:
dest_base: ubuntu
copy:
- src: hello
dest: /usr/bin/hello
use_resources:
mysql:
memory: 128M
num_instances: 2
services:
- mysqld
web:
cmd: /hello
env:
- MYSQL_ADDR: mysqld:3309
'''
MYSQL_CLIENT_META_VERSION = '1439365342-06e92b4456116ad5e6875c8c34797d22156d44a5'
MYSQL_RESOURCE_META = '''
appname: mysql
apptype: resource
build:
base: golang
script:
- go build -o hello
release:
dest_base: ubuntu
copy:
- src: hello
dest: /usr/bin/hello
worker.mysqld:
image: mysql:5.6
cmd: mysqld {{ memory|default('64M') }}
port: 3306
num_instances: "{{ num_instances|default(1)|int(1) }}"
memory: "{{ memory|default('64M') }}"
portal.mysqlproxy:
service_name: mysqld
image: myregistry.lain.org/proxy:release-1234567-abc
cmd: mysqlproxy
port: 3309
'''
MYSQL_RESOURCE_META_VERSION = '1439365343-06e92b4456116ad5e6875c8c34797d22156d44a5'
def test_resource_instance_meta_render_abbreviation():
    """Render the redis resource template for client 'hello' and load the result.

    The rendered instance meta must drop the 'apptype' key, apply the
    client's context (memory=128M), and fall back to the resource default
    image where the template declares none.
    """
    resource_appname = 'redis'
    resource_meta_version = REDIS_RESOURCE_META_VERSION
    resource_meta_template = REDIS_RESOURCE_META
    client_appname = 'hello'
    client_meta = REDIS_CLIENT_META
    client_meta_version = REDIS_CLIENT_META_VERSION
    registry = 'registry.lain.local'
    domain = 'lain.local'
    hello_config = LainConf()
    hello_config.load(client_meta, client_meta_version, None,
                      registry=registry, domain=domain)
    context = hello_config.use_resources[resource_appname]['context']
    resource_instance_meta = render_resource_instance_meta(
        resource_appname, resource_meta_version, resource_meta_template,
        client_appname, context, registry, domain
    )
    resource_default_image = '{}/{}:release-{}'.format(
        registry, resource_appname, resource_meta_version
    )
    resource_instance_yaml = yaml.safe_load(resource_instance_meta)
    # Fix: the original tested the misspelled key 'apptye' with the
    # deprecated dict.has_key(), making the check vacuous; the renderer is
    # expected to strip the 'apptype' marker from the instance meta.
    assert 'apptype' not in resource_instance_yaml
    resource_instance_config = LainConf()
    resource_instance_config.load(resource_instance_meta,
                                  resource_meta_version,
                                  resource_default_image,
                                  registry=registry,
                                  domain=domain,
                                  )
    assert resource_instance_config.appname == 'resource.redis.hello'
    redis_proc = resource_instance_config.procs['redis']
    # Context memory override was rendered into the template.
    assert redis_proc.memory == '128M'
    # No image in the template -> the default resource image is used.
    assert redis_proc.image == resource_default_image
    portalredis_proc = resource_instance_config.procs['portal-redis']
    assert portalredis_proc.image == 'myregistry.lain.org/proxy:release-1234567-abc'
def test_resource_instance_meta_render_full():
    """Render the mysql resource instance meta from the client's context.

    Uses the full (mysql) fixtures: every proc names its own image, the
    client context overrides memory/num_instances, and the Jinja-style
    placeholders in ``cmd`` are rendered from the context.
    """
    resource_appname = 'mysql'
    resource_meta_version = MYSQL_RESOURCE_META_VERSION
    resource_meta_template = MYSQL_RESOURCE_META
    client_appname = 'hello'
    client_meta = MYSQL_CLIENT_META
    client_meta_version = MYSQL_CLIENT_META_VERSION
    registry = 'registry.lain.local'
    domain = 'lain.local'
    hello_config = LainConf()
    hello_config.load(client_meta, client_meta_version, None,
                      registry=registry, domain=domain)
    context = hello_config.use_resources[resource_appname]['context']
    resource_instance_meta = render_resource_instance_meta(
        resource_appname, resource_meta_version, resource_meta_template,
        client_appname, context, registry, domain
    )
    resource_instance_yaml = yaml.safe_load(resource_instance_meta)
    # Fix: this used to read ``has_key('apptye')`` -- a typo'd key (vacuous
    # assertion) plus the Python-2-only dict.has_key() API.  Also dropped the
    # unused ``resource_default_image`` local: this test deliberately passes
    # None as the default image to ``load`` below.
    assert 'apptype' not in resource_instance_yaml
    resource_instance_config = LainConf()
    resource_instance_config.load(resource_instance_meta,
                                  resource_meta_version,
                                  None,
                                  registry=registry,
                                  domain=domain,
                                  )
    assert resource_instance_config.appname == 'resource.mysql.hello'
    mysqld_proc = resource_instance_config.procs['mysqld']
    assert mysqld_proc.memory == '128M'
    assert mysqld_proc.num_instances == 2
    assert mysqld_proc.image == 'mysql:5.6'
    assert mysqld_proc.cmd == ['mysqld', '128M']
    mysqlproxy_proc = resource_instance_config.procs['mysqlproxy']
    assert mysqlproxy_proc.image == 'myregistry.lain.org/proxy:release-1234567-abc'
def test_build_section_with_old_prepare(old_prepare_yaml):
    """Legacy ``prepare`` fixture: defaults to version "0" and empty keep.

    With no ``keep`` list the generated prepare script removes the whole
    working tree (plain ``ls -1 | xargs rm -rf``) after the build steps.

    NOTE(review): this calls ``load(yaml, 'console', version, domains=...)``
    while the resource tests above call ``load(meta, version, image,
    registry=..., domain=...)`` -- confirm which LainConf.load signature this
    file targets.
    """
    app_meta_version = '123456-abcdefg'
    app_conf = LainConf()
    app_conf.load(old_prepare_yaml, 'console', app_meta_version,
                  domains=['registry.lain.local', 'lain.local'])
    assert app_conf.build.base == 'sunyi00/centos-python:1.0.0'
    assert app_conf.build.script == ['( pip install -r pip-req.txt )']
    assert app_conf.build.build_arg == ['ARG1=arg1', 'ARG2=arg2']
    assert app_conf.build.prepare.version == "0"
    assert app_conf.build.prepare.keep == []
    assert app_conf.build.prepare.script == [
        '( touch /sbin/modprobe && chmod +x /sbin/modprobe )',
        '( pip install -r pip-req.txt )',
        '( rm -rf /lain/app/* )',
        '( ls -1 | xargs rm -rf )'
    ]
    assert not app_conf.procs['web'].stateful
def test_build_section_with_new_prepare(new_prepare_yaml):
    """New-style ``prepare`` fixture: explicit keep list is honored.

    The kept paths (node_modules, bundle) must be excluded from the final
    cleanup step via ``grep -v`` filters in the generated script.
    """
    app_meta_version = '123456-abcdefg'
    app_conf = LainConf()
    app_conf.load(new_prepare_yaml, 'console', app_meta_version,
                  domains=['registry.lain.local', 'lain.local'])
    assert app_conf.build.base == 'sunyi00/centos-python:1.0.0'
    assert app_conf.build.script == ['( pip install -r pip-req.txt )']
    assert app_conf.build.build_arg == ['ARG1=arg1', 'ARG2=arg2']
    assert app_conf.build.prepare.version == "0"
    assert app_conf.build.prepare.keep == [
        'node_modules',
        'bundle'
    ]
    assert app_conf.build.prepare.script == [
        '( touch /sbin/modprobe && chmod +x /sbin/modprobe )',
        '( pip install -r pip-req.txt )',
        '( rm -rf /lain/app/* )',
        '( ls -1 | grep -v \'\\bnode_modules\\b\' | grep -v \'\\bbundle\\b\' | xargs rm -rf )'
    ]
    assert app_conf.procs['web'].stateful
def test_proc_section_with_healthcheck(healthcheck_yaml):
    """A proc's healthcheck ends up both as an attribute and in annotation.

    The healthcheck path must be exposed directly on the proc and serialized
    into the JSON ``annotation`` blob under the 'healthcheck' key.
    """
    app_meta_version = '123456-abcdefg'
    app_conf = LainConf()
    app_conf.load(healthcheck_yaml, 'console', app_meta_version,
                  domains=['registry.lain.local', 'lain.local'])
    assert app_conf.build.base == 'sunyi00/centos-python:1.0.0'
    assert app_conf.build.script == ['( pip install -r pip-req.txt )']
    assert app_conf.build.prepare.version == "0"
    assert app_conf.build.prepare.keep == [
        'node_modules',
        'bundle'
    ]
    assert app_conf.build.prepare.script == [
        '( touch /sbin/modprobe && chmod +x /sbin/modprobe )',
        '( pip install -r pip-req.txt )',
        '( rm -rf /lain/app/* )',
        '( ls -1 | grep -v \'\\bnode_modules\\b\' | grep -v \'\\bbundle\\b\' | xargs rm -rf )'
    ]
    # The annotation field is a JSON string; decode it to inspect the keys.
    annotation = json.loads(app_conf.procs['web'].annotation)
    assert annotation['healthcheck'] == '/kg/health/check'
    assert app_conf.procs['web'].healthcheck == '/kg/health/check'
def test_release(release_yaml):
    """The release section's copy list is normalized to src/dest dicts."""
    app_meta_version = '123456-abcdefg'
    app_conf = LainConf()
    app_conf.load(release_yaml, 'release', app_meta_version,
                  domains=['registry.lain.local', 'lain.local'])
    # Bare string entries (e.g. 'hi') expand to {'src': x, 'dest': x}.
    assert app_conf.release.copy == [
        {'dest': '/usr/bin/hello', 'src': 'hello'}, {'dest': 'hi', 'src': 'hi'}]
|
|
from unittest import TestCase, main
import datetime
from random import choice, randint
from string import ascii_letters
from uuid import UUID
from amgut.lib.data_access.ag_data_access import AGDataAccess
from amgut.lib.util import rollback
class TestAGDataAccess(TestCase):
    def setUp(self):
        # Fresh data-access object (and its DB connection) for every test.
        self.ag_data = AGDataAccess()
    def tearDown(self):
        # Drop the data-access object so its connection can be released.
        del self.ag_data
    def test_authenticateWebAppUser(self):
        """Authentication returns False on failure, a user dict on success."""
        # Test right pass but non-existent kit ID
        obs = self.ag_data.authenticateWebAppUser('randomkitID', 'test')
        self.assertEqual(obs, False)
        kit_id = 'tst_xfphP'
        # Test wrong password
        obs = self.ag_data.authenticateWebAppUser(kit_id, 'wrongPass')
        self.assertEqual(obs, False)
        # Test correct password
        obs = self.ag_data.authenticateWebAppUser(kit_id, 'test')
        self.assertTrue(isinstance(obs, dict))
        self.assertEqual(obs['ag_login_id'],
                         'ded5101d-cafb-f6b3-e040-8a80115d6f03')
def test_check_login_exists(self):
email = 'Reallylongemailthatshouldntexist@someplacenotreal.com'
obs = self.ag_data.check_login_exists(email)
self.assertEqual(obs, None)
email = 'REMOVED'
obs = self.ag_data.check_login_exists(email)
as_uuid = UUID(obs)
self.assertTrue(as_uuid.version, 4)
def test_addAGLogin(self):
# test new user
exists = 'EXISTS'
while exists is not None:
email = ''.join([choice(ascii_letters)
for i in range(randint(5, 10))])
domain = ''.join([choice(ascii_letters)
for i in range(randint(5, 10))])
new_email = '@'.join([email, domain]) + '.com'
exists = self.ag_data.check_login_exists(new_email)
# make sure the ag_login_id is a UUID4 string
ag_login_id = self.ag_data.addAGLogin(
new_email, 'TESTDUDE', '123 fake test street', 'testcity',
'teststate', '1L2 2G3', 'United Kingdom')
as_uuid = UUID(ag_login_id)
self.assertTrue(as_uuid.version, 4)
# test existing user
ag_login_id = self.ag_data.addAGLogin(
'TEST@EMAIL.com', 'TESTOTHER', '123 fake test street', 'testcity',
'teststate', '1L2 2G3', 'United Kingdom')
obs = self.ag_data.addAGLogin(
'test@EMAIL.com', 'TESTDUDE', '123 fake test street', 'testcity',
'teststate', '1L2 2G3', 'United Kingdom')
self.assertEqual(ag_login_id, obs)
def test_getAGBarcodeDetails_bad_barcode(self):
# test non-existant barcode
with self.assertRaises(ValueError):
self.ag_data.getAGBarcodeDetails('99')
# test existing barcode but not in AG
with self.assertRaises(ValueError):
self.ag_data.getAGBarcodeDetails('000006232')
def test_get_nonconsented_scanned_barcodes(self):
obs = self.ag_data.get_nonconsented_scanned_barcodes('tst_KWfyv')
exp = ['000027262']
self.assertEqual(obs, exp)
def test_getAGBarcodeDetails(self):
# test existing AG barcode
obs = self.ag_data.getAGBarcodeDetails('000001047')
exp = {
'barcode': '000001047',
'status': 'Received',
'ag_kit_id': 'd8592c74-7e35-2135-e040-8a80115d6401',
'name': 'REMOVED',
'participant_name': 'REMOVED-0',
'email': 'REMOVED',
'site_sampled': 'Stool',
'environment_sampled': None,
'sample_date': datetime.date(2013, 3, 28),
'sample_time': datetime.time(23, 25),
'notes': 'REMOVED',
'overloaded': None,
'withdrawn': None,
'other': None,
'moldy': None,
'refunded': None,
'ag_kit_barcode_id': 'd8592c74-7e36-2135-e040-8a80115d6401',
'date_of_last_email': None,
'other_text': 'REMOVED'
}
self.assertEqual(obs, exp)
def test_getAGKitDetails(self):
# test non-existant kit
with self.assertRaises(ValueError):
self.ag_data.getAGKitDetails('IDONTEXI5T')
# test existing AG kit
obs = self.ag_data.getAGKitDetails('tst_ODmhG')
exp = {'ag_kit_id': 'd8592c74-84bb-2135-e040-8a80115d6401',
'supplied_kit_id': 'tst_ODmhG',
'swabs_per_kit': 1,
'verification_email_sent': 'n',
'kit_verification_code': 'f4UjhV4B',
'kit_password': '$2a$12$LiakUCHOpAMvEp9Wxehw5OIlD/TIIP0Bs3blw18'
'ePcmKHWWAePrQ.',
'kit_verified': 'y'}
self.assertEqual(obs, exp)
def test_get_all_handout_kits(self):
obs = self.ag_data.get_all_handout_kits()
self.assertTrue(isinstance(obs, list))
self.assertTrue(len(obs) > 0)
for kit_id in obs:
self.assertRegexpMatches(kit_id, 'tst_[a-zA-Z]{5}')
def test_registerHandoutKit_bad_data(self):
# run on bad data
with self.assertRaises(ValueError):
self.ag_data.registerHandoutKit('BAD', 'DATA')
def test_registerHandoutKit_bad_idz(self):
# run on non-existant login id
ag_login_id = '877bb1b5-7352-48bf-a7b1-1248c689b819'
kit = self.ag_data.get_all_handout_kits()[0]
obs = self.ag_data.registerHandoutKit(ag_login_id, kit)
self.assertFalse(obs)
# run on non-existant kit_id
ag_login_id = 'dc3172b2-792c-4087-8a20-714297821c6a'
kit = 'NoTR3AL'
obs = self.ag_data.registerHandoutKit(ag_login_id, kit)
self.assertFalse(obs)
def test_registerHandoutKit(self):
# run on real data
ag_login_id = 'dc3172b2-792c-4087-8a20-714297821c6a'
kit = self.ag_data.get_all_handout_kits()[0]
obs = self.ag_data.registerHandoutKit(ag_login_id, kit)
self.assertTrue(obs)
# make sure kit removed from ag_handout_kits and inserted in ag_kit
kits = self.ag_data.get_all_handout_kits()
self.assertNotIn(kit, kits)
obs = self.ag_data.getAGKitDetails(kit)
self.assertEqual(obs['supplied_kit_id'], kit)
@rollback
def test_deleteAGParticipantSurvey(self):
self.ag_data.deleteAGParticipantSurvey(
'000fc4cd-8fa4-db8b-e050-8a800c5d02b5', 'REMOVED-0')
with self.assertRaises(ValueError):
self.ag_data.getConsent('8b2b45bb3390b585')
res = self.ag_data.get_withdrawn()
today = datetime.datetime.now().date()
exp = [['000fc4cd-8fa4-db8b-e050-8a800c5d02b5', 'REMOVED-0',
'REMOVED', today]]
self.assertItemsEqual(res, exp)
def test_getConsent(self):
res = self.ag_data.getConsent("8b2b45bb3390b585")
exp = {'date_signed': None,
'assent_obtainer': None,
'age_range': None,
'parent_1_name': 'REMOVED',
'participant_email': 'REMOVED',
'parent_2_name': 'REMOVED',
'ag_login_id': '000fc4cd-8fa4-db8b-e050-8a800c5d02b5',
'deceased_parent': 'false',
'participant_name': 'REMOVED-0',
'survey_id': '8b2b45bb3390b585',
'is_juvenile': False}
self.assertEquals(res, exp)
def test_getConsentNotPresent(self):
with self.assertRaises(ValueError):
self.ag_data.getConsent("42")
def test_logParticipantSample_badinfo(self):
# bad ag_login_id
with self.assertRaises(ValueError):
self.ag_data.logParticipantSample(
'11111111-1111-1111-1111-714297821c6a', '000001047',
'stool', None, datetime.date(2015, 9, 27),
datetime.time(15, 54), 'BADNAME', '')
def test_logParticipantSample(self):
# regular sample
ag_login_id = '7732aafe-c4e1-4ae4-8337-6f22704c1064'
barcode = '000027376'
self.ag_data.logParticipantSample(
ag_login_id, barcode, 'Stool', None, datetime.date(2015, 9, 27),
datetime.time(15, 54), 'REMOVED-0', '')
obs = self.ag_data.getAGBarcodeDetails(barcode)
exp = {'status': None,
'ag_kit_id': '5bfa9526-8dbb-492f-937c-bceb6b5a56fe',
'ag_kit_barcode_id': '793dab39-d9bf-4a0f-8d67-f21796e3faae',
'barcode': '000027376',
'site_sampled': 'Stool',
'environment_sampled': None,
'name': 'REMOVED',
'sample_date': datetime.date(2015, 9, 27),
'sample_time': datetime.time(15, 54),
'notes': '', 'overloaded': None,
'withdrawn': None,
'email': 'REMOVED',
'other': None,
'moldy': None,
'participant_name': 'REMOVED-0',
'refunded': None,
'date_of_last_email': None,
'other_text': 'REMOVED'
}
self.ag_data.deleteSample(barcode, ag_login_id)
self.assertEqual(obs, exp)
# env sample
self.ag_data.logParticipantSample(
ag_login_id, barcode, None, 'animal_habitat',
datetime.date(2015, 9, 26), datetime.time(15, 00), 'REMOVED', '')
obs = self.ag_data.getAGBarcodeDetails(barcode)
exp = {'status': None,
'ag_kit_id': '5bfa9526-8dbb-492f-937c-bceb6b5a56fe',
'ag_kit_barcode_id': '793dab39-d9bf-4a0f-8d67-f21796e3faae',
'barcode': '000027376',
'site_sampled': None,
'environment_sampled': 'animal_habitat',
'name': 'REMOVED',
'sample_date': datetime.date(2015, 9, 26),
'sample_time': datetime.time(15, 00),
'notes': '', 'overloaded': None,
'withdrawn': None,
'email': 'REMOVED',
'other': None,
'moldy': None,
'participant_name': None,
'refunded': None,
'date_of_last_email': None,
'other_text': 'REMOVED'
}
self.ag_data.deleteSample(barcode, ag_login_id)
self.assertEqual(obs, exp)
def test_getHumanParticipants(self):
i = "d8592c74-9694-2135-e040-8a80115d6401"
res = self.ag_data.getHumanParticipants(i)
exp = ['REMOVED-2', 'REMOVED-0', 'REMOVED-3', 'REMOVED-1']
self.assertItemsEqual(res, exp)
def test_getHumanParticipantsNotPresent(self):
i = '00000000-0000-0000-0000-000000000000'
res = self.ag_data.getHumanParticipants(i)
self.assertEqual(res, [])
def test_vioscreen_status(self):
survey_id = 'eba20dea4f54b997'
self.ag_data.updateVioscreenStatus(survey_id, 3)
obs = self.ag_data.get_vioscreen_status(survey_id)
self.assertEqual(obs, 3)
self.ag_data.updateVioscreenStatus(survey_id, None)
obs = self.ag_data.get_vioscreen_status(survey_id)
self.assertEqual(obs, None)
def test_get_vioscreen_status_unknown_survey(self):
with self.assertRaises(ValueError):
self.ag_data.get_vioscreen_status('SomeRandomSurveyID')
def test_getAnimalParticipants(self):
i = "ed5ab96f-fe3b-ead5-e040-8a80115d1c4b"
res = self.ag_data.getAnimalParticipants(i)
exp = ['REMOVED-0']
self.assertItemsEqual(res, exp)
def test_getAnimalParticipantsNotPresent(self):
i = "00711b0a-67d6-0fed-e050-8a800c5d7570"
res = self.ag_data.getAnimalParticipants(i)
self.assertEqual(res, [])
def test_getParticipantSamples(self):
i = "d6b0f287-b9d9-40d4-82fd-a8fd3db6c476"
res = self.ag_data.getParticipantSamples(i, "REMOVED-0")
exp = [{'status': None,
'sample_time': datetime.time(11, 55),
'notes': 'REMOVED',
'barcode': '000028432',
'sample_date': datetime.date(2015, 6, 7),
'site_sampled': 'Stool'}]
self.assertEqual(res, exp)
i = "d8592c74-9694-2135-e040-8a80115d6401"
res = self.ag_data.getParticipantSamples(i, "REMOVED-0")
exp = [{'status': 'Received',
'sample_time': datetime.time(7, 40),
'notes': 'REMOVED',
'barcode': '000016704',
'sample_date': datetime.date(2014, 6, 5),
'site_sampled': 'Stool'},
{'status': 'Received',
'sample_time': datetime.time(11, 30),
'notes': 'REMOVED',
'barcode': '000016705',
'sample_date': datetime.date(2014, 6, 1),
'site_sampled': 'Stool'},
{'status': 'Received', 'sample_time': datetime.time(9, 20),
'notes': 'REMOVED',
'barcode': '000016706',
'sample_date': datetime.date(2014, 6, 8),
'site_sampled': 'Stool'},
{'status': 'Received',
'sample_time': datetime.time(9, 20),
'notes': 'REMOVED',
'barcode': '000016707',
'sample_date': datetime.date(2014, 6, 1),
'site_sampled': 'Stool'},
{'status': 'Received',
'sample_time': datetime.time(22, 0),
'notes': 'REMOVED',
'barcode': '000016708',
'sample_date': datetime.date(2014, 5, 28),
'site_sampled': 'Stool'},
{'status': 'Received',
'sample_time': datetime.time(11, 0),
'notes': 'REMOVED',
'barcode': '000016709',
'sample_date': datetime.date(2014, 5, 29),
'site_sampled': 'Stool'},
{'status': 'Received',
'sample_time': datetime.time(22, 20),
'notes': 'REMOVED',
'barcode': '000016710',
'sample_date': datetime.date(2014, 5, 27),
'site_sampled': 'Stool'},
{'status': 'Received',
'sample_time': datetime.time(8, 0),
'notes': 'REMOVED',
'barcode': '000016711',
'sample_date': datetime.date(2014, 6, 11),
'site_sampled': 'Stool'},
{'status': 'Received',
'sample_time': datetime.time(8, 15),
'notes': 'REMOVED',
'barcode': '000016712',
'sample_date': datetime.date(2014, 6, 2),
'site_sampled': 'Stool'},
{'status': 'Received',
'sample_time': datetime.time(12, 0),
'notes': 'REMOVED',
'barcode': '000016713',
'sample_date': datetime.date(2014, 5, 30),
'site_sampled': 'Stool'},
{'status': None,
'sample_time': datetime.time(19, 30),
'notes': 'REMOVED',
'barcode': '000016496',
'sample_date': datetime.date(2014, 4, 29),
'site_sampled': 'Stool'},
{'status': None,
'sample_time': datetime.time(19, 30),
'notes': 'REMOVED',
'barcode': '000016497',
'sample_date': datetime.date(2014, 4, 29),
'site_sampled': 'Stool'},
{'status': 'Received',
'sample_time': datetime.time(10, 20),
'notes': 'REMOVED',
'barcode': '000004213',
'sample_date': datetime.date(2013, 10, 16),
'site_sampled': 'Stool'},
{'status': 'Received',
'sample_time': datetime.time(9, 50),
'notes': 'REMOVED',
'barcode': '000004214',
'sample_date': datetime.date(2013, 10, 14),
'site_sampled': 'Stool'},
{'status': 'Received',
'sample_time': datetime.time(12, 0),
'notes': 'REMOVED',
'barcode': '000004215',
'sample_date': datetime.date(2013, 10, 13),
'site_sampled': 'Stool'},
{'status': 'Received',
'sample_time': datetime.time(9, 30),
'notes': 'REMOVED',
'barcode': '000004216',
'sample_date': datetime.date(2013, 10, 15),
'site_sampled': 'Stool'},
{'status': 'Received',
'sample_time': datetime.time(14, 25),
'notes': 'REMOVED',
'barcode': '000004218',
'sample_date': datetime.date(2013, 10, 12),
'site_sampled': 'Stool'},
{'status': 'Received',
'sample_time': datetime.time(10, 15),
'notes': 'REMOVED',
'barcode': '000004219',
'sample_date': datetime.date(2013, 10, 17),
'site_sampled': 'Stool'}]
self.assertItemsEqual(res, exp)
def test_getParticipantSamplesNotPresent(self):
i = '00000000-0000-0000-0000-000000000000'
res = self.ag_data.getParticipantSamples(i, "REMOVED")
self.assertEqual(res, [])
def test_getEnvironmentalSamples(self):
i = "d6b0f287-b9d9-40d4-82fd-a8fd3db6c476"
res = self.ag_data.getEnvironmentalSamples(i)
exp = [{'status': None, 'sample_time': datetime.time(21, 45),
'notes': 'REMOVED', 'barcode': '000028433',
'sample_date': datetime.date(2015, 6, 7),
'site_sampled': None}]
self.assertItemsEqual(res, exp)
def test_getEnvironmentalSamplesNotPresent(self):
i = '00000000-0000-0000-0000-000000000000'
res = self.ag_data.getEnvironmentalSamples(i)
self.assertEqual(res, [])
def test_getAvailableBarcodes(self):
i = "d8592c74-9694-2135-e040-8a80115d6401"
res = self.ag_data.getAvailableBarcodes(i)
exp = ['000005628', '000005627', '000005624',
'000005625', '000005626', '000004217']
self.assertItemsEqual(res, exp)
i = "d6b0f287-b9d9-40d4-82fd-a8fd3db6c476"
res = self.ag_data.getAvailableBarcodes(i)
exp = ['000028434']
self.assertItemsEqual(res, exp)
def test_getAvailableBarcodesNotPresent(self):
i = '00000000-0000-0000-0000-000000000000'
res = self.ag_data.getAvailableBarcodes(i)
self.assertEqual(res, [])
def test_verifyKit(self):
# Test verifying works
kit = self.ag_data._get_unverified_kits()[0]
self.ag_data.verifyKit(kit)
obs = self.ag_data.getAGKitDetails(kit)
self.assertEqual(obs['kit_verified'], 'y')
# Test verifying a non-existant kit
with self.assertRaises(ValueError):
self.ag_data.getAGKitDetails('NOTAREALKITID')
def test__get_unverified_kits(self):
obs = self.ag_data._get_unverified_kits()
self.assertTrue(isinstance(obs, list))
self.assertTrue(len(obs) > 0)
for kit_id in obs:
self.assertRegexpMatches(kit_id, 'tst_[a-zA-Z]{5}')
obs = self.ag_data.getAGKitDetails(kit_id)
self.assertEqual(obs['kit_verified'], 'n')
def test_handoutCheck(self):
# Test proper password for handout
# All tests use assertEqual to make sure bool object returned
kit = self.ag_data.get_all_handout_kits()[0]
obs = self.ag_data.handoutCheck(kit, 'test')
self.assertEqual(obs, True)
# Test wrong password
obs = self.ag_data.handoutCheck(kit, 'badPass')
self.assertEqual(obs, False)
# Test non-handout kit
obs = self.ag_data.handoutCheck('tst_ODmhG', 'test')
self.assertEqual(obs, False)
obs = self.ag_data.handoutCheck('randomKitID', 'test')
self.assertEqual(obs, False)
def test_check_access(self):
# Has access
obs = self.ag_data.check_access('tst_BudVu', '000001047')
self.assertEqual(obs, True)
# No access
obs = self.ag_data.check_access('tst_BudVu', '000001111')
self.assertEqual(obs, False)
def test_ag_set_pass_change_code(self):
# Generate new random code and assign it
testcode = ''.join(choice(ascii_letters) for i in range(10))
self.ag_data.ag_set_pass_change_code('REMOVED', 'tst_ULGcr', testcode)
# Actually test the code change
obs = self.ag_data.ag_verify_kit_password_change_code(
'REMOVED', 'tst_ULGcr', 'SOMELONGTHINGTHATWILLFAIL')
self.assertEqual(obs, False)
obs = self.ag_data.ag_verify_kit_password_change_code(
'REMOVED', 'tst_ULGcr', testcode)
# Using equal to make sure boolean True is returned, not something that
# equates to True
self.assertEqual(obs, True)
# Test giving nonsense email
# TODO: make this raise error and test
self.ag_data.ag_set_pass_change_code('Fake@notarealemail.com',
'tst_ULGcr', testcode)
# Test giving bad skid
# TODO: make this raise error and test
self.ag_data.ag_set_pass_change_code('REMOVED', 'NOTINTHEDB', testcode)
def test_ag_update_kit_password(self):
# Generate new pass and make sure is different from current pass
newpass = ''.join(choice(ascii_letters) for i in range(randint(8, 15)))
auth = self.ag_data.authenticateWebAppUser('tst_ULGcr', newpass)
self.assertFalse(
auth, msg="Randomly generated password matches existing")
# Actually test password change
self.ag_data.ag_update_kit_password('tst_ULGcr', newpass)
auth = self.ag_data.authenticateWebAppUser('tst_ULGcr', newpass)
self.assertTrue(isinstance(auth, dict))
self.assertEqual(auth['ag_login_id'],
'd8592c74-8416-2135-e040-8a80115d6401')
# Test giving bad skid
# TODO: make this raise error and test
self.ag_data.ag_update_kit_password('NOTINTHEDB', newpass)
def test_ag_verify_kit_password_change_code(self):
# Test actual functionality
obs = self.ag_data.ag_verify_kit_password_change_code(
'REMOVED', 'tst_omubN', 'FAIL')
# Using assertEqual to make sure boolean False is returned, not
# something that equates to False. Same for rest of assertEquals below
self.assertEqual(obs, False)
# Outside reset time, should fail
obs = self.ag_data.ag_verify_kit_password_change_code(
'REMOVED', 'tst_omubN', 'Mw1eY4wWVXpE0cQlvQwS')
self.assertEqual(obs, False)
# Reset code and make sure it works
testcode = ''.join(choice(ascii_letters) for i in range(10))
self.ag_data.ag_set_pass_change_code('REMOVED', 'tst_ULGcr', testcode)
obs = self.ag_data.ag_verify_kit_password_change_code(
'REMOVED', 'tst_ULGcr', testcode)
self.assertEqual(obs, True)
# Test with incorrect kit id
obs = self.ag_data.ag_verify_kit_password_change_code(
'REMOVED', 'NOTAREALKITID', 'FAIL')
self.assertEqual(obs, False)
# Test with incorrect email
obs = self.ag_data.ag_verify_kit_password_change_code(
'notreal@fake.com', 'tst_ULGcr', testcode)
self.assertEqual(obs, False)
def test_getBarcodesByKit(self):
res = self.ag_data.getBarcodesByKit('tst_qmhLX')
exp = ['000001322']
self.assertItemsEqual(res, exp)
def test_getBarcodesByKitNotPresent(self):
res = self.ag_data.getBarcodesByKit('42')
self.assertEqual(res, [])
def test_checkPrintResults(self):
obs = self.ag_data.checkPrintResults('tst_oasoR')
self.assertFalse(obs)
obs = self.ag_data.checkPrintResults('tst_TMYwD')
self.assertTrue(obs)
def test_checkPrintResults_invalid_ids(self):
obs = self.ag_data.checkPrintResults('xxx00112333123---123222')
self.assertFalse(obs)
obs = self.ag_data.checkPrintResults(':Lfoo:Lbar:Lbaz:Ospam:Leggs')
self.assertFalse(obs)
def test_get_user_for_kit(self):
obs = self.ag_data.get_user_for_kit('tst_IueFX')
self.assertEqual('ded5101d-c8e3-f6b3-e040-8a80115d6f03', obs)
obs = self.ag_data.get_user_for_kit('tst_esABz')
self.assertEqual('d8592c74-8421-2135-e040-8a80115d6401', obs)
def test_get_user_for_kit_errors(self):
with self.assertRaises(ValueError):
self.ag_data.get_user_for_kit('the_fooster')
with self.assertRaises(ValueError):
self.ag_data.get_user_for_kit('tst_esXXX')
def test_get_menu_items(self):
obs = self.ag_data.get_menu_items('tst_pDWcB')
self.assertEqual(({}, {}, [], True), obs)
obs = self.ag_data.get_menu_items('tst_VpQsT')
self.assertEqual(({'REMOVED-0': []}, {}, [], True), obs)
def test_get_menu_items_errors(self):
with self.assertRaises(ValueError):
self.ag_data.get_menu_items('tst_esXXX')
def test_check_if_consent_exists(self):
obs = self.ag_data.check_if_consent_exists(
'00711b0a-67d6-0fed-e050-8a800c5d7570', 'REMOVED-42')
self.assertTrue(obs)
def test_check_if_consent_exists_non_existent_user(self):
obs = self.ag_data.check_if_consent_exists(
'00711b0a-67d6-0fed-e050-8a800c5d7570', 'REMOVED-111')
self.assertFalse(obs)
def test_get_user_info(self):
obs = self.ag_data.get_user_info('tst_wAhSB')
exp = {'address': 'REMOVED', 'ag_login_id':
'd8592c74-84a5-2135-e040-8a80115d6401', 'city': 'REMOVED',
'country': 'REMOVED', 'email': 'REMOVED', 'name': 'REMOVED',
'state': 'REMOVED', 'zip': 'REMOVED'}
self.assertEqual(exp, obs)
def test_get_user_info_non_existent(self):
with self.assertRaises(ValueError):
self.ag_data.get_user_info('tst_XX1123')
def test_get_barcode_results(self):
obs = self.ag_data.get_barcode_results('tst_yCzro')
exp = [{'barcode': '000016704', 'participant_name': 'REMOVED-0'},
{'barcode': '000016705', 'participant_name': 'REMOVED-0'},
{'barcode': '000016706', 'participant_name': 'REMOVED-0'},
{'barcode': '000016707', 'participant_name': 'REMOVED-0'},
{'barcode': '000016708', 'participant_name': 'REMOVED-0'},
{'barcode': '000016709', 'participant_name': 'REMOVED-0'},
{'barcode': '000016710', 'participant_name': 'REMOVED-0'},
{'barcode': '000016711', 'participant_name': 'REMOVED-0'},
{'barcode': '000016712', 'participant_name': 'REMOVED-0'},
{'barcode': '000016713', 'participant_name': 'REMOVED-0'},
{'barcode': '000004213', 'participant_name': 'REMOVED-0'},
{'barcode': '000004214', 'participant_name': 'REMOVED-0'},
{'barcode': '000004215', 'participant_name': 'REMOVED-0'},
{'barcode': '000004216', 'participant_name': 'REMOVED-0'},
{'barcode': '000004218', 'participant_name': 'REMOVED-0'},
{'barcode': '000004219', 'participant_name': 'REMOVED-0'}]
self.assertItemsEqual(obs, exp)
def test_get_barcode_results_non_existant_id(self):
with self.assertRaises(ValueError):
self.ag_data.get_barcode_results("something that doesn't exist")
def test_get_login_info(self):
id_ = 'fecebeae-4244-2d78-e040-8a800c5d4f50'
exp = [{'ag_login_id': id_,
'email': 'REMOVED',
'name': 'REMOVED',
'address': 'REMOVED',
'city': 'REMOVED',
'state': 'REMOVED',
'zip': 'REMOVED',
'country': 'REMOVED'}]
obs = self.ag_data.get_login_info(id_)
self.assertEqual(obs, exp)
def test_get_login_info_non_existant_id(self):
id_ = '00000000-0000-0000-0000-000000000000'
with self.assertRaises(ValueError):
self.ag_data.get_login_info(id_)
def test_get_survey_id(self):
id_ = '8ca47059-000a-469f-aa64-ff1afbd6fcb1'
obs = self.ag_data.get_survey_id(id_, 'REMOVED-0')
self.assertEquals(obs, 'd08758a1510256f0')
def test_get_survey_id_non_existant_id(self):
id_ = '00000000-0000-0000-0000-000000000000'
with self.assertRaises(ValueError):
self.ag_data.get_survey_id(id_, 'REMOVED')
def test_get_countries(self):
obs = self.ag_data.get_countries()
# Make sure is a list with proper length
self.assertTrue(isinstance(obs, list))
self.assertEqual(len(obs), 244)
# Spot check a few countries
self.assertIn('United States', obs)
self.assertIn('United Kingdom', obs)
def test_is_deposited_ebi(self):
obs = self.ag_data.is_deposited_ebi('000027262')
self.assertFalse(obs)
def test_is_deposited_ebi_bad_barcode(self):
with self.assertRaises(ValueError):
self.ag_data.is_deposited_ebi('NOTABARCODE')
if __name__ == "__main__":
main()
|
|
# -*- coding: utf-8 -*-
# Copyright 2014 Metaswitch Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
felix.test.test_felix
~~~~~~~~~~~
Top level tests for Felix.
"""
import logging
import mock
import socket
import sys
import time
import unittest
import uuid
import calico.felix.futils as futils
# Import our stub utils module which replaces time etc.
import calico.felix.test.stub_utils as stub_utils
# Replace zmq with our stub zmq.
import calico.felix.test.stub_zmq as stub_zmq
from calico.felix.test.stub_zmq import (TYPE_EP_REQ, TYPE_EP_REP,
TYPE_ACL_REQ, TYPE_ACL_SUB)
sys.modules['zmq'] = stub_zmq
# Hide iptc, since we do not have it.
sys.modules['iptc'] = __import__('calico.felix.test.stub_empty')
# Replace calico.felix.fiptables with calico.felix.test.stub_fiptables
import calico.felix.test.stub_fiptables
sys.modules['calico.felix.fiptables'] = __import__('calico.felix.test.stub_fiptables')
calico.felix.fiptables = calico.felix.test.stub_fiptables
stub_fiptables = calico.felix.test.stub_fiptables
#*****************************************************************************#
#* Load calico.felix.devices and calico.felix.test.stub_devices, and the *#
#* same for ipsets; we do not blindly override as we need to avoid getting *#
#* into a state where tests of these modules cannot be made to work. *#
#*****************************************************************************#
import calico.felix.devices
import calico.felix.test.stub_devices as stub_devices
import calico.felix.ipsets
import calico.felix.test.stub_ipsets as stub_ipsets
# Now import felix, and away we go.
import calico.felix.felix as felix
import calico.felix.endpoint as endpoint
import calico.felix.frules as frules
import calico.common as common
from calico.felix.futils import IPV4, IPV6
from calico.felix.endpoint import Endpoint
# IPtables state.
expected_iptables = stub_fiptables.TableState()
expected_ipsets = stub_ipsets.IpsetState()
# Default config path.
config_path = "calico/felix/test/data/felix_debug.cfg"
# Logger
log = logging.getLogger(__name__)
class TestBasic(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        # Completely replace the devices and ipsets modules with stubs,
        # keeping references to the real modules so that tearDownClass can
        # put them back.
        cls.real_devices = calico.felix.devices
        endpoint.devices = stub_devices
        cls.real_ipsets = calico.felix.ipsets
        frules.ipsets = stub_ipsets
    @classmethod
    def tearDownClass(cls):
        # Reinstate the modules we overwrote in setUpClass.
        endpoint.devices = cls.real_devices
        frules.ipsets = cls.real_ipsets
def create_patch(self, name):
return thing
    def setUp(self):
        """Mock the clock and reset every stub / expected state per test."""
        # Mock out time
        patcher = mock.patch('calico.felix.futils.time_ms')
        patcher.start().side_effect = stub_utils.get_time
        self.addCleanup(patcher.stop)
        stub_utils.set_time(0)
        # Start from a clean slate: both the stub "current" state and the
        # expected state the tests assert against.
        stub_fiptables.reset_current_state()
        stub_devices.reset()
        stub_ipsets.reset()
        expected_iptables.reset()
        expected_ipsets.reset()
    def tearDown(self):
        # Nothing to do: cleanups registered in setUp handle restoration.
        pass
    def test_startup(self):
        """Constructing the agent installs the global default rule state."""
        common.default_logging()
        context = stub_zmq.Context()
        agent = felix.FelixAgent(config_path, context)
        set_expected_global_rules()
        stub_fiptables.check_state(expected_iptables)
        stub_ipsets.check_state(expected_ipsets)
        # NOTE(review): "test_hostname" presumably comes from the debug
        # config / stubbed environment -- confirm where it is injected.
        self.assertEqual(agent.hostname, "test_hostname")
    def test_no_work(self):
        """
        Test starting up, and sending no work at all.
        """
        common.default_logging()
        context = stub_zmq.Context()
        agent = felix.FelixAgent(config_path, context)
        context.add_poll_result(0)
        agent.run()
        # With no messages queued, only the global default rules should exist
        # after one run of the agent loop.
        set_expected_global_rules()
        stub_fiptables.check_state(expected_iptables)
        stub_ipsets.check_state(expected_ipsets)
def test_main_flow(self):
"""
Test starting up and going through some of the basic flow.
"""
common.default_logging()
context = stub_zmq.Context()
agent = felix.FelixAgent(config_path, context)
context.add_poll_result(0)
agent.run()
# Now we want to reply to the RESYNC request.
resync_req = context.sent_data[TYPE_EP_REQ].pop()
log.debug("Resync request : %s" % resync_req)
self.assertFalse(context.sent_data_present())
resync_id = resync_req['resync_id']
resync_rsp = { 'type': "RESYNCSTATE",
'endpoint_count': 1,
'rc': "SUCCESS",
'message': "hello" }
poll_result = context.add_poll_result(50)
poll_result.add(TYPE_EP_REQ, resync_rsp)
agent.run()
# Felix expects one endpoint created message - give it what it wants
endpoint_id = str(uuid.uuid4())
log.debug("Build first endpoint created : %s" % endpoint_id)
mac = stub_utils.get_mac()
suffix = endpoint_id[:11]
tap = "tap" + suffix
addr = '1.2.3.4'
endpoint_created_req = { 'type': "ENDPOINTCREATED",
'endpoint_id': endpoint_id,
'resync_id': resync_id,
'issued': futils.time_ms(),
'mac': mac,
'state': Endpoint.STATE_ENABLED,
'addrs': [ {'gateway': "1.2.3.1", 'addr': addr} ] }
poll_result = context.add_poll_result(100)
poll_result.add(TYPE_EP_REP, endpoint_created_req)
agent.run()
log.debug("Create tap interface %s" % tap)
tap_obj = stub_devices.TapInterface(tap)
stub_devices.add_tap(tap_obj)
poll_result = context.add_poll_result(150)
agent.run()
#*********************************************************************#
#* As soon as that endpoint has been made to exist, we should see an *#
#* ACL request coming through, and a response to the endpoint *#
#* created. We send a reply to that now. *#
#*********************************************************************#
endpoint_created_rsp = context.sent_data[TYPE_EP_REP].pop()
self.assertEqual(endpoint_created_rsp['rc'], "SUCCESS")
acl_req = context.sent_data[TYPE_ACL_REQ].pop()
self.assertFalse(context.sent_data_present())
self.assertEqual(acl_req['endpoint_id'], endpoint_id)
acl_rsp = { 'type': "GETACLSTATE",
'rc': "SUCCESS",
'message': "" }
poll_result = context.add_poll_result(200)
poll_result.add(TYPE_ACL_REQ, acl_rsp)
# Check the rules are what we expect.
set_expected_global_rules()
add_endpoint_rules(suffix, tap, addr, None, mac)
stub_fiptables.check_state(expected_iptables)
add_endpoint_ipsets(suffix)
stub_ipsets.check_state(expected_ipsets)
# OK - now try giving it some ACLs, and see if they get applied correctly.
acls = get_blank_acls()
acls['v4']['outbound'].append({ 'cidr': "0.0.0.0/0", 'protocol': "icmp" })
acls['v4']['outbound'].append({ 'cidr': "1.2.3.0/24", 'protocol': "tcp" })
acls['v4']['outbound'].append({ 'cidr': "0.0.0.0/0", 'protocol': "tcp", 'port': "80" })
acls['v4']['inbound'].append({ 'cidr': "1.2.2.0/24", 'protocol': "icmp" })
acls['v4']['inbound'].append({ 'cidr': "0.0.0.0/0", 'protocol': "tcp", 'port': "8080" })
acls['v4']['inbound'].append({ 'cidr': "2.4.6.8/32", 'protocol': "udp", 'port': "8080" })
acls['v4']['inbound'].append({ 'cidr': "1.2.3.3/32" })
acls['v4']['inbound'].append({ 'cidr': "3.6.9.12/32",
'protocol': "tcp",
'port': ['10', '50'] })
acls['v4']['inbound'].append({ 'cidr': "5.4.3.2/32",
'protocol': "icmp",
'icmp_type': "3",
'icmp_code': "2" })
acls['v4']['inbound'].append({ 'cidr': "5.4.3.2/32",
'protocol': "icmp",
'icmp_type': "9" })
acls['v4']['inbound'].append({ 'cidr': "5.4.3.2/32",
'protocol': "icmp",
'icmp_type': "blah" })
# We include a couple of invalid rules that Felix will just ignore (and log).
acls['v4']['inbound'].append({ 'cidr': "4.3.2.1/32",
'protocol': "tcp",
'port': ['blah', 'blah'] })
acls['v4']['inbound'].append({ 'cidr': "4.3.2.1/32",
'protocol': "tcp",
'port': ['1', '2', '3'] })
acls['v4']['inbound'].append({ 'cidr': "4.3.2.1/32",
'protocol': "tcp",
'port': 'flibble' })
acls['v4']['inbound'].append({ 'protocol': "tcp" })
acls['v4']['inbound'].append({ 'cidr': "4.3.2.1/32",
'port': "123" })
acls['v4']['inbound'].append({ 'cidr': "4.3.2.1/32",
'protocol': "icmp",
'icmp_code': "blah" })
acls['v4']['inbound'].append({ 'cidr': "4.3.2.1/32",
'protocol': "icmp",
'port': "1" })
acls['v4']['inbound'].append({ 'cidr': "4.3.2.1/32",
'protocol': "rsvp",
'port': "1" })
acl_req = { 'type': "ACLUPDATE",
'acls': acls }
poll_result.add(TYPE_ACL_SUB, acl_req, endpoint_id)
agent.run()
stub_fiptables.check_state(expected_iptables)
expected_ipsets.add("felix-from-icmp-" + suffix, "0.0.0.0/1")
expected_ipsets.add("felix-from-icmp-" + suffix, "128.0.0.0/1")
expected_ipsets.add("felix-from-port-" + suffix, "1.2.3.0/24,tcp:0")
expected_ipsets.add("felix-from-port-" + suffix, "0.0.0.0/1,tcp:80")
expected_ipsets.add("felix-from-port-" + suffix, "128.0.0.0/1,tcp:80")
expected_ipsets.add("felix-to-icmp-" + suffix, "1.2.2.0/24")
expected_ipsets.add("felix-to-port-" + suffix, "0.0.0.0/1,tcp:8080")
expected_ipsets.add("felix-to-port-" + suffix, "128.0.0.0/1,tcp:8080")
expected_ipsets.add("felix-to-port-" + suffix, "2.4.6.8/32,udp:8080")
expected_ipsets.add("felix-to-addr-" + suffix, "1.2.3.3/32")
expected_ipsets.add("felix-to-port-" + suffix, "3.6.9.12/32,tcp:10-50")
expected_ipsets.add("felix-to-port-" + suffix, "5.4.3.2/32,icmp:3/2")
expected_ipsets.add("felix-to-port-" + suffix, "5.4.3.2/32,icmp:9/0")
expected_ipsets.add("felix-to-port-" + suffix, "5.4.3.2/32,icmp:blah")
stub_ipsets.check_state(expected_ipsets)
# Add another endpoint, and check the state.
endpoint_id2 = str(uuid.uuid4())
log.debug("Build second endpoint created : %s" % endpoint_id2)
mac2 = stub_utils.get_mac()
suffix2 = endpoint_id2[:11]
tap2 = "tap" + suffix2
addr2 = '1.2.3.5'
endpoint_created_req = { 'type': "ENDPOINTCREATED",
'endpoint_id': endpoint_id2,
'issued': futils.time_ms(),
'mac': mac2,
'state': Endpoint.STATE_ENABLED,
'addrs': [ {'gateway': "1.2.3.1", 'addr': addr2} ] }
poll_result = context.add_poll_result(250)
poll_result.add(TYPE_EP_REP, endpoint_created_req)
tap_obj2 = stub_devices.TapInterface(tap2)
stub_devices.add_tap(tap_obj2)
agent.run()
# Check that we got what we expected - i.e. a success response, a GETACLSTATE,
# and the rules in the right state.
endpoint_created_rsp = context.sent_data[TYPE_EP_REP].pop()
self.assertEqual(endpoint_created_rsp['rc'], "SUCCESS")
acl_req = context.sent_data[TYPE_ACL_REQ].pop()
self.assertEqual(acl_req['endpoint_id'], endpoint_id2)
self.assertFalse(context.sent_data_present())
add_endpoint_rules(suffix2, tap2, addr2, None, mac2)
stub_fiptables.check_state(expected_iptables)
add_endpoint_ipsets(suffix2)
stub_ipsets.check_state(expected_ipsets)
# OK, finally wind down with an ENDPOINTDESTROYED message for that second endpoint.
endpoint_destroyed_req = { 'type': "ENDPOINTDESTROYED",
'endpoint_id': endpoint_id2,
'issued': futils.time_ms() }
poll_result = context.add_poll_result(300)
poll_result.add(TYPE_EP_REP, endpoint_destroyed_req)
stub_devices.del_tap(tap2)
agent.run()
# Rebuild and recheck the state.
set_expected_global_rules()
add_endpoint_rules(suffix, tap, addr, None, mac)
stub_fiptables.check_state(expected_iptables)
def test_rule_reordering(self):
# TODO: Want to check that with extra rules, the extras get tidied up.
pass
def test_ipv6_reordering(self):
# TODO: Want to test IP v6 addresses and rules too.
pass
class TestTimings(unittest.TestCase):
    """Tests of the agent's time-driven behaviour (resync scheduling),
    driven through the stubbed clock and poll results."""
    @classmethod
    def setUpClass(cls):
        # Completely replace the devices and ipsets modules.
        cls.real_devices = calico.felix.devices
        endpoint.devices = stub_devices
        cls.real_ipsets = calico.felix.ipsets
        frules.ipsets = stub_ipsets
    @classmethod
    def tearDownClass(cls):
        # Reinstate the modules we overwrote
        endpoint.devices = cls.real_devices
        frules.ipsets = cls.real_ipsets
    def setUp(self):
        # Mock out time
        patcher = mock.patch('calico.felix.futils.time_ms')
        patcher.start().side_effect = stub_utils.get_time
        self.addCleanup(patcher.stop)
        stub_utils.set_time(0)
        # Start each test from clean stub and expectation state.
        stub_fiptables.reset_current_state()
        stub_devices.reset()
        stub_ipsets.reset()
        expected_iptables.reset()
        expected_ipsets.reset()
    def tearDown(self):
        pass
    def test_resync(self):
        """
        Test the resync flows.

        Advances the stubbed clock between agent.run() calls and checks
        that resync requests are (re)issued only after RESYNC_INT_SEC has
        elapsed and the expected endpoint data has arrived.
        """
        common.default_logging()
        context = stub_zmq.Context()
        agent = felix.FelixAgent(config_path, context)
        #*********************************************************************#
        #* Set the resync timeout to 5 seconds, and the KEEPALIVE timeout to *#
        #* much more.                                                        *#
        #*********************************************************************#
        agent.config.RESYNC_INT_SEC = 5
        agent.config.CONN_TIMEOUT_MS = 50000
        agent.config.CONN_KEEPALIVE_MS = 50000
        # Get started.
        context.add_poll_result(0)
        agent.run()
        # Now we should have got a resync request.
        resync_req = context.sent_data[TYPE_EP_REQ].pop()
        log.debug("Resync request : %s" % resync_req)
        self.assertFalse(context.sent_data_present())
        resync_id = resync_req['resync_id']
        resync_rsp = { 'type': "RESYNCSTATE",
                       'endpoint_count': "0",
                       'rc': "SUCCESS",
                       'message': "hello" }
        poll_result = context.add_poll_result(1000)
        poll_result.add(TYPE_EP_REQ, resync_rsp)
        agent.run()
        # nothing yet
        self.assertFalse(context.sent_data_present())
        poll_result = context.add_poll_result(5999)
        agent.run()
        # nothing yet - 4999 ms since last request
        self.assertFalse(context.sent_data_present())
        poll_result = context.add_poll_result(6001)
        agent.run()
        # We should have got another resync request.
        resync_req = context.sent_data[TYPE_EP_REQ].pop()
        log.debug("Resync request : %s" % resync_req)
        self.assertFalse(context.sent_data_present())
        resync_id = resync_req['resync_id']
        resync_rsp = { 'type': "RESYNCSTATE",
                       'endpoint_count': "2",
                       'rc': "SUCCESS",
                       'message': "hello" }
        # No more resyncs until enough data has arrived.
        poll_result = context.add_poll_result(15000)
        poll_result.add(TYPE_EP_REQ, resync_rsp)
        agent.run()
        self.assertFalse(context.sent_data_present())
        # Send an endpoint created message to Felix.
        endpoint_id = str(uuid.uuid4())
        log.debug("Build first endpoint created : %s" % endpoint_id)
        mac = stub_utils.get_mac()
        suffix = endpoint_id[:11]
        tap = "tap" + suffix
        addr = '1.2.3.4'
        endpoint_created_req = { 'type': "ENDPOINTCREATED",
                                 'endpoint_id': endpoint_id,
                                 'resync_id': resync_id,
                                 'issued': futils.time_ms(),
                                 'mac': mac,
                                 'state': Endpoint.STATE_ENABLED,
                                 'addrs': [ {'gateway': "1.2.3.1", 'addr': addr} ] }
        poll_result = context.add_poll_result(15001)
        poll_result.add(TYPE_EP_REP, endpoint_created_req)
        agent.run()
        # We stop using sent_data_present, since there are ACL requests around.
        endpoint_created_rsp = context.sent_data[TYPE_EP_REP].pop()
        self.assertEqual(endpoint_created_rsp['rc'], "SUCCESS")
        self.assertFalse(context.sent_data[TYPE_EP_REQ])
        # Send a second endpoint created message to Felix - triggers another resync.
        endpoint_id = str(uuid.uuid4())
        log.debug("Build second endpoint created : %s" % endpoint_id)
        mac = stub_utils.get_mac()
        suffix = endpoint_id[:11]
        tap = "tap" + suffix
        addr = '1.2.3.5'
        endpoint_created_req = { 'type': "ENDPOINTCREATED",
                                 'endpoint_id': endpoint_id,
                                 'resync_id': resync_id,
                                 'issued': futils.time_ms(),
                                 'mac': mac,
                                 'state': Endpoint.STATE_ENABLED,
                                 'addrs': [ {'gateway': "1.2.3.1", 'addr': addr} ] }
        poll_result = context.add_poll_result(15002)
        poll_result.add(TYPE_EP_REP, endpoint_created_req)
        agent.run()
        endpoint_created_rsp = context.sent_data[TYPE_EP_REP].pop()
        self.assertEqual(endpoint_created_rsp['rc'], "SUCCESS")
        self.assertFalse(context.sent_data[TYPE_EP_REQ])
        # No more resyncs until enough 5000 ms after last rsp.
        poll_result = context.add_poll_result(20000)
        poll_result.add(TYPE_EP_REQ, resync_rsp)
        agent.run()
        self.assertFalse(context.sent_data[TYPE_EP_REQ])
        # We should have got another resync request.
        poll_result = context.add_poll_result(20003)
        poll_result.add(TYPE_EP_REP, endpoint_created_req)
        agent.run()
        resync_req = context.sent_data[TYPE_EP_REQ].pop()
        log.debug("Resync request : %s" % resync_req)
        self.assertFalse(context.sent_data[TYPE_EP_REQ])
def get_blank_acls():
    """
    Return a blank set of ACLs, with nothing permitted.

    The structure has a 'v4' and a 'v6' section, each with deny defaults
    and empty inbound/outbound rule lists that callers may append to.
    """
    def _deny_all_section():
        # Build a fresh section each time so the v4 and v6 rule lists
        # are independent objects.
        return {'inbound_default': "deny",
                'outbound_default': "deny",
                'inbound': [],
                'outbound': []}
    return {'v4': _deny_all_section(),
            'v6': _deny_all_section()}
def set_expected_global_rules():
    """
    Sets up the minimal global rules we expect to have.

    Rebuilds ``expected_iptables`` from scratch: felix-* chains in the
    v4 and v6 filter tables (with jumps from FORWARD / INPUT), plus a
    felix-PREROUTING DNAT rule in the v4 nat table.
    """
    expected_iptables.reset()
    # --- IPv4 filter table ---
    table = expected_iptables.tables_v4["filter"]
    stub_fiptables.get_chain(table, "felix-TO-ENDPOINT")
    stub_fiptables.get_chain(table, "felix-FROM-ENDPOINT")
    stub_fiptables.get_chain(table, "felix-FORWARD")
    stub_fiptables.get_chain(table, "felix-INPUT")
    chain = table._chains_dict["FORWARD"]
    chain.rules.append(stub_fiptables.Rule(IPV4, "felix-FORWARD"))
    chain = table._chains_dict["INPUT"]
    chain.rules.append(stub_fiptables.Rule(IPV4, "felix-INPUT"))
    chain = table._chains_dict["felix-FORWARD"]
    # NOTE(review): the Rule(...) calls below pass the builtin ``type``
    # where sibling calls pass IPV4/IPV6 -- presumably this mirrors the
    # code under test; confirm whether IPV4/IPV6 was intended.
    rule = stub_fiptables.Rule(type, "felix-FROM-ENDPOINT")
    rule.in_interface = "tap+"
    chain.rules.append(rule)
    rule = stub_fiptables.Rule(type, "felix-TO-ENDPOINT")
    rule.out_interface = "tap+"
    chain.rules.append(rule)
    rule = stub_fiptables.Rule(type, "ACCEPT")
    rule.in_interface = "tap+"
    chain.rules.append(rule)
    rule = stub_fiptables.Rule(type, "ACCEPT")
    rule.out_interface = "tap+"
    chain.rules.append(rule)
    chain = table._chains_dict["felix-INPUT"]
    rule = stub_fiptables.Rule(type, "felix-FROM-ENDPOINT")
    rule.in_interface = "tap+"
    chain.rules.append(rule)
    rule = stub_fiptables.Rule(type, "ACCEPT")
    rule.in_interface = "tap+"
    chain.rules.append(rule)
    # --- IPv4 nat table: DNAT tcp/80 to the local proxy at 127.0.0.1:9697 ---
    table = expected_iptables.tables_v4["nat"]
    chain = table._chains_dict["PREROUTING"]
    chain.rules.append(stub_fiptables.Rule(IPV4, "felix-PREROUTING"))
    chain = stub_fiptables.get_chain(table, "felix-PREROUTING")
    rule = stub_fiptables.Rule(IPV4)
    rule.protocol = "tcp"
    rule.create_tcp_match("80")
    rule.create_target("DNAT", {'to_destination': '127.0.0.1:9697'})
    chain.rules.append(rule)
    # --- IPv6 filter table (mirrors the IPv4 layout) ---
    table = expected_iptables.tables_v6["filter"]
    stub_fiptables.get_chain(table, "felix-TO-ENDPOINT")
    stub_fiptables.get_chain(table, "felix-FROM-ENDPOINT")
    stub_fiptables.get_chain(table, "felix-FORWARD")
    stub_fiptables.get_chain(table, "felix-INPUT")
    chain = table._chains_dict["FORWARD"]
    chain.rules.append(stub_fiptables.Rule(IPV6, "felix-FORWARD"))
    chain = table._chains_dict["INPUT"]
    chain.rules.append(stub_fiptables.Rule(IPV6, "felix-INPUT"))
    chain = table._chains_dict["felix-FORWARD"]
    rule = stub_fiptables.Rule(type, "felix-FROM-ENDPOINT")
    rule.in_interface = "tap+"
    chain.rules.append(rule)
    rule = stub_fiptables.Rule(type, "felix-TO-ENDPOINT")
    rule.out_interface = "tap+"
    chain.rules.append(rule)
    rule = stub_fiptables.Rule(type, "ACCEPT")
    rule.in_interface = "tap+"
    chain.rules.append(rule)
    rule = stub_fiptables.Rule(type, "ACCEPT")
    rule.out_interface = "tap+"
    chain.rules.append(rule)
    chain = table._chains_dict["felix-INPUT"]
    rule = stub_fiptables.Rule(type, "felix-FROM-ENDPOINT")
    rule.in_interface = "tap+"
    chain.rules.append(rule)
    rule = stub_fiptables.Rule(type, "ACCEPT")
    rule.in_interface = "tap+"
    chain.rules.append(rule)
def add_endpoint_rules(suffix, tap, ipv4, ipv6, mac):
    """
    This adds the rules for an endpoint, appending to the end. This generates
    a clean state to allow us to test that the state is correct, even after
    it starts with extra rules etc.

    Args:
        suffix: short endpoint-id fragment used in chain / ipset names.
        tap: tap interface name for the endpoint.
        ipv4: endpoint IPv4 address, or None to skip the v4 MARK rules.
        ipv6: endpoint IPv6 address, or None to skip the v6 MARK rules.
        mac: endpoint MAC address, used in the MAC match of the MARK rule.
    """
    # --- IPv4 rules ---
    table = expected_iptables.tables_v4["filter"]
    chain = table._chains_dict["felix-FROM-ENDPOINT"]
    rule = stub_fiptables.Rule(IPV4, "felix-from-%s" % suffix)
    rule.in_interface = tap
    chain.rules.append(rule)
    chain = table._chains_dict["felix-TO-ENDPOINT"]
    rule = stub_fiptables.Rule(IPV4, "felix-to-%s" % suffix)
    rule.out_interface = tap
    chain.rules.append(rule)
    chain = stub_fiptables.get_chain(table, "felix-from-%s" % suffix)
    rule = stub_fiptables.Rule(IPV4, "DROP")
    rule.create_conntrack_match(["INVALID"])
    chain.rules.append(rule)
    rule = stub_fiptables.Rule(IPV4, "RETURN")
    rule.create_conntrack_match(["RELATED,ESTABLISHED"])
    chain.rules.append(rule)
    # udp sport 68 -> dport 67 (DHCP client to server).
    rule = stub_fiptables.Rule(IPV4, "RETURN")
    rule.protocol = "udp"
    rule.create_udp_match("68", "67")
    chain.rules.append(rule)
    if ipv4 is not None:
        # Mark traffic matching the endpoint's own IP and MAC, then drop
        # anything that did not pick up the mark.
        rule = stub_fiptables.Rule(IPV4)
        rule.create_target("MARK", {"set_mark": "1"})
        rule.src = ipv4
        rule.create_mac_match(mac)
        chain.rules.append(rule)
        rule = stub_fiptables.Rule(IPV4, "DROP")
        rule.create_mark_match("!1")
        chain.rules.append(rule)
    rule = stub_fiptables.Rule(IPV4, "RETURN")
    rule.create_set_match(["felix-from-port-%s" % suffix, "dst,dst"])
    chain.rules.append(rule)
    rule = stub_fiptables.Rule(IPV4, "RETURN")
    rule.create_set_match(["felix-from-addr-%s" % suffix, "dst"])
    chain.rules.append(rule)
    rule = stub_fiptables.Rule(IPV4, "RETURN")
    rule.protocol = "icmp"
    rule.create_set_match(["felix-from-icmp-%s" % suffix, "dst"])
    chain.rules.append(rule)
    rule = stub_fiptables.Rule(IPV4, "DROP")
    chain.rules.append(rule)
    chain = stub_fiptables.get_chain(table, "felix-to-%s" % suffix)
    rule = stub_fiptables.Rule(IPV4, "DROP")
    rule.create_conntrack_match(["INVALID"])
    chain.rules.append(rule)
    rule = stub_fiptables.Rule(IPV4, "RETURN")
    rule.create_conntrack_match(["RELATED,ESTABLISHED"])
    chain.rules.append(rule)
    rule = stub_fiptables.Rule(IPV4, "RETURN")
    rule.create_set_match(["felix-to-port-%s" % suffix, "src,dst"])
    chain.rules.append(rule)
    rule = stub_fiptables.Rule(IPV4, "RETURN")
    rule.create_set_match(["felix-to-addr-%s" % suffix, "src"])
    chain.rules.append(rule)
    rule = stub_fiptables.Rule(IPV4, "RETURN")
    rule.protocol = "icmp"
    rule.create_set_match(["felix-to-icmp-%s" % suffix, "src"])
    chain.rules.append(rule)
    rule = stub_fiptables.Rule(IPV4, "DROP")
    chain.rules.append(rule)
    # --- IPv6 rules (same pattern, with icmpv6 / DHCPv6 differences) ---
    table = expected_iptables.tables_v6["filter"]
    chain = table._chains_dict["felix-FROM-ENDPOINT"]
    rule = stub_fiptables.Rule(IPV6, "felix-from-%s" % suffix)
    rule.in_interface = tap
    chain.rules.append(rule)
    chain = table._chains_dict["felix-TO-ENDPOINT"]
    rule = stub_fiptables.Rule(IPV6, "felix-to-%s" % suffix)
    rule.out_interface = tap
    chain.rules.append(rule)
    chain = stub_fiptables.get_chain(table, "felix-from-%s" % suffix)
    # NOTE(review): this Rule passes the builtin ``type`` rather than
    # IPV6 -- presumably mirrors the code under test; confirm.
    rule = stub_fiptables.Rule(type, "RETURN")
    rule.protocol = "icmpv6"
    chain.rules.append(rule)
    rule = stub_fiptables.Rule(IPV6, "DROP")
    rule.create_conntrack_match(["INVALID"])
    chain.rules.append(rule)
    rule = stub_fiptables.Rule(IPV6, "RETURN")
    rule.create_conntrack_match(["RELATED,ESTABLISHED"])
    chain.rules.append(rule)
    # udp sport 546 -> dport 547 (DHCPv6 client to server).
    rule = stub_fiptables.Rule(IPV6, "RETURN")
    rule.protocol = "udp"
    rule.create_udp_match("546", "547")
    chain.rules.append(rule)
    if ipv6 is not None:
        rule = stub_fiptables.Rule(IPV6)
        rule.create_target("MARK", {"set_mark": "1"})
        rule.src = ipv6
        rule.create_mac_match(mac)
        chain.rules.append(rule)
        rule = stub_fiptables.Rule(IPV6, "DROP")
        rule.create_mark_match("!1")
        chain.rules.append(rule)
    rule = stub_fiptables.Rule(IPV6, "RETURN")
    rule.create_set_match(["felix-6-from-port-%s" % suffix, "dst,dst"])
    chain.rules.append(rule)
    rule = stub_fiptables.Rule(IPV6, "RETURN")
    rule.create_set_match(["felix-6-from-addr-%s" % suffix, "dst"])
    chain.rules.append(rule)
    rule = stub_fiptables.Rule(IPV6, "RETURN")
    rule.protocol = "icmpv6"
    rule.create_set_match(["felix-6-from-icmp-%s" % suffix, "dst"])
    chain.rules.append(rule)
    rule = stub_fiptables.Rule(IPV6, "DROP")
    chain.rules.append(rule)
    chain = stub_fiptables.get_chain(table, "felix-to-%s" % suffix)
    # A fixed set of ICMPv6 types is always RETURNed (allowed) inbound.
    for icmp in ["130", "131", "132", "134", "135", "136"]:
        rule = stub_fiptables.Rule(futils.IPV6, "RETURN")
        rule.protocol = "icmpv6"
        rule.create_icmp6_match([icmp])
        chain.rules.append(rule)
    rule = stub_fiptables.Rule(IPV6, "DROP")
    rule.create_conntrack_match(["INVALID"])
    chain.rules.append(rule)
    rule = stub_fiptables.Rule(IPV6, "RETURN")
    rule.create_conntrack_match(["RELATED,ESTABLISHED"])
    chain.rules.append(rule)
    rule = stub_fiptables.Rule(IPV6, "RETURN")
    rule.create_set_match(["felix-6-to-port-%s" % suffix, "src,dst"])
    chain.rules.append(rule)
    rule = stub_fiptables.Rule(IPV6, "RETURN")
    rule.create_set_match(["felix-6-to-addr-%s" % suffix, "src"])
    chain.rules.append(rule)
    rule = stub_fiptables.Rule(IPV6, "RETURN")
    rule.protocol = "icmpv6"
    rule.create_set_match(["felix-6-to-icmp-%s" % suffix, "src"])
    chain.rules.append(rule)
    rule = stub_fiptables.Rule(IPV6, "DROP")
    chain.rules.append(rule)
def add_endpoint_ipsets(suffix):
    """
    Sets up the ipsets for a given endpoint. Actual entries in these endpoints
    must then be added manually.

    For each address family there is a port, addr and icmp ipset in each
    direction ("to" / "from"), named felix[-6]-<dir>-<kind>-<suffix>.
    """
    # Create ipsets if they do not already exist.
    for name_prefix, family in (("felix-", "inet"), ("felix-6-", "inet6")):
        for direction in ("to", "from"):
            base = name_prefix + direction
            expected_ipsets.create("%s-port-%s" % (base, suffix),
                                   "hash:net,port", family)
            expected_ipsets.create("%s-addr-%s" % (base, suffix),
                                   "hash:net", family)
            expected_ipsets.create("%s-icmp-%s" % (base, suffix),
                                   "hash:net", family)
|
|
# -*- coding: utf-8 -*-
"""
NEEDS CLEANUP SO IT EITHER DOES THE IMPORTS OR GENERATES THE FILE
python -c "import utool"
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import sys
import multiprocessing
import textwrap
#import types
#DEBUG_IMPORTS = '--debug-imports' in sys.argv
#----------
# EXECUTORS
#----------
def __excecute_imports(module, modname, imports, verbose=False):
    """ Module Imports

    Imports each submodule named in ``imports`` underneath ``modname``
    using a Python3-style absolute import (level=0).

    ``module`` is not used here; it is kept for interface symmetry with
    the other executor functions.
    """
    if verbose:
        print('[UTIL_IMPORT] EXECUTING %d IMPORT TUPLES' % (len(imports),))
    for name in imports:
        # The old ``level == -1`` (implicit-relative, Python 2) branch was
        # dead code: ``level`` was unconditionally set to 0 before the
        # loop, so only the absolute-import path could ever run.
        # FIXME: should support unicode. Maybe just a python2 thing
        __import__(modname, globals(), locals(), fromlist=[str(name)], level=0)
def __execute_fromimport(module, modname, import_tuples, verbose=False):
    """ Module From Imports

    Performs ``from modname.sub import a, b, ...`` for each explicit
    fromlist in ``import_tuples``, copying the attributes onto ``module``.
    Returns the (name, fromlist) pairs that were processed.
    """
    if verbose:
        print('[UTIL_IMPORT] EXECUTING %d FROM IMPORT TUPLES' % (len(import_tuples),))
    from_imports = __get_from_imports(import_tuples)
    for submod_name, attr_names in from_imports:
        full_modname = '.'.join((modname, submod_name))
        submodule = __import__(full_modname, globals(), locals(),
                               fromlist=attr_names, level=0)
        # Re-export each requested attribute from the parent module.
        for attr_name in attr_names:
            setattr(module, attr_name, getattr(submodule, attr_name))
    return from_imports
def __execute_fromimport_star(module, modname, import_tuples, ignore_list=[],
                              ignore_startswith=[], ignore_endswith=[],
                              check_not_imported=True, verbose=False,
                              veryverbose=False):
    r"""
    Effectively import * statements

    For each (name, fromlist) tuple, copies the "public-looking"
    attributes of the already-imported submodule ``modname.name`` onto
    ``module`` and returns the (name, copied_attrnames) pairs.

    The dynamic_import must happen before any * imports otherwise it wont catch
    anything.
    Ignore:
        ignore_startswith = []
        ignore_endswith = []
        check_not_imported = False
        verbose = True
        veryverbose = True
    """
    if verbose:
        print('[UTIL_IMPORT] EXECUTE %d FROMIMPORT STAR TUPLES.' % (len(import_tuples),))
    from_imports = []
    # Explicitly ignore these special functions (usually stdlib functions)
    ignoreset = set(['print', 'print_', 'printDBG', 'rrr', 'profile',
                     'print_function', 'absolute_import', 'division', 'zip',
                     'map', 'range', 'list', 'zip_longest', 'filter', 'filterfalse',
                     'dirname', 'realpath', 'join', 'exists', 'normpath',
                     'splitext', 'expanduser', 'relpath', 'isabs',
                     'commonprefix', 'basename', 'input', 'reduce',
                     #'OrderedDict',
                     #'product',
                     ] + ignore_list)
    #'isdir', 'isfile', '
    #def is_defined_by_module2(item, module):
    #    belongs = False
    #    if hasattr(item, '__module__'):
    #        belongs = item.__module__ == module.__name__
    #    elif hasattr(item, 'func_globals'):
    #        belongs = item.func_globals['__name__'] == module.__name__
    #    return belongs
    for name, fromlist in import_tuples:
        #absname = modname + '.' + name
        # The submodule must already be in sys.modules (imported earlier).
        child_module = sys.modules[modname + '.' + name]
        # Check if the variable already belongs to the module
        varset = set(vars(module)) if check_not_imported else set()
        # Attributes explicitly listed in the fromlist are always imported.
        fromset = set(fromlist) if fromlist is not None else set()
        def valid_attrname(attrname):
            """
            Guess if the attrname is valid based on its name
            """
            is_forced = attrname in fromset
            is_private = attrname.startswith('_')
            is_conflit = attrname in varset
            is_module = attrname in sys.modules # Isn't fool proof (next step is)
            is_ignore1 = attrname in ignoreset
            is_ignore2 = any([attrname.startswith(prefix) for prefix in ignore_startswith])
            is_ignore3 = any([attrname.endswith(suffix) for suffix in ignore_endswith])
            is_ignore = any((is_ignore1, is_ignore2, is_ignore3))
            is_valid = not any((is_ignore, is_private, is_conflit, is_module))
            #is_valid = is_valid and is_defined_by_module2(getattr(child_module, attrname), child_module)
            return (is_forced or is_valid)
        allattrs = dir(child_module)
        fromlist_ = [attrname for attrname in allattrs if valid_attrname(attrname)]
        #if verbose:
        #    print('[UTIL_IMPORT] name=%r, len(allattrs)=%d' % (name, len(allattrs)))
        #if verbose:
        #    print('[UTIL_IMPORT] name=%r, len(fromlist_)=%d' % (name, len(fromlist_)))
        valid_fromlist_ = []
        for attrname in fromlist_:
            attrval = getattr(child_module, attrname)
            try:
                # Disallow fromimport modules
                # (unless explicitly forced via the fromlist).
                forced = attrname in fromset
                if not forced and getattr(attrval, '__name__') in sys.modules:
                    if veryverbose:
                        print('[UTIL_IMPORT] not importing: %r' % attrname)
                    continue
            except AttributeError:
                # Attribute has no __name__; cannot be a module, keep it.
                pass
            if veryverbose:
                print('[UTIL_IMPORT] %s is importing: %r' % (modname, attrname))
            valid_fromlist_.append(attrname)
            setattr(module, attrname, attrval)
        if verbose:
            print('[UTIL_IMPORT] name=%r, len(valid_fromlist_)=%d' % (name, len(valid_fromlist_)))
        from_imports.append((name, valid_fromlist_))
    return from_imports
#----------
# PARSERS
#----------
def __get_from_imports(import_tuples):
    """ Returns import names and fromlist
    import_tuples are specified as
    (name, fromlist, ispackage)

    Entries whose fromlist is None or empty are dropped.
    """
    from_imports = []
    for tup in import_tuples:
        fromlist = tup[1]
        if fromlist is None or len(fromlist) == 0:
            # Nothing explicitly requested from this submodule.
            continue
        from_imports.append((tup[0], fromlist))
    return from_imports
#----------
# STRING MAKERS
#----------
def _initstr(modname, imports, from_imports, inject_execstr, withheader=True):
    """ Calls the other string makers

    Assembles the full generated __init__ text: optional header, plain
    import lines, from-import lines, then the injected reload code.
    Empty sections are omitted.
    """
    sections = []
    if withheader:
        sections.append(_make_module_header())
    sections.append(_make_imports_str(imports, modname))
    sections.append(_make_fromimport_str(from_imports, modname))
    sections.append(inject_execstr)
    return '\n'.join(section for section in sections if len(section) > 0)
def _make_module_header():
    """ Header lines emitted at the top of a generated __init__ file. """
    header_lines = [
        '# flake8: noqa',
        'from __future__ import absolute_import, division, print_function, unicode_literals',
    ]
    return '\n'.join(header_lines)
def _make_imports_str(imports, rootmodname='.'):
    """ One ``from <rootmodname> import <name>`` line per module name. """
    lines = ['from {} import {}'.format(rootmodname, name)
             for name in imports]
    return '\n'.join(lines)
def _make_fromimport_str(from_imports, rootmodname='.'):
    """ Build the ``from X.Y import (a, b, ...)`` lines, line-wrapped
    with continuation lines aligned under the opening paren. """
    from utool import util_str
    if rootmodname == '.':
        # dot is already taken care of in fmtstr
        rootmodname = ''
    def _pack_one(tup):
        submod, attrs = tup[0], tup[1]
        prefix = 'from {rootmodname}.{name} import ('.format(
            rootmodname=rootmodname, name=submod)
        continuation_indent = ' ' * len(prefix)
        if not attrs:
            raw = ''
        else:
            raw = prefix + ', '.join(attrs) + ',)'
        # not sure why this isn't 76? >= maybe?
        return util_str.pack_into(raw, textwidth=75,
                                  newline_prefix=continuation_indent,
                                  break_words=False)
    packed_lines = [_pack_one(tup) for tup in from_imports]
    return '\n'.join(packed_lines)
def _inject_execstr(modname, import_tuples):
    """ Injection and Reload String Defs

    Builds the code block appended to a generated __init__: it wires up
    utool's print/rrr/profile injection and defines reload_subs /
    reassign_submodule_attributes for *modname*.
    """
    if modname == 'utool':
        # Special case import of the util_inject module
        injecter = 'util_inject'
        injecter_import = ''
    else:
        # Normal case implicit import of util_inject
        injecter_import = 'import utool'
        injecter = 'utool'
    # Template for the injected block; placeholders are filled via
    # str.format with format_dict below.
    injectstr_fmt = textwrap.dedent(
        r'''
        # STARTBLOCK
        {injecter_import}
        print, rrr, profile = {injecter}.inject2(__name__, '[{modname}]')
        def reassign_submodule_attributes(verbose=1):
            """
            Updates attributes in the __init__ modules with updated attributes
            in the submodules.
            """
            import sys
            if verbose and '--quiet' not in sys.argv:
                print('dev reimport')
            # Self import
            import {modname}
            # Implicit reassignment.
            seen_ = set([])
            for tup in IMPORT_TUPLES:
                if len(tup) > 2 and tup[2]:
                    continue # dont import package names
                submodname, fromimports = tup[0:2]
                submod = getattr({modname}, submodname)
                for attr in dir(submod):
                    if attr.startswith('_'):
                        continue
                    if attr in seen_:
                        # This just holds off bad behavior
                        # but it does mimic normal util_import behavior
                        # which is good
                        continue
                    seen_.add(attr)
                    setattr({modname}, attr, getattr(submod, attr))
        def reload_subs(verbose=1):
            """ Reloads {modname} and submodules """
            if verbose:
                print('Reloading {modname} submodules')
            rrr(verbose > 1)
            def wrap_fbrrr(mod):
                def fbrrr(*args, **kwargs):
                    """ fallback reload """
                    if verbose > 0:
                        print('Auto-reload (using rrr) not setup for mod=%r' % (mod,))
                return fbrrr
            def get_rrr(mod):
                if hasattr(mod, 'rrr'):
                    return mod.rrr
                else:
                    return wrap_fbrrr(mod)
            def get_reload_subs(mod):
                return getattr(mod, 'reload_subs', wrap_fbrrr(mod))
            {reload_body}
            rrr(verbose > 1)
            try:
                # hackish way of propogating up the new reloaded submodule attributes
                reassign_submodule_attributes(verbose=verbose)
            except Exception as ex:
                print(ex)
        rrrr = reload_subs
        # ENDBLOCK
        ''')
    # The STARTBLOCK/ENDBLOCK markers only delimit the template source.
    injectstr_fmt = injectstr_fmt.replace('# STARTBLOCK', '')
    injectstr_fmt = injectstr_fmt.replace('# ENDBLOCK', '')
    # Per-submodule reload lines, indented to sit inside reload_subs.
    rrrdir_fmt = '    get_reload_subs({modname})(verbose=verbose)'
    rrrfile_fmt = '    get_rrr({modname})(verbose > 1)'
    def _reload_command(tup):
        # Packages (tup[2] is True) reload recursively; plain modules rrr.
        if len(tup) > 2 and tup[2] is True:
            return rrrdir_fmt.format(modname=tup[0])
        else:
            return rrrfile_fmt.format(modname=tup[0])
    reload_body = '\n'.join(map(_reload_command, import_tuples)).strip()
    format_dict = {
        'modname': modname,
        'reload_body': reload_body,
        'injecter': injecter,
        'injecter_import': injecter_import,
    }
    inject_execstr = injectstr_fmt.format(**format_dict).strip()
    return inject_execstr
#----------
# PUBLIC FUNCTIONS
#----------
def dynamic_import(modname, import_tuples, developing=True, ignore_froms=[],
                   dump=False, ignore_startswith=[], ignore_endswith=[],
                   ignore_list=[], check_not_imported=True, return_initstr=False,
                   verbose=False):
    """
    MAIN ENTRY POINT

    Dynamically import listed util libraries and their attributes.
    Create reload_subs function.

    Using __import__ like this is typically not considered good style However,
    it is better than import * and this will generate the good file text that
    can be used when the module is 'frozen'.

    Returns:
        str: init_inject_str - by default all imports are executed in this
            function and only the remainig code needed to be executed is
            returned to define the reload logic.
        str, str: init_inject_str, init_str - if return_initstr is True then
            also returns init_str defining the from imports.

    Ignore:
        ignore_startswith = []
        ignore_endswith = []
        check_not_imported = True
        verbose = True
    """
    if verbose:
        print('[UTIL_IMPORT] Running Dynamic Imports for modname=%r ' % modname)
    # Get the module that will be imported into
    try:
        module = sys.modules[modname]
    except:
        module = __import__(modname)
    # List of modules to be imported
    imports = [tup[0] for tup in import_tuples]
    # Import the modules
    __excecute_imports(module, modname, imports, verbose=verbose)
    # If developing do explicit import stars
    if developing:
        from_imports = __execute_fromimport_star(module, modname, import_tuples,
                                                 ignore_list=ignore_list,
                                                 ignore_startswith=ignore_startswith,
                                                 ignore_endswith=ignore_endswith,
                                                 check_not_imported=check_not_imported,
                                                 verbose=verbose)
    else:
        from_imports = __execute_fromimport(module, modname, import_tuples, verbose=verbose)
    inject_execstr = _inject_execstr(modname, import_tuples)
    # If requested: print what the __init__ module should look like
    # (triggered by --dump-<modname>-init / --print-<modname>-init or dump=True)
    dump_requested = (('--dump-%s-init' % modname) in sys.argv or
                      ('--print-%s-init' % modname) in sys.argv) or dump
    overwrite_requested = ('--update-%s-init' % modname) in sys.argv
    if verbose:
        print('[UTIL_IMPORT] Finished Dynamic Imports for modname=%r ' % modname)
    if dump_requested:
        # Only the main process dumps, to avoid duplicate output when
        # running under multiprocessing.
        is_main_proc = multiprocessing.current_process().name == 'MainProcess'
        if is_main_proc:
            from utool import util_str
            initstr = _initstr(modname, imports, from_imports, inject_execstr)
            print(util_str.indent(initstr))
    # Overwrite the __init__.py file with new explicit imports
    if overwrite_requested:
        """
        SeeAlso:
            util_inject.inject_python_code
            util_str.replace_between_tags
        """
        is_main_proc = multiprocessing.current_process().name == 'MainProcess'
        if is_main_proc:
            from utool import util_str
            from os.path import join, exists
            initstr = _initstr(modname, imports, from_imports, inject_execstr, withheader=False)
            new_else = util_str.indent(initstr)
            #print(new_else)
            # Get path to init file so we can overwrite it
            init_fpath = join(module.__path__[0], '__init__.py')
            print('attempting to update: %r' % init_fpath)
            assert exists(init_fpath)
            new_lines = []
            editing = False
            updated = False
            #start_tag = '# <AUTOGEN_INIT>'
            #end_tag = '# </AUTOGEN_INIT>'
            # Replace everything between the AUTOGEN_INIT tags with the
            # freshly generated text; lines outside the tags are kept.
            with open(init_fpath, 'r') as file_:
                #text = file_.read()
                lines = file_.readlines()
                for line in lines:
                    if not editing:
                        new_lines.append(line)
                    if line.strip().startswith('# <AUTOGEN_INIT>'):
                        new_lines.append('\n' + new_else + '\n    # </AUTOGEN_INIT>\n')
                        editing = True
                        updated = True
                    if line.strip().startswith('# </AUTOGEN_INIT>'):
                        editing = False
            # TODO:
            #new_text = util_str.replace_between_tags(text, new_else, start_tag, end_tag)
            if updated:
                print('writing updated file: %r' % init_fpath)
                new_text = ''.join(new_lines)
                with open(init_fpath, 'w') as file_:
                    file_.write(new_text)
            else:
                print('no write hook for file: %r' % init_fpath)
    if return_initstr:
        initstr = _initstr(modname, imports, from_imports, '', withheader=False)
        return inject_execstr, initstr
    else:
        return inject_execstr
def make_initstr(modname, import_tuples, verbose=False):
    """
    Build the textual representation of an ``__init__`` module.

    Performs no importing; purely assembles strings from the given
    ``(module, attributes)`` tuples.
    """
    module_names = [entry[0] for entry in import_tuples]
    fromimport_strs = __get_from_imports(import_tuples)
    exec_str = _inject_execstr(modname, import_tuples)
    return _initstr(modname, module_names, fromimport_strs, exec_str)
def make_import_tuples(module_path, exclude_modnames=None):
    """ Infer the import_tuples from a module_path

    Args:
        module_path (str): package directory to scan for submodules and
            subpackages.
        exclude_modnames (list): module names to leave out of the result.
            Defaults to no exclusions.

    Returns:
        list: ``(modname, None)`` tuples for plain modules followed by
            ``(modname, None, True)`` tuples for subpackages.
    """
    from utool import util_path
    kwargs = dict(private=False, full=False)
    module_list = util_path.ls_modulefiles(module_path, noext=True, **kwargs)
    package_list = util_path.ls_moduledirs(module_path, **kwargs)
    # Avoid a mutable default argument; None means "exclude nothing"
    exclude_set = set(exclude_modnames or [])
    module_import_tuples = [(modname, None) for modname in module_list
                            if modname not in exclude_set]
    package_import_tuples = [(modname, None, True) for modname in package_list
                             if modname not in exclude_set]
    import_tuples = (module_import_tuples + package_import_tuples)
    return import_tuples
|
|
# Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Helpers for comparing version strings.
"""
import copy
import functools
import inspect
import logging
from oslo_config import cfg
import pkg_resources
import six
from cinder.openstack.common._i18n import _
# Module-level logger and the global oslo.config handle.
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
# Registered lazily by report_deprecated_feature(); controls whether a
# deprecation warning is fatal (raises) or merely logged.
deprecated_opts = [
    cfg.BoolOpt('fatal_deprecations',
                default=False,
                help='Enables or disables fatal status of deprecations.'),
]
def list_opts():
    """Entry point for oslo.config-generator.
    """
    opts = copy.deepcopy(deprecated_opts)
    return [(None, opts)]
class deprecated(object):
    """A decorator to mark callables as deprecated.
    This decorator logs a deprecation message when the callable it decorates is
    used. The message will include the release where the callable was
    deprecated, the release where it may be removed and possibly an optional
    replacement.
    Examples:
    1. Specifying the required deprecated release
    >>> @deprecated(as_of=deprecated.ICEHOUSE)
    ... def a(): pass
    2. Specifying a replacement:
    >>> @deprecated(as_of=deprecated.ICEHOUSE, in_favor_of='f()')
    ... def b(): pass
    3. Specifying the release where the functionality may be removed:
    >>> @deprecated(as_of=deprecated.ICEHOUSE, remove_in=+1)
    ... def c(): pass
    4. Specifying the deprecated functionality will not be removed:
    >>> @deprecated(as_of=deprecated.ICEHOUSE, remove_in=0)
    ... def d(): pass
    5. Specifying a replacement, deprecated functionality will not be removed:
    >>> @deprecated(as_of=deprecated.ICEHOUSE, in_favor_of='f()', remove_in=0)
    ... def e(): pass
    """
    # NOTE(morganfainberg): Bexar is used for unit test purposes, it is
    # expected we maintain a gap between Bexar and Folsom in this list.
    # Single-letter release codes; _get_safe_to_remove_release() relies on
    # the ordinal distance between these letters to compute removal targets.
    BEXAR = 'B'
    FOLSOM = 'F'
    GRIZZLY = 'G'
    HAVANA = 'H'
    ICEHOUSE = 'I'
    JUNO = 'J'
    KILO = 'K'
    LIBERTY = 'L'
    # Map from single-letter code to the human-readable release name.
    _RELEASES = {
        # NOTE(morganfainberg): Bexar is used for unit test purposes, it is
        # expected we maintain a gap between Bexar and Folsom in this list.
        'B': 'Bexar',
        'F': 'Folsom',
        'G': 'Grizzly',
        'H': 'Havana',
        'I': 'Icehouse',
        'J': 'Juno',
        'K': 'Kilo',
        'L': 'Liberty',
    }
    # Message templates; _build_message() picks one based on whether a
    # replacement exists and whether removal is planned (remove_in > 0).
    _deprecated_msg_with_alternative = _(
        '%(what)s is deprecated as of %(as_of)s in favor of '
        '%(in_favor_of)s and may be removed in %(remove_in)s.')
    _deprecated_msg_no_alternative = _(
        '%(what)s is deprecated as of %(as_of)s and may be '
        'removed in %(remove_in)s. It will not be superseded.')
    _deprecated_msg_with_alternative_no_removal = _(
        '%(what)s is deprecated as of %(as_of)s in favor of %(in_favor_of)s.')
    _deprecated_msg_with_no_alternative_no_removal = _(
        '%(what)s is deprecated as of %(as_of)s. It will not be superseded.')
    def __init__(self, as_of, in_favor_of=None, remove_in=2, what=None):
        """Initialize decorator
        :param as_of: the release deprecating the callable. Constants
            are define in this class for convenience.
        :param in_favor_of: the replacement for the callable (optional)
        :param remove_in: an integer specifying how many releases to wait
            before removing (default: 2)
        :param what: name of the thing being deprecated (default: the
            callable's name)
        """
        self.as_of = as_of
        self.in_favor_of = in_favor_of
        self.remove_in = remove_in
        self.what = what
    def __call__(self, func_or_cls):
        """Wrap a function or class so that using it logs a deprecation."""
        # Default the reported name to the decorated callable's own name.
        if not self.what:
            self.what = func_or_cls.__name__ + '()'
        msg, details = self._build_message()
        if inspect.isfunction(func_or_cls):
            @six.wraps(func_or_cls)
            def wrapped(*args, **kwargs):
                # Warn on every call, then delegate to the original.
                report_deprecated_feature(LOG, msg, details)
                return func_or_cls(*args, **kwargs)
            return wrapped
        elif inspect.isclass(func_or_cls):
            orig_init = func_or_cls.__init__
            # TODO(tsufiev): change `functools` module to `six` as
            # soon as six 1.7.4 (with fix for passing `assigned`
            # argument to underlying `functools.wraps`) is released
            # and added to the oslo-incubator requrements
            @functools.wraps(orig_init, assigned=('__name__', '__doc__'))
            def new_init(self, *args, **kwargs):
                # Warn on every construction, then run the original __init__.
                report_deprecated_feature(LOG, msg, details)
                orig_init(self, *args, **kwargs)
            func_or_cls.__init__ = new_init
            return func_or_cls
        else:
            raise TypeError('deprecated can be used only with functions or '
                            'classes')
    def _get_safe_to_remove_release(self, release):
        # TODO(dstanek): this method will have to be reimplemented once
        # when we get to the X release because once we get to the Y
        # release, what is Y+2?
        # Release codes are consecutive letters, so removal release is
        # computed by ordinal arithmetic on the single-letter code.
        new_release = chr(ord(release) + self.remove_in)
        if new_release in self._RELEASES:
            return self._RELEASES[new_release]
        else:
            # Past the known list; return the raw computed letter.
            return new_release
    def _build_message(self):
        """Select the message template and build its substitution dict."""
        details = dict(what=self.what,
                       as_of=self._RELEASES[self.as_of],
                       remove_in=self._get_safe_to_remove_release(self.as_of))
        if self.in_favor_of:
            details['in_favor_of'] = self.in_favor_of
            if self.remove_in > 0:
                msg = self._deprecated_msg_with_alternative
            else:
                # There are no plans to remove this function, but it is
                # now deprecated.
                msg = self._deprecated_msg_with_alternative_no_removal
        else:
            if self.remove_in > 0:
                msg = self._deprecated_msg_no_alternative
            else:
                # There are no plans to remove this function, but it is
                # now deprecated.
                msg = self._deprecated_msg_with_no_alternative_no_removal
        return msg, details
def is_compatible(requested_version, current_version, same_major=True):
    """Determine whether `requested_version` is satisfied by
    `current_version`; in other words, `current_version` is >=
    `requested_version`.
    :param requested_version: version to check for compatibility
    :param current_version: version to check against
    :param same_major: if True, the major version must be identical between
        `requested_version` and `current_version`. This is used when a
        major-version difference indicates incompatibility between the two
        versions. Since this is the common-case in practice, the default is
        True.
    :returns: True if compatible, False if not
    """
    requested = pkg_resources.parse_version(requested_version)
    current = pkg_resources.parse_version(current_version)
    # A differing major version signals incompatibility when same_major is set.
    if same_major and requested[0] != current[0]:
        return False
    return current >= requested
# Track the messages we have sent already. See
# report_deprecated_feature().
# Maps a message string to the list of positional-arg tuples already logged.
_deprecated_messages_sent = {}
def report_deprecated_feature(logger, msg, *args, **kwargs):
    """Call this function when a deprecated feature is used.
    If the system is configured for fatal deprecations then the message
    is logged at the 'critical' level and :class:`DeprecatedConfig` will
    be raised.
    Otherwise, the message will be logged (once) at the 'warning' level.
    :raises: :class:`DeprecatedConfig` if the system is configured for
             fatal deprecations.
    """
    stdmsg = _("Deprecated: %s") % msg
    CONF.register_opts(deprecated_opts)
    if CONF.fatal_deprecations:
        logger.critical(stdmsg, *args, **kwargs)
        raise DeprecatedConfig(msg=stdmsg)
    # Using a list because a tuple with dict can't be stored in a set.
    sent_args = _deprecated_messages_sent.setdefault(msg, list())
    if args in sent_args:
        # Already logged this message, so don't log it again.
        return
    sent_args.append(args)
    # logger.warn() is a deprecated alias of logger.warning(); use the
    # canonical method name.
    logger.warning(stdmsg, *args, **kwargs)
class DeprecatedConfig(Exception):
    """Raised when a deprecated feature is used and deprecations are fatal."""
    message = _("Fatal call to deprecated config: %(msg)s")
    def __init__(self, msg):
        # Name the subclass itself in super() so the normal MRO (including
        # Exception.__init__) runs; super(Exception, self) skipped a level.
        super(DeprecatedConfig, self).__init__(self.message % dict(msg=msg))
|
|
from __future__ import absolute_import, division, print_function
import pytest
import os
import numpy as np
import sqlalchemy as sa
from datashape import discover, dshape
import datashape
from into.backends.sql import (dshape_to_table, create_from_datashape,
dshape_to_alchemy)
from into.utils import tmpfile, raises
from into import convert, append, resource, discover, into
def test_resource():
    """resource() on a sqlite URI with '::table' yields a bound sa.Table."""
    tbl = resource('sqlite:///:memory:::mytable',
                   dshape='var * {x: int, y: int}')
    assert isinstance(tbl, sa.Table)
    assert tbl.name == 'mytable'
    assert isinstance(tbl.bind, sa.engine.base.Engine)
    assert set(col.name for col in tbl.c) == set(['x', 'y'])
def test_append_and_convert_round_trip():
    """Appending rows and converting back yields the original data."""
    engine = sa.create_engine('sqlite:///:memory:')
    bank = sa.Table('bank', sa.MetaData(engine),
                    sa.Column('name', sa.String, primary_key=True),
                    sa.Column('balance', sa.Integer))
    bank.create()
    rows = [('Alice', 1), ('Bob', 2)]
    append(bank, rows)
    assert convert(list, bank) == rows
def test_plus_must_have_text():
    """A '+' in the dialect must be followed by a driver name."""
    uri = 'redshift+://user:pass@host:1234/db'
    with pytest.raises(NotImplementedError):
        resource(uri)
def test_resource_on_file():
    """The table name may be a separate argument or '::'-embedded in the URI."""
    ds = 'var * {x: int, y: int}'
    with tmpfile('.db') as fn:
        tbl = resource('sqlite:///' + fn, 'foo', dshape=ds)
        assert isinstance(tbl, sa.Table)
    with tmpfile('.db') as fn:
        tbl = resource('sqlite:///' + fn + '::' + 'foo', dshape=ds)
        assert isinstance(tbl, sa.Table)
def test_resource_to_engine():
    """A bare sqlite URI resolves to an Engine for that file."""
    with tmpfile('.db') as fn:
        engine = resource('sqlite:///' + fn)
        assert isinstance(engine, sa.engine.Engine)
        assert engine.dialect.name == 'sqlite'
def test_resource_to_engine_to_create_tables():
    """A record-of-tables dshape creates the described tables on the engine."""
    with tmpfile('.db') as fn:
        ds = datashape.dshape('{mytable: var * {name: string, amt: int}}')
        engine = resource('sqlite:///' + fn, dshape=ds)
        assert isinstance(engine, sa.engine.Engine)
        assert engine.dialect.name == 'sqlite'
        assert discover(engine) == ds
def test_discovery():
    """discover() maps sqlalchemy types and tables onto datashapes."""
    assert discover(sa.String()) == datashape.string
    accounts = sa.Table('accounts', sa.MetaData(),
                        sa.Column('name', sa.String),
                        sa.Column('amount', sa.Integer),
                        sa.Column('timestamp', sa.DateTime, primary_key=True))
    expected = dshape('var * {name: ?string, amount: ?int32, timestamp: datetime}')
    assert discover(accounts) == expected
def test_discovery_numeric_column():
    """NUMERIC columns are discoverable without error."""
    assert discover(sa.String()) == datashape.string
    numeric_table = sa.Table('name', sa.MetaData(),
                             sa.Column('name', sa.types.NUMERIC),)
    assert discover(numeric_table)
def test_discover_null_columns():
    """Nullable columns discover as option types; non-nullable ones do not."""
    nullable = sa.Column('name', sa.String, nullable=True)
    required = sa.Column('name', sa.String, nullable=False)
    assert dshape(discover(nullable)) == dshape('{name: ?string}')
    assert dshape(discover(required)) == dshape('{name: string}')
def single_table_engine():
    """Return (engine, table) for a fresh in-memory 'accounts' table."""
    engine = sa.create_engine('sqlite:///:memory:')
    accounts = sa.Table('accounts', sa.MetaData(engine),
                        sa.Column('name', sa.String),
                        sa.Column('amount', sa.Integer))
    accounts.create()
    return engine, accounts
def test_select_to_iterator():
    """Select expressions convert per the requested output type and dshape."""
    engine, accounts = single_table_engine()
    append(accounts, [('Alice', 100), ('Bob', 200)])
    plus_one = sa.select([accounts.c.amount + 1])
    assert convert(list, plus_one) == [(101,), (201,)]
    assert convert(list, plus_one, dshape=dshape('var * int')) == [101, 201]
    total = sa.select([sa.sql.func.sum(accounts.c.amount)])
    assert convert(int, total, dshape=dshape('int')) == 300
    whole = sa.select([accounts])
    rows = convert(list, whole, dshape=discover(accounts))
    assert type(rows[0]) is tuple
def test_discovery_engine():
    """Engines discover a table by name and all tables collectively."""
    engine, accounts = single_table_engine()
    assert discover(engine, 'accounts') == discover(accounts)
    assert str(discover(engine)) == str(discover({'accounts': accounts}))
def test_discovery_metadata():
    """MetaData discovers the same schema as a dict of its tables."""
    _, accounts = single_table_engine()
    assert str(discover(accounts.metadata)) == \
        str(discover({'accounts': accounts}))
def test_discover_views():
    """Views are discovered alongside tables in the metadata."""
    engine, accounts = single_table_engine()
    with engine.connect() as conn:
        conn.execute('''CREATE VIEW myview AS
            SELECT name, amount
            FROM accounts
            WHERE amount > 0''')
    expected = str(discover({'accounts': accounts, 'myview': accounts}))
    assert str(discover(accounts.metadata)) == expected
def test_extend_empty():
    """Appending an empty sequence is a no-op."""
    _, accounts = single_table_engine()
    assert not convert(list, accounts)
    append(accounts, [])
    assert not convert(list, accounts)
def test_dshape_to_alchemy():
    """Datashape strings map onto the expected sqlalchemy column types."""
    assert dshape_to_alchemy('string') == sa.Text
    assert isinstance(dshape_to_alchemy('string[40]'), sa.String)
    assert not isinstance(dshape_to_alchemy('string["ascii"]'), sa.Unicode)
    assert isinstance(dshape_to_alchemy('string[40, "U8"]'), sa.Unicode)
    assert dshape_to_alchemy('string[40]').length == 40
    # IEEE-754 single/double precision bit widths
    for ds_str, bits in [('float32', 24), ('float64', 53)]:
        assert dshape_to_alchemy(ds_str).precision == bits
def test_dshape_to_table():
    """dshape_to_table builds a Table with the record's field names."""
    bank = dshape_to_table('bank', '{name: string, amount: int}')
    assert isinstance(bank, sa.Table)
    assert bank.name == 'bank'
    assert [col.name for col in bank.c] == ['name', 'amount']
def test_create_from_datashape():
    """create_from_datashape materializes every table in the dshape."""
    eng = sa.create_engine('sqlite:///:memory:')
    ds = dshape('''{bank: var * {name: string, amount: int},
                   points: var * {x: int, y: int}}''')
    eng = create_from_datashape(eng, ds)
    assert discover(eng) == ds
def test_into_table_iterator():
    """append() accepts both tuple rows and dict rows."""
    metadata = sa.MetaData(sa.create_engine('sqlite:///:memory:'))
    points = dshape_to_table('points', '{x: int, y: int}', metadata=metadata)
    points.create()
    tuple_rows = [(1, 1), (2, 4), (3, 9)]
    append(points, tuple_rows)
    assert convert(list, points) == tuple_rows
    points2 = dshape_to_table('points2', '{x: int, y: int}', metadata=metadata)
    points2.create()
    dict_rows = [{'x': 1, 'y': 1}, {'x': 2, 'y': 4}, {'x': 3, 'y': 9}]
    append(points2, dict_rows)
    # Dict rows come back as tuples in column order, equal to tuple_rows.
    assert convert(list, points2) == tuple_rows
def test_sql_field_names_disagree_on_order():
    """Rows are reordered by field name to match the table's column order."""
    tbl = resource('sqlite:///:memory:::tb', dshape=dshape('{x: int, y: int}'))
    append(tbl, [(1, 2), (10, 20)], dshape=dshape('{y: int, x: int}'))
    assert convert(set, tbl) == set([(2, 1), (20, 10)])
def test_sql_field_names_disagree_on_names():
    """Appending rows whose field names don't match the table must fail."""
    tbl = resource('sqlite:///:memory:::tb', dshape=dshape('{x: int, y: int}'))
    bad_append = lambda: append(tbl, [(1, 2), (10, 20)],
                                dshape=dshape('{x: int, z: int}'))
    assert raises(Exception, bad_append)
def test_resource_on_dialects():
    """Dialect variants and unknown sql dialects share the base handler."""
    mysql_handler = resource.dispatch('mysql://foo')
    assert resource.dispatch('mysql+pymysql://foo') is mysql_handler
    assert resource.dispatch('never-before-seen-sql://foo') is mysql_handler
@pytest.yield_fixture
def sqlite_file():
    """Yield a file-backed sqlite URI; remove the file on teardown.

    A real file is needed when two resources must share one database
    (':memory:' gives every engine its own).
    """
    try:
        yield 'sqlite:///db.db'
    finally:
        # The test may fail before the database file is ever created;
        # don't mask that failure with an OSError from os.remove.
        if os.path.exists('db.db'):
            os.remove('db.db')
def test_append_from_select(sqlite_file):
    """Appending from a SELECT on another table copies its rows."""
    # we can't test in memory here because that creates two independent
    # databases
    dtype = [('amount', 'float64'), ('name', 'S5')]
    raw = np.array([(200.0, 'Glenn'),
                    (314.14, 'Hope'),
                    (235.43, 'Bob')], dtype=dtype)
    raw2 = np.array([(800.0, 'Joe'),
                     (914.14, 'Alice'),
                     (1235.43, 'Ratso')], dtype=dtype)
    t = into('%s::t' % sqlite_file, raw)
    s = into('%s::s' % sqlite_file, raw2)
    t = append(t, s.select())
    assert into(list, t) == np.concatenate((raw, raw2)).tolist()
def test_engine_metadata_caching():
    """Resources on one database share a single engine and MetaData."""
    with tmpfile('db') as fn:
        uri = 'sqlite:///' + fn
        engine = resource(uri)
        a = resource(uri + '::a', dshape=dshape('var * {x: int}'))
        b = resource(uri + '::b', dshape=dshape('var * {y: int}'))
        assert a.metadata is b.metadata
        assert engine is a.bind is b.bind
|
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """Initial South schema migration for the porkweb app.

    Auto-generated by South's schemamigration command; creates/drops the
    JobServer, AttackType, Param, AttackParam, JobParam, HashType, Cracked,
    Job and Log tables. Do not hand-edit the table definitions.
    """
    def forwards(self, orm):
        """Create all porkweb tables."""
        # Adding model 'JobServer'
        db.create_table(u'porkweb_jobserver', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('ipaddr', self.gf('django.db.models.fields.IPAddressField')(max_length=15)),
            ('port', self.gf('django.db.models.fields.IntegerField')(default=8117)),
            ('hostname', self.gf('django.db.models.fields.CharField')(max_length=255, blank=True)),
            ('os', self.gf('django.db.models.fields.CharField')(max_length=32)),
            ('status', self.gf('django.db.models.fields.CharField')(default='Offline', max_length=16)),
            ('details', self.gf('django.db.models.fields.CharField')(max_length=255, blank=True)),
        ))
        db.send_create_signal(u'porkweb', ['JobServer'])
        # Adding model 'AttackType'
        db.create_table(u'porkweb_attacktype', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('name', self.gf('django.db.models.fields.CharField')(max_length=64)),
            ('notes', self.gf('django.db.models.fields.TextField')(blank=True)),
        ))
        db.send_create_signal(u'porkweb', ['AttackType'])
        # Adding model 'Param'
        db.create_table(u'porkweb_param', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('name', self.gf('django.db.models.fields.CharField')(max_length=64)),
            ('value', self.gf('django.db.models.fields.CharField')(max_length=64)),
        ))
        db.send_create_signal(u'porkweb', ['Param'])
        # Adding model 'AttackParam' (multi-table child of Param)
        db.create_table(u'porkweb_attackparam', (
            (u'param_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['porkweb.Param'], unique=True, primary_key=True)),
            ('attack', self.gf('django.db.models.fields.related.ForeignKey')(related_name='params', to=orm['porkweb.AttackType'])),
        ))
        db.send_create_signal(u'porkweb', ['AttackParam'])
        # Adding model 'JobParam' (multi-table child of Param)
        db.create_table(u'porkweb_jobparam', (
            (u'param_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['porkweb.Param'], unique=True, primary_key=True)),
            ('job', self.gf('django.db.models.fields.related.ForeignKey')(related_name='params', to=orm['porkweb.Job'])),
        ))
        db.send_create_signal(u'porkweb', ['JobParam'])
        # Adding model 'HashType'
        db.create_table(u'porkweb_hashtype', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('name', self.gf('django.db.models.fields.CharField')(max_length=32)),
            ('hashcatType', self.gf('django.db.models.fields.IntegerField')(null=True, blank=True)),
            ('hashcat', self.gf('django.db.models.fields.BooleanField')(default=False)),
            ('ocllite', self.gf('django.db.models.fields.BooleanField')(default=False)),
            ('oclplus', self.gf('django.db.models.fields.BooleanField')(default=False)),
        ))
        db.send_create_signal(u'porkweb', ['HashType'])
        # Adding model 'Cracked'
        db.create_table(u'porkweb_cracked', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('when', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
            ('hash', self.gf('django.db.models.fields.CharField')(max_length=255)),
            ('value', self.gf('django.db.models.fields.CharField')(max_length=255)),
            ('job', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['porkweb.Job'])),
        ))
        db.send_create_signal(u'porkweb', ['Cracked'])
        # Adding model 'Job'
        db.create_table(u'porkweb_job', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('hashes', self.gf('django.db.models.fields.TextField')()),
            ('hashType', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['porkweb.HashType'])),
            ('attackType', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['porkweb.AttackType'])),
            ('jobServer', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['porkweb.JobServer'], null=True, blank=True)),
            ('status', self.gf('django.db.models.fields.CharField')(default='New', max_length=16)),
            ('progress', self.gf('django.db.models.fields.FloatField')(default=0)),
            ('started', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
            ('finished', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
            ('eta', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
            ('results', self.gf('django.db.models.fields.TextField')(blank=True)),
            ('speed', self.gf('django.db.models.fields.IntegerField')(null=True, blank=True)),
        ))
        db.send_create_signal(u'porkweb', ['Job'])
        # Adding model 'Log'
        db.create_table(u'porkweb_log', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('when', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
            ('line', self.gf('django.db.models.fields.TextField')()),
        ))
        db.send_create_signal(u'porkweb', ['Log'])
    def backwards(self, orm):
        """Drop all porkweb tables (reverse of forwards)."""
        # Deleting model 'JobServer'
        db.delete_table(u'porkweb_jobserver')
        # Deleting model 'AttackType'
        db.delete_table(u'porkweb_attacktype')
        # Deleting model 'Param'
        db.delete_table(u'porkweb_param')
        # Deleting model 'AttackParam'
        db.delete_table(u'porkweb_attackparam')
        # Deleting model 'JobParam'
        db.delete_table(u'porkweb_jobparam')
        # Deleting model 'HashType'
        db.delete_table(u'porkweb_hashtype')
        # Deleting model 'Cracked'
        db.delete_table(u'porkweb_cracked')
        # Deleting model 'Job'
        db.delete_table(u'porkweb_job')
        # Deleting model 'Log'
        db.delete_table(u'porkweb_log')
    # Frozen ORM definitions used by South for this migration's `orm` object.
    models = {
        u'porkweb.attackparam': {
            'Meta': {'object_name': 'AttackParam', '_ormbases': [u'porkweb.Param']},
            'attack': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'params'", 'to': u"orm['porkweb.AttackType']"}),
            u'param_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['porkweb.Param']", 'unique': 'True', 'primary_key': 'True'})
        },
        u'porkweb.attacktype': {
            'Meta': {'object_name': 'AttackType'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
            'notes': ('django.db.models.fields.TextField', [], {'blank': 'True'})
        },
        u'porkweb.cracked': {
            'Meta': {'object_name': 'Cracked'},
            'hash': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'job': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['porkweb.Job']"}),
            'value': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'when': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
        },
        u'porkweb.hashtype': {
            'Meta': {'object_name': 'HashType'},
            'hashcat': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'hashcatType': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
            'ocllite': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'oclplus': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
        },
        u'porkweb.job': {
            'Meta': {'object_name': 'Job'},
            'attackType': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['porkweb.AttackType']"}),
            'eta': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'finished': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'hashType': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['porkweb.HashType']"}),
            'hashes': ('django.db.models.fields.TextField', [], {}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'jobServer': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['porkweb.JobServer']", 'null': 'True', 'blank': 'True'}),
            'progress': ('django.db.models.fields.FloatField', [], {'default': '0'}),
            'results': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'speed': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'started': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'status': ('django.db.models.fields.CharField', [], {'default': "'New'", 'max_length': '16'})
        },
        u'porkweb.jobparam': {
            'Meta': {'object_name': 'JobParam', '_ormbases': [u'porkweb.Param']},
            'job': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'params'", 'to': u"orm['porkweb.Job']"}),
            u'param_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['porkweb.Param']", 'unique': 'True', 'primary_key': 'True'})
        },
        u'porkweb.jobserver': {
            'Meta': {'object_name': 'JobServer'},
            'details': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'hostname': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'ipaddr': ('django.db.models.fields.IPAddressField', [], {'max_length': '15'}),
            'os': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
            'port': ('django.db.models.fields.IntegerField', [], {'default': '8117'}),
            'status': ('django.db.models.fields.CharField', [], {'default': "'Offline'", 'max_length': '16'})
        },
        u'porkweb.log': {
            'Meta': {'object_name': 'Log'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'line': ('django.db.models.fields.TextField', [], {}),
            'when': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
        },
        u'porkweb.param': {
            'Meta': {'object_name': 'Param'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
            'value': ('django.db.models.fields.CharField', [], {'max_length': '64'})
        }
    }
    complete_apps = ['porkweb']
|
|
#!/usr/bin/python
import cgi
import os
import sys
import json
import urllib
import requests
import networkx as nx
from networkx.readwrite import json_graph
from flask import Flask, render_template, url_for, request, redirect
import settings
app = Flask(__name__)
# Deployment configuration, sourced from the local settings module:
# metric backends (ganglia/prometheus), dashboard URLs, and the directory
# used for generated graph files.
enable_ganglia=settings.enable_ganglia
ganglia_url=settings.ganglia_url
enable_prometheus=settings.enable_prometheus
prometheus_url=settings.prometheus_url
kibana_url=settings.kibana_url
pathprefix=settings.pathprefix
json_filepath=settings.json_filepath
def errorhtml(txt):
    """Return an error string for embedding in a response page."""
    return "Error: {0}".format(txt)
def drawimage(filename):
    """Render <pathprefix>/<filename>.txt (graphviz source) to an SVG via ``dot``."""
    source = '%s/%s.txt' % (pathprefix, filename)
    target = '%s/%s.svg' % (pathprefix, filename)
    os.system('dot -Tsvg %s -o %s' % (source, target))
@app.route("/applconn", methods=["POST"])
def applconn():
# Initialize Graph
G=nx.DiGraph()
with open(json_filepath) as f:
jsondata=json.loads(f.read())
G=json_graph.node_link_graph(jsondata)
fs=request.form
if (not fs.has_key('key')):
errorhtml('No key defined')
key=fs['key']
if (not key in G.nodes()):
errorhtml('No Such key')
compute_mode="dfs" # "dfs", "distance"
if fs.has_key("dfsmode"):
compute_mode="dfs"
elif fs.has_key("distancemode"):
compute_mode="distance"
if (fs.has_key('distance')):
distance=fs['distance']
else:
distance=None # infinite large number
if (fs.has_key('graphtype')):
graphtype=fs['graphtype'] # 'undirectional' or 'directional'
else:
graphtype='directional'
elif fs.has_key("shortestpathmode"):
compute_mode="shortestpath"
if (fs.has_key('shortest_path_target')):
shortest_path_target=fs['shortest_path_target']
else:
errorhtml("No shortest_path_target is given")
else:
errorhtml("No computation mode is specified")
# if reversed is specified, create reversed graph
if (fs.has_key('reversed')):
reversed=True
else:
reversed=False
if (reversed):
G=G.reverse()
searchtags=['All']
if (fs.has_key('SearchDev')):
searchtags.append('Dev')
if (fs.has_key('SearchOps')):
searchtags.append('Ops')
if (fs.has_key('SearchNet')):
searchtags.append('Net')
if (fs.has_key('SearchSdn')):
searchtags.append('Sdn')
for nodeid in G.nodes():
if (not 'searchtag' in G.node[nodeid] or not G.node[nodeid]['searchtag'] in searchtags):
G.remove_node(nodeid)
if (not key in G.nodes()):
errorhtml('No Such key in given search codition')
## Compute Tree from key node
if (compute_mode=="dfs"):
st=nx.dfs_tree(G, key) # "st" means "spanning tree"
# add other edge if other paths are there:
for node1 in st.nodes():
for node2 in st.nodes():
if G.has_edge(node1, node2):
st.add_edge(node1,node2)
elif (compute_mode=="distance"):
if (graphtype == "undirectional"):
G=nx.Graph(G)
st=nx.Graph()
else:
st=nx.DiGraph()
if (distance == None):
paths = nx.single_source_shortest_path(G, key)
else:
paths = nx.single_source_shortest_path(G, key, cutoff=distance)
for target_node in paths.keys():
st.add_path(paths[target_node])
elif (compute_mode=="shortestpath"):
st=nx.DiGraph()
try:
for path in nx.all_shortest_paths(G, key, shortest_path_target):
st.add_path(path)
except(nx.exception.NetworkXNoPath):
errorhtml('No path found in given search codition')
### add attribute
for n in st:
tmp = st.node[n]
tmp['name'] = n
if (enable_ganglia or settings.enable_prometheus):
def get_url_for_server_metric_func_ganglia(key):
return ('{0}/api/metrics.php?host={1}&metric_name=load_one'.format(ganglia_url, key))
def get_url_for_haproxy_metric_func_ganglia(key):
# 172.17.0.3-haproxy-main1081
tmp=key.split('-')
hostname=tmp[0]
applname='-'.join(tmp[1:])
return ('{0}/api/metrics.php?host={1}&metric_name={2}'.format(ganglia_url, hostname, applname))
def tmp_metric_func_ganglia(key, kind_of_node):
if (kind_of_node == "server"):
metric_url = get_url_for_server_metric_func_ganglia(key)
elif (kind_of_node == "haproxy"):
metric_url = get_url_for_haproxy_metric_func_ganglia(key)
else:
Exception("No Such kind_of_node: {0}", kind_of_node)
f = urllib.urlopen(metric_url)
js=json.loads(f.read()) # {"status":"ok","message":{"metric_value":"0.51","units":" "}}
f.close()
#print (js)
if (js['status']=='ok'):
load_one=float(js['message']['metric_value'])
else:
load_one=0 # hmm ...
return load_one
def server_metric_func_ganglia(key):
return tmp_metric_func_ganglia(key, "server")
def haproxy_metric_func_ganglia(key):
return tmp_metric_func_ganglia(key, "haproxy")
def tmp_metric_func_prometheus(key, kind_of_node):
# TODO: return value is json, not float
if (kind_of_node == "server"):
metric_url='{0}/api/v1/query?query=node_load1{{instance="{1}:9100"}}'.format(prometheus_url, key)
elif (kind_of_node == "haproxy"):
tmp=key.split('-')
nodename=tmp[0]
applname=tmp[2]
metric_url='{0}/api/v1/query?query=haproxy_frontend_current_sessions{{frontend="{1}",instance="{2}:9101"}}'.format(prometheus_url, applname, nodename)
else:
Exception("No Such kind_of_node: {0}", kind_of_node)
returned = requests.get(metric_url)
js=json.loads(returned.content) # {"status":"success","data":{"resultType":"vector","result":[{"metric":{"__name__":"node_load1","instance":"172.17.0.3:9100","job":"prometheus"},"value":[1495462713.021,"0.91"]}]}}
#print (js)
if (js['status']=='success'):
if (len(js['data']['result']) > 0):
load_one=float(js['data']['result'][0]["value"][1])
else:
load_one=0 # hmmm ...
else:
load_one=0 # hmm ...
return load_one
def server_metric_func_prometheus(key):
return tmp_metric_func_prometheus(key, "server")
def haproxy_metric_func_prometheus(key):
return tmp_metric_func_prometheus(key, "haproxy")
if (settings.enable_ganglia):
server_metric_func=server_metric_func_ganglia
haproxy_metric_func=haproxy_metric_func_ganglia
if (settings.enable_prometheus):
server_metric_func=server_metric_func_prometheus
haproxy_metric_func=haproxy_metric_func_prometheus
node_types=[
{"type": "server",
"metric_func": server_metric_func,
"lower_bound": 2.0,
"upper_bound": 5.0,
},
{"type": "haproxy-thread",
"metric_func": haproxy_metric_func,
"lower_bound": 5,
"upper_bound": 15,
}
]
try:
if (n.find('haproxy-') > -1):
node_type = 'haproxy-thread'
else:
node_type = 'server'
ctx = filter (lambda x: x["type"] == node_type, node_types) [0]
#print (ctx)
load_one=ctx["metric_func"](n)
#print (load_one)
if (ctx["upper_bound"] < load_one):
tmp['color'] = '#ff634f'
elif (ctx["lower_bound"] < load_one < ctx["upper_bound"]):
tmp['color'] = '#ffde5e'
else:
tmp['color'] = '#e2ecff'
except (IOError):
pass # ganglia is not available
else:
if (G.node[n].has_key('color')):
tmp['color'] = G.node[n]['color']
# set other things
tmp['href'] = './node-hrefs?key={0}'.format(n)
if (G.node[n].has_key('searchtag')):
tmp['searchtag'] = G.node[n]['searchtag']
# json output
js=json_graph.node_link_data(st)
with open('{0}/1.json'.format(pathprefix), 'w') as f:
f.write(json.dumps(js, sort_keys=True, indent=4))
## Graphviz
A=nx.nx_agraph.to_agraph(st)
A.write('%s/1.txt' % (pathprefix))
## SVG
drawimage('1')
### Write down all the data
#A=nx.nx_agraph.to_agraph(G)
#A.write(pathprefix+'/a.txt')
#drawimage('a')
## inline svg
with open(pathprefix+'/1.svg') as svgfile:
svgdata=svgfile.read()
## CGI
return ("""
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<title></title>
</head>
<body>
<h2>Summary</h2>
<div id="d3">
<div id="chart"></div>
<script type="text/javascript" src="https://cdnjs.cloudflare.com/ajax/libs/d3/3.4.11/d3.min.js"></script>
<link type="text/css" rel="stylesheet" href="static/applconn.css"/>
<script>var jsonpath="static/1.json";</script>
<script type="text/javascript" src="static/applconn.js"></script>
<a href="static/1.json">d3-graph-data</a>
</div>
<h2>Detail</h2>
%s
<div id="data">
<a href="static/1.txt">Data</a>
</div>
</body>
</html>
""" % (svgdata)
)
@app.route("/node-hrefs")
def node_hrefs():
# Initialize Graph
G=nx.DiGraph()
with open(json_filepath) as f:
jsondata=json.loads(f.read())
G=json_graph.node_link_graph(jsondata)
fs=request.args # when GET, use this
if (not fs.has_key('key')):
errorhtml('No key defined')
key=fs['key']
if (not key in G.nodes()):
errorhtml('No Such key')
# create urls:
urls = []
# default url
if (G.node[key].has_key('href')):
urls.append(G.node[key]['href'])
# ganglia url
if (settings.enable_ganglia):
if (key.find('_cpu') > -1):
urls.append('{0}/graph_all_periods.php?hreg%5B%5D={1}&mreg%5B%5D=cpu_&aggregate=1'.format(ganglia_url, key[:-4]))
elif (key.find('-haproxy') > -1):
urls.append('{0}/graph_all_periods.php?hreg%5B%5D={1}&mreg%5B%5D=haproxy&aggregate=1'.format(ganglia_url, key.split('-')[0]))
else:
urls.append('{0}?c=unspecified&h={1}'.format(ganglia_url, key))
if (settings.enable_prometheus):
if (key.find('-haproxy') > -1):
tmp=key.split('-')
nodename=tmp[0]
applname=tmp[2]
urls.append('{0}/graph?g0.range_input=1h&g0.expr=haproxy_frontend_current_sessions{{instance%3D"{1}%3A9101",frontend%3D"{2}"}}&g0.tab=0'.format(prometheus_url, nodename, applname))
else:
urls.append('{0}/graph?g0.range_input=1h&g0.expr=node_load1{{instance%3D"{1}%3A9100"}}&g0.tab=0'.format(prometheus_url, key))
# kibana url
if (settings.enable_elasticsearch):
urls.append('{0}{1}?id={2}'.format(kibana_url, key, G.node[key]['kibanaid']))
# if only one url is found, return that
if (len(urls) == 1):
return (redirect(urls[0], code=302))
# join urls
urlhtml='<br/>'.join(
'<a href="{0}">link</a>'.format(url) for url in urls
)
## CGI
return ("""
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<title></title>
</head>
<body>
{0}
</body>
</html>
""".format (urlhtml)
)
@app.route("/")
def index():
return render_template('index.html')
@app.route("/prom-sd-file")
def prom_sd_file():
with open(json_filepath) as f:
jsondata=json.loads(f.read())
tmp=[node["name"]+":9100" for node in jsondata["nodes"] if node["searchtag"] == "All" and node["name"].find ("172.17") > -1]
return json.dumps([{"targets": tmp}])
if __name__ == "__main__":
app.run()
|
|
import asyncio
import base64
import binascii
import io
import json
import mimetypes
import os
import re
import uuid
import warnings
import zlib
from collections import Mapping, Sequence, deque
from pathlib import Path
from urllib.parse import parse_qsl, quote, unquote, urlencode
from multidict import CIMultiDict
from .hdrs import (CONTENT_DISPOSITION, CONTENT_ENCODING, CONTENT_LENGTH,
CONTENT_TRANSFER_ENCODING, CONTENT_TYPE)
from .helpers import parse_mimetype
from .protocol import HttpParser
__all__ = ('MultipartReader', 'MultipartWriter',
           'BodyPartReader', 'BodyPartWriter',
           'BadContentDispositionHeader', 'BadContentDispositionParam',
           'parse_content_disposition', 'content_disposition_filename')
# Character classes from RFC 2616 section 2.2, used to validate "token"s.
CHAR = set(chr(i) for i in range(0, 128))
CTL = set(chr(i) for i in range(0, 32)) | {chr(127), }
SEPARATORS = {'(', ')', '<', '>', '@', ',', ';', ':', '\\', '"', '/', '[', ']',
              '?', '=', '{', '}', ' ', chr(9)}
# NOTE(review): ^ is symmetric difference, so chr(9) (present in both CTL
# and SEPARATORS) ends up back in TOKEN — looks unintended, but kept as-is
# to match existing behavior.
TOKEN = CHAR ^ CTL ^ SEPARATORS
class BadContentDispositionHeader(RuntimeWarning):
    """Warned when a whole ``Content-Disposition`` header is malformed."""
    pass
class BadContentDispositionParam(RuntimeWarning):
    """Warned when a single ``Content-Disposition`` parameter is malformed."""
    pass
def parse_content_disposition(header):
    """Parse a ``Content-Disposition`` header value.

    Returns ``(disposition_type, params)`` where *params* maps parameter
    names to their decoded values, or ``(None, {})`` when the header is
    missing or malformed.  Malformed input is reported via
    :class:`BadContentDispositionHeader` (whole header rejected) or
    :class:`BadContentDispositionParam` (single parameter skipped)
    warnings, following RFC 6266 / RFC 2231 / RFC 5987 semantics.
    """
    # --- small grammar helpers ---
    def is_token(string):
        # Non-empty and composed only of RFC 2616 "token" characters.
        return string and TOKEN >= set(string)
    def is_quoted(string):
        return string[0] == string[-1] == '"'
    def is_rfc5987(string):
        # RFC 5987 ext-value: charset'language'value (exactly two quotes).
        return is_token(string) and string.count("'") == 2
    def is_extended_param(string):
        return string.endswith('*')
    def is_continuous_param(string):
        # RFC 2231 continuation: name*0, name*1*, ... (digits after '*').
        pos = string.find('*') + 1
        if not pos:
            return False
        substring = string[pos:-1] if string.endswith('*') else string[pos:]
        return substring.isdigit()
    def unescape(text, *, chars=''.join(map(re.escape, CHAR))):
        # Undo backslash-escaping inside quoted-strings.
        return re.sub('\\\\([{}])'.format(chars), '\\1', text)
    if not header:
        return None, {}
    disptype, *parts = header.split(';')
    if not is_token(disptype):
        warnings.warn(BadContentDispositionHeader(header))
        return None, {}
    params = {}
    for item in parts:
        if '=' not in item:
            warnings.warn(BadContentDispositionHeader(header))
            return None, {}
        key, value = item.split('=', 1)
        key = key.lower().strip()
        value = value.lstrip()
        if key in params:
            # Duplicate parameter names invalidate the whole header.
            warnings.warn(BadContentDispositionHeader(header))
            return None, {}
        if not is_token(key):
            # Bad parameter name: skip it, keep parsing the rest.
            warnings.warn(BadContentDispositionParam(item))
            continue
        elif is_continuous_param(key):
            if is_quoted(value):
                value = unescape(value[1:-1])
            elif not is_token(value):
                warnings.warn(BadContentDispositionParam(item))
                continue
        elif is_extended_param(key):
            if is_rfc5987(value):
                encoding, _, value = value.split("'", 2)
                # Empty charset defaults to UTF-8.
                encoding = encoding or 'utf-8'
            else:
                warnings.warn(BadContentDispositionParam(item))
                continue
            try:
                value = unquote(value, encoding, 'strict')
            except UnicodeDecodeError:  # pragma: nocover
                warnings.warn(BadContentDispositionParam(item))
                continue
        else:
            if is_quoted(value):
                value = unescape(value[1:-1].lstrip('\\/'))
            elif not is_token(value):
                warnings.warn(BadContentDispositionHeader(header))
                return None, {}
        params[key] = value
    return disptype.lower(), params
def content_disposition_filename(params):
    """Extract the effective filename from parsed Content-Disposition params.

    Preference order: ``filename*`` (already decoded), plain ``filename``,
    then RFC 2231 continuation segments (``filename*0``, ``filename*1*``,
    ...) joined in order and percent-decoded if an encoding prefix is
    present.  Returns ``None`` when no filename can be derived.
    """
    if not params:
        return None
    if 'filename*' in params:
        return params['filename*']
    if 'filename' in params:
        return params['filename']
    # Reassemble RFC 2231 continuations: segments must be consecutively
    # numbered from zero; stop at the first gap.
    segments = []
    candidates = sorted(item for item in params.items()
                        if item[0].startswith('filename*'))
    for index, (key, segment) in enumerate(candidates):
        _, tail = key.split('*', 1)
        tail = tail[:-1] if tail.endswith('*') else tail
        if tail != str(index):
            break
        segments.append(segment)
    if not segments:
        return None
    joined = ''.join(segments)
    if "'" not in joined:
        return joined
    # Extended form: charset'language'percent-encoded-value.
    encoding, _, joined = joined.split("'", 2)
    return unquote(joined, encoding or 'utf-8', 'strict')
class MultipartResponseWrapper(object):
    """Ties a :class:`MultipartBodyReader` to the HTTP response feeding it,
    so that the underlying connection is released as soon as the stream is
    fully consumed."""
    def __init__(self, resp, stream):
        self.resp = resp
        self.stream = stream
    @asyncio.coroutine
    def __aiter__(self):
        return self
    @asyncio.coroutine
    def __anext__(self):
        # Delegate to next(); translate exhaustion into async-iteration stop.
        chunk = yield from self.next()
        if chunk is None:
            raise StopAsyncIteration  # NOQA
        return chunk
    def at_eof(self):
        """Return ``True`` when all response data had been read.
        :rtype: bool
        """
        return self.resp.content.at_eof()
    @asyncio.coroutine
    def next(self):
        """Emit the next multipart reader object, releasing the response
        once the wrapped stream reports EOF."""
        reader = yield from self.stream.next()
        if self.stream.at_eof():
            yield from self.release()
        return reader
    @asyncio.coroutine
    def release(self):
        """Read any remaining content to the void and release the
        connection gracefully."""
        yield from self.resp.release()
class BodyPartReader(object):
    """Multipart reader for single body part.

    Consumes one part of a multipart stream up to its delimiting boundary,
    using length-based reads when ``Content-Length`` is present, otherwise
    boundary scanning.
    """
    # Default size, in bytes, for chunked reads.
    chunk_size = 8192
    def __init__(self, boundary, headers, content):
        self.headers = headers
        self._boundary = boundary
        self._content = content
        # True once this part has been fully consumed.
        self._at_eof = False
        length = self.headers.get(CONTENT_LENGTH, None)
        # Declared body length, or None when unknown.
        self._length = int(length) if length is not None else None
        self._read_bytes = 0
        # Lines read ahead of the caller during boundary detection.
        self._unread = deque()
        # Previous chunk kept for cross-chunk boundary detection.
        self._prev_chunk = None
        # Consecutive reads observed at underlying-stream EOF (sanity cap).
        self._content_eof = 0
    @asyncio.coroutine
    def __aiter__(self):
        return self
    @asyncio.coroutine
    def __anext__(self):
        part = yield from self.next()
        if part is None:
            raise StopAsyncIteration  # NOQA
        return part
    @asyncio.coroutine
    def next(self):
        # Returns the whole remaining body, or None when exhausted.
        item = yield from self.read()
        if not item:
            return None
        return item
    @asyncio.coroutine
    def read(self, *, decode=False):
        """Reads body part data.
        :param bool decode: decode the data per the `Content-Encoding` /
                            `Content-Transfer-Encoding` headers; when
                            absent the data is returned untouched
        :rtype: bytearray
        """
        if self._at_eof:
            return b''
        data = bytearray()
        if self._length is None:
            # Unknown length: scan line by line until the boundary.
            while not self._at_eof:
                data.extend((yield from self.readline()))
        else:
            # Known length: read in fixed-size chunks.
            while not self._at_eof:
                data.extend((yield from self.read_chunk(self.chunk_size)))
        if decode:
            return self.decode(data)
        return data
    @asyncio.coroutine
    def read_chunk(self, size=chunk_size):
        """Reads body part content chunk of the specified size.
        :param int size: chunk size
        :rtype: bytearray
        """
        if self._at_eof:
            return b''
        if self._length:
            chunk = yield from self._read_chunk_from_length(size)
        else:
            chunk = yield from self._read_chunk_from_stream(size)
        self._read_bytes += len(chunk)
        if self._read_bytes == self._length:
            self._at_eof = True
        if self._at_eof:
            # The part body must be terminated by exactly one CRLF.
            assert b'\r\n' == (yield from self._content.readline()), \
                'reader did not read all the data or it is malformed'
        return chunk
    @asyncio.coroutine
    def _read_chunk_from_length(self, size):
        """Reads body part content chunk of the specified size.
        The body part must have a `Content-Length` header with proper value.
        :param int size: chunk size
        :rtype: bytearray
        """
        assert self._length is not None, \
            'Content-Length required for chunked read'
        # Never read past the declared part length.
        chunk_size = min(size, self._length - self._read_bytes)
        chunk = yield from self._content.read(chunk_size)
        return chunk
    @asyncio.coroutine
    def _read_chunk_from_stream(self, size):
        """Reads content chunk of body part with unknown length.
        The `Content-Length` header for body part is not necessary.
        :param int size: chunk size
        :rtype: bytearray
        """
        assert size >= len(self._boundary) + 2, \
            'Chunk size must be greater or equal than boundary length + 2'
        first_chunk = self._prev_chunk is None
        if first_chunk:
            self._prev_chunk = yield from self._content.read(size)
        chunk = yield from self._content.read(size)
        self._content_eof += int(self._content.at_eof())
        assert self._content_eof < 3, "Reading after EOF"
        # Search for the boundary across the seam of the two buffered chunks.
        window = self._prev_chunk + chunk
        sub = b'\r\n' + self._boundary
        if first_chunk:
            idx = window.find(sub)
        else:
            idx = window.find(sub, max(0, len(self._prev_chunk) - len(sub)))
        if idx >= 0:
            # pushing boundary back to content
            self._content.unread_data(window[idx:])
            if size > idx:
                self._prev_chunk = self._prev_chunk[:idx]
            chunk = window[len(self._prev_chunk):idx]
            if not chunk:
                self._at_eof = True
        if 0 < len(chunk) < len(sub) and not self._content_eof:
            # Chunk too short to rule out a split boundary: buffer and retry.
            self._prev_chunk += chunk
            self._at_eof = False
            return b''
        result = self._prev_chunk
        self._prev_chunk = chunk
        return result
    @asyncio.coroutine
    def readline(self):
        """Reads the body part line by line.
        :rtype: bytearray
        """
        if self._at_eof:
            return b''
        if self._unread:
            line = self._unread.popleft()
        else:
            line = yield from self._content.readline()
        if line.startswith(self._boundary):
            # the very last boundary may not come with \r\n,
            # so set single rules for everyone
            sline = line.rstrip(b'\r\n')
            boundary = self._boundary
            last_boundary = self._boundary + b'--'
            # ensure that we read exactly the boundary, not something alike
            if sline == boundary or sline == last_boundary:
                self._at_eof = True
                self._unread.append(line)
                return b''
        else:
            # Peek one line ahead so the CRLF preceding a boundary is not
            # returned as payload data.
            next_line = yield from self._content.readline()
            if next_line.startswith(self._boundary):
                line = line[:-2]  # strip CRLF but only once
            self._unread.append(next_line)
        return line
    @asyncio.coroutine
    def release(self):
        """Like :meth:`read`, but reads all the data to the void.
        :rtype: None
        """
        if self._at_eof:
            return
        if self._length is None:
            while not self._at_eof:
                yield from self.readline()
        else:
            while not self._at_eof:
                yield from self.read_chunk(self.chunk_size)
    @asyncio.coroutine
    def text(self, *, encoding=None):
        """Like :meth:`read`, but assumes that body part contains text data.
        :param str encoding: Custom text encoding. Overrides the charset
                             param of the `Content-Type` header
        :rtype: str
        """
        data = yield from self.read(decode=True)
        encoding = encoding or self.get_charset(default='latin1')
        return data.decode(encoding)
    @asyncio.coroutine
    def json(self, *, encoding=None):
        """Like :meth:`read`, but assumes that body part contains JSON data.
        :param str encoding: Custom JSON encoding. Overrides the charset
                             param of the `Content-Type` header
        """
        data = yield from self.read(decode=True)
        if not data:
            return None
        encoding = encoding or self.get_charset(default='utf-8')
        return json.loads(data.decode(encoding))
    @asyncio.coroutine
    def form(self, *, encoding=None):
        """Like :meth:`read`, but assumes that body part contains form
        urlencoded data.
        :param str encoding: Custom form encoding. Overrides the charset
                             param of the `Content-Type` header
        """
        data = yield from self.read(decode=True)
        if not data:
            return None
        encoding = encoding or self.get_charset(default='utf-8')
        return parse_qsl(data.rstrip().decode(encoding), encoding=encoding)
    def at_eof(self):
        """Returns ``True`` if the boundary was reached or
        ``False`` otherwise.
        :rtype: bool
        """
        return self._at_eof
    def decode(self, data):
        """Decodes data according the specified `Content-Encoding`
        or `Content-Transfer-Encoding` headers value.
        Supports ``gzip``, ``deflate`` and ``identity`` encodings for
        `Content-Encoding` header.
        Supports ``base64``, ``quoted-printable`` encodings for
        `Content-Transfer-Encoding` header.
        :param bytearray data: Data to decode.
        :raises: :exc:`RuntimeError` - if encoding is unknown.
        :rtype: bytes
        """
        # Transfer-encoding is undone first, then content-encoding.
        if CONTENT_TRANSFER_ENCODING in self.headers:
            data = self._decode_content_transfer(data)
        if CONTENT_ENCODING in self.headers:
            return self._decode_content(data)
        return data
    def _decode_content(self, data):
        encoding = self.headers[CONTENT_ENCODING].lower()
        if encoding == 'deflate':
            # Negative wbits: raw deflate stream, no zlib header.
            return zlib.decompress(data, -zlib.MAX_WBITS)
        elif encoding == 'gzip':
            # 16 + MAX_WBITS selects gzip header handling.
            return zlib.decompress(data, 16 + zlib.MAX_WBITS)
        elif encoding == 'identity':
            return data
        else:
            raise RuntimeError('unknown content encoding: {}'.format(encoding))
    def _decode_content_transfer(self, data):
        encoding = self.headers[CONTENT_TRANSFER_ENCODING].lower()
        if encoding == 'base64':
            return base64.b64decode(data)
        elif encoding == 'quoted-printable':
            return binascii.a2b_qp(data)
        else:
            raise RuntimeError('unknown content transfer encoding: {}'
                               ''.format(encoding))
    def get_charset(self, default=None):
        """Returns charset parameter from ``Content-Type`` header or default.
        """
        ctype = self.headers.get(CONTENT_TYPE, '')
        *_, params = parse_mimetype(ctype)
        return params.get('charset', default)
    @property
    def filename(self):
        """Returns filename specified in Content-Disposition header or ``None``
        if missed or header is malformed."""
        _, params = parse_content_disposition(
            self.headers.get(CONTENT_DISPOSITION))
        return content_disposition_filename(params)
class MultipartReader(object):
    """Multipart body reader.

    Iterates over the parts of a multipart payload, handing each one out
    as a :class:`BodyPartReader` (or a nested multipart reader for
    multipart/* parts).
    """
    #: Response wrapper, used when multipart readers constructs from response.
    response_wrapper_cls = MultipartResponseWrapper
    #: Multipart reader class, used to handle multipart/* body parts.
    #: None points to type(self)
    multipart_reader_cls = None
    #: Body part reader class for non multipart/* content types.
    part_reader_cls = BodyPartReader
    def __init__(self, headers, content):
        self.headers = headers
        # Boundary line as bytes, with the leading "--" prepended.
        self._boundary = ('--' + self._get_boundary()).encode()
        self._content = content
        # Part reader currently being consumed, if any.
        self._last_part = None
        self._at_eof = False
        # True until the opening boundary has been skipped.
        self._at_bof = True
        # Lines pushed back by a finished part reader.
        self._unread = []
    @asyncio.coroutine
    def __aiter__(self):
        return self
    @asyncio.coroutine
    def __anext__(self):
        part = yield from self.next()
        if part is None:
            raise StopAsyncIteration  # NOQA
        return part
    @classmethod
    def from_response(cls, response):
        """Constructs reader instance from HTTP response.
        :param response: :class:`~aiohttp.client.ClientResponse` instance
        """
        obj = cls.response_wrapper_cls(response, cls(response.headers,
                                                     response.content))
        return obj
    def at_eof(self):
        """Returns ``True`` if the final boundary was reached or
        ``False`` otherwise.
        :rtype: bool
        """
        return self._at_eof
    @asyncio.coroutine
    def next(self):
        """Emits the next multipart body part."""
        # So, if we're at BOF, we need to skip till the boundary.
        if self._at_eof:
            return
        yield from self._maybe_release_last_part()
        if self._at_bof:
            yield from self._read_until_first_boundary()
            self._at_bof = False
        else:
            yield from self._read_boundary()
        if self._at_eof:  # we just read the last boundary, nothing to do there
            return
        self._last_part = yield from self.fetch_next_part()
        return self._last_part
    @asyncio.coroutine
    def release(self):
        """Reads all the body parts to the void till the final boundary."""
        while not self._at_eof:
            item = yield from self.next()
            if item is None:
                break
            yield from item.release()
    @asyncio.coroutine
    def fetch_next_part(self):
        """Returns the next body part reader."""
        headers = yield from self._read_headers()
        return self._get_part_reader(headers)
    def _get_part_reader(self, headers):
        """Dispatches the response by the `Content-Type` header, returning
        suitable reader instance.
        :param dict headers: Response headers
        """
        ctype = headers.get(CONTENT_TYPE, '')
        mtype, *_ = parse_mimetype(ctype)
        if mtype == 'multipart':
            # Nested multipart: recurse with the configured reader class.
            if self.multipart_reader_cls is None:
                return type(self)(headers, self._content)
            return self.multipart_reader_cls(headers, self._content)
        else:
            return self.part_reader_cls(self._boundary, headers, self._content)
    def _get_boundary(self):
        # Extract and validate the boundary token from Content-Type.
        mtype, *_, params = parse_mimetype(self.headers[CONTENT_TYPE])
        assert mtype == 'multipart', 'multipart/* content type expected'
        if 'boundary' not in params:
            raise ValueError('boundary missed for Content-Type: %s'
                             % self.headers[CONTENT_TYPE])
        boundary = params['boundary']
        # RFC 2046 limits boundaries to 70 characters.
        if len(boundary) > 70:
            raise ValueError('boundary %r is too long (70 chars max)'
                             % boundary)
        return boundary
    @asyncio.coroutine
    def _readline(self):
        # Serve lines pushed back by a finished part before reading anew.
        if self._unread:
            return self._unread.pop()
        return (yield from self._content.readline())
    @asyncio.coroutine
    def _read_until_first_boundary(self):
        while True:
            chunk = yield from self._readline()
            if chunk == b'':
                raise ValueError("Could not find starting boundary %r"
                                 % (self._boundary))
            chunk = chunk.rstrip()
            if chunk == self._boundary:
                return
            elif chunk == self._boundary + b'--':
                self._at_eof = True
                return
    @asyncio.coroutine
    def _read_boundary(self):
        chunk = (yield from self._readline()).rstrip()
        if chunk == self._boundary:
            pass
        elif chunk == self._boundary + b'--':
            # Closing boundary: the whole multipart body is done.
            self._at_eof = True
        else:
            raise ValueError('Invalid boundary %r, expected %r'
                             % (chunk, self._boundary))
    @asyncio.coroutine
    def _read_headers(self):
        # Collect header lines up to the blank separator line.
        lines = [b'']
        while True:
            chunk = yield from self._content.readline()
            chunk = chunk.strip()
            lines.append(chunk)
            if not chunk:
                break
        parser = HttpParser()
        headers, *_ = parser.parse_headers(lines)
        return headers
    @asyncio.coroutine
    def _maybe_release_last_part(self):
        """Ensures that the last read body part is read completely."""
        if self._last_part is not None:
            if not self._last_part.at_eof():
                yield from self._last_part.release()
            # Keep any lines the part read ahead of its boundary.
            self._unread.extend(self._last_part._unread)
            self._last_part = None
class BodyPartWriter(object):
    """Multipart writer for single body part.

    Wraps an arbitrary payload object and serializes it, headers first,
    as one part of a multipart message.
    """
    def __init__(self, obj, headers=None, *, chunk_size=8192):
        if headers is None:
            headers = CIMultiDict()
        elif not isinstance(headers, CIMultiDict):
            headers = CIMultiDict(headers)
        self.obj = obj
        self.headers = headers
        # Read granularity for file-like payloads.
        self._chunk_size = chunk_size
        self._fill_headers_with_defaults()
        # Dispatch table: payload type or (maintype, subtype) -> serializer.
        self._serialize_map = {
            bytes: self._serialize_bytes,
            str: self._serialize_str,
            io.IOBase: self._serialize_io,
            MultipartWriter: self._serialize_multipart,
            ('application', 'json'): self._serialize_json,
            ('application', 'x-www-form-urlencoded'): self._serialize_form
        }
    def _fill_headers_with_defaults(self):
        # Guess Content-Type / Content-Length / Content-Disposition when
        # the caller did not provide them explicitly.
        if CONTENT_TYPE not in self.headers:
            content_type = self._guess_content_type(self.obj)
            if content_type is not None:
                self.headers[CONTENT_TYPE] = content_type
        if CONTENT_LENGTH not in self.headers:
            content_length = self._guess_content_length(self.obj)
            if content_length is not None:
                self.headers[CONTENT_LENGTH] = str(content_length)
        if CONTENT_DISPOSITION not in self.headers:
            filename = self._guess_filename(self.obj)
            if filename is not None:
                self.set_content_disposition('attachment', filename=filename)
    def _guess_content_length(self, obj):
        # Returns the payload length in bytes, or None when unknown.
        if isinstance(obj, bytes):
            return len(obj)
        elif isinstance(obj, str):
            *_, params = parse_mimetype(self.headers.get(CONTENT_TYPE))
            charset = params.get('charset', 'us-ascii')
            return len(obj.encode(charset))
        elif isinstance(obj, io.StringIO):
            *_, params = parse_mimetype(self.headers.get(CONTENT_TYPE))
            charset = params.get('charset', 'us-ascii')
            # Count only what remains from the current stream position.
            return len(obj.getvalue().encode(charset)) - obj.tell()
        elif isinstance(obj, io.BytesIO):
            return len(obj.getvalue()) - obj.tell()
        elif isinstance(obj, io.IOBase):
            try:
                return os.fstat(obj.fileno()).st_size - obj.tell()
            except (AttributeError, OSError):
                return None
        else:
            return None
    def _guess_content_type(self, obj, default='application/octet-stream'):
        if hasattr(obj, 'name'):
            # File-like object: infer the type from its filename extension.
            name = getattr(obj, 'name')
            return mimetypes.guess_type(name)[0]
        elif isinstance(obj, (str, io.StringIO)):
            return 'text/plain; charset=utf-8'
        else:
            return default
    def _guess_filename(self, obj):
        # Only file-like objects carry a usable filename (implicit None
        # return otherwise).
        if isinstance(obj, io.IOBase):
            name = getattr(obj, 'name', None)
            if name is not None:
                return Path(name).name
    def serialize(self):
        """Yields byte chunks for body part."""
        has_encoding = (
            CONTENT_ENCODING in self.headers and
            self.headers[CONTENT_ENCODING] != 'identity' or
            CONTENT_TRANSFER_ENCODING in self.headers
        )
        if has_encoding:
            # since we're following streaming approach which doesn't assumes
            # any intermediate buffers, we cannot calculate real content length
            # with the specified content encoding scheme. So, instead of lying
            # about content length and cause reading issues, we have to strip
            # this information.
            self.headers.pop(CONTENT_LENGTH, None)
        if self.headers:
            yield b'\r\n'.join(
                b': '.join(map(lambda i: i.encode('latin1'), item))
                for item in self.headers.items()
            )
        yield b'\r\n\r\n'
        yield from self._maybe_encode_stream(self._serialize_obj())
        yield b'\r\n'
    def _serialize_obj(self):
        # Pick a serializer: first by (maintype, subtype), then by the
        # payload's Python type.
        obj = self.obj
        mtype, stype, *_ = parse_mimetype(self.headers.get(CONTENT_TYPE))
        serializer = self._serialize_map.get((mtype, stype))
        if serializer is not None:
            return serializer(obj)
        for key in self._serialize_map:
            if not isinstance(key, tuple) and isinstance(obj, key):
                return self._serialize_map[key](obj)
        return self._serialize_default(obj)
    def _serialize_bytes(self, obj):
        yield obj
    def _serialize_str(self, obj):
        # Encode text using the declared charset (us-ascii by default).
        *_, params = parse_mimetype(self.headers.get(CONTENT_TYPE))
        yield obj.encode(params.get('charset', 'us-ascii'))
    def _serialize_io(self, obj):
        # Stream a file-like object in fixed-size chunks.
        while True:
            chunk = obj.read(self._chunk_size)
            if not chunk:
                break
            if isinstance(chunk, str):
                yield from self._serialize_str(chunk)
            else:
                yield from self._serialize_bytes(chunk)
    def _serialize_multipart(self, obj):
        yield from obj.serialize()
    def _serialize_json(self, obj):
        *_, params = parse_mimetype(self.headers.get(CONTENT_TYPE))
        yield json.dumps(obj).encode(params.get('charset', 'utf-8'))
    def _serialize_form(self, obj):
        if isinstance(obj, Mapping):
            obj = list(obj.items())
        return self._serialize_str(urlencode(obj, doseq=True))
    def _serialize_default(self, obj):
        raise TypeError('unknown body part type %r' % type(obj))
    def _maybe_encode_stream(self, stream):
        # Apply Content-Encoding first, then Content-Transfer-Encoding.
        if CONTENT_ENCODING in self.headers:
            stream = self._apply_content_encoding(stream)
        if CONTENT_TRANSFER_ENCODING in self.headers:
            stream = self._apply_content_transfer_encoding(stream)
        yield from stream
    def _apply_content_encoding(self, stream):
        encoding = self.headers[CONTENT_ENCODING].lower()
        if encoding == 'identity':
            yield from stream
        elif encoding in ('deflate', 'gzip'):
            if encoding == 'gzip':
                # 16 + MAX_WBITS emits a gzip header/trailer.
                zlib_mode = 16 + zlib.MAX_WBITS
            else:
                # Negative wbits produces a raw deflate stream.
                zlib_mode = -zlib.MAX_WBITS
            zcomp = zlib.compressobj(wbits=zlib_mode)
            for chunk in stream:
                yield zcomp.compress(chunk)
            else:
                # for/else: flush once the stream is exhausted (no break above).
                yield zcomp.flush()
        else:
            raise RuntimeError('unknown content encoding: {}'
                               ''.format(encoding))
    def _apply_content_transfer_encoding(self, stream):
        encoding = self.headers[CONTENT_TRANSFER_ENCODING].lower()
        if encoding == 'base64':
            # Encode in 3-byte groups so no '=' padding appears mid-stream.
            buffer = bytearray()
            while True:
                if buffer:
                    div, mod = divmod(len(buffer), 3)
                    chunk, buffer = buffer[:div * 3], buffer[div * 3:]
                    if chunk:
                        yield base64.b64encode(chunk)
                chunk = next(stream, None)
                if not chunk:
                    # Source exhausted: emit the (padded) remainder.
                    if buffer:
                        yield base64.b64encode(buffer[:])
                    return
                buffer.extend(chunk)
        elif encoding == 'quoted-printable':
            for chunk in stream:
                yield binascii.b2a_qp(chunk)
        else:
            raise RuntimeError('unknown content transfer encoding: {}'
                               ''.format(encoding))
    def set_content_disposition(self, disptype, **params):
        """Sets ``Content-Disposition`` header.
        :param str disptype: Disposition type: inline, attachment, form-data.
                             Should be valid extension token (see RFC 2183)
        :param dict params: Disposition params
        """
        if not disptype or not (TOKEN > set(disptype)):
            raise ValueError('bad content disposition type {!r}'
                             ''.format(disptype))
        value = disptype
        if params:
            lparams = []
            for key, val in params.items():
                if not key or not (TOKEN > set(key)):
                    raise ValueError('bad content disposition parameter'
                                     ' {!r}={!r}'.format(key, val))
                qval = quote(val, '')
                lparams.append((key, '"%s"' % qval))
                if key == 'filename':
                    # Also emit the RFC 5987 filename* form alongside.
                    lparams.append(('filename*', "utf-8''" + qval))
            sparams = '; '.join('='.join(pair) for pair in lparams)
            value = '; '.join((value, sparams))
        self.headers[CONTENT_DISPOSITION] = value
    @property
    def filename(self):
        """Returns filename specified in Content-Disposition header or ``None``
        if missed."""
        _, params = parse_content_disposition(
            self.headers.get(CONTENT_DISPOSITION))
        return content_disposition_filename(params)
class MultipartWriter(object):
    """Multipart body writer.

    Collects :class:`BodyPartWriter` instances and serializes them into a
    single multipart payload, separated by the boundary advertised in the
    ``Content-Type`` header.
    """
    #: Body part reader class for non multipart/* content types.
    part_writer_cls = BodyPartWriter
    def __init__(self, subtype='mixed', boundary=None):
        if boundary is None:
            boundary = uuid.uuid4().hex
        try:
            boundary.encode('us-ascii')
        except UnicodeEncodeError:
            raise ValueError('boundary should contains ASCII only chars')
        self.headers = CIMultiDict()
        self.headers[CONTENT_TYPE] = 'multipart/{}; boundary="{}"'.format(
            subtype, boundary
        )
        self.parts = []
    def __enter__(self):
        return self
    def __exit__(self, exc_type, exc_val, exc_tb):
        pass
    def __iter__(self):
        return iter(self.parts)
    def __len__(self):
        return len(self.parts)
    @property
    def boundary(self):
        """The boundary token, as ASCII bytes."""
        *_, params = parse_mimetype(self.headers.get(CONTENT_TYPE))
        return params['boundary'].encode('us-ascii')
    def append(self, obj, headers=None):
        """Adds a new body part to multipart writer."""
        if isinstance(obj, self.part_writer_cls):
            # Already a part writer: merge extra headers and keep it.
            if headers:
                obj.headers.update(headers)
            self.parts.append(obj)
        else:
            self.parts.append(self.part_writer_cls(obj, headers or CIMultiDict()))
        return self.parts[-1]
    def append_json(self, obj, headers=None):
        """Helper to append JSON part."""
        headers = headers or CIMultiDict()
        headers[CONTENT_TYPE] = 'application/json'
        return self.append(obj, headers)
    def append_form(self, obj, headers=None):
        """Helper to append form urlencoded part."""
        headers = headers or CIMultiDict()
        headers[CONTENT_TYPE] = 'application/x-www-form-urlencoded'
        assert isinstance(obj, (Sequence, Mapping))
        return self.append(obj, headers)
    def serialize(self):
        """Yields multipart byte chunks."""
        if not self.parts:
            yield b''
            return
        delimiter = b'--' + self.boundary
        for part in self.parts:
            yield delimiter + b'\r\n'
            yield from part.serialize()
        # Closing boundary after the final part.
        yield delimiter + b'--\r\n'
        yield b''
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=invalid-name
"""VGG16 model for Keras.
Reference:
- [Very Deep Convolutional Networks for Large-Scale Image Recognition]
(https://arxiv.org/abs/1409.1556) (ICLR 2015)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.keras import backend
from tensorflow.python.keras.applications import imagenet_utils
from tensorflow.python.keras.engine import training
from tensorflow.python.keras.layers import VersionAwareLayers
from tensorflow.python.keras.utils import data_utils
from tensorflow.python.keras.utils import layer_utils
from tensorflow.python.lib.io import file_io
from tensorflow.python.util.tf_export import keras_export
WEIGHTS_PATH = ('https://storage.googleapis.com/tensorflow/keras-applications/'
'vgg16/vgg16_weights_tf_dim_ordering_tf_kernels.h5')
WEIGHTS_PATH_NO_TOP = ('https://storage.googleapis.com/tensorflow/'
'keras-applications/vgg16/'
'vgg16_weights_tf_dim_ordering_tf_kernels_notop.h5')
layers = VersionAwareLayers()
@keras_export('keras.applications.vgg16.VGG16', 'keras.applications.VGG16')
def VGG16(
    include_top=True,
    weights='imagenet',
    input_tensor=None,
    input_shape=None,
    pooling=None,
    classes=1000,
    classifier_activation='softmax'):
  """Instantiates the VGG16 architecture.

  Reference:
  - [Very Deep Convolutional Networks for Large-Scale Image Recognition](
    https://arxiv.org/abs/1409.1556) (ICLR 2015)

  The default input size for this model is 224x224. Call
  `tf.keras.applications.vgg16.preprocess_input` on inputs before passing
  them to the model: it converts RGB images to BGR and zero-centers each
  color channel with respect to the ImageNet dataset, without scaling.

  Args:
    include_top: whether to include the 3 fully-connected layers at the top
      of the network.
    weights: one of `None` (random initialization), `'imagenet'`
      (pre-training on ImageNet), or a path to a weights file to load.
    input_tensor: optional Keras tensor (i.e. output of `layers.Input()`)
      to use as image input for the model.
    input_shape: optional shape tuple, only to be specified when
      `include_top` is False (otherwise the input shape has to be
      `(224, 224, 3)` with `channels_last` or `(3, 224, 224)` with
      `channels_first`). It should have exactly 3 input channels and
      width/height no smaller than 32, e.g. `(200, 200, 3)`.
    pooling: optional pooling mode for feature extraction when
      `include_top` is False: `None` keeps the 4D output of the last
      convolutional block, `'avg'` applies global average pooling (2D
      output), `'max'` applies global max pooling.
    classes: optional number of classes to classify images into, only to
      be specified when `include_top` is True and no `weights` argument is
      given.
    classifier_activation: a `str` or callable activation for the "top"
      layer; ignored unless `include_top=True`. Set it to `None` to return
      the logits. When loading pretrained weights it can only be `None` or
      `"softmax"`.

  Returns:
    A `keras.Model` instance.

  Raises:
    ValueError: for an invalid `weights` argument or an inconsistent
      `classes` / `include_top` combination.
  """
  if not (weights in {'imagenet', None} or file_io.file_exists_v2(weights)):
    raise ValueError('The `weights` argument should be either '
                     '`None` (random initialization), `imagenet` '
                     '(pre-training on ImageNet), '
                     'or the path to the weights file to be loaded.')
  if weights == 'imagenet' and include_top and classes != 1000:
    raise ValueError('If using `weights` as `"imagenet"` with `include_top`'
                     ' as true, `classes` should be 1000')

  # Resolve a fully-defined input shape for the chosen configuration.
  input_shape = imagenet_utils.obtain_input_shape(
      input_shape,
      default_size=224,
      min_size=32,
      data_format=backend.image_data_format(),
      require_flatten=include_top,
      weights=weights)

  if input_tensor is None:
    img_input = layers.Input(shape=input_shape)
  elif not backend.is_keras_tensor(input_tensor):
    img_input = layers.Input(tensor=input_tensor, shape=input_shape)
  else:
    img_input = input_tensor

  # Convolutional backbone. Each entry is (number of conv layers, filters);
  # generated layer names (block{i}_conv{j}, block{i}_pool) must stay exactly
  # as in the released weight files.
  block_config = [(2, 64), (2, 128), (3, 256), (3, 512), (3, 512)]
  x = img_input
  for block_num, (num_convs, filters) in enumerate(block_config, start=1):
    for conv_num in range(1, num_convs + 1):
      x = layers.Conv2D(
          filters, (3, 3),
          activation='relu',
          padding='same',
          name='block%d_conv%d' % (block_num, conv_num))(x)
    x = layers.MaxPooling2D(
        (2, 2), strides=(2, 2), name='block%d_pool' % block_num)(x)

  if include_top:
    # Classification head.
    x = layers.Flatten(name='flatten')(x)
    x = layers.Dense(4096, activation='relu', name='fc1')(x)
    x = layers.Dense(4096, activation='relu', name='fc2')(x)
    imagenet_utils.validate_activation(classifier_activation, weights)
    x = layers.Dense(classes, activation=classifier_activation,
                     name='predictions')(x)
  elif pooling == 'avg':
    x = layers.GlobalAveragePooling2D()(x)
  elif pooling == 'max':
    x = layers.GlobalMaxPooling2D()(x)

  # Ensure that the model takes into account any potential predecessors of
  # `input_tensor`.
  if input_tensor is not None:
    inputs = layer_utils.get_source_inputs(input_tensor)
  else:
    inputs = img_input

  model = training.Model(inputs, x, name='vgg16')

  # Load weights.
  if weights == 'imagenet':
    if include_top:
      weights_path = data_utils.get_file(
          'vgg16_weights_tf_dim_ordering_tf_kernels.h5',
          WEIGHTS_PATH,
          cache_subdir='models',
          file_hash='64373286793e3c8b2b4e3219cbf3544b')
    else:
      weights_path = data_utils.get_file(
          'vgg16_weights_tf_dim_ordering_tf_kernels_notop.h5',
          WEIGHTS_PATH_NO_TOP,
          cache_subdir='models',
          file_hash='6d6bbae143d832006294945121d1f1fc')
    model.load_weights(weights_path)
  elif weights is not None:
    model.load_weights(weights)

  return model
@keras_export('keras.applications.vgg16.preprocess_input')
def preprocess_input(x, data_format=None):
  # Caffe-style preprocessing: RGB->BGR plus ImageNet mean subtraction.
  return imagenet_utils.preprocess_input(
      x, mode='caffe', data_format=data_format)
@keras_export('keras.applications.vgg16.decode_predictions')
def decode_predictions(preds, top=5):
  # Delegate to the shared ImageNet decoder.
  decoded = imagenet_utils.decode_predictions(preds, top=top)
  return decoded
# Fill in the shared Keras-applications docstring templates for the
# caffe-style preprocessing used by VGG16.
preprocess_input.__doc__ = imagenet_utils.PREPROCESS_INPUT_DOC.format(
    mode='',
    ret=imagenet_utils.PREPROCESS_INPUT_RET_DOC_CAFFE,
    error=imagenet_utils.PREPROCESS_INPUT_ERROR_DOC)
decode_predictions.__doc__ = imagenet_utils.decode_predictions.__doc__
|
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Author: Matt Flood
"""
MediumBot's configuration GUI to make it easier for non-technical users to use
the bot.
Note: this is a starting point for the GUI and is not finished.
"""
from Tkinter import *
from ttk import *
from tempfile import mkstemp
from shutil import move
from os import remove, close
import os
import re
# Path of the bot script whose configuration variables this GUI reads/edits.
FILE_PATH = "MediumBot.py"
class MediumBotGUI(Frame):
    """Tkinter configuration GUI for MediumBot.

    Parses the current variable values out of ``MediumBot.py``, presents
    them in a form, and on "Start Bot" writes the edited values back into
    the file and launches the bot.
    """

    def __init__(self, parent):
        """
        Initialize the MediumBotGUI frame inside *parent* and build the UI.
        """
        Frame.__init__(self, parent)
        self.parent = parent
        self.initUI()

    def initUI(self):
        """
        Initialize the user interface (one row per configuration variable).
        """
        mediumBotVariables = self.parseMediumBot()
        self.parent.title("Medium Bot")
        self.pack(fill=BOTH, expand=True)
        self.columnconfigure(1, weight=1)
        self.columnconfigure(3, pad=7)
        self.rowconfigure(3, weight=1)
        self.rowconfigure(5, pad=7)
        self.initEmailUI(mediumBotVariables)
        self.initPasswordUI(mediumBotVariables)
        self.initLoginServiceUI(mediumBotVariables)
        self.initDriverUI(mediumBotVariables)
        self.initLikePostsUI(mediumBotVariables)
        self.initRandomizeLikesUI(mediumBotVariables)
        self.initMaxLikesUI(mediumBotVariables)
        self.initCommentUI(mediumBotVariables)
        self.initRandomizeCommentsUI(mediumBotVariables)
        self.initCommentsUI(mediumBotVariables)
        self.initArticleBlackListUI(mediumBotVariables)
        self.initFollowUsersUI(mediumBotVariables)
        self.initRandomizeFollowingUI(mediumBotVariables)
        self.initUnFollowUsersUI(mediumBotVariables)
        self.initRandomizeUnFollowingUI(mediumBotVariables)
        self.initUnFollowingUsersBlackListUI(mediumBotVariables)
        self.initUseRelatedTagsUI(mediumBotVariables)
        self.initArticlesPerTagUI(mediumBotVariables)
        self.initVerboseUI(mediumBotVariables)
        self.initStartButton()

    def _addEntryRow(self, labelText, row, initialValue, show=None):
        """
        Add one labeled text-entry row and return the Entry widget.
        labelText: label shown left of the field.
        row: grid row of the entry widget.
        initialValue: text pre-filled into the field.
        show: optional mask character (e.g. "*" for passwords).
        """
        Label(self, text=labelText).grid(sticky=W, pady=4, padx=5)
        if show is None:
            field = Entry(self, width=100)
        else:
            field = Entry(self, show=show, width=100)
        field.grid(row=row, column=1, columnspan=3)
        field.insert(10, initialValue)
        return field

    def _addCheckboxRow(self, labelText, row, initialValue):
        """
        Add one labeled checkbox row; return its (StringVar, Checkbutton).
        The StringVar holds 'True'/'False' so it can be written back into
        MediumBot.py verbatim as a Python boolean literal.
        """
        Label(self, text=labelText).grid(sticky=W, pady=4, padx=5)
        variable = StringVar(value=initialValue)
        checkbox = Checkbutton(self, text="", variable=variable,
                               onvalue='True', offvalue='False')
        checkbox.grid(row=row, column=1, columnspan=3)
        return variable, checkbox

    def initEmailUI(self, mediumBotVariables):
        """
        Initialize the Email UI
        mediumBotVariables: variables pulled from the MediumBot.py file
        """
        self.emailField = self._addEntryRow(
            "Email: ", 0, mediumBotVariables["EMAIL"])

    def initPasswordUI(self, mediumBotVariables):
        """
        Initialize the Password UI
        mediumBotVariables: variables pulled from the MediumBot.py file
        """
        self.passwordField = self._addEntryRow(
            "Password: ", 1, mediumBotVariables["PASSWORD"], show="*")

    def initLoginServiceUI(self, mediumBotVariables):
        """
        Initialize the Login Service UI
        mediumBotVariables: variables pulled from the MediumBot.py file
        """
        Label(self, text="Service: ").grid(sticky=W, pady=4, padx=5)
        self.serviceDropDown = Combobox(
            self, values=["Google", "Facebook", "Twitter"], width=100)
        self.serviceDropDown.grid(row=2, column=1, columnspan=3)
        # Only pre-select when the file no longer holds the placeholder text.
        if "Google, Twitter, or Facebook" not in mediumBotVariables["LOGIN_SERVICE"]:
            service = mediumBotVariables["LOGIN_SERVICE"].lower()
            if "google" in service:
                self.serviceDropDown.current(0)
            # NOTE(review): "iceweasel" here looks copy-pasted from the driver
            # row -- confirm whether it is intentional for a login service.
            elif "facebook" in service or "iceweasel" in service:
                self.serviceDropDown.current(1)
            elif "twitter" in service:
                self.serviceDropDown.current(2)

    def initDriverUI(self, mediumBotVariables):
        """
        Initialize the Driver UI
        mediumBotVariables: variables pulled from the MediumBot.py file
        """
        Label(self, text="Driver: ").grid(sticky=W, pady=4, padx=5)
        self.driverDropDown = Combobox(
            self, values=["Chrome", "Firefox/Iceweasel", "PhantomJS"], width=100)
        self.driverDropDown.grid(row=3, column=1, columnspan=3)
        self.driverDropDown.current(0)
        # NOTE(review): this guard tests LOGIN_SERVICE (lower-cased) against a
        # mixed-case sentinel, so it is effectively always true; it looks
        # copy-pasted from initLoginServiceUI and probably should test a
        # DRIVER placeholder instead -- confirm against MediumBot.py before
        # changing, behavior kept as-is here.
        if "Google, Twitter, or Facebook" not in mediumBotVariables["LOGIN_SERVICE"].lower():
            driver = mediumBotVariables["DRIVER"].lower()
            if "chrome" in driver:
                self.driverDropDown.current(0)
            elif "firefox" in driver or "iceweasel" in driver:
                self.driverDropDown.current(1)
            elif "phantomjs" in driver:
                self.driverDropDown.current(2)

    def initLikePostsUI(self, mediumBotVariables):
        """
        Initialize the Like Posts UI
        mediumBotVariables: variables pulled from the MediumBot.py file
        """
        self.likePosts, self.likePostsCheckBox = self._addCheckboxRow(
            "Like Posts: ", 4, mediumBotVariables["LIKE_POSTS"])

    def initRandomizeLikesUI(self, mediumBotVariables):
        """
        Initialize the Randomize Likes UI
        mediumBotVariables: variables pulled from the MediumBot.py file
        """
        self.randomizeLikes, self.randomizeLikesCheckBox = self._addCheckboxRow(
            "Randomize Likes: ", 5, mediumBotVariables["RANDOMIZE_LIKING_POSTS"])

    def initMaxLikesUI(self, mediumBotVariables):
        """
        Initialize the Max Likes UI
        mediumBotVariables: variables pulled from the MediumBot.py file
        """
        self.maxLikesField = self._addEntryRow(
            "Max Likes On Posts: ", 6, mediumBotVariables["MAX_LIKES_ON_POST"])

    def initCommentUI(self, mediumBotVariables):
        """
        Initialize the Comment UI
        mediumBotVariables: variables pulled from the MediumBot.py file
        """
        self.commentOnPosts, self.commentPostsCheckBox = self._addCheckboxRow(
            "Comment On Posts: ", 7, mediumBotVariables["COMMENT_ON_POSTS"])

    def initRandomizeCommentsUI(self, mediumBotVariables):
        """
        Initialize the Randomize Comments UI
        mediumBotVariables: variables pulled from the MediumBot.py file
        """
        self.randomizeComments, self.randomizeCommentsCheckBox = self._addCheckboxRow(
            "Randomize Comments: ", 8,
            mediumBotVariables["RANDOMIZE_COMMENTING_ON_POSTS"])

    def initCommentsUI(self, mediumBotVariables):
        """
        Initialize the Comments UI
        mediumBotVariables: variables pulled from the MediumBot.py file
        """
        self.commentsField = self._addEntryRow(
            "Comments: ", 9, mediumBotVariables["COMMENTS"])

    def initArticleBlackListUI(self, mediumBotVariables):
        """
        Initialize the Article Black List UI
        mediumBotVariables: variables pulled from the MediumBot.py file
        """
        self.articleBlackListField = self._addEntryRow(
            "Article Black List: ", 10, mediumBotVariables["ARTICLE_BLACK_LIST"])

    def initFollowUsersUI(self, mediumBotVariables):
        """
        Initialize the Follow Users UI
        mediumBotVariables: variables pulled from the MediumBot.py file
        """
        self.followUsers, self.followUsersCheckBox = self._addCheckboxRow(
            "Follow Users: ", 11, mediumBotVariables["FOLLOW_USERS"])

    def initRandomizeFollowingUI(self, mediumBotVariables):
        """
        Initialize the Randomize Following UI
        mediumBotVariables: variables pulled from the MediumBot.py file
        """
        self.randomizeFollowingUsers, self.randomizeFollowingCheckBox = \
            self._addCheckboxRow("Randomize Following: ", 12,
                                 mediumBotVariables["RANDOMIZE_FOLLOWING_USERS"])

    def initUnFollowUsersUI(self, mediumBotVariables):
        """
        Initialize the UnFollow Users UI
        mediumBotVariables: variables pulled from the MediumBot.py file
        """
        self.unfollowUsers, self.unfollowUsersCheckBox = self._addCheckboxRow(
            "Unfollow Users: ", 13, mediumBotVariables["UNFOLLOW_USERS"])

    def initRandomizeUnFollowingUI(self, mediumBotVariables):
        """
        Initialize the Randomize UnFollowing UI
        mediumBotVariables: variables pulled from the MediumBot.py file
        """
        self.randomizeUnfollowingUsers, self.randomizeUnfollowingCheckBox = \
            self._addCheckboxRow("Randomize Unfollowing: ", 14,
                                 mediumBotVariables["RANDOMIZE_UNFOLLOWING_USERS"])

    def initUnFollowingUsersBlackListUI(self, mediumBotVariables):
        """
        Initialize the UnFollowing Users Black List UI
        mediumBotVariables: variables pulled from the MediumBot.py file
        """
        self.unfollowBlackListField = self._addEntryRow(
            "Unfollow Black List: ", 15,
            mediumBotVariables["UNFOLLOW_USERS_BLACK_LIST"])

    def initUseRelatedTagsUI(self, mediumBotVariables):
        """
        Initialize the Use Related Tags UI
        mediumBotVariables: variables pulled from the MediumBot.py file
        """
        self.useRelatedTags, self.useRelatedTagsCheckBox = self._addCheckboxRow(
            "Use Related Tags: ", 16, mediumBotVariables["USE_RELATED_TAGS"])

    def initArticlesPerTagUI(self, mediumBotVariables):
        """
        Initialize the Articles Per Tag UI
        mediumBotVariables: variables pulled from the MediumBot.py file
        """
        self.articlesPerTagField = self._addEntryRow(
            "Articles Per Tag: ", 17, mediumBotVariables["ARTICLES_PER_TAG"])

    def initVerboseUI(self, mediumBotVariables):
        """
        Initialize the Verbose UI
        mediumBotVariables: variables pulled from the MediumBot.py file
        """
        self.verbose, self.verboseCheckBox = self._addCheckboxRow(
            "Verbose Output: ", 18, mediumBotVariables["VERBOSE"])

    def initStartButton(self):
        """
        Initialize the Start Button UI
        """
        startButton = Button(self, text="Start Bot", command=self.runMediumBot)
        startButton.grid(row=20, column=3)

    def parseMediumBot(self):
        """
        Get the user's current set values in MediumBot.py to display in the
        fields. Returns a dict of variable name -> unquoted value string.
        """
        # Use a context manager so the file handle is not leaked.
        with open(FILE_PATH) as botFile:
            lines = [line.rstrip('\n') for line in botFile]
        charsToRemove = ["'", "[", "]", '"']
        mediumBotVariables = {}
        atStartOfVariables = False
        for line in lines:
            if not atStartOfVariables and "=" in line:
                # First '=' marks the start of the configuration block.
                atStartOfVariables = True
            elif atStartOfVariables and "=" not in line:
                # The block ends at the first line without an assignment.
                break
            if atStartOfVariables:
                mediumBotVar = line.split(" = ")
                for charToRemove in charsToRemove:
                    mediumBotVar[1] = mediumBotVar[1].replace(charToRemove, "")
                mediumBotVariables[mediumBotVar[0]] = mediumBotVar[1]
        return mediumBotVariables

    def runMediumBot(self):
        """
        Validate the form, write the values back into MediumBot.py, close the
        window and run the bot.
        """
        if self.validateFieldValues():
            self.updateMediumBot()
            self.parent.destroy()
            os.system("python " + FILE_PATH)

    def validateFieldValues(self):
        """
        Validate the values entered in the fields.
        TODO: wire up the per-field validators (validateEmail,
        validatePassword, ...) -- currently everything is accepted.
        """
        return True

    def updateMediumBot(self):
        """
        Update the MediumBot with the values in the GUI. Called when the start
        button is clicked.
        """
        self.updateMediumBotVariable("EMAIL = '"+self.emailField.get()+"'")
        self.updateMediumBotVariable("PASSWORD = '"+self.passwordField.get()+"'")
        self.updateMediumBotVariable("LOGIN_SERVICE = '"+self.serviceDropDown.get()+"'")
        self.updateMediumBotVariable("DRIVER = '"+self.driverDropDown.get()+"'")
        self.updateMediumBotVariable("LIKE_POSTS = "+self.likePosts.get())
        self.updateMediumBotVariable("RANDOMIZE_LIKING_POSTS = "+self.randomizeLikes.get())
        self.updateMediumBotVariable("MAX_LIKES_ON_POST = "+self.maxLikesField.get())
        self.updateMediumBotVariable("COMMENT_ON_POSTS = "+self.commentOnPosts.get())
        # BUG FIX: this previously wrote the comment-randomization value into
        # RANDOMIZE_LIKING_POSTS, clobbering the correct value written above.
        self.updateMediumBotVariable("RANDOMIZE_COMMENTING_ON_POSTS = "+self.randomizeComments.get())
        commentsConverted = self.convertStringToArrayString(self.commentsField.get())
        self.updateMediumBotVariable("COMMENTS = "+commentsConverted)
        articlesConverted = self.convertStringToArrayString(self.articleBlackListField.get())
        self.updateMediumBotVariable("ARTICLE_BLACK_LIST = "+articlesConverted)
        self.updateMediumBotVariable("FOLLOW_USERS = "+self.followUsers.get())
        self.updateMediumBotVariable("RANDOMIZE_FOLLOWING_USERS = "+self.randomizeFollowingUsers.get())
        self.updateMediumBotVariable("UNFOLLOW_USERS = "+self.unfollowUsers.get())
        self.updateMediumBotVariable("RANDOMIZE_UNFOLLOWING_USERS = "+self.randomizeUnfollowingUsers.get())
        unfollowConverted = self.convertStringToArrayString(self.unfollowBlackListField.get())
        self.updateMediumBotVariable("UNFOLLOW_USERS_BLACK_LIST = "+unfollowConverted)
        self.updateMediumBotVariable("USE_RELATED_TAGS = "+self.useRelatedTags.get())
        self.updateMediumBotVariable("ARTICLES_PER_TAG = "+self.articlesPerTagField.get())
        self.updateMediumBotVariable("VERBOSE = "+self.verbose.get())

    def validateEmail(self):
        """
        Validate the email address passed is a valid email.
        return: True if the email is valid : False if the email is not valid.
        """
        email = self.emailField.get()
        if not email:
            return False
        # bool() so the documented True/False contract holds (re.match
        # returns a match object).
        return bool(re.match(r"[^@]+@[^@]+\.[^@]+", email))

    def validatePassword(self):
        """
        Validate the password passed is not empty.
        return: True if the password is valid : False if it is not.
        """
        return bool(self.passwordField.get())

    def validateMaxLikesOnPost(self):
        """
        Validate the max likes value passed is valid.
        return: True if the max likes value is valid : False otherwise.
        """
        return self.isNumberValid(self.maxLikesField.get())

    def validateComments(self):
        """
        Validate the comments passed are valid comments.
        return: True if the comments are valid : False otherwise.
        """
        return self.notContainSpecialChars(self.commentsField.get())

    def validateArticleBlackList(self):
        """
        Validate the article black list passed is valid.
        return: True if the article black list is valid : False otherwise.
        """
        return self.notContainSpecialChars(self.articleBlackListField.get())

    def validateUnfollowBlackList(self):
        """
        Validate the unfollow black list passed is valid.
        return: True if the unfollow blacklist is valid : False otherwise.
        """
        return self.notContainSpecialChars(self.unfollowBlackListField.get())

    def validateArticlesPerTag(self):
        """
        Validate the articles per tag value passed is valid.
        return: True if the articles per tag value is valid : False otherwise.
        """
        return self.isNumberValid(self.articlesPerTagField.get())

    def notContainSpecialChars(self, value):
        """
        Validate the string does not have any special characters.
        value: string to validate that special characters do not exist.
        """
        return not set('[~!@#$%^&*()_+{}":;\']+$').intersection(value)

    def isNumberValid(self, value):
        """
        Validate the value is a non-empty whole number.
        value: the value to validate (a string coming from Entry.get()).
        return: True if the number is valid : False otherwise.
        BUG FIX: the previous isinstance(value, int) check could never
        succeed because Entry.get() returns a string.
        """
        result = False
        if value:
            result = str(value).isdigit()
        return result

    def updateMediumBotVariable(self, value):
        """
        Update a variable in the MediumBot.py file.
        value: full replacement assignment line, e.g. "EMAIL = 'a@b.com'".
        """
        variableToUpdate = value.split()[0]
        fh, abs_path = mkstemp()
        with open(abs_path, 'w') as newFile:
            with open(FILE_PATH) as oldFile:
                for line in oldFile:
                    # Rewrite only the top-level assignment line, not lines
                    # that merely mention the variable (conditions, calls,
                    # .lower()). FOLLOW_USERS needs special care because it is
                    # a substring of UNFOLLOW_USERS.
                    if (variableToUpdate+" =" in line and " = " in line
                            and "if" not in line and "elif" not in line
                            and ".lower()" not in line and "(" not in line
                            and ((variableToUpdate == "FOLLOW_USERS"
                                  and "UNFOLLOW_USERS" not in line)
                                 or variableToUpdate != "FOLLOW_USERS")):
                        newFile.write(value+"\n")
                    else:
                        newFile.write(line)
        close(fh)
        # Atomically-ish swap the rewritten file into place.
        remove(FILE_PATH)
        move(abs_path, FILE_PATH)

    def convertStringToArrayString(self, valueToConvert):
        """
        Convert the comma-delimited string to an array formatted string.
        valueToConvert: string to convert into an array formatted string.
        return: the formatted string, e.g. "a, b" -> "['a', 'b']".
        """
        array = [x.strip() for x in valueToConvert.split(',')]
        return "[" + ", ".join("'" + val + "'" for val in array) + "]"
def main():
    """
    Create the root window, fix its geometry, build the GUI and run the
    Tk event loop.
    """
    root = Tk()
    root.geometry("500x570+300+300")
    MediumBotGUI(root)
    root.mainloop()


if __name__ == '__main__':
    main()
|
|
from __future__ import print_function
from six import string_types as _string_types
import numpy as np
import tensorflow as tf
import coremltools
from tensorflow.python.util import compat
from coremltools.models.neural_network import NeuralNetworkBuilder
from coremltools.models import datatypes, utils, MLModel
from warnings import warn
from ._ops_to_layers import convert_ops_to_layers
from . import _ops_to_layers
from ._interpret_shapes import _interpret_shape as interpret_shape
from ._tf_graph_transform import _topological_sort_ops, _find_unused_ops
from .optimizations._optimize_nn_spec import optimize_nn_spec
class SupportedVersion():
    """Registry of the iOS deployment targets this converter supports."""

    # Supported iOS versions, oldest first. New OS versions must be appended
    # at the end to keep existing version indices backward compatible.
    supported_ios_version = ['11.2', '12']

    @staticmethod
    def ios_support_check(target_ios):
        """Return True iff *target_ios* is a supported iOS version string."""
        return any(version == target_ios
                   for version in SupportedVersion.supported_ios_version)

    @staticmethod
    def get_supported_ios():
        """Return the list of supported iOS version strings."""
        return SupportedVersion.supported_ios_version
# Context stores useful information about TF graph and the conversion process
class Context(object):
    """Mutable bag of bookkeeping state shared by the TF->CoreML passes."""

    def __init__(self, consts, shape_dict, ops, blob_graph, output_features):
        self.builder = None
        self.consts = consts
        self.shape_dict = shape_dict  # tensor name --> shape ({str: list})
        # Constants are already "translated" before conversion starts.
        self.translated = dict.fromkeys(consts, True)
        self.out_name_to_in_name = {}  # for blobs which come from a no-op
        self.all_ops = ops
        self.output_names = [feature[0] for feature in output_features]
        self.skip_map_names = {}
        # Set of all load constants added to the CoreML graph.
        self.load_constants_mlmodel = {}
        # Tensor name --> list of ops it feeds into.
        self.blob_graph = blob_graph
        # Tensor name --> inferred rank-4 shape (Batch/Sequence, C, H, W).
        self.shape_dict_rank_4 = {}
        # Tensor name --> labeled axes, each one of 'S', 'C', 'H', 'W';
        # e.g. an 'input' of shape (1,224,224,3) maps to ('S','H','W','C').
        self.dim_labels = {}
        # Whether to use DFS search to infer shapes on paths to conv layers.
        self.use_dfs_shape_infer = True
        self.session = None
        self.input_feed_dict = None
        # Op names skippable for conversion: not connected to the output.
        self.unused_ops = []
        # Ops that are not of type "Const" but whose output does not change
        # with differently valued graph input.
        self.effectively_constant_ops = []
        self.skip_ops = []
        self.add_custom_layers = False
        self.custom_conversion_functions = {}
        # Ops that have been converted to custom CoreML layers.
        self.ops_converted_to_custom_layers = []
def _infer_coreml_input_shape(tf_shape):
"""Infer CoreML input shape from TensorFlow shape.
"""
if len(tf_shape) == 0:
shape = [1, 1, 1]
elif len(tf_shape) == 1:
# TODO - remove style transfer 1D hack
# Input is 1D but it goes to the width dimension: (1,1,W)
shape = [1, 1, tf_shape[0]] #(C,H,W)
elif len(tf_shape) == 2:
# assume (Batch, Channels) - Batch dimension should be dropped
shape = [tf_shape[1]]
elif len(tf_shape) == 3:
# assume (Batch, Sequence-Length, channels)
shape = [tf_shape[2], 1, tf_shape[1]]
elif len(tf_shape) == 4: #(B,H,W,C) --> (C,H,W)
shape = [tf_shape[3], tf_shape[1], tf_shape[2]] #(C,H,W)
else:
raise ValueError('Unrecognized TensorFlow input shape' + str(tf_shape))
return shape
def _infer_coreml_output_shape(tf_shape):
"""Infer CoreML output shape from TensorFlow shape.
"""
shape = []
if len(tf_shape) == 1:
shape = [tf_shape[0], 1, 1]
elif len(tf_shape) == 2:
if tf_shape[0] == 1:
# (B,C)
shape = [tf_shape[1]]
else:
shape = None
elif len(tf_shape) == 3:
# since output shape is not required by CoreML and rank-3 tensor in TF is ambiguous, we do not assign a shape
shape = None
elif len(tf_shape) == 4:
shape = [tf_shape[3], tf_shape[1], tf_shape[2]] #(C,H,W)
elif len(tf_shape) == 0: # scalar
shape = [1]
else:
raise ValueError('Unrecognized TensorFlow output shape ' + str(tf_shape))
return shape
def _check_unsupported_ops(ops, output_feature_names, skip_ops):
    '''
    Walk *ops* (assumed topologically sorted) until every requested output
    tensor has been produced, collecting the op types that have no
    registered CoreML translation. Raise if any were found.
    :param ops: ops of the TF graph
    :param output_feature_names: [str]: list of output names
    :param skip_ops: [str]: list of op names that can be skipped since they
        either do not depend on the actual value of the input or do not
        connect to the final output
    '''
    unsupported_types = []
    seen_output_names = set()
    for op in ops:
        # Stop once every requested output tensor has been encountered.
        if all(target in seen_output_names for target in output_feature_names):
            break
        if (op.type not in _ops_to_layers._OP_REGISTRY
                and op.type not in unsupported_types
                and op.name not in skip_ops):
            unsupported_types.append(op.type)
        seen_output_names.update(out.name for out in op.outputs)
    if unsupported_types:
        raise NotImplementedError("Unsupported Ops of type: %s" % (
            ','.join(unsupported_types)))
def _convert_pb_to_mlmodel(tf_model_path,
mlmodel_path,
output_feature_names,
input_name_shape_dict={},
image_input_names=None,
is_bgr=False,
red_bias=0.0,
green_bias=0.0,
blue_bias=0.0,
gray_bias=0.0,
image_scale=1.0,
class_labels=None,
predicted_feature_name=None,
predicted_probabilities_output='',
add_custom_layers=False, # type: bool
custom_conversion_functions={} # type: Dict[Text, Any]
):
# Load the TF graph
print('')
print('Loading the TF graph...')
with open(tf_model_path, 'rb') as f:
serialized = f.read()
gdef = tf.GraphDef()
gdef.ParseFromString(serialized)
with tf.Graph().as_default() as g:
tf.import_graph_def(gdef, name='')
sess = tf.Session(graph=g)
OPS = g.get_operations()
if 'DecodeJpeg' in [op.type for op in OPS]:
raise NotImplementedError("Unsupported Op of type: DecodeJpeg. "
"Kindly refer to the \"examples/inception_v3.ipynb\" notebook, "
"on the tfcoreml github page, to see how to strip input "
"pre-processing from the TF graph before conversion to CoreML.")
print('Graph Loaded.')
# Sort the ops in topological order and check whether the graph has cycles, if yes, error out
OPS = _topological_sort_ops(OPS)
SHAPE_DICT = {} #Tensor name --> shape ({str: list})
CONSTS = {} #Const Tensor name --> value
BLOB_GRAPH = {} #Blob name to list of ops it feeds into
# Make Dictionary of Input blob to the list of ops it feeds into
for op in OPS:
for inp in op.inputs:
if inp.name in BLOB_GRAPH:
BLOB_GRAPH[inp.name].append(op)
for out in op.outputs:
if out.name not in BLOB_GRAPH:
BLOB_GRAPH[out.name] = []
# Fill in input information
input_features = []
output_features = []
input_feed_dict = dict() #Input tensors' values
input_feed_dict2 = dict() # used later to find skippable ops
# run through all placeholders
for op in OPS:
output_names = set([compat.as_str_any(x.name) for x in op.outputs])
if op.type == 'Placeholder':
# Handle placeholders -- all placeholders are inputs
assert not any(filter(output_names.__contains__, output_feature_names)), \
('Output feature cannot be a placeholder')
input_name = compat.as_str_any(op.outputs[0].name)
shape = op.outputs[0].get_shape()
if input_name in input_name_shape_dict:
shape = input_name_shape_dict[input_name]
elif shape.is_fully_defined():
shape = shape.as_list()
else:
try:
shape_list = shape.as_list()
except:
raise ValueError('Please provide the shape for the input {} through the argument \'input_name_shape_dict\''.format(input_name))
if shape_list[0] is None and None not in shape_list[1:]:
shape = [1] + shape_list[1:]
else:
raise ValueError("%s is a placeholder with incomplete shape %s. Please provide the 'input_name_shape_dict' "
"argument to the convert function, with the fully defined shape." %(input_name, str(shape)))
if len(shape) == 0: # scalar - use a 1
input_feed_dict[op.outputs[0]] = 1
input_feed_dict2[op.outputs[0]] = 1
else:
input_feed_dict[op.outputs[0]] = np.random.rand(*shape)
input_feed_dict2[op.outputs[0]] = 255*np.random.rand(*shape)
SHAPE_DICT[input_name] = list(shape)
# Find "effectively_constant_ops": ops whose output(s) do not change with different valued Graph level inputs
# Find "unused_ops" : ops that are not connected to the output(s)
unused_ops = []
effectively_constant_ops = []
try:
print("Now finding ops in the TF graph that can be dropped for inference")
unused_ops, effectively_constant_ops = _find_unused_ops(OPS, sess, output_feature_names, input_feed_dict, input_feed_dict2) # return type: List[str], List[str]
except:
pass
# Populate SHAPE_DICT: Dictionary for all tensor blobs in the graph and their shapes
shapes_wanted = [] # list of output names
consts_wanted = []
for op in OPS:
for out in op.outputs:
shape = out.get_shape()
if not shape.is_fully_defined():
shapes_wanted.append((compat.as_str_any(out.name), out))
else:
SHAPE_DICT[compat.as_str_any(out.name)] = shape.as_list()
is_const = False
if op.type == 'Const':
is_const = True
if op.type == 'Dequantize' and op.name in effectively_constant_ops:
is_const = True
if is_const:
const = op.outputs[0]
consts_wanted.append((compat.as_str_any(const.name), const))
print('Collecting all the \'Const\' ops from the graph, by running it....')
if len(shapes_wanted) > 0 or len(consts_wanted) > 0:
tensor_names, tensors = zip(*(shapes_wanted+consts_wanted))
if len(consts_wanted) > 0:
const_tensor_names, _ = zip(*consts_wanted)
else:
const_tensor_names = []
tensors_evaluated = sess.run(tensors, feed_dict=input_feed_dict)
for i in range(len(tensor_names)):
if tensor_names[i] not in SHAPE_DICT:
SHAPE_DICT[tensor_names[i]] = list(tensors_evaluated[i].shape)
if tensor_names[i] in const_tensor_names and tensor_names[i] not in CONSTS:
CONSTS[tensor_names[i]] = tensors_evaluated[i]
print('Done.')
# Fill in output information
for op in OPS:
output_names = set([compat.as_str_any(x.name) for x in op.outputs])
if any(filter(output_names.__contains__, output_feature_names)):
# retrieve model outputs
for output in [x for x in op.outputs if x.name in output_feature_names]:
#infer shape for Core ML
tf_shape = SHAPE_DICT[compat.as_str_any(output.name)]
shape = _infer_coreml_output_shape(tf_shape)
out_name = output.name
if shape is None:
output_features.append(
(compat.as_str_any(out_name), None))
else:
output_features.append(
(compat.as_str_any(out_name), datatypes.Array(*shape)))
if len(output_features) != len(output_feature_names):
all_out_names_in_graph = [out_[0] for out_ in output_features]
for given_out_name in output_feature_names:
if given_out_name not in all_out_names_in_graph:
raise ValueError("output name: {}, was provided, but the Tensorflow graph does not contain a tensor with this name.".format(given_out_name))
if not add_custom_layers:
_check_unsupported_ops(OPS, output_feature_names, effectively_constant_ops + unused_ops)
print('Now starting translation to CoreML graph.')
# Load all the dictionaries in the object of the class "context"
context = Context(CONSTS, SHAPE_DICT, OPS, BLOB_GRAPH, output_features)
# Interpret Input shapes and fill in input information for Core ML
# (now that SHAPE_DICT and CONSTS are complete)
sequence_inputs = dict()
for input_tensor in input_feed_dict:
input_name = compat.as_str_any(input_tensor.name)
shape = SHAPE_DICT[input_name]
if context.use_dfs_shape_infer:
status = interpret_shape(input_name, context)
else:
status = False
if status:
print('Automatic shape interpretation succeeded for input blob %s' \
%(input_name))
shape = context.shape_dict_rank_4[input_name]
if len(shape) == 4 and shape[0] != 1:
sequence_inputs[input_name] = shape[0]
# if the consumer of input_tensor is an one-hot encoding op,
# treat it as a sequence.
consumer_op = input_tensor.consumers()[0]
if consumer_op.type == 'OneHot':
shape = [1,]
sequence_inputs[input_name] = -1
else:
shape = _infer_coreml_input_shape(shape)
input_features.append(
(compat.as_str_any(input_name), datatypes.Array(*shape)))
# Set classifier flag
is_classifier = class_labels is not None
mode = 'classifier' if is_classifier else None
# Convert the TF graph with builder
input_features = list(input_features)
output_features = list(output_features)
builder = NeuralNetworkBuilder(input_features, output_features, mode=mode)
context.builder = builder
context.session = sess
context.input_feed_dict = input_feed_dict
context.unused_ops = unused_ops
context.effectively_constant_ops = effectively_constant_ops
context.add_custom_layers = add_custom_layers
context.custom_conversion_functions = custom_conversion_functions
convert_ops_to_layers(context)
sess.close()
#optimizations on the nn spec
optimize_nn_spec(spec=builder.spec)
#Add a description for inputs that are sequences
for i, inputs in enumerate(builder.spec.description.input):
if inputs.name in sequence_inputs:
seq_length = sequence_inputs[inputs.name]
proto_shape = []
if inputs.type.HasField('multiArrayType'):
proto_shape = [int(s) for s in inputs.type.multiArrayType.shape]
if seq_length == -1:
msg = 'This input is a sequence'
if len(proto_shape):
msg += '. Feed it an MLMultiArray of shape {} at runtime'.format(str(['Seq_size', '1'] + proto_shape))
else:
msg = 'This input is a sequence of length ' + str(seq_length)
if len(proto_shape):
msg += '. Feed it an MLMultiArray of shape {} at runtime'.format(str([seq_length, 1] + proto_shape))
builder.spec.description.input[i].shortDescription = msg
# Add image input identifier
if image_input_names is not None and isinstance(
image_input_names, _string_types):
image_input_names = [image_input_names]
# Replace all input/output blob names with ":" to "__" for compatible
# auto-generated Objective C / Swift code
interface_blob_names = []
for idx, in_blob in enumerate(builder.spec.description.input):
interface_blob_names.append(in_blob.name)
builder.spec.description.input[idx].name = in_blob.name.replace(':', '__').replace('/', '__')
for idx, out_blob in enumerate(builder.spec.description.output):
interface_blob_names.append(out_blob.name)
builder.spec.description.output[idx].name = out_blob.name.replace(':', '__').replace('/', '__')
nn_spec = builder.nn_spec
for i, spec_layer in enumerate(nn_spec.layers):
for j, blob in enumerate(spec_layer.input):
name = spec_layer.input[j]
if name in interface_blob_names:
spec_layer.input[j] = name.replace(':', '__').replace('/', '__')
for j, blob in enumerate(spec_layer.output):
name = spec_layer.output[j]
if name in interface_blob_names:
spec_layer.output[j] = name.replace(':', '__').replace('/', '__')
# replace ':' and '/' in input names by '__'
def rename_input_dict_or_list(array):
if isinstance(array, list):
for i, name in enumerate(array):
array[i] = name.replace(':', '__').replace('/', '__')
elif isinstance(array, dict):
array = {name.replace(':','__').replace('/', '__'):value for name, value in array.items()}
return array
image_input_names = rename_input_dict_or_list(image_input_names)
is_bgr = rename_input_dict_or_list(is_bgr)
red_bias = rename_input_dict_or_list(red_bias)
blue_bias = rename_input_dict_or_list(blue_bias)
green_bias = rename_input_dict_or_list(green_bias)
gray_bias = rename_input_dict_or_list(gray_bias)
image_scale = rename_input_dict_or_list(image_scale)
# Add classifier classes (if applicable)
if is_classifier:
classes_in = class_labels
if isinstance(classes_in, _string_types):
import os
if not os.path.isfile(classes_in):
raise ValueError("Path to class labels (%s) does not exist." % \
classes_in)
with open(classes_in, 'r') as f:
classes = f.read()
classes = classes.splitlines()
elif type(classes_in) is list: # list[int or str]
classes = classes_in
else:
raise ValueError('Class labels must be a list of integers / strings,'\
' or a file path')
if predicted_feature_name is not None:
builder.set_class_labels(
classes, predicted_feature_name=predicted_feature_name,
prediction_blob=predicted_probabilities_output)
else:
builder.set_class_labels(classes)
# Set pre-processing parameters
builder.set_pre_processing_parameters(image_input_names=image_input_names,
is_bgr=is_bgr,
red_bias=red_bias,
green_bias=green_bias,
blue_bias=blue_bias,
gray_bias=gray_bias,
image_scale=image_scale)
print("Translation to CoreML spec completed. Now compiling and saving the CoreML model.")
try:
import coremltools
if mlmodel_path is not None:
coremltools.models.utils.save_spec(builder.spec, mlmodel_path)
print("\n Core ML model generated. Saved at location: %s \n" % (mlmodel_path))
mlmodel = MLModel(builder.spec)
except RuntimeError as e:
raise ValueError('Compilation failed: {}'.format(str(e)))
print('Core ML input(s): \n', builder.spec.description.input)
print('Core ML output(s): \n', builder.spec.description.output)
# print information about all ops for which custom layers have been added
if len(context.ops_converted_to_custom_layers) > 0:
print('\n')
print("Custom layers have been added to the CoreML model "
"corresponding to the following ops in the TF graph: ")
for i, op in enumerate(context.ops_converted_to_custom_layers):
input_info = []
for input_ in op.inputs:
input_info.append((str(input_.name), context.shape_dict.get(input_.name, str("Shape not available"))))
output_info = []
for output_ in op.outputs:
output_info.append((str(output_.name), context.shape_dict.get(output_.name, str("Shape not available"))))
print("{}/{}: op type: {}, op input names and shapes: {}, op output names and shapes: {}".
format(i + 1, len(context.ops_converted_to_custom_layers), op.type, str(input_info), str(output_info)))
# Return the protobuf model
return mlmodel
def convert(tf_model_path,
            mlmodel_path=None,
            output_feature_names=None,
            input_name_shape_dict=None,
            image_input_names=None,
            tf_image_format=None,
            is_bgr=False,
            red_bias=0.0,
            green_bias=0.0,
            blue_bias=0.0,
            gray_bias=0.0,
            image_scale=1.0,
            class_labels=None,
            predicted_feature_name=None,
            predicted_probabilities_output='',
            add_custom_layers=False,  # type: bool
            custom_conversion_functions=None,  # type: Dict[Text, Any]
            minimum_ios_deployment_target='12',
            ):
    """
    Convert a frozen TensorFlow graph (.pb format) to the CoreML format (.mlmodel)

    Parameters
    ----------
    tf_model_path: str
        tf_model_path must be path to the frozen .pb model.
    mlmodel_path: str
        Path to where the generated .mlmodel will be stored
    output_feature_names: [str]
        List of strings. Names of the output tensors.
    input_name_shape_dict: {str: [int]}
        Dictionary of input tensor names and their corresponding shapes expressed
        as a list of ints
    image_input_names: [str] | str
        Input names (a subset of the keys of input_name_shape_dict)
        that can be treated as images by Core ML. All other inputs
        are treated as MultiArrays.
    tf_image_format: str
        Optional. Specify either 'NCHW' or 'NHWC' to set or override the image format. Without this
        field set, the image format may be determined from the input model. Only valid for
        minimum_ios_deployment_target > '12'
    is_bgr: bool | dict():
        Applicable only if image_input_names is specified.
        To specify different values for each image input provide a dictionary with input names as keys.
    red_bias: float | dict()
        Bias value to be added to the red channel of the input image, after applying scale.
        Defaults to 0.0
        Applicable only if image_input_names is specified.
        To specify different values for each image input provide a dictionary with input names as keys.
    blue_bias: float | dict()
        Bias value to be added to the blue channel of the input image, after applying scale.
        Defaults to 0.0
        Applicable only if image_input_names is specified.
        To specify different values for each image input provide a dictionary with input names as keys.
    green_bias: float | dict()
        Bias value to be added to the green channel of the input image, after applying scale.
        Defaults to 0.0
        Applicable only if image_input_names is specified.
        To specify different values for each image input provide a dictionary with input names as keys.
    gray_bias: float | dict()
        Bias value to be added to the input image (in grayscale), after applying scale.
        Defaults to 0.0
        Applicable only if image_input_names is specified.
        To specify different values for each image input provide a dictionary with input names as keys.
    image_scale: float | dict()
        Value by which input images will be scaled before bias is added and
        Core ML model makes a prediction. Defaults to 1.0.
        Applicable only if image_input_names is specified.
        To specify different values for each image input provide a dictionary with input names as keys.
    class_labels: list[int or str] | str
        Class labels (applies to classifiers only) that map the index of the
        output of a neural network to labels in a classifier.
        If the provided class_labels is a string, it is assumed to be a
        filepath where classes are parsed as a list of newline separated
        strings.
    predicted_feature_name: str
        Name of the output feature for the class labels exposed in the Core ML
        model (applies to classifiers only). Defaults to 'classLabel'
    predicted_probabilities_output: str
        Name of the neural network output to be interpreted as the predicted
        probabilities of the resulting classes. Typically the output of a
        softmax function.
    add_custom_layers: bool
        Flag to turn on addition of custom CoreML layers for unsupported TF ops or attributes within
        a supported op.
    custom_conversion_functions: dict(): {Text: func(**kwargs)}
        Argument to provide user-defined functions for converting Tensorflow operations (op, for short).
        A dictionary with keys corresponding to the names or types of the TF ops and values as handle to user-defined functions.
        The keys can be either the type of the op or the name of the op. If former, then the function is called whenever the op
        of that type is encountered during conversion. By using op names, specific ops can be targeted which is
        useful for handling unsupported configuration in an op.
        The function receives multiple arguments: TF operation, the CoreML Neural network builder object,
        dictionary containing the op's inputs that are constants and their values (as numpy arrays).
        The function can add custom layers or any other combination of CoreML layers to translate the TF op.
        See "examples/custom_layer_examples.ipynb" jupyter-notebook for examples on using this argument.
        Defaults to None (treated as an empty mapping).
    minimum_ios_deployment_target: str
        Minimum target deployment iOS version (default: '12'). Supported iOS version options: '11.2', '12'.
        Core ML model produced by the converter will be compatible with the iOS version specified in this
        argument and the versions after it. e.g., if minimum_ios_deployment_target='12', the converter would
        only utilize layers released till iOS 12 (equivalently macOS 10.14, watchOS 5 etc.), and the produced
        model can be deployed to iOS12+ (iOS 12, iOS 13 and later).
        iOS 11.2 (Core ML 0.8): https://github.com/apple/coremltools/releases/tag/v0.8
        iOS 12 (Core ML 2.0): https://github.com/apple/coremltools/releases/tag/v2.0

    Returns
    -------
    model: MLModel
        Model in Core ML format.
    """
    # Validate the requested deployment target before doing any work.
    if not SupportedVersion.ios_support_check(minimum_ios_deployment_target):
        msg = '{} not supported. Please provide one of target iOS: {}\n'.format(minimum_ios_deployment_target, SupportedVersion.get_supported_ios())
        msg += "For minimum deployment target iOS 13 and later, use Unified API 'coremltools.convert' introduced in coremltools 4.0"
        raise TypeError(msg)
    # Normalize None sentinels to fresh containers. Using {} as a default
    # argument would share one dict across calls (and it is stored on the
    # conversion context downstream), so both dict-valued parameters default
    # to None instead.
    if input_name_shape_dict is None:
        input_name_shape_dict = {}
    if custom_conversion_functions is None:
        custom_conversion_functions = {}
    if output_feature_names is None:
        raise ValueError('Output feature names must be provided.')
    if tf_image_format is not None:
        # tf_image_format only takes effect for deployment targets >= iOS 13,
        # which this legacy path does not support.
        warn('tf_image_format not honored when minimum_ios_deployment_target < 13')
    return _convert_pb_to_mlmodel(
        tf_model_path,
        mlmodel_path,
        output_feature_names,
        input_name_shape_dict,
        image_input_names=image_input_names,
        is_bgr=is_bgr,
        red_bias=red_bias,
        green_bias=green_bias,
        blue_bias=blue_bias,
        gray_bias=gray_bias,
        image_scale=image_scale,
        class_labels=class_labels,
        predicted_feature_name=predicted_feature_name,
        predicted_probabilities_output=predicted_probabilities_output,
        add_custom_layers=add_custom_layers,
        custom_conversion_functions=custom_conversion_functions)
|
|
"""Generated message classes for testing version v1.
Allows developers to run automated tests for their mobile applications on
Google infrastructure.
"""
# NOTE: This file is autogenerated and should not be edited by hand.
from apitools.base.protorpclite import messages as _messages
from apitools.base.py import encoding
package = 'testing'
class Account(_messages.Message):
    """Identifies an account and how to log into it

    Fields:
      googleAuto: An automatic google login account
    """

    # Numeric second arguments are proto field numbers; they are part of the
    # wire contract and must not be changed (file is autogenerated).
    googleAuto = _messages.MessageField('GoogleAuto', 1)
class AndroidDevice(_messages.Message):
    """A single Android device.

    Fields:
      androidModelId: The id of the Android device to be used. Use the
        EnvironmentDiscoveryService to get supported options. Required
      androidVersionId: The id of the Android OS version to be used. Use the
        EnvironmentDiscoveryService to get supported options. Required
      locale: The locale the test device used for testing. Use the
        EnvironmentDiscoveryService to get supported options. Required
      orientation: How the device is oriented during the test. Use the
        EnvironmentDiscoveryService to get supported options. Required
    """

    # Proto field numbers are fixed by the service definition.
    androidModelId = _messages.StringField(1)
    androidVersionId = _messages.StringField(2)
    locale = _messages.StringField(3)
    orientation = _messages.StringField(4)
class AndroidDeviceCatalog(_messages.Message):
    """The currently supported Android devices.

    Fields:
      models: The set of supported Android device models. @OutputOnly
      runtimeConfiguration: The set of supported runtime configurations.
        @OutputOnly
      versions: The set of supported Android OS versions. @OutputOnly
    """

    # Proto field numbers are fixed by the service definition.
    models = _messages.MessageField('AndroidModel', 1, repeated=True)
    runtimeConfiguration = _messages.MessageField('AndroidRuntimeConfiguration', 2)
    versions = _messages.MessageField('AndroidVersion', 3, repeated=True)
class AndroidInstrumentationTest(_messages.Message):
    """A test of an Android application that can control an Android component
    independently of its normal lifecycle. Android instrumentation tests run an
    application APK and test APK inside the same process on a virtual or
    physical AndroidDevice. They also specify a test runner class, such as
    com.google.GoogleTestRunner, which can vary on the specific instrumentation
    framework chosen. See
    <http://developer.android.com/tools/testing/testing_android.html> for more
    information on types of Android tests.

    Fields:
      appApk: The APK for the application under test. Required
      appPackageId: The java package for the application under test. Optional,
        default is determined by examining the application's manifest.
      testApk: The APK containing the test code to be executed. Required
      testPackageId: The java package for the test to be executed. Optional,
        default is determined by examining the application's manifest.
      testRunnerClass: The InstrumentationTestRunner class. Optional, default is
        determined by examining the application's manifest.
      testTargets: Each target must be fully qualified with the package name or
        class name, in one of these formats: - "package package_name" - "class
        package_name.class_name" - "class package_name.class_name#method_name"
        Optional, if empty, all targets in the module will be run.
    """

    # Proto field numbers are fixed by the service definition.
    appApk = _messages.MessageField('FileReference', 1)
    appPackageId = _messages.StringField(2)
    testApk = _messages.MessageField('FileReference', 3)
    testPackageId = _messages.StringField(4)
    testRunnerClass = _messages.StringField(5)
    testTargets = _messages.StringField(6, repeated=True)
class AndroidMatrix(_messages.Message):
    """A set of Android device configuration permutations is defined by the the
    cross-product of the given axes. Internally, the given AndroidMatrix will
    be expanded into a set of AndroidDevices. Only supported permutations will
    be instantiated. Invalid permutations (e.g., incompatible models/versions)
    are ignored.

    Fields:
      androidModelIds: The ids of the set of Android device to be used. Use the
        EnvironmentDiscoveryService to get supported options. Required
      androidVersionIds: The ids of the set of Android OS version to be used.
        Use the EnvironmentDiscoveryService to get supported options. Required
      locales: The set of locales the test device will enable for testing. Use
        the EnvironmentDiscoveryService to get supported options. Required
      orientations: The set of orientations to test with. Use the
        EnvironmentDiscoveryService to get supported options. Required
    """

    # Proto field numbers are fixed by the service definition.
    androidModelIds = _messages.StringField(1, repeated=True)
    androidVersionIds = _messages.StringField(2, repeated=True)
    locales = _messages.StringField(3, repeated=True)
    orientations = _messages.StringField(4, repeated=True)
class AndroidModel(_messages.Message):
    """A description of an Android device tests may be run on.

    Enums:
      FormValueValuesEnum: Whether this device is virtual or physical.
        @OutputOnly

    Fields:
      brand: The company that this device is branded with. Example: "Google",
        "Samsung" @OutputOnly
      codename: The name of the industrial design. This corresponds to
        android.os.Build.DEVICE @OutputOnly
      form: Whether this device is virtual or physical. @OutputOnly
      id: The unique opaque id for this model. Use this for invoking the
        TestExecutionService. @OutputOnly
      manufacturer: The manufacturer of this device. @OutputOnly
      name: The human-readable marketing name for this device model. Examples:
        "Nexus 5", "Galaxy S5" @OutputOnly
      screenDensity: Screen density in DPI. This corresponds to
        ro.sf.lcd_density @OutputOnly
      screenX: Screen size in the horizontal (X) dimension measured in pixels.
        @OutputOnly
      screenY: Screen size in the vertical (Y) dimension measured in pixels.
        @OutputOnly
      supportedAbis: The list of supported ABIs for this device. This
        corresponds to either android.os.Build.SUPPORTED_ABIS (for API level 21
        and above) or android.os.Build.CPU_ABI/CPU_ABI2. The most preferred ABI
        is the first element in the list. Elements are optionally prefixed by
        "version_id:" (where version_id is the id of an AndroidVersion),
        denoting an ABI that is supported only on a particular version.
        @OutputOnly
      supportedVersionIds: The set of Android versions this device supports.
        @OutputOnly
      tags: Tags for this dimension. Examples: "default", "preview",
        "deprecated"
    """

    class FormValueValuesEnum(_messages.Enum):
        """Whether this device is virtual or physical. @OutputOnly

        Values:
          DEVICE_FORM_UNSPECIFIED: Do not use. For proto versioning only.
          VIRTUAL: A software stack that simulates the device
          PHYSICAL: Actual hardware
        """
        DEVICE_FORM_UNSPECIFIED = 0
        VIRTUAL = 1
        PHYSICAL = 2

    # Proto field numbers are fixed by the service definition.
    brand = _messages.StringField(1)
    codename = _messages.StringField(2)
    form = _messages.EnumField('FormValueValuesEnum', 3)
    id = _messages.StringField(4)
    manufacturer = _messages.StringField(5)
    name = _messages.StringField(6)
    screenDensity = _messages.IntegerField(7, variant=_messages.Variant.INT32)
    screenX = _messages.IntegerField(8, variant=_messages.Variant.INT32)
    screenY = _messages.IntegerField(9, variant=_messages.Variant.INT32)
    supportedAbis = _messages.StringField(10, repeated=True)
    supportedVersionIds = _messages.StringField(11, repeated=True)
    tags = _messages.StringField(12, repeated=True)
class AndroidRoboTest(_messages.Message):
    """A test of an android application that explores the application on a
    virtual or physical Android Device, finding culprits and crashes as it goes.

    Fields:
      appApk: The APK for the application under test. Required
      appInitialActivity: The initial activity that should be used to start the
        app. Optional
      appPackageId: The java package for the application under test. Optional,
        default is determined by examining the application's manifest.
      maxDepth: The max depth of the traversal stack Robo can explore. Needs to
        be at least 2 to make Robo explore the app beyond the first activity.
        Default is 50. Optional
      maxSteps: The max number of steps Robo can execute. Default is no limit.
        Optional
      roboDirectives: A set of directives Robo should apply during the crawl.
        This allows users to customize the crawl. For example, the username and
        password for a test account can be provided. Optional
    """

    # Proto field numbers are fixed by the service definition.
    appApk = _messages.MessageField('FileReference', 1)
    appInitialActivity = _messages.StringField(2)
    appPackageId = _messages.StringField(3)
    maxDepth = _messages.IntegerField(4, variant=_messages.Variant.INT32)
    maxSteps = _messages.IntegerField(5, variant=_messages.Variant.INT32)
    roboDirectives = _messages.MessageField('RoboDirective', 6, repeated=True)
class AndroidRuntimeConfiguration(_messages.Message):
    """Configuration that can be selected at the time a test is run.

    Fields:
      locales: The set of available locales. @OutputOnly
      orientations: The set of available orientations. @OutputOnly
    """

    # Proto field numbers are fixed by the service definition.
    locales = _messages.MessageField('Locale', 1, repeated=True)
    orientations = _messages.MessageField('Orientation', 2, repeated=True)
class AndroidVersion(_messages.Message):
    """A version of the Android OS

    Fields:
      apiLevel: The API level for this Android version. Examples: 18, 19
        @OutputOnly
      codeName: The code name for this Android version. Examples: "JellyBean",
        "KitKat" @OutputOnly
      distribution: Market share for this version. @OutputOnly
      id: An opaque id for this Android version. Use this id to invoke the
        TestExecutionService. @OutputOnly
      releaseDate: The date this Android version became available in the market.
        @OutputOnly
      tags: Tags for this dimension. Examples: "default", "preview",
        "deprecated"
      versionString: A string representing this version of the Android OS.
        Examples: "4.3", "4.4" @OutputOnly
    """

    # Proto field numbers are fixed by the service definition.
    apiLevel = _messages.IntegerField(1, variant=_messages.Variant.INT32)
    codeName = _messages.StringField(2)
    distribution = _messages.MessageField('Distribution', 3)
    id = _messages.StringField(4)
    releaseDate = _messages.MessageField('Date', 5)
    tags = _messages.StringField(6, repeated=True)
    versionString = _messages.StringField(7)
class CancelTestMatrixResponse(_messages.Message):
    """Response containing the current state of the specified test matrix.

    Enums:
      TestStateValueValuesEnum: The current rolled-up state of the test matrix.
        If this state is already final, then the cancelation request will have
        no effect.

    Fields:
      testState: The current rolled-up state of the test matrix. If this state
        is already final, then the cancelation request will have no effect.
    """

    class TestStateValueValuesEnum(_messages.Enum):
        """The current rolled-up state of the test matrix. If this state is
        already final, then the cancelation request will have no effect.

        Values:
          TEST_STATE_UNSPECIFIED: Do not use. For proto versioning only.
          VALIDATING: The execution or matrix is being validated.
          PENDING: The execution or matrix is waiting for resources to become
            available.
          RUNNING: The execution is currently being processed. Can only be set on
            an execution.
          FINISHED: The execution or matrix has terminated normally. On a matrix
            this means that the matrix level processing completed normally, but
            individual executions may be in an ERROR state.
          ERROR: The execution or matrix has stopped because it encountered an
            infrastructure failure.
          UNSUPPORTED_ENVIRONMENT: The execution was not run because it
            corresponds to a unsupported environment. Can only be set on an
            execution.
          INCOMPATIBLE_ENVIRONMENT: The execution was not run because the provided
            inputs are incompatible with the requested environment. Example:
            requested AndroidVersion is lower than APK's minSdkVersion Can only
            be set on an execution.
          INCOMPATIBLE_ARCHITECTURE: The execution was not run because the
            provided inputs are incompatible with the requested architecture.
            Example: requested device does not support running the native code in
            the supplied APK Can only be set on an execution.
          CANCELLED: The user cancelled the execution. Can only be set on an
            execution.
          INVALID: The execution or matrix was not run because the provided inputs
            are not valid. Examples: input file is not of the expected type, is
            malformed/corrupt, or was flagged as malware
        """
        TEST_STATE_UNSPECIFIED = 0
        VALIDATING = 1
        PENDING = 2
        RUNNING = 3
        FINISHED = 4
        ERROR = 5
        UNSUPPORTED_ENVIRONMENT = 6
        INCOMPATIBLE_ENVIRONMENT = 7
        INCOMPATIBLE_ARCHITECTURE = 8
        CANCELLED = 9
        INVALID = 10

    # Proto field numbers are fixed by the service definition.
    testState = _messages.EnumField('TestStateValueValuesEnum', 1)
class ClientInfo(_messages.Message):
    """Information about the client which invoked the test.

    Fields:
      name: Client name, such as gcloud. Required
    """

    # Proto field numbers are fixed by the service definition.
    name = _messages.StringField(1)
class Date(_messages.Message):
    """Represents a whole calendar date, e.g. date of birth. The time of day and
    time zone are either specified elsewhere or are not significant. The date is
    relative to the Proleptic Gregorian Calendar. The day may be 0 to represent
    a year and month where the day is not significant, e.g. credit card
    expiration date. The year may be 0 to represent a month and day independent
    of year, e.g. anniversary date. Related types are google.type.TimeOfDay and
    `google.protobuf.Timestamp`.

    Fields:
      day: Day of month. Must be from 1 to 31 and valid for the year and month,
        or 0 if specifying a year/month where the day is not significant.
      month: Month of year. Must be from 1 to 12.
      year: Year of date. Must be from 1 to 9999, or 0 if specifying a date
        without a year.
    """

    # Proto field numbers are fixed by the service definition.
    day = _messages.IntegerField(1, variant=_messages.Variant.INT32)
    month = _messages.IntegerField(2, variant=_messages.Variant.INT32)
    year = _messages.IntegerField(3, variant=_messages.Variant.INT32)
class DeviceFile(_messages.Message):
    """A single device file description.

    Fields:
      obbFile: A reference to an opaque binary blob file
    """

    # Proto field numbers are fixed by the service definition.
    obbFile = _messages.MessageField('ObbFile', 1)
class Distribution(_messages.Message):
    """Data about the relative number of devices running a given configuration
    of the Android platform.

    Fields:
      marketShare: The estimated fraction (0-1) of the total market with this
        configuration. @OutputOnly
      measurementTime: The time this distribution was measured. @OutputOnly
    """

    # Proto field numbers are fixed by the service definition.
    marketShare = _messages.FloatField(1)
    measurementTime = _messages.StringField(2)
class Empty(_messages.Message):
    """A generic empty message that you can re-use to avoid defining duplicated
    empty messages in your APIs. A typical example is to use it as the request
    or the response type of an API method. For instance:     service Foo {
    rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty);     }  The
    JSON representation for `Empty` is empty JSON object `{}`.
    """
    # Intentionally has no fields.
class Environment(_messages.Message):
    """The environment in which the test is run.

    Fields:
      androidDevice: An Android device which must be used with an Android test.
    """

    # Proto field numbers are fixed by the service definition.
    androidDevice = _messages.MessageField('AndroidDevice', 1)
class EnvironmentMatrix(_messages.Message):
    """The matrix of environments in which the test is to be executed.

    Fields:
      androidMatrix: A matrix of Android devices.
    """

    # Proto field numbers are fixed by the service definition.
    androidMatrix = _messages.MessageField('AndroidMatrix', 1)
class EnvironmentVariable(_messages.Message):
    """A key-value pair passed as an environment variable to the test

    Fields:
      key: Key for the environment variable
      value: Value for the environment variable
    """

    # Proto field numbers are fixed by the service definition.
    key = _messages.StringField(1)
    value = _messages.StringField(2)
class FileReference(_messages.Message):
    """A reference to a file, used for user inputs.

    Fields:
      gcsPath: A path to a file in Google Cloud Storage. Example: gs://build-
        app-1414623860166/app-debug-unaligned.apk
    """

    # Proto field numbers are fixed by the service definition.
    gcsPath = _messages.StringField(1)
class GoogleAuto(_messages.Message):
    """Enables automatic Google account login. If set, the service will
    automatically generate a Google test account and add it to the device,
    before executing the test. Note that test accounts might be reused. Many
    applications show their full set of functionalities when an account is
    present on the device. Logging into the device with these generated accounts
    allows testing more functionalities.
    """
    # Intentionally has no fields: the message's presence alone enables the
    # automatic login behavior described above.
class GoogleCloudStorage(_messages.Message):
    """A storage location within Google cloud storage (GCS).

    Fields:
      gcsPath: The path to a directory in GCS that will eventually contain the
        results for this test. The requesting user must have write access on the
        bucket in the supplied path. Required
    """

    # Proto field numbers are fixed by the service definition.
    gcsPath = _messages.StringField(1)
class ListTestMatricesResponse(_messages.Message):
    """Response contain a list of Test Matrices.

    Fields:
      testMatrices: The set of test matrices.
    """

    # Proto field numbers are fixed by the service definition.
    testMatrices = _messages.MessageField('TestMatrix', 1, repeated=True)
class Locale(_messages.Message):
    """A location/region designation for language.

    Fields:
      id: The id for this locale. Example: "en_US" @OutputOnly
      name: A human-friendly name for this language/locale. Example: "English"
        @OutputOnly
      region: A human-friendy string representing the region for this locale.
        Example: "United States" Not present for every locale. @OutputOnly
      tags: Tags for this dimension. Examples: "default"
    """

    # Proto field numbers are fixed by the service definition.
    id = _messages.StringField(1)
    name = _messages.StringField(2)
    region = _messages.StringField(3)
    tags = _messages.StringField(4, repeated=True)
class ObbFile(_messages.Message):
    """An opaque binary blob file to install on the device before the test
    starts

    Fields:
      obb: Opaque Binary Blob (OBB) file(s) to install on the device Required
      obbFileName: OBB file name which must conform to the format as specified
        by Android e.g. [main|patch].0300110.com.example.android.obb which will
        be installed into <shared-storage>/Android/obb/<package-name>/ on the
        device Required
    """

    # Proto field numbers are fixed by the service definition.
    obb = _messages.MessageField('FileReference', 1)
    obbFileName = _messages.StringField(2)
class Orientation(_messages.Message):
  """Screen orientation of the device.

  Fields:
    id: The id for this orientation. Example: "portrait" @OutputOnly
    name: A human-friendly name for this orientation. Example: "portrait"
      @OutputOnly
    tags: Tags for this dimension. Examples: "default"
  """

  id = _messages.StringField(1)
  name = _messages.StringField(2)
  tags = _messages.StringField(3, repeated=True)
class ResultStorage(_messages.Message):
  """Locations where the results of running the test are stored.

  Fields:
    googleCloudStorage: Required.
    toolResultsExecution: The tool results execution that results are written
      to. @OutputOnly
    toolResultsHistory: The tool results history that contains the tool
      results execution that results are written to. Optional, if not
      provided the service will choose an appropriate value.
  """

  googleCloudStorage = _messages.MessageField('GoogleCloudStorage', 1)
  toolResultsExecution = _messages.MessageField('ToolResultsExecution', 2)
  toolResultsHistory = _messages.MessageField('ToolResultsHistory', 3)
class RoboDirective(_messages.Message):
  """Directs Robo to interact with a specific UI element if it is encountered
  during the crawl. Currently, Robo can set text in text fields.

  Fields:
    inputText: The text that Robo is directed to set. Required
    resourceName: The android resource name of the target UI element For
      example, in Java: R.string.foo in xml: @string/foo Only the \u201cfoo\u201d
      part is needed. Reference doc:
      https://developer.android.com/guide/topics/resources/accessing-
      resources.html Required
  """

  inputText = _messages.StringField(1)
  resourceName = _messages.StringField(2)
class StandardQueryParameters(_messages.Message):
  """Query parameters accepted by all methods.

  Enums:
    FXgafvValueValuesEnum: V1 error format.
    AltValueValuesEnum: Data format for response.

  Fields:
    f__xgafv: V1 error format.
    access_token: OAuth access token.
    alt: Data format for response.
    bearer_token: OAuth bearer token.
    callback: JSONP
    fields: Selector specifying which fields to include in a partial response.
    key: API key. Your API key identifies your project and provides you with
      API access, quota, and reports. Required unless you provide an OAuth 2.0
      token.
    oauth_token: OAuth 2.0 token for the current user.
    pp: Pretty-print response.
    prettyPrint: Returns response with indentations and line breaks.
    quotaUser: Available to use for quota purposes for server-side
      applications. Can be any arbitrary string assigned to a user, but should
      not exceed 40 characters.
    trace: A tracing token of the form "token:<tokenid>" to include in api
      requests.
    uploadType: Legacy upload protocol for media (e.g. "media", "multipart").
    upload_protocol: Upload protocol for media (e.g. "raw", "multipart").
  """

  class AltValueValuesEnum(_messages.Enum):
    """Data format for response.

    Values:
      json: Responses with Content-Type of application/json
      media: Media download with context-dependent Content-Type
      proto: Responses with Content-Type of application/x-protobuf
    """
    json = 0
    media = 1
    proto = 2

  class FXgafvValueValuesEnum(_messages.Enum):
    """V1 error format.

    Values:
      _1: v1 error format
      _2: v2 error format
    """
    # Leading underscores keep the enum names valid Python identifiers; the
    # JSON names '1'/'2' are restored by the AddCustomJsonEnumMapping calls
    # at the bottom of the generated module.
    _1 = 0
    _2 = 1

  f__xgafv = _messages.EnumField('FXgafvValueValuesEnum', 1)
  access_token = _messages.StringField(2)
  alt = _messages.EnumField('AltValueValuesEnum', 3, default=u'json')
  bearer_token = _messages.StringField(4)
  callback = _messages.StringField(5)
  fields = _messages.StringField(6)
  key = _messages.StringField(7)
  oauth_token = _messages.StringField(8)
  pp = _messages.BooleanField(9, default=True)
  prettyPrint = _messages.BooleanField(10, default=True)
  quotaUser = _messages.StringField(11)
  trace = _messages.StringField(12)
  uploadType = _messages.StringField(13)
  upload_protocol = _messages.StringField(14)
class TestDetails(_messages.Message):
  """Additional details about the progress of the running test.

  Fields:
    errorMessage: If the TestState is ERROR, then this string will contain
      human-readable details about the error. @OutputOnly
    progressMessages: Human-readable, detailed descriptions of the test's
      progress. For example: "Provisioning a device", "Starting Test". During
      the course of execution new data may be appended to the end of
      progress_messages. @OutputOnly
  """

  errorMessage = _messages.StringField(1)
  progressMessages = _messages.StringField(2, repeated=True)
class TestEnvironmentCatalog(_messages.Message):
  """A description of a test environment.

  Fields:
    androidDeviceCatalog: Android devices suitable for running Android
      Instrumentation Tests.
  """

  androidDeviceCatalog = _messages.MessageField('AndroidDeviceCatalog', 1)
class TestExecution(_messages.Message):
  """Specifies a single test to be executed in a single environment.

  Enums:
    StateValueValuesEnum: Indicates the current progress of the test execution
      (e.g., FINISHED). @OutputOnly

  Fields:
    environment: How the host machine(s) are configured. @OutputOnly
    id: Unique id set by the backend. @OutputOnly
    matrixId: Id of the containing TestMatrix. @OutputOnly
    projectId: The cloud project that owns the test execution. @OutputOnly
    state: Indicates the current progress of the test execution (e.g.,
      FINISHED). @OutputOnly
    testDetails: Additional details about the running test. @OutputOnly
    testSpecification: How to run the test. @OutputOnly
    timestamp: The time this test execution was initially created. @OutputOnly
    toolResultsStep: Where the results for this execution are written.
      @OutputOnly
  """

  class StateValueValuesEnum(_messages.Enum):
    """Indicates the current progress of the test execution (e.g., FINISHED).
    @OutputOnly

    Values:
      TEST_STATE_UNSPECIFIED: Do not use. For proto versioning only.
      VALIDATING: The execution or matrix is being validated.
      PENDING: The execution or matrix is waiting for resources to become
        available.
      RUNNING: The execution is currently being processed. Can only be set on
        an execution.
      FINISHED: The execution or matrix has terminated normally. On a matrix
        this means that the matrix level processing completed normally, but
        individual executions may be in an ERROR state.
      ERROR: The execution or matrix has stopped because it encountered an
        infrastructure failure.
      UNSUPPORTED_ENVIRONMENT: The execution was not run because it
        corresponds to a unsupported environment. Can only be set on an
        execution.
      INCOMPATIBLE_ENVIRONMENT: The execution was not run because the provided
        inputs are incompatible with the requested environment. Example:
        requested AndroidVersion is lower than APK's minSdkVersion Can only
        be set on an execution.
      INCOMPATIBLE_ARCHITECTURE: The execution was not run because the
        provided inputs are incompatible with the requested architecture.
        Example: requested device does not support running the native code in
        the supplied APK Can only be set on an execution.
      CANCELLED: The user cancelled the execution. Can only be set on an
        execution.
      INVALID: The execution or matrix was not run because the provided inputs
        are not valid. Examples: input file is not of the expected type, is
        malformed/corrupt, or was flagged as malware
    """
    TEST_STATE_UNSPECIFIED = 0
    VALIDATING = 1
    PENDING = 2
    RUNNING = 3
    FINISHED = 4
    ERROR = 5
    UNSUPPORTED_ENVIRONMENT = 6
    INCOMPATIBLE_ENVIRONMENT = 7
    INCOMPATIBLE_ARCHITECTURE = 8
    CANCELLED = 9
    INVALID = 10

  environment = _messages.MessageField('Environment', 1)
  id = _messages.StringField(2)
  matrixId = _messages.StringField(3)
  projectId = _messages.StringField(4)
  state = _messages.EnumField('StateValueValuesEnum', 5)
  testDetails = _messages.MessageField('TestDetails', 6)
  testSpecification = _messages.MessageField('TestSpecification', 7)
  timestamp = _messages.StringField(8)
  toolResultsStep = _messages.MessageField('ToolResultsStep', 9)
class TestMatrix(_messages.Message):
  """A group of one or more TestExecutions, built by taking a product of
  values over a pre-defined set of axes.

  Enums:
    InvalidMatrixDetailsValueValuesEnum: Describes why the matrix is
      considered invalid. Only useful for matrices in the INVALID state.
      @OutputOnly
    StateValueValuesEnum: Indicates the current progress of the test matrix
      (e.g., FINISHED) @OutputOnly

  Fields:
    clientInfo: Information about the client which invoked the test. Optional
    environmentMatrix: How the host machine(s) are configured. Required
    invalidMatrixDetails: Describes why the matrix is considered invalid. Only
      useful for matrices in the INVALID state. @OutputOnly
    projectId: The cloud project that owns the test matrix. @OutputOnly
    resultStorage: Where the results for the matrix are written. Required
    state: Indicates the current progress of the test matrix (e.g., FINISHED)
      @OutputOnly
    testExecutions: The list of test executions that the service creates for
      this matrix. @OutputOnly
    testMatrixId: Unique id set by the service. @OutputOnly
    testSpecification: How to run the test. Required
    timestamp: The time this test matrix was initially created. @OutputOnly
  """

  class InvalidMatrixDetailsValueValuesEnum(_messages.Enum):
    """Describes why the matrix is considered invalid. Only useful for
    matrices in the INVALID state. @OutputOnly

    Values:
      INVALID_MATRIX_DETAILS_UNSPECIFIED: Do not use. For proto versioning
        only.
      DETAILS_UNAVAILABLE: The matrix is INVALID, but there are no further
        details available.
      MALFORMED_APK: The input app APK could not be parsed.
      MALFORMED_TEST_APK: The input test APK could not be parsed.
      NO_MANIFEST: The AndroidManifest.xml could not be found.
      NO_PACKAGE_NAME: The APK manifest does not declare a package name.
      TEST_SAME_AS_APP: The test package and app package are the same.
      NO_INSTRUMENTATION: The test apk does not declare an instrumentation.
      NO_LAUNCHER_ACTIVITY: A main launcher activity could not be found.
      FORBIDDEN_PERMISSIONS: The app declares one or more permissions that are
        not allowed.
      INVALID_ROBO_DIRECTIVES: There is a conflict in the provided
        robo_directives.
    """
    INVALID_MATRIX_DETAILS_UNSPECIFIED = 0
    DETAILS_UNAVAILABLE = 1
    MALFORMED_APK = 2
    MALFORMED_TEST_APK = 3
    NO_MANIFEST = 4
    NO_PACKAGE_NAME = 5
    TEST_SAME_AS_APP = 6
    NO_INSTRUMENTATION = 7
    NO_LAUNCHER_ACTIVITY = 8
    FORBIDDEN_PERMISSIONS = 9
    INVALID_ROBO_DIRECTIVES = 10

  class StateValueValuesEnum(_messages.Enum):
    """Indicates the current progress of the test matrix (e.g., FINISHED)
    @OutputOnly

    Values:
      TEST_STATE_UNSPECIFIED: Do not use. For proto versioning only.
      VALIDATING: The execution or matrix is being validated.
      PENDING: The execution or matrix is waiting for resources to become
        available.
      RUNNING: The execution is currently being processed. Can only be set on
        an execution.
      FINISHED: The execution or matrix has terminated normally. On a matrix
        this means that the matrix level processing completed normally, but
        individual executions may be in an ERROR state.
      ERROR: The execution or matrix has stopped because it encountered an
        infrastructure failure.
      UNSUPPORTED_ENVIRONMENT: The execution was not run because it
        corresponds to a unsupported environment. Can only be set on an
        execution.
      INCOMPATIBLE_ENVIRONMENT: The execution was not run because the provided
        inputs are incompatible with the requested environment. Example:
        requested AndroidVersion is lower than APK's minSdkVersion Can only
        be set on an execution.
      INCOMPATIBLE_ARCHITECTURE: The execution was not run because the
        provided inputs are incompatible with the requested architecture.
        Example: requested device does not support running the native code in
        the supplied APK Can only be set on an execution.
      CANCELLED: The user cancelled the execution. Can only be set on an
        execution.
      INVALID: The execution or matrix was not run because the provided inputs
        are not valid. Examples: input file is not of the expected type, is
        malformed/corrupt, or was flagged as malware
    """
    TEST_STATE_UNSPECIFIED = 0
    VALIDATING = 1
    PENDING = 2
    RUNNING = 3
    FINISHED = 4
    ERROR = 5
    UNSUPPORTED_ENVIRONMENT = 6
    INCOMPATIBLE_ENVIRONMENT = 7
    INCOMPATIBLE_ARCHITECTURE = 8
    CANCELLED = 9
    INVALID = 10

  clientInfo = _messages.MessageField('ClientInfo', 1)
  environmentMatrix = _messages.MessageField('EnvironmentMatrix', 2)
  invalidMatrixDetails = _messages.EnumField('InvalidMatrixDetailsValueValuesEnum', 3)
  projectId = _messages.StringField(4)
  resultStorage = _messages.MessageField('ResultStorage', 5)
  state = _messages.EnumField('StateValueValuesEnum', 6)
  testExecutions = _messages.MessageField('TestExecution', 7, repeated=True)
  testMatrixId = _messages.StringField(8)
  testSpecification = _messages.MessageField('TestSpecification', 9)
  timestamp = _messages.StringField(10)
class TestSetup(_messages.Message):
  """A description of how to set up the device prior to running the test.

  Fields:
    account: The device will be logged in on this account for the duration of
      the test. Optional
    directoriesToPull: The directories on the device to upload to GCS at the
      end of the test; they must be absolute, whitelisted paths. Refer to
      RegularFile for whitelisted paths. Optional
    environmentVariables: Environment variables to set for the test.
    filesToPush: Optional
  """

  account = _messages.MessageField('Account', 1)
  directoriesToPull = _messages.StringField(2, repeated=True)
  environmentVariables = _messages.MessageField('EnvironmentVariable', 3, repeated=True)
  filesToPush = _messages.MessageField('DeviceFile', 4, repeated=True)
class TestSpecification(_messages.Message):
  """A description of how to run the test.

  Fields:
    androidInstrumentationTest: An Android instrumentation test.
    androidRoboTest: An Android robo test.
    autoGoogleLogin: Enables automatic Google account login. If set, the
      service will automatically generate a Google test account and add it to
      the device, before executing the test. Note that test accounts might be
      reused. Many applications show their full set of functionalities when an
      account is present on the device. Logging into the device with these
      generated accounts allows testing more functionalities. Default is
      false. Optional
    testSetup: Test setup requirements e.g. files to install, bootstrap
      scripts Optional
    testTimeout: Max time a test execution is allowed to run before it is
      automatically cancelled. Optional, default is 5 min.
  """

  androidInstrumentationTest = _messages.MessageField('AndroidInstrumentationTest', 1)
  androidRoboTest = _messages.MessageField('AndroidRoboTest', 2)
  autoGoogleLogin = _messages.BooleanField(3)
  testSetup = _messages.MessageField('TestSetup', 4)
  testTimeout = _messages.StringField(5)
class TestingProjectsTestMatricesCancelRequest(_messages.Message):
  """A TestingProjectsTestMatricesCancelRequest object.

  Fields:
    projectId: Cloud project that owns the test.
    testMatrixId: Test matrix that will be canceled.
  """

  projectId = _messages.StringField(1, required=True)
  testMatrixId = _messages.StringField(2, required=True)
class TestingProjectsTestMatricesCreateRequest(_messages.Message):
  """A TestingProjectsTestMatricesCreateRequest object.

  Fields:
    projectId: The GCE project under which this job will run.
    requestId: A string id used to detect duplicated requests. Ids are
      automatically scoped to a project, so users should ensure the ID is
      unique per-project. A UUID is recommended. Optional, but strongly
      recommended.
    testMatrix: A TestMatrix resource to be passed as the request body.
  """

  projectId = _messages.StringField(1, required=True)
  requestId = _messages.StringField(2)
  testMatrix = _messages.MessageField('TestMatrix', 3)
class TestingProjectsTestMatricesDeleteRequest(_messages.Message):
  """A TestingProjectsTestMatricesDeleteRequest object.

  Fields:
    projectId: Cloud project that owns the test.
    testMatrixId: Test matrix that will be deleted.
  """

  projectId = _messages.StringField(1, required=True)
  testMatrixId = _messages.StringField(2, required=True)
class TestingProjectsTestMatricesGetRequest(_messages.Message):
  """A TestingProjectsTestMatricesGetRequest object.

  Fields:
    projectId: Cloud project that owns the test matrix.
    testMatrixId: Unique test matrix id which was assigned by the service.
  """

  projectId = _messages.StringField(1, required=True)
  testMatrixId = _messages.StringField(2, required=True)
class TestingProjectsTestMatricesListRequest(_messages.Message):
  """A TestingProjectsTestMatricesListRequest object.

  Fields:
    projectId: Cloud project that owns the tests.
  """

  projectId = _messages.StringField(1, required=True)
class TestingTestEnvironmentCatalogGetRequest(_messages.Message):
  """A TestingTestEnvironmentCatalogGetRequest object.

  Enums:
    EnvironmentTypeValueValuesEnum: The type of environment that should be
      listed.

  Fields:
    environmentType: The type of environment that should be listed.
  """

  class EnvironmentTypeValueValuesEnum(_messages.Enum):
    """The type of environment that should be listed.

    Values:
      ENVIRONMENT_TYPE_UNSPECIFIED: <no description>
      ANDROID: <no description>
    """
    ENVIRONMENT_TYPE_UNSPECIFIED = 0
    ANDROID = 1

  environmentType = _messages.EnumField('EnvironmentTypeValueValuesEnum', 1, required=True)
class ToolResultsExecution(_messages.Message):
  """Represents a tool results execution resource. This has the results of a
  TestMatrix.

  Fields:
    executionId: A tool results execution ID. @OutputOnly
    historyId: A tool results history ID. @OutputOnly
    projectId: The cloud project that owns the tool results execution.
      @OutputOnly
  """

  executionId = _messages.StringField(1)
  historyId = _messages.StringField(2)
  projectId = _messages.StringField(3)
class ToolResultsHistory(_messages.Message):
  """Represents a tool results history resource.

  Fields:
    historyId: A tool results history ID. Required
    projectId: The cloud project that owns the tool results history. Required
  """

  historyId = _messages.StringField(1)
  projectId = _messages.StringField(2)
class ToolResultsStep(_messages.Message):
  """Represents a tool results step resource. This has the results of a
  TestExecution.

  Fields:
    executionId: A tool results execution ID. @OutputOnly
    historyId: A tool results history ID. @OutputOnly
    projectId: The cloud project that owns the tool results step. @OutputOnly
    stepId: A tool results step ID. @OutputOnly
  """

  executionId = _messages.StringField(1)
  historyId = _messages.StringField(2)
  projectId = _messages.StringField(3)
  stepId = _messages.StringField(4)
# Restore the JSON wire names that could not be expressed as Python
# identifiers: field 'f__xgafv' serializes as '$.xgafv', and the enum values
# '_1'/'_2' serialize as '1'/'2'.
encoding.AddCustomJsonFieldMapping(
    StandardQueryParameters, 'f__xgafv', '$.xgafv',
    package=u'testing')
encoding.AddCustomJsonEnumMapping(
    StandardQueryParameters.FXgafvValueValuesEnum, '_1', '1',
    package=u'testing')
encoding.AddCustomJsonEnumMapping(
    StandardQueryParameters.FXgafvValueValuesEnum, '_2', '2',
    package=u'testing')
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2014 Violin Memory, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Violin Memory tests for common driver functions
by Ryan Lucio
Senior Software Engineer
Violin Memory
Note: python documentation for unit testing can be found at
http://docs.python.org/2/library/unittest.html
Note: cinder documentation for development can be found at
http://docs.openstack.org/developer/cinder/devref/development.environment.html
"""
import mox
import unittest
# TODO(rdl): import and use test utils (cinder.tests.utils)
from cinder import context
from cinder.db.sqlalchemy import models
from cinder.volume import volume_types
from cinder.volume.drivers.violin.vxg.core.session import XGSession
from cinder.volume.drivers.violin.vxg.vshare.igroup import IGroupManager
from cinder.volume.drivers.violin.vxg.vshare.iscsi import ISCSIManager
from cinder.volume.drivers.violin.vxg.vshare.lun import LUNManager_3 as LUNManager
from cinder.volume.drivers.violin.vxg.vshare.snapshot import SnapshotManager
from cinder.volume.drivers.violin.vxg.vshare.vshare import VShare
from cinder.volume import configuration as conf
from cinder.volume.drivers.violin import v6000_common
class testV6000Common(unittest.TestCase):
"""A test class for the VMEM V6000 common driver module."""
    def setUp(self):
        # Build a mox-mocked VShare connection tree (basic/lun/iscsi/igroup/
        # snapshot managers) so no real array is contacted.
        self.m = mox.Mox()
        self.m_conn = self.m.CreateMock(VShare)
        self.m_conn.basic = self.m.CreateMock(XGSession)
        self.m_conn.lun = self.m.CreateMock(LUNManager)
        self.m_conn.iscsi = self.m.CreateMock(ISCSIManager)
        self.m_conn.igroup = self.m.CreateMock(IGroupManager)
        self.m_conn.snapshot = self.m.CreateMock(SnapshotManager)
        self.m_conn.version = '1.1.1'
        # Minimal driver configuration; values are placeholders only.
        self.config = mox.MockObject(conf.Configuration)
        self.config.append_config_values(mox.IgnoreArg())
        self.config.gateway_vip = '1.1.1.1'
        self.config.gateway_mga = '2.2.2.2'
        self.config.gateway_mgb = '3.3.3.3'
        self.config.gateway_user = 'admin'
        self.config.gateway_password = ''
        self.config.volume_backend_name = 'violin'
        self.config.use_igroups = False
        self.config.use_thin_luns = False
        self.config.san_is_local = False
        # All three gateway handles share the same mocked connection.
        self.driver = v6000_common.V6000CommonDriver(configuration=self.config)
        self.driver.vmem_vip = self.m_conn
        self.driver.vmem_mga = self.m_conn
        self.driver.vmem_mgb = self.m_conn
        self.driver.container = 'myContainer'
        self.driver.device_id = 'ata-VIOLIN_MEMORY_ARRAY_23109R00000022'
        self.stats = {}
        self.driver.gateway_fc_wwns = ['wwn.21:00:00:24:ff:45:fb:22',
                                       'wwn.21:00:00:24:ff:45:fb:23',
                                       'wwn.21:00:00:24:ff:45:f1:be',
                                       'wwn.21:00:00:24:ff:45:f1:bf',
                                       'wwn.21:00:00:24:ff:45:e2:30',
                                       'wwn.21:00:00:24:ff:45:e2:31',
                                       'wwn.21:00:00:24:ff:45:e2:5e',
                                       'wwn.21:00:00:24:ff:45:e2:5f']
        # Canned volume/snapshot fixtures reused by the individual tests.
        self.volume1 = mox.MockObject(models.Volume)
        self.volume1.id = '3d31af29-6d7d-443f-b451-6f0040d3c9a9'
        self.volume1.size = 1
        self.volume2 = mox.MockObject(models.Volume)
        self.volume2.id = '4c1af784-b328-43d2-84c8-db02158b922d'
        self.volume2.size = 2
        self.snapshot1 = mox.MockObject(models.Snapshot)
        self.snapshot1.name = 'snap-01'
        self.snapshot1.snapshot_id = 'f8849c41-6d72-4f5a-8339-2cd6b52b5e5a'
        self.snapshot1.volume_id = 1
        self.snapshot1.volume_name = 'vol-01'
        self.snapshot2 = mox.MockObject(models.Snapshot)
        self.snapshot2.name = 'snap-02'
        self.snapshot2.snapshot_id = '23e44fad-8840-46f1-99d3-5605a08fb289'
        self.snapshot2.volume_id = 2
        self.snapshot2.volume_name = 'vol-02'
    def tearDown(self):
        # Undo every StubOutWithMock() so later tests see the real methods.
        self.m.UnsetStubs()
def testCheckForSetupError(self):
bn1 = ("/vshare/state/local/container/%s/threshold/usedspace"
"/threshold_hard_val" % self.driver.container)
bn2 = ("/vshare/state/local/container/%s/threshold/provision"
"/threshold_hard_val" % self.driver.container)
bn_thresholds = {bn1: 0, bn2: 100}
self.m.StubOutWithMock(self.driver, '_is_supported_vmos_version')
self.driver._is_supported_vmos_version(mox.IsA(str)).AndReturn(True)
self.m_conn.basic.get_node_values([bn1, bn2]).AndReturn(bn_thresholds)
self.m.ReplayAll()
self.assertTrue(self.driver.check_for_setup_error() is None)
self.m.VerifyAll()
    def testCheckForSetupError_NoContainer(self):
        """An empty container name must raise InvalidBackendConfig."""
        self.driver.container = ""
        self.assertRaises(v6000_common.InvalidBackendConfig,
                          self.driver.check_for_setup_error)
def testCheckForSetupError_InvalidUsedSpaceThreshold(self):
bn1 = ("/vshare/state/local/container/%s/threshold/usedspace"
"/threshold_hard_val" % self.driver.container)
bn2 = ("/vshare/state/local/container/%s/threshold/provision"
"/threshold_hard_val" % self.driver.container)
bn_thresholds = {bn1: 99, bn2: 100}
self.m.StubOutWithMock(self.driver, '_is_supported_vmos_version')
self.driver._is_supported_vmos_version(mox.IsA(str)).AndReturn(True)
self.m_conn.basic.get_node_values([bn1, bn2]).AndReturn(bn_thresholds)
self.m.ReplayAll()
self.assertRaises(v6000_common.InvalidBackendConfig,
self.driver.check_for_setup_error)
self.m.VerifyAll()
def testCheckForSetupError_InvalidProvisionedSpaceThreshold(self):
bn1 = ("/vshare/state/local/container/%s/threshold/usedspace"
"/threshold_hard_val" % self.driver.container)
bn2 = ("/vshare/state/local/container/%s/threshold/provision"
"/threshold_hard_val" % self.driver.container)
bn_thresholds = {bn1: 0, bn2: 99}
self.m.StubOutWithMock(self.driver, '_is_supported_vmos_version')
self.driver._is_supported_vmos_version(mox.IsA(str)).AndReturn(True)
self.m_conn.basic.get_node_values([bn1, bn2]).AndReturn(bn_thresholds)
self.m.ReplayAll()
self.assertRaises(v6000_common.InvalidBackendConfig,
self.driver.check_for_setup_error)
self.m.VerifyAll()
def testCreateVolume(self):
volume = self.volume1
self.m.StubOutWithMock(self.driver, '_create_lun')
self.driver._create_lun(volume)
self.m.ReplayAll()
self.assertTrue(self.driver.create_volume(volume) is None)
self.m.VerifyAll()
def testDeleteVolume(self):
volume = self.volume1
self.m.StubOutWithMock(self.driver, '_delete_lun')
self.driver._delete_lun(volume)
self.m.ReplayAll()
self.assertTrue(self.driver.delete_volume(volume) is None)
self.m.VerifyAll()
def testCreateSnapshot(self):
snapshot = self.snapshot1
self.m.StubOutWithMock(self.driver, '_create_lun_snapshot')
self.driver._create_lun_snapshot(snapshot)
self.m.ReplayAll()
self.assertTrue(self.driver.create_snapshot(snapshot) is None)
self.m.VerifyAll()
def testDeleteSnapshot(self):
snapshot = self.snapshot1
self.m.StubOutWithMock(self.driver, '_delete_lun_snapshot')
self.driver._delete_lun_snapshot(snapshot)
self.m.ReplayAll()
self.assertTrue(self.driver.delete_snapshot(snapshot) is None)
self.m.VerifyAll()
#def testCreateVolumeFromSnapshot(self):
# src_snap = self.snapshot1
# dest_vol = self.volume2
# self.m.StubOutWithMock(self.driver, '_create_lun')
# self.m.StubOutWithMock(self.driver, 'copy_volume_data')
# self.driver._create_lun(dest_vol)
# self.driver.copy_volume_data(self.driver.context, dest_vol, src_snap)
# self.m.ReplayAll()
# self.assertTrue(self.driver.create_volume_from_snapshot
# (dest_vol, src_snap) is None)
# self.m.VerifyAll()
def testCreateClonedVolume(self):
src_vol = self.volume1
dest_vol = self.volume2
self.m.StubOutWithMock(self.driver, '_create_lun')
self.m.StubOutWithMock(self.driver, 'copy_volume_data')
self.driver._create_lun(dest_vol)
self.driver.copy_volume_data(self.driver.context, src_vol, dest_vol)
self.m.ReplayAll()
self.assertTrue(self.driver.create_cloned_volume
(src_vol, dest_vol) is None)
self.m.VerifyAll()
def testExtendVolume(self):
volume = self.volume1
new_volume_size = 10
response = {'code': 0, 'message': 'Success '}
self.m.StubOutWithMock(self.driver, '_send_cmd')
self.driver._send_cmd(self.m_conn.lun.resize_lun,
mox.IsA(str),
self.driver.container, volume['id'],
new_volume_size).AndReturn(response)
self.m.ReplayAll()
self.assertTrue(self.driver.extend_volume(volume, new_volume_size)
is None)
self.m.VerifyAll()
def testExtendVolume_NewSizeIsTooSmall(self):
volume = self.volume1
new_volume_size = 0
response = {'code': 14036, 'message': 'Failure'}
self.m.StubOutWithMock(self.driver, '_send_cmd')
self.driver._send_cmd(self.m_conn.lun.resize_lun,
mox.IsA(str),
self.driver.container, volume['id'],
new_volume_size
).AndRaise(v6000_common.ViolinBackendErr())
self.m.ReplayAll()
self.assertRaises(v6000_common.ViolinBackendErr,
self.driver.extend_volume, volume, new_volume_size)
self.m.VerifyAll()
def testCreateLun(self):
volume = self.volume1
response = {'code': 0, 'message': 'LUN create: success!'}
self.m.StubOutWithMock(self.driver, '_send_cmd')
self.driver._send_cmd(self.m_conn.lun.create_lun,
mox.IsA(str),
self.driver.container, volume['id'],
volume['size'], 1, "0", "0", "w", 1,
512, False, False, None).AndReturn(response)
self.m.ReplayAll()
self.assertTrue(self.driver._create_lun(volume) is None)
self.m.VerifyAll()
def testCreateLun_LunAlreadyExists(self):
volume = self.volume1
response = {'code': 0, 'message': 'LUN create: success!'}
self.m.StubOutWithMock(self.driver, '_send_cmd')
self.driver._send_cmd(self.m_conn.lun.create_lun,
mox.IsA(str),
self.driver.container, volume['id'],
volume['size'], 1, "0", "0", "w", 1,
512, False, False, None
).AndRaise(v6000_common.ViolinBackendErrExists())
self.m.ReplayAll()
self.assertTrue(self.driver._create_lun(volume) is None)
self.m.VerifyAll()
def testCreateLun_CreateFailsWithException(self):
volume = self.volume1
response = {'code': 0, 'message': 'LUN create: success!'}
exception = v6000_common.ViolinBackendErr
self.m.StubOutWithMock(self.driver, '_send_cmd')
self.driver._send_cmd(self.m_conn.lun.create_lun, mox.IsA(str),
self.driver.container, volume['id'],
volume['size'], 1, "0", "0", "w", 1,
512, False, False, None
).AndRaise(exception('failed'))
self.m.ReplayAll()
self.assertRaises(exception, self.driver._create_lun, volume)
self.m.VerifyAll()
def testDeleteLun(self):
volume = self.volume1
response = {'code': 0, 'message': 'lun deletion started'}
success_msgs = ['lun deletion started', '']
self.m.StubOutWithMock(self.driver, '_send_cmd')
self.m.StubOutWithMock(self.driver.lun_tracker,
'free_lun_id_for_volume')
self.driver._send_cmd(self.m_conn.lun.bulk_delete_luns,
success_msgs,
self.driver.container,
volume['id']).AndReturn(response)
self.driver.lun_tracker.free_lun_id_for_volume(volume)
self.m.ReplayAll()
self.assertTrue(self.driver._delete_lun(volume) is None)
self.m.VerifyAll()
def testDeleteLun_EmptyResponseMessage(self):
volume = self.volume1
response = {'code': 0, 'message': ''}
success_msgs = ['lun deletion started', '']
self.m.StubOutWithMock(self.driver, '_send_cmd')
self.m.StubOutWithMock(self.driver.lun_tracker,
'free_lun_id_for_volume')
self.driver._send_cmd(self.m_conn.lun.bulk_delete_luns,
success_msgs,
self.driver.container,
volume['id']).AndReturn(response)
self.driver.lun_tracker.free_lun_id_for_volume(volume)
self.m.ReplayAll()
self.assertTrue(self.driver._delete_lun(volume) is None)
self.m.VerifyAll()
def testDeleteLun_LunAlreadyDeleted(self):
volume = self.volume1
response = {'code': 0, 'message': 'lun deletion started'}
success_msgs = ['lun deletion started', '']
self.m.StubOutWithMock(self.driver, '_send_cmd')
self.m.StubOutWithMock(self.driver.lun_tracker,
'free_lun_id_for_volume')
self.driver._send_cmd(self.m_conn.lun.bulk_delete_luns,
success_msgs,
self.driver.container,
volume['id']
).AndRaise(v6000_common.ViolinBackendErrNotFound)
self.driver.lun_tracker.free_lun_id_for_volume(volume)
self.m.ReplayAll()
self.assertTrue(self.driver._delete_lun(volume) is None)
self.m.VerifyAll()
def testDeleteLun_DeleteFailsWithException(self):
volume = self.volume1
response = {'code': 0, 'message': 'lun deletion started'}
success_msgs = ['lun deletion started', '']
exception = v6000_common.ViolinBackendErr
self.m.StubOutWithMock(self.driver, '_send_cmd')
self.driver._send_cmd(self.m_conn.lun.bulk_delete_luns,
success_msgs,
self.driver.container, volume['id']
).AndRaise(exception('failed!'))
self.m.ReplayAll()
self.assertRaises(exception, self.driver._delete_lun, volume)
self.m.VerifyAll()
def testCreateLunSnapshot(self):
snapshot = self.snapshot1
response = {'code': 0, 'message': 'success'}
self.m.StubOutWithMock(self.driver, '_send_cmd')
self.driver._send_cmd(self.m_conn.snapshot.create_lun_snapshot,
mox.IsA(str),
self.driver.container,
snapshot['volume_id'],
snapshot['id']).AndReturn(response)
self.m.ReplayAll()
self.assertTrue(self.driver._create_lun_snapshot(snapshot) is None)
self.m.VerifyAll()
def testDeleteLunSnapshot(self):
snapshot = self.snapshot1
response = {'code': 0, 'message': 'success'}
self.m.StubOutWithMock(self.driver, '_send_cmd')
self.m.StubOutWithMock(self.driver.lun_tracker,
'free_lun_id_for_snapshot')
self.driver._send_cmd(self.m_conn.snapshot.delete_lun_snapshot,
mox.IsA(str),
self.driver.container,
snapshot['volume_id'],
snapshot['id']).AndReturn(response)
self.driver.lun_tracker.free_lun_id_for_snapshot(snapshot)
self.m.ReplayAll()
self.assertTrue(self.driver._delete_lun_snapshot(snapshot) is None)
self.m.VerifyAll()
def testSendCmd(self):
request_func = self.m.CreateMockAnything()
success_msg = 'success'
request_args = ['arg1', 'arg2', 'arg3']
response = {'code': 0, 'message': 'success'}
request_func(request_args).AndReturn(response)
self.m.ReplayAll()
self.assertEqual(self.driver._send_cmd
(request_func, success_msg, request_args),
response)
self.m.VerifyAll()
    def testSendCmd_RequestTimedout(self):
        '''The retry timeout is hit: with request_timeout forced to 0,
        _send_cmd must give up immediately and raise RequestRetryTimeout
        without ever invoking the callback.'''
        request_func = self.m.CreateMockAnything()
        success_msg = 'success'
        request_args = ['arg1', 'arg2', 'arg3']
        # Zero timeout -> the retry window expires before the first attempt.
        self.driver.request_timeout = 0
        self.m.ReplayAll()
        self.assertRaises(v6000_common.RequestRetryTimeout,
                          self.driver._send_cmd,
                          request_func, success_msg, request_args)
        self.m.VerifyAll()
    def testSendCmd_ResponseHasNoMessage(self):
        '''The callback response dict has a NULL message field: _send_cmd
        must treat that as inconclusive and retry until it gets a real
        success message.'''
        request_func = self.m.CreateMockAnything()
        success_msg = 'success'
        request_args = ['arg1', 'arg2', 'arg3']
        response1 = {'code': 0, 'message': None}
        response2 = {'code': 0, 'message': 'success'}
        # Record: first attempt returns no message, second succeeds.
        request_func(request_args).AndReturn(response1)
        request_func(request_args).AndReturn(response2)
        self.m.ReplayAll()
        self.assertEqual(self.driver._send_cmd
                         (request_func, success_msg, request_args),
                         response2)
        self.m.VerifyAll()
    def testSendCmd_ResponseHasFatalError(self):
        '''The callback response dict contains a fatal error code:
        _send_cmd must raise ViolinBackendErr instead of retrying.'''
        request_func = self.m.CreateMockAnything()
        success_msg = 'success'
        request_args = ['arg1', 'arg2', 'arg3']
        # 14000 is treated as a fatal (non-retryable) backend error code.
        response = {'code': 14000, 'message': 'try again later.'}
        request_func(request_args).AndReturn(response)
        self.m.ReplayAll()
        self.assertRaises(v6000_common.ViolinBackendErr,
                          self.driver._send_cmd,
                          request_func, success_msg, request_args)
        self.m.VerifyAll()
    def testGetIgroup(self):
        """_get_igroup reuses an existing igroup named after the
        connector's host when the config node already exists."""
        volume = self.volume1
        connector = {'host': 'h1',
                     'wwpns': [u'50014380186b3f65', u'50014380186b3f67']}
        bn = '/vshare/config/igroup/%s' % connector['host']
        # Non-empty node lookup -> the igroup already exists on the array.
        resp = {bn: connector['host']}
        self.m_conn.basic.get_node_values(bn).AndReturn(resp)
        self.m.ReplayAll()
        self.assertEqual(self.driver._get_igroup(volume, connector),
                         connector['host'])
        self.m.VerifyAll()
    def testGetIgroup_WithNewName(self):
        """_get_igroup creates a new igroup (named after the host) when
        the config node lookup comes back empty."""
        volume = self.volume1
        connector = {'host': 'h1',
                     'wwpns': [u'50014380186b3f65', u'50014380186b3f67']}
        bn = '/vshare/config/igroup/%s' % connector['host']
        # Empty node lookup -> no igroup yet; expect a create call.
        resp = {}
        self.m_conn.basic.get_node_values(bn).AndReturn(resp)
        self.m_conn.igroup.create_igroup(connector['host'])
        self.m.ReplayAll()
        self.assertEqual(self.driver._get_igroup(volume, connector),
                         connector['host'])
        self.m.VerifyAll()
    def testGetVolumeTypeExtraSpec(self):
        """An extra-spec stored under the 'override:' prefix is found and
        returned by its unprefixed key name."""
        volume = {'volume_type_id': 1}
        volume_type = {'extra_specs': {'override:test_key': 'test_value'}}
        self.m.StubOutWithMock(context, 'get_admin_context')
        self.m.StubOutWithMock(volume_types, 'get_volume_type')
        context.get_admin_context().AndReturn(None)
        volume_types.get_volume_type(None, 1).AndReturn(volume_type)
        self.m.ReplayAll()
        result = self.driver._get_volume_type_extra_spec(volume, 'test_key')
        self.assertEqual(result, 'test_value')
        self.m.VerifyAll()
    def testGetVolumeTypeExtraSpec_NoVolumeType(self):
        """A volume with no volume type yields None (no spec lookup)."""
        volume = {'volume_type_id': None}
        self.m.StubOutWithMock(context, 'get_admin_context')
        context.get_admin_context().AndReturn(None)
        self.m.ReplayAll()
        result = self.driver._get_volume_type_extra_spec(volume, 'test_key')
        self.assertEqual(result, None)
        self.m.VerifyAll()
    def testGetVolumeTypeExtraSpec_NoExtraSpecs(self):
        """A volume type with an empty extra_specs dict yields None."""
        volume = {'volume_type_id': 1}
        volume_type = {'extra_specs': {}}
        self.m.StubOutWithMock(context, 'get_admin_context')
        self.m.StubOutWithMock(volume_types, 'get_volume_type')
        context.get_admin_context().AndReturn(None)
        volume_types.get_volume_type(None, 1).AndReturn(volume_type)
        self.m.ReplayAll()
        result = self.driver._get_volume_type_extra_spec(volume, 'test_key')
        self.assertEqual(result, None)
        self.m.VerifyAll()
    def testGetVolumeTypeExtraSpec_NoOverridePrefixInExtraSpecKey(self):
        """A spec stored WITHOUT the 'override:' prefix is still matched
        by its bare key, so the lookup remains backward compatible."""
        volume = {'volume_type_id': 1}
        volume_type = {'extra_specs': {'test_key': 'test_value'}}
        self.m.StubOutWithMock(context, 'get_admin_context')
        self.m.StubOutWithMock(volume_types, 'get_volume_type')
        context.get_admin_context().AndReturn(None)
        volume_types.get_volume_type(None, 1).AndReturn(volume_type)
        self.m.ReplayAll()
        result = self.driver._get_volume_type_extra_spec(volume, 'test_key')
        self.assertEqual(result, 'test_value')
        self.m.VerifyAll()
    def testWaitForExportState(self):
        """_wait_for_exportstate returns True once the export config node
        for the LUN is present (polled via get_node_values)."""
        bn = '/vshare/config/export/container/myContainer/lun/vol-01'
        resp = {'/vshare/config/export/container/myContainer/lun/vol-01':
                'vol-01'}
        # Two recorded polls: the implementation reads the node twice.
        self.m_conn.basic.get_node_values(bn).AndReturn(resp)
        self.m_conn.basic.get_node_values(bn).AndReturn(resp)
        self.m.ReplayAll()
        self.assertTrue(self.driver._wait_for_exportstate('vol-01', True))
        self.m.VerifyAll()
    def testWaitForExportState_NoState(self):
        """Waiting for the un-exported state succeeds when the export
        node lookup returns an empty dict."""
        self.m_conn.basic.get_node_values(mox.IsA(str)).AndReturn({})
        self.m_conn.basic.get_node_values(mox.IsA(str)).AndReturn({})
        self.m.ReplayAll()
        self.assertTrue(self.driver._wait_for_exportstate("vol-01", False))
        self.m.VerifyAll()
def test_is_supported_vmos_version(self):
version = 'V6.3.1'
self.assertTrue(self.driver._is_supported_vmos_version(version))
def testIsSupportedVMOSVersion_SupportedFutureVersion(self):
version = 'V6.3.7'
self.assertTrue(self.driver._is_supported_vmos_version(version))
def testIsSupportedVmosVersion_UnsupportedPastVMOSVersion(self):
version = 'G5.5.2'
self.assertFalse(self.driver._is_supported_vmos_version(version))
def testIsSupportedVMOSVersion_UnsupportedFutureVersion(self):
version = 'V7.0.0'
self.assertFalse(self.driver._is_supported_vmos_version(version))
    def testFatalErrorCode(self):
        """Placeholder for _fatal_error_code coverage.

        NYI: not yet implemented; kept so the intended coverage gap is
        visible in the test listing.
        """
        # NYI
        #
        pass
|
|
from typing import Any, Dict, List, Mapping, Set
import orjson
from zerver.lib.test_classes import ZulipTestCase
from zerver.lib.user_status import get_user_info_dict, update_user_status
from zerver.models import UserProfile, UserStatus, get_client
def get_away_user_ids(realm_id: int) -> Set[int]:
    """Return the ids of every user in the realm whose stored status
    record has a truthy "away" flag."""
    info_by_user = get_user_info_dict(realm_id)
    away_ids: Set[int] = set()
    for user_id_str, record in info_by_user.items():
        if record.get("away"):
            away_ids.add(int(user_id_str))
    return away_ids
def user_info(user: UserProfile) -> Dict[str, Any]:
    """Return the status record for *user* from the realm-wide info
    dict, or an empty dict when the user has no stored status."""
    realm_info = get_user_info_dict(user.realm_id)
    return realm_info.get(str(user.id), {})
class UserStatusTest(ZulipTestCase):
    """Tests for user "away" status and status text: the storage layer
    (update_user_status/get_user_info_dict) and the REST endpoint."""

    def test_basics(self) -> None:
        """Exercise update_user_status directly: away flag, status text,
        one-record-per-user semantics, and realm isolation."""
        cordelia = self.example_user("cordelia")
        hamlet = self.example_user("hamlet")
        king_lear = self.lear_user("king")
        realm_id = hamlet.realm_id
        away_user_ids = get_away_user_ids(realm_id=realm_id)
        self.assertEqual(away_user_ids, set())
        client1 = get_client("web")
        client2 = get_client("ZT")
        update_user_status(
            user_profile_id=hamlet.id,
            status=UserStatus.AWAY,
            status_text=None,
            client_id=client1.id,
        )
        away_user_ids = get_away_user_ids(realm_id=realm_id)
        self.assertEqual(away_user_ids, {hamlet.id})
        # Test that second client just updates
        # the record.  We only store one record
        # per user.  The user's status transcends
        # clients; we only store the client for
        # reference and to maybe reconcile timeout
        # situations.
        update_user_status(
            user_profile_id=hamlet.id,
            status=UserStatus.AWAY,
            status_text="out to lunch",
            client_id=client2.id,
        )
        self.assertEqual(
            user_info(hamlet),
            dict(away=True, status_text="out to lunch"),
        )
        away_user_ids = get_away_user_ids(realm_id=realm_id)
        self.assertEqual(away_user_ids, {hamlet.id})
        rec_count = UserStatus.objects.filter(user_profile_id=hamlet.id).count()
        self.assertEqual(rec_count, 1)
        # Setting status_text to None causes it to be ignored
        # (the previous text is retained).
        update_user_status(
            user_profile_id=hamlet.id,
            status=UserStatus.NORMAL,
            status_text=None,
            client_id=client2.id,
        )
        self.assertEqual(
            user_info(hamlet),
            dict(status_text="out to lunch"),
        )
        # Clear the status_text now (empty string clears, unlike None).
        update_user_status(
            user_profile_id=hamlet.id,
            status=None,
            status_text="",
            client_id=client2.id,
        )
        self.assertEqual(
            user_info(hamlet),
            {},
        )
        away_user_ids = get_away_user_ids(realm_id=realm_id)
        self.assertEqual(away_user_ids, set())
        # Now set away status for three different users across
        # two realms.
        update_user_status(
            user_profile_id=hamlet.id,
            status=UserStatus.AWAY,
            status_text=None,
            client_id=client1.id,
        )
        update_user_status(
            user_profile_id=cordelia.id,
            status=UserStatus.AWAY,
            status_text=None,
            client_id=client2.id,
        )
        update_user_status(
            user_profile_id=king_lear.id,
            status=UserStatus.AWAY,
            status_text=None,
            client_id=client2.id,
        )
        # Each realm's query only sees its own users.
        away_user_ids = get_away_user_ids(realm_id=realm_id)
        self.assertEqual(away_user_ids, {cordelia.id, hamlet.id})
        away_user_ids = get_away_user_ids(realm_id=king_lear.realm.id)
        self.assertEqual(away_user_ids, {king_lear.id})
        # Set Hamlet to NORMAL but in a meeting.
        update_user_status(
            user_profile_id=hamlet.id,
            status=UserStatus.NORMAL,
            status_text="in a meeting",
            client_id=client2.id,
        )
        self.assertEqual(
            user_info(hamlet),
            dict(status_text="in a meeting"),
        )
        away_user_ids = get_away_user_ids(realm_id=realm_id)
        self.assertEqual(away_user_ids, {cordelia.id})

    def update_status_and_assert_event(
        self, payload: Dict[str, Any], expected_event: Dict[str, Any]
    ) -> None:
        """POST *payload* to the status endpoint and assert exactly one
        event matching *expected_event* is sent to clients."""
        events: List[Mapping[str, Any]] = []
        with self.tornado_redirected_to_list(events, expected_num_events=1):
            result = self.client_post("/json/users/me/status", payload)
        self.assert_json_success(result)
        self.assertEqual(events[0]["event"], expected_event)

    def test_endpoints(self) -> None:
        """Exercise /json/users/me/status: validation errors, away
        toggling, text trimming/clearing, and the emitted events."""
        hamlet = self.example_user("hamlet")
        realm_id = hamlet.realm_id
        self.login_user(hamlet)
        # Try to omit parameter--this should be an error.
        payload: Dict[str, Any] = {}
        result = self.client_post("/json/users/me/status", payload)
        self.assert_json_error(result, "Client did not pass any new values.")
        # Try a long message (limit is 60 characters).
        long_text = "x" * 61
        payload = dict(status_text=long_text)
        result = self.client_post("/json/users/me/status", payload)
        self.assert_json_error(result, "status_text is too long (limit: 60 characters)")
        self.update_status_and_assert_event(
            payload=dict(
                away=orjson.dumps(True).decode(),
                status_text="on vacation",
            ),
            expected_event=dict(
                type="user_status", user_id=hamlet.id, away=True, status_text="on vacation"
            ),
        )
        self.assertEqual(
            user_info(hamlet),
            dict(away=True, status_text="on vacation"),
        )
        # Now revoke "away" status.
        self.update_status_and_assert_event(
            payload=dict(away=orjson.dumps(False).decode()),
            expected_event=dict(type="user_status", user_id=hamlet.id, away=False),
        )
        away_user_ids = get_away_user_ids(realm_id=realm_id)
        self.assertEqual(away_user_ids, set())
        # And now just update your info.
        # The server will trim the whitespace here.
        self.update_status_and_assert_event(
            payload=dict(status_text="  in office  "),
            expected_event=dict(type="user_status", user_id=hamlet.id, status_text="in office"),
        )
        self.assertEqual(
            user_info(hamlet),
            dict(status_text="in office"),
        )
        # And finally clear your info.
        self.update_status_and_assert_event(
            payload=dict(status_text=""),
            expected_event=dict(type="user_status", user_id=hamlet.id, status_text=""),
        )
        self.assertEqual(
            get_user_info_dict(realm_id=realm_id),
            {},
        )
        # Turn on "away" status again.
        self.update_status_and_assert_event(
            payload=dict(away=orjson.dumps(True).decode()),
            expected_event=dict(type="user_status", user_id=hamlet.id, away=True),
        )
        away_user_ids = get_away_user_ids(realm_id=realm_id)
        self.assertEqual(away_user_ids, {hamlet.id})
        # And set status text while away.
        self.update_status_and_assert_event(
            payload=dict(status_text="   at the beach  "),
            expected_event=dict(type="user_status", user_id=hamlet.id, status_text="at the beach"),
        )
        self.assertEqual(
            user_info(hamlet),
            dict(status_text="at the beach", away=True),
        )
        away_user_ids = get_away_user_ids(realm_id=realm_id)
        self.assertEqual(away_user_ids, {hamlet.id})
|
|
import os
import re
from peewee import *
from peewee import create_model_tables
from peewee import drop_model_tables
from peewee import mysql
from peewee import print_
from playhouse.reflection import *
from playhouse.tests.base import database_initializer
from playhouse.tests.base import PeeweeTestCase
# SQLite is always available and is the primary test database.
sqlite_db = database_initializer.get_database('sqlite')
DATABASES = [sqlite_db]
# Add MySQL and Postgres backends only when their drivers are installed,
# so the generative tests run against whatever is present locally.
if mysql:
    DATABASES.append(database_initializer.get_database('mysql'))
try:
    import psycopg2
    DATABASES.append(database_initializer.get_database('postgres'))
except ImportError:
    pass
class BaseModel(Model):
    """Base for all fixture models; binds them to the SQLite test db."""
    class Meta:
        database = sqlite_db
class ColTypes(BaseModel):
    """Fixture exercising one column of every core field type, plus
    single-column and composite indexes, for introspection tests."""
    f1 = BigIntegerField(index=True)
    f2 = BlobField()
    f3 = BooleanField()
    f4 = CharField(max_length=50)
    f5 = DateField()
    f6 = DateTimeField()
    f7 = DecimalField()
    f8 = DoubleField()
    f9 = FloatField()
    f10 = IntegerField(unique=True)
    f11 = PrimaryKeyField()
    f12 = TextField()
    f13 = TimeField()

    class Meta:
        # (columns, unique) pairs: one unique and one non-unique
        # composite index.
        indexes = (
            (('f10', 'f11'), True),
            (('f11', 'f12', 'f13'), False),
        )
class Nullable(BaseModel):
    """Fixture with nullable columns for NULL-constraint introspection."""
    nullable_cf = CharField(null=True)
    nullable_if = IntegerField(null=True)
class RelModel(BaseModel):
    """Fixture with required and nullable foreign keys to ColTypes."""
    col_types = ForeignKeyField(ColTypes, related_name='foo')
    col_types_nullable = ForeignKeyField(ColTypes, null=True)
class FKPK(BaseModel):
    """Fixture whose primary key is itself a foreign key."""
    col_types = ForeignKeyField(ColTypes, primary_key=True)
class Underscores(BaseModel):
    """Fixture with leading-underscore column names (name-mangling edge)."""
    _id = PrimaryKeyField()
    _name = CharField()
class Category(BaseModel):
    """Fixture with a self-referential (tree) foreign key."""
    name = CharField(max_length=10)
    parent = ForeignKeyField('self', null=True)
class Nugget(BaseModel):
    """Fixture where an FK's db_column collides with another field name."""
    category_id = ForeignKeyField(Category, db_column='category_id')
    category = CharField()
# All fixture models, in dependency order for table creation.
MODELS = (
    ColTypes,
    Nullable,
    RelModel,
    FKPK,
    Underscores,
    Category,
    Nugget)
class TestReflection(PeeweeTestCase):
    """Tests for playhouse.reflection: generating model classes and raw
    introspection metadata from live database schemas."""

    def setUp(self):
        """Start from a fresh SQLite file and rebind all fixture models."""
        super(TestReflection, self).setUp()
        if os.path.exists(sqlite_db.database):
            os.unlink(sqlite_db.database)
        sqlite_db.connect()
        for model in MODELS:
            model._meta.database = sqlite_db

    def tearDown(self):
        sqlite_db.close()

    def test_generate_models(self):
        """generate_models() builds model classes whose fields, types and
        relations mirror the created tables."""
        introspector = self.get_introspector()
        self.assertEqual(introspector.generate_models(), {})
        for model in MODELS:
            model.create_table()
        models = introspector.generate_models()
        self.assertEqual(sorted(models.keys()), [
            'category',
            'coltypes',
            'fkpk',
            'nugget',
            'nullable',
            'relmodel',
            'underscores'])

        # Local helper (closes over self); intentionally shadows
        # unittest's assertIsInstance inside this test only.
        def assertIsInstance(obj, klass):
            self.assertTrue(isinstance(obj, klass))

        category = models['category']
        self.assertEqual(
            sorted(category._meta.fields),
            ['id', 'name', 'parent'])
        assertIsInstance(category.id, PrimaryKeyField)
        assertIsInstance(category.name, CharField)
        assertIsInstance(category.parent, ForeignKeyField)
        # Self-referential FK must point back at the generated class.
        self.assertEqual(category.parent.rel_model, category)

        fkpk = models['fkpk']
        self.assertEqual(sorted(fkpk._meta.fields), ['col_types'])
        assertIsInstance(fkpk.col_types, ForeignKeyField)
        self.assertEqual(fkpk.col_types.rel_model, models['coltypes'])
        self.assertTrue(fkpk.col_types.primary_key)

        relmodel = models['relmodel']
        self.assertEqual(
            sorted(relmodel._meta.fields),
            ['col_types', 'col_types_nullable', 'id'])
        assertIsInstance(relmodel.col_types, ForeignKeyField)
        assertIsInstance(relmodel.col_types_nullable, ForeignKeyField)
        self.assertFalse(relmodel.col_types.null)
        self.assertTrue(relmodel.col_types_nullable.null)
        self.assertEqual(relmodel.col_types.rel_model,
                         models['coltypes'])
        self.assertEqual(relmodel.col_types_nullable.rel_model,
                         models['coltypes'])

    def test_generate_models_indexes(self):
        """Index/unique flags and composite indexes survive a round trip
        through the database schema."""
        introspector = self.get_introspector()
        self.assertEqual(introspector.generate_models(), {})
        for model in MODELS:
            model.create_table()
        models = introspector.generate_models()
        self.assertEqual(models['fkpk']._meta.indexes, [])
        self.assertEqual(models['relmodel']._meta.indexes, [])
        self.assertEqual(models['category']._meta.indexes, [])
        col_types = models['coltypes']
        indexed = set(['f1'])
        unique = set(['f10'])
        for field in col_types._meta.sorted_fields:
            self.assertEqual(field.index, field.name in indexed)
            self.assertEqual(field.unique, field.name in unique)
        indexes = col_types._meta.indexes
        self.assertEqual(sorted(indexes), [
            (['f10', 'f11'], True),
            (['f11', 'f12', 'f13'], False),
        ])

    def test_table_subset(self):
        """generate_models(table_names=...) restricts output and silently
        ignores unknown table names."""
        for model in MODELS:
            model.create_table()
        introspector = self.get_introspector()
        models = introspector.generate_models(table_names=[
            'category',
            'coltypes',
            'foobarbaz'])
        self.assertEqual(sorted(models.keys()), ['category', 'coltypes'])

    def test_sqlite_fk_re(self):
        """The SQLite FK regex matches the DDL spellings sqlite emits:
        quoted/bare/bracketed identifiers, inline and table-level FKs."""
        user_id_tests = [
            'FOREIGN KEY("user_id") REFERENCES "users"("id")',
            'FOREIGN KEY(user_id) REFERENCES users(id)',
            'FOREIGN KEY  ([user_id])  REFERENCES  [users]  ([id])',
            '"user_id" NOT NULL REFERENCES "users" ("id")',
            'user_id not null references users (id)',
        ]
        fk_pk_tests = [
            ('"col_types_id" INTEGER NOT NULL PRIMARY KEY REFERENCES '
             '"coltypes" ("f11")'),
            'FOREIGN KEY ("col_types_id") REFERENCES "coltypes" ("f11")',
        ]
        regex = SqliteMetadata.re_foreign_key
        for test in user_id_tests:
            match = re.search(regex, test, re.I)
            self.assertEqual(match.groups(), (
                'user_id', 'users', 'id',
            ))
        for test in fk_pk_tests:
            match = re.search(regex, test, re.I)
            self.assertEqual(match.groups(), (
                'col_types_id', 'coltypes', 'f11',
            ))

    def get_introspector(self):
        # Fresh Introspector bound to the SQLite test database.
        return Introspector.from_database(sqlite_db)

    def test_make_column_name(self):
        """Column names are lower-cased, '_id' suffixes stripped, and
        Python keywords suffixed with '_'."""
        introspector = self.get_introspector()
        tests = (
            ('Column', 'column'),
            ('Foo_iD', 'foo'),
            ('foo_id', 'foo'),
            ('foo_id_id', 'foo_id'),
            ('foo', 'foo'),
            ('_id', '_id'),
            ('a123', 'a123'),
            ('and', 'and_'),
            ('Class', 'class_'),
            ('Class_ID', 'class_'),
        )
        for col_name, expected in tests:
            self.assertEqual(
                introspector.make_column_name(col_name), expected)

    def test_make_model_name(self):
        """Table names are converted to CamelCase class names."""
        introspector = self.get_introspector()
        tests = (
            ('Table', 'Table'),
            ('table', 'Table'),
            ('table_baz', 'TableBaz'),
            ('foo__bar__baz2', 'FooBarBaz2'),
            ('foo12_3', 'Foo123'),
        )
        for table_name, expected in tests:
            self.assertEqual(
                introspector.make_model_name(table_name), expected)

    def create_tables(self, db):
        # Rebind fixtures to *db* and recreate all tables from scratch.
        for model in MODELS:
            model._meta.database = db
        drop_model_tables(MODELS, fail_silently=True)
        create_model_tables(MODELS)

    def generative_test(fn):
        """Decorator: run *fn* once per configured backend, creating the
        fixture tables before and dropping them after each run."""
        def inner(self):
            for database in DATABASES:
                try:
                    introspector = Introspector.from_database(database)
                    self.create_tables(database)
                    fn(self, introspector)
                finally:
                    drop_model_tables(MODELS)
        return inner

    @generative_test
    def test_col_types(self, introspector):
        """Introspected column classes/nullability match the fixtures;
        tuples of classes allow for backend type aliasing."""
        columns, primary_keys, foreign_keys, model_names, indexes =\
            introspector.introspect()
        expected = (
            ('coltypes', (
                ('f1', BigIntegerField, False),
                ('f2', (BlobField, TextField), False),
                ('f3', (BooleanField, IntegerField), False),
                ('f4', CharField, False),
                ('f5', DateField, False),
                ('f6', DateTimeField, False),
                ('f7', DecimalField, False),
                ('f8', (DoubleField, FloatField), False),
                ('f9', FloatField, False),
                ('f10', IntegerField, False),
                ('f11', PrimaryKeyField, False),
                ('f12', TextField, False),
                ('f13', TimeField, False))),
            ('relmodel', (
                ('col_types_id', ForeignKeyField, False),
                ('col_types_nullable_id', ForeignKeyField, True))),
            ('nugget', (
                ('category_id', ForeignKeyField, False),
                ('category', CharField, False))),
            ('nullable', (
                ('nullable_cf', CharField, True),
                ('nullable_if', IntegerField, True))),
            ('fkpk', (
                ('col_types_id', ForeignKeyField, False),)),
            ('underscores', (
                ('_id', PrimaryKeyField, False),
                ('_name', CharField, False))),
            ('category', (
                ('name', CharField, False),
                ('parent_id', ForeignKeyField, True))),
        )
        for table_name, expected_columns in expected:
            introspected_columns = columns[table_name]
            for field_name, field_class, is_null in expected_columns:
                if not isinstance(field_class, (list, tuple)):
                    field_class = (field_class,)
                column = introspected_columns[field_name]
                self.assertTrue(column.field_class in field_class)
                self.assertEqual(column.nullable, is_null)

    @generative_test
    def test_foreign_keys(self, introspector):
        """Introspected FK metadata (table/column/dest pairs), including
        FK-as-PK and self-referential cases."""
        columns, primary_keys, foreign_keys, model_names, indexes =\
            introspector.introspect()
        self.assertEqual(foreign_keys['coltypes'], [])

        rel_model = foreign_keys['relmodel']
        self.assertEqual(len(rel_model), 2)

        fkpk = foreign_keys['fkpk']
        self.assertEqual(len(fkpk), 1)
        fkpk_fk = fkpk[0]
        self.assertEqual(fkpk_fk.table, 'fkpk')
        self.assertEqual(fkpk_fk.column, 'col_types_id')
        self.assertEqual(fkpk_fk.dest_table, 'coltypes')
        self.assertEqual(fkpk_fk.dest_column, 'f11')

        category = foreign_keys['category']
        self.assertEqual(len(category), 1)
        category_fk = category[0]
        self.assertEqual(category_fk.table, 'category')
        self.assertEqual(category_fk.column, 'parent_id')
        self.assertEqual(category_fk.dest_table, 'category')
        self.assertEqual(category_fk.dest_column, 'id')

    @generative_test
    def test_table_names(self, introspector):
        """Generated model names for each table."""
        columns, primary_keys, foreign_keys, model_names, indexes =\
            introspector.introspect()
        names = (
            ('coltypes', 'Coltypes'),
            ('nullable', 'Nullable'),
            ('relmodel', 'Relmodel'),
            ('fkpk', 'Fkpk'))
        for k, v in names:
            self.assertEqual(model_names[k], v)

    @generative_test
    def test_column_meta(self, introspector):
        """get_field_parameters() output for FK columns: db_column,
        rel_model, to_field, null and related_name handling."""
        columns, primary_keys, foreign_keys, model_names, indexes =\
            introspector.introspect()
        rel_model = columns['relmodel']

        col_types_id = rel_model['col_types_id']
        self.assertEqual(col_types_id.get_field_parameters(), {
            'db_column': "'col_types_id'",
            'rel_model': 'Coltypes',
            'to_field': "'f11'",
        })

        col_types_nullable_id = rel_model['col_types_nullable_id']
        self.assertEqual(col_types_nullable_id.get_field_parameters(), {
            'db_column': "'col_types_nullable_id'",
            'null': True,
            'related_name': "'coltypes_col_types_nullable_set'",
            'rel_model': 'Coltypes',
            'to_field': "'f11'",
        })

        fkpk = columns['fkpk']
        self.assertEqual(fkpk['col_types_id'].get_field_parameters(), {
            'db_column': "'col_types_id'",
            'rel_model': 'Coltypes',
            'primary_key': True,
            'to_field': "'f11'"})

        category = columns['category']

        parent_id = category['parent_id']
        self.assertEqual(parent_id.get_field_parameters(), {
            'db_column': "'parent_id'",
            'null': True,
            'rel_model': "'self'",
            'to_field': "'id'",
        })

        nugget = columns['nugget']
        category_fk = nugget['category_id']
        self.assertEqual(category_fk.name, 'category_id')
        self.assertEqual(category_fk.get_field_parameters(), {
            'to_field': "'id'",
            'rel_model': 'Category',
            'db_column': "'category_id'",
        })

        category = nugget['category']
        self.assertEqual(category.name, 'category')

    @generative_test
    def test_get_field(self, introspector):
        """get_field() renders complete field definition source lines."""
        columns, primary_keys, foreign_keys, model_names, indexes =\
            introspector.introspect()
        expected = (
            ('coltypes', (
                ('f1', 'f1 = BigIntegerField(index=True)'),
                #('f2', 'f2 = BlobField()'),
                ('f4', 'f4 = CharField()'),
                ('f5', 'f5 = DateField()'),
                ('f6', 'f6 = DateTimeField()'),
                ('f7', 'f7 = DecimalField()'),
                ('f10', 'f10 = IntegerField(unique=True)'),
                ('f11', 'f11 = PrimaryKeyField()'),
                ('f12', 'f12 = TextField()'),
                ('f13', 'f13 = TimeField()'),
            )),
            ('nullable', (
                ('nullable_cf', 'nullable_cf = '
                 'CharField(null=True)'),
                ('nullable_if', 'nullable_if = IntegerField(null=True)'),
            )),
            ('fkpk', (
                ('col_types_id', 'col_types = ForeignKeyField('
                 'db_column=\'col_types_id\', primary_key=True, '
                 'rel_model=Coltypes, to_field=\'f11\')'),
            )),
            ('nugget', (
                ('category_id', 'category_id = ForeignKeyField('
                 'db_column=\'category_id\', rel_model=Category, '
                 'to_field=\'id\')'),
                ('category', 'category = CharField()'),
            )),
            ('relmodel', (
                ('col_types_id', 'col_types = ForeignKeyField('
                 'db_column=\'col_types_id\', rel_model=Coltypes, '
                 'to_field=\'f11\')'),
                ('col_types_nullable_id', 'col_types_nullable = '
                 'ForeignKeyField(db_column=\'col_types_nullable_id\', '
                 'null=True, rel_model=Coltypes, '
                 'related_name=\'coltypes_col_types_nullable_set\', '
                 'to_field=\'f11\')'),
            )),
            ('underscores', (
                ('_id', '_id = PrimaryKeyField()'),
                ('_name', '_name = CharField()'),
            )),
            ('category', (
                ('name', 'name = CharField()'),
                ('parent_id', 'parent = ForeignKeyField('
                 'db_column=\'parent_id\', null=True, rel_model=\'self\', '
                 'to_field=\'id\')'),
            )),
        )
        for table, field_data in expected:
            for field_name, field_str in field_data:
                self.assertEqual(
                    columns[table][field_name].get_field(),
                    field_str)
|
|
import logging
from screen_objects import ScreenObjectsManager, ScrollBar, \
TouchAndTextItem
from ..input import InputManager
logger = logging.getLogger(__name__)
class ListView():
    """Scrollable, selectable list widget rendered from text items.

    Items are TouchAndTextItem screen objects keyed by their list index
    (as a string); a ScrollBar object (key "scrollbar") appears when the
    list does not fit in the available height.
    NOTE(review): relies on integer `/` division for max_rows, i.e.
    Python 2 semantics — confirm before porting to Python 3.
    """

    def __init__(self, pos, size, base_size, font):
        # pos/size: widget rectangle; base_size: scrollbar width.
        self.size = size
        self.pos = pos
        self.base_size = base_size
        self.screen_objects = ScreenObjectsManager()
        # Rows that fit vertically, from the font's line height.
        self.max_rows = self.size[1] / font.size("TEXT SIZE")[1]
        self.current_item = 0
        self.font = font
        self.list_size = 0
        self.list = []
        self.scrollbar = False
        self.selected = None
        self.active = []
        self.set_list([])
        self.update_keys = []

    # Sets the list for the lisview.
    # It should be an iterable of strings
    def set_list(self, item_list):
        """Replace the contents; adds a scrollbar when needed and resets
        scroll position and selection."""
        self.screen_objects.clear()
        self.list = item_list
        self.list_size = len(item_list)
        if self.max_rows < self.list_size:
            self.scrollbar = True
            scroll_bar = ScrollBar(
                (self.pos[0] + self.size[0] - self.base_size,
                 self.pos[1]),
                (self.base_size, self.size[1]), self.list_size,
                self.max_rows)
            self.screen_objects.set_touch_object("scrollbar",
                                                 scroll_bar)
        else:
            self.scrollbar = False
        if self.list_size > 0:
            self.selected = 0
        else:
            self.selected = None
        self.load_new_item_position(0)

    # Will load items currently displaying in item_pos
    def load_new_item_position(self, item_pos):
        """Rebuild the visible item objects starting at *item_pos*.

        Items that do not fit horizontally are remembered in
        update_keys so they can be re-rendered (e.g. scrolled text).
        """
        self.update_keys = []
        self.current_item = item_pos
        # Keep the scrollbar object alive; drop all item objects.
        if self.scrollbar:
            self.screen_objects.clear_touch(["scrollbar"])
        else:
            self.screen_objects.clear_touch(None)
        i = self.current_item
        z = 0
        if self.scrollbar:
            width = self.size[0] - self.base_size
        else:
            width = self.size[0]
        self.should_update_always = False
        current_y = self.pos[1]
        # Fill rows until the list or the vertical space runs out.
        while i < self.list_size and current_y <= self.pos[1] + self.size[1]:
            item = TouchAndTextItem(self.font, self.list[i], (
                self.pos[0],
                current_y), (width, -1))
            current_y += item.size[1]
            if not item.fit_horizontal:
                self.update_keys.append(str(i))
            self.screen_objects.set_touch_object(str(i), item)
            i += 1
            z += 1
        self.reload_selected()

    def should_update(self):
        # True when some visible item needs per-frame re-rendering.
        if len(self.update_keys) > 0:
            return True
        else:
            return False

    def find_update_rects(self, rects):
        # Append the dirty rectangles of items needing re-render.
        for key in self.update_keys:
            object = self.screen_objects.get_touch_object(key)
            rects.append(object.rect_in_pos)

    def render(self, surface, update_all, rects):
        """Draw everything (update_all) or only the items that need
        continuous updates."""
        if update_all:
            self.screen_objects.render(surface)
        else:
            for key in self.update_keys:
                object = self.screen_objects.get_touch_object(key)
                object.update()
                object.render(surface)

    def touch_event(self, touch_event):
        """Handle click/key/swipe input.

        Returns the index (int) of an activated item, or None when the
        event only scrolled or moved the selection.
        """
        self.must_update = True
        if touch_event.type == InputManager.click \
                or touch_event.type == InputManager.long_click:
            objects = self.screen_objects.get_touch_objects_in_pos(
                touch_event.current_pos)
            if objects is not None:
                for key in objects:
                    if key == "scrollbar":
                        direction = \
                            self.screen_objects.get_touch_object(
                                key).touch(touch_event.current_pos)
                        if direction != 0:
                            self.move_to(direction)
                    else:
                        # Item keys are stringified list indices.
                        return int(key)
        elif (touch_event.type == InputManager.key and
                self.selected is not None):
            if touch_event.direction == InputManager.enter:
                if self.selected is not None:
                    return self.selected
            elif touch_event.direction == InputManager.up:
                self.set_selected(self.selected-1)
            elif touch_event.direction == InputManager.down:
                self.set_selected(self.selected+1)
        elif touch_event.type == InputManager.swipe:
            if touch_event.direction == InputManager.up:
                self.move_to(-1)
            elif touch_event.direction == InputManager.down:
                self.move_to(1)

    # Scroll to direction
    # direction == 1 will scroll down
    # direction == -1 will scroll up
    def move_to(self, direction):
        """Scroll one page in *direction*, clamped to the list bounds,
        then re-apply the active highlight."""
        if self.scrollbar:
            if direction == 1:
                self.current_item += self.max_rows
                if self.current_item + self.max_rows > self.list_size:
                    self.current_item = self.list_size - self.max_rows
                self.load_new_item_position(self.current_item)
                self.screen_objects.get_touch_object(
                    "scrollbar").set_item(
                    self.current_item)
            elif direction == -1:
                self.current_item -= self.max_rows
                if self.current_item < 0:
                    self.current_item = 0
                self.load_new_item_position(self.current_item)
                self.screen_objects.get_touch_object(
                    "scrollbar").set_item(
                    self.current_item)
            self.set_active(self.active)

    # Set active items
    def set_active(self, active):
        """Replace the set of highlighted ("active") item indices.

        KeyError is ignored because some indices may be scrolled
        off-screen and have no screen object right now.
        """
        self.must_update = True
        for number in self.active:
            try:
                self.screen_objects.get_touch_object(
                    str(number)).set_active(
                    False)
            except KeyError:
                pass
        for number in active:
            try:
                self.screen_objects.get_touch_object(
                    str(number)).set_active(
                    True)
            except KeyError:
                pass
        self.active = active

    def set_selected(self, selected):
        """Move the keyboard selection to index *selected* (ignored when
        out of range) and scroll it into view."""
        self.must_update = True
        if selected > -1 and selected < len(self.list):
            if self.selected is not None:
                try:
                    self.screen_objects.get_touch_object(
                        str(self.selected)).set_selected(
                        False)
                except KeyError:
                    pass
            if selected is not None:
                try:
                    self.screen_objects.get_touch_object(
                        str(selected)).set_selected(
                        True)
                except KeyError:
                    pass
            self.selected = selected
            self.set_selected_on_screen()

    def set_selected_on_screen(self):
        # Recursively page up/down until the selected row is visible.
        self.must_update = True
        if self.current_item + self.max_rows <= self.selected:
            self.move_to(1)
            self.set_selected_on_screen()
        elif self.current_item > self.selected:
            self.move_to(-1)
            self.set_selected_on_screen()

    def reload_selected(self):
        # Re-apply the selected highlight after the items were rebuilt.
        self.must_update = True
        if self.selected is not None:
            try:
                self.screen_objects.get_touch_object(
                    str(self.selected)).set_selected(
                    True)
            except KeyError:
                pass
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
import uuid
from msrest.pipeline import ClientRawResponse
from .. import models
class DatasetsOperations(object):
"""DatasetsOperations operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
:ivar api_version: The API version. Constant value: "2017-09-01-preview".
"""
models = models
    def __init__(self, client, config, serializer, deserializer):
        # Service client used to build and send HTTP requests.
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        # Fixed API version for this generated operations class.
        self.api_version = "2017-09-01-preview"
        self.config = config
    def list_by_factory(
            self, resource_group_name, factory_name, custom_headers=None, raw=False, **operation_config):
        """Lists datasets.

        :param resource_group_name: The resource group name.
        :type resource_group_name: str
        :param factory_name: The factory name.
        :type factory_name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: An iterator like instance of DatasetResource
        :rtype:
         ~azure.mgmt.datafactory.models.DatasetResourcePaged[~azure.mgmt.datafactory.models.DatasetResource]
        :raises:
         :class:`ErrorResponseException<azure.mgmt.datafactory.models.ErrorResponseException>`
        """
        # Paging callback: fetches one page; first call builds the full
        # URL, later calls follow the service-provided next_link.
        def internal_paging(next_link=None, raw=False):

            if not next_link:
                # Construct URL
                url = self.list_by_factory.metadata['url']
                path_format_arguments = {
                    'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
                    'factoryName': self._serialize.url("factory_name", factory_name, 'str', max_length=63, min_length=3, pattern=r'^[A-Za-z0-9]+(?:-[A-Za-z0-9]+)*$')
                }
                url = self._client.format_url(url, **path_format_arguments)

                # Construct parameters
                query_parameters = {}
                query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

            else:
                url = next_link
                query_parameters = {}

            # Construct headers
            header_parameters = {}
            header_parameters['Content-Type'] = 'application/json; charset=utf-8'
            if self.config.generate_client_request_id:
                header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
            if custom_headers:
                header_parameters.update(custom_headers)
            if self.config.accept_language is not None:
                header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

            # Construct and send request
            request = self._client.get(url, query_parameters)
            response = self._client.send(
                request, header_parameters, stream=False, **operation_config)

            if response.status_code not in [200]:
                raise models.ErrorResponseException(self._deserialize, response)

            return response

        # Deserialize response into a lazily-iterating paged collection.
        deserialized = models.DatasetResourcePaged(internal_paging, self._deserialize.dependencies)

        if raw:
            header_dict = {}
            client_raw_response = models.DatasetResourcePaged(internal_paging, self._deserialize.dependencies, header_dict)
            return client_raw_response

        return deserialized
    # Route template consumed by internal_paging on the first page.
    list_by_factory.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataFactory/factories/{factoryName}/datasets'}
def create_or_update(
        self, resource_group_name, factory_name, dataset_name, properties, if_match=None, custom_headers=None, raw=False, **operation_config):
    """Creates or updates a dataset.

    :param resource_group_name: The resource group name.
    :type resource_group_name: str
    :param factory_name: The factory name.
    :type factory_name: str
    :param dataset_name: The dataset name.
    :type dataset_name: str
    :param properties: Dataset properties.
    :type properties: ~azure.mgmt.datafactory.models.Dataset
    :param if_match: ETag of the dataset entity. Should only be specified
     for update, for which it should match existing entity or can be * for
     unconditional update.
    :type if_match: str
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :return: DatasetResource or ClientRawResponse if raw=true
    :rtype: ~azure.mgmt.datafactory.models.DatasetResource or
     ~msrest.pipeline.ClientRawResponse
    :raises:
     :class:`ErrorResponseException<azure.mgmt.datafactory.models.ErrorResponseException>`
    """
    # The wire format expects the properties wrapped in a resource envelope.
    dataset = models.DatasetResource(properties=properties)

    # Expand and validate the URL template path arguments.
    path_format_arguments = {
        'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
        'factoryName': self._serialize.url("factory_name", factory_name, 'str', max_length=63, min_length=3, pattern=r'^[A-Za-z0-9]+(?:-[A-Za-z0-9]+)*$'),
        'datasetName': self._serialize.url("dataset_name", dataset_name, 'str', max_length=260, min_length=1, pattern=r'^[A-Za-z0-9_][^<>*#.%&:\\+?/]*$')
    }
    url = self._client.format_url(self.create_or_update.metadata['url'], **path_format_arguments)

    # Query string: only the service API version.
    query_parameters = {
        'api-version': self._serialize.query("self.api_version", self.api_version, 'str')}

    # Request headers, including the optional optimistic-concurrency ETag.
    header_parameters = {'Content-Type': 'application/json; charset=utf-8'}
    if self.config.generate_client_request_id:
        header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        header_parameters.update(custom_headers)
    if if_match is not None:
        header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
    if self.config.accept_language is not None:
        header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

    # Serialize the body and issue the PUT.
    body_content = self._serialize.body(dataset, 'DatasetResource')
    request = self._client.put(url, query_parameters)
    response = self._client.send(
        request, header_parameters, body_content, stream=False, **operation_config)

    if response.status_code not in [200]:
        raise models.ErrorResponseException(self._deserialize, response)

    deserialized = None
    if response.status_code == 200:
        deserialized = self._deserialize('DatasetResource', response)

    if raw:
        return ClientRawResponse(deserialized, response)

    return deserialized
create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataFactory/factories/{factoryName}/datasets/{datasetName}'}
def get(
        self, resource_group_name, factory_name, dataset_name, custom_headers=None, raw=False, **operation_config):
    """Gets a dataset.

    :param resource_group_name: The resource group name.
    :type resource_group_name: str
    :param factory_name: The factory name.
    :type factory_name: str
    :param dataset_name: The dataset name.
    :type dataset_name: str
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :return: DatasetResource or ClientRawResponse if raw=true
    :rtype: ~azure.mgmt.datafactory.models.DatasetResource or
     ~msrest.pipeline.ClientRawResponse
    :raises:
     :class:`ErrorResponseException<azure.mgmt.datafactory.models.ErrorResponseException>`
    """
    # Expand and validate the URL template path arguments.
    path_format_arguments = {
        'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
        'factoryName': self._serialize.url("factory_name", factory_name, 'str', max_length=63, min_length=3, pattern=r'^[A-Za-z0-9]+(?:-[A-Za-z0-9]+)*$'),
        'datasetName': self._serialize.url("dataset_name", dataset_name, 'str', max_length=260, min_length=1, pattern=r'^[A-Za-z0-9_][^<>*#.%&:\\+?/]*$')
    }
    url = self._client.format_url(self.get.metadata['url'], **path_format_arguments)

    # Query string: only the service API version.
    query_parameters = {
        'api-version': self._serialize.query("self.api_version", self.api_version, 'str')}

    # Request headers.
    header_parameters = {'Content-Type': 'application/json; charset=utf-8'}
    if self.config.generate_client_request_id:
        header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        header_parameters.update(custom_headers)
    if self.config.accept_language is not None:
        header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

    # Issue the GET and fail loudly on anything but 200.
    request = self._client.get(url, query_parameters)
    response = self._client.send(request, header_parameters, stream=False, **operation_config)

    if response.status_code not in [200]:
        raise models.ErrorResponseException(self._deserialize, response)

    deserialized = None
    if response.status_code == 200:
        deserialized = self._deserialize('DatasetResource', response)

    if raw:
        return ClientRawResponse(deserialized, response)

    return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataFactory/factories/{factoryName}/datasets/{datasetName}'}
def delete(
        self, resource_group_name, factory_name, dataset_name, custom_headers=None, raw=False, **operation_config):
    """Deletes a dataset.

    :param resource_group_name: The resource group name.
    :type resource_group_name: str
    :param factory_name: The factory name.
    :type factory_name: str
    :param dataset_name: The dataset name.
    :type dataset_name: str
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :return: None or ClientRawResponse if raw=true
    :rtype: None or ~msrest.pipeline.ClientRawResponse
    :raises:
     :class:`ErrorResponseException<azure.mgmt.datafactory.models.ErrorResponseException>`
    """
    # Expand and validate the URL template path arguments.
    path_format_arguments = {
        'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
        'factoryName': self._serialize.url("factory_name", factory_name, 'str', max_length=63, min_length=3, pattern=r'^[A-Za-z0-9]+(?:-[A-Za-z0-9]+)*$'),
        'datasetName': self._serialize.url("dataset_name", dataset_name, 'str', max_length=260, min_length=1, pattern=r'^[A-Za-z0-9_][^<>*#.%&:\\+?/]*$')
    }
    url = self._client.format_url(self.delete.metadata['url'], **path_format_arguments)

    # Query string: only the service API version.
    query_parameters = {
        'api-version': self._serialize.query("self.api_version", self.api_version, 'str')}

    # Request headers.
    header_parameters = {'Content-Type': 'application/json; charset=utf-8'}
    if self.config.generate_client_request_id:
        header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        header_parameters.update(custom_headers)
    if self.config.accept_language is not None:
        header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

    # Issue the DELETE; both 200 and 204 indicate success.
    request = self._client.delete(url, query_parameters)
    response = self._client.send(request, header_parameters, stream=False, **operation_config)

    if response.status_code not in [200, 204]:
        raise models.ErrorResponseException(self._deserialize, response)

    if raw:
        return ClientRawResponse(None, response)
delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataFactory/factories/{factoryName}/datasets/{datasetName}'}
|
|
# Generated from Lustre.g4 by ANTLR 4.7.2
# encoding: utf-8
from __future__ import print_function
from antlr4 import *
from io import StringIO
import sys
def serializedATN():
    """Return the serialized ATN for the Lustre lexer as one string.

    The payload below is machine-generated by ANTLR 4.7.2 from
    ``Lustre.g4`` and is consumed by ``ATNDeserializer`` — do not edit
    the escape data by hand.
    """
    with StringIO() as buf:
        buf.write(u"\3\u608b\ua72a\u8133\ub9ed\u417c\u3be7\u7786\u5964\2")
        buf.write(u"A\u01be\b\1\4\2\t\2\4\3\t\3\4\4\t\4\4\5\t\5\4\6\t\6\4")
        buf.write(u"\7\t\7\4\b\t\b\4\t\t\t\4\n\t\n\4\13\t\13\4\f\t\f\4\r")
        buf.write(u"\t\r\4\16\t\16\4\17\t\17\4\20\t\20\4\21\t\21\4\22\t\22")
        buf.write(u"\4\23\t\23\4\24\t\24\4\25\t\25\4\26\t\26\4\27\t\27\4")
        buf.write(u"\30\t\30\4\31\t\31\4\32\t\32\4\33\t\33\4\34\t\34\4\35")
        buf.write(u"\t\35\4\36\t\36\4\37\t\37\4 \t \4!\t!\4\"\t\"\4#\t#\4")
        buf.write(u"$\t$\4%\t%\4&\t&\4\'\t\'\4(\t(\4)\t)\4*\t*\4+\t+\4,\t")
        buf.write(u",\4-\t-\4.\t.\4/\t/\4\60\t\60\4\61\t\61\4\62\t\62\4\63")
        buf.write(u"\t\63\4\64\t\64\4\65\t\65\4\66\t\66\4\67\t\67\48\t8\4")
        buf.write(u"9\t9\4:\t:\4;\t;\4<\t<\4=\t=\4>\t>\4?\t?\4@\t@\3\2\3")
        buf.write(u"\2\3\2\3\2\3\2\3\2\3\2\3\2\3\2\3\3\3\3\3\4\3\4\3\5\3")
        buf.write(u"\5\3\5\3\5\3\5\3\5\3\6\3\6\3\7\3\7\3\7\3\7\3\7\3\b\3")
        buf.write(u"\b\3\t\3\t\3\n\3\n\3\n\3\n\3\n\3\n\3\n\3\n\3\13\3\13")
        buf.write(u"\3\13\3\13\3\f\3\f\3\f\3\f\3\r\3\r\3\r\3\r\3\16\3\16")
        buf.write(u"\3\17\3\17\3\17\3\17\3\17\3\17\3\17\3\20\3\20\3\21\3")
        buf.write(u"\21\3\22\3\22\3\22\3\22\3\22\3\23\3\23\3\23\3\23\3\24")
        buf.write(u"\3\24\3\24\3\24\3\24\3\24\3\24\3\24\3\24\3\25\3\25\3")
        buf.write(u"\26\3\26\3\27\3\27\3\27\3\30\3\30\3\30\3\30\3\30\3\31")
        buf.write(u"\3\31\3\31\3\31\3\31\3\32\3\32\3\33\3\33\3\33\3\33\3")
        buf.write(u"\33\3\33\3\33\3\33\3\33\3\33\3\33\3\33\3\34\3\34\3\34")
        buf.write(u"\3\34\3\34\3\34\3\34\3\34\3\34\3\34\3\34\3\34\3\34\3")
        buf.write(u"\34\3\35\3\35\3\35\3\35\3\35\3\35\3\35\3\36\3\36\3\36")
        buf.write(u"\3\36\3\36\3\36\3\36\3\36\3\37\3\37\3\37\3\37\3\37\3")
        buf.write(u"\37\3\37\3 \3 \3 \3 \3 \3 \3!\3!\3!\3!\3!\3!\3!\3!\3")
        buf.write(u"\"\3\"\3#\3#\3#\3$\3$\3$\3$\3%\3%\3%\3%\3&\3&\3\'\3\'")
        buf.write(u"\3(\3(\3(\3(\3)\3)\3)\3)\3*\3*\3+\3+\3,\3,\3,\3-\3-\3")
        buf.write(u".\3.\3.\3/\3/\3/\3\60\3\60\3\60\3\60\3\61\3\61\3\61\3")
        buf.write(u"\62\3\62\3\62\3\62\3\63\3\63\3\63\3\64\3\64\3\64\3\65")
        buf.write(u"\3\65\3\65\3\66\3\66\3\66\3\66\3\66\3\67\3\67\3\67\3")
        buf.write(u"\67\3\67\38\38\38\38\39\39\39\39\39\39\39\39\39\59\u0177")
        buf.write(u"\n9\3:\6:\u017a\n:\r:\16:\u017b\3;\3;\7;\u0180\n;\f;")
        buf.write(u"\16;\u0183\13;\3<\6<\u0186\n<\r<\16<\u0187\3<\3<\3=\3")
        buf.write(u"=\3=\3=\3=\7=\u0191\n=\f=\16=\u0194\13=\3=\5=\u0197\n")
        buf.write(u"=\3=\5=\u019a\n=\3=\5=\u019d\n=\3=\3=\3>\3>\3>\3>\7>")
        buf.write(u"\u01a5\n>\f>\16>\u01a8\13>\3>\3>\3>\3>\3>\3?\3?\3?\3")
        buf.write(u"?\7?\u01b3\n?\f?\16?\u01b6\13?\3?\3?\3?\3?\3?\3@\3@\4")
        buf.write(u"\u01a6\u01b4\2A\3\3\5\4\7\5\t\6\13\7\r\b\17\t\21\n\23")
        buf.write(u"\13\25\f\27\r\31\16\33\17\35\20\37\21!\22#\23%\24\'\25")
        buf.write(u")\26+\27-\30/\31\61\32\63\33\65\34\67\359\36;\37= ?!")
        buf.write(u"A\"C#E$G%I&K\'M(O)Q*S+U,W-Y.[/]\60_\61a\62c\63e\64g\65")
        buf.write(u"i\66k\67m8o9q:s;u<w=y>{?}@\177A\3\2\b\3\2\62;\6\2C\\")
        buf.write(u"aac|\u0080\u0080\7\2\62;C\\aac|\u0080\u0080\5\2\13\f")
        buf.write(u"\16\17\"\"\5\2\f\f\17\17\'\'\4\2\f\f\17\17\2\u01c7\2")
        buf.write(u"\3\3\2\2\2\2\5\3\2\2\2\2\7\3\2\2\2\2\t\3\2\2\2\2\13\3")
        buf.write(u"\2\2\2\2\r\3\2\2\2\2\17\3\2\2\2\2\21\3\2\2\2\2\23\3\2")
        buf.write(u"\2\2\2\25\3\2\2\2\2\27\3\2\2\2\2\31\3\2\2\2\2\33\3\2")
        buf.write(u"\2\2\2\35\3\2\2\2\2\37\3\2\2\2\2!\3\2\2\2\2#\3\2\2\2")
        buf.write(u"\2%\3\2\2\2\2\'\3\2\2\2\2)\3\2\2\2\2+\3\2\2\2\2-\3\2")
        buf.write(u"\2\2\2/\3\2\2\2\2\61\3\2\2\2\2\63\3\2\2\2\2\65\3\2\2")
        buf.write(u"\2\2\67\3\2\2\2\29\3\2\2\2\2;\3\2\2\2\2=\3\2\2\2\2?\3")
        buf.write(u"\2\2\2\2A\3\2\2\2\2C\3\2\2\2\2E\3\2\2\2\2G\3\2\2\2\2")
        buf.write(u"I\3\2\2\2\2K\3\2\2\2\2M\3\2\2\2\2O\3\2\2\2\2Q\3\2\2\2")
        buf.write(u"\2S\3\2\2\2\2U\3\2\2\2\2W\3\2\2\2\2Y\3\2\2\2\2[\3\2\2")
        buf.write(u"\2\2]\3\2\2\2\2_\3\2\2\2\2a\3\2\2\2\2c\3\2\2\2\2e\3\2")
        buf.write(u"\2\2\2g\3\2\2\2\2i\3\2\2\2\2k\3\2\2\2\2m\3\2\2\2\2o\3")
        buf.write(u"\2\2\2\2q\3\2\2\2\2s\3\2\2\2\2u\3\2\2\2\2w\3\2\2\2\2")
        buf.write(u"y\3\2\2\2\2{\3\2\2\2\2}\3\2\2\2\2\177\3\2\2\2\3\u0081")
        buf.write(u"\3\2\2\2\5\u008a\3\2\2\2\7\u008c\3\2\2\2\t\u008e\3\2")
        buf.write(u"\2\2\13\u0094\3\2\2\2\r\u0096\3\2\2\2\17\u009b\3\2\2")
        buf.write(u"\2\21\u009d\3\2\2\2\23\u009f\3\2\2\2\25\u00a7\3\2\2\2")
        buf.write(u"\27\u00ab\3\2\2\2\31\u00af\3\2\2\2\33\u00b3\3\2\2\2\35")
        buf.write(u"\u00b5\3\2\2\2\37\u00bc\3\2\2\2!\u00be\3\2\2\2#\u00c0")
        buf.write(u"\3\2\2\2%\u00c5\3\2\2\2\'\u00c9\3\2\2\2)\u00d2\3\2\2")
        buf.write(u"\2+\u00d4\3\2\2\2-\u00d6\3\2\2\2/\u00d9\3\2\2\2\61\u00de")
        buf.write(u"\3\2\2\2\63\u00e3\3\2\2\2\65\u00e5\3\2\2\2\67\u00f1\3")
        buf.write(u"\2\2\29\u00ff\3\2\2\2;\u0106\3\2\2\2=\u010e\3\2\2\2?")
        buf.write(u"\u0115\3\2\2\2A\u011b\3\2\2\2C\u0123\3\2\2\2E\u0125\3")
        buf.write(u"\2\2\2G\u0128\3\2\2\2I\u012c\3\2\2\2K\u0130\3\2\2\2M")
        buf.write(u"\u0132\3\2\2\2O\u0134\3\2\2\2Q\u0138\3\2\2\2S\u013c\3")
        buf.write(u"\2\2\2U\u013e\3\2\2\2W\u0140\3\2\2\2Y\u0143\3\2\2\2[")
        buf.write(u"\u0145\3\2\2\2]\u0148\3\2\2\2_\u014b\3\2\2\2a\u014f\3")
        buf.write(u"\2\2\2c\u0152\3\2\2\2e\u0156\3\2\2\2g\u0159\3\2\2\2i")
        buf.write(u"\u015c\3\2\2\2k\u015f\3\2\2\2m\u0164\3\2\2\2o\u0169\3")
        buf.write(u"\2\2\2q\u0176\3\2\2\2s\u0179\3\2\2\2u\u017d\3\2\2\2w")
        buf.write(u"\u0185\3\2\2\2y\u018b\3\2\2\2{\u01a0\3\2\2\2}\u01ae\3")
        buf.write(u"\2\2\2\177\u01bc\3\2\2\2\u0081\u0082\7f\2\2\u0082\u0083")
        buf.write(u"\7c\2\2\u0083\u0084\7v\2\2\u0084\u0085\7c\2\2\u0085\u0086")
        buf.write(u"\7v\2\2\u0086\u0087\7{\2\2\u0087\u0088\7r\2\2\u0088\u0089")
        buf.write(u"\7g\2\2\u0089\4\3\2\2\2\u008a\u008b\7?\2\2\u008b\6\3")
        buf.write(u"\2\2\2\u008c\u008d\7=\2\2\u008d\b\3\2\2\2\u008e\u008f")
        buf.write(u"\7e\2\2\u008f\u0090\7q\2\2\u0090\u0091\7p\2\2\u0091\u0092")
        buf.write(u"\7u\2\2\u0092\u0093\7v\2\2\u0093\n\3\2\2\2\u0094\u0095")
        buf.write(u"\7<\2\2\u0095\f\3\2\2\2\u0096\u0097\7p\2\2\u0097\u0098")
        buf.write(u"\7q\2\2\u0098\u0099\7f\2\2\u0099\u009a\7g\2\2\u009a\16")
        buf.write(u"\3\2\2\2\u009b\u009c\7*\2\2\u009c\20\3\2\2\2\u009d\u009e")
        buf.write(u"\7+\2\2\u009e\22\3\2\2\2\u009f\u00a0\7t\2\2\u00a0\u00a1")
        buf.write(u"\7g\2\2\u00a1\u00a2\7v\2\2\u00a2\u00a3\7w\2\2\u00a3\u00a4")
        buf.write(u"\7t\2\2\u00a4\u00a5\7p\2\2\u00a5\u00a6\7u\2\2\u00a6\24")
        buf.write(u"\3\2\2\2\u00a7\u00a8\7x\2\2\u00a8\u00a9\7c\2\2\u00a9")
        buf.write(u"\u00aa\7t\2\2\u00aa\26\3\2\2\2\u00ab\u00ac\7n\2\2\u00ac")
        buf.write(u"\u00ad\7g\2\2\u00ad\u00ae\7v\2\2\u00ae\30\3\2\2\2\u00af")
        buf.write(u"\u00b0\7v\2\2\u00b0\u00b1\7g\2\2\u00b1\u00b2\7n\2\2\u00b2")
        buf.write(u"\32\3\2\2\2\u00b3\u00b4\7.\2\2\u00b4\34\3\2\2\2\u00b5")
        buf.write(u"\u00b6\7u\2\2\u00b6\u00b7\7v\2\2\u00b7\u00b8\7t\2\2\u00b8")
        buf.write(u"\u00b9\7w\2\2\u00b9\u00ba\7e\2\2\u00ba\u00bb\7v\2\2\u00bb")
        buf.write(u"\36\3\2\2\2\u00bc\u00bd\7}\2\2\u00bd \3\2\2\2\u00be\u00bf")
        buf.write(u"\7\177\2\2\u00bf\"\3\2\2\2\u00c0\u00c1\7g\2\2\u00c1\u00c2")
        buf.write(u"\7p\2\2\u00c2\u00c3\7w\2\2\u00c3\u00c4\7o\2\2\u00c4$")
        buf.write(u"\3\2\2\2\u00c5\u00c6\7k\2\2\u00c6\u00c7\7p\2\2\u00c7")
        buf.write(u"\u00c8\7v\2\2\u00c8&\3\2\2\2\u00c9\u00ca\7u\2\2\u00ca")
        buf.write(u"\u00cb\7w\2\2\u00cb\u00cc\7d\2\2\u00cc\u00cd\7t\2\2\u00cd")
        buf.write(u"\u00ce\7c\2\2\u00ce\u00cf\7p\2\2\u00cf\u00d0\7i\2\2\u00d0")
        buf.write(u"\u00d1\7g\2\2\u00d1(\3\2\2\2\u00d2\u00d3\7]\2\2\u00d3")
        buf.write(u"*\3\2\2\2\u00d4\u00d5\7_\2\2\u00d5,\3\2\2\2\u00d6\u00d7")
        buf.write(u"\7q\2\2\u00d7\u00d8\7h\2\2\u00d8.\3\2\2\2\u00d9\u00da")
        buf.write(u"\7d\2\2\u00da\u00db\7q\2\2\u00db\u00dc\7q\2\2\u00dc\u00dd")
        buf.write(u"\7n\2\2\u00dd\60\3\2\2\2\u00de\u00df\7t\2\2\u00df\u00e0")
        buf.write(u"\7g\2\2\u00e0\u00e1\7c\2\2\u00e1\u00e2\7n\2\2\u00e2\62")
        buf.write(u"\3\2\2\2\u00e3\u00e4\7/\2\2\u00e4\64\3\2\2\2\u00e5\u00e6")
        buf.write(u"\7/\2\2\u00e6\u00e7\7/\2\2\u00e7\u00e8\7\'\2\2\u00e8")
        buf.write(u"\u00e9\7R\2\2\u00e9\u00ea\7T\2\2\u00ea\u00eb\7Q\2\2\u00eb")
        buf.write(u"\u00ec\7R\2\2\u00ec\u00ed\7G\2\2\u00ed\u00ee\7T\2\2\u00ee")
        buf.write(u"\u00ef\7V\2\2\u00ef\u00f0\7[\2\2\u00f0\66\3\2\2\2\u00f1")
        buf.write(u"\u00f2\7/\2\2\u00f2\u00f3\7/\2\2\u00f3\u00f4\7\'\2\2")
        buf.write(u"\u00f4\u00f5\7T\2\2\u00f5\u00f6\7G\2\2\u00f6\u00f7\7")
        buf.write(u"C\2\2\u00f7\u00f8\7N\2\2\u00f8\u00f9\7K\2\2\u00f9\u00fa")
        buf.write(u"\7\\\2\2\u00fa\u00fb\7C\2\2\u00fb\u00fc\7D\2\2\u00fc")
        buf.write(u"\u00fd\7N\2\2\u00fd\u00fe\7G\2\2\u00fe8\3\2\2\2\u00ff")
        buf.write(u"\u0100\7/\2\2\u0100\u0101\7/\2\2\u0101\u0102\7\'\2\2")
        buf.write(u"\u0102\u0103\7K\2\2\u0103\u0104\7X\2\2\u0104\u0105\7")
        buf.write(u"E\2\2\u0105:\3\2\2\2\u0106\u0107\7/\2\2\u0107\u0108\7")
        buf.write(u"/\2\2\u0108\u0109\7\'\2\2\u0109\u010a\7O\2\2\u010a\u010b")
        buf.write(u"\7C\2\2\u010b\u010c\7K\2\2\u010c\u010d\7P\2\2\u010d<")
        buf.write(u"\3\2\2\2\u010e\u010f\7c\2\2\u010f\u0110\7u\2\2\u0110")
        buf.write(u"\u0111\7u\2\2\u0111\u0112\7g\2\2\u0112\u0113\7t\2\2\u0113")
        buf.write(u"\u0114\7v\2\2\u0114>\3\2\2\2\u0115\u0116\7h\2\2\u0116")
        buf.write(u"\u0117\7n\2\2\u0117\u0118\7q\2\2\u0118\u0119\7q\2\2\u0119")
        buf.write(u"\u011a\7t\2\2\u011a@\3\2\2\2\u011b\u011c\7e\2\2\u011c")
        buf.write(u"\u011d\7q\2\2\u011d\u011e\7p\2\2\u011e\u011f\7f\2\2\u011f")
        buf.write(u"\u0120\7c\2\2\u0120\u0121\7e\2\2\u0121\u0122\7v\2\2\u0122")
        buf.write(u"B\3\2\2\2\u0123\u0124\7\60\2\2\u0124D\3\2\2\2\u0125\u0126")
        buf.write(u"\7<\2\2\u0126\u0127\7?\2\2\u0127F\3\2\2\2\u0128\u0129")
        buf.write(u"\7r\2\2\u0129\u012a\7t\2\2\u012a\u012b\7g\2\2\u012bH")
        buf.write(u"\3\2\2\2\u012c\u012d\7p\2\2\u012d\u012e\7q\2\2\u012e")
        buf.write(u"\u012f\7v\2\2\u012fJ\3\2\2\2\u0130\u0131\7,\2\2\u0131")
        buf.write(u"L\3\2\2\2\u0132\u0133\7\61\2\2\u0133N\3\2\2\2\u0134\u0135")
        buf.write(u"\7f\2\2\u0135\u0136\7k\2\2\u0136\u0137\7x\2\2\u0137P")
        buf.write(u"\3\2\2\2\u0138\u0139\7o\2\2\u0139\u013a\7q\2\2\u013a")
        buf.write(u"\u013b\7f\2\2\u013bR\3\2\2\2\u013c\u013d\7-\2\2\u013d")
        buf.write(u"T\3\2\2\2\u013e\u013f\7>\2\2\u013fV\3\2\2\2\u0140\u0141")
        buf.write(u"\7>\2\2\u0141\u0142\7?\2\2\u0142X\3\2\2\2\u0143\u0144")
        buf.write(u"\7@\2\2\u0144Z\3\2\2\2\u0145\u0146\7@\2\2\u0146\u0147")
        buf.write(u"\7?\2\2\u0147\\\3\2\2\2\u0148\u0149\7>\2\2\u0149\u014a")
        buf.write(u"\7@\2\2\u014a^\3\2\2\2\u014b\u014c\7c\2\2\u014c\u014d")
        buf.write(u"\7p\2\2\u014d\u014e\7f\2\2\u014e`\3\2\2\2\u014f\u0150")
        buf.write(u"\7q\2\2\u0150\u0151\7t\2\2\u0151b\3\2\2\2\u0152\u0153")
        buf.write(u"\7z\2\2\u0153\u0154\7q\2\2\u0154\u0155\7t\2\2\u0155d")
        buf.write(u"\3\2\2\2\u0156\u0157\7?\2\2\u0157\u0158\7@\2\2\u0158")
        buf.write(u"f\3\2\2\2\u0159\u015a\7/\2\2\u015a\u015b\7@\2\2\u015b")
        buf.write(u"h\3\2\2\2\u015c\u015d\7k\2\2\u015d\u015e\7h\2\2\u015e")
        buf.write(u"j\3\2\2\2\u015f\u0160\7v\2\2\u0160\u0161\7j\2\2\u0161")
        buf.write(u"\u0162\7g\2\2\u0162\u0163\7p\2\2\u0163l\3\2\2\2\u0164")
        buf.write(u"\u0165\7g\2\2\u0165\u0166\7n\2\2\u0166\u0167\7u\2\2\u0167")
        buf.write(u"\u0168\7g\2\2\u0168n\3\2\2\2\u0169\u016a\5s:\2\u016a")
        buf.write(u"\u016b\7\60\2\2\u016b\u016c\5s:\2\u016cp\3\2\2\2\u016d")
        buf.write(u"\u016e\7v\2\2\u016e\u016f\7t\2\2\u016f\u0170\7w\2\2\u0170")
        buf.write(u"\u0177\7g\2\2\u0171\u0172\7h\2\2\u0172\u0173\7c\2\2\u0173")
        buf.write(u"\u0174\7n\2\2\u0174\u0175\7u\2\2\u0175\u0177\7g\2\2\u0176")
        buf.write(u"\u016d\3\2\2\2\u0176\u0171\3\2\2\2\u0177r\3\2\2\2\u0178")
        buf.write(u"\u017a\t\2\2\2\u0179\u0178\3\2\2\2\u017a\u017b\3\2\2")
        buf.write(u"\2\u017b\u0179\3\2\2\2\u017b\u017c\3\2\2\2\u017ct\3\2")
        buf.write(u"\2\2\u017d\u0181\t\3\2\2\u017e\u0180\t\4\2\2\u017f\u017e")
        buf.write(u"\3\2\2\2\u0180\u0183\3\2\2\2\u0181\u017f\3\2\2\2\u0181")
        buf.write(u"\u0182\3\2\2\2\u0182v\3\2\2\2\u0183\u0181\3\2\2\2\u0184")
        buf.write(u"\u0186\t\5\2\2\u0185\u0184\3\2\2\2\u0186\u0187\3\2\2")
        buf.write(u"\2\u0187\u0185\3\2\2\2\u0187\u0188\3\2\2\2\u0188\u0189")
        buf.write(u"\3\2\2\2\u0189\u018a\b<\2\2\u018ax\3\2\2\2\u018b\u018c")
        buf.write(u"\7/\2\2\u018c\u018d\7/\2\2\u018d\u0196\3\2\2\2\u018e")
        buf.write(u"\u0192\n\6\2\2\u018f\u0191\n\7\2\2\u0190\u018f\3\2\2")
        buf.write(u"\2\u0191\u0194\3\2\2\2\u0192\u0190\3\2\2\2\u0192\u0193")
        buf.write(u"\3\2\2\2\u0193\u0197\3\2\2\2\u0194\u0192\3\2\2\2\u0195")
        buf.write(u"\u0197\3\2\2\2\u0196\u018e\3\2\2\2\u0196\u0195\3\2\2")
        buf.write(u"\2\u0197\u019c\3\2\2\2\u0198\u019a\7\17\2\2\u0199\u0198")
        buf.write(u"\3\2\2\2\u0199\u019a\3\2\2\2\u019a\u019b\3\2\2\2\u019b")
        buf.write(u"\u019d\7\f\2\2\u019c\u0199\3\2\2\2\u019c\u019d\3\2\2")
        buf.write(u"\2\u019d\u019e\3\2\2\2\u019e\u019f\b=\2\2\u019fz\3\2")
        buf.write(u"\2\2\u01a0\u01a1\7*\2\2\u01a1\u01a2\7,\2\2\u01a2\u01a6")
        buf.write(u"\3\2\2\2\u01a3\u01a5\13\2\2\2\u01a4\u01a3\3\2\2\2\u01a5")
        buf.write(u"\u01a8\3\2\2\2\u01a6\u01a7\3\2\2\2\u01a6\u01a4\3\2\2")
        buf.write(u"\2\u01a7\u01a9\3\2\2\2\u01a8\u01a6\3\2\2\2\u01a9\u01aa")
        buf.write(u"\7,\2\2\u01aa\u01ab\7+\2\2\u01ab\u01ac\3\2\2\2\u01ac")
        buf.write(u"\u01ad\b>\2\2\u01ad|\3\2\2\2\u01ae\u01af\7\61\2\2\u01af")
        buf.write(u"\u01b0\7,\2\2\u01b0\u01b4\3\2\2\2\u01b1\u01b3\13\2\2")
        buf.write(u"\2\u01b2\u01b1\3\2\2\2\u01b3\u01b6\3\2\2\2\u01b4\u01b5")
        buf.write(u"\3\2\2\2\u01b4\u01b2\3\2\2\2\u01b5\u01b7\3\2\2\2\u01b6")
        buf.write(u"\u01b4\3\2\2\2\u01b7\u01b8\7,\2\2\u01b8\u01b9\7\61\2")
        buf.write(u"\2\u01b9\u01ba\3\2\2\2\u01ba\u01bb\b?\2\2\u01bb~\3\2")
        buf.write(u"\2\2\u01bc\u01bd\13\2\2\2\u01bd\u0080\3\2\2\2\r\2\u0176")
        buf.write(u"\u017b\u0181\u0187\u0192\u0196\u0199\u019c\u01a6\u01b4")
        buf.write(u"\3\b\2\2")
        return buf.getvalue()
class LustreLexer(Lexer):
    """Lexer for the Lustre language, generated by ANTLR 4.7.2 from
    ``Lustre.g4``.

    The token-type constants, name tables and ATN below are
    machine-generated; regenerate from the grammar rather than editing
    them by hand.
    """

    # Deserialize the ATN once at class-creation time and build one DFA
    # per decision state; instances share these tables.
    atn = ATNDeserializer().deserialize(serializedATN())

    decisionsToDFA = [DFA(ds, i) for i, ds in enumerate(atn.decisionToState)]

    # Token type constants (anonymous grammar literals T__0..T__53,
    # then the named lexer rules).
    T__0 = 1
    T__1 = 2
    T__2 = 3
    T__3 = 4
    T__4 = 5
    T__5 = 6
    T__6 = 7
    T__7 = 8
    T__8 = 9
    T__9 = 10
    T__10 = 11
    T__11 = 12
    T__12 = 13
    T__13 = 14
    T__14 = 15
    T__15 = 16
    T__16 = 17
    T__17 = 18
    T__18 = 19
    T__19 = 20
    T__20 = 21
    T__21 = 22
    T__22 = 23
    T__23 = 24
    T__24 = 25
    T__25 = 26
    T__26 = 27
    T__27 = 28
    T__28 = 29
    T__29 = 30
    T__30 = 31
    T__31 = 32
    T__32 = 33
    T__33 = 34
    T__34 = 35
    T__35 = 36
    T__36 = 37
    T__37 = 38
    T__38 = 39
    T__39 = 40
    T__40 = 41
    T__41 = 42
    T__42 = 43
    T__43 = 44
    T__44 = 45
    T__45 = 46
    T__46 = 47
    T__47 = 48
    T__48 = 49
    T__49 = 50
    T__50 = 51
    T__51 = 52
    T__52 = 53
    T__53 = 54
    REAL = 55
    BOOL = 56
    INT = 57
    ID = 58
    WS = 59
    SL_COMMENT = 60
    ML_COMMENT = 61
    C_COMMENT = 62
    ERROR = 63

    channelNames = [ u"DEFAULT_TOKEN_CHANNEL", u"HIDDEN" ]

    modeNames = [ u"DEFAULT_MODE" ]

    # Display strings for tokens with a fixed literal, indexed by token type.
    literalNames = [ u"<INVALID>",
            u"'datatype'", u"'='", u"';'", u"'const'", u"':'", u"'node'",
            u"'('", u"')'", u"'returns'", u"'var'", u"'let'", u"'tel'",
            u"','", u"'struct'", u"'{'", u"'}'", u"'enum'", u"'int'", u"'subrange'",
            u"'['", u"']'", u"'of'", u"'bool'", u"'real'", u"'-'", u"'--%PROPERTY'",
            u"'--%REALIZABLE'", u"'--%IVC'", u"'--%MAIN'", u"'assert'",
            u"'floor'", u"'condact'", u"'.'", u"':='", u"'pre'", u"'not'",
            u"'*'", u"'/'", u"'div'", u"'mod'", u"'+'", u"'<'", u"'<='",
            u"'>'", u"'>='", u"'<>'", u"'and'", u"'or'", u"'xor'", u"'=>'",
            u"'->'", u"'if'", u"'then'", u"'else'" ]

    symbolicNames = [ u"<INVALID>",
            u"REAL", u"BOOL", u"INT", u"ID", u"WS", u"SL_COMMENT", u"ML_COMMENT",
            u"C_COMMENT", u"ERROR" ]

    ruleNames = [ u"T__0", u"T__1", u"T__2", u"T__3", u"T__4", u"T__5",
                  u"T__6", u"T__7", u"T__8", u"T__9", u"T__10", u"T__11",
                  u"T__12", u"T__13", u"T__14", u"T__15", u"T__16", u"T__17",
                  u"T__18", u"T__19", u"T__20", u"T__21", u"T__22", u"T__23",
                  u"T__24", u"T__25", u"T__26", u"T__27", u"T__28", u"T__29",
                  u"T__30", u"T__31", u"T__32", u"T__33", u"T__34", u"T__35",
                  u"T__36", u"T__37", u"T__38", u"T__39", u"T__40", u"T__41",
                  u"T__42", u"T__43", u"T__44", u"T__45", u"T__46", u"T__47",
                  u"T__48", u"T__49", u"T__50", u"T__51", u"T__52", u"T__53",
                  u"REAL", u"BOOL", u"INT", u"ID", u"WS", u"SL_COMMENT",
                  u"ML_COMMENT", u"C_COMMENT", u"ERROR" ]

    grammarFileName = u"Lustre.g4"

    def __init__(self, input=None, output=sys.stdout):
        """Create a lexer over *input* (an antlr4 InputStream or None)."""
        super(LustreLexer, self).__init__(input, output=output)
        # Guard against running under an incompatible ANTLR runtime.
        self.checkVersion("4.7.2")
        self._interp = LexerATNSimulator(self, self.atn, self.decisionsToDFA, PredictionContextCache())
        self._actions = None
        self._predicates = None
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.