# Copyright 2016 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base classes, functions, exceptions."""
import logging
import stl.levenshtein
# python2 and python3 compatible way for detecting if something is a string.
try:
basestring # pylint: disable=pointless-statement
def IsString(x):
"""Returns if |x| is a string (compatible with python2 and python3)."""
return isinstance(x, basestring)
except NameError:
def IsString(x):
"""Returns if |x| is a string (compatible with python2 and python3)."""
return isinstance(x, str)
def GetCSV(array):
"""Return a comma-separated string."""
if not array:
return ''
return ','.join([str(e) for e in array])
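# For example, GetCSV([1, 'a', 2.0]) returns '1,a,2.0' and GetCSV([]) returns ''.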
class NamedObject(object):
"""Base class for all objects with a name.
Attributes:
name: Name of this object.
"""
def __init__(self, name):
self.name = name
def __repr__(self):
return str(self)
def __eq__(self, other):
return self.name == other.name
def __ne__(self, other):
return not self == other
def Resolve(self, env, resolved_params):
"""Resolve object.
It resolves any internal attributes with unresolved values, then returns
the resolved values, which can be used for the graph.
Args:
env: Environment with all information necessary to resolve internal
attributes.
resolved_params: Resolved values which will possibly be referenced by
internal attributes.
Raises:
NotImplementedError
"""
raise NotImplementedError('Resolve() is not implemented: ' + self.name)
class ParameterizedObject(NamedObject):
"""Base class of objects which has parameters.
A parameterized object can be used to expanded later with actual arguments.
Attributes:
params: List of parameters for this object.
"""
def __init__(self, name):
NamedObject.__init__(self, name)
self.params = []
def __eq__(self, other):
return NamedObject.__eq__(self, other) and self.params == other.params
class TypedObject(NamedObject):
"""Base class for all object with a type and name.
Attributes:
type_: Type of this object.
"""
def __init__(self, name, type_):
NamedObject.__init__(self, name)
self.type_ = type_
def __eq__(self, other):
return NamedObject.__eq__(self, other) and self.type_ == other.type_
class Const(TypedObject):
"""Constants.
A constant is defined with "const" keyword. For example,
const string kStringConstantExample = "string constant example";
Attributes:
value: Value for this constant. Resolve() returns it.
"""
def __init__(self, name, type_, value=None):
TypedObject.__init__(self, name, type_)
self.value = value
def __str__(self):
return 'CONST %s(%s): %s' % (self.name, self.type_, repr(self.value))
def __eq__(self, other):
return TypedObject.__eq__(self, other) and self.value == other.value
def Resolve(self, env, resolved_params):
logging.log(1, 'Resolving ' + self.name)
if isinstance(self.value, NamedObject):
return self.value.Resolve(env, resolved_params)
return self.value
class Value(NamedObject):
"""Values.
A value is an integer, string, array, or dictionary. self.name can be None,
which means the value doesn't have a name.
Attributes:
value: Actual value.
"""
def __init__(self, value):
NamedObject.__init__(self, value)
self.value = value
self.name = None # Clear name by default
def __eq__(self, other):
return NamedObject.__eq__(self, other) and self.value == other.value
def __str__(self):
if self.name is None:
return repr(self.value)
return 'VALUE: %s(%s)' % (self.name, repr(self.value))
def Resolve(self, env, resolved_params):
logging.log(1, 'Resolving ' + str(self))
if isinstance(self.value, NamedObject):
return self.value.Resolve(env, resolved_params)
# Expand (struct or array)
if isinstance(self.value, list):
if self.value and self.value[0].name: # dict
dict_ = {}
for v in self.value:
dict_[v.name] = v.Resolve(env, resolved_params)
return dict_
# list
return [v.Resolve(env, resolved_params) for v in self.value]
if isinstance(self.value, int) or self.value is None:
return self.value
if not IsString(self.value):
raise TypeError('Wrong value: ' + repr(self.value))
# Get reference
if self.value.startswith('$'):
var = self.value[1:]
fields = var.split('.')
# Role field or not?
if len(fields) > 1: # Role field
assert len(fields) == 2
role = Role.FindStatic(fields[0], env, resolved_params)
return FuncGetField(role, fields[1])
logging.log(1, '** RESOLVEDPARAMS **: ' + str(resolved_params))
# Params or local vars?
if var in resolved_params:
v = resolved_params[var]
if isinstance(v, FuncSet):
if isinstance(v.obj, LocalVar):
return v.obj
else: # if isinstance(v.obj, Role):
return FuncGetField(v.obj, v.field)
return v
# Const?
consts = env['_current_module'].consts
if var in consts:
return consts[var].Resolve(env, {})
# Roles?
roles = env['_current_module'].roles
if var in roles:
return roles[var]
did_you_mean = stl.levenshtein.closest_candidate(
var, list(consts.keys()) + list(roles.keys()) + list(resolved_params.keys()))
raise NameError(
'Cannot find a const, role or local var: %s. Did you mean %s?' %
(var, did_you_mean))
# Set reference
if self.value.startswith('&'):
var = self.value[1:]
fields = var.split('.')
# Role field or not?
if len(fields) > 1: # Role field
assert len(fields) == 2
role = Role.FindStatic(fields[0], env, resolved_params)
return FuncSet(role, fields[1])
# Local vars?
if var in resolved_params:
local = resolved_params[var]
if isinstance(local, FuncSet):
return local
if isinstance(local, LocalVar):
return FuncSet(local)
# Roles?
roles = env['_current_module'].roles
if var in roles:
return roles[var]
did_you_mean = stl.levenshtein.closest_candidate(
var, list(resolved_params.keys()) + list(roles.keys()))
raise NameError('Cannot find a local var or role: %s. Did you mean %s?' %
(var, did_you_mean))
# Literal value (integer, boolean, or string)
return self.value
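# A minimal sketch of how Value.Resolve handles references (hypothetical names;
# assumes a module whose consts include 'kTimeout' and whose roles include 'server'):
#   Value('$kTimeout').Resolve(env, {})     # -> the constant's resolved value
#   Value('$server.port').Resolve(env, {})  # -> FuncGetField(<server role>, 'port')
#   Value('&server.port').Resolve(env, {})  # -> FuncSet(<server role>, 'port')
#   Value(42).Resolve(env, {})              # -> 42 (literals pass through)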
class QualifierValue(Value):
"""Value from a Qualifier.
This is an instance of a Qualifier in a MessageValue. The external Qualifier
can be used to generate values, or validate the value of a message field.
Attributes:
qualifier: qualifier.Qualifier instance.
params: List of parameters for this qualifier.
out_ref: Output variable for this qualifier. Qualifiers can be used to
assign values to local variables in a Transition.
"""
class Resolved(Value):
"""Resolved value from a QualifierValue.
This is a resolved instance of a QualifierValue; it is used directly by the
test driver to validate and generate field values. This class accepts a
list of resolved arguments which can be python primitives, STL local
variables, or runnable STL functions.
Attributes:
qualifier: qualifier.Qualifier instance.
qual_type: The field type to be qualified (int, bool, string, message).
args: List of concrete arguments to be run in
self.qualifier.Validate()/Generate().
func_set: Output FuncSet for this qualifier. Qualifiers can be used to
assign values to local variables in a Transition or to fields in a
Role via func_set.
"""
def __init__(self, qualifier, args, func_set=None):
Value.__init__(self, None)
self.qualifier = qualifier
self.qual_type = qualifier.qual_type
self.args = args
self.func_set = func_set
def __eq__(self, other):
return Value.__eq__(self, other) and self.qualifier == other.qualifier
def __str__(self):
return 'QUALIFIER-RESOLVED: %s(%s) -> %s' % (self.qualifier.name,
self.args, self.func_set)
def ValidateAndSet(self, value):
if self.func_set:
self.func_set.SetValue(value)
return self.qualifier.external.Validate(value, *self._EvalArgs())
def Generate(self):
self.value = self.qualifier.external.Generate(*self._EvalArgs())
if self.func_set:
self.func_set.SetValue(self.value)
return self.value
def _EvalArgs(self):
args = []
for a in self.args:
if isinstance(a, LocalVar):
args.append(a.value)
elif isinstance(a, Func):
args.append(a.Run())
else:
args.append(a)
return args
def __init__(self, qualifier, params, out_ref=None):
Value.__init__(self, None)
self.qualifier = qualifier
self.params = params
self.out_ref = out_ref
def __eq__(self, other):
return NamedObject.__eq__(self, other) and self.name == other.name
def __str__(self):
return 'QUALIFIER-VALUE: %s(%s) -> %s' % (self.qualifier.name, self.params,
self.out_ref)
def Resolve(self, env, resolved_params):
args = []
for v in self.params:
args.append(v.Resolve(env, resolved_params))
func_set = None
if self.out_ref:
func_set = self.out_ref.Resolve(env, resolved_params)
return QualifierValue.Resolved(self.qualifier, args, func_set)
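# A minimal usage sketch (hypothetical names): once resolved, the test driver
# either validates a received field value,
#   resolved_qual.ValidateAndSet(received_value)  # stores it via func_set, then
#                                                 # calls qualifier.external.Validate()
# or generates a value to send,
#   value_to_send = resolved_qual.Generate()      # calls qualifier.external.Generate()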
class Param(TypedObject):
"""Parameters for ParameterizedObject."""
def __init__(self, name, type_):
TypedObject.__init__(self, name, type_)
def __str__(self):
return 'PARAM %s(%s)' % (self.name, self.type_)
class LocalVar(TypedObject):
"""Local variables.
Local variables can be defined with types in a transition spec. In most cases,
transitions define local variables to store values temporarily; these values don't
change state transition results but are needed to proceed through and complete
transitions.
Attributes:
value: Current value of this local variable.
"""
def __init__(self, name, type_):
TypedObject.__init__(self, name, type_)
self.value = None
def __str__(self):
return 'LOCAL %s(%s)' % (self.name, self.type_)
class Field(TypedObject):
"""Fields in messages or in roles.
Attributes:
optional: Whether or not this field is optional in the given message.
Meaningless for roles.
repeated: Whether or not this field is repeated in the given message.
Meaningless for roles.
encoding_props: Dictionary of miscellaneous property values, used for
custom encoding schemes.
"""
def __init__(self, name, type_, optional=False, repeated=False):
TypedObject.__init__(self, name, type_)
self.optional = optional
self.repeated = repeated
self.encoding_props = {}
if self.repeated:
self.optional = True
def __eq__(self, other):
return (TypedObject.__eq__(self, other) and
self.optional == other.optional and self.repeated == other.repeated)
def __str__(self):
if self.repeated:
return 'FIELD-REPEATED %s(%s)' % (self.name, self.type_)
if self.optional:
return 'FIELD-OPTIONAL %s(%s)' % (self.name, self.type_)
return 'FIELD %s(%s)' % (self.name, self.type_)
class Role(NamedObject):
"""Roles.
A role represents an endpoint of events: it either triggers events, i.e. becomes
the source of events, or becomes the target of events. It has fields to
store values necessary to execute events, for example, the address for a protocol.
Attributes:
fields: Map of fields and their names. A field is used to store values
necessary to execute events.
field_values: Map of field names and current values.
"""
def __init__(self, name):
NamedObject.__init__(self, name)
self.fields = {}
self.field_values = {}
def __eq__(self, other):
return (NamedObject.__eq__(self, other) and self.fields == other.fields and
self.field_values == other.field_values)
def __str__(self):
return 'ROLE ' + self.name
def __getitem__(self, key):
if key not in self.fields:
raise AttributeError("No field exists in role '%s': %s" % (self.name,
key))
if key in self.field_values:
return self.field_values[key]
return None
def __setitem__(self, key, value):
if key not in self.fields:
raise AttributeError("No field exists in role '%s': %s" % (self.name,
key))
# TODO(byungchul): Type checking.
self.field_values[key] = value
def Resolve(self, env, resolved_params):
logging.log(1, 'Resolving ' + self.name)
return self
@staticmethod
def FindStatic(name, env, resolved_params):
"""Find a role from |resolved_params| or |env|."""
# Find role in params
if name in resolved_params:
resolved_role = resolved_params[name]
if not isinstance(resolved_role, Role):
raise NameError('Not a role: ' + name)
return resolved_role
# Find role in current module
roles = env['_current_module'].roles
if name in roles:
return roles[name]
did_you_mean = stl.levenshtein.closest_candidate(
name, list(resolved_params.keys()) + list(roles.keys()))
raise NameError('Cannot find a role: %s. Did you mean %s?' %
(name, did_you_mean))
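# A minimal sketch of Role field access (hypothetical role and field names):
#   r = Role('client')
#   r.fields['addr'] = Field('addr', 'string')
#   r['addr'] = '10.0.0.2'   # __setitem__ requires the field to be declared
#   r['addr']                # -> '10.0.0.2'; declared but unset fields return None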
class Expand(NamedObject):
"""Expressions to expand to other objects.
Messages, events, states, transitions can be parameterized. A parameterized
object can be expanded with values.
Note that Resolve() is only used to expand parameterized messages. Other
parameterized objects are expanded differently.
Attributes:
values: List of values to be used when expanding to other object.
"""
def __init__(self, name):
NamedObject.__init__(self, name)
self.values = []
def __eq__(self, other):
return NamedObject.__eq__(self, other) and self.values == other.values
def __str__(self):
return 'EXPAND %s: v(%s)' % (self.name, GetCSV(self.values))
def Resolve(self, env, resolved_params):
# This function is called only for messages. Other expand is handled
# separately, for example, by Transition or Event.
messages = env['_current_module'].messages
if self.name not in messages:
did_you_mean = stl.levenshtein.closest_candidate(
self.name, messages.keys())
raise NameError('Cannot find a message: %s. Did you mean %s?' %
(self.name, did_you_mean))
msg = messages[self.name]
if msg.is_array:
assert len(self.values) == 1
msg_array = self.values[0]
assert isinstance(msg_array.value, list)
new_resolved_fields = []
for msg_element in msg_array.value:
new_resolved_fields.append({
v.name: v.Resolve(env, resolved_params)
for v in msg_element.value
})
else:
new_resolved_fields = {
v.name: v.Resolve(env, resolved_params)
for v in self.values
}
return msg.Resolve(env, new_resolved_fields)
class Func(NamedObject):
"""External function call.
Attributes:
func: The callable function this wraps and runs.
args: List of arguments for given function with |name|.
"""
def __init__(self, name, func=None):
NamedObject.__init__(self, name)
self.func = func
self.args = []
def __str__(self):
return 'FUNC %s(%s)' % (self.name, GetCSV(self.args))
def Run(self):
"""Execute the function with |args|.
Returns:
Whether the function succeeded to execute.
Raises:
RuntimeError: If self.func is None.
"""
if not self.func:
raise RuntimeError('Func does not contain a runnable function.')
return self.func(*self.args)
class FuncNoOp(Func):
"""External function doing nothing."""
def __init__(self, name):
Func.__init__(self, name)
def Run(self):
return True
class FuncGetField(Func):
"""External function to get the value of a field either of Role or dictionary.
Attributes:
obj: Object which has the field of |field|. It can be either Role or
dictionary.
field: Field name to get the value of.
"""
def __init__(self, obj, field):
Func.__init__(self, 'GetField')
self.obj = obj
self.field = field
def __str__(self):
if isinstance(self.obj, Role):
return 'GET %s.%s' % (self.obj.name, self.field)
return 'GET %s.%s' % (self.obj, self.field)
def Run(self):
return self.obj[self.field]
class FuncSet(Func):
"""External function to set a value to a field of Role or to a LocalVar.
Attributes:
obj: Object which has the field of |field|. It can be either Role or
LocalVar.
field: Field name to set a value to. |obj| must be a Role.
"""
def __init__(self, obj, field=None):
Func.__init__(self, 'Set')
self.obj = obj
self.field = field
if isinstance(self.obj, LocalVar):
if field:
raise TypeError("Local var '%s' cannot set a field: %s" %
(self.obj.name, field))
elif isinstance(self.obj, Role):
if not field:
raise TypeError('Cannot set role: ' + self.obj.name)
else:
raise TypeError('Cannot set ' + str(self.obj))
def __str__(self):
if isinstance(self.obj, LocalVar):
return 'SET ' + self.obj.name
# if isinstance(self.obj, Role):
return 'SET %s.%s' % (self.obj.name, self.field)
def Run(self):
"""Get the value to a field of Role or to a LocalVar."""
if isinstance(self.obj, LocalVar):
return self.obj.value
elif isinstance(self.obj, Role):
return self.obj[self.field]
else:
raise TypeError('Cannot GetValue on type %s' % type(self.obj))
def SetValue(self, value):
"""Set a value to a field of Role or to a LocalVar."""
if isinstance(self.obj, LocalVar):
self.obj.value = value
elif isinstance(self.obj, Role):
self.obj[self.field] = value
else:
raise TypeError('Cannot SetValue on type %s' % type(self.obj))
class FuncWithContext(Func):
"""External event function with context.
An event has a context consisting of a source Role, a target Role, and a flag
indicating whether this function is called to test the source Role.
Attributes:
context: Context to run this event function.
"""
class Context(object):
"""Context to run a function as an event in state transition spec.
Attributes:
source: Source role of this event function.
target: Target role of this event function.
test_source: Whether or not this function call is to test source Role.
If not, this function call is to test target Role.
"""
def __init__(self):
self.source = None
self.target = None
self.test_source = False
def __str__(self):
return ('CONTEXT: s(%s)%s, t(%s)%s' % (self.source,
'*' if self.test_source else '',
self.target,
'' if self.test_source else '*'))
def __init__(self, name, event):
Func.__init__(self, name)
self.event = event
self.context = FuncWithContext.Context()
def __str__(self):
return ('FUNC %s: s(%s), t(%s), a(%s)' % (
self.name, self.context.source, self.context.target, GetCSV(self.args)))
def Run(self):
logging.log(2, 'Running ' + str(self))
new_args = [self.context]
new_args.extend(self.args)
if self.context.test_source:
return self.event.Wait(*new_args)
return self.event.Fire(*new_args)
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Stubs for File service."""
import base64
import datetime
import hashlib
import os
import random
import string
import StringIO
import tempfile
import time
from google.appengine.api import apiproxy_stub
from google.appengine.api import datastore
from google.appengine.api import datastore_errors
from google.appengine.api import blobstore as api_blobstore
from google.appengine.api.blobstore import blobstore_stub
from google.appengine.api.files import blobstore as files_blobstore
from google.appengine.api.files import file as files
from google.appengine.api.files import file_service_pb
from google.appengine.api.files import gs
from google.appengine.ext import blobstore
from google.appengine.ext.cloudstorage import cloudstorage_stub
from google.appengine.runtime import apiproxy_errors
MAX_REQUEST_SIZE = 32 << 20
GS_INFO_KIND = blobstore_stub._GS_INFO_KIND
_now_function = datetime.datetime.now
def _to_seconds(datetime_obj):
return int(time.mktime(datetime_obj.timetuple()))
def _random_string(length):
"""Generate a random string of given length."""
return ''.join(
random.choice(string.letters + string.digits) for _ in range(length))
def raise_error(error_code, error_detail=''):
"""Raise application error helper method."""
raise apiproxy_errors.ApplicationError(error_code, error_detail=error_detail)
_BLOBSTORE_DIRECTORY = files_blobstore._BLOBSTORE_DIRECTORY
_GS_PREFIX = gs._GS_PREFIX
_GS_UPLOAD_PREFIX = _GS_PREFIX + 'writable:'
class _GoogleStorageUpload(tuple):
"""Stores information about a writable Google Storage file."""
buf = property(lambda self: self[0])
content_type = property(lambda self: self[1])
gs_filename = property(lambda self: self[2])
class GoogleStorage(object):
"""Virtual google storage to be used by file api."""
def _Upload(self, buf, content_type, gs_filename):
return _GoogleStorageUpload([buf, content_type, gs_filename])
def __init__(self, blob_storage):
"""Constructor.
Args:
blob_storage:
apphosting.api.blobstore.blobstore_stub.BlobStorage instance.
"""
self.blob_storage = blob_storage
self.gs_stub = cloudstorage_stub.CloudStorageStub(self.blob_storage)
self.uploads = {}
self.finalized = set()
self.sequence_keys = {}
def remove_gs_prefix(self, gs_filename):
return gs_filename[len('/gs'):]
def add_gs_prefix(self, gs_filename):
return '/gs' + gs_filename
def get_blobkey(self, gs_filename):
return blobstore.create_gs_key(gs_filename)
def has_upload(self, filename):
"""Checks if there is an upload at this filename."""
return filename in self.uploads
def finalize(self, filename):
"""Marks file as finalized."""
upload = self.uploads[filename]
self.finalized.add(filename)
upload.buf.seek(0)
content = upload.buf.read()
blobkey = self.gs_stub.post_start_creation(
self.remove_gs_prefix(upload.gs_filename),
{'content-type': upload.content_type})
assert blobkey == self.get_blobkey(upload.gs_filename)
self.gs_stub.put_continue_creation(
blobkey, content, (0, len(content) - 1), True)
del self.sequence_keys[filename]
def is_finalized(self, filename):
"""Checks if file is already finalized."""
assert filename in self.uploads
return filename in self.finalized
def start_upload(self, request):
"""Starts a new upload based on the specified CreateRequest."""
mime_type = None
gs_filename = request.filename()
ignored_parameters = [
gs._CACHE_CONTROL_PARAMETER,
gs._CANNED_ACL_PARAMETER,
gs._CONTENT_DISPOSITION_PARAMETER,
gs._CONTENT_ENCODING_PARAMETER,
]
for param in request.parameters_list():
name = param.name()
if name == gs._MIME_TYPE_PARAMETER:
mime_type = param.value()
elif (name in ignored_parameters or
name.startswith(gs._USER_METADATA_PREFIX)):
pass
else:
raise_error(file_service_pb.FileServiceErrors.INVALID_PARAMETER)
if not mime_type:
raise_error(file_service_pb.FileServiceErrors.INVALID_PARAMETER)
elif not gs_filename:
raise_error(file_service_pb.FileServiceErrors.INVALID_PARAMETER)
random_str = ''.join(
random.choice(string.ascii_uppercase + string.digits)
for _ in range(64))
writable_name = '%s%s' % (
_GS_UPLOAD_PREFIX, base64.urlsafe_b64encode(random_str))
self.uploads[writable_name] = self._Upload(
StringIO.StringIO(), mime_type, gs_filename)
self.sequence_keys[writable_name] = None
datastore.Delete(
datastore.Key.from_path(GS_INFO_KIND,
self.get_blobkey(gs_filename),
namespace=''))
return writable_name
def append(self, filename, data, sequence_key):
"""Appends data to the upload filename."""
assert not self.is_finalized(filename)
if sequence_key:
current_sequence_key = self.sequence_keys[filename]
if current_sequence_key and current_sequence_key >= sequence_key:
raise_error(file_service_pb.FileServiceErrors.SEQUENCE_KEY_OUT_OF_ORDER,
error_detail=current_sequence_key)
self.sequence_keys[filename] = sequence_key
self.uploads[filename].buf.write(data)
def stat(self, gs_filename):
"""
Returns:
file info for a finalized file with given filename
"""
blob_key = self.get_blobkey(gs_filename)
try:
fileinfo = datastore.Get(
datastore.Key.from_path(GS_INFO_KIND, blob_key, namespace=''))
fileinfo['filename'] = self.add_gs_prefix(fileinfo['filename'])
return fileinfo
except datastore_errors.EntityNotFoundError:
raise_error(file_service_pb.FileServiceErrors.EXISTENCE_ERROR,
gs_filename)
def get_reader(self, gs_filename):
try:
return self.blob_storage.OpenBlob(self.get_blobkey(gs_filename))
except IOError:
return None
def listdir(self, request, response):
"""listdir.
Args:
request: ListDir RPC request.
response: ListDir RPC response.
Returns:
A list of fully qualified filenames under the given path, sorted in
character order.
"""
path = self.remove_gs_prefix(request.path())
prefix = request.prefix() if request.has_prefix() else ''
q = datastore.Query(GS_INFO_KIND, namespace='')
fully_qualified_name = '/'.join([path, prefix])
if request.has_marker():
q['filename >'] = '/'.join([path, request.marker()])
else:
q['filename >='] = fully_qualified_name
if request.has_max_keys():
max_keys = request.max_keys()
else:
max_keys = 2**31-1
for gs_file_info in q.Get(max_keys):
filename = gs_file_info['filename']
if filename.startswith(fully_qualified_name):
response.add_filenames(self.add_gs_prefix(filename))
else:
break
class GoogleStorageFile(object):
"""File object for '/gs/' files."""
def __init__(self, open_request, file_storage):
self.filename = open_request.filename()
self.file_storage = file_storage
self.open_mode = open_request.open_mode()
content_type = open_request.content_type()
if self.is_appending:
if not self.filename.startswith(_GS_UPLOAD_PREFIX):
raise_error(file_service_pb.FileServiceErrors.INVALID_FILE_NAME)
elif not self.file_storage.has_upload(self.filename):
raise_error(file_service_pb.FileServiceErrors.EXISTENCE_ERROR)
elif self.file_storage.is_finalized(self.filename):
raise_error(file_service_pb.FileServiceErrors.FINALIZATION_ERROR,
'File is already finalized')
else:
if not self.filename.startswith(_GS_PREFIX):
raise_error(file_service_pb.FileServiceErrors.INVALID_FILE_NAME)
elif self.filename.startswith(_GS_UPLOAD_PREFIX):
raise_error(file_service_pb.FileServiceErrors.INVALID_FILE_NAME)
else:
self.buf = self.file_storage.get_reader(self.filename)
if not self.buf:
raise_error(file_service_pb.FileServiceErrors.EXISTENCE_ERROR)
if content_type != file_service_pb.FileContentType.RAW:
raise_error(file_service_pb.FileServiceErrors.WRONG_CONTENT_TYPE)
@property
def is_appending(self):
"""Checks if the file is opened for appending or reading."""
return self.open_mode == file_service_pb.OpenRequest.APPEND
def stat(self, request, response):
"""Fill response with file stat.
Current implementation only fills length, finalized, filename, and content
type. File must be opened in read mode before stat is called.
"""
file_info = self.file_storage.stat(self.filename)
file_stat = response.add_stat()
file_stat.set_filename(file_info['filename'])
file_stat.set_finalized(True)
file_stat.set_length(file_info['size'])
file_stat.set_ctime(_to_seconds(file_info['creation']))
file_stat.set_mtime(_to_seconds(file_info['creation']))
file_stat.set_content_type(file_service_pb.FileContentType.RAW)
response.set_more_files_found(False)
def read(self, request, response):
"""Copies up to max_bytes starting at pos into response from filename."""
if self.is_appending:
raise_error(file_service_pb.FileServiceErrors.WRONG_OPEN_MODE)
self.buf.seek(request.pos())
data = self.buf.read(request.max_bytes())
response.set_data(data)
def append(self, request, response):
"""Appends data to filename."""
if not self.is_appending:
raise_error(file_service_pb.FileServiceErrors.WRONG_OPEN_MODE)
self.file_storage.append(
self.filename, request.data(), request.sequence_key())
def finalize(self):
"""Finalize a file.
Copies temp file data to permanent location for reading.
"""
if not self.is_appending:
raise_error(file_service_pb.FileServiceErrors.WRONG_OPEN_MODE)
elif self.file_storage.is_finalized(self.filename):
raise_error(
file_service_pb.FileServiceErrors.FINALIZATION_ERROR,
'File is already finalized')
self.file_storage.finalize(self.filename)
class BlobstoreStorage(object):
"""Virtual file storage to be used by file api.
Abstracts away all aspects of logical and physical file organization of the
API.
"""
def __init__(self, blob_storage):
"""Constructor.
Args:
blob_storage: An instance of
apphosting.api.blobstore.blobstore_stub.BlobStorage to use for blob
integration.
"""
self.blob_keys = {}
self.blobstore_files = set()
self.finalized_files = set()
self.created_files = set()
self.data_files = {}
self.sequence_keys = {}
self.blob_storage = blob_storage
self.blob_content_types = {}
self.blob_file_names = {}
def finalize(self, filename):
"""Marks file as finalized."""
if self.is_finalized(filename):
raise_error(file_service_pb.FileServiceErrors.FINALIZATION_ERROR,
'File is already finalized')
self.finalized_files.add(filename)
def is_finalized(self, filename):
"""Checks if file is already finalized."""
return filename in self.finalized_files
def get_blob_key(self, ticket):
"""Gets blob key for blob creation ticket."""
return self.blob_keys.get(ticket)
def register_blob_key(self, ticket, blob_key):
"""Register blob key for a ticket."""
self.blob_keys[ticket] = blob_key
def has_blobstore_file(self, filename):
"""Checks if blobstore file was already created."""
return filename in self.blobstore_files
def add_blobstore_file(self, request):
"""Registers a created blob store file."""
mime_type = None
blob_filename = ''
for param in request.parameters_list():
name = param.name()
if name == files_blobstore._MIME_TYPE_PARAMETER:
mime_type = param.value()
elif name == files_blobstore._BLOBINFO_UPLOADED_FILENAME_PARAMETER:
blob_filename = param.value()
else:
raise_error(file_service_pb.FileServiceErrors.INVALID_PARAMETER)
if mime_type is None:
raise_error(file_service_pb.FileServiceErrors.INVALID_PARAMETER)
random_str = ''.join(
random.choice(string.ascii_uppercase + string.digits)
for _ in range(64))
filename = (_BLOBSTORE_DIRECTORY +
files._CREATION_HANDLE_PREFIX +
base64.urlsafe_b64encode(random_str))
self.blobstore_files.add(filename)
self.blob_content_types[filename] = mime_type
self.blob_file_names[filename] = blob_filename
return filename
def get_sequence_key(self, filename):
"""Get sequence key for a file."""
return self.sequence_keys.get(filename, '')
def set_sequence_key(self, filename, sequence_key):
"""Set sequence key for a file."""
self.sequence_keys[filename] = sequence_key
def stat(self, filename):
"""
Returns:
file info for a finalized file with given filename."""
blob_key = files_blobstore.get_blob_key(filename)
file_info = datastore.Get(
datastore.Key.from_path(api_blobstore.BLOB_INFO_KIND, str(blob_key),
namespace=''))
if file_info is None:
raise_error(
file_service_pb.FileServiceErrors.EXISTENCE_ERROR_MEATADATA_NOT_FOUND,
filename)
return file_info
def save_blob(self, filename, blob_key):
"""Save filename temp data to a blobstore under given key."""
f = self._get_data_file(filename)
f.seek(0)
self.blob_storage.StoreBlob(blob_key, f)
f.seek(0, os.SEEK_END)
size = f.tell()
f.close()
del self.data_files[filename]
return size
def _get_data_file(self, filename):
"""Get a temp data file for a file."""
if not filename in self.data_files:
f = tempfile.TemporaryFile()
self.data_files[filename] = f
return f
return self.data_files[filename]
def get_md5_from_blob(self, blobkey):
"""Get md5 hexdigest of the blobfile with blobkey."""
try:
f = self.blob_storage.OpenBlob(blobkey)
file_md5 = hashlib.md5()
file_md5.update(f.read())
return file_md5.hexdigest()
finally:
f.close()
def append(self, filename, data):
"""Append data to file."""
self._get_data_file(filename).write(data)
def get_content_type(self, filename):
return self.blob_content_types[filename]
def get_blob_file_name(self, filename):
return self.blob_file_names[filename]
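# Rough lifecycle of a blobstore file in this stub (a sketch; see BlobstoreFile
# below for the RPC-facing wrapper):
#   filename = storage.add_blobstore_file(create_request)  # registers a writable name
#   storage.append(filename, data)                         # buffered in a temp file
#   BlobstoreFile.finalize() then calls storage.finalize(), save_blob(), and writes
#   a BlobInfo entity plus a __BlobFileIndex__ entry to the datastore.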
class BlobstoreFile(object):
"""File object for generic /blobstore/ file."""
def __init__(self, open_request, file_storage):
"""Constructor.
Args:
open_request: An instance of open file request.
file_storage: An instance of BlobstoreStorage.
"""
self.filename = open_request.filename()
self.file_storage = file_storage
self.blob_reader = None
self.content_type = None
self.mime_content_type = None
open_mode = open_request.open_mode()
content_type = open_request.content_type()
if not self.filename.startswith(_BLOBSTORE_DIRECTORY):
if not self.file_storage.has_blobstore_file(self.filename):
raise_error(file_service_pb.FileServiceErrors.INVALID_FILE_NAME)
self.ticket = self.filename[len(_BLOBSTORE_DIRECTORY):]
if open_mode == file_service_pb.OpenRequest.APPEND:
if not self.file_storage.has_blobstore_file(self.filename):
raise_error(file_service_pb.FileServiceErrors.EXISTENCE_ERROR)
if self.file_storage.is_finalized(self.filename):
raise_error(file_service_pb.FileServiceErrors.FINALIZATION_ERROR,
'File is already finalized')
self.mime_content_type = self.file_storage.get_content_type(self.filename)
self.blob_file_name = self.file_storage.get_blob_file_name(self.filename)
else:
if self.ticket.startswith(files._CREATION_HANDLE_PREFIX):
blobkey = self.file_storage.get_blob_key(self.ticket)
if not blobkey:
raise_error(file_service_pb.FileServiceErrors.FINALIZATION_ERROR,
'Blobkey not found.')
else:
blobkey = self.ticket
blob_info = blobstore.BlobInfo.get(blobkey)
if not blob_info:
raise_error(file_service_pb.FileServiceErrors.FINALIZATION_ERROR,
'Blobinfo not found.')
self.blob_reader = blobstore.BlobReader(blob_info)
self.mime_content_type = blob_info.content_type
if content_type != file_service_pb.FileContentType.RAW:
raise_error(file_service_pb.FileServiceErrors.WRONG_CONTENT_TYPE)
@property
def is_appending(self):
"""Checks if the file is opened for appending or reading."""
return self.blob_reader is None
def stat(self, request, response):
"""Fill response with file stat.
Current implementation only fills length, finalized, filename, and content
type. File must be opened in read mode before stat is called.
"""
file_info = self.file_storage.stat(self.filename)
file_stat = response.add_stat()
file_stat.set_filename(self.filename)
file_stat.set_finalized(True)
file_stat.set_length(file_info['size'])
file_stat.set_ctime(_to_seconds(file_info['creation']))
file_stat.set_mtime(_to_seconds(file_info['creation']))
file_stat.set_content_type(file_service_pb.FileContentType.RAW)
response.set_more_files_found(False)
def read(self, request, response):
"""Read data from file
Args:
request: An instance of file_service_pb.ReadRequest.
response: An instance of file_service_pb.ReadResponse.
"""
if self.is_appending:
raise_error(file_service_pb.FileServiceErrors.WRONG_OPEN_MODE)
self.blob_reader.seek(request.pos())
response.set_data(self.blob_reader.read(request.max_bytes()))
def append(self, request, response):
"""Append data to file.
Args:
request: An instance of file_service_pb.AppendRequest.
response: An instance of file_service_pb.AppendResponse.
"""
sequence_key = request.sequence_key()
if sequence_key:
current_sequence_key = self.file_storage.get_sequence_key(self.filename)
if current_sequence_key and current_sequence_key >= sequence_key:
raise_error(file_service_pb.FileServiceErrors.SEQUENCE_KEY_OUT_OF_ORDER,
error_detail=current_sequence_key)
self.file_storage.set_sequence_key(self.filename, sequence_key)
self.file_storage.append(self.filename, request.data())
def finalize(self):
"""Finalize a file.
Copies temp file data to the blobstore.
"""
self.file_storage.finalize(self.filename)
blob_key = _random_string(64)
self.file_storage.register_blob_key(self.ticket, blob_key)
size = self.file_storage.save_blob(self.filename, blob_key)
blob_info = datastore.Entity(api_blobstore.BLOB_INFO_KIND,
name=str(blob_key), namespace='')
blob_info['content_type'] = self.mime_content_type
blob_info['creation'] = _now_function()
blob_info['filename'] = self.blob_file_name
blob_info['size'] = size
blob_info['creation_handle'] = self.ticket
blob_info['md5_hash'] = self.file_storage.get_md5_from_blob(blob_key)
datastore.Put(blob_info)
blob_file = datastore.Entity('__BlobFileIndex__',
name=self.ticket,
namespace='')
blob_file['blob_key'] = str(blob_key)
datastore.Put(blob_file)
class FileServiceStub(apiproxy_stub.APIProxyStub):
"""Python stub for file service."""
def __init__(self, blob_storage):
"""Constructor."""
super(FileServiceStub, self).__init__('file',
max_request_size=MAX_REQUEST_SIZE)
self.open_files = {}
self.file_storage = BlobstoreStorage(blob_storage)
self.gs_storage = GoogleStorage(blob_storage)
def _Dynamic_Create(self, request, response):
filesystem = request.filesystem()
if request.has_filename() and filesystem != gs._GS_FILESYSTEM:
raise_error(file_service_pb.FileServiceErrors.FILE_NAME_SPECIFIED)
if filesystem == files_blobstore._BLOBSTORE_FILESYSTEM:
response.set_filename(self.file_storage.add_blobstore_file(request))
elif filesystem == gs._GS_FILESYSTEM:
response.set_filename(self.gs_storage.start_upload(request))
else:
raise_error(file_service_pb.FileServiceErrors.UNSUPPORTED_FILE_SYSTEM)
def _Dynamic_Open(self, request, response):
"""Handler for Open RPC call."""
filename = request.filename()
if request.exclusive_lock() and filename in self.open_files:
raise_error(file_service_pb.FileServiceErrors.EXCLUSIVE_LOCK_FAILED)
if filename.startswith(_BLOBSTORE_DIRECTORY):
self.open_files[filename] = BlobstoreFile(request, self.file_storage)
elif filename.startswith(_GS_PREFIX):
self.open_files[filename] = GoogleStorageFile(request, self.gs_storage)
else:
raise_error(file_service_pb.FileServiceErrors.INVALID_FILE_NAME)
def _Dynamic_Close(self, request, response):
"""Handler for Close RPC call."""
filename = request.filename()
finalize = request.finalize()
if not filename in self.open_files:
raise_error(file_service_pb.FileServiceErrors.FILE_NOT_OPENED)
if finalize:
self.open_files[filename].finalize()
del self.open_files[filename]
def _Dynamic_Stat(self, request, response):
"""Handler for Stat RPC call."""
filename = request.filename()
if not filename in self.open_files:
raise_error(file_service_pb.FileServiceErrors.FILE_NOT_OPENED)
file = self.open_files[filename]
if file.is_appending:
raise_error(file_service_pb.FileServiceErrors.WRONG_OPEN_MODE)
file.stat(request, response)
def _Dynamic_Read(self, request, response):
"""Handler for Read RPC call."""
filename = request.filename()
if not filename in self.open_files:
raise_error(file_service_pb.FileServiceErrors.FILE_NOT_OPENED)
self.open_files[filename].read(request, response)
def _Dynamic_Append(self, request, response):
"""Handler for Append RPC call."""
filename = request.filename()
if not filename in self.open_files:
raise_error(file_service_pb.FileServiceErrors.FILE_NOT_OPENED)
self.open_files[filename].append(request, response)
def _Dynamic_GetCapabilities(self, request, response):
"""Handler for GetCapabilities RPC call."""
response.add_filesystem('blobstore')
response.add_filesystem('gs')
response.set_shuffle_available(False)
def _Dynamic_GetDefaultGsBucketName(self, request, response):
"""Handler for GetDefaultGsBucketName RPC call."""
response.set_default_gs_bucket_name('app_default_bucket')
def _Dynamic_ListDir(self, request, response):
"""Handler for ListDir RPC call.
Only for dev app server. See b/6761691.
"""
path = request.path()
if not path.startswith(_GS_PREFIX):
raise_error(file_service_pb.FileServiceErrors.UNSUPPORTED_FILE_SYSTEM)
self.gs_storage.listdir(request, response)
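# A minimal wiring sketch for tests (hedged; assumes the standard apiproxy test
# setup and that blob_storage is any blobstore_stub.BlobStorage implementation):
#   stub = FileServiceStub(blob_storage)
#   apiproxy_stub_map.apiproxy.RegisterStub('file', stub)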
"""
======================================================================
Repeated measures ANOVA on source data with spatio-temporal clustering
======================================================================
This example illustrates how to make use of the clustering functions
for arbitrary, self-defined contrasts beyond standard t-tests. In this
case we will test whether the differences in evoked responses between
stimulation modality (visual vs. auditory) depend on the stimulus
location (left vs. right) for a group of subjects (simulated here
using one subject's data). For this purpose we will compute an
interaction effect using a repeated measures ANOVA. The multiple
comparisons problem is addressed with a cluster-level permutation test
across space and time.
"""
# Authors: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Eric Larson <larson.eric.d@gmail.com>
# Denis Engemann <denis.engemann@gmail.com>
#
# License: BSD (3-clause)
print(__doc__)
import os.path as op
import numpy as np
from numpy.random import randn
import mne
from mne import (io, spatial_tris_connectivity, compute_morph_matrix,
grade_to_tris)
from mne.stats import (spatio_temporal_cluster_test, f_threshold_twoway_rm,
f_twoway_rm, summarize_clusters_stc)
from mne.minimum_norm import apply_inverse, read_inverse_operator
from mne.datasets import sample
###############################################################################
# Set parameters
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
event_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
subjects_dir = data_path + '/subjects'
tmin = -0.2
tmax = 0.3 # Use a lower tmax to reduce multiple comparisons
# Setup for reading the raw data
raw = io.Raw(raw_fname)
events = mne.read_events(event_fname)
###############################################################################
# Read epochs for all channels, removing a bad one
raw.info['bads'] += ['MEG 2443']
picks = mne.pick_types(raw.info, meg=True, eog=True, exclude='bads')
# we'll load all four conditions that make up the 'two ways' of our ANOVA
event_id = dict(l_aud=1, r_aud=2, l_vis=3, r_vis=4)
reject = dict(grad=1000e-13, mag=4000e-15, eog=150e-6)
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=(None, 0), reject=reject, preload=True)
# Equalize trial counts to eliminate bias (which would otherwise be
# introduced by the abs() performed below)
epochs.equalize_event_counts(event_id, copy=False)
###############################################################################
# Transform to source space
fname_inv = data_path + '/MEG/sample/sample_audvis-meg-oct-6-meg-inv.fif'
snr = 3.0
lambda2 = 1.0 / snr ** 2
method = "dSPM" # use dSPM method (could also be MNE or sLORETA)
inverse_operator = read_inverse_operator(fname_inv)
# we'll only use one hemisphere to speed up this example
# instead of a second vertex array we'll pass an empty array
sample_vertices = [inverse_operator['src'][0]['vertno'], np.array([])]
# Let's average and compute inverse, then resample to speed things up
conditions = []
for cond in ['l_aud', 'r_aud', 'l_vis', 'r_vis']: # order is important
evoked = epochs[cond].average()
evoked.resample(50)
condition = apply_inverse(evoked, inverse_operator, lambda2, method)
# Let's only deal with t > 0, cropping to reduce multiple comparisons
condition.crop(0, None)
conditions.append(condition)
tmin = conditions[0].tmin
tstep = conditions[0].tstep
###############################################################################
# Transform to common cortical space
# Normally you would read in estimates across several subjects and morph
# them to the same cortical space (e.g. fsaverage). For example purposes,
# we will simulate this by just having each "subject" have the same
# response (just noisy in source space) here.
# we'll only consider the left hemisphere in this example.
n_vertices_sample, n_times = conditions[0].lh_data.shape
n_subjects = 7
print('Simulating data for %d subjects.' % n_subjects)
# Let's make sure our results replicate, so set the seed.
np.random.seed(0)
X = randn(n_vertices_sample, n_times, n_subjects, 4) * 10
for ii, condition in enumerate(conditions):
X[:, :, :, ii] += condition.lh_data[:, :, np.newaxis]
# It's a good idea to spatially smooth the data, and for visualization
# purposes, let's morph these to fsaverage, which is a grade 5 source space
# with vertices 0:10242 for each hemisphere. Usually you'd have to morph
# each subject's data separately (and you might want to use morph_data
# instead), but here since all estimates are on 'sample' we can use one
# morph matrix for all the heavy lifting.
fsave_vertices = [np.arange(10242), np.array([])] # right hemisphere is empty
morph_mat = compute_morph_matrix('sample', 'fsaverage', sample_vertices,
fsave_vertices, 20, subjects_dir)
n_vertices_fsave = morph_mat.shape[0]
# We have to change the shape for the dot() to work properly
X = X.reshape(n_vertices_sample, n_times * n_subjects * 4)
print('Morphing data.')
X = morph_mat.dot(X) # morph_mat is a sparse matrix
X = X.reshape(n_vertices_fsave, n_times, n_subjects, 4)
# Now we need to prepare the group matrix for the ANOVA statistic.
# To make the clustering function work correctly with the
# ANOVA function X needs to be a list of multi-dimensional arrays
# (one per condition) of shape: samples (subjects) x time x space
X = np.transpose(X, [2, 1, 0, 3]) # First we permute dimensions
# finally we split the array into a list of conditions
# and discard the empty dimension resulting from the split using numpy squeeze
X = [np.squeeze(x) for x in np.split(X, 4, axis=-1)]
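# X is now a list of 4 arrays (one per condition), each of shape
# (n_subjects, n_times, n_vertices_fsave), i.e. subjects x time x space.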
###############################################################################
# Prepare function for arbitrary contrast
# As our ANOVA function is a multi-purpose tool we need to apply a few
# modifications to integrate it with the clustering function. This
# includes reshaping data, setting default arguments and processing
# the return values. For this reason we'll write a tiny dummy function.
# We will tell the ANOVA how to interpret the data matrix in terms of
# factors. This is done via the factor levels argument which is a list
# of the number of factor levels for each factor.
factor_levels = [2, 2]
# Finally we will pick the interaction effect by passing 'A:B'.
# (this notation is borrowed from the R formula language)
effects = 'A:B' # Without this, the main effects would also be returned.
# Tell the ANOVA not to compute p-values which we don't need for clustering
return_pvals = False
# a few more convenient bindings
n_times = X[0].shape[1]
n_conditions = 4
# A stat_fun must deal with a variable number of input arguments.
def stat_fun(*args):
# Inside the clustering function each condition will be passed as
# flattened array, necessitated by the clustering procedure.
# The ANOVA however expects an input array of dimensions:
# subjects X conditions X observations (optional).
# The following expression catches the list input
# and swaps the first and the second dimension, and finally calls ANOVA.
return f_twoway_rm(np.swapaxes(args, 1, 0), factor_levels=factor_levels,
effects=effects, return_pvals=return_pvals)[0]
# get f-values only.
# Note. for further details on this ANOVA function consider the
# corresponding time frequency example.
###############################################################################
# Compute clustering statistic
# To use an algorithm optimized for spatio-temporal clustering, we
# just pass the spatial connectivity matrix (instead of spatio-temporal)
source_space = grade_to_tris(5)
# as we only have one hemisphere we only need half the connectivity
lh_source_space = source_space[source_space[:, 0] < 10242]
print('Computing connectivity.')
connectivity = spatial_tris_connectivity(lh_source_space)
# Now let's actually do the clustering. Please relax, on a small
# notebook and one single thread only this will take a couple of minutes ...
pthresh = 0.0005
f_thresh = f_threshold_twoway_rm(n_subjects, factor_levels, effects, pthresh)
# To speed things up a bit we will ...
n_permutations = 128 # ... run fewer permutations (reduces sensitivity)
print('Clustering.')
T_obs, clusters, cluster_p_values, H0 = clu = \
spatio_temporal_cluster_test(X, connectivity=connectivity, n_jobs=1,
threshold=f_thresh, stat_fun=stat_fun,
n_permutations=n_permutations,
buffer_size=None)
# Now select the clusters that are sig. at p < 0.05 (note that this value
# is multiple-comparisons corrected).
good_cluster_inds = np.where(cluster_p_values < 0.05)[0]
###############################################################################
# Visualize the clusters
print('Visualizing clusters.')
# Now let's build a convenient representation of each cluster, where each
# cluster becomes a "time point" in the SourceEstimate
stc_all_cluster_vis = summarize_clusters_stc(clu, tstep=tstep,
vertno=fsave_vertices,
subject='fsaverage')
# Let's actually plot the first "time point" in the SourceEstimate, which
# shows all the clusters, weighted by duration
subjects_dir = op.join(data_path, 'subjects')
# The brighter the color, the stronger the interaction between
# stimulus modality and stimulus location
brain = stc_all_cluster_vis.plot('fsaverage', 'inflated', 'lh',
subjects_dir=subjects_dir,
time_label='Duration significant (ms)')
brain.set_data_time_index(0)
brain.scale_data_colormap(fmin=5, fmid=10, fmax=30, transparent=True)
brain.show_view('lateral')
brain.save_image('cluster-lh.png')
brain.show_view('medial')
###############################################################################
# Finally, let's investigate interaction effect by reconstructing the time
# courses
import matplotlib.pyplot as plt
inds_t, inds_v = [(clusters[cluster_ind]) for ii, cluster_ind in
enumerate(good_cluster_inds)][0] # first cluster
times = np.arange(X[0].shape[1]) * tstep * 1e3
plt.figure()
colors = ['y', 'b', 'g', 'purple']
event_ids = ['l_aud', 'r_aud', 'l_vis', 'r_vis']
for ii, (condition, color, eve_id) in enumerate(zip(X, colors, event_ids)):
# extract time course at cluster vertices
condition = condition[:, :, inds_v]
# normally we would normalize values across subjects but
# here we use data from the same subject so we're good to just
# create average time series across subjects and vertices.
mean_tc = condition.mean(axis=2).mean(axis=0)
std_tc = condition.std(axis=2).std(axis=0)
plt.plot(times, mean_tc.T, color=color, label=eve_id)
plt.fill_between(times, mean_tc + std_tc, mean_tc - std_tc, color='gray',
alpha=0.5, label='')
ymin, ymax = mean_tc.min() - 5, mean_tc.max() + 5
plt.xlabel('Time (ms)')
plt.ylabel('Activation (F-values)')
plt.xlim(times[[0, -1]])
plt.ylim(ymin, ymax)
plt.fill_betweenx((ymin, ymax), times[inds_t[0]],
times[inds_t[-1]], color='orange', alpha=0.3)
plt.legend()
plt.title('Interaction between stimulus-modality and location.')
plt.show()
from __future__ import print_function
import time
import threading
from timeit import default_timer as timer
import lazylights
import colorsys
try:
import Tkinter as tk
import Queue as queue
except ImportError:
import tkinter as tk
import queue as queue
LIFX_CONNECT = True
global top
global LAST_UPDATE
MIN_REFRESH_INTERVAL = 0.5
LAST_UPDATE = 0
lastUpdateTimestamp = 0
bulbs = lazylights.find_bulbs(expected_bulbs=1, timeout=10)
print "Found bulb(s): ", bulbs, "\nState", lazylights.get_state(bulbs, timeout=5)
MAXIMUM = 40.0
buffSize = 30
top = tk.Tk()
#top.attributes("-fullscreen", True)
top.configure(background = 'black')
w, h = top.winfo_screenwidth()/1.25, top.winfo_screenheight()/2.
#top.overrideredirect(1)
top.geometry("%dx%d+0+0" % (w, h))
top.focus_set()
top.bind('<Escape>', lambda e: e.widget.quit())# top.destroy())
def turnOn():
lazylights.set_power(bulbs, True)
def turnOff():
lazylights.set_power(bulbs, False)
onButton = tk.Button(top, text="on", command=turnOn, bg="#001a00", fg='#004d00')
onButton.grid(column = 1, columnspan = 1, row = 0, sticky='nsew')
offButton = tk.Button(top, text="off", command=turnOff, bg="#001a00", fg='#004d00')
offButton.grid(column = 0, columnspan = 1, row = 0, sticky='nsew')
quitButton = tk.Button(top, text="quit", command=lambda: top.quit() , bg="#ff00ff")
quitButton.grid(column = 0, columnspan = 1, row = 5, sticky='nsew')
BAR_WIDTH = 600
sumR = BAR_WIDTH / 2
sumG = BAR_WIDTH / 2
sumB = BAR_WIDTH / 2
barHeight = '100'
rCan = tk.Canvas(top, width=str(BAR_WIDTH), height=barHeight, relief='raised', bg='black', cursor='dot')
rCanColorPoly = rCan.create_polygon(0, 0, 0, barHeight, sumR, barHeight, sumR, 0, fill = 'red')
rCanBlackPoly = rCan.create_polygon(sumR, 0, sumR, barHeight, BAR_WIDTH, barHeight, BAR_WIDTH, 0, fill='black')
rCan.itemconfig(rCanColorPoly, tags=('colorPoly'))
rCan.itemconfig(rCanBlackPoly, tags=('blackPoly'))
rCan.grid(column=0, columnspan=2, row = 2, sticky='w', padx='5')
rCanStrVar = tk.StringVar()
rCanStrVar.set(str(sumR))
rCanLabel = tk.Label(top, anchor='center', bd=0, bg='black', cursor='cross', fg='red', textvariable=rCanStrVar)
rCanLabel.grid(column = 2, row = 2, sticky='w')
gCan = tk.Canvas(top, width=str(BAR_WIDTH), height=barHeight, relief='raised', cursor='dot', bg='black')
gCanColorPoly = gCan.create_polygon(0, 0, 0, barHeight, sumG, barHeight, sumG, 0, fill='green')
gCanBlackPoly = gCan.create_polygon(sumG, 0, sumG, barHeight, BAR_WIDTH, barHeight, BAR_WIDTH, 0, fill='black')
gCan.itemconfig(gCanColorPoly, tags=('colorPoly'))
gCan.itemconfig(gCanBlackPoly, tags=('blackPoly'))
gCan.grid(column=0, columnspan=2, row = 3, sticky='w', padx='5')
gCanStrVar = tk.StringVar()
gCanStrVar.set(str(sumG))
gCanLabel = tk.Label(top, anchor='center', bd=0, cursor='dot', bg='black', fg='green', textvariable=gCanStrVar)
gCanLabel.grid(column = 2, row = 3, sticky='w')
bCan = tk.Canvas(top, width=str(BAR_WIDTH), height=barHeight, relief='raised', cursor='dot', bg='black')
bCanColorPoly = bCan.create_polygon(0, 0, 0, barHeight, sumB, barHeight, sumB, 0, fill='blue')
bCanBlackPoly = bCan.create_polygon(sumB, 0, sumB, barHeight, BAR_WIDTH, barHeight, BAR_WIDTH, 0, fill='black')
bCan.itemconfig(bCanColorPoly, tags=('colorPoly'))
bCan.itemconfig(bCanBlackPoly, tags=('blackPoly'))
bCan.grid(column=0, columnspan=2, row = 4, sticky='w', padx='5')
bCanStrVar = tk.StringVar()
bCanStrVar.set(str(BAR_WIDTH))
bCanLabel = tk.Label(top, anchor='center', bd=0, cursor='dot', fg='blue', bg='black', textvariable=bCanStrVar)
bCanLabel.grid(column = 2, row = 4, sticky='w')
top.columnconfigure(0, weight=3)
top.columnconfigure(1, weight=3)
top.columnconfigure(2, weight=1)
top.rowconfigure(0, weight=2)
top.rowconfigure(1, weight=1)
top.rowconfigure(2, weight=1)
top.rowconfigure(3, weight=1)
top.rowconfigure(4, weight=1)
top.rowconfigure(5, weight=1)
def RGBtoHSB(r, g, b):
print("R", r, "G", g, "B", b)
hue = 0
sat = 0
bright = 0
_max = max(r, g, b)/float(255)
_min = min(r, g, b)/float(255)
delta = float(_max - _min)
bright = float(_max)
if _max != 0:
sat = delta / float(_max)
else:
sat = 0
if sat != 0:
if r == max(r, g, b):
hue = float(g/float(255) - b/float(255)) / delta
elif g == max(r, g, b):
hue = 2 + (b/float(255) - r/float(255)) / delta
else:
hue = 4 + float(r/float(255) - g/float(255)) / delta
else:
hue = -1
hue = hue * 60
if hue < 0:
hue+= 360
return (hue, sat, bright)
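# Note: this is roughly colorsys.rgb_to_hsv() with 0-255 inputs and the hue scaled
# to degrees, e.g. RGBtoHSB(255, 0, 0) -> (0, 1.0, 1.0) while
# colorsys.rgb_to_hsv(1.0, 0, 0) -> (0.0, 1.0, 1.0).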
def resend():
#if timer() > LAST_UPDATE + MIN_REFRESH_INTERVAL:
#hsb = RGBtoHSB(float(rCanStrVar.get()), float(gCanStrVar.get()), float(bCanStrVar.get()))
hsb = colorsys.rgb_to_hsv(float(rCanStrVar.get()) / BAR_WIDTH, float(gCanStrVar.get()) / BAR_WIDTH, float(bCanStrVar.get()) / BAR_WIDTH)
#hsb[0] = hsb[0] * 360
print('HSB', hsb[0]*360, hsb[1], hsb[2])
if LIFX_CONNECT:
print("Updating light(s)...")
#lifx.set_light_state(hsb[0], hsb[1], hsb[2], 2400)
lazylights.set_state(bulbs, hsb[0]*360, hsb[1], hsb[2], 5000, 500)
top.update_idletasks()
top.update()
def updateHeight(can, val, _fill):
global LAST_UPDATE
#Create new bar
can.coords(can.find_withtag("colorPoly"), 0, 0, 0, barHeight, val, barHeight, val, 0)
can.coords(can.find_withtag("blackPoly"), val, 0, val, barHeight, BAR_WIDTH, barHeight, BAR_WIDTH, 0)
if can == rCan:
rCanStrVar.set(str(val))
elif can == gCan:
gCanStrVar.set(str(val))
elif can == bCan:
bCanStrVar.set(str(val))
hsb = colorsys.rgb_to_hsv(float(rCanStrVar.get()) / BAR_WIDTH, float(gCanStrVar.get()) / BAR_WIDTH, float (bCanStrVar.get()) / BAR_WIDTH)
if timer() > LAST_UPDATE + MIN_REFRESH_INTERVAL:
top.update_idletasks()
top.update()
LAST_UPDATE = timer()
resend()
#Attach Motion Event Listeners
#rCan.bind("<Button-1>", lambda event:updateHeight(event.widget, event.x, 'red'))
rCan.tag_bind(rCan.find_withtag("colorPoly"), "<B1-Motion>", lambda event:updateHeight(rCan, event.x, 'red'))
rCan.tag_bind(rCan.find_withtag("blackPoly"), "<B1-Motion>", lambda event:updateHeight(rCan, event.x, 'red'))
#gCan.bind("<Button-1>", lambda event:updateHeight(event.widget, event.x, 'green'))
gCan.tag_bind(gCan.find_withtag("colorPoly"), "<B1-Motion>", lambda event:updateHeight(gCan, event.x, 'green'))
gCan.tag_bind(gCan.find_withtag("blackPoly"), "<B1-Motion>", lambda event: updateHeight(gCan, event.x, 'green'))
#bCan.bind("<Button-1>", lambda event:updateHeight(event.widget, event.x, 'blue'))
bCan.tag_bind(bCan.find_withtag("colorPoly"), "<B1-Motion>", lambda event:updateHeight(bCan, event.x, 'blue'))
bCan.tag_bind(bCan.find_withtag("blackPoly"), "<B1-Motion>", lambda event:updateHeight(bCan, event.x, 'blue'))
buffR = queue.Queue()
buffG = queue.Queue()
buffB = queue.Queue()
# Main program
LAST_UPDATE = timer()
if True:
#with lifx.run():
#tk.Button(top, text="Quit", command=quit).pack()
top.mainloop()
top.quit()
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=g-classes-have-attributes,g-direct-tensorflow-import
"""Locally-connected layer for 1D input."""
from keras import activations
from keras import backend
from keras import constraints
from keras import initializers
from keras import regularizers
from keras.engine.base_layer import Layer
from keras.engine.input_spec import InputSpec
from keras.layers.locally_connected import locally_connected_utils
from keras.utils import conv_utils
from keras.utils import tf_utils
from tensorflow.python.util.tf_export import keras_export
@keras_export('keras.layers.LocallyConnected1D')
class LocallyConnected1D(Layer):
"""Locally-connected layer for 1D inputs.
The `LocallyConnected1D` layer works similarly to
the `Conv1D` layer, except that weights are unshared,
that is, a different set of filters is applied at each different patch
of the input.
Note: layer attributes cannot be modified after the layer has been called
once (except the `trainable` attribute).
Example:
```python
  # apply an unshared-weight 1D convolution of length 3 to a sequence with
# 10 timesteps, with 64 output filters
model = Sequential()
model.add(LocallyConnected1D(64, 3, input_shape=(10, 32)))
# now model.output_shape == (None, 8, 64)
# add a new conv1d on top
model.add(LocallyConnected1D(32, 3))
# now model.output_shape == (None, 6, 32)
```
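  For `"valid"` padding the output length follows the usual convolution
  arithmetic, `new_steps = (steps - kernel_size) // strides + 1`, which gives
  the `8` and `6` in the shapes above.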
Args:
filters: Integer, the dimensionality of the output space (i.e. the number
of output filters in the convolution).
kernel_size: An integer or tuple/list of a single integer, specifying the
length of the 1D convolution window.
strides: An integer or tuple/list of a single integer, specifying the
stride length of the convolution.
padding: Currently only supports `"valid"` (case-insensitive). `"same"`
may be supported in the future. `"valid"` means no padding.
data_format: A string, one of `channels_last` (default) or
`channels_first`. The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape `(batch, length,
channels)` while `channels_first` corresponds to inputs with shape
`(batch, channels, length)`. It defaults to the `image_data_format`
value found in your Keras config file at `~/.keras/keras.json`. If you
never set it, then it will be "channels_last".
activation: Activation function to use. If you don't specify anything, no
activation is applied
      (i.e. "linear" activation: `a(x) = x`).
use_bias: Boolean, whether the layer uses a bias vector.
kernel_initializer: Initializer for the `kernel` weights matrix.
bias_initializer: Initializer for the bias vector.
kernel_regularizer: Regularizer function applied to the `kernel` weights
matrix.
bias_regularizer: Regularizer function applied to the bias vector.
activity_regularizer: Regularizer function applied to the output of the
      layer (its "activation").
kernel_constraint: Constraint function applied to the kernel matrix.
bias_constraint: Constraint function applied to the bias vector.
implementation: implementation mode, either `1`, `2`, or `3`. `1` loops
over input spatial locations to perform the forward pass. It is
memory-efficient but performs a lot of (small) ops. `2` stores layer
weights in a dense but sparsely-populated 2D matrix and implements the
forward pass as a single matrix-multiply. It uses a lot of RAM but
performs few (large) ops. `3` stores layer weights in a sparse tensor
and implements the forward pass as a single sparse matrix-multiply.
How to choose:
`1`: large, dense models,
`2`: small models,
`3`: large, sparse models, where "large" stands for large
input/output activations (i.e. many `filters`, `input_filters`,
large `input_size`, `output_size`), and "sparse" stands for few
connections between inputs and outputs, i.e. small ratio `filters *
input_filters * kernel_size / (input_size * strides)`, where inputs
to and outputs of the layer are assumed to have shapes `(input_size,
input_filters)`, `(output_size, filters)` respectively. It is
recommended to benchmark each in the setting of interest to pick the
most efficient one (in terms of speed and memory usage). Correct
choice of implementation can lead to dramatic speed improvements
(e.g. 50X), potentially at the expense of RAM. Also, only
`padding="valid"` is supported by `implementation=1`.
Input shape:
3D tensor with shape: `(batch_size, steps, input_dim)`
Output shape:
3D tensor with shape: `(batch_size, new_steps, filters)` `steps` value
might have changed due to padding or strides.
"""
def __init__(self,
filters,
kernel_size,
strides=1,
padding='valid',
data_format=None,
activation=None,
use_bias=True,
kernel_initializer='glorot_uniform',
bias_initializer='zeros',
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
implementation=1,
**kwargs):
super(LocallyConnected1D, self).__init__(**kwargs)
self.filters = filters
self.kernel_size = conv_utils.normalize_tuple(kernel_size, 1, 'kernel_size')
self.strides = conv_utils.normalize_tuple(
strides, 1, 'strides', allow_zero=True)
self.padding = conv_utils.normalize_padding(padding)
if self.padding != 'valid' and implementation == 1:
raise ValueError('Invalid border mode for LocallyConnected1D '
'(only "valid" is supported if implementation is 1): ' +
padding)
self.data_format = conv_utils.normalize_data_format(data_format)
self.activation = activations.get(activation)
self.use_bias = use_bias
self.kernel_initializer = initializers.get(kernel_initializer)
self.bias_initializer = initializers.get(bias_initializer)
self.kernel_regularizer = regularizers.get(kernel_regularizer)
self.bias_regularizer = regularizers.get(bias_regularizer)
self.activity_regularizer = regularizers.get(activity_regularizer)
self.kernel_constraint = constraints.get(kernel_constraint)
self.bias_constraint = constraints.get(bias_constraint)
self.implementation = implementation
self.input_spec = InputSpec(ndim=3)
@property
def _use_input_spec_as_call_signature(self):
return False
@tf_utils.shape_type_conversion
def build(self, input_shape):
if self.data_format == 'channels_first':
input_dim, input_length = input_shape[1], input_shape[2]
else:
input_dim, input_length = input_shape[2], input_shape[1]
    if input_dim is None:
      raise ValueError('Axis 2 of input should be fully-defined. '
                       f'Found shape: {input_shape}')
self.output_length = conv_utils.conv_output_length(input_length,
self.kernel_size[0],
self.padding,
self.strides[0])
if self.output_length <= 0:
raise ValueError(
f'One of the dimensions in the output is <= 0 '
f'due to downsampling in {self.name}. Consider '
f'increasing the input size. '
f'Received input shape {input_shape} which would produce '
f'output shape with a zero or negative value in a '
f'dimension.')
if self.implementation == 1:
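      # Implementation 1: one unshared weight block of shape
      # (kernel_size * input_dim, filters) per output position; the forward
      # pass loops over output positions (see backend.local_conv in call()).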
self.kernel_shape = (self.output_length, self.kernel_size[0] * input_dim,
self.filters)
self.kernel = self.add_weight(
shape=self.kernel_shape,
initializer=self.kernel_initializer,
name='kernel',
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint)
elif self.implementation == 2:
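      # Implementation 2: a dense kernel covering every (input, output)
      # position pair, multiplied by kernel_mask so that only the
      # locally-connected entries are used; the forward pass is one matmul.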
if self.data_format == 'channels_first':
self.kernel_shape = (input_dim, input_length, self.filters,
self.output_length)
else:
self.kernel_shape = (input_length, input_dim, self.output_length,
self.filters)
self.kernel = self.add_weight(
shape=self.kernel_shape,
initializer=self.kernel_initializer,
name='kernel',
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint)
self.kernel_mask = locally_connected_utils.get_locallyconnected_mask(
input_shape=(input_length,),
kernel_shape=self.kernel_size,
strides=self.strides,
padding=self.padding,
data_format=self.data_format,
)
elif self.implementation == 3:
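      # Implementation 3: only the locally-connected entries are stored, as a
      # flat weight vector (indexed by kernel_idxs into a sparse matrix of
      # shape kernel_shape); the forward pass is a sparse matmul.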
self.kernel_shape = (self.output_length * self.filters,
input_length * input_dim)
self.kernel_idxs = sorted(
conv_utils.conv_kernel_idxs(
input_shape=(input_length,),
kernel_shape=self.kernel_size,
strides=self.strides,
padding=self.padding,
filters_in=input_dim,
filters_out=self.filters,
data_format=self.data_format))
self.kernel = self.add_weight(
shape=(len(self.kernel_idxs),),
initializer=self.kernel_initializer,
name='kernel',
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint)
else:
raise ValueError('Unrecognized implementation mode: %d.' %
self.implementation)
if self.use_bias:
self.bias = self.add_weight(
shape=(self.output_length, self.filters),
initializer=self.bias_initializer,
name='bias',
regularizer=self.bias_regularizer,
constraint=self.bias_constraint)
else:
self.bias = None
if self.data_format == 'channels_first':
self.input_spec = InputSpec(ndim=3, axes={1: input_dim})
else:
self.input_spec = InputSpec(ndim=3, axes={-1: input_dim})
self.built = True
@tf_utils.shape_type_conversion
def compute_output_shape(self, input_shape):
if self.data_format == 'channels_first':
input_length = input_shape[2]
else:
input_length = input_shape[1]
length = conv_utils.conv_output_length(input_length, self.kernel_size[0],
self.padding, self.strides[0])
if self.data_format == 'channels_first':
return (input_shape[0], self.filters, length)
elif self.data_format == 'channels_last':
return (input_shape[0], length, self.filters)
def call(self, inputs):
if self.implementation == 1:
output = backend.local_conv(
inputs, self.kernel, self.kernel_size, self.strides,
(self.output_length,), self.data_format)
elif self.implementation == 2:
output = locally_connected_utils.local_conv_matmul(
inputs, self.kernel, self.kernel_mask,
self.compute_output_shape(inputs.shape))
elif self.implementation == 3:
output = locally_connected_utils.local_conv_sparse_matmul(
inputs, self.kernel, self.kernel_idxs, self.kernel_shape,
self.compute_output_shape(inputs.shape))
else:
raise ValueError('Unrecognized implementation mode: %d.' %
self.implementation)
if self.use_bias:
output = backend.bias_add(output, self.bias, data_format=self.data_format)
output = self.activation(output)
return output
def get_config(self):
config = {
'filters':
self.filters,
'kernel_size':
self.kernel_size,
'strides':
self.strides,
'padding':
self.padding,
'data_format':
self.data_format,
'activation':
activations.serialize(self.activation),
'use_bias':
self.use_bias,
'kernel_initializer':
initializers.serialize(self.kernel_initializer),
'bias_initializer':
initializers.serialize(self.bias_initializer),
'kernel_regularizer':
regularizers.serialize(self.kernel_regularizer),
'bias_regularizer':
regularizers.serialize(self.bias_regularizer),
'activity_regularizer':
regularizers.serialize(self.activity_regularizer),
'kernel_constraint':
constraints.serialize(self.kernel_constraint),
'bias_constraint':
constraints.serialize(self.bias_constraint),
'implementation':
self.implementation
}
base_config = super(LocallyConnected1D, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
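# A minimal usage sketch (not part of the layer itself; assumes a working
# Keras installation and mirrors the shapes from the class docstring):
#
#   import numpy as np
#   from keras import Sequential
#   model = Sequential([LocallyConnected1D(64, 3, input_shape=(10, 32))])
#   out = model(np.zeros((2, 10, 32), dtype='float32'))  # shape (2, 8, 64)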
|
|
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from migrate.changeset import UniqueConstraint
from migrate import ForeignKeyConstraint
from oslo_log import log as logging
from sqlalchemy import Boolean, BigInteger, Column, DateTime, Enum, Float
from sqlalchemy import dialects
from sqlalchemy import ForeignKey, Index, Integer, MetaData, String, Table
from sqlalchemy import Text
from sqlalchemy.types import NullType
LOG = logging.getLogger(__name__)
# Note on the autoincrement flag: this is defaulted for primary key columns
# of integral type, so is no longer set explicitly in such cases.
# NOTE(dprince): This wrapper allows us to easily match the Folsom MySQL
# Schema. In Folsom we created tables as latin1 and converted them to utf8
# later. This conversion causes some of the Text columns on MySQL to get
# created as mediumtext instead of just text.
def MediumText():
return Text().with_variant(dialects.mysql.MEDIUMTEXT(), 'mysql')
def Inet():
return String(length=43).with_variant(dialects.postgresql.INET(),
'postgresql')
def InetSmall():
return String(length=39).with_variant(dialects.postgresql.INET(),
'postgresql')
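# The helpers above pick a dialect-specific column type where one exists
# (MEDIUMTEXT on MySQL, INET on PostgreSQL) and fall back to a generic type
# elsewhere (e.g. SQLite). The string lengths are presumably sized for a
# textual IPv6 address (39 chars) and an address plus prefix (43 chars).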
def _create_shadow_tables(migrate_engine):
    """Create a shadow_* copy of every table, used to archive deleted rows."""
meta = MetaData(migrate_engine)
meta.reflect(migrate_engine)
table_names = list(meta.tables.keys())
meta.bind = migrate_engine
for table_name in table_names:
table = Table(table_name, meta, autoload=True)
columns = []
for column in table.columns:
column_copy = None
# NOTE(boris-42): BigInteger is not supported by sqlite, so
# after copy it will have NullType, other
# types that are used in Nova are supported by
# sqlite.
            if isinstance(column.type, NullType):
                column_copy = Column(column.name, BigInteger(), default=0)
            elif table_name == 'instances' and column.name == 'locked_by':
                enum = Enum('owner', 'admin',
                            name='shadow_instances0locked_by')
                column_copy = Column(column.name, enum)
            else:
                column_copy = column.copy()
columns.append(column_copy)
shadow_table_name = 'shadow_' + table_name
shadow_table = Table(shadow_table_name, meta, *columns,
mysql_engine='InnoDB')
try:
shadow_table.create()
except Exception:
LOG.info(repr(shadow_table))
LOG.exception('Exception while creating table.')
raise
# NOTE(dprince): we add these here so our schema contains dump tables
# which were added in migration 209 (in Havana). We can drop these in
# Icehouse: https://bugs.launchpad.net/nova/+bug/1266538
def _create_dump_tables(migrate_engine):
meta = MetaData(migrate_engine)
meta.reflect(migrate_engine)
table_names = ['compute_node_stats', 'compute_nodes', 'instance_actions',
'instance_actions_events', 'instance_faults', 'migrations']
for table_name in table_names:
table = Table(table_name, meta, autoload=True)
dump_table_name = 'dump_' + table.name
columns = []
for column in table.columns:
# NOTE(dprince): The dump_ tables were originally created from an
# earlier schema version so we don't want to add the pci_stats
# column so that schema diffs are exactly the same.
if column.name == 'pci_stats':
continue
else:
columns.append(column.copy())
table_dump = Table(dump_table_name, meta, *columns,
mysql_engine='InnoDB')
table_dump.create()
def upgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
agent_builds = Table('agent_builds', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('hypervisor', String(length=255)),
Column('os', String(length=255)),
Column('architecture', String(length=255)),
Column('version', String(length=255)),
Column('url', String(length=255)),
Column('md5hash', String(length=255)),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
aggregate_hosts = Table('aggregate_hosts', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('host', String(length=255)),
Column('aggregate_id', Integer, ForeignKey('aggregates.id'),
nullable=False),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
aggregate_metadata = Table('aggregate_metadata', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('aggregate_id', Integer, ForeignKey('aggregates.id'),
nullable=False),
Column('key', String(length=255), nullable=False),
Column('value', String(length=255), nullable=False),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
aggregates = Table('aggregates', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('name', String(length=255)),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
block_device_mapping = Table('block_device_mapping', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('device_name', String(length=255), nullable=True),
Column('delete_on_termination', Boolean),
Column('snapshot_id', String(length=36), nullable=True),
Column('volume_id', String(length=36), nullable=True),
Column('volume_size', Integer),
Column('no_device', Boolean),
Column('connection_info', MediumText()),
Column('instance_uuid', String(length=36)),
Column('deleted', Integer),
Column('source_type', String(length=255), nullable=True),
Column('destination_type', String(length=255), nullable=True),
Column('guest_format', String(length=255), nullable=True),
Column('device_type', String(length=255), nullable=True),
Column('disk_bus', String(length=255), nullable=True),
Column('boot_index', Integer),
Column('image_id', String(length=36), nullable=True),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
bw_usage_cache = Table('bw_usage_cache', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('start_period', DateTime, nullable=False),
Column('last_refreshed', DateTime),
Column('bw_in', BigInteger),
Column('bw_out', BigInteger),
Column('mac', String(length=255)),
Column('uuid', String(length=36)),
Column('last_ctr_in', BigInteger()),
Column('last_ctr_out', BigInteger()),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
cells = Table('cells', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('api_url', String(length=255)),
Column('weight_offset', Float),
Column('weight_scale', Float),
Column('name', String(length=255)),
Column('is_parent', Boolean),
Column('deleted', Integer),
Column('transport_url', String(length=255), nullable=False),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
certificates = Table('certificates', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('user_id', String(length=255)),
Column('project_id', String(length=255)),
Column('file_name', String(length=255)),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
compute_node_stats = Table('compute_node_stats', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('compute_node_id', Integer, nullable=False),
Column('key', String(length=255), nullable=False),
Column('value', String(length=255)),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
compute_nodes = Table('compute_nodes', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('service_id', Integer, nullable=False),
Column('vcpus', Integer, nullable=False),
Column('memory_mb', Integer, nullable=False),
Column('local_gb', Integer, nullable=False),
Column('vcpus_used', Integer, nullable=False),
Column('memory_mb_used', Integer, nullable=False),
Column('local_gb_used', Integer, nullable=False),
Column('hypervisor_type', MediumText(), nullable=False),
Column('hypervisor_version', Integer, nullable=False),
Column('cpu_info', MediumText(), nullable=False),
Column('disk_available_least', Integer),
Column('free_ram_mb', Integer),
Column('free_disk_gb', Integer),
Column('current_workload', Integer),
Column('running_vms', Integer),
Column('hypervisor_hostname', String(length=255)),
Column('deleted', Integer),
Column('host_ip', InetSmall()),
Column('supported_instances', Text),
Column('pci_stats', Text, nullable=True),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
console_pools = Table('console_pools', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('address', InetSmall()),
Column('username', String(length=255)),
Column('password', String(length=255)),
Column('console_type', String(length=255)),
Column('public_hostname', String(length=255)),
Column('host', String(length=255)),
Column('compute_host', String(length=255)),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
consoles_instance_uuid_column_args = ['instance_uuid', String(length=36)]
consoles_instance_uuid_column_args.append(
ForeignKey('instances.uuid', name='consoles_instance_uuid_fkey'))
consoles = Table('consoles', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('instance_name', String(length=255)),
Column('password', String(length=255)),
Column('port', Integer),
Column('pool_id', Integer, ForeignKey('console_pools.id')),
Column(*consoles_instance_uuid_column_args),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
dns_domains = Table('dns_domains', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('deleted', Boolean),
Column('domain', String(length=255), primary_key=True, nullable=False),
Column('scope', String(length=255)),
Column('availability_zone', String(length=255)),
Column('project_id', String(length=255)),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
fixed_ips = Table('fixed_ips', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('address', InetSmall()),
Column('network_id', Integer),
Column('allocated', Boolean),
Column('leased', Boolean),
Column('reserved', Boolean),
Column('virtual_interface_id', Integer),
Column('host', String(length=255)),
Column('instance_uuid', String(length=36)),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
floating_ips = Table('floating_ips', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('address', InetSmall()),
Column('fixed_ip_id', Integer),
Column('project_id', String(length=255)),
Column('host', String(length=255)),
Column('auto_assigned', Boolean),
Column('pool', String(length=255)),
Column('interface', String(length=255)),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
instance_faults = Table('instance_faults', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('instance_uuid', String(length=36)),
Column('code', Integer, nullable=False),
Column('message', String(length=255)),
Column('details', MediumText()),
Column('host', String(length=255)),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
instance_id_mappings = Table('instance_id_mappings', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('uuid', String(36), nullable=False),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
instance_info_caches = Table('instance_info_caches', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('network_info', MediumText()),
Column('instance_uuid', String(length=36), nullable=False),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
groups = Table('instance_groups', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('deleted', Integer),
Column('id', Integer, primary_key=True, nullable=False),
Column('user_id', String(length=255)),
Column('project_id', String(length=255)),
Column('uuid', String(length=36), nullable=False),
Column('name', String(length=255)),
UniqueConstraint('uuid', 'deleted',
name='uniq_instance_groups0uuid0deleted'),
mysql_engine='InnoDB',
mysql_charset='utf8',
)
group_metadata = Table('instance_group_metadata', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('deleted', Integer),
Column('id', Integer, primary_key=True, nullable=False),
Column('key', String(length=255)),
Column('value', String(length=255)),
Column('group_id', Integer, ForeignKey('instance_groups.id'),
nullable=False),
mysql_engine='InnoDB',
mysql_charset='utf8',
)
group_policy = Table('instance_group_policy', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('deleted', Integer),
Column('id', Integer, primary_key=True, nullable=False),
Column('policy', String(length=255)),
Column('group_id', Integer, ForeignKey('instance_groups.id'),
nullable=False),
mysql_engine='InnoDB',
mysql_charset='utf8',
)
group_member = Table('instance_group_member', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('deleted', Integer),
Column('id', Integer, primary_key=True, nullable=False),
Column('instance_id', String(length=255)),
Column('group_id', Integer, ForeignKey('instance_groups.id'),
nullable=False),
mysql_engine='InnoDB',
mysql_charset='utf8',
)
instance_metadata = Table('instance_metadata', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('key', String(length=255)),
Column('value', String(length=255)),
Column('instance_uuid', String(length=36), nullable=True),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
instance_system_metadata = Table('instance_system_metadata', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('instance_uuid', String(length=36), nullable=False),
Column('key', String(length=255), nullable=False),
Column('value', String(length=255)),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
instance_type_extra_specs = Table('instance_type_extra_specs', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('instance_type_id', Integer, ForeignKey('instance_types.id'),
nullable=False),
Column('key', String(length=255)),
Column('value', String(length=255)),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
instance_type_projects = Table('instance_type_projects', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('instance_type_id', Integer, nullable=False),
Column('project_id', String(length=255)),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
instance_types = Table('instance_types', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('name', String(length=255)),
Column('id', Integer, primary_key=True, nullable=False),
Column('memory_mb', Integer, nullable=False),
Column('vcpus', Integer, nullable=False),
Column('swap', Integer, nullable=False),
Column('vcpu_weight', Integer),
Column('flavorid', String(length=255)),
Column('rxtx_factor', Float),
Column('root_gb', Integer),
Column('ephemeral_gb', Integer),
Column('disabled', Boolean),
Column('is_public', Boolean),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
inst_lock_enum = Enum('owner', 'admin', name='instances0locked_by')
instances = Table('instances', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('internal_id', Integer),
Column('user_id', String(length=255)),
Column('project_id', String(length=255)),
Column('image_ref', String(length=255)),
Column('kernel_id', String(length=255)),
Column('ramdisk_id', String(length=255)),
Column('launch_index', Integer),
Column('key_name', String(length=255)),
Column('key_data', MediumText()),
Column('power_state', Integer),
Column('vm_state', String(length=255)),
Column('memory_mb', Integer),
Column('vcpus', Integer),
Column('hostname', String(length=255)),
Column('host', String(length=255)),
Column('user_data', MediumText()),
Column('reservation_id', String(length=255)),
Column('scheduled_at', DateTime),
Column('launched_at', DateTime),
Column('terminated_at', DateTime),
Column('display_name', String(length=255)),
Column('display_description', String(length=255)),
Column('availability_zone', String(length=255)),
Column('locked', Boolean),
Column('os_type', String(length=255)),
Column('launched_on', MediumText()),
Column('instance_type_id', Integer),
Column('vm_mode', String(length=255)),
Column('uuid', String(length=36)),
Column('architecture', String(length=255)),
Column('root_device_name', String(length=255)),
Column('access_ip_v4', InetSmall()),
Column('access_ip_v6', InetSmall()),
Column('config_drive', String(length=255)),
Column('task_state', String(length=255)),
Column('default_ephemeral_device', String(length=255)),
Column('default_swap_device', String(length=255)),
Column('progress', Integer),
Column('auto_disk_config', Boolean),
Column('shutdown_terminate', Boolean),
Column('disable_terminate', Boolean),
Column('root_gb', Integer),
Column('ephemeral_gb', Integer),
Column('cell_name', String(length=255)),
Column('node', String(length=255)),
Column('deleted', Integer),
Column('locked_by', inst_lock_enum),
Column('cleaned', Integer, default=0),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
instance_actions = Table('instance_actions', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('action', String(length=255)),
Column('instance_uuid', String(length=36)),
Column('request_id', String(length=255)),
Column('user_id', String(length=255)),
Column('project_id', String(length=255)),
Column('start_time', DateTime),
Column('finish_time', DateTime),
Column('message', String(length=255)),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8',
)
instance_actions_events = Table('instance_actions_events', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('event', String(length=255)),
Column('action_id', Integer, ForeignKey('instance_actions.id')),
Column('start_time', DateTime),
Column('finish_time', DateTime),
Column('result', String(length=255)),
Column('traceback', Text),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8',
)
iscsi_targets = Table('iscsi_targets', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('target_num', Integer),
Column('host', String(length=255)),
Column('volume_id', String(length=36), nullable=True),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
key_pairs = Table('key_pairs', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('name', String(length=255)),
Column('user_id', String(length=255)),
Column('fingerprint', String(length=255)),
Column('public_key', MediumText()),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
migrations = Table('migrations', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('source_compute', String(length=255)),
Column('dest_compute', String(length=255)),
Column('dest_host', String(length=255)),
Column('status', String(length=255)),
Column('instance_uuid', String(length=36)),
Column('old_instance_type_id', Integer),
Column('new_instance_type_id', Integer),
Column('source_node', String(length=255)),
Column('dest_node', String(length=255)),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
networks = Table('networks', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('injected', Boolean),
Column('cidr', Inet()),
Column('netmask', InetSmall()),
Column('bridge', String(length=255)),
Column('gateway', InetSmall()),
Column('broadcast', InetSmall()),
Column('dns1', InetSmall()),
Column('vlan', Integer),
Column('vpn_public_address', InetSmall()),
Column('vpn_public_port', Integer),
Column('vpn_private_address', InetSmall()),
Column('dhcp_start', InetSmall()),
Column('project_id', String(length=255)),
Column('host', String(length=255)),
Column('cidr_v6', Inet()),
Column('gateway_v6', InetSmall()),
Column('label', String(length=255)),
Column('netmask_v6', InetSmall()),
Column('bridge_interface', String(length=255)),
Column('multi_host', Boolean),
Column('dns2', InetSmall()),
Column('uuid', String(length=36)),
Column('priority', Integer),
Column('rxtx_base', Integer),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
pci_devices_uc_name = 'uniq_pci_devices0compute_node_id0address0deleted'
pci_devices = Table('pci_devices', meta,
Column('created_at', DateTime(timezone=False)),
Column('updated_at', DateTime(timezone=False)),
Column('deleted_at', DateTime(timezone=False)),
Column('deleted', Integer, default=0, nullable=False),
Column('id', Integer, primary_key=True),
Column('compute_node_id', Integer, nullable=False),
Column('address', String(12), nullable=False),
Column('product_id', String(4)),
Column('vendor_id', String(4)),
Column('dev_type', String(8)),
Column('dev_id', String(255)),
Column('label', String(255), nullable=False),
Column('status', String(36), nullable=False),
Column('extra_info', Text, nullable=True),
Column('instance_uuid', String(36), nullable=True),
Index('ix_pci_devices_compute_node_id_deleted',
'compute_node_id', 'deleted'),
Index('ix_pci_devices_instance_uuid_deleted',
'instance_uuid', 'deleted'),
UniqueConstraint('compute_node_id',
'address', 'deleted',
name=pci_devices_uc_name),
mysql_engine='InnoDB',
mysql_charset='utf8')
provider_fw_rules = Table('provider_fw_rules', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('protocol', String(length=5)),
Column('from_port', Integer),
Column('to_port', Integer),
Column('cidr', Inet()),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
quota_classes = Table('quota_classes', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('class_name', String(length=255)),
Column('resource', String(length=255)),
Column('hard_limit', Integer),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
quota_usages = Table('quota_usages', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('project_id', String(length=255)),
Column('resource', String(length=255)),
Column('in_use', Integer, nullable=False),
Column('reserved', Integer, nullable=False),
Column('until_refresh', Integer),
Column('deleted', Integer),
Column('user_id', String(length=255)),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
quotas = Table('quotas', meta,
Column('id', Integer, primary_key=True, nullable=False),
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('project_id', String(length=255)),
Column('resource', String(length=255), nullable=False),
Column('hard_limit', Integer),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
uniq_name = "uniq_project_user_quotas0user_id0project_id0resource0deleted"
project_user_quotas = Table('project_user_quotas', meta,
Column('id', Integer, primary_key=True,
nullable=False),
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('deleted', Integer),
Column('user_id',
String(length=255),
nullable=False),
Column('project_id',
String(length=255),
nullable=False),
Column('resource',
String(length=255),
nullable=False),
Column('hard_limit', Integer, nullable=True),
UniqueConstraint('user_id', 'project_id', 'resource',
'deleted', name=uniq_name),
mysql_engine='InnoDB',
mysql_charset='utf8',
)
reservations = Table('reservations', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('uuid', String(length=36), nullable=False),
Column('usage_id', Integer, nullable=False),
Column('project_id', String(length=255)),
Column('resource', String(length=255)),
Column('delta', Integer, nullable=False),
Column('expire', DateTime),
Column('deleted', Integer),
Column('user_id', String(length=255)),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
s3_images = Table('s3_images', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('uuid', String(length=36), nullable=False),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
security_group_instance_association = \
Table('security_group_instance_association', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('security_group_id', Integer),
Column('instance_uuid', String(length=36)),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
security_group_rules = Table('security_group_rules', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('parent_group_id', Integer, ForeignKey('security_groups.id')),
Column('protocol', String(length=255)),
Column('from_port', Integer),
Column('to_port', Integer),
Column('cidr', Inet()),
Column('group_id', Integer, ForeignKey('security_groups.id')),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
security_groups = Table('security_groups', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('name', String(length=255)),
Column('description', String(length=255)),
Column('user_id', String(length=255)),
Column('project_id', String(length=255)),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
security_group_default_rules = Table('security_group_default_rules', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('deleted', Integer, default=0),
Column('id', Integer, primary_key=True, nullable=False),
Column('protocol', String(length=5)),
Column('from_port', Integer),
Column('to_port', Integer),
Column('cidr', Inet()),
mysql_engine='InnoDB',
mysql_charset='utf8',
)
services = Table('services', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('host', String(length=255)),
Column('binary', String(length=255)),
Column('topic', String(length=255)),
Column('report_count', Integer, nullable=False),
Column('disabled', Boolean),
Column('deleted', Integer),
Column('disabled_reason', String(length=255)),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
snapshot_id_mappings = Table('snapshot_id_mappings', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('uuid', String(length=36), nullable=False),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
snapshots = Table('snapshots', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', String(length=36), primary_key=True, nullable=False),
Column('volume_id', String(length=36), nullable=False),
Column('user_id', String(length=255)),
Column('project_id', String(length=255)),
Column('status', String(length=255)),
Column('progress', String(length=255)),
Column('volume_size', Integer),
Column('scheduled_at', DateTime),
Column('display_name', String(length=255)),
Column('display_description', String(length=255)),
Column('deleted', String(length=36)),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
task_log = Table('task_log', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('task_name', String(length=255), nullable=False),
Column('state', String(length=255), nullable=False),
Column('host', String(length=255), nullable=False),
Column('period_beginning', DateTime, nullable=False),
Column('period_ending', DateTime, nullable=False),
Column('message', String(length=255), nullable=False),
Column('task_items', Integer),
Column('errors', Integer),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
virtual_interfaces = Table('virtual_interfaces', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('address', String(length=255)),
Column('network_id', Integer),
Column('uuid', String(length=36)),
Column('instance_uuid', String(length=36), nullable=True),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
volume_id_mappings = Table('volume_id_mappings', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('uuid', String(length=36), nullable=False),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
volumes = Table('volumes', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', String(length=36), primary_key=True, nullable=False),
Column('ec2_id', String(length=255)),
Column('user_id', String(length=255)),
Column('project_id', String(length=255)),
Column('host', String(length=255)),
Column('size', Integer),
Column('availability_zone', String(length=255)),
Column('mountpoint', String(length=255)),
Column('status', String(length=255)),
Column('attach_status', String(length=255)),
Column('scheduled_at', DateTime),
Column('launched_at', DateTime),
Column('terminated_at', DateTime),
Column('display_name', String(length=255)),
Column('display_description', String(length=255)),
Column('provider_location', String(length=256)),
Column('provider_auth', String(length=256)),
Column('snapshot_id', String(length=36)),
Column('volume_type_id', Integer),
Column('instance_uuid', String(length=36)),
Column('attach_time', DateTime),
Column('deleted', String(length=36)),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
volume_usage_cache = Table('volume_usage_cache', meta,
Column('created_at', DateTime(timezone=False)),
Column('updated_at', DateTime(timezone=False)),
Column('deleted_at', DateTime(timezone=False)),
Column('id', Integer(), primary_key=True, nullable=False),
Column('volume_id', String(36), nullable=False),
Column('tot_last_refreshed', DateTime(timezone=False)),
Column('tot_reads', BigInteger(), default=0),
Column('tot_read_bytes', BigInteger(), default=0),
Column('tot_writes', BigInteger(), default=0),
Column('tot_write_bytes', BigInteger(), default=0),
Column('curr_last_refreshed', DateTime(timezone=False)),
Column('curr_reads', BigInteger(), default=0),
Column('curr_read_bytes', BigInteger(), default=0),
Column('curr_writes', BigInteger(), default=0),
Column('curr_write_bytes', BigInteger(), default=0),
Column('deleted', Integer),
Column("instance_uuid", String(length=36)),
Column("project_id", String(length=36)),
Column("user_id", String(length=36)),
Column("availability_zone", String(length=255)),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
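    # 'instances' is created (with its indexes) first because several of the
    # tables below reference instances.uuid via foreign keys.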
instances.create()
Index('project_id', instances.c.project_id).create()
Index('uuid', instances.c.uuid, unique=True).create()
# create all tables
tables = [aggregates, console_pools, instance_types,
security_groups, snapshots, volumes,
# those that are children and others later
agent_builds, aggregate_hosts, aggregate_metadata,
block_device_mapping, bw_usage_cache, cells,
certificates, compute_node_stats, compute_nodes, consoles,
dns_domains, fixed_ips, floating_ips,
instance_faults, instance_id_mappings, instance_info_caches,
instance_metadata, instance_system_metadata,
instance_type_extra_specs, instance_type_projects,
instance_actions, instance_actions_events,
groups, group_metadata, group_policy, group_member,
iscsi_targets, key_pairs, migrations, networks,
pci_devices, provider_fw_rules, quota_classes, quota_usages,
quotas, project_user_quotas,
reservations, s3_images, security_group_instance_association,
security_group_rules, security_group_default_rules,
services, snapshot_id_mappings, task_log,
virtual_interfaces,
volume_id_mappings,
volume_usage_cache]
for table in tables:
try:
table.create()
except Exception:
LOG.info(repr(table))
LOG.exception('Exception while creating table.')
raise
# task log unique constraint
task_log_uc = "uniq_task_log0task_name0host0period_beginning0period_ending"
task_log_cols = ('task_name', 'host', 'period_beginning', 'period_ending')
uc = UniqueConstraint(*task_log_cols, table=task_log, name=task_log_uc)
uc.create()
# networks unique constraint
UniqueConstraint('vlan', 'deleted', table=networks,
name='uniq_networks0vlan0deleted').create()
# instance_type_name constraint
UniqueConstraint('name', 'deleted', table=instance_types,
name='uniq_instance_types0name0deleted').create()
# flavorid unique constraint
UniqueConstraint('flavorid', 'deleted', table=instance_types,
name='uniq_instance_types0flavorid0deleted').create()
# keypair constraint
UniqueConstraint('user_id', 'name', 'deleted', table=key_pairs,
name='uniq_key_pairs0user_id0name0deleted').create()
# instance_type_projects constraint
inst_type_uc_name = 'uniq_instance_type_projects0instance_type_id0' + \
'project_id0deleted'
UniqueConstraint('instance_type_id', 'project_id', 'deleted',
table=instance_type_projects,
name=inst_type_uc_name).create()
# floating_ips unique constraint
UniqueConstraint('address', 'deleted',
table=floating_ips,
name='uniq_floating_ips0address0deleted').create()
# instance_info_caches
UniqueConstraint('instance_uuid',
table=instance_info_caches,
name='uniq_instance_info_caches0instance_uuid').create()
UniqueConstraint('address', 'deleted',
table=virtual_interfaces,
name='uniq_virtual_interfaces0address0deleted').create()
# cells
UniqueConstraint('name', 'deleted',
table=cells,
name='uniq_cells0name0deleted').create()
# security_groups
uc = UniqueConstraint('project_id', 'name', 'deleted',
table=security_groups,
name='uniq_security_groups0project_id0name0deleted')
uc.create()
# quotas
UniqueConstraint('project_id', 'resource', 'deleted',
table=quotas,
name='uniq_quotas0project_id0resource0deleted').create()
# fixed_ips
UniqueConstraint('address', 'deleted',
table=fixed_ips,
name='uniq_fixed_ips0address0deleted').create()
# services
UniqueConstraint('host', 'topic', 'deleted',
table=services,
name='uniq_services0host0topic0deleted').create()
UniqueConstraint('host', 'binary', 'deleted',
table=services,
name='uniq_services0host0binary0deleted').create()
# agent_builds
uc_name = 'uniq_agent_builds0hypervisor0os0architecture0deleted'
UniqueConstraint('hypervisor', 'os', 'architecture', 'deleted',
table=agent_builds,
name=uc_name).create()
uc_name = 'uniq_console_pools0host0console_type0compute_host0deleted'
UniqueConstraint('host', 'console_type', 'compute_host', 'deleted',
table=console_pools,
name=uc_name).create()
uc_name = 'uniq_aggregate_hosts0host0aggregate_id0deleted'
UniqueConstraint('host', 'aggregate_id', 'deleted',
table=aggregate_hosts,
name=uc_name).create()
uc_name = 'uniq_aggregate_metadata0aggregate_id0key0deleted'
UniqueConstraint('aggregate_id', 'key', 'deleted',
table=aggregate_metadata,
name=uc_name).create()
uc_name = 'uniq_instance_type_extra_specs0instance_type_id0key0deleted'
UniqueConstraint('instance_type_id', 'key', 'deleted',
table=instance_type_extra_specs,
name=uc_name).create()
# created first (to preserve ordering for schema diffs)
mysql_pre_indexes = [
Index('instance_type_id', instance_type_projects.c.instance_type_id),
Index('project_id', dns_domains.c.project_id),
Index('fixed_ip_id', floating_ips.c.fixed_ip_id),
Index('network_id', virtual_interfaces.c.network_id),
Index('network_id', fixed_ips.c.network_id),
Index('fixed_ips_virtual_interface_id_fkey',
fixed_ips.c.virtual_interface_id),
Index('address', fixed_ips.c.address),
Index('fixed_ips_instance_uuid_fkey', fixed_ips.c.instance_uuid),
Index('instance_uuid', instance_system_metadata.c.instance_uuid),
Index('iscsi_targets_volume_id_fkey', iscsi_targets.c.volume_id),
Index('snapshot_id', block_device_mapping.c.snapshot_id),
Index('usage_id', reservations.c.usage_id),
Index('virtual_interfaces_instance_uuid_fkey',
virtual_interfaces.c.instance_uuid),
Index('volume_id', block_device_mapping.c.volume_id),
Index('security_group_id',
security_group_instance_association.c.security_group_id),
]
# Common indexes (indexes we apply to all databases)
# NOTE: order specific for MySQL diff support
common_indexes = [
# aggregate_metadata
Index('aggregate_metadata_key_idx', aggregate_metadata.c.key),
# agent_builds
Index('agent_builds_hypervisor_os_arch_idx',
agent_builds.c.hypervisor,
agent_builds.c.os,
agent_builds.c.architecture),
# block_device_mapping
Index('block_device_mapping_instance_uuid_idx',
block_device_mapping.c.instance_uuid),
Index('block_device_mapping_instance_uuid_device_name_idx',
block_device_mapping.c.instance_uuid,
block_device_mapping.c.device_name),
# NOTE(dprince): This is now a duplicate index on MySQL and needs to
# be removed there. We leave it here so the Index ordering
# matches on schema diffs (for MySQL).
# See Havana migration 186_new_bdm_format where we dropped the
# virtual_name column.
# IceHouse fix is here: https://bugs.launchpad.net/nova/+bug/1265839
Index(
'block_device_mapping_instance_uuid_virtual_name_device_name_idx',
block_device_mapping.c.instance_uuid,
block_device_mapping.c.device_name),
Index('block_device_mapping_instance_uuid_volume_id_idx',
block_device_mapping.c.instance_uuid,
block_device_mapping.c.volume_id),
# bw_usage_cache
Index('bw_usage_cache_uuid_start_period_idx',
bw_usage_cache.c.uuid, bw_usage_cache.c.start_period),
Index('certificates_project_id_deleted_idx',
certificates.c.project_id, certificates.c.deleted),
Index('certificates_user_id_deleted_idx', certificates.c.user_id,
certificates.c.deleted),
# compute_node_stats
Index('ix_compute_node_stats_compute_node_id',
compute_node_stats.c.compute_node_id),
Index('compute_node_stats_node_id_and_deleted_idx',
compute_node_stats.c.compute_node_id,
compute_node_stats.c.deleted),
# consoles
Index('consoles_instance_uuid_idx', consoles.c.instance_uuid),
# dns_domains
Index('dns_domains_domain_deleted_idx',
dns_domains.c.domain, dns_domains.c.deleted),
# fixed_ips
Index('fixed_ips_host_idx', fixed_ips.c.host),
Index('fixed_ips_network_id_host_deleted_idx', fixed_ips.c.network_id,
fixed_ips.c.host, fixed_ips.c.deleted),
Index('fixed_ips_address_reserved_network_id_deleted_idx',
fixed_ips.c.address, fixed_ips.c.reserved,
fixed_ips.c.network_id, fixed_ips.c.deleted),
Index('fixed_ips_deleted_allocated_idx', fixed_ips.c.address,
fixed_ips.c.deleted, fixed_ips.c.allocated),
# floating_ips
Index('floating_ips_host_idx', floating_ips.c.host),
Index('floating_ips_project_id_idx', floating_ips.c.project_id),
Index('floating_ips_pool_deleted_fixed_ip_id_project_id_idx',
floating_ips.c.pool, floating_ips.c.deleted,
floating_ips.c.fixed_ip_id, floating_ips.c.project_id),
# group_member
Index('instance_group_member_instance_idx',
group_member.c.instance_id),
# group_metadata
Index('instance_group_metadata_key_idx', group_metadata.c.key),
# group_policy
Index('instance_group_policy_policy_idx', group_policy.c.policy),
# instances
Index('instances_reservation_id_idx',
instances.c.reservation_id),
Index('instances_terminated_at_launched_at_idx',
instances.c.terminated_at,
instances.c.launched_at),
Index('instances_task_state_updated_at_idx',
instances.c.task_state,
instances.c.updated_at),
Index('instances_host_deleted_idx', instances.c.host,
instances.c.deleted),
Index('instances_uuid_deleted_idx', instances.c.uuid,
instances.c.deleted),
Index('instances_host_node_deleted_idx', instances.c.host,
instances.c.node, instances.c.deleted),
Index('instances_host_deleted_cleaned_idx',
instances.c.host, instances.c.deleted,
instances.c.cleaned),
# instance_actions
Index('instance_uuid_idx', instance_actions.c.instance_uuid),
Index('request_id_idx', instance_actions.c.request_id),
# instance_faults
Index('instance_faults_host_idx', instance_faults.c.host),
Index('instance_faults_instance_uuid_deleted_created_at_idx',
instance_faults.c.instance_uuid, instance_faults.c.deleted,
instance_faults.c.created_at),
# instance_id_mappings
Index('ix_instance_id_mappings_uuid', instance_id_mappings.c.uuid),
# instance_metadata
Index('instance_metadata_instance_uuid_idx',
instance_metadata.c.instance_uuid),
# instance_type_extra_specs
Index('instance_type_extra_specs_instance_type_id_key_idx',
instance_type_extra_specs.c.instance_type_id,
instance_type_extra_specs.c.key),
# iscsi_targets
Index('iscsi_targets_host_idx', iscsi_targets.c.host),
Index('iscsi_targets_host_volume_id_deleted_idx',
iscsi_targets.c.host, iscsi_targets.c.volume_id,
iscsi_targets.c.deleted),
# migrations
Index('migrations_by_host_nodes_and_status_idx',
migrations.c.deleted, migrations.c.source_compute,
migrations.c.dest_compute, migrations.c.source_node,
migrations.c.dest_node, migrations.c.status),
Index('migrations_instance_uuid_and_status_idx',
migrations.c.deleted, migrations.c.instance_uuid,
migrations.c.status),
# networks
Index('networks_host_idx', networks.c.host),
Index('networks_cidr_v6_idx', networks.c.cidr_v6),
Index('networks_bridge_deleted_idx', networks.c.bridge,
networks.c.deleted),
Index('networks_project_id_deleted_idx', networks.c.project_id,
networks.c.deleted),
Index('networks_uuid_project_id_deleted_idx',
networks.c.uuid, networks.c.project_id, networks.c.deleted),
Index('networks_vlan_deleted_idx', networks.c.vlan,
networks.c.deleted),
# project_user_quotas
Index('project_user_quotas_project_id_deleted_idx',
project_user_quotas.c.project_id,
project_user_quotas.c.deleted),
Index('project_user_quotas_user_id_deleted_idx',
project_user_quotas.c.user_id, project_user_quotas.c.deleted),
# reservations
Index('ix_reservations_project_id', reservations.c.project_id),
Index('ix_reservations_user_id_deleted',
reservations.c.user_id, reservations.c.deleted),
Index('reservations_uuid_idx', reservations.c.uuid),
# security_group_instance_association
Index('security_group_instance_association_instance_uuid_idx',
security_group_instance_association.c.instance_uuid),
# task_log
Index('ix_task_log_period_beginning', task_log.c.period_beginning),
Index('ix_task_log_host', task_log.c.host),
Index('ix_task_log_period_ending', task_log.c.period_ending),
# quota_classes
Index('ix_quota_classes_class_name', quota_classes.c.class_name),
# quota_usages
Index('ix_quota_usages_project_id', quota_usages.c.project_id),
Index('ix_quota_usages_user_id_deleted',
quota_usages.c.user_id, quota_usages.c.deleted),
# volumes
Index('volumes_instance_uuid_idx', volumes.c.instance_uuid),
]
# MySQL specific indexes
if migrate_engine.name == 'mysql':
for index in mysql_pre_indexes:
index.create(migrate_engine)
        # MySQL-specific index on the leftmost 100 chars of each column
        # (MySQL rejects the index if the total key length is too long).
sql = ("create index migrations_by_host_nodes_and_status_idx ON "
"migrations (deleted, source_compute(100), dest_compute(100), "
"source_node(100), dest_node(100), status)")
migrate_engine.execute(sql)
# PostgreSQL specific indexes
if migrate_engine.name == 'postgresql':
Index('address', fixed_ips.c.address).create()
# NOTE(dprince): PostgreSQL doesn't allow duplicate indexes
# so we skip creation of select indexes (so schemas match exactly).
POSTGRES_INDEX_SKIPS = [
# See Havana migration 186_new_bdm_format where we dropped the
# virtual_name column.
# IceHouse fix is here: https://bugs.launchpad.net/nova/+bug/1265839
'block_device_mapping_instance_uuid_virtual_name_device_name_idx'
]
MYSQL_INDEX_SKIPS = [
# we create this one manually for MySQL above
'migrations_by_host_nodes_and_status_idx'
]
for index in common_indexes:
if ((migrate_engine.name == 'postgresql' and
index.name in POSTGRES_INDEX_SKIPS) or
(migrate_engine.name == 'mysql' and
index.name in MYSQL_INDEX_SKIPS)):
continue
else:
index.create(migrate_engine)
    # Drop the leftover 'project_id' index on dns_domains (note: this must be
    # an actual call; without the parentheses the statement was a no-op).
    Index('project_id', dns_domains.c.project_id).drop(migrate_engine)
# Common foreign keys
fkeys = [
[[instance_type_projects.c.instance_type_id],
[instance_types.c.id],
'instance_type_projects_ibfk_1'],
[[iscsi_targets.c.volume_id],
[volumes.c.id],
'iscsi_targets_volume_id_fkey'],
[[reservations.c.usage_id],
[quota_usages.c.id],
'reservations_ibfk_1'],
[[security_group_instance_association.c.security_group_id],
[security_groups.c.id],
'security_group_instance_association_ibfk_1'],
[[compute_node_stats.c.compute_node_id],
[compute_nodes.c.id],
'fk_compute_node_stats_compute_node_id'],
[[compute_nodes.c.service_id],
[services.c.id],
'fk_compute_nodes_service_id'],
]
secgroup_instance_association_instance_uuid_fkey = (
'security_group_instance_association_instance_uuid_fkey')
fkeys.extend(
[
[[fixed_ips.c.instance_uuid],
[instances.c.uuid],
'fixed_ips_instance_uuid_fkey'],
[[block_device_mapping.c.instance_uuid],
[instances.c.uuid],
'block_device_mapping_instance_uuid_fkey'],
[[instance_info_caches.c.instance_uuid],
[instances.c.uuid],
'instance_info_caches_instance_uuid_fkey'],
[[instance_metadata.c.instance_uuid],
[instances.c.uuid],
'instance_metadata_instance_uuid_fkey'],
[[instance_system_metadata.c.instance_uuid],
[instances.c.uuid],
'instance_system_metadata_ibfk_1'],
[[security_group_instance_association.c.instance_uuid],
[instances.c.uuid],
secgroup_instance_association_instance_uuid_fkey],
[[virtual_interfaces.c.instance_uuid],
[instances.c.uuid],
'virtual_interfaces_instance_uuid_fkey'],
[[instance_actions.c.instance_uuid],
[instances.c.uuid],
'fk_instance_actions_instance_uuid'],
[[instance_faults.c.instance_uuid],
[instances.c.uuid],
'fk_instance_faults_instance_uuid'],
[[migrations.c.instance_uuid],
[instances.c.uuid],
'fk_migrations_instance_uuid']
])
for fkey_pair in fkeys:
if migrate_engine.name == 'mysql':
# For MySQL we name our fkeys explicitly
# so they match Havana
fkey = ForeignKeyConstraint(columns=fkey_pair[0],
refcolumns=fkey_pair[1],
name=fkey_pair[2])
fkey.create()
elif migrate_engine.name == 'postgresql':
# PostgreSQL names things like it wants (correct and compatible!)
fkey = ForeignKeyConstraint(columns=fkey_pair[0],
refcolumns=fkey_pair[1])
fkey.create()
if migrate_engine.name == 'mysql':
# In Folsom we explicitly converted migrate_version to UTF8.
migrate_engine.execute(
'ALTER TABLE migrate_version CONVERT TO CHARACTER SET utf8')
# Set default DB charset to UTF8.
migrate_engine.execute(
'ALTER DATABASE %s DEFAULT CHARACTER SET utf8' %
migrate_engine.url.database)
_create_shadow_tables(migrate_engine)
_create_dump_tables(migrate_engine)
|
|
# Copyright 2013 Google Inc. All Rights Reserved.
# Use of this source code is governed by a BSD-style license that can
# be found in the LICENSE file.
"""A simple, direct connection to the vtgate proxy server, using gRPC.
"""
import logging
import re
from urlparse import urlparse
from vtdb import prefer_vtroot_imports # pylint: disable=unused-import
import grpc
from vtproto import vtgate_pb2
from vtproto import vtgateservice_pb2
from vtdb import dbexceptions
from vtdb import proto3_encoding
from vtdb import vtdb_logger
from vtdb import vtgate_client
from vtdb import vtgate_cursor
from vtdb import vtgate_utils
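# Extracts the MySQL error number from server error messages, e.g. the "1062"
# in "... (errno 1062) ...".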
_errno_pattern = re.compile(r'\(errno (\d+)\)', re.IGNORECASE)
class GRPCVTGateConnection(vtgate_client.VTGateClient,
proto3_encoding.Proto3Connection):
"""A direct gRPC connection to the vtgate query service, using proto3.
"""
def __init__(self, addr, timeout,
root_certificates=None, private_key=None, certificate_chain=None,
**kwargs):
"""Creates a new GRPCVTGateConnection.
Args:
addr: address to connect to.
timeout: connection time out.
root_certificates: PEM_encoded root certificates.
private_key: PEM-encoded private key.
certificate_chain: PEM-encoded certificate chain.
**kwargs: passed up.
"""
super(GRPCVTGateConnection, self).__init__(addr, timeout, **kwargs)
self.stub = None
self.root_certificates = root_certificates
self.private_key = private_key
self.certificate_chain = certificate_chain
self.logger_object = vtdb_logger.get_logger()
def dial(self):
if self.stub:
self.stub.close()
p = urlparse('http://' + self.addr)
target = '%s:%s' % (p.hostname, p.port)
if self.root_certificates or self.private_key or self.certificate_chain:
creds = grpc.ssl_channel_credentials(
self.root_certificates, self.private_key, self.certificate_chain)
channel = grpc.secure_channel(target, creds)
else:
channel = grpc.insecure_channel(target)
self.stub = vtgateservice_pb2.VitessStub(channel)
def close(self):
"""close closes the server connection and frees up associated resources.
The stub object is managed by the gRPC library, removing references
to it will just close the channel.
"""
if self.session and self.session.in_transaction:
      # If the endpoint is not responding, the rollback would raise just as
      # we are tearing the connection down, so swallow that exception.
try:
self.rollback()
except dbexceptions.DatabaseError:
pass
self.stub = None
def is_closed(self):
return self.stub is None
def cursor(self, *pargs, **kwargs):
cursorclass = kwargs.pop('cursorclass', None) or vtgate_cursor.VTGateCursor
return cursorclass(self, *pargs, **kwargs)
def begin(self, effective_caller_id=None, single_db=False):
try:
request = self.begin_request(effective_caller_id, single_db)
response = self.stub.Begin(request, self.timeout)
self.update_session(response)
except (grpc.RpcError, vtgate_utils.VitessError) as e:
raise _convert_exception(e, 'Begin')
def commit(self, twopc=False):
try:
request = self.commit_request(twopc)
self.stub.Commit(request, self.timeout)
except (grpc.RpcError, vtgate_utils.VitessError) as e:
raise _convert_exception(e, 'Commit')
finally:
self.session = None
def rollback(self):
try:
request = self.rollback_request()
self.stub.Rollback(request, self.timeout)
except (grpc.RpcError, vtgate_utils.VitessError) as e:
raise _convert_exception(e, 'Rollback')
finally:
self.session = None
@vtgate_utils.exponential_backoff_retry((dbexceptions.ThrottledError,
dbexceptions.TransientError))
def _execute(
self, sql, bind_variables, tablet_type, keyspace_name=None,
shards=None, keyspace_ids=None, keyranges=None,
entity_keyspace_id_map=None, entity_column_name=None,
not_in_transaction=False, effective_caller_id=None,
include_event_token=False, compare_event_token=None, **kwargs):
# FIXME(alainjobart): keyspace should be in routing_kwargs,
# as it's not used for v3.
try:
request, routing_kwargs, method_name = self.execute_request_and_name(
sql, bind_variables, tablet_type,
keyspace_name, shards, keyspace_ids, keyranges,
entity_column_name, entity_keyspace_id_map,
not_in_transaction, effective_caller_id, include_event_token,
compare_event_token)
method = getattr(self.stub, method_name)
response = method(request, self.timeout)
return self.process_execute_response(method_name, response)
except (grpc.RpcError, vtgate_utils.VitessError) as e:
self.logger_object.log_private_data(bind_variables)
raise _convert_exception(
e, method_name,
sql=sql, keyspace=keyspace_name, tablet_type=tablet_type,
not_in_transaction=not_in_transaction,
**routing_kwargs)
@vtgate_utils.exponential_backoff_retry((dbexceptions.ThrottledError,
dbexceptions.TransientError))
def _execute_batch(
self, sql_list, bind_variables_list, keyspace_list, keyspace_ids_list,
shards_list, tablet_type, as_transaction, effective_caller_id=None,
**kwargs):
try:
request, method_name = self.execute_batch_request_and_name(
sql_list, bind_variables_list, keyspace_list,
keyspace_ids_list, shards_list,
tablet_type, as_transaction, effective_caller_id)
method = getattr(self.stub, method_name)
response = method(request, self.timeout)
return self.process_execute_batch_response(method_name, response)
except (grpc.RpcError, vtgate_utils.VitessError) as e:
self.logger_object.log_private_data(bind_variables_list)
raise _convert_exception(
e, method_name,
sqls=sql_list, tablet_type=tablet_type,
as_transaction=as_transaction)
@vtgate_utils.exponential_backoff_retry((dbexceptions.ThrottledError,
dbexceptions.TransientError))
def _stream_execute(
self, sql, bind_variables, tablet_type, keyspace_name=None,
shards=None, keyspace_ids=None, keyranges=None,
effective_caller_id=None,
**kwargs):
try:
request, routing_kwargs, method_name = (
self.stream_execute_request_and_name(
sql, bind_variables, tablet_type,
keyspace_name,
shards,
keyspace_ids,
keyranges,
effective_caller_id))
method = getattr(self.stub, method_name)
it = method(request, self.timeout)
first_response = it.next()
except (grpc.RpcError, vtgate_utils.VitessError) as e:
self.logger_object.log_private_data(bind_variables)
raise _convert_exception(
e, method_name,
sql=sql, keyspace=keyspace_name, tablet_type=tablet_type,
**routing_kwargs)
fields, convs = self.build_conversions(first_response.result.fields)
def row_generator():
try:
for response in it:
for row in response.result.rows:
yield tuple(proto3_encoding.make_row(row, convs))
except Exception:
logging.exception('gRPC low-level error')
raise
return row_generator(), fields
def get_srv_keyspace(self, name):
try:
request = vtgate_pb2.GetSrvKeyspaceRequest(
keyspace=name,
)
response = self.stub.GetSrvKeyspace(request, self.timeout)
return self.keyspace_from_response(name, response)
except (grpc.RpcError, vtgate_utils.VitessError) as e:
raise _convert_exception(e, keyspace=name)
@vtgate_utils.exponential_backoff_retry((dbexceptions.ThrottledError,
dbexceptions.TransientError))
def update_stream(
self, keyspace_name, tablet_type,
timestamp=None, event=None,
shard=None, key_range=None,
effective_caller_id=None,
**kwargs):
try:
request = self.update_stream_request(
keyspace_name, shard, key_range, tablet_type,
timestamp, event, effective_caller_id)
it = self.stub.UpdateStream(request, self.timeout)
except (grpc.RpcError, vtgate_utils.VitessError) as e:
raise _convert_exception(
e, 'UpdateStream',
keyspace=keyspace_name, tablet_type=tablet_type)
def row_generator():
try:
for response in it:
yield (response.event, response.resume_timestamp)
except Exception as e:
raise _convert_exception(e)
return row_generator()
@vtgate_utils.exponential_backoff_retry((dbexceptions.ThrottledError,
dbexceptions.TransientError))
def message_stream(
self, keyspace, name,
shard=None, key_range=None,
effective_caller_id=None,
**kwargs):
try:
request = self.message_stream_request(
keyspace, shard, key_range,
name, effective_caller_id)
it = self.stub.MessageStream(request, self.timeout)
first_response = it.next()
except (grpc.RpcError, vtgate_utils.VitessError) as e:
raise _convert_exception(
e, 'MessageStream', name=name,
keyspace=keyspace)
fields, convs = self.build_conversions(first_response.result.fields)
def row_generator():
try:
for response in it:
for row in response.result.rows:
yield tuple(proto3_encoding.make_row(row, convs))
except Exception:
logging.exception('gRPC low-level error')
raise
return row_generator(), fields
@vtgate_utils.exponential_backoff_retry((dbexceptions.ThrottledError,
dbexceptions.TransientError))
def message_ack(
self,
name, ids,
keyspace=None, effective_caller_id=None,
**kwargs):
try:
request = self.message_ack_request(
keyspace, name, ids, effective_caller_id)
response = self.stub.MessageAck(request, self.timeout)
except (grpc.RpcError, vtgate_utils.VitessError) as e:
raise _convert_exception(
e, 'MessageAck', name=name, ids=ids,
keyspace=keyspace)
return response.result.rows_affected
def _convert_exception(exc, *args, **kwargs):
"""This parses the protocol exceptions to the api interface exceptions.
This also logs the exception and increments the appropriate error counters.
Args:
exc: raw protocol exception.
*args: additional args from the raising site.
**kwargs: additional keyword args from the raising site.
They will be converted into a single string, and added as an extra
arg to the exception.
Returns:
Api interface exceptions - dbexceptions with new args.
"""
kwargs_as_str = vtgate_utils.convert_exception_kwargs(kwargs)
exc.args += args
if kwargs_as_str:
    exc.args += (kwargs_as_str,)
new_args = (type(exc).__name__,) + exc.args
if isinstance(exc, vtgate_utils.VitessError):
new_exc = exc.convert_to_dbexception(new_args)
elif isinstance(exc, grpc.RpcError):
# Most RpcErrors should also implement Call so we can get details.
if isinstance(exc, grpc.Call):
code = exc.code()
details = exc.details()
if code == grpc.StatusCode.DEADLINE_EXCEEDED:
new_exc = dbexceptions.TimeoutError(new_args)
elif code == grpc.StatusCode.UNAVAILABLE:
if vtgate_utils.throttler_err_re.search(details):
return dbexceptions.ThrottledError(new_args)
else:
return dbexceptions.TransientError(details, new_args)
elif code == grpc.StatusCode.ALREADY_EXISTS:
new_exc = _prune_integrity_error(details, new_args)
elif code == grpc.StatusCode.FAILED_PRECONDITION:
return dbexceptions.QueryNotServed(details, new_args)
elif code == grpc.StatusCode.INVALID_ARGUMENT:
return dbexceptions.ProgrammingError(details, new_args)
else:
# Other RPC error that we don't specifically handle.
new_exc = dbexceptions.DatabaseError(new_args + (code, details))
else:
# RPC error that doesn't provide code and details.
# Don't let gRPC-specific errors leak beyond this package.
new_exc = dbexceptions.DatabaseError(new_args + (exc,))
else:
new_exc = exc
vtgate_utils.log_exception(
new_exc,
keyspace=kwargs.get('keyspace'), tablet_type=kwargs.get('tablet_type'))
return new_exc
def _prune_integrity_error(msg, exc_args):
"""Prunes an integrity error message and returns an IntegrityError."""
parts = _errno_pattern.split(msg)
pruned_msg = msg[:msg.find(parts[2])]
exc_args = (pruned_msg,) + tuple(exc_args[1:])
return dbexceptions.IntegrityError(exc_args)
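# Illustrative note (added): for a message such as
#   "Duplicate entry '1' for key 'PRIMARY' (errno 1062) (sqlstate 23000) ..."
# the split above keeps everything up to and including "(errno 1062)" and
# drops the trailing driver noise before wrapping it in IntegrityError.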
vtgate_client.register_conn_class('grpc', GRPCVTGateConnection)
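# Illustrative usage sketch (added; the address and the exact
# vtgate_client.connect() signature are assumptions, not from this module):
# once this module is imported, the 'grpc' protocol registered above is
# available through the generic factory, e.g.
#   conn = vtgate_client.connect('grpc', 'localhost:15991', 30.0)
#   cursor = conn.cursor(tablet_type='master', keyspace=None)
#   cursor.execute('select 1 from dual', {})
#   conn.close()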
|
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
'''A gatherer for the TotalRecall brand of HTML templates with replaceable
portions. We wanted to reuse extern.tclib.api.handlers.html.TCHTMLParser
but this proved impossible due to the fact that the TotalRecall HTML templates
are in general quite far from parseable HTML and the TCHTMLParser derives
from HTMLParser.HTMLParser which requires relatively well-formed HTML. Some
examples of "HTML" from the TotalRecall HTML templates that wouldn't be
parseable include things like:
<a [PARAMS]>blabla</a> (not parseable because attributes are invalid)
<table><tr><td>[LOTSOFSTUFF]</tr></table> (not parseable because closing
</td> is in the HTML [LOTSOFSTUFF]
is replaced by)
The other problem with using general parsers (such as TCHTMLParser) is that
we want to output the TotalRecall template with as few changes as possible in
terms of whitespace characters, layout etc. With any parser that builds a
parse tree and generates output by dumping that tree, we would always get
small inconsistencies which could cause bugs (the TotalRecall template stuff
is quite brittle and can break if e.g. a tab character is replaced with
spaces).
The solution, which may be applicable to some other HTML-like template
languages floating around Google, is to create a parser with a simple state
machine that keeps track of what kind of tag it's inside, and whether it's in
a translateable section or not. Translateable sections are:
a) text (including [BINGO] replaceables) inside of tags that
can contain translateable text (which is all tags except
for a few)
b) text inside of an 'alt' attribute in an <image> element, or
the 'value' attribute of a <submit>, <button> or <text>
element.
The parser does not build up a parse tree but rather a "skeleton", which
is a list of nontranslateable strings intermingled with
grit.clique.MessageClique objects. This simplifies the parser considerably
compared to a regular HTML parser. To output a translated document, each item
in the skeleton is printed out, with the relevant Translation from each
MessageClique being used for the requested language.
This implementation borrows some code, constants and ideas from
extern.tclib.api.handlers.html.TCHTMLParser.
'''
from __future__ import print_function
import re
import six
from grit import clique
from grit import exception
from grit import lazy_re
from grit import util
from grit import tclib
from grit.gather import interface
# HTML tags which break (separate) chunks.
_BLOCK_TAGS = ['script', 'p', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'hr', 'br',
'body', 'style', 'head', 'title', 'table', 'tr', 'td', 'th',
'ul', 'ol', 'dl', 'nl', 'li', 'div', 'object', 'center',
'html', 'link', 'form', 'select', 'textarea',
'button', 'option', 'map', 'area', 'blockquote', 'pre',
'meta', 'xmp', 'noscript', 'label', 'tbody', 'thead',
'script', 'style', 'pre', 'iframe', 'img', 'input', 'nowrap',
'fieldset', 'legend']
# HTML tags which may appear within a chunk.
_INLINE_TAGS = ['b', 'i', 'u', 'tt', 'code', 'font', 'a', 'span', 'small',
'key', 'nobr', 'url', 'em', 's', 'sup', 'strike',
'strong']
# HTML tags within which linebreaks are significant.
_PREFORMATTED_TAGS = ['textarea', 'xmp', 'pre']
# An array mapping some of the inline HTML tags to more meaningful
# names for those tags. This will be used when generating placeholders
# representing these tags.
_HTML_PLACEHOLDER_NAMES = { 'a' : 'link', 'br' : 'break', 'b' : 'bold',
'i' : 'italic', 'li' : 'item', 'ol' : 'ordered_list', 'p' : 'paragraph',
'ul' : 'unordered_list', 'img' : 'image', 'em' : 'emphasis' }
# We append each of these characters in sequence to distinguish between
# different placeholders with basically the same name (e.g. BOLD1, BOLD2).
# Keep in mind that a placeholder name must not be a substring of any other
# placeholder name in the same message, so we can't simply count (BOLD_1
# would be a substring of BOLD_10).
_SUFFIXES = '123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ'
# Matches whitespace in an HTML document. Also matches HTML comments, which are
# treated as whitespace.
_WHITESPACE = lazy_re.compile(r'(\s|&nbsp;|\\n|\\r|<!--\s*desc\s*=.*?-->)+',
re.DOTALL)
# Matches whitespace sequences which can be folded into a single whitespace
# character. This matches single characters so that non-spaces are replaced
# with spaces.
_FOLD_WHITESPACE = lazy_re.compile(r'\s+')
# Finds a non-whitespace character
_NON_WHITESPACE = lazy_re.compile(r'\S')
# Matches two or more &nbsp; in a row (a single &nbsp; is not changed into
# placeholders because different languages require different numbers of spaces
# and placeholders must match exactly; more than one &nbsp; is probably a
# "special" whitespace sequence and should be turned into a placeholder).
_NBSP = lazy_re.compile(r'&nbsp;(&nbsp;)+')
# Matches nontranslateable chunks of the document
_NONTRANSLATEABLES = lazy_re.compile(r'''
<\s*script.+?<\s*/\s*script\s*>
|
<\s*style.+?<\s*/\s*style\s*>
|
<!--.+?-->
|
<\?IMPORT\s.+?> # import tag
|
<\s*[a-zA-Z_]+:.+?> # custom tag (open)
|
<\s*/\s*[a-zA-Z_]+:.+?> # custom tag (close)
|
<!\s*[A-Z]+\s*([^>]+|"[^"]+"|'[^']+')*?>
''', re.MULTILINE | re.DOTALL | re.VERBOSE | re.IGNORECASE)
# Matches a tag and its attributes
_ELEMENT = lazy_re.compile(r'''
# Optional closing /, element name
<\s*(?P<closing>/)?\s*(?P<element>[a-zA-Z0-9]+)\s*
# Attributes and/or replaceables inside the tag, if any
(?P<atts>(
\s*([a-zA-Z_][-:.a-zA-Z_0-9]*) # Attribute name
(\s*=\s*(\'[^\']*\'|"[^"]*"|[-a-zA-Z0-9./,:;+*%?!&$\(\)_#=~\'"@]*))?
|
\s*\[(\$?\~)?([A-Z0-9-_]+?)(\~\$?)?\]
)*)
\s*(?P<empty>/)?\s*> # Optional empty-tag closing /, and tag close
''',
re.MULTILINE | re.DOTALL | re.VERBOSE)
# Matches elements that may have translateable attributes. The value of these
# special attributes is given by group 'value1' or 'value2'. Note that this
# regexp demands that the attribute value be quoted; this is necessary because
# the non-tree-building nature of the parser means we don't know when we're
# writing out attributes, so we wouldn't know to escape spaces.
_SPECIAL_ELEMENT = lazy_re.compile(r'''
<\s*(
input[^>]+?value\s*=\s*(\'(?P<value3>[^\']*)\'|"(?P<value4>[^"]*)")
[^>]+type\s*=\s*"?'?(button|reset|text|submit)'?"?
|
(
table[^>]+?title\s*=
|
img[^>]+?alt\s*=
|
input[^>]+?type\s*=\s*"?'?(button|reset|text|submit)'?"?[^>]+?value\s*=
)
\s*(\'(?P<value1>[^\']*)\'|"(?P<value2>[^"]*)")
)[^>]*?>
''', re.MULTILINE | re.DOTALL | re.VERBOSE | re.IGNORECASE)
# Matches stuff that is translateable if it occurs in the right context
# (between tags). This includes all characters and character entities.
# Note that this also matches &nbsp;, which needs to be handled as whitespace
# before this regexp is applied.
_CHARACTERS = lazy_re.compile(r'''
(
\w
|
[\!\@\#\$\%\^\*\(\)\-\=\_\+\[\]\{\}\\\|\;\:\'\"\,\.\/\?\`\~]
|
&(\#[0-9]+|\#x[0-9a-fA-F]+|[A-Za-z0-9]+);
)+
''', re.MULTILINE | re.DOTALL | re.VERBOSE)
# Matches Total Recall's "replaceable" tags, which are just any text
# in capitals enclosed by delimiters like [] or [~~] or [$~~$] (e.g. [HELLO],
# [~HELLO~] and [$~HELLO~$]).
_REPLACEABLE = lazy_re.compile(r'\[(\$?\~)?(?P<name>[A-Z0-9-_]+?)(\~\$?)?\]',
re.MULTILINE)
# Matches the silly [!]-prefixed "header" that is used in some TotalRecall
# templates.
_SILLY_HEADER = lazy_re.compile(r'\[!\]\ntitle\t(?P<title>[^\n]+?)\n.+?\n\n',
re.MULTILINE | re.DOTALL)
# Matches a comment that provides a description for the message it occurs in.
_DESCRIPTION_COMMENT = lazy_re.compile(
r'<!--\s*desc\s*=\s*(?P<description>.+?)\s*-->', re.DOTALL)
# Matches a comment which is used to break apart multiple messages.
_MESSAGE_BREAK_COMMENT = lazy_re.compile(r'<!--\s*message-break\s*-->',
re.DOTALL)
# Matches a comment which is used to prevent block tags from splitting a message
_MESSAGE_NO_BREAK_COMMENT = re.compile(r'<!--\s*message-no-break\s*-->',
re.DOTALL)
_DEBUG = 0
def _DebugPrint(text):
if _DEBUG:
print(text.encode('utf-8'))
class HtmlChunks(object):
'''A parser that knows how to break an HTML-like document into a list of
chunks, where each chunk is either translateable or non-translateable.
The chunks are unmodified sections of the original document, so concatenating
the text of all chunks would result in the original document.'''
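  # Illustrative example (added, not from the original source): for an input
  # like '<p>Hello <b>world</b>!</p><script>f();</script>', Parse() yields
  # chunks roughly like
  #   [(False, '<p>', ''),
  #    (True, 'Hello <b>world</b>!', ''),
  #    (False, '</p><script>f();</script>', '')]
  # i.e. inline markup stays inside translateable chunks while block tags and
  # scripts separate them.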
def InTranslateable(self):
return self.last_translateable != -1
def Rest(self):
return self.text_[self.current:]
def StartTranslateable(self):
assert not self.InTranslateable()
if self.current != 0:
# Append a nontranslateable chunk
chunk_text = self.text_[self.chunk_start : self.last_nontranslateable + 1]
# Needed in the case where document starts with a translateable.
if len(chunk_text) > 0:
self.AddChunk(False, chunk_text)
self.chunk_start = self.last_nontranslateable + 1
self.last_translateable = self.current
self.last_nontranslateable = -1
def EndTranslateable(self):
assert self.InTranslateable()
# Append a translateable chunk
self.AddChunk(True,
self.text_[self.chunk_start : self.last_translateable + 1])
self.chunk_start = self.last_translateable + 1
self.last_translateable = -1
self.last_nontranslateable = self.current
def AdvancePast(self, match):
self.current += match.end()
def AddChunk(self, translateable, text):
'''Adds a chunk to self, removing linebreaks and duplicate whitespace
if appropriate.
'''
m = _DESCRIPTION_COMMENT.search(text)
if m:
self.last_description = m.group('description')
# Remove the description from the output text
text = _DESCRIPTION_COMMENT.sub('', text)
m = _MESSAGE_BREAK_COMMENT.search(text)
if m:
      # Remove the comment from the output text. It should already effectively
      # break apart messages.
text = _MESSAGE_BREAK_COMMENT.sub('', text)
    if translateable and self.last_element_ not in _PREFORMATTED_TAGS:
if self.fold_whitespace_:
# Fold whitespace sequences if appropriate. This is optional because it
# alters the output strings.
text = _FOLD_WHITESPACE.sub(' ', text)
else:
text = text.replace('\n', ' ')
text = text.replace('\r', ' ')
# This whitespace folding doesn't work in all cases, thus the
# fold_whitespace flag to support backwards compatibility.
        text = text.replace('   ', ' ')
        text = text.replace('  ', ' ')
if translateable:
description = self.last_description
self.last_description = ''
else:
description = ''
if text != '':
self.chunks_.append((translateable, text, description))
def Parse(self, text, fold_whitespace):
'''Parses self.text_ into an intermediate format stored in self.chunks_
which is translateable and nontranslateable chunks. Also returns
self.chunks_
Args:
text: The HTML for parsing.
fold_whitespace: Whether whitespace sequences should be folded into a
single space.
Return:
[chunk1, chunk2, chunk3, ...] (instances of class Chunk)
'''
#
# Chunker state
#
self.text_ = text
self.fold_whitespace_ = fold_whitespace
# A list of tuples (is_translateable, text) which represents the document
# after chunking.
self.chunks_ = []
# Start index of the last chunk, whether translateable or not
self.chunk_start = 0
# Index of the last for-sure translateable character if we are parsing
# a translateable chunk, -1 to indicate we are not in a translateable chunk.
# This is needed so that we don't include trailing whitespace in the
# translateable chunk (whitespace is neutral).
self.last_translateable = -1
# Index of the last for-sure nontranslateable character if we are parsing
# a nontranslateable chunk, -1 if we are not in a nontranslateable chunk.
# This is needed to make sure we can group e.g. "<b>Hello</b> there"
# together instead of just "Hello</b> there" which would be much worse
# for translation.
self.last_nontranslateable = -1
# Index of the character we're currently looking at.
self.current = 0
# The name of the last block element parsed.
self.last_element_ = ''
# The last explicit description we found.
self.last_description = ''
# Whether no-break was the last chunk seen
self.last_nobreak = False
while self.current < len(self.text_):
_DebugPrint('REST: %s' % self.text_[self.current:self.current+60])
m = _MESSAGE_NO_BREAK_COMMENT.match(self.Rest())
if m:
self.AdvancePast(m)
self.last_nobreak = True
continue
# Try to match whitespace
m = _WHITESPACE.match(self.Rest())
if m:
# Whitespace is neutral, it just advances 'current' and does not switch
# between translateable/nontranslateable. If we are in a
# nontranslateable section that extends to the current point, we extend
# it to include the whitespace. If we are in a translateable section,
# we do not extend it until we find
# more translateable parts, because we never want a translateable chunk
# to end with whitespace.
if (not self.InTranslateable() and
self.last_nontranslateable == self.current - 1):
self.last_nontranslateable = self.current + m.end() - 1
self.AdvancePast(m)
continue
# Then we try to match nontranslateables
m = _NONTRANSLATEABLES.match(self.Rest())
if m:
if self.InTranslateable():
self.EndTranslateable()
self.last_nontranslateable = self.current + m.end() - 1
self.AdvancePast(m)
continue
# Now match all other HTML element tags (opening, closing, or empty, we
# don't care).
m = _ELEMENT.match(self.Rest())
if m:
element_name = m.group('element').lower()
if element_name in _BLOCK_TAGS:
self.last_element_ = element_name
if self.InTranslateable():
if self.last_nobreak:
self.last_nobreak = False
else:
self.EndTranslateable()
# Check for "special" elements, i.e. ones that have a translateable
# attribute, and handle them correctly. Note that all of the
# "special" elements are block tags, so no need to check for this
# if the tag is not a block tag.
sm = _SPECIAL_ELEMENT.match(self.Rest())
if sm:
# Get the appropriate group name
for group in sm.groupdict():
if sm.groupdict()[group]:
break
# First make a nontranslateable chunk up to and including the
# quote before the translateable attribute value
self.AddChunk(False, self.text_[
self.chunk_start : self.current + sm.start(group)])
# Then a translateable for the translateable bit
self.AddChunk(True, self.Rest()[sm.start(group) : sm.end(group)])
# Finally correct the data invariant for the parser
self.chunk_start = self.current + sm.end(group)
self.last_nontranslateable = self.current + m.end() - 1
elif self.InTranslateable():
# We're in a translateable and the tag is an inline tag, so we
# need to include it in the translateable.
self.last_translateable = self.current + m.end() - 1
self.AdvancePast(m)
continue
# Anything else we find must be translateable, so we advance one character
# at a time until one of the above matches.
if not self.InTranslateable():
self.StartTranslateable()
else:
self.last_translateable = self.current
self.current += 1
# Close the final chunk
if self.InTranslateable():
self.AddChunk(True, self.text_[self.chunk_start : ])
else:
self.AddChunk(False, self.text_[self.chunk_start : ])
return self.chunks_
def HtmlToMessage(html, include_block_tags=False, description=''):
'''Takes a bit of HTML, which must contain only "inline" HTML elements,
and changes it into a tclib.Message. This involves escaping any entities and
replacing any HTML code with placeholders.
If include_block_tags is true, no error will be given if block tags (e.g.
<p> or <br>) are included in the HTML.
Args:
html: 'Hello <b>[USERNAME]</b>, how <i>are</i> you?'
include_block_tags: False
Return:
tclib.Message('Hello START_BOLD1USERNAMEEND_BOLD, '
'howNBSPSTART_ITALICareEND_ITALIC you?',
[ Placeholder('START_BOLD', '<b>', ''),
Placeholder('USERNAME', '[USERNAME]', ''),
Placeholder('END_BOLD', '</b>', ''),
Placeholder('START_ITALIC', '<i>', ''),
Placeholder('END_ITALIC', '</i>', ''), ])
'''
# Approach is:
# - first placeholderize, finding <elements>, [REPLACEABLES] and
# - then escape all character entities in text in-between placeholders
parts = [] # List of strings (for text chunks) and tuples (ID, original)
# for placeholders
count_names = {} # Map of base names to number of times used
end_names = {} # Map of base names to stack of end tags (for correct nesting)
def MakeNameClosure(base, type = ''):
'''Returns a closure that can be called once all names have been allocated
to return the final name of the placeholder. This allows us to minimally
number placeholders for non-overlap.
Also ensures that END_XXX_Y placeholders have the same Y as the
corresponding BEGIN_XXX_Y placeholder when we have nested tags of the same
type.
Args:
base: 'phname'
type: '' | 'begin' | 'end'
Return:
Closure()
'''
name = base.upper()
if type != '':
name = ('%s_%s' % (type, base)).upper()
count_names.setdefault(name, 0)
count_names[name] += 1
def MakeFinalName(name_ = name, index = count_names[name] - 1):
if type.lower() == 'end' and end_names.get(base):
return end_names[base].pop(-1) # For correct nesting
if count_names[name_] != 1:
name_ = '%s_%s' % (name_, _SUFFIXES[index])
# We need to use a stack to ensure that the end-tag suffixes match
# the begin-tag suffixes. Only needed when more than one tag of the
# same type.
if type == 'begin':
end_name = ('END_%s_%s' % (base, _SUFFIXES[index])).upper()
if base in end_names:
end_names[base].append(end_name)
else:
end_names[base] = [end_name]
return name_
return MakeFinalName
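  # Illustrative note (added): a single pair of inline tags keeps unsuffixed
  # placeholder names, while two pairs of the same tag in one message get
  # _1/_2 suffixes; the end_names stack above keeps nested end tags matched
  # to the suffix of their corresponding begin tags.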
current = 0
last_nobreak = False
while current < len(html):
m = _MESSAGE_NO_BREAK_COMMENT.match(html[current:])
if m:
last_nobreak = True
current += m.end()
continue
m = _NBSP.match(html[current:])
if m:
parts.append((MakeNameClosure('SPACE'), m.group()))
current += m.end()
continue
m = _REPLACEABLE.match(html[current:])
if m:
# Replaceables allow - but placeholders don't, so replace - with _
ph_name = MakeNameClosure('X_%s_X' % m.group('name').replace('-', '_'))
parts.append((ph_name, m.group()))
current += m.end()
continue
m = _SPECIAL_ELEMENT.match(html[current:])
if m:
if not include_block_tags:
if last_nobreak:
last_nobreak = False
else:
raise exception.BlockTagInTranslateableChunk(html)
element_name = 'block' # for simplification
# Get the appropriate group name
for group in m.groupdict():
if m.groupdict()[group]:
break
parts.append((MakeNameClosure(element_name, 'begin'),
html[current : current + m.start(group)]))
parts.append(m.group(group))
parts.append((MakeNameClosure(element_name, 'end'),
html[current + m.end(group) : current + m.end()]))
current += m.end()
continue
m = _ELEMENT.match(html[current:])
if m:
element_name = m.group('element').lower()
if not include_block_tags and not element_name in _INLINE_TAGS:
if last_nobreak:
last_nobreak = False
else:
raise exception.BlockTagInTranslateableChunk(html[current:])
if element_name in _HTML_PLACEHOLDER_NAMES: # use meaningful names
element_name = _HTML_PLACEHOLDER_NAMES[element_name]
# Make a name for the placeholder
type = ''
if not m.group('empty'):
if m.group('closing'):
type = 'end'
else:
type = 'begin'
parts.append((MakeNameClosure(element_name, type), m.group()))
current += m.end()
continue
if len(parts) and isinstance(parts[-1], six.string_types):
parts[-1] += html[current]
else:
parts.append(html[current])
current += 1
msg_text = ''
placeholders = []
for part in parts:
if isinstance(part, tuple):
final_name = part[0]()
original = part[1]
msg_text += final_name
placeholders.append(tclib.Placeholder(final_name, original, '(HTML code)'))
else:
msg_text += part
msg = tclib.Message(text=msg_text, placeholders=placeholders,
description=description)
content = msg.GetContent()
for ix in range(len(content)):
if isinstance(content[ix], six.string_types):
content[ix] = util.UnescapeHtml(content[ix], replace_nbsp=False)
return msg
class TrHtml(interface.GathererBase):
'''Represents a document or message in the template format used by
Total Recall for HTML documents.'''
def __init__(self, *args, **kwargs):
super(TrHtml, self).__init__(*args, **kwargs)
self.have_parsed_ = False
self.skeleton_ = [] # list of strings and MessageClique objects
self.fold_whitespace_ = False
def SetAttributes(self, attrs):
'''Sets node attributes used by the gatherer.
This checks the fold_whitespace attribute.
Args:
attrs: The mapping of node attributes.
'''
self.fold_whitespace_ = ('fold_whitespace' in attrs and
attrs['fold_whitespace'] == 'true')
def GetText(self):
'''Returns the original text of the HTML document'''
return self.text_
def GetTextualIds(self):
return [self.extkey]
def GetCliques(self):
'''Returns the message cliques for each translateable message in the
document.'''
return [x for x in self.skeleton_ if isinstance(x, clique.MessageClique)]
def Translate(self, lang, pseudo_if_not_available=True,
skeleton_gatherer=None, fallback_to_english=False):
'''Returns this document with translateable messages filled with
the translation for language 'lang'.
Args:
lang: 'en'
pseudo_if_not_available: True
Return:
      The HTML document as a string, with each translateable message replaced
      by its translation for 'lang'.
Raises:
grit.exception.NotReady() if used before Parse() has been successfully
called.
grit.exception.NoSuchTranslation() if 'pseudo_if_not_available' is false
and there is no translation for the requested language.
'''
if len(self.skeleton_) == 0:
raise exception.NotReady()
# TODO(joi) Implement support for skeleton gatherers here.
out = []
for item in self.skeleton_:
if isinstance(item, six.string_types):
out.append(item)
else:
msg = item.MessageForLanguage(lang,
pseudo_if_not_available,
fallback_to_english)
for content in msg.GetContent():
if isinstance(content, tclib.Placeholder):
out.append(content.GetOriginal())
else:
# We escape " characters to increase the chance that attributes
# will be properly escaped.
out.append(util.EscapeHtml(content, True))
return ''.join(out)
def Parse(self):
if self.have_parsed_:
return
self.have_parsed_ = True
text = self._LoadInputFile()
# Ignore the BOM character if the document starts with one.
if text.startswith(u'\ufeff'):
text = text[1:]
self.text_ = text
# Parsing is done in two phases: First, we break the document into
# translateable and nontranslateable chunks. Second, we run through each
# translateable chunk and insert placeholders for any HTML elements,
# unescape escaped characters, etc.
# First handle the silly little [!]-prefixed header because it's not
# handled by our HTML parsers.
m = _SILLY_HEADER.match(text)
if m:
self.skeleton_.append(text[:m.start('title')])
self.skeleton_.append(self.uberclique.MakeClique(
tclib.Message(text=text[m.start('title'):m.end('title')])))
self.skeleton_.append(text[m.end('title') : m.end()])
text = text[m.end():]
chunks = HtmlChunks().Parse(text, self.fold_whitespace_)
for chunk in chunks:
if chunk[0]: # Chunk is translateable
self.skeleton_.append(self.uberclique.MakeClique(
HtmlToMessage(chunk[1], description=chunk[2])))
else:
self.skeleton_.append(chunk[1])
# Go through the skeleton and change any messages that consist solely of
# placeholders and whitespace into nontranslateable strings.
for ix in range(len(self.skeleton_)):
got_text = False
if isinstance(self.skeleton_[ix], clique.MessageClique):
msg = self.skeleton_[ix].GetMessage()
for item in msg.GetContent():
if (isinstance(item, six.string_types)
and _NON_WHITESPACE.search(item) and item != ' '):
got_text = True
break
if not got_text:
self.skeleton_[ix] = msg.GetRealContent()
def SubstituteMessages(self, substituter):
'''Applies substitutions to all messages in the tree.
Goes through the skeleton and finds all MessageCliques.
Args:
substituter: a grit.util.Substituter object.
'''
new_skel = []
for chunk in self.skeleton_:
if isinstance(chunk, clique.MessageClique):
old_message = chunk.GetMessage()
new_message = substituter.SubstituteMessage(old_message)
if new_message is not old_message:
new_skel.append(self.uberclique.MakeClique(new_message))
continue
new_skel.append(chunk)
self.skeleton_ = new_skel
|
|
# Copyright 2014 ARM Limited
#
# Licensed under the Apache License, Version 2.0
# See LICENSE file for details.
# standard library modules, , ,
import os
import subprocess
import tempfile
import logging
import hgapi
import errno
# fsutils, , misc filesystem utils, internal
import fsutils
git_logger = logging.getLogger('git')
hg_logger = logging.getLogger('hg')
class VCSError(Exception):
def __init__(self, message, returncode=None):
super(VCSError, self).__init__(message)
self.returncode = returncode
class VCS(object):
@classmethod
def cloneToTemporaryDir(cls, remote):
raise NotImplementedError()
@classmethod
def cloneToDirectory(cls, remote, directory, tag=None):
raise NotImplementedError()
def isClean(self):
raise NotImplementedError()
def commit(self, message, tag=None):
raise NotImplementedError()
def tags(self):
raise NotImplementedError()
def markForCommit(self, path):
pass
def remove(self):
raise NotImplementedError()
def __nonzero__(self):
raise NotImplementedError()
# python 3 truthiness
def __bool__(self):
return self.__nonzero__()
class Git(VCS):
def __init__(self, path):
self.worktree = path
self.gitdir = os.path.join(path, '.git')
@classmethod
def cloneToTemporaryDir(cls, remote):
return cls.cloneToDirectory(remote, tempfile.mkdtemp())
@classmethod
def cloneToDirectory(cls, remote, directory, tag=None):
commands = [
['git', 'clone', remote, directory]
]
cls._execCommands(commands)
r = Git(directory)
if tag is not None:
r.updateToTag(tag)
return r
def fetchAllBranches(self):
remote_branches = []
local_branches = []
# list remote branches
out, err = self._execCommands([self._gitCmd('branch', '-r')])
for line in out.split(b'\n'):
branch_info = line.split(b' -> ')
# skip HEAD:
if len(branch_info) > 1:
continue
remote_branch = branch_info[0].strip()
branch = b'/'.join(remote_branch.split(b'/')[1:])
remote_branches.append((remote_branch, branch))
# list already-existing local branches
out, err = self._execCommands([self._gitCmd('branch')])
for line in out.split(b'\n'):
local_branches.append(line.strip(b' *'))
for remote, branchname in remote_branches:
# don't try to replace existing local branches
if branchname in local_branches:
continue
try:
out, err = self._execCommands([
self._gitCmd('checkout', '-b', branchname, remote)
])
except VCSError as e:
git_logger.error('failed to fetch remote branch %s %s' % (remote, branchname))
raise
def remove(self):
fsutils.rmRf(self.worktree)
def workingDirectory(self):
return self.worktree
def _gitCmd(self, *args):
        return ['git',
                '--work-tree=%s' % self.worktree,
                '--git-dir=%s' % self.gitdir.replace('\\', '/')] + list(args)
@classmethod
def _execCommands(cls, commands):
out, err = None, None
for cmd in commands:
try:
child = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
except OSError as e:
if e.errno == errno.ENOENT:
if cmd[0] == 'git':
raise VCSError(
'git is not installed, or not in your path. Please follow the installation instructions at http://docs.yottabuild.org/#installing'
)
else:
raise VCSError('%s is not installed' % (cmd[0]))
else:
                    raise VCSError('command %s failed: %s' % (cmd, e))
out, err = child.communicate()
returncode = child.returncode
if returncode:
raise VCSError("command failed: %s:%s" % (cmd, err or out), returncode=returncode)
return out, err
def isClean(self):
commands = [
self._gitCmd('diff', '--quiet', '--exit-code'),
self._gitCmd('diff', '--cached', '--quiet', '--exit-code'),
]
try:
out, err = self._execCommands(commands)
except VCSError as e:
if e.returncode:
return False
else:
raise
return True
def markForCommit(self, relative_path):
commands = [
self._gitCmd('add', os.path.join(self.worktree, relative_path)),
]
self._execCommands(commands)
def updateToTag(self, tag):
commands = [
self._gitCmd('checkout', tag),
]
self._execCommands(commands)
def tags(self):
commands = [
self._gitCmd('tag', '-l')
]
out, err = self._execCommands(commands)
        # Git commit messages are utf-8 encoded; tag names have no documented
        # encoding, so utf-8 is assumed here as well.
return out.decode('utf-8').split(u'\n')
def branches(self):
commands = [
self._gitCmd('branch', '--list')
]
out, err = self._execCommands(commands)
return [x.lstrip(' *') for x in out.decode('utf-8').split('\n')]
def commit(self, message, tag=None):
commands = [
self._gitCmd('commit', '-m', message),
]
if tag:
commands.append(
self._gitCmd('tag', tag),
)
self._execCommands(commands)
def __nonzero__(self):
return True
# FIXME: hgapi will throw HgException when something goes wrong; it may be
# worth catching that in some methods.
class HG(VCS):
def __init__(self, path):
self.worktree = path
self.repo = hgapi.Repo(path)
@classmethod
def cloneToTemporaryDir(cls, remote):
return cls.cloneToDirectory(remote, tempfile.mkdtemp())
@classmethod
def cloneToDirectory(cls, remote, directory, tag=None):
# hg doesn't automatically create the directories needed by destination
try:
os.makedirs(directory)
        except OSError:
            # the directory may already exist
            pass
hg_logger.debug('will clone %s into %s', remote, directory)
hgapi.Repo.hg_clone(remote, directory)
r = HG(directory)
if tag is not None:
r.updateToTag(tag)
return r
def remove(self):
fsutils.rmRf(self.worktree)
def workingDirectory(self):
return self.worktree
def isClean(self):
return not bool(self.repo.hg_status(empty=True))
def markForCommit(self, relative_path):
self.repo.hg_add(os.path.join(self.worktree, relative_path))
def updateToTag(self, tag):
self.repo.hg_update(tag)
def tags(self):
l = list(self.repo.hg_tags().keys())
l.remove('tip')
return l
def commit(self, message, tag=None):
self.repo.hg_commit(message)
if tag:
self.repo.hg_tag(tag)
def __nonzero__(self):
return True
def getVCS(path):
# crude heuristic, does the job...
if os.path.exists(os.path.join(path, '.git')):
return Git(path)
if os.path.isdir(os.path.join(path, '.hg')):
return HG(path)
return None
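# Illustrative usage sketch (added; the path and filenames are hypothetical):
#   vcs = getVCS('/path/to/working/copy')
#   if vcs is not None and not vcs.isClean():
#       vcs.markForCommit('module.json')
#       vcs.commit('update module description', tag='v0.0.1')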
|
|
import ini
import var
import irc
from tools import is_identified, is_number
from tools import trim, nsfw_check
# Action strings lists.
add_strings = [
"-a", "-add", "--add",
"-s", "-set", "--set"
]
del_strings = [
"-rm", "-remove", "--remove",
"-del", "-delete", "--delete"
]
rep_strings = [
"-re", "-replace", "--replace",
"-rp"
]
# URL database command usage, for a quick .help entry.
command_usage = [
"{} - See your {n} list.",
"{} n - See nth {n} in your list.",
"{} user - See user's {n} list.",
"{} user n - See user's nth {n}.",
" ",
"{} -add url1 url2 ... - Add a list of urls to your {n} list.",
"You can use {} to add lists of urls.".format(", ".join(add_strings)),
" ",
"{} -remove n1,n2,... - Remove a list of indexes from your list.",
"You can use {} to remove lists of indexes.".format(", ".join(del_strings)),
"{} -rm * will remove all of your saved {n}s.",
" ",
"{} -replace n url - Replace your nth {n} with url.",
"You can use {} to replace urls.".format(", ".join(rep_strings))
]
# Syntax for providing help.
syntax = {
tuple(add_strings):"{} {} url1 url2 ...",
tuple(del_strings):"{} {} n1,n2 n3 ...",
tuple(rep_strings):"{} {} n url"
}
# Functions
# User-URL database can only be altered by identified users.
def ident (f):
def check (user, channel, word):
try:
if word[1] in [
"-a", "-add", "--add",
"-rm", "-remove", "--remove",
"-re", "-replace", "--replace"
] and not is_identified(user):
irc.msg(channel, "{}: Identify with NickServ first.".format(user))
else:
f(user, channel, word)
except IndexError:
f(user, channel, word)
return check
# Returns a namespace with the functions used in different commands.
def namespace (url_dictionary, dictionary_name, filename,
section_name, max = 5):
f_namespace = type("namespace", (object,), {})()
f_namespace.list_function = list_function(
url_dictionary = url_dictionary,
dictionary_name = dictionary_name
)
f_namespace.add_function = add_function(
url_dictionary = url_dictionary,
dictionary_name = dictionary_name,
section_name = section_name,
filename = filename,
max = max
)
f_namespace.delete_function = delete_function(
url_dictionary = url_dictionary,
dictionary_name = dictionary_name,
filename = filename,
section_name = section_name
)
f_namespace.replace_function = replace_function(
url_dictionary = url_dictionary,
dictionary_name = dictionary_name,
filename = filename,
section_name = section_name,
max = max
)
return f_namespace
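# Illustrative sketch (added; the dictionary, filename and section name below
# are hypothetical): a command module might wire its handlers up like
#   links = namespace(url_dictionary = {}, dictionary_name = "links",
#                     filename = "urls.ini", section_name = "Links")
# and then dispatch to links.list_function, links.add_function,
# links.delete_function or links.replace_function depending on the action
# string in word[1].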
# Returns the function we'll need for the URL database listing command.
def list_function (url_dictionary, dictionary_name):
# Responsible for listing URLs.
# Can accept username and/or a number as parameter.
def list_urls (user, channel, word):
target = False
number = False
# Grab values from message.
if len(word) == 1:
target = user
elif len(word) == 2:
target = word[1] if not is_number(word[1]) else user
number = word[1] if is_number(word[1]) else False
elif len(word) >= 3:
target = word[1]
number = word[2]
# Check if it's a URL or a number.
if target.startswith("http://") or target.startswith("https://"):
irc.msg(channel, "Did you mean to use -a {}?".format(target))
return
elif number and target.isdigit():
irc.msg(channel, "Did you mean to use -re {} {}?".format(target, number))
return
# Check if an action string was misused.
if target.startswith("-"):
# Grab tuple containing string.
commands = ()
for str_list in syntax:
if target in str_list:
commands = str_list
break
if commands in syntax:
line = syntax[commands].format(word[0], target)
irc.msg(channel, "Syntax for {}: {}".format(target, line))
else:
irc.msg(channel, "{}: Unknown command.".format(user))
return
# Case insensitive check.
for nick in url_dictionary:
if target.lower() == nick.lower():
target = nick
break
# Throw a message if the target isn't in the URL database.
if target not in url_dictionary:
err_msg = "You don't" if target == user else "{} doesn't".format(target)
irc.msg(channel, "{} have any {} saved.".format(err_msg, dictionary_name))
return
# Check if the received number is a valid integer.
if (not is_number(number)) or ((int(number) if number else 1) < 1):
irc.msg(channel, "{}: Invalid number.".format(user))
return
# Finally gather results and display them.
if number:
# Turning number into a numeric value.
number = int(number) - 1
if number >= len(url_dictionary[target]):
err_msg = "You don't" if target == user else "{} doesn't".format(target)
irc.msg(channel, "{} have that many {}.".format(err_msg, dictionary_name))
else:
# Looking for NSFW URLs. (as indicated by '!')
if "!" in url_dictionary[target][number]:
line = "[\x034NSFW\x0f] "
else:
line = "[{}] ".format(number + 1)
line += "{} [{}]".format(url_dictionary[target][number].strip("!"), target)
irc.msg(channel, line)
else:
url_list = ["[{}] {}".format(ind+1, url) for ind, url in \
enumerate(url_dictionary[target])]
# Looking for NSFW URLs. (as indicated by '!')
url_list = map(nsfw_check, url_list)
line = " ".join(url_list) + " [{}]".format(target)
irc.msg(channel, line)
return list_urls
# Return the add_url function. Can receive a different max number.
def add_function (url_dictionary, dictionary_name, filename,
section_name, max = 5):
# Add a list of URLs to the database. Will require NickServ authentication.
def add_url (user, channel, word):
a_list = [url for url in word[2:] if url.startswith("http://") \
or url.startswith("https://")]
if not a_list:
irc.msg(channel, "{}: Links have to start with \"http://\" or \"https://\".".format(user))
return
# Case insensitive check.
for nick in url_dictionary:
if user.lower() == nick.lower():
user = nick
break
# Create an empty list for a new user.
if user not in url_dictionary:
url_dictionary[user] = []
# Or check if existing user already has max URLs saved.
elif len(url_dictionary[user]) == max:
irc.msg(channel, "{}: You already have {} {} saved.".format(user, max, dictionary_name))
return
# Fill saved list until it reaches max URLs.
for url in a_list:
if len(url_dictionary[user]) < max:
url_dictionary[user].append(trim(url))
else:
break
ini.add_to_ini(section = section_name, option = user,
data = '\n'.join(url_dictionary[user]), filename = filename)
irc.msg(channel, "{}: {} added.".format(user, section_name))
return add_url
# Returns the delete function.
def delete_function (url_dictionary, dictionary_name, filename, section_name):
# Removes URLs from the user's list. Will require NickServ authentication.
def delete_url (user, channel, word):
# Make list of indexes to remove from the user's list.
del_list = [int(x) - 1 for x in word[2].split(',') if \
(is_number(x) and int(x) > 0)]
del_list += [int(x) - 1 for x in word[2:] if \
(is_number(x) and int(x) > 0)]
# Case insensitive check.
for nick in url_dictionary:
if user.lower() == nick.lower():
user = nick
break
# Wildcard removes everything saved for that user from the database.
if word[2] == "*" and user in url_dictionary:
del url_dictionary[user]
ini.remove_from_ini(section = section_name, option = user, filename = filename)
irc.msg(channel, "{}: All of your {} were removed successfully.".format(
user, dictionary_name))
return
# The list only needs numbers.
if not del_list:
irc.msg(channel, "{}: Invalid number(s).".format(user))
return
# The user must be in the database to be able to remove URLs.
if user not in url_dictionary:
irc.msg(channel, "{}: You don't have any {} saved.".format(
user, dictionary_name))
return
# Copy contents of indexed list in database to deletion list.
for index, number in enumerate(del_list):
if len(url_dictionary[user]) > number:
del_list[index] = url_dictionary[user][number]
# Proceed to remove them one by one.
for entry in del_list:
if entry in url_dictionary[user]:
url_dictionary[user].remove(entry)
# Delete entry in database for an empty list and remove user from ini file.
if not url_dictionary[user]:
del url_dictionary[user]
ini.remove_from_ini(section = section_name, option = user, filename = filename)
irc.msg(channel, "{}: All of your {} were removed successfully.".format(
user, dictionary_name))
return
ini.add_to_ini(section = section_name, option = user,
data = '\n'.join(url_dictionary[user]), filename = filename)
irc.msg(channel, "{}: {} deleted.".format(user, section_name))
return delete_url
# Return the replace function for the URL database command.
# Can also accept a different max number.
def replace_function (url_dictionary, dictionary_name, filename,
section_name, max = 5):
# Replace a URL in the user's list.
# Will require NickServ authentication.
def replace_url (user, channel, word):
# This command receives two pieces of information.
if len(word) < 4:
irc.msg(channel, "{}: Wrong syntax. Check .help".format(user))
return
# The first must be a number.
if not is_number(word[2]):
irc.msg(channel, "{}: Invalid number.".format(user))
return
# The second must be a URL.
if not (word[3].startswith("http://") or word[3].startswith("https://")):
irc.msg(channel, "{}: Invalid URL.".format(user))
return
# Turn it into a numeric value.
number = int(word[2]) - 1
# Check if it's in the max range, and no negative indexes will be accepted.
if number > max-1 or number < 0:
irc.msg(channel, "{}: Invalid number.".format(user))
return
# Ignore case.
for nick in url_dictionary:
if user.lower() == nick.lower():
user = nick
break
# Try to replace URL using received number.
try:
url_dictionary[user][number] = trim(word[3])
ini.add_to_ini(section = section_name, option = user,
data = '\n'.join(url_dictionary[user]), filename = filename)
irc.msg(channel, "{}: {} replaced.".format(user, section_name.rstrip("s")))
# It might not work, if the list isn't long enough.
except IndexError:
irc.msg(channel, "{}: Invalid number.".format(user))
# And it won't work if the user isn't in the URL database.
except KeyError:
irc.msg(channel, "{}: You don't have any {} saved.".format(user, dictionary_name))
return replace_url
|
|
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from mock import Mock, MagicMock, patch, mock_open
import pexpect
from trove.common.exception import GuestError, ProcessExecutionError
from trove.common import utils
from trove.guestagent import volume
from trove.tests.unittests import trove_testtools
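# Test helper: spawns a real 'echo' once, mocks out its expect() method, and
# replaces pexpect.spawn with a Mock returning that instance, so the tests
# below can count expect() calls without driving a real interactive process.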
def _setUp_fake_spawn(return_val=0):
fake_spawn = pexpect.spawn('echo')
fake_spawn.expect = Mock(return_value=return_val)
pexpect.spawn = Mock(return_value=fake_spawn)
return fake_spawn
class VolumeDeviceTest(trove_testtools.TestCase):
def setUp(self):
super(VolumeDeviceTest, self).setUp()
self.volumeDevice = volume.VolumeDevice('/dev/vdb')
def tearDown(self):
super(VolumeDeviceTest, self).tearDown()
@patch.object(pexpect, 'spawn', Mock())
def test_migrate_data(self):
origin_execute = utils.execute
utils.execute = Mock()
origin_os_path_exists = os.path.exists
os.path.exists = Mock()
fake_spawn = _setUp_fake_spawn()
origin_unmount = self.volumeDevice.unmount
self.volumeDevice.unmount = MagicMock()
self.volumeDevice.migrate_data('/')
self.assertEqual(1, fake_spawn.expect.call_count)
self.assertEqual(1, utils.execute.call_count)
self.assertEqual(1, self.volumeDevice.unmount.call_count)
utils.execute = origin_execute
self.volumeDevice.unmount = origin_unmount
os.path.exists = origin_os_path_exists
def test__check_device_exists(self):
origin_execute = utils.execute
utils.execute = Mock()
self.volumeDevice._check_device_exists()
self.assertEqual(1, utils.execute.call_count)
utils.execute = origin_execute
def test_fail__check_device_exists(self):
with patch.object(utils, 'execute', side_effect=ProcessExecutionError):
self.assertRaises(GuestError,
self.volumeDevice._check_device_exists)
@patch.object(pexpect, 'spawn', Mock())
def test__check_format(self):
fake_spawn = _setUp_fake_spawn()
self.volumeDevice._check_format()
self.assertEqual(1, fake_spawn.expect.call_count)
@patch.object(pexpect, 'spawn', Mock())
def test__check_format_2(self):
fake_spawn = _setUp_fake_spawn(return_val=1)
self.assertEqual(0, fake_spawn.expect.call_count)
self.assertRaises(IOError, self.volumeDevice._check_format)
@patch.object(pexpect, 'spawn', Mock())
def test__format(self):
fake_spawn = _setUp_fake_spawn()
self.volumeDevice._format()
self.assertEqual(1, fake_spawn.expect.call_count)
self.assertEqual(1, pexpect.spawn.call_count)
def test_format(self):
origin_check_device_exists = self.volumeDevice._check_device_exists
origin_format = self.volumeDevice._format
origin_check_format = self.volumeDevice._check_format
self.volumeDevice._check_device_exists = MagicMock()
self.volumeDevice._check_format = MagicMock()
self.volumeDevice._format = MagicMock()
self.volumeDevice.format()
self.assertEqual(1, self.volumeDevice._check_device_exists.call_count)
self.assertEqual(1, self.volumeDevice._format.call_count)
self.assertEqual(1, self.volumeDevice._check_format.call_count)
self.volumeDevice._check_device_exists = origin_check_device_exists
self.volumeDevice._format = origin_format
self.volumeDevice._check_format = origin_check_format
def test_mount(self):
origin_ = volume.VolumeMountPoint.mount
volume.VolumeMountPoint.mount = Mock()
origin_os_path_exists = os.path.exists
os.path.exists = Mock()
origin_write_to_fstab = volume.VolumeMountPoint.write_to_fstab
volume.VolumeMountPoint.write_to_fstab = Mock()
self.volumeDevice.mount(Mock)
self.assertEqual(1, volume.VolumeMountPoint.mount.call_count)
self.assertEqual(1, volume.VolumeMountPoint.write_to_fstab.call_count)
volume.VolumeMountPoint.mount = origin_
volume.VolumeMountPoint.write_to_fstab = origin_write_to_fstab
os.path.exists = origin_os_path_exists
def test_resize_fs(self):
origin_check_device_exists = self.volumeDevice._check_device_exists
origin_execute = utils.execute
utils.execute = Mock()
self.volumeDevice._check_device_exists = MagicMock()
origin_os_path_exists = os.path.exists
os.path.exists = Mock()
self.volumeDevice.resize_fs('/mnt/volume')
self.assertEqual(1, self.volumeDevice._check_device_exists.call_count)
self.assertEqual(2, utils.execute.call_count)
self.volumeDevice._check_device_exists = origin_check_device_exists
os.path.exists = origin_os_path_exists
utils.execute = origin_execute
@patch.object(os.path, 'ismount', return_value=True)
@patch.object(utils, 'execute', side_effect=ProcessExecutionError)
def test_fail_resize_fs(self, mock_execute, mock_mount):
with patch.object(self.volumeDevice, '_check_device_exists'):
self.assertRaises(GuestError,
self.volumeDevice.resize_fs, '/mnt/volume')
self.assertEqual(1,
self.volumeDevice._check_device_exists.call_count)
self.assertEqual(1, mock_mount.call_count)
def test_unmount_positive(self):
self._test_unmount()
def test_unmount_negative(self):
self._test_unmount(False)
@patch.object(pexpect, 'spawn', Mock())
def _test_unmount(self, positive=True):
origin_ = os.path.exists
os.path.exists = MagicMock(return_value=positive)
fake_spawn = _setUp_fake_spawn()
self.volumeDevice.unmount('/mnt/volume')
COUNT = 1
if not positive:
COUNT = 0
self.assertEqual(COUNT, fake_spawn.expect.call_count)
os.path.exists = origin_
@patch.object(utils, 'execute', return_value=('/var/lib/mysql', ''))
def test_mount_points(self, mock_execute):
mount_point = self.volumeDevice.mount_points('/dev/vdb')
self.assertEqual(['/var/lib/mysql'], mount_point)
@patch.object(utils, 'execute', side_effect=ProcessExecutionError)
def test_fail_mount_points(self, mock_execute):
self.assertRaises(GuestError, self.volumeDevice.mount_points,
'/mnt/volume')
def test_set_readahead_size(self):
origin_check_device_exists = self.volumeDevice._check_device_exists
self.volumeDevice._check_device_exists = MagicMock()
mock_execute = MagicMock(return_value=None)
readahead_size = 2048
self.volumeDevice.set_readahead_size(readahead_size,
execute_function=mock_execute)
        # verify the blockdev call recorded on the execute mock
        mock_execute.assert_any_call("sudo", "blockdev", "--setra",
                                     readahead_size, "/dev/vdb")
self.volumeDevice._check_device_exists = origin_check_device_exists
def test_fail_set_readahead_size(self):
mock_execute = MagicMock(side_effect=ProcessExecutionError)
readahead_size = 2048
with patch.object(self.volumeDevice, '_check_device_exists'):
self.assertRaises(GuestError, self.volumeDevice.set_readahead_size,
readahead_size, execute_function=mock_execute)
self.volumeDevice._check_device_exists.assert_any_call()
class VolumeMountPointTest(trove_testtools.TestCase):
def setUp(self):
super(VolumeMountPointTest, self).setUp()
self.volumeMountPoint = volume.VolumeMountPoint('/mnt/device',
'/dev/vdb')
def tearDown(self):
super(VolumeMountPointTest, self).tearDown()
@patch.object(pexpect, 'spawn', Mock())
def test_mount(self):
origin_ = os.path.exists
os.path.exists = MagicMock(return_value=False)
fake_spawn = _setUp_fake_spawn()
with patch.object(utils, 'execute_with_timeout'):
self.volumeMountPoint.mount()
self.assertEqual(1, os.path.exists.call_count)
self.assertEqual(1, utils.execute_with_timeout.call_count)
self.assertEqual(1, fake_spawn.expect.call_count)
os.path.exists = origin_
def test_write_to_fstab(self):
origin_execute = utils.execute
utils.execute = Mock()
m = mock_open()
with patch('%s.open' % volume.__name__, m, create=True):
self.volumeMountPoint.write_to_fstab()
self.assertEqual(1, utils.execute.call_count)
utils.execute = origin_execute
|
|
#!/usr/bin/env python
"""Test of "New Hunt" wizard."""
from grr.gui import runtests_test
from grr.lib import access_control
from grr.lib import aff4
from grr.lib import data_store
from grr.lib import flags
from grr.lib import output_plugin
from grr.lib import test_lib
from grr.lib.flows.general import processes
from grr.lib.rdfvalues import client as rdf_client
from grr.lib.rdfvalues import foreman as rdf_foreman
from grr.lib.rdfvalues import paths as rdf_paths
class DummyOutputPlugin(output_plugin.OutputPlugin):
"""An output plugin that sends an email for each response received."""
name = "dummy"
description = "Dummy do do."
args_type = processes.ListProcessesArgs
def ProcessResponses(self, responses):
pass
class TestNewHuntWizard(test_lib.GRRSeleniumTest):
"""Test the Cron view GUI."""
@staticmethod
def FindForemanRules(hunt, token):
fman = aff4.FACTORY.Open("aff4:/foreman", mode="r", aff4_type="GRRForeman",
token=token)
hunt_rules = []
rules = fman.Get(fman.Schema.RULES, [])
for rule in rules:
for action in rule.actions:
if action.hunt_id == hunt.urn:
hunt_rules.append(rule)
return hunt_rules
@staticmethod
def CreateHuntFixtureWithTwoClients():
token = access_control.ACLToken(username="test", reason="test")
# Ensure that clients list is empty
root = aff4.FACTORY.Open(aff4.ROOT_URN, token=token)
for client_urn in root.ListChildren():
if aff4.VFSGRRClient.CLIENT_ID_RE.match(client_urn.Basename()):
data_store.DB.DeleteSubject(client_urn, token=token)
# Add 2 distinct clients
client_id = "C.1%015d" % 0
fd = aff4.FACTORY.Create(rdf_client.ClientURN(client_id), "VFSGRRClient",
token=token)
fd.Set(fd.Schema.SYSTEM("Windows"))
fd.Set(fd.Schema.CLOCK(2336650631137737))
fd.Close()
client_id = "C.1%015d" % 1
fd = aff4.FACTORY.Create(rdf_client.ClientURN(client_id), "VFSGRRClient",
token=token)
fd.Set(fd.Schema.SYSTEM("Linux"))
fd.Set(fd.Schema.CLOCK(2336650631137737))
fd.Close()
def setUp(self):
super(TestNewHuntWizard, self).setUp()
with self.ACLChecksDisabled():
# Create a Foreman with an empty rule set.
with aff4.FACTORY.Create("aff4:/foreman", "GRRForeman", mode="rw",
token=self.token) as self.foreman:
self.foreman.Set(self.foreman.Schema.RULES())
self.foreman.Close()
def testNewHuntWizard(self):
with self.ACLChecksDisabled():
self.CreateHuntFixtureWithTwoClients()
# Open up and click on View Hunts.
self.Open("/")
self.WaitUntil(self.IsElementPresent, "client_query")
self.WaitUntil(self.IsElementPresent, "css=a[grrtarget=ManageHunts]")
self.Click("css=a[grrtarget=ManageHunts]")
self.WaitUntil(self.IsElementPresent, "css=button[name=NewHunt]")
# Open up "New Hunt" wizard
self.Click("css=button[name=NewHunt]")
self.WaitUntil(self.IsTextPresent, "What to run?")
# Click on Filesystem item in flows list
self.WaitUntil(self.IsElementPresent, "css=#_Filesystem > ins.jstree-icon")
self.Click("css=#_Filesystem > ins.jstree-icon")
# Click on the FileFinder item in Filesystem flows list
self.Click("link=File Finder")
# Wait for flow configuration form to be rendered (just wait for first
# input field).
self.WaitUntil(self.IsElementPresent,
"css=grr-new-hunt-wizard-form label:contains('Paths')")
# Change "path" and "pathtype" values
self.Type("css=grr-new-hunt-wizard-form "
"grr-form-proto-repeated-field:has(label:contains('Paths')) "
"input", "/tmp")
self.Select("css=grr-new-hunt-wizard-form "
"grr-form-proto-single-field:has(label:contains('Pathtype')) "
"select", "TSK")
# Click on "Next" button
self.Click("css=grr-new-hunt-wizard-form button.Next")
self.WaitUntil(self.IsTextPresent, "Output Processing")
# Click on "Back" button and check that all the values in the form
# remain intact.
self.Click("css=grr-new-hunt-wizard-form button.Back")
self.WaitUntil(self.IsElementPresent,
"css=grr-new-hunt-wizard-form label:contains('Paths')")
self.assertEqual("/tmp", self.GetValue(
"css=grr-new-hunt-wizard-form "
"grr-form-proto-repeated-field:has(label:contains('Paths')) input"))
self.assertEqual("TSK", self.GetSelectedLabel(
"css=grr-new-hunt-wizard-form "
"grr-form-proto-single-field:has(label:contains('Pathtype')) select"))
# Click on "Next" button
self.Click("css=grr-new-hunt-wizard-form button.Next")
self.WaitUntil(self.IsTextPresent, "Output Processing")
self.Click("css=grr-new-hunt-wizard-form button[name=Add]")
# Configure the hunt to send an email on results.
self.Select("css=grr-new-hunt-wizard-form select",
"DummyOutputPlugin")
self.Type(
"css=grr-new-hunt-wizard-form "
"grr-form-proto-single-field:has(label:contains('Filename Regex')) "
"input", "some regex")
# Click on "Next" button
self.Click("css=grr-new-hunt-wizard-form button.Next")
self.WaitUntil(self.IsTextPresent, "Where to run?")
# Create 3 foreman rules. Note that "Add" button adds rules to the beginning
# of a list. So we always use :nth(0) selector.
self.Select("css=grr-new-hunt-wizard-form div.Rule:nth(0) select",
"Regular Expression")
self.Select(
"css=grr-new-hunt-wizard-form div.Rule:nth(0) "
"grr-form-proto-single-field:has(label:contains('Attribute name')) "
"select", "System")
self.Type(
"css=grr-new-hunt-wizard-form div.Rule:nth(0) "
"grr-form-proto-single-field:has(label:contains('Attribute regex')) "
"input", "Linux")
self.Click("css=grr-new-hunt-wizard-form button[name=Add]")
self.Select("css=grr-new-hunt-wizard-form div.Rule:nth(0) select",
"Integer Rule")
self.Select(
"css=grr-new-hunt-wizard-form div.Rule:nth(0) "
"grr-form-proto-single-field:has(label:contains('Attribute name')) "
"select", "Clock")
self.Select(
"css=grr-new-hunt-wizard-form div.Rule:nth(0) "
"grr-form-proto-single-field:has(label:contains('Operator')) select",
"GREATER_THAN")
self.Type(
"css=grr-new-hunt-wizard-form div.Rule:nth(0) "
"grr-form-proto-single-field:has(label:contains('Value')) input",
"1336650631137737")
self.Click("css=grr-new-hunt-wizard-form button[name=Add]")
self.Select("css=grr-new-hunt-wizard-form div.Rule:nth(0) select",
"OS X")
# Click on "Back" button
self.Click("css=grr-new-hunt-wizard-form button.Back")
self.WaitUntil(self.IsTextPresent, "Output Processing")
# Click on "Next" button again and check that all the values that we've just
# entered remain intact.
self.Click("css=grr-new-hunt-wizard-form button.Next")
self.WaitUntil(self.IsTextPresent, "Where to run?")
# Click on "Next" button
self.Click("css=grr-new-hunt-wizard-form button.Next")
self.WaitUntil(self.IsTextPresent, "Review")
# Check that the arguments summary is present.
self.WaitUntil(self.IsTextPresent, "Paths")
self.WaitUntil(self.IsTextPresent, "/tmp")
# Check that output plugins are shown.
self.assertTrue(self.IsTextPresent("DummyOutputPlugin"))
self.assertTrue(self.IsTextPresent("some regex"))
# Check that rules summary is present.
self.assertTrue(self.IsTextPresent("Regex rules"))
# Click on "Run" button
self.Click("css=grr-new-hunt-wizard-form button.Next")
self.WaitUntil(self.IsTextPresent, "Created Hunt")
# Close the window and check that the hunt was created.
self.Click("css=button.Next")
    # Select the newly created hunt.
    self.Click("css=grr-hunts-list td:contains('GenericHunt')")
    # Check that correct details are displayed in the hunt details tab.
self.WaitUntil(self.IsTextPresent, "GenericHunt")
self.WaitUntil(self.IsTextPresent, "Flow args")
self.assertTrue(self.IsTextPresent("Paths"))
self.assertTrue(self.IsTextPresent("/tmp"))
self.assertTrue(self.IsTextPresent("DummyOutputPlugin"))
self.assertTrue(self.IsTextPresent("some regex"))
# Check that the hunt object was actually created
hunts_root = aff4.FACTORY.Open("aff4:/hunts", token=self.token)
hunts_list = list(hunts_root.OpenChildren())
self.assertEqual(len(hunts_list), 1)
# Check that the hunt was created with a correct flow
hunt = hunts_list[0]
self.assertEqual(hunt.state.args.flow_runner_args.flow_name,
"FileFinder")
self.assertEqual(hunt.state.args.flow_args.paths[0], "/tmp")
self.assertEqual(hunt.state.args.flow_args.pathtype,
rdf_paths.PathSpec.PathType.TSK)
# self.assertEqual(hunt.state.args.flow_args.ignore_errors, True)
    self.assertEqual(hunt.state.args.output_plugins[0].plugin_name,
                     "DummyOutputPlugin")
# Check that hunt was not started
self.assertEqual(hunt.Get(hunt.Schema.STATE), "PAUSED")
# Now try to start the hunt.
self.Click("css=button[name=RunHunt]")
# Note that hunt ACL controls are already tested in acl_manager_test.py.
# Run the hunt.
with self.ACLChecksDisabled():
with aff4.FACTORY.Open(hunt.urn, mode="rw", token=self.token) as hunt:
hunt.Run()
# Check that the hunt was created with correct rules
with self.ACLChecksDisabled():
hunt_rules = self.FindForemanRules(hunt, token=self.token)
self.assertEqual(len(hunt_rules), 1)
self.assertTrue(
abs(int(hunt_rules[0].expires - hunt_rules[0].created) -
31 * 24 * 60 * 60) <= 1)
self.assertEqual(len(hunt_rules[0].regex_rules), 2)
self.assertEqual(hunt_rules[0].regex_rules[0].path, "/")
self.assertEqual(hunt_rules[0].regex_rules[0].attribute_name, "System")
self.assertEqual(hunt_rules[0].regex_rules[0].attribute_regex, "Darwin")
self.assertEqual(hunt_rules[0].regex_rules[1].path, "/")
self.assertEqual(hunt_rules[0].regex_rules[1].attribute_name, "System")
self.assertEqual(hunt_rules[0].regex_rules[1].attribute_regex, "Linux")
self.assertEqual(len(hunt_rules[0].integer_rules), 1)
self.assertEqual(hunt_rules[0].integer_rules[0].path, "/")
self.assertEqual(hunt_rules[0].integer_rules[0].attribute_name, "Clock")
self.assertEqual(hunt_rules[0].integer_rules[0].operator,
rdf_foreman.ForemanAttributeInteger.Operator.GREATER_THAN)
self.assertEqual(hunt_rules[0].integer_rules[0].value, 1336650631137737)
def testOutputPluginsListEmptyWhenNoDefaultOutputPluginSet(self):
self.Open("/#main=ManageHunts")
self.Click("css=button[name=NewHunt]")
# Select "List Processes" flow.
self.Click("css=#_Processes > ins.jstree-icon")
self.Click("link=ListProcesses")
# There should be no dummy output plugin visible.
self.Click("css=grr-new-hunt-wizard-form button.Next")
self.WaitUntil(self.IsTextPresent, "Output Processing")
self.WaitUntilNot(self.IsTextPresent, "Dummy do do")
def testDefaultOutputPluginIsCorrectlyAddedToThePluginsList(self):
with test_lib.ConfigOverrider({
"AdminUI.new_hunt_wizard.default_output_plugin":
"DummyOutputPlugin"}):
self.Open("/#main=ManageHunts")
self.Click("css=button[name=NewHunt]")
# Select "List Processes" flow.
self.Click("css=#_Processes > ins.jstree-icon")
self.Click("link=ListProcesses")
# Dummy output plugin should be added by default.
self.Click("css=grr-new-hunt-wizard-form button.Next")
self.WaitUntil(self.IsTextPresent, "Output Processing")
self.WaitUntil(self.IsTextPresent, "DummyOutputPlugin")
def testLabelsHuntRuleDisplaysAvailableLabels(self):
with self.ACLChecksDisabled():
with aff4.FACTORY.Open("C.0000000000000001", aff4_type="VFSGRRClient",
mode="rw", token=self.token) as client:
client.AddLabels("foo", owner="owner1")
client.AddLabels("bar", owner="owner2")
self.Open("/#main=ManageHunts")
self.Click("css=button[name=NewHunt]")
# Select "List Processes" flow.
self.Click("css=#_Processes > ins.jstree-icon")
self.Click("link=ListProcesses")
# Click 'Next' to go to output plugins page.
self.Click("css=grr-new-hunt-wizard-form button.Next")
# Click 'Next' to go to hunt rules page.
self.Click("css=grr-new-hunt-wizard-form button.Next")
# Select 'Clients With Label' rule.
self.Select("css=grr-new-hunt-wizard-form div.Rule select",
"Clients With Label")
# Check that there's an option present for labels 'bar' (this option should
# be selected) and for label 'foo'.
self.WaitUntil(self.IsElementPresent,
"css=grr-new-hunt-wizard-form div.Rule "
".form-group:has(label:contains('Label')) "
"select option:selected[label=bar]")
self.WaitUntil(self.IsElementPresent,
"css=grr-new-hunt-wizard-form div.Rule "
".form-group:has(label:contains('Label')) "
"select option:not(:selected)[label=foo]")
def testLabelsHuntRuleCreatesForemanRegexRuleInResultingHunt(self):
with self.ACLChecksDisabled():
with aff4.FACTORY.Open("C.0000000000000001", mode="rw",
token=self.token) as client:
client.AddLabels("foo", owner="test")
self.Open("/#main=ManageHunts")
self.Click("css=button[name=NewHunt]")
# Select "List Processes" flow.
self.Click("css=#_Processes > ins.jstree-icon")
self.Click("link=ListProcesses")
# Click 'Next' to go to the output plugins page.
self.Click("css=grr-new-hunt-wizard-form button.Next")
# Click 'Next' to go to the hunt rules page.
self.Click("css=grr-new-hunt-wizard-form button.Next")
# Select 'Clients With Label' rule.
self.Select("css=grr-new-hunt-wizard-form div.Rule select",
"Clients With Label")
self.Select("css=grr-new-hunt-wizard-form div.Rule "
".form-group:has(label:contains('Label')) select", "foo")
# Click 'Next' to go to the hunt overview page. Check that generated regexp
# is displayed there.
self.Click("css=grr-new-hunt-wizard-form button.Next")
self.WaitUntil(self.IsTextPresent, "(.+,|\\A)foo(,.+|\\Z)")
# Click 'Next' to go to submit the hunt and wait until it's created.
self.Click("css=grr-new-hunt-wizard-form button.Next")
self.WaitUntil(self.IsTextPresent, "Created Hunt")
# Get hunt's rules.
with self.ACLChecksDisabled():
hunts_root = aff4.FACTORY.Open("aff4:/hunts", token=self.token)
hunts_list = list(hunts_root.OpenChildren(mode="rw"))
hunt = hunts_list[0]
hunt.Run() # Run the hunt so that rules are added to the foreman.
hunt_rules = self.FindForemanRules(hunt, token=self.token)
self.assertEqual(len(hunt_rules), 1)
self.assertEqual(len(hunt_rules[0].regex_rules), 1)
self.assertEqual(hunt_rules[0].regex_rules[0].path, "/")
self.assertEqual(hunt_rules[0].regex_rules[0].attribute_name, "Labels")
self.assertEqual(hunt_rules[0].regex_rules[0].attribute_regex,
"(.+,|\\A)foo(,.+|\\Z)")
def testLabelsHuntRuleMatchesCorrectClients(self):
with self.ACLChecksDisabled():
client_ids = self.SetupClients(10)
with self.ACLChecksDisabled():
with aff4.FACTORY.Open("C.0000000000000001", mode="rw",
token=self.token) as client:
client.AddLabels("foo", owner="owner1")
client.AddLabels("bar", owner="owner2")
with aff4.FACTORY.Open("C.0000000000000007", mode="rw",
token=self.token) as client:
client.AddLabels("bar", owner="GRR")
self.Open("/#main=ManageHunts")
self.Click("css=button[name=NewHunt]")
# Select "List Processes" flow.
self.Click("css=#_Processes > ins.jstree-icon")
self.Click("link=ListProcesses")
# Click 'Next' to go to the output plugins page and then to hunt rules page.
self.Click("css=grr-new-hunt-wizard-form button.Next")
self.Click("css=grr-new-hunt-wizard-form button.Next")
# Select 'Clients With Label' rule.
self.Select("css=grr-new-hunt-wizard-form div.Rule select",
"Clients With Label")
self.Select("css=grr-new-hunt-wizard-form div.Rule "
".form-group:has(label:contains('Label')) select", "foo")
# Click 'Next' to go to hunt overview page. Then click 'Next' to go to
# submit the hunt and wait until it's created.
self.Click("css=grr-new-hunt-wizard-form button.Next")
self.Click("css=grr-new-hunt-wizard-form button.Next")
self.WaitUntil(self.IsTextPresent, "Created Hunt")
with self.ACLChecksDisabled():
hunts_root = aff4.FACTORY.Open("aff4:/hunts", token=self.token)
hunts_list = list(hunts_root.OpenChildren(mode="rw"))
hunt = hunts_list[0]
hunt.Run() # Run the hunt so that rules are added to the foreman.
foreman = aff4.FACTORY.Open("aff4:/foreman", mode="rw", token=self.token)
for client_id in client_ids:
foreman.AssignTasksToClient(client_id)
# Check that hunt flow was started only on labeled clients.
for client_id in client_ids:
flows_count = len(list(aff4.FACTORY.Open(
client_id.Add("flows"), token=self.token).ListChildren()))
if (client_id == rdf_client.ClientURN("C.0000000000000001") or
client_id == rdf_client.ClientURN("C.0000000000000007")):
self.assertEqual(flows_count, 1)
else:
self.assertEqual(flows_count, 0)
def main(argv):
# Run the full test suite
runtests_test.SeleniumTestProgram(argv=argv)
if __name__ == "__main__":
flags.StartMain(main)
|
|
r"""Functions for exclusive $B\to P\ell^+\ell^-$ decays."""
import flavio
from math import sqrt,pi
from flavio.physics.bdecays.common import lambda_K, beta_l, meson_quark, meson_ff
from flavio.physics import ckm
from flavio.classes import AuxiliaryQuantity
from flavio.config import config
from flavio.physics.running import running
from flavio.physics.common import conjugate_par, conjugate_wc, add_dict
from flavio.physics.bdecays import matrixelements, angular
from flavio.physics.bdecays.wilsoncoefficients import get_wceff, get_wceff_lfv, wctot_dict
from flavio.classes import Observable, Prediction
import warnings
def prefactor(q2, par, B, P):
GF = par['GF']
scale = config['renormalization scale']['bpll']
alphaem = running.get_alpha(par, scale)['alpha_e']
di_dj = meson_quark[(B,P)]
xi_t = ckm.xi('t',di_dj)(par)
return 4*GF/sqrt(2)*xi_t*alphaem/(4*pi)
# form factors
def get_ff(q2, par, B, P):
ff_name = meson_ff[(B,P)] + ' form factor'
return AuxiliaryQuantity[ff_name].prediction(par_dict=par, wc_obj=None, q2=q2)
# get subleading hadronic contribution
def get_subleading(q2, wc_obj, par_dict, B, P, lep, cp_conjugate):
if q2 <= 9:
sub_name = B+'->'+P + 'll subleading effects at low q2'
return AuxiliaryQuantity[sub_name].prediction(par_dict=par_dict, wc_obj=wc_obj, q2=q2, cp_conjugate=cp_conjugate)
elif q2 > 14:
sub_name = B+'->'+P + 'll subleading effects at high q2'
return AuxiliaryQuantity[sub_name].prediction(par_dict=par_dict, wc_obj=wc_obj, q2=q2, cp_conjugate=cp_conjugate)
else:
return {}
def helicity_amps_ff(q2, wc_obj, par_dict, B, P, lep, cp_conjugate):
par = par_dict.copy()
if cp_conjugate:
par = conjugate_par(par)
scale = config['renormalization scale']['bpll']
label = meson_quark[(B,P)] + lep + lep # e.g. bsmumu, bdtautau
wc = wctot_dict(wc_obj, label, scale, par)
if cp_conjugate:
wc = conjugate_wc(wc)
wc_eff = get_wceff(q2, wc, par, B, P, lep, scale)
ml = par['m_'+lep]
mB = par['m_'+B]
mP = par['m_'+P]
mb = running.get_mb(par, scale)
N = prefactor(q2, par, B, P)
ff = get_ff(q2, par, B, P)
h = angular.helicity_amps_p(q2, mB, mP, mb, 0, ml, ml, ff, wc_eff, N)
return h
def helicity_amps(q2, wc_obj, par, B, P, lep):
if q2 >= 8.7 and q2 < 14 and lep != 'tau':
warnings.warn("The predictions in the region of narrow charmonium resonances are not meaningful")
return add_dict((
helicity_amps_ff(q2, wc_obj, par, B, P, lep, cp_conjugate=False),
get_subleading(q2, wc_obj, par, B, P, lep, cp_conjugate=False)
))
def helicity_amps_bar(q2, wc_obj, par, B, P, lep):
if q2 >= 8.7 and q2 < 14 and lep != 'tau':
warnings.warn("The predictions in the region of narrow charmonium resonances are not meaningful")
return add_dict((
helicity_amps_ff(q2, wc_obj, par, B, P, lep, cp_conjugate=True),
get_subleading(q2, wc_obj, par, B, P, lep, cp_conjugate=True)
))
def bpll_obs(function, q2, wc_obj, par, B, P, lep):
ml = par['m_'+lep]
mB = par['m_'+B]
mP = par['m_'+P]
if q2 <= (ml+ml)**2 or q2 > (mB-mP)**2:
return 0
scale = config['renormalization scale']['bpll']
mb = running.get_mb(par, scale)
h = helicity_amps(q2, wc_obj, par, B, P, lep)
J = angular.angularcoeffs_general_p(h, q2, mB, mP, mb, 0, ml, ml)
if lep == lep:
h_bar = helicity_amps_bar(q2, wc_obj, par, B, P, lep)
J_bar = angular.angularcoeffs_general_p(h_bar, q2, mB, mP, mb, 0, ml, ml)
else:
# for LFV decays, don't bother about the CP average. There is no strong phase.
J_bar = J
return function(J, J_bar)
def dGdq2(J):
return 2 * (J['a'] + J['c']/3.)
def AFB_num(J):
return J['b']
def FH_num(J):
return 2 * (J['a'] + J['c'])
def dGdq2_cpaverage(J, J_bar):
return (dGdq2(J) + dGdq2(J_bar))/2.
def dGdq2_cpdiff(J, J_bar):
return (dGdq2(J) - dGdq2(J_bar))/2.
def AFB_cpaverage_num(J, J_bar):
return (AFB_num(J) + AFB_num(J_bar))/2.
def FH_cpaverage_num(J, J_bar):
return (FH_num(J) + FH_num(J_bar))/2.
def bpll_obs_int(function, q2min, q2max, wc_obj, par, B, P, lep, epsrel=0.005):
def obs(q2):
return bpll_obs(function, q2, wc_obj, par, B, P, lep)
return flavio.math.integrate.nintegrate(obs, q2min, q2max, epsrel=epsrel)
def bpll_dbrdq2(q2, wc_obj, par, B, P, lep):
tauB = par['tau_'+B]
dBR = tauB * bpll_obs(dGdq2_cpaverage, q2, wc_obj, par, B, P, lep)
if P == 'pi0':
# factor of 1/2 for neutral pi due to pi = (uubar-ddbar)/sqrt(2)
return dBR / 2.
else:
return dBR
def bpll_dbrdq2_int(q2min, q2max, wc_obj, par, B, P, lep, epsrel=0.005):
def obs(q2):
return bpll_dbrdq2(q2, wc_obj, par, B, P, lep)
return flavio.math.integrate.nintegrate(obs, q2min, q2max, epsrel=epsrel)/(q2max-q2min)
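# Note: bpll_dbrdq2_int returns the *bin-averaged* differential branching
# ratio (the q2 integral divided by the bin width); bpll_dbrdq2_tot_func
# below multiplies by (q2max-q2min) again to recover the total branching
# ratio.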
# Functions returning functions needed for Prediction instances
def bpll_dbrdq2_int_func(B, P, lep):
def fct(wc_obj, par, q2min, q2max):
return bpll_dbrdq2_int(q2min, q2max, wc_obj, par, B, P, lep)
return fct
def bpll_dbrdq2_tot_func(B, P, lep):
def fct(wc_obj, par):
mB = par['m_'+B]
mP = par['m_'+P]
        ml = par['m_'+lep]
q2max = (mB-mP)**2
q2min = (ml+ml)**2
return bpll_dbrdq2_int(q2min, q2max, wc_obj, par, B, P, lep)*(q2max-q2min)
return fct
def bpll_dbrdq2_func(B, P, lep):
def fct(wc_obj, par, q2):
return bpll_dbrdq2(q2, wc_obj, par, B, P, lep)
return fct
def bpll_obs_int_ratio_func(func_num, func_den, B, P, lep):
def fct(wc_obj, par, q2min, q2max):
num = bpll_obs_int(func_num, q2min, q2max, wc_obj, par, B, P, lep)
if num == 0:
return 0
den = bpll_obs_int(func_den, q2min, q2max, wc_obj, par, B, P, lep)
return num/den
return fct
def bpll_obs_int_ratio_leptonflavour(func, B, P, lnum, lden):
def fct(wc_obj, par, q2min, q2max):
with warnings.catch_warnings():
warnings.filterwarnings("ignore", message="The QCDF corrections should not be trusted .*")
num = bpll_obs_int(func, q2min, q2max, wc_obj, par, B, P, lnum, epsrel=0.0005)
if num == 0:
return 0
denom = bpll_obs_int(func, q2min, q2max, wc_obj, par, B, P, lden, epsrel=0.0005)
return num/denom
return fct
def bpll_obs_ratio_leptonflavour(func, B, P, lnum, lden):
def fct(wc_obj, par, q2):
with warnings.catch_warnings():
warnings.filterwarnings("ignore", message="The QCDF corrections should not be trusted .*")
num = bpll_obs(func, q2, wc_obj, par, B, P, lnum)
if num == 0:
return 0
denom = bpll_obs(func, q2, wc_obj, par, B, P, lden)
return num/denom
return fct
def bpll_obs_ratio_func(func_num, func_den, B, P, lep):
def fct(wc_obj, par, q2):
num = bpll_obs(func_num, q2, wc_obj, par, B, P, lep)
if num == 0:
return 0
den = bpll_obs(func_den, q2, wc_obj, par, B, P, lep)
return num/den
return fct
# Observable and Prediction instances
_tex = {'e': 'e', 'mu': r'\mu', 'tau': r'\tau'}
_observables = {
'ACP': {'func_num': dGdq2_cpdiff, 'tex': r'A_\text{CP}', 'desc': 'Direct CP asymmetry'},
'AFB': {'func_num': AFB_cpaverage_num, 'tex': r'A_\text{FB}', 'desc': 'forward-backward asymmetry'},
'FH': {'func_num': FH_cpaverage_num, 'tex': r'F_H', 'desc': 'flat term'},
}
_hadr = {
'B0->K': {'tex': r"B^0\to K^0", 'B': 'B0', 'P': 'K0', },
'B+->K': {'tex': r"B^\pm\to K^\pm ", 'B': 'B+', 'P': 'K+', },
}
_hadr_lfv = {
'B0->K': {'tex': r"\bar B^0\to \bar K^0", 'B': 'B0', 'P': 'K0', },
'B+->K': {'tex': r"B^-\to K^-", 'B': 'B+', 'P': 'K+', },
'B0->pi': {'tex': r"\bar B^0\to \pi^0", 'B': 'B0', 'P': 'pi0', },
'B+->pi': {'tex': r"B^-\to \pi^-", 'B': 'B+', 'P': 'pi+', },
}
_tex_lfv = {'emu': r'e^+\mu^-', 'mue': r'\mu^+e^-',
'taue': r'\tau^+e^-', 'etau': r'e^+\tau^-',
'taumu': r'\tau^+\mu^-', 'mutau': r'\mu^+\tau^-',
'emu,mue': r'e^\pm\mu^\mp', 'etau,taue': r'e^\pm\tau^\mp',
'mutau,taumu': r'\mu^\pm\tau^\mp'}
for l in ['e', 'mu', 'tau']:
for M in _hadr.keys():
_process_tex = _hadr[M]['tex'] +_tex[l]+r"^+"+_tex[l]+r"^-"
_process_taxonomy = r'Process :: $b$ hadron decays :: FCNC decays :: $B\to P\ell^+\ell^-$ :: $' + _process_tex + r"$"
for obs in sorted(_observables.keys()):
_obs_name = "<" + obs + ">("+M+l+l+")"
_obs = Observable(name=_obs_name, arguments=['q2min', 'q2max'])
_obs.set_description('Binned ' + _observables[obs]['desc'] + r" in $" + _process_tex + r"$")
_obs.tex = r"$\langle " + _observables[obs]['tex'] + r"\rangle(" + _process_tex + r")$"
_obs.add_taxonomy(_process_taxonomy)
Prediction(_obs_name, bpll_obs_int_ratio_func(_observables[obs]['func_num'], dGdq2_cpaverage, _hadr[M]['B'], _hadr[M]['P'], l))
_obs_name = obs + "("+M+l+l+")"
_obs = Observable(name=_obs_name, arguments=['q2'])
_obs.set_description(_observables[obs]['desc'][0].capitalize() + _observables[obs]['desc'][1:] + r" in $" + _process_tex + r"$")
_obs.tex = r"$" + _observables[obs]['tex'] + r"(" + _process_tex + r")$"
_obs.add_taxonomy(_process_taxonomy)
Prediction(_obs_name, bpll_obs_ratio_func(_observables[obs]['func_num'], dGdq2_cpaverage, _hadr[M]['B'], _hadr[M]['P'], l))
# binned branching ratio
_obs_name = "<dBR/dq2>("+M+l+l+")"
_obs = Observable(name=_obs_name, arguments=['q2min', 'q2max'])
_obs.set_description(r"Binned differential branching ratio of $" + _process_tex + r"$")
_obs.tex = r"$\langle \frac{d\text{BR}}{dq^2} \rangle(" + _process_tex + r")$"
_obs.add_taxonomy(_process_taxonomy)
Prediction(_obs_name, bpll_dbrdq2_int_func(_hadr[M]['B'], _hadr[M]['P'], l))
# differential branching ratio
_obs_name = "dBR/dq2("+M+l+l+")"
_obs = Observable(name=_obs_name, arguments=['q2'])
_obs.set_description(r"Differential branching ratio of $" + _process_tex + r"$")
_obs.tex = r"$\frac{d\text{BR}}{dq^2}(" + _process_tex + r")$"
_obs.add_taxonomy(_process_taxonomy)
Prediction(_obs_name, bpll_dbrdq2_func(_hadr[M]['B'], _hadr[M]['P'], l))
# only for tau: total branching ratio
if l == 'tau':
_obs_name = "BR("+M+l+l+")"
_obs = Observable(name=_obs_name)
_obs.set_description(r"Branching ratio of $" + _process_tex + r"$")
_obs.tex = r"$\text{BR}(" + _process_tex + r")$"
_obs.add_taxonomy(_process_taxonomy)
Prediction(_obs_name, bpll_dbrdq2_tot_func(_hadr[M]['B'], _hadr[M]['P'], l))
# Lepton flavour ratios
for l in [('mu','e'), ('tau','mu'),]:
for M in _hadr.keys():
# binned ratio of BRs
_obs_name = "<R"+l[0]+l[1]+">("+M+"ll)"
_obs = Observable(name=_obs_name, arguments=['q2min', 'q2max'])
_obs.set_description(r"Ratio of partial branching ratios of $" + _hadr[M]['tex'] +_tex[l[0]]+r"^+ "+_tex[l[0]]+r"^-$" + " and " + r"$" + _hadr[M]['tex'] +_tex[l[1]]+r"^+ "+_tex[l[1]]+"^-$")
_obs.tex = r"$\langle R_{" + _tex[l[0]] + ' ' + _tex[l[1]] + r"} \rangle(" + _hadr[M]['tex'] + r"\ell^+\ell^-)$"
for li in l:
# add taxonomy for both processes (e.g. B->Pee and B->Pmumu)
_obs.add_taxonomy(r'Process :: $b$ hadron decays :: FCNC decays :: $B\to P\ell^+\ell^-$ :: $' + _hadr[M]['tex'] +_tex[li]+r"^+"+_tex[li]+r"^-$")
Prediction(_obs_name, bpll_obs_int_ratio_leptonflavour(dGdq2_cpaverage, _hadr[M]['B'], _hadr[M]['P'], *l))
# differential ratio of BRs
_obs_name = "R"+l[0]+l[1]+"("+M+"ll)"
_obs = Observable(name=_obs_name, arguments=['q2'])
_obs.set_description(r"Ratio of differential branching ratios of $" + _hadr[M]['tex'] +_tex[l[0]]+r"^+ "+_tex[l[0]]+r"^-$" + " and " + r"$" + _hadr[M]['tex'] +_tex[l[1]]+r"^+ "+_tex[l[1]]+"^-$")
_obs.tex = r"$R_{" + _tex[l[0]] + ' ' + _tex[l[1]] + r"}(" + _hadr[M]['tex'] + r"\ell^+\ell^-)$"
for li in l:
# add taxonomy for both processes (e.g. B->Pee and B->Pmumu)
_obs.add_taxonomy(r'Process :: $b$ hadron decays :: FCNC decays :: $B\to P\ell^+\ell^-$ :: $' + _hadr[M]['tex'] +_tex[li]+r"^+"+_tex[li]+r"^-$")
Prediction(_obs_name, bpll_obs_ratio_leptonflavour(dGdq2_cpaverage, _hadr[M]['B'], _hadr[M]['P'], *l))
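# Usage sketch (assumes the generic flavio prediction API; not part of this
# module): once these Observable/Prediction instances are registered, they
# can be evaluated by name, e.g.
#
#   import flavio
#   # bin-averaged dBR/dq2 of B+ -> K+ mu+ mu- in the [1, 6] GeV^2 bin
#   flavio.sm_prediction('<dBR/dq2>(B+->Kmumu)', 1, 6)
#   # lepton-flavour ratio R_{mu e} in the same bin
#   flavio.sm_prediction('<Rmue>(B+->Kll)', 1, 6)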
|
|
import locale
from dialog import Dialog
locale.setlocale(locale.LC_ALL, '')
import sys, os
import glob
import re
import subprocess
from time import sleep
import shutil
import configparser
import logging
import logging.handlers
from .interface import *
from .mount import *
from .wimlib import wiminfo
logger = logging.getLogger()
import urllib.request
from urllib.error import HTTPError, URLError
import socket
config_url = 'http://10.10.200.1/linux/kiwi/kiwi.conf'
config_timeout=0.1
class FailedInstallStep(Exception): pass
class WindowsInstallApp(object):
def __init__(self, config=None):
self.logger = logging.getLogger(__name__)
self.config = config
self.uefi = False
self.boot_part = ''
self.system_part = ''
self.image_path = ''
self.image_index = ''
self.boot_dir = '/mnt/boot'
self.system_dir = '/mnt/system'
self.mbr_disk_signature = '4D34B30F'
self.gpt_disk_signature = '572BD0E9-D39E-422C-82E6-F37157C3535D'
self.boot_partuuid = '8d03c7bb-6b0c-4223-aaa1-f20bf521cd6e'
self.system_partuuid = '57092450-f142-4749-b540-f2ec0a183b7b'
self.cluster_size = 4096
self.fs_compression = False
self.quick_format = True
self.d = Dialog(dialog='dialog')
self.d.set_background_title('KiWI: Killer Windows Installer')
self.source_dir = '/mnt/source/'
advanced_items = [
('Filesystem options', MenuItem(self.fs_options)),
]
advanced_submenu = Menu(self.d, advanced_items, title='Advanced Options')
main_menu_items = [
('Configure Networking', MenuItem(self.configure_network)),
('Prepare Storage Device', MenuItem(self.auto_prepare)),
('Select Installation Source', MenuItem(self.prepare_source)),
('Install OS', MenuItem(self.install_os)),
#('Install Bootloader', self.install_bootloader),
('Reboot', MenuItem(self.reboot)),
('---', MenuItem(separator=True)),
('Advanced Options', advanced_submenu),
]
self.running = True
self.main_menu = StatefulMenu(self.d, main_menu_items, title='Main Menu')
        # pass the exit method as the callback; calling it here would stop the
        # loop before the menu is ever shown
        while self.running: self.main_menu.run(ret=self.exit)
def sync(self):
self.d.infobox('Syncing buffered data\n\nDo NOT reboot!', width=30)
subprocess.check_call(['sync'])
def reboot(self):
self.sync()
subprocess.check_call(['reboot'])
def fs_options(self):
choices = [
('Quick Format', '', 'quick_format'),
('NTFS Compression', '', 'fs_compression'),
('Force GPT/EFI', '', 'uefi'),
]
code, selected = self.d.checklist('Filesystem Options', choices=[
(choice[0], choice[1], getattr(self, choice[2])) for choice in choices],
cancel_label='Back')
if code != self.d.OK: return
for item in choices:
tag = item[0]
var_name = item[2]
if tag in selected: setattr(self, var_name, True)
else: setattr(self, var_name, False)
    def test_network(self):
        # two quick pings to 8.8.8.8; exit code 0 means the network is reachable
        return subprocess.call(
            ['ping', '-c', '2', '-i', '0.2', '8.8.8.8'],
            stdout=subprocess.PIPE) == 0
def configure_network(self):
if not self.test_network():
rc = subprocess.call('nmtui', shell=True)
else:
self.d.msgbox('Network Configuration Successful', width=40, title='Network Status')
self.main_menu.advance()
def detect_blockdevs(self):
devices = []
p = subprocess.run(['lsblk', '-Ppd'], stdout=subprocess.PIPE)
for line in p.stdout.decode('UTF-8').split('\n'):
dev = {}
for p in line.split():
pair = p.split('=')
dev[pair[0]] = pair[1][1:-1]
# We don't need read-only devices
if 'RO' not in dev or dev['RO'] == '1': continue
devices.append(dev)
self.d.msgbox('Detected Devices:\n\n' + '\n'.join(
[' '.join([dev['NAME'], dev['SIZE']]) for dev in devices]))
self.devices = devices
def select_disk(self):
self.detect_blockdevs()
entries = [tuple([device['NAME'], '-']) for device in self.devices] + [('OTHER', '+')]
code, tag = self.d.menu('Choose an installation drive', choices=entries)
if code != self.d.OK: raise FailedInstallStep
if tag == 'OTHER':
code, tag = self.d.inputbox('Enter a path to a block device')
if code != self.d.OK:
raise FailedInstallStep
            # a block device is not a regular file, so only check that the
            # path exists; the S_ISBLK check below verifies the node type
            if not os.path.exists(tag):
                raise FailedInstallStep
import stat
mode = os.stat(tag).st_mode
if not stat.S_ISBLK(mode):
raise FailedInstallStep
code, confirmation = self.d.inputbox('This will erase ALL data on %s' % tag + \
'\n\nType \'YES\' to continue', width=40, height=15)
if code != self.d.OK or confirmation != 'YES': raise FailedInstallStep
self.install_drive = tag
self.logger.info('Block device {} selected for installation'.format(self.install_drive))
    def supports_uefi(self):
        # listing EFI variables only succeeds when booted in UEFI mode; wait
        # for the process to finish so returncode is actually populated
        p = subprocess.Popen(['efivar', '-l'], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        p.communicate()
        return p.returncode == 0
def auto_prepare(self):
try:
self.select_disk()
except FailedInstallStep:
self.d.msgbox('Disk selection failed. Please retry the step to prepare the storage device.', width=40)
raise
for dir in [self.system_dir, self.boot_dir]:
if mountpoint(dir): unmount(dir)
        # match the drive node and any of its partition nodes (e.g. /dev/sdb1)
        partitions = glob.glob(self.install_drive + '*')
for part in partitions:
logger.debug('Unmounting partition {}'.format(part))
try: unmount(part)
except subprocess.CalledProcessError: pass
self.auto_partition()
self.auto_format()
self.main_menu.advance()
    def auto_partition(self):
        # uefi_forced must be initialised here, otherwise the branch below
        # raises UnboundLocalError when UEFI support is auto-detected
        uefi_forced = False
        if self.uefi is False:
            self.uefi = self.supports_uefi()
        else:
            uefi_forced = True
        if self.uefi and not uefi_forced:
            self.logger.info('Detected machine booted with UEFI, using GPT')
        elif self.uefi and uefi_forced:
            self.logger.info('UEFI install forced, using GPT')
        else:
            self.logger.info('UEFI not supported, creating DOS partition table')
partition_table = 'msdos' if not self.uefi else 'gpt'
try:
subprocess.check_call(['parted', '-s', self.install_drive,
'mklabel', partition_table])
if self.uefi:
subprocess.check_call(['parted', '--align', 'optimal',
'-s', self.install_drive, '--',
                    'mkpart', 'ESP', 'fat32', '0%', '512',
'mkpart', 'Windows', 'NTFS', '512', '100%',
'set', '1', 'esp', 'on'])
else:
subprocess.check_call(['parted', '-s', self.install_drive, '--',
'mkpart', 'primary', 'NTFS', '2048s', '-1s',
'set', '1', 'boot', 'on'])
except subprocess.CalledProcessError:
self.d.msgbox('Partitioning/formatting failed. Please retry.', width=40)
raise FailedInstallStep
if self.uefi:
self.boot_part = self.install_drive + '1'
self.system_part = self.install_drive + '2'
else:
self.system_part = self.install_drive + '1'
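        # Resulting layout: UEFI/GPT installs get a ~512 MB FAT32 ESP as
        # partition 1 and an NTFS system partition as partition 2; BIOS/MBR
        # installs get a single bootable NTFS partition (see auto_format).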
def auto_format(self):
call = ['mkfs.ntfs']
call.append('-c')
call.append(str(self.cluster_size))
if self.fs_compression: call.append('-C')
if self.quick_format: call.append('-Q')
call.append(self.system_part)
        self.d.infobox('Formatting drive...')
        try:
            subprocess.check_call(call, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            if self.uefi:
                subprocess.check_call(['mkfs.msdos', '-F32', self.boot_part],
                                      stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        except subprocess.CalledProcessError:
            raise FailedInstallStep
        self.logger.info('Successfully formatted installation drive')
def prepare_source(self):
if not self.test_network():
self.configure_network()
source_items = [
('Network Filesystem (NFS)', MenuItem(self.prepare_nfs_source)),
('Windows Share (SMB/CIFS)', MenuItem(self.prepare_smb_source)),
#('Network Block Device (NBD)', MenuItem()),
('SCP/SFTP (SSH)', MenuItem(self.prepare_sshfs_source)),
('Block Device (USB, CD/DVD, etc.)', self.prepare_blk_source),
('---', MenuItem(separator=True)),
('OTHER (Path)', MenuItem(self.prepare_fs_source)),
]
try:
Menu(self.d, source_items, 'Select Installation Source', ret=None).run()
self.select_source()
except FailedInstallStep: raise
except subprocess.CalledProcessError:
self.d.msgbox('Mount Failed. Please retry the installation source selection step')
def prepare_nfs_source(self):
code, path = self.d.inputbox('Enter an NFS server or share',
init=self.config.get('source', 'default_nfs', fallback=''), width=40)
mount(path, self.source_dir, force=True, mkdir=True)
def prepare_smb_source(self):
code, path = self.d.inputbox(
'Enter an SMB share path in the format \'user@//server/share\'', width=40)
user, passwd, cred = '', '', ''
if '@' in path:
user, path = path.split('@')
code, passwd = self.d.passwordbox(
'Enter the share password, if applicable', width=40)
cred = 'password={},'.format(passwd)
if user: cred += 'username={},'.format(user)
mount(path, self.source_dir, options=cred, force=True, mkdir=True, fs_type='cifs')
def prepare_fs_source(self):
code, path = self.d.inputbox('Enter a UNIX path', width=40)
mount(path, self.source_dir, force=True, mkdir=True, bind=True)
def prepare_sshfs_source(self):
code, path = self.d.inputbox('Enter an SSHFS path, in the format user@server:/', width=40)
code, passwd = self.d.passwordbox('Enter the password', width=40)
try: os.makedirs(self.source_dir)
except FileExistsError: pass
if mountpoint(self.source_dir): unmount(self.source_dir)
disable_hostkey_check = ['-o', 'StrictHostKeyChecking=no']
call = ['sshfs', path, self.source_dir, '-o', 'password_stdin']
call += disable_hostkey_check
p = subprocess.Popen(call, stdin=subprocess.PIPE, stdout=open('/dev/null', 'w'))
p.communicate(input=passwd.encode('UTF-8'))
        if p.returncode != 0:
            raise subprocess.CalledProcessError(p.returncode, call)
def prepare_blk_source(self):
code, path = self.d.inputbox('Enter a block device path', width=40)
mount(path, self.source_dir, force=True, mkdir=True)
def select_source(self):
discovered_wims = glob.glob(self.source_dir + '**/*.wim', recursive=True)
        discovered_wims += glob.glob(self.source_dir + '**/*.esd', recursive=True)
#discovered_isos = glob.glob(self.source_dir + '**/.iso', recursive=True)
if not discovered_wims: # or discovered_isos:
self.d.msgbox('Failed to locate install sources. Check your media, and try again.', width=40)
raise FailedInstallStep
entries = [tuple([wim, '-']) for wim in discovered_wims]
code, tag = self.d.menu('Choose a WIM', choices=entries)
if code == self.d.OK: self.image_path = tag
else: raise FailedInstallStep
try:
entries = [
tuple([
image['Index'],
# Not every WIM has 'Display Name' defined
image.get('Display Name') or image.get('Description') + ' ' +
image.get('Architecture')
]) for image in wiminfo(self.image_path)]
except subprocess.CalledProcessError:
self.d.msgbox('Image is invalid or corrupt. Please retry the installation source step.', width=40)
raise FailedInstallStep
code, tag = self.d.menu('Choose an image', choices=entries, width=40)
if code == self.d.OK: self.image_index = tag
else: raise FailedInstallStep
self.main_menu.advance()
def install_os(self):
if not self.system_part:
self.auto_prepare()
self.main_menu.position -= 1
if not (self.image_path and self.image_index):
self.prepare_source()
self.main_menu.position -= 1
self.extract_wim(self.image_path, self.image_index, self.system_part)
self.sync()
self.install_bootloader()
self.main_menu.advance()
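    # extract_wim drives `wimlib-imagex apply` and parses its textual progress
    # output. It assumes (based on typical wimlib-imagex output; the exact
    # wording may differ between versions) progress lines such as
    #   "Creating files: 12% done" and "Extracting file data: 34% done",
    # from which the percentage is pulled out with the r'\d+%' regex.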
def extract_wim(self, wimfile, imageid, target):
r, w = os.pipe()
process = subprocess.Popen(['wimlib-imagex', 'apply', wimfile, imageid, target],
stdout=w, stderr=w)
filp = os.fdopen(r)
self.logger.info('Applying WIM...')
while True:
line = filp.readline()
self.logger.debug('Discarding line from WIM STDOUT: {}'.format(line))
if 'Creating files' in line: break
for stage in ['Creating files', 'Extracting file data']:
self.d.gauge_start(text=stage, width=80, percent=0)
            while True:
line = filp.readline()
self.logger.debug('Wim extraction STDOUT: {}'.format(line))
if stage not in line: continue
pct = re.search(r'\d+%', line).group(0)[:-1]
if pct:
self.d.gauge_update(int(pct))
if pct == '100': break
exit_code = self.d.gauge_stop()
def ntfs_hide(self, path):
subprocess.check_call(['setfattr', '-h', '-v', '0x02000000',
'-n', 'system.ntfs_attrib', path])
def install_bootloader(self):
from . import BCD
mount(self.system_part, self.system_dir, mkdir=True)
if not self.uefi:
self.write_mbr()
shutil.copytree(
os.path.join(self.system_dir, 'Windows/Boot/PCAT'),
os.path.join(self.system_dir, 'Boot'))
shutil.copy2(
os.path.join(self.system_dir, 'Boot/bootmgr'), self.system_dir)
for file in ['Boot', 'bootmgr']:
self.ntfs_hide(os.path.join(self.system_dir, file))
BCD.write_bcd(BCD.bios_bcd, os.path.join(self.system_dir, 'Boot/BCD'))
else:
mount(self.boot_part, self.boot_dir, mkdir=True)
subprocess.check_call(['sgdisk', self.install_drive,
'-U', self.gpt_disk_signature,
'-u 1:' + self.boot_partuuid,
'-u 2:' + self.system_partuuid])
for dir in ['Boot', 'Microsoft']:
os.makedirs(os.path.join(self.boot_dir, 'EFI/' + dir))
shutil.copytree(
os.path.join(self.system_dir, 'Windows/Boot/EFI'),
os.path.join(self.boot_dir, 'EFI/Microsoft/Boot'))
shutil.copyfile(
os.path.join(self.boot_dir, 'EFI/Microsoft/Boot/bootmgfw.efi'),
os.path.join(self.boot_dir, 'EFI/Boot/bootx64.efi'))
BCD.write_bcd(BCD.uefi_bcd,
os.path.join(self.boot_dir, 'EFI/Microsoft/Boot/BCD'))
def write_mbr(self):
subprocess.check_call(['ms-sys', '-S', self.mbr_disk_signature, '-7', self.install_drive],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
self.logger.info('MBR written to {}'.format(self.install_drive))
def exit(self):
self.running = False
def handle_exception(exc_type, exc_value, exc_traceback):
if issubclass(exc_type, KeyboardInterrupt):
sys.__excepthook__(exc_type, exc_value, exc_traceback)
return
logger.critical('Unhandled exception', exc_info=(exc_type, exc_value, exc_traceback))
sys.excepthook = handle_exception
if __name__ == '__main__':
logger = logging.getLogger()
logger.setLevel(logging.INFO)
fh = logging.FileHandler('/tmp/kiwi-install.log')
logger.addHandler(fh)
configdata = None
try:
configdata = urllib.request.urlopen(config_url, timeout=config_timeout).read().decode('UTF-8')
except (HTTPError, URLError):
logger.warning('Unable to fetch config file from URL {}'.format(config_url))
except socket.timeout:
logger.warning('Socket timed out while trying to fetch config')
    config = configparser.ConfigParser()
    # only parse the remote config if it was actually fetched
    if configdata:
        config.read_string(configdata)
app = WindowsInstallApp(config)
|
|
#
# Copyright (C) 2017 Dominik Murzynowski
#
# This software may be modified and distributed under the terms
# of the MIT license. See the LICENSE file for details.
#
from unittest.mock import patch
from pycnic.errors import HTTP_400, HTTP_404
from api.entity_type import EntityTypeHandler
from api.meta_attribute import MetaAttributeHandler
from api.series_attribute import SeriesAttributeHandler
from api.tag_attribute import TagAttributeHandler
from api.tree import EntityHandler
from database.helpers import get_all
from database.model import EntityType, TagAttribute, MetaAttribute, SeriesAttribute, Entity
from .test_utils import Session, AbstractTestWithDatabase, get_handler
@patch('api.entity_type.Session', new=Session)
class TestEntityTypePostHandler(AbstractTestWithDatabase):
def test_post_creates_object(self):
result = get_handler(EntityTypeHandler, {'name': 'foo', 'tags': [], 'series': [], 'meta': []}).post()
self.assertEqual({'success': True, 'ID': 1}, result)
self.assertEqual(len(get_all(Session(), EntityType)), 1)
self.assertEqual(len(get_all(Session(), TagAttribute)), 0)
self.assertEqual(len(get_all(Session(), MetaAttribute)), 0)
self.assertEqual(len(get_all(Session(), SeriesAttribute)), 0)
def test_post_with_attributes_creates_objects(self):
result = get_handler(EntityTypeHandler, {'name': 'foo', 'tags': ['tag1'], 'series': ['series1'],
'meta': ['meta1']}).post()
self.assertEqual({'success': True, 'ID': 1}, result)
self.assertEqual(len(get_all(Session(), EntityType)), 1)
self.assertEqual(len(get_all(Session(), TagAttribute)), 1)
self.assertEqual(len(get_all(Session(), MetaAttribute)), 1)
self.assertEqual(len(get_all(Session(), SeriesAttribute)), 1)
def test_invalid_request_raises(self):
handler = get_handler(EntityTypeHandler)
with self.assertRaises(HTTP_400):
handler.request._data = {'name': 'foo', 'tags': [], 'series': []} # no meta
handler.post()
with self.assertRaises(HTTP_400):
handler.request._data = {'name': 'foo', 'tags': [], 'meta': []} # no series
handler.post()
with self.assertRaises(HTTP_400):
handler.request._data = {'name': 'foo', 'series': [], 'meta': []} # no tags
handler.post()
with self.assertRaises(HTTP_400):
handler.request._data = {'tags': [], 'series': [], 'meta': []} # no name
handler.post()
self.assertEqual(len(get_all(Session(), EntityType)), 0)
@patch('api.meta_attribute.Session', new=Session)
@patch('api.validators.Session', new=Session)
class TestMetaAttributePostHandler(AbstractTestWithDatabase):
def setUp(self):
super().setUp()
# create entity type
with patch('api.entity_type.Session', new=Session):
get_handler(EntityTypeHandler, {'name': 'foo', 'tags': [], 'series': [], 'meta': ['meta_name']}).post()
def test_post_creates_object(self):
self.assertEqual(len(get_all(Session(), MetaAttribute)), 1)
result = get_handler(MetaAttributeHandler, {'name': 'foo'}).post(1)
self.assertEqual({'success': True, 'ID': 2}, result)
self.assertEqual(len(get_all(Session(), MetaAttribute)), 2)
def test_invalid_request_raises(self):
self.assertEqual(len(get_all(Session(), MetaAttribute)), 1)
handler = get_handler(MetaAttributeHandler)
with self.assertRaises(HTTP_400):
handler.request._data = {} # no name
handler.post(1)
with self.assertRaises(HTTP_400):
handler.request._data = {'name': 'meta_name'} # existing name
handler.post(1)
with self.assertRaises(HTTP_404):
handler.request._data = {'name': 'foo'}
handler.post(2) # non-existing entity type
self.assertEqual(len(get_all(Session(), MetaAttribute)), 1)
@patch('api.tag_attribute.Session', new=Session)
@patch('api.validators.Session', new=Session)
class TestTagAttributePostHandler(AbstractTestWithDatabase):
def setUp(self):
super().setUp()
# create entity type
with patch('api.entity_type.Session', new=Session):
get_handler(EntityTypeHandler, {'name': 'foo', 'tags': ['tag_name'], 'series': [], 'meta': []}).post()
def test_post_creates_object(self):
self.assertEqual(len(get_all(Session(), TagAttribute)), 1)
result = get_handler(TagAttributeHandler, {'name': 'foo'}).post(1)
self.assertEqual({'success': True, 'ID': 2}, result)
self.assertEqual(len(get_all(Session(), TagAttribute)), 2)
def test_invalid_request_raises(self):
self.assertEqual(len(get_all(Session(), TagAttribute)), 1)
handler = get_handler(TagAttributeHandler)
with self.assertRaises(HTTP_400):
handler.request._data = {} # no name
handler.post(1)
with self.assertRaises(HTTP_400):
handler.request._data = {'name': 'tag_name'} # existing name
handler.post(1)
with self.assertRaises(HTTP_404):
handler.request._data = {'name': 'foo'}
handler.post(2) # non-existing entity type
self.assertEqual(len(get_all(Session(), TagAttribute)), 1)
@patch('api.series_attribute.Session', new=Session)
@patch('api.validators.Session', new=Session)
class TestSeriesAttributePostHandler(AbstractTestWithDatabase):
def setUp(self):
super().setUp()
# create entity type
with patch('api.entity_type.Session', new=Session):
get_handler(EntityTypeHandler, {'name': 'foo', 'tags': [], 'series': ['series_name'], 'meta': []}).post()
def test_post_creates_object(self):
self.assertEqual(len(get_all(Session(), SeriesAttribute)), 1)
result = get_handler(SeriesAttributeHandler, {'name': 'foo'}).post(1)
self.assertEqual({'success': True, 'ID': 2}, result)
self.assertEqual(len(get_all(Session(), SeriesAttribute)), 2)
# post with more params
result = get_handler(SeriesAttributeHandler, {'name': 'foo_prim', 'type': 'enum', 'refresh_time': 3600}).post(1)
self.assertEqual({'success': True, 'ID': 3}, result)
self.assertEqual(len(get_all(Session(), SeriesAttribute)), 3)
def test_invalid_request_raises(self):
self.assertEqual(len(get_all(Session(), SeriesAttribute)), 1)
handler = get_handler(SeriesAttributeHandler)
with self.assertRaises(HTTP_400):
handler.request._data = {} # no name
handler.post(1)
with self.assertRaises(HTTP_400):
handler.request._data = {'name': 'foo', 'type': 'nop'} # bad type
handler.post(1)
with self.assertRaises(HTTP_400):
handler.request._data = {'name': 'foo', 'refresh_time': '2'} # bad refresh time
handler.post(1)
with self.assertRaises(HTTP_400):
handler.request._data = {'name': 'series_name'} # existing name
handler.post(1)
with self.assertRaises(HTTP_404):
handler.request._data = {'name': 'foo'}
handler.post(2) # non-existing entity type
self.assertEqual(len(get_all(Session(), SeriesAttribute)), 1)
@patch('api.tree.Session', new=Session)
class TestEntityPostHandler(AbstractTestWithDatabase):
def setUp(self):
super().setUp()
# create entity type
with patch('api.entity_type.Session', new=Session):
get_handler(EntityTypeHandler, {'name': 'foo', 'tags': ['t'], 'series': ['s'], 'meta': ['m']}).post()
def test_post_creates_object(self):
result = get_handler(EntityHandler, {'parent_id': None, 'entity_type_id': 1, 'tag_1': 'tag_val',
'meta_1': 'meta_val'}).post()
self.assertEqual({'success': True, 'ID': 1}, result)
self.assertEqual(len(get_all(Session(), Entity)), 1)
result = get_handler(EntityHandler, {'parent_id': 1, 'entity_type_id': 1, 'tag_1': 'tag_val_1',
'meta_1': 'meta_val'}).post()
self.assertEqual({'success': True, 'ID': 2}, result)
self.assertEqual(len(get_all(Session(), Entity)), 2)
def test_invalid_request_raises(self):
handler = get_handler(EntityHandler)
with self.assertRaises(HTTP_400):
handler.request._data = {'parent_id': None, 'entity_type_id': 1, 'tag_1': 'tag_val'} # no meta_1
handler.post()
with self.assertRaises(HTTP_400):
handler.request._data = {'parent_id': None, 'entity_type_id': 1, 'meta_1': 'meta_val'} # no tag_1
handler.post()
with self.assertRaises(HTTP_400):
handler.request._data = {'parent_id': None, 'entity_type_id': 1, 'tag_1': 't', 'meta_1': 'm',
'tag_10': 'f'} # unexpected tag_10
handler.post()
with self.assertRaises(HTTP_400):
handler.request._data = {'parent_id': None, 'entity_type_id': 1, 'tag_1': 't', 'meta_1': 'm',
'meta_10': 'f'} # unexpected meta_10
handler.post()
with self.assertRaises(HTTP_400):
handler.request._data = {'parent_id': None, 'tag_1': 't', 'meta_1': 'm'} # no entity type
handler.post()
with self.assertRaises(HTTP_400):
handler.request._data = {'entity_type_id': 1, 'tag_1': 't', 'meta_1': 'm'} # no parent_id
handler.post()
with self.assertRaises(HTTP_404):
handler.request._data = {'parent_id': 10, 'entity_type_id': 1,
'tag_1': 't', 'meta_1': 'm'} # non-existent parent id
handler.post()
with self.assertRaises(HTTP_404):
handler.request._data = {'parent_id': None, 'entity_type_id': 10} # non-existent entity id
handler.post()
self.assertEqual(len(get_all(Session(), Entity)), 0)
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Core Keras layers.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import types as python_types
import numpy as np
from tensorflow.python.framework import tensor_shape
from tensorflow.python.keras._impl.keras import activations
from tensorflow.python.keras._impl.keras import backend as K
from tensorflow.python.keras._impl.keras import constraints
from tensorflow.python.keras._impl.keras import initializers
from tensorflow.python.keras._impl.keras import regularizers
from tensorflow.python.keras._impl.keras.engine import InputSpec
from tensorflow.python.keras._impl.keras.engine import Layer
from tensorflow.python.keras._impl.keras.utils.generic_utils import deserialize_keras_object
from tensorflow.python.keras._impl.keras.utils.generic_utils import func_dump
from tensorflow.python.keras._impl.keras.utils.generic_utils import func_load
from tensorflow.python.keras._impl.keras.utils.generic_utils import has_arg
from tensorflow.python.layers import core as tf_core_layers
class Masking(Layer):
"""Masks a sequence by using a mask value to skip timesteps.
For each timestep in the input tensor (dimension #1 in the tensor),
if all values in the input tensor at that timestep
are equal to `mask_value`, then the timestep will be masked (skipped)
in all downstream layers (as long as they support masking).
If any downstream layer does not support masking yet receives such
an input mask, an exception will be raised.
Example:
  Consider a Numpy data array `x` of shape `(samples, timesteps, features)`,
  to be fed to an LSTM layer.
  You want to mask timesteps #3 and #5 because you lack data for
  these timesteps. You can:
- set `x[:, 3, :] = 0.` and `x[:, 5, :] = 0.`
- insert a `Masking` layer with `mask_value=0.` before the LSTM layer:
```python
model = Sequential()
model.add(Masking(mask_value=0., input_shape=(timesteps, features)))
model.add(LSTM(32))
```
"""
def __init__(self, mask_value=0., **kwargs):
super(Masking, self).__init__(**kwargs)
self.supports_masking = True
self.mask_value = mask_value
def compute_mask(self, inputs, mask=None):
return K.any(K.not_equal(inputs, self.mask_value), axis=-1)
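  # Note: for inputs of shape (samples, timesteps, features), compute_mask
  # reduces over the last axis and returns a boolean mask of shape
  # (samples, timesteps); call() below uses keepdims=True so masked timesteps
  # are zeroed element-wise without changing the input shape.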
def call(self, inputs):
boolean_mask = K.any(
K.not_equal(inputs, self.mask_value), axis=-1, keepdims=True)
return inputs * K.cast(boolean_mask, inputs.dtype)
def get_config(self):
config = {'mask_value': self.mask_value}
base_config = super(Masking, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
class Dropout(tf_core_layers.Dropout, Layer):
"""Applies Dropout to the input.
Dropout consists of randomly setting
a fraction `rate` of input units to 0 at each update during training time,
which helps prevent overfitting.
Arguments:
rate: float between 0 and 1. Fraction of the input units to drop.
noise_shape: 1D integer tensor representing the shape of the
binary dropout mask that will be multiplied with the input.
For instance, if your inputs have shape
`(batch_size, timesteps, features)` and
you want the dropout mask to be the same for all timesteps,
you can use `noise_shape=(batch_size, 1, features)`.
seed: A Python integer to use as random seed.
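Example:
A minimal usage sketch (the surrounding `Sequential` model and layer sizes
are illustrative, not part of this module):
```python
model = Sequential()
model.add(Dense(64, input_shape=(20,)))
model.add(Dropout(0.5))  # randomly zeroes half of the 64 units during training
model.add(Dense(10))
```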
"""
def __init__(self, rate, noise_shape=None, seed=None, **kwargs):
self.supports_masking = True
# Inheritance call order:
# 1) tf.layers.Dropout, 2) keras.layers.Layer, 3) tf.layers.Layer
super(Dropout, self).__init__(rate=rate,
noise_shape=noise_shape,
seed=seed,
**kwargs)
def call(self, inputs, training=None):
if training is None:
training = K.learning_phase()
output = super(Dropout, self).call(inputs, training=training)
if training is K.learning_phase():
output._uses_learning_phase = True # pylint: disable=protected-access
return output
def get_config(self):
config = {'rate': self.rate}
base_config = super(Dropout, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
class SpatialDropout1D(Dropout):
"""Spatial 1D version of Dropout.
This version performs the same function as Dropout, however it drops
entire 1D feature maps instead of individual elements. If adjacent frames
within feature maps are strongly correlated (as is normally the case in
early convolution layers) then regular dropout will not regularize the
activations and will otherwise just result in an effective learning rate
decrease. In this case, SpatialDropout1D will help promote independence
between feature maps and should be used instead.
Arguments:
rate: float between 0 and 1. Fraction of the input units to drop.
Input shape:
3D tensor with shape:
`(samples, timesteps, channels)`
Output shape:
Same as input
References:
- [Efficient Object Localization Using Convolutional
Networks](https://arxiv.org/abs/1411.4280)
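Example:
A minimal usage sketch (the `Conv1D` layer and shape names are illustrative
assumptions):
```python
model = Sequential()
model.add(Conv1D(64, 3, input_shape=(timesteps, features)))
model.add(SpatialDropout1D(0.5))  # drops entire 1D feature maps, not single units
```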
"""
def __init__(self, rate, **kwargs):
super(SpatialDropout1D, self).__init__(rate, **kwargs)
self.input_spec = InputSpec(ndim=3)
def _get_noise_shape(self, inputs):
input_shape = K.shape(inputs)
noise_shape = (input_shape[0], 1, input_shape[2])
return noise_shape
class SpatialDropout2D(Dropout):
"""Spatial 2D version of Dropout.
This version performs the same function as Dropout, however it drops
entire 2D feature maps instead of individual elements. If adjacent pixels
within feature maps are strongly correlated (as is normally the case in
early convolution layers) then regular dropout will not regularize the
activations and will otherwise just result in an effective learning rate
decrease. In this case, SpatialDropout2D will help promote independence
between feature maps and should be used instead.
Arguments:
rate: float between 0 and 1. Fraction of the input units to drop.
data_format: 'channels_first' or 'channels_last'.
In 'channels_first' mode, the channels dimension
(the depth) is at index 1,
in 'channels_last' mode it is at index 3.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be "channels_last".
Input shape:
4D tensor with shape:
`(samples, channels, rows, cols)` if data_format='channels_first'
or 4D tensor with shape:
`(samples, rows, cols, channels)` if data_format='channels_last'.
Output shape:
Same as input
References:
- [Efficient Object Localization Using Convolutional
Networks](https://arxiv.org/abs/1411.4280)
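Example:
A minimal usage sketch (the `Conv2D` layer and shape names are illustrative
assumptions; shapes follow 'channels_last'):
```python
model = Sequential()
model.add(Conv2D(32, (3, 3), input_shape=(rows, cols, channels)))
model.add(SpatialDropout2D(0.25))  # drops entire 2D feature maps, not single units
```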
"""
def __init__(self, rate, data_format=None, **kwargs):
super(SpatialDropout2D, self).__init__(rate, **kwargs)
if data_format is None:
data_format = K.image_data_format()
if data_format not in {'channels_last', 'channels_first'}:
raise ValueError('data_format must be in '
'{"channels_last", "channels_first"}')
self.data_format = data_format
self.input_spec = InputSpec(ndim=4)
def _get_noise_shape(self, inputs):
input_shape = K.shape(inputs)
if self.data_format == 'channels_first':
return (input_shape[0], input_shape[1], 1, 1)
elif self.data_format == 'channels_last':
return (input_shape[0], 1, 1, input_shape[3])
class SpatialDropout3D(Dropout):
"""Spatial 3D version of Dropout.
This version performs the same function as Dropout, however it drops
entire 3D feature maps instead of individual elements. If adjacent voxels
within feature maps are strongly correlated (as is normally the case in
early convolution layers) then regular dropout will not regularize the
activations and will otherwise just result in an effective learning rate
decrease. In this case, SpatialDropout3D will help promote independence
between feature maps and should be used instead.
Arguments:
rate: float between 0 and 1. Fraction of the input units to drop.
data_format: 'channels_first' or 'channels_last'.
In 'channels_first' mode, the channels dimension (the depth)
is at index 1, in 'channels_last' mode it is at index 4.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be "channels_last".
Input shape:
5D tensor with shape:
`(samples, channels, dim1, dim2, dim3)` if data_format='channels_first'
or 5D tensor with shape:
`(samples, dim1, dim2, dim3, channels)` if data_format='channels_last'.
Output shape:
Same as input
References:
- [Efficient Object Localization Using Convolutional
Networks](https://arxiv.org/abs/1411.4280)
"""
def __init__(self, rate, data_format=None, **kwargs):
super(SpatialDropout3D, self).__init__(rate, **kwargs)
if data_format is None:
data_format = K.image_data_format()
if data_format not in {'channels_last', 'channels_first'}:
raise ValueError('data_format must be in '
'{"channels_last", "channels_first"}')
self.data_format = data_format
self.input_spec = InputSpec(ndim=5)
def _get_noise_shape(self, inputs):
input_shape = K.shape(inputs)
if self.data_format == 'channels_first':
return (input_shape[0], input_shape[1], 1, 1, 1)
elif self.data_format == 'channels_last':
return (input_shape[0], 1, 1, 1, input_shape[4])
class Activation(Layer):
"""Applies an activation function to an output.
Arguments:
activation: name of activation function to use
or alternatively, a Theano or TensorFlow operation.
Input shape:
Arbitrary. Use the keyword argument `input_shape`
(tuple of integers, does not include the samples axis)
when using this layer as the first layer in a model.
Output shape:
Same shape as input.
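Example:
A minimal usage sketch (the surrounding model is an illustrative assumption):
```python
model = Sequential()
model.add(Dense(64, input_shape=(16,)))
model.add(Activation('relu'))
# the two lines above are equivalent to Dense(64, activation='relu', input_shape=(16,))
```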
"""
def __init__(self, activation, **kwargs):
super(Activation, self).__init__(**kwargs)
self.supports_masking = True
self.activation = activations.get(activation)
def call(self, inputs):
return self.activation(inputs)
def get_config(self):
config = {'activation': activations.serialize(self.activation)}
base_config = super(Activation, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
class Reshape(Layer):
"""Reshapes an output to a certain shape.
Arguments:
target_shape: target shape. Tuple of integers,
does not include the samples dimension (batch size).
Input shape:
Arbitrary, although all dimensions in the input shape must be fixed.
Use the keyword argument `input_shape`
(tuple of integers, does not include the samples axis)
when using this layer as the first layer in a model.
Output shape:
`(batch_size,) + target_shape`
Example:
```python
# as first layer in a Sequential model
model = Sequential()
model.add(Reshape((3, 4), input_shape=(12,)))
# now: model.output_shape == (None, 3, 4)
# note: `None` is the batch dimension
# as intermediate layer in a Sequential model
model.add(Reshape((6, 2)))
# now: model.output_shape == (None, 6, 2)
# also supports shape inference using `-1` as dimension
model.add(Reshape((-1, 2, 2)))
# now: model.output_shape == (None, 3, 2, 2)
```
"""
def __init__(self, target_shape, **kwargs):
super(Reshape, self).__init__(**kwargs)
self.target_shape = tuple(target_shape)
def _fix_unknown_dimension(self, input_shape, output_shape):
"""Find and replace a missing dimension in an output shape.
This is a near direct port of the internal Numpy function
`_fix_unknown_dimension` in `numpy/core/src/multiarray/shape.c`
Arguments:
input_shape: shape of array being reshaped
output_shape: desired shape of the array with at most
a single -1 which indicates a dimension that should be
derived from the input shape.
Returns:
The new output shape with a -1 replaced with its computed value.
Raises a ValueError if the total array size of the output_shape is
different than the input_shape, or if more than one unknown dimension
is specified.
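For example (illustrative): with `input_shape=(12,)` and
`output_shape=(-1, 4)`, the unknown dimension resolves to `12 // 4 = 3`,
so the returned shape is `[3, 4]`.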
Raises:
ValueError: in case of invalid values
for `input_shape` or `output_shape`.
"""
output_shape = list(output_shape)
msg = 'total size of new array must be unchanged'
known, unknown = 1, None
for index, dim in enumerate(output_shape):
if dim < 0:
if unknown is None:
unknown = index
else:
raise ValueError('Can only specify one unknown dimension.')
else:
known *= dim
original = np.prod(input_shape, dtype=int)
if unknown is not None:
if known == 0 or original % known != 0:
raise ValueError(msg)
output_shape[unknown] = original // known
elif original != known:
raise ValueError(msg)
return output_shape
def _compute_output_shape(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape).as_list()
output_shape = [input_shape[0]]
output_shape += self._fix_unknown_dimension(input_shape[1:],
self.target_shape)
return tensor_shape.TensorShape(output_shape)
def call(self, inputs):
# In case the target shape is not fully defined,
# we need access to the shape of x.
target_shape = self.target_shape
if -1 in target_shape:
# target shape not fully defined
target_shape = self._compute_output_shape(inputs.get_shape())
target_shape = target_shape.as_list()[1:]
return K.reshape(inputs, (-1,) + tuple(target_shape))
def get_config(self):
config = {'target_shape': self.target_shape}
base_config = super(Reshape, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
class Permute(Layer):
"""Permutes the dimensions of the input according to a given pattern.
Useful for e.g. connecting RNNs and convnets together.
Example:
```python
model = Sequential()
model.add(Permute((2, 1), input_shape=(10, 64)))
# now: model.output_shape == (None, 64, 10)
# note: `None` is the batch dimension
```
Arguments:
dims: Tuple of integers. Permutation pattern, does not include the
samples dimension. Indexing starts at 1.
For instance, `(2, 1)` permutes the first and second dimension
of the input.
Input shape:
Arbitrary. Use the keyword argument `input_shape`
(tuple of integers, does not include the samples axis)
when using this layer as the first layer in a model.
Output shape:
Same as the input shape, but with the dimensions re-ordered according
to the specified pattern.
"""
def __init__(self, dims, **kwargs):
super(Permute, self).__init__(**kwargs)
self.dims = tuple(dims)
self.input_spec = InputSpec(ndim=len(self.dims) + 1)
def _compute_output_shape(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape).as_list()
output_shape = copy.copy(input_shape)
for i, dim in enumerate(self.dims):
target_dim = input_shape[dim]
output_shape[i + 1] = target_dim
return tensor_shape.TensorShape(output_shape)
def call(self, inputs):
return K.permute_dimensions(inputs, (0,) + self.dims)
def get_config(self):
config = {'dims': self.dims}
base_config = super(Permute, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
class Flatten(Layer):
"""Flattens the input. Does not affect the batch size.
Example:
```python
model = Sequential()
model.add(Convolution2D(64, 3, 3,
border_mode='same',
input_shape=(3, 32, 32)))
# now: model.output_shape == (None, 64, 32, 32)
model.add(Flatten())
# now: model.output_shape == (None, 65536)
```
"""
def __init__(self, **kwargs):
super(Flatten, self).__init__(**kwargs)
self.input_spec = InputSpec(min_ndim=3)
def _compute_output_shape(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape).as_list()
if not all(input_shape[1:]):
raise ValueError('The shape of the input to "Flatten" '
'is not fully defined '
'(got ' + str(input_shape[1:]) + '). '
'Make sure to pass a complete "input_shape" '
'or "batch_input_shape" argument to the first '
'layer in your model.')
return tensor_shape.TensorShape([input_shape[0], np.prod(input_shape[1:])])
def call(self, inputs):
outputs = K.batch_flatten(inputs)
outputs.set_shape(self._compute_output_shape(inputs.get_shape()))
return outputs
class RepeatVector(Layer):
"""Repeats the input n times.
Example:
```python
model = Sequential()
model.add(Dense(32, input_dim=32))
# now: model.output_shape == (None, 32)
# note: `None` is the batch dimension
model.add(RepeatVector(3))
# now: model.output_shape == (None, 3, 32)
```
Arguments:
n: integer, repetition factor.
Input shape:
2D tensor of shape `(num_samples, features)`.
Output shape:
3D tensor of shape `(num_samples, n, features)`.
"""
def __init__(self, n, **kwargs):
super(RepeatVector, self).__init__(**kwargs)
self.n = n
self.input_spec = InputSpec(ndim=2)
def _compute_output_shape(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape).as_list()
return tensor_shape.TensorShape([input_shape[0], self.n, input_shape[1]])
def call(self, inputs):
return K.repeat(inputs, self.n)
def get_config(self):
config = {'n': self.n}
base_config = super(RepeatVector, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
class Lambda(Layer):
"""Wraps arbitrary expression as a `Layer` object.
Examples:
```python
# add a x -> x^2 layer
model.add(Lambda(lambda x: x ** 2))
```
```python
# add a layer that returns the concatenation
# of the positive part of the input and
# the opposite of the negative part
def antirectifier(x):
x -= K.mean(x, axis=1, keepdims=True)
x = K.l2_normalize(x, axis=1)
pos = K.relu(x)
neg = K.relu(-x)
return K.concatenate([pos, neg], axis=1)
model.add(Lambda(antirectifier))
```
Arguments:
function: The function to be evaluated.
Takes input tensor as first argument.
arguments: optional dictionary of keyword arguments to be passed
to the function.
Input shape:
Arbitrary. Use the keyword argument input_shape
(tuple of integers, does not include the samples axis)
when using this layer as the first layer in a model.
Output shape:
Specified by `output_shape` argument
(or auto-inferred when using TensorFlow).
"""
def __init__(self, function, mask=None, arguments=None, **kwargs):
super(Lambda, self).__init__(**kwargs)
self.function = function
self.arguments = arguments if arguments else {}
if mask is not None:
self.supports_masking = True
self.mask = mask
def call(self, inputs, mask=None):
arguments = self.arguments
if has_arg(self.function, 'mask'):
arguments['mask'] = mask
return self.function(inputs, **arguments)
def compute_mask(self, inputs, mask=None):
if callable(self.mask):
return self.mask(inputs, mask)
return self.mask
def get_config(self):
if isinstance(self.function, python_types.LambdaType):
function = func_dump(self.function)
function_type = 'lambda'
else:
function = self.function.__name__
function_type = 'function'
config = {
'function': function,
'function_type': function_type,
'arguments': self.arguments
}
base_config = super(Lambda, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@classmethod
def from_config(cls, config, custom_objects=None):
globs = globals()
if custom_objects:
globs = dict(list(globs.items()) + list(custom_objects.items()))
function_type = config.pop('function_type')
if function_type == 'function':
# Simple lookup in custom objects
function = deserialize_keras_object(
config['function'],
custom_objects=custom_objects,
printable_module_name='function in Lambda layer')
elif function_type == 'lambda':
# Unsafe deserialization from bytecode
function = func_load(config['function'], globs=globs)
else:
raise TypeError('Unknown function type:', function_type)
# If arguments were numpy array, they have been saved as
# list. We need to recover the ndarray
if 'arguments' in config:
for key in config['arguments']:
if isinstance(config['arguments'][key], dict):
arg_dict = config['arguments'][key]
if 'type' in arg_dict and arg_dict['type'] == 'ndarray':
# Overwrite the argument with its numpy translation
config['arguments'][key] = np.array(arg_dict['value'])
config['function'] = function
return cls(**config)
class Dense(tf_core_layers.Dense, Layer):
"""Just your regular densely-connected NN layer.
`Dense` implements the operation:
`output = activation(dot(input, kernel) + bias)`
where `activation` is the element-wise activation function
passed as the `activation` argument, `kernel` is a weights matrix
created by the layer, and `bias` is a bias vector created by the layer
(only applicable if `use_bias` is `True`).
Note: if the input to the layer has a rank greater than 2, then
it is flattened prior to the initial dot product with `kernel`.
Example:
```python
# as first layer in a sequential model:
model = Sequential()
model.add(Dense(32, input_shape=(16,)))
# now the model will take as input arrays of shape (*, 16)
# and output arrays of shape (*, 32)
# after the first layer, you don't need to specify
# the size of the input anymore:
model.add(Dense(32))
```
Arguments:
units: Positive integer, dimensionality of the output space.
activation: Activation function to use.
If you don't specify anything, no activation is applied
(ie. "linear" activation: `a(x) = x`).
use_bias: Boolean, whether the layer uses a bias vector.
kernel_initializer: Initializer for the `kernel` weights matrix.
bias_initializer: Initializer for the bias vector.
kernel_regularizer: Regularizer function applied to
the `kernel` weights matrix.
bias_regularizer: Regularizer function applied to the bias vector.
activity_regularizer: Regularizer function applied to
the output of the layer (its "activation")..
kernel_constraint: Constraint function applied to
the `kernel` weights matrix.
bias_constraint: Constraint function applied to the bias vector.
Input shape:
nD tensor with shape: `(batch_size, ..., input_dim)`.
The most common situation would be
a 2D input with shape `(batch_size, input_dim)`.
Output shape:
nD tensor with shape: `(batch_size, ..., units)`.
For instance, for a 2D input with shape `(batch_size, input_dim)`,
the output would have shape `(batch_size, units)`.
"""
def __init__(self,
units,
activation=None,
use_bias=True,
kernel_initializer='glorot_uniform',
bias_initializer='zeros',
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
**kwargs):
if 'input_shape' not in kwargs and 'input_dim' in kwargs:
kwargs['input_shape'] = (kwargs.pop('input_dim'),)
# Inheritance call order:
# 1) tf.layers.Dense, 2) keras.layers.Layer, 3) tf.layers.Layer
super(Dense, self).__init__(
units,
activation=activations.get(activation),
use_bias=use_bias,
kernel_initializer=initializers.get(kernel_initializer),
bias_initializer=initializers.get(bias_initializer),
kernel_regularizer=regularizers.get(kernel_regularizer),
bias_regularizer=regularizers.get(bias_regularizer),
activity_regularizer=regularizers.get(activity_regularizer),
kernel_constraint=constraints.get(kernel_constraint),
bias_constraint=constraints.get(bias_constraint),
**kwargs)
self.supports_masking = True
def get_config(self):
config = {
'units': self.units,
'activation': activations.serialize(self.activation),
'use_bias': self.use_bias,
'kernel_initializer': initializers.serialize(self.kernel_initializer),
'bias_initializer': initializers.serialize(self.bias_initializer),
'kernel_regularizer': regularizers.serialize(self.kernel_regularizer),
'bias_regularizer': regularizers.serialize(self.bias_regularizer),
'activity_regularizer':
regularizers.serialize(self.activity_regularizer),
'kernel_constraint': constraints.serialize(self.kernel_constraint),
'bias_constraint': constraints.serialize(self.bias_constraint)
}
base_config = super(Dense, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
class ActivityRegularization(Layer):
"""Layer that applies an update to the cost function based input activity.
Arguments:
l1: L1 regularization factor (positive float).
l2: L2 regularization factor (positive float).
Input shape:
Arbitrary. Use the keyword argument `input_shape`
(tuple of integers, does not include the samples axis)
when using this layer as the first layer in a model.
Output shape:
Same shape as input.
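Example:
A minimal usage sketch (the surrounding model is an illustrative assumption):
```python
model = Sequential()
model.add(Dense(64, input_shape=(16,)))
model.add(ActivityRegularization(l1=0.001))  # adds an L1 penalty on the activations
```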
"""
def __init__(self, l1=0., l2=0., **kwargs):
super(ActivityRegularization, self).__init__(**kwargs)
self.supports_masking = True
self.l1 = l1
self.l2 = l2
self.activity_regularizer = regularizers.L1L2(l1=l1, l2=l2)
def get_config(self):
config = {'l1': self.l1, 'l2': self.l2}
base_config = super(ActivityRegularization, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
|
|
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import print_function
import six
from troveclient.compat import exceptions
def get_authenticator_cls(cls_or_name):
"""Factory method to retrieve Authenticator class."""
if isinstance(cls_or_name, type):
return cls_or_name
elif isinstance(cls_or_name, six.string_types):
if cls_or_name == "keystone":
return KeyStoneV2Authenticator
elif cls_or_name == "rax":
return RaxAuthenticator
elif cls_or_name == "rax2":
return RaxAuthenticator2
elif cls_or_name == "auth1.1":
return Auth1_1
elif cls_or_name == "fake":
return FakeAuth
raise ValueError("Could not determine authenticator class from the given "
"value %r." % cls_or_name)
class Authenticator(object):
"""Helper class to perform Keystone or other miscellaneous authentication.
The "authenticate" method returns a ServiceCatalog, which can be used
to obtain a token.
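Usage sketch (illustrative; the `client` object, URL and credential values
are assumptions, not defined in this module):
    auth_cls = get_authenticator_cls("keystone")
    authenticator = auth_cls(client, "keystone", auth_url, username,
                             password, tenant)
    catalog = authenticator.authenticate()
    token = catalog.get_token()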
"""
URL_REQUIRED = True
def __init__(self, client, type, url, username, password, tenant,
region=None, service_type=None, service_name=None,
service_url=None):
self.client = client
self.type = type
self.url = url
self.username = username
self.password = password
self.tenant = tenant
self.region = region
self.service_type = service_type
self.service_name = service_name
self.service_url = service_url
def _authenticate(self, url, body, root_key='access'):
"""Authenticate and extract the service catalog."""
# Make sure we follow redirects when trying to reach Keystone
tmp_follow_all_redirects = self.client.follow_all_redirects
self.client.follow_all_redirects = True
try:
resp, body = self.client._time_request(url, "POST", body=body)
finally:
self.client.follow_all_redirects = tmp_follow_all_redirects
if resp.status == 200: # content must always be present
try:
return ServiceCatalog(body, region=self.region,
service_type=self.service_type,
service_name=self.service_name,
service_url=self.service_url,
root_key=root_key)
except exceptions.AmbiguousEndpoints:
print("Found more than one valid endpoint. Use a more "
"restrictive filter")
raise
except KeyError:
raise exceptions.AuthorizationFailure()
except exceptions.EndpointNotFound:
print("Could not find any suitable endpoint. Correct region?")
raise
elif resp.status == 305:
return resp['location']
else:
raise exceptions.from_response(resp, body)
def authenticate(self):
raise NotImplementedError("Missing authenticate method.")
class KeyStoneV2Authenticator(Authenticator):
def authenticate(self):
if self.url is None:
raise exceptions.AuthUrlNotGiven()
return self._v2_auth(self.url)
def _v2_auth(self, url):
"""Authenticate against a v2.0 auth service."""
body = {"auth": {
"passwordCredentials": {
"username": self.username,
"password": self.password}
}
}
if self.tenant:
body['auth']['tenantName'] = self.tenant
return self._authenticate(url, body)
class Auth1_1(Authenticator):
def authenticate(self):
"""Authenticate against a v2.0 auth service."""
if self.url is None:
raise exceptions.AuthUrlNotGiven()
auth_url = self.url
body = {
"credentials": {
"username": self.username,
"key": self.password
}}
return self._authenticate(auth_url, body, root_key='auth')
class RaxAuthenticator(Authenticator):
def authenticate(self):
if self.url is None:
raise exceptions.AuthUrlNotGiven()
return self._rax_auth(self.url)
def _rax_auth(self, url):
"""Authenticate against the Rackspace auth service."""
body = {'auth': {
'RAX-KSKEY:apiKeyCredentials': {
'username': self.username,
'apiKey': self.password,
'tenantName': self.tenant}
}
}
return self._authenticate(self.url, body)
class RaxAuthenticator2(RaxAuthenticator):
"""Rax specific authenticator.
Necessary to be able to call using the same auth url as the new client
uses for Rax auth.
"""
def __init__(self, *args, **kwargs):
super(RaxAuthenticator2, self).__init__(*args, **kwargs)
self.url = "%s/tokens" % self.url
class FakeAuth(Authenticator):
"""Useful for faking auth."""
def authenticate(self):
class FakeCatalog(object):
def __init__(self, auth):
self.auth = auth
def get_public_url(self):
return "%s/%s" % ('http://localhost:8779/v1.0',
self.auth.tenant)
def get_token(self):
return self.auth.tenant
return FakeCatalog(self)
class ServiceCatalog(object):
"""Represents a Keystone Service Catalog which describes a service.
This class has methods to obtain a valid token as well as a public service
url and a management url.
"""
def __init__(self, resource_dict, region=None, service_type=None,
service_name=None, service_url=None, root_key='access'):
self.catalog = resource_dict
self.region = region
self.service_type = service_type
self.service_name = service_name
self.service_url = service_url
self.management_url = None
self.public_url = None
self.root_key = root_key
self._load()
def _load(self):
if not self.service_url:
self.public_url = self._url_for(attr='region',
filter_value=self.region,
endpoint_type="publicURL")
self.management_url = self._url_for(attr='region',
filter_value=self.region,
endpoint_type="adminURL")
else:
self.public_url = self.service_url
self.management_url = self.service_url
def get_token(self):
return self.catalog[self.root_key]['token']['id']
def get_management_url(self):
return self.management_url
def get_public_url(self):
return self.public_url
def _url_for(self, attr=None, filter_value=None,
endpoint_type='publicURL'):
"""Fetch requested URL.
Fetch the public URL from the Trove service for a particular
endpoint attribute. If none given, return the first.
"""
matching_endpoints = []
if 'endpoints' in self.catalog:
# We have a bastardized service catalog. Treat it special. :/
for endpoint in self.catalog['endpoints']:
if not filter_value or endpoint[attr] == filter_value:
matching_endpoints.append(endpoint)
if not matching_endpoints:
raise exceptions.EndpointNotFound()
# We don't always get a service catalog back ...
if 'serviceCatalog' not in self.catalog[self.root_key]:
raise exceptions.EndpointNotFound()
# Full catalog ...
catalog = self.catalog[self.root_key]['serviceCatalog']
for service in catalog:
if service.get("type") != self.service_type:
continue
if (self.service_name and self.service_type == 'database' and
service.get('name') != self.service_name):
continue
endpoints = service['endpoints']
for endpoint in endpoints:
if not filter_value or endpoint.get(attr) == filter_value:
endpoint["serviceName"] = service.get("name")
matching_endpoints.append(endpoint)
if not matching_endpoints:
raise exceptions.EndpointNotFound()
elif len(matching_endpoints) > 1:
raise exceptions.AmbiguousEndpoints(endpoints=matching_endpoints)
else:
return matching_endpoints[0].get(endpoint_type, None)
|
|
# Copyright (c) 2008-2009 Aryeh Leib Taurog, http://www.aryehleib.com
# All rights reserved.
#
# Modified from original contribution by Aryeh Leib Taurog, which was
# released under the New BSD license.
import unittest
from django.contrib.gis.geos.mutable_list import ListMixin
class UserListA(ListMixin):
_mytype = tuple
def __init__(self, i_list, *args, **kwargs):
self._list = self._mytype(i_list)
super().__init__(*args, **kwargs)
def __len__(self):
return len(self._list)
def __str__(self):
return str(self._list)
def __repr__(self):
return repr(self._list)
def _set_list(self, length, items):
# this would work:
# self._list = self._mytype(items)
# but then we wouldn't be testing length parameter
itemList = ['x'] * length
for i, v in enumerate(items):
itemList[i] = v
self._list = self._mytype(itemList)
def _get_single_external(self, index):
return self._list[index]
class UserListB(UserListA):
_mytype = list
def _set_single(self, index, value):
self._list[index] = value
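# Helper used by the slice-assignment tests below: each call advances
# nextRange.start by 100 and returns a fresh block of `length` values, so
# successive slice assignments receive distinct, easily identifiable data.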
def nextRange(length):
nextRange.start += 100
return range(nextRange.start, nextRange.start + length)
nextRange.start = 0
class ListMixinTest(unittest.TestCase):
"""
Tests base class ListMixin by comparing a list clone which is
a ListMixin subclass with a real Python list.
"""
limit = 3
listType = UserListA
def lists_of_len(self, length=None):
if length is None:
length = self.limit
pl = list(range(length))
return pl, self.listType(pl)
def limits_plus(self, b):
return range(-self.limit - b, self.limit + b)
def step_range(self):
return list(range(-1 - self.limit, 0)) + list(range(1, 1 + self.limit))
def test01_getslice(self):
'Slice retrieval'
pl, ul = self.lists_of_len()
for i in self.limits_plus(1):
self.assertEqual(pl[i:], ul[i:], 'slice [%d:]' % (i))
self.assertEqual(pl[:i], ul[:i], 'slice [:%d]' % (i))
for j in self.limits_plus(1):
self.assertEqual(pl[i:j], ul[i:j], 'slice [%d:%d]' % (i, j))
for k in self.step_range():
self.assertEqual(pl[i:j:k], ul[i:j:k], 'slice [%d:%d:%d]' % (i, j, k))
for k in self.step_range():
self.assertEqual(pl[i::k], ul[i::k], 'slice [%d::%d]' % (i, k))
self.assertEqual(pl[:i:k], ul[:i:k], 'slice [:%d:%d]' % (i, k))
for k in self.step_range():
self.assertEqual(pl[::k], ul[::k], 'slice [::%d]' % (k))
def test02_setslice(self):
'Slice assignment'
def setfcn(x, i, j, k, L):
x[i:j:k] = range(L)
pl, ul = self.lists_of_len()
for slen in range(self.limit + 1):
ssl = nextRange(slen)
ul[:] = ssl
pl[:] = ssl
self.assertEqual(pl, ul[:], 'set slice [:]')
for i in self.limits_plus(1):
ssl = nextRange(slen)
ul[i:] = ssl
pl[i:] = ssl
self.assertEqual(pl, ul[:], 'set slice [%d:]' % (i))
ssl = nextRange(slen)
ul[:i] = ssl
pl[:i] = ssl
self.assertEqual(pl, ul[:], 'set slice [:%d]' % (i))
for j in self.limits_plus(1):
ssl = nextRange(slen)
ul[i:j] = ssl
pl[i:j] = ssl
self.assertEqual(pl, ul[:], 'set slice [%d:%d]' % (i, j))
for k in self.step_range():
ssl = nextRange(len(ul[i:j:k]))
ul[i:j:k] = ssl
pl[i:j:k] = ssl
self.assertEqual(pl, ul[:], 'set slice [%d:%d:%d]' % (i, j, k))
sliceLen = len(ul[i:j:k])
with self.assertRaises(ValueError):
setfcn(ul, i, j, k, sliceLen + 1)
if sliceLen > 2:
with self.assertRaises(ValueError):
setfcn(ul, i, j, k, sliceLen - 1)
for k in self.step_range():
ssl = nextRange(len(ul[i::k]))
ul[i::k] = ssl
pl[i::k] = ssl
self.assertEqual(pl, ul[:], 'set slice [%d::%d]' % (i, k))
ssl = nextRange(len(ul[:i:k]))
ul[:i:k] = ssl
pl[:i:k] = ssl
self.assertEqual(pl, ul[:], 'set slice [:%d:%d]' % (i, k))
for k in self.step_range():
ssl = nextRange(len(ul[::k]))
ul[::k] = ssl
pl[::k] = ssl
self.assertEqual(pl, ul[:], 'set slice [::%d]' % (k))
def test03_delslice(self):
'Delete slice'
for Len in range(self.limit):
pl, ul = self.lists_of_len(Len)
del pl[:]
del ul[:]
self.assertEqual(pl[:], ul[:], 'del slice [:]')
for i in range(-Len - 1, Len + 1):
pl, ul = self.lists_of_len(Len)
del pl[i:]
del ul[i:]
self.assertEqual(pl[:], ul[:], 'del slice [%d:]' % (i))
pl, ul = self.lists_of_len(Len)
del pl[:i]
del ul[:i]
self.assertEqual(pl[:], ul[:], 'del slice [:%d]' % (i))
for j in range(-Len - 1, Len + 1):
pl, ul = self.lists_of_len(Len)
del pl[i:j]
del ul[i:j]
self.assertEqual(pl[:], ul[:], 'del slice [%d:%d]' % (i, j))
for k in list(range(-Len - 1, 0)) + list(range(1, Len)):
pl, ul = self.lists_of_len(Len)
del pl[i:j:k]
del ul[i:j:k]
self.assertEqual(pl[:], ul[:], 'del slice [%d:%d:%d]' % (i, j, k))
for k in list(range(-Len - 1, 0)) + list(range(1, Len)):
pl, ul = self.lists_of_len(Len)
del pl[:i:k]
del ul[:i:k]
self.assertEqual(pl[:], ul[:], 'del slice [:%d:%d]' % (i, k))
pl, ul = self.lists_of_len(Len)
del pl[i::k]
del ul[i::k]
self.assertEqual(pl[:], ul[:], 'del slice [%d::%d]' % (i, k))
for k in list(range(-Len - 1, 0)) + list(range(1, Len)):
pl, ul = self.lists_of_len(Len)
del pl[::k]
del ul[::k]
self.assertEqual(pl[:], ul[:], 'del slice [::%d]' % (k))
def test04_get_set_del_single(self):
'Get/set/delete single item'
pl, ul = self.lists_of_len()
for i in self.limits_plus(0):
self.assertEqual(pl[i], ul[i], 'get single item [%d]' % i)
for i in self.limits_plus(0):
pl, ul = self.lists_of_len()
pl[i] = 100
ul[i] = 100
self.assertEqual(pl[:], ul[:], 'set single item [%d]' % i)
for i in self.limits_plus(0):
pl, ul = self.lists_of_len()
del pl[i]
del ul[i]
self.assertEqual(pl[:], ul[:], 'del single item [%d]' % i)
def test05_out_of_range_exceptions(self):
'Out of range exceptions'
def setfcn(x, i):
x[i] = 20
def getfcn(x, i):
return x[i]
def delfcn(x, i):
del x[i]
pl, ul = self.lists_of_len()
for i in (-1 - self.limit, self.limit):
with self.assertRaises(IndexError): # 'set index %d' % i)
setfcn(ul, i)
with self.assertRaises(IndexError): # 'get index %d' % i)
getfcn(ul, i)
with self.assertRaises(IndexError): # 'del index %d' % i)
delfcn(ul, i)
def test06_list_methods(self):
'List methods'
pl, ul = self.lists_of_len()
pl.append(40)
ul.append(40)
self.assertEqual(pl[:], ul[:], 'append')
pl.extend(range(50, 55))
ul.extend(range(50, 55))
self.assertEqual(pl[:], ul[:], 'extend')
pl.reverse()
ul.reverse()
self.assertEqual(pl[:], ul[:], 'reverse')
for i in self.limits_plus(1):
pl, ul = self.lists_of_len()
pl.insert(i, 50)
ul.insert(i, 50)
self.assertEqual(pl[:], ul[:], 'insert at %d' % i)
for i in self.limits_plus(0):
pl, ul = self.lists_of_len()
self.assertEqual(pl.pop(i), ul.pop(i), 'popped value at %d' % i)
self.assertEqual(pl[:], ul[:], 'after pop at %d' % i)
pl, ul = self.lists_of_len()
self.assertEqual(pl.pop(), ul.pop(), 'popped value')
self.assertEqual(pl[:], ul[:], 'after pop')
pl, ul = self.lists_of_len()
def popfcn(x, i):
x.pop(i)
with self.assertRaises(IndexError):
popfcn(ul, self.limit)
with self.assertRaises(IndexError):
popfcn(ul, -1 - self.limit)
pl, ul = self.lists_of_len()
for val in range(self.limit):
self.assertEqual(pl.index(val), ul.index(val), 'index of %d' % val)
for val in self.limits_plus(2):
self.assertEqual(pl.count(val), ul.count(val), 'count %d' % val)
for val in range(self.limit):
pl, ul = self.lists_of_len()
pl.remove(val)
ul.remove(val)
self.assertEqual(pl[:], ul[:], 'after remove val %d' % val)
def indexfcn(x, v):
return x.index(v)
def removefcn(x, v):
return x.remove(v)
with self.assertRaises(ValueError):
indexfcn(ul, 40)
with self.assertRaises(ValueError):
removefcn(ul, 40)
def test07_allowed_types(self):
'Type-restricted list'
pl, ul = self.lists_of_len()
ul._allowed = int
ul[1] = 50
ul[:2] = [60, 70, 80]
def setfcn(x, i, v):
x[i] = v
with self.assertRaises(TypeError):
setfcn(ul, 2, 'hello')
with self.assertRaises(TypeError):
setfcn(ul, slice(0, 3, 2), ('hello', 'goodbye'))
def test08_min_length(self):
'Length limits'
pl, ul = self.lists_of_len(5)
ul._minlength = 3
def delfcn(x, i):
del x[:i]
def setfcn(x, i):
x[:i] = []
for i in range(len(ul) - ul._minlength + 1, len(ul)):
with self.assertRaises(ValueError):
delfcn(ul, i)
with self.assertRaises(ValueError):
setfcn(ul, i)
del ul[:len(ul) - ul._minlength]
ul._maxlength = 4
for i in range(0, ul._maxlength - len(ul)):
ul.append(i)
with self.assertRaises(ValueError):
ul.append(10)
def test09_iterable_check(self):
'Error on assigning non-iterable to slice'
pl, ul = self.lists_of_len(self.limit + 1)
def setfcn(x, i, v):
x[i] = v
with self.assertRaises(TypeError):
setfcn(ul, slice(0, 3, 2), 2)
def test10_checkindex(self):
'Index check'
pl, ul = self.lists_of_len()
for i in self.limits_plus(0):
if i < 0:
self.assertEqual(ul._checkindex(i), i + self.limit, '_checkindex(neg index)')
else:
self.assertEqual(ul._checkindex(i), i, '_checkindex(pos index)')
for i in (-self.limit - 1, self.limit):
with self.assertRaises(IndexError):
ul._checkindex(i)
def test_11_sorting(self):
'Sorting'
pl, ul = self.lists_of_len()
pl.insert(0, pl.pop())
ul.insert(0, ul.pop())
pl.sort()
ul.sort()
self.assertEqual(pl[:], ul[:], 'sort')
mid = pl[len(pl) // 2]
pl.sort(key=lambda x: (mid - x) ** 2)
ul.sort(key=lambda x: (mid - x) ** 2)
self.assertEqual(pl[:], ul[:], 'sort w/ key')
pl.insert(0, pl.pop())
ul.insert(0, ul.pop())
pl.sort(reverse=True)
ul.sort(reverse=True)
self.assertEqual(pl[:], ul[:], 'sort w/ reverse')
mid = pl[len(pl) // 2]
pl.sort(key=lambda x: (mid - x) ** 2)
ul.sort(key=lambda x: (mid - x) ** 2)
self.assertEqual(pl[:], ul[:], 'sort w/ key')
def test_12_arithmetic(self):
'Arithmetic'
pl, ul = self.lists_of_len()
al = list(range(10, 14))
self.assertEqual(list(pl + al), list(ul + al), 'add')
self.assertEqual(type(ul), type(ul + al), 'type of add result')
self.assertEqual(list(al + pl), list(al + ul), 'radd')
self.assertEqual(type(al), type(al + ul), 'type of radd result')
objid = id(ul)
pl += al
ul += al
self.assertEqual(pl[:], ul[:], 'in-place add')
self.assertEqual(objid, id(ul), 'in-place add id')
for n in (-1, 0, 1, 3):
pl, ul = self.lists_of_len()
self.assertEqual(list(pl * n), list(ul * n), 'mul by %d' % n)
self.assertEqual(type(ul), type(ul * n), 'type of mul by %d result' % n)
self.assertEqual(list(n * pl), list(n * ul), 'rmul by %d' % n)
self.assertEqual(type(ul), type(n * ul), 'type of rmul by %d result' % n)
objid = id(ul)
pl *= n
ul *= n
self.assertEqual(pl[:], ul[:], 'in-place mul by %d' % n)
self.assertEqual(objid, id(ul), 'in-place mul by %d id' % n)
pl, ul = self.lists_of_len()
self.assertEqual(pl, ul, 'cmp for equal')
self.assertNotEqual(ul, pl + [2], 'cmp for not equal')
self.assertGreaterEqual(pl, ul, 'cmp for gte self')
self.assertLessEqual(pl, ul, 'cmp for lte self')
self.assertGreaterEqual(ul, pl, 'cmp for self gte')
self.assertLessEqual(ul, pl, 'cmp for self lte')
self.assertGreater(pl + [5], ul, 'cmp')
self.assertGreaterEqual(pl + [5], ul, 'cmp')
self.assertLess(pl, ul + [2], 'cmp')
self.assertLessEqual(pl, ul + [2], 'cmp')
self.assertGreater(ul + [5], pl, 'cmp')
self.assertGreaterEqual(ul + [5], pl, 'cmp')
self.assertLess(ul, pl + [2], 'cmp')
self.assertLessEqual(ul, pl + [2], 'cmp')
# Also works with a custom IndexError
ul_longer = ul + [2]
ul_longer._IndexError = TypeError
ul._IndexError = TypeError
self.assertNotEqual(ul_longer, pl)
self.assertGreater(ul_longer, ul)
pl[1] = 20
self.assertGreater(pl, ul, 'cmp for gt self')
self.assertLess(ul, pl, 'cmp for self lt')
pl[1] = -20
self.assertLess(pl, ul, 'cmp for lt self')
self.assertGreater(ul, pl, 'cmp for gt self')
class ListMixinTestSingle(ListMixinTest):
listType = UserListB
|
|
import logging
from pathlib import Path
from io import StringIO
from zipfile import is_zipfile
from traitlets.config import Config
from traitlets.config.manager import BaseJSONConfigManager
from traitlets.traitlets import Bool, Unicode, List, Dict, Tuple, default
from nbconvert.nbconvertapp import NbConvertApp
from nbconvert.exporters import export
from nbconvert.writers import FilesWriter
from ipype.config import Pipeline
from ipype.notebook import get_notebooks_in_zip, is_valid_notebook, \
export_notebook, open_notebook
class IPype(NbConvertApp):
name = Unicode('ipype')
description = Unicode('IPype is an open-source, Python 3 only, BSD-licensed library that allows you to run self-documenting Jupyter notebook pipelines.')
# The version string of this application.
version = Unicode('')
#aliases = {'pipeline': 'Pipeline.path',
# 'output': 'Pipeline.output_dir'}
@default('version')
def get_version(self):
from ipype import __version__
return __version__
# The usage and example string that goes at the end of the help string.
#examples = Unicode()
# A sequence of Configurable subclasses whose config=True attributes will
# be exposed at the command line.
classes = List([Pipeline])
config_file = Unicode(u'', config=True, help="Load this config file")
# config_file is reachable only with --MyApp.config_file=... or --help-all
def initialize(self, argv=None):
super().initialize(argv) #btw this also calls init notebooks
#set pipeline path and output dir
self._path = Path(self.config.pipeline)
self._output = Path(self.config.output_dir)
#setup logging
self._setup_logging()
#set the pipeline Configurable object
self.Pipeline = Pipeline(config=Config(self.config), log=self.log)
#init writers
self.init_writers()
def init_notebooks(self):
filenames = []
pipeline_path = Path(self.config.pipeline)
output_path = Path(self.config.output_dir)
if pipeline_path.is_dir():
filenames = sorted(pipeline_path.glob('*.ipynb'))
elif is_zipfile(str(pipeline_path)):
filenames = get_notebooks_in_zip(str(pipeline_path))
elif pipeline_path.is_file():
if is_valid_notebook(str(pipeline_path)):
filenames = [pipeline_path] # list with one notebook
else:
raise Exception("Could not validate notebook")
_notebooks = [str(f) for f in filenames]
#copy notebooks to pipeline subfolder
copied_notebooks = []
for notebook in _notebooks:
notebook_pth = Path(notebook)
from nbconvert.exporters import NotebookExporter
pipeline_writer = FilesWriter(build_directory=str(output_path / 'pipeline'))
pipeline_output, resources = export(NotebookExporter, notebook, resources={})
pipeline_writer.write(pipeline_output, resources, notebook_name=notebook_pth.stem)
copied_notebook_pth = output_path / 'pipeline' / notebook_pth.name
assert copied_notebook_pth.exists()
copied_notebooks.append(str(copied_notebook_pth.absolute()))
self.notebooks = copied_notebooks
#add it into the Pipeline metadata
self.config.pipeline_notebooks = [nb for nb in self.notebooks]
self.config.executed_notebooks = []
def _output_subdir(self, subdir):
return str(self._output / subdir)
def init_writers(self):
self.log.debug("Call: IPype.init_writers()") #log
self.writers = Config()
self.writers['pipeline_writer'] = FilesWriter(build_directory=self._output_subdir('pipeline'))
self.writers['calib_writer'] = FilesWriter(build_directory=self._output_subdir('calib_notebooks'))
self.writers['exec_writer'] = FilesWriter(build_directory=self._output_subdir('exec_notebooks'))
self.writers['html_writer'] = FilesWriter(build_directory=self._output_subdir('html'))
def _setup_logging(self):
self.log.debug("Call: IPype._setup_logging()") #log
self.log.setLevel(logging.INFO)
logs_subdir = Path(self._output_subdir('logs'))
logs_subdir.mkdir(exist_ok=True)
pipeline_log_pth = logs_subdir / 'pipeline.log'
log_file_handler = logging.FileHandler(str(pipeline_log_pth))
log_file_handler.setLevel(logging.INFO)
log_file_formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
log_file_handler.setFormatter(log_file_formatter)
self.log.addHandler(log_file_handler)
self.log.info("Application logging was set up.") #log
def write_json_config(self):
self.log.debug("Call: IPype.write_json_config().") #log
#save a config.json into the output dir
configmanager = BaseJSONConfigManager(config_dir=str(self._output))
configmanager.set('config', self.config)
self.log.debug("config.json file saved into the main output dir.") #log
#save a config.json into the "pipeline subdir"
configmanager = BaseJSONConfigManager(config_dir=str(self._output_subdir('pipeline')))
configmanager.set('config', self.Pipeline.config)
self.log.debug("config.json file saved into the pipeline subdir of the output dir.") #log
def convert_notebooks(self):
#write json config into output dir
self.write_json_config()
#initialize notebooks ("that have been executed") as empty list
self.executed_notebooks = []
#loop over the notebooks
for notebook in self.notebooks:
self.log.debug("Call: IPype.convert_single_notebook() for notebook: {}".format(notebook))
self.convert_single_notebook(notebook)
def convert_single_notebook(self, notebook_filename, input_buffer=None):
notebook_pth = Path(notebook_filename)
self.log.info("Initializing single notebook resources for notebook: {}".format(notebook_filename)) #log
resources = self.init_single_notebook_resources(notebook_filename)
#calib
self.log.info("Calibrating notebook: {}".format(notebook_filename)) #log
from ipype.exporters import CalibratedNotebookExporter
resources.update(output_subdir=str(self._output / 'calib_notebooks'))
calib_output, resources = export(CalibratedNotebookExporter, notebook_filename, resources=resources)
self.writers['calib_writer'].write(calib_output, resources, notebook_name=notebook_pth.stem)
##############################################################
#exec ##########################################################
self.log.info("Executing notebook: {}".format(notebook_filename)) #log
results_subdir = Path(self._output / 'results')
results_subdir.mkdir(exist_ok=True) #create results subdir
from ipype.exporters import ExecutedNotebookExporter
exec_subdir = self._output / 'exec_notebooks'
resources.update(output_subdir=str(exec_subdir))
exec_output, resources = export(ExecutedNotebookExporter, notebook_filename, resources=resources)
self.writers['exec_writer'].write(exec_output, resources, notebook_name=notebook_pth.with_suffix('.exec').name)
executed_notebook_pth = exec_subdir / notebook_pth.with_suffix('.exec.ipynb').name
self.executed_notebooks.append(str(executed_notebook_pth))
##############################################################
#html
self.log.info("Exporting executed notebook {} to html..".format(str(executed_notebook_pth))) #log
from ipype.exporters import HTMLExporter
resources.update(output_subdir=str(self._output / 'html'), notebook_name=notebook_pth.stem)
exec_output_filelike = StringIO(exec_output)
html_output, resources = export(HTMLExporter, exec_output_filelike, resources=resources)
self.writers['html_writer'].write(html_output, resources, notebook_name=notebook_pth.stem)
##############################################################
def init_single_notebook_resources(self, notebook_filename):
self.log.debug("Call: IPype.init_single_notebook_resources() for notebook: {}".format(notebook_filename)) #log
notebook_pth = Path(notebook_filename).absolute()
return {
'config_dir': str(self.config.pipeline),
'unique_key': notebook_pth.name,
'output_files_dir': str(self.Pipeline.output_dir),
'notebook_filename': str(notebook_pth),
'pipeline_dir': self._output_subdir('pipeline'),
'pipeline_notebooks': self.notebooks,
'executed_notebooks': self.executed_notebooks,
'pipeline_info': self.Pipeline.config,
}
def export_single_notebook(self, notebook_filename, resources, input_buffer=None):
"""Not used: IPype drives conversion through convert_single_notebook()."""
raise NotImplementedError("export_single_notebook is not used by IPype")
def write_single_notebook(self, output, resources):
"""Not used: writing is handled by the FilesWriter instances in self.writers."""
raise NotImplementedError("write_single_notebook is not used by IPype")
def postprocess_single_notebook(self, write_results):
"""not used"""
pass
##############################################################
main = launch_new_instance = IPype.launch_instance
if __name__ == "__main__":
main()
|
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import itertools
import re
from recipe_engine import recipe_api
class GitApi(recipe_api.RecipeApi):
_GIT_HASH_RE = re.compile('[0-9a-f]{40}', re.IGNORECASE)
def __call__(self, *args, **kwargs):
"""Return a git command step."""
name = kwargs.pop('name', 'git ' + args[0])
infra_step = kwargs.pop('infra_step', True)
git_cmd = ['git']
options = kwargs.pop('git_config_options', {})
for k, v in sorted(options.iteritems()):
git_cmd.extend(['-c', '%s=%s' % (k, v)])
can_fail_build = kwargs.pop('can_fail_build', True)
try:
with self.m.context(cwd=(self.m.context.cwd or self.m.path['checkout'])):
return self.m.step(name, git_cmd + list(args), infra_step=infra_step,
**kwargs)
except self.m.step.StepFailure as f:
if can_fail_build:
raise
else:
return f.result
def fetch_tags(self, remote_name=None, **kwargs):
"""Fetches all tags from the remote."""
kwargs.setdefault('name', 'git fetch tags')
remote_name = remote_name or 'origin'
return self('fetch', remote_name, '--tags', **kwargs)
def cat_file_at_commit(self, file_path, commit_hash, remote_name=None,
**kwargs):
"""Outputs the contents of a file at a given revision."""
self.fetch_tags(remote_name=remote_name, **kwargs)
kwargs.setdefault('name', 'git cat-file %s:%s' % (commit_hash, file_path))
return self('cat-file', 'blob', '%s:%s' % (commit_hash, file_path),
**kwargs)
def count_objects(self, previous_result=None, can_fail_build=False, **kwargs):
"""Returns `git count-objects` result as a dict.
Args:
previous_result (dict): the result of previous count_objects call.
If passed, delta is reported in the log and step text.
can_fail_build (bool): if True, may fail the build and/or raise an
exception. Defaults to False.
Returns:
A dict of count-object values, or None if count-object run failed.
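Illustrative shape of a successful result (the keys come from
`git count-objects -v`; the numbers are made up):
{'count': 10L, 'size': 40L, 'in-pack': 100L, 'packs': 1L, 'size-pack': 2048L}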
"""
if previous_result:
assert isinstance(previous_result, dict)
assert all(isinstance(v, long) for v in previous_result.itervalues())
assert 'size' in previous_result
assert 'size-pack' in previous_result
step_result = None
try:
step_result = self(
'count-objects', '-v', stdout=self.m.raw_io.output(),
can_fail_build=can_fail_build, **kwargs)
if not step_result.stdout:
return None
result = {}
for line in step_result.stdout.splitlines():
name, value = line.split(':', 1)
result[name] = long(value.strip())
def results_to_text(results):
return [' %s: %s' % (k, v) for k, v in results.iteritems()]
step_result.presentation.logs['result'] = results_to_text(result)
if previous_result:
delta = {
key: value - previous_result[key]
for key, value in result.iteritems()
if key in previous_result}
step_result.presentation.logs['delta'] = (
['before:'] + results_to_text(previous_result) +
['', 'after:'] + results_to_text(result) +
['', 'delta:'] + results_to_text(delta)
)
size_delta = (
result['size'] + result['size-pack']
- previous_result['size'] - previous_result['size-pack'])
# size_delta is in KiB.
step_result.presentation.step_text = (
'size delta: %+.2f MiB' % (size_delta / 1024.0))
return result
except Exception as ex:
if step_result:
step_result.presentation.logs['exception'] = ['%r' % ex]
step_result.presentation.status = self.m.step.WARNING
if can_fail_build:
raise recipe_api.InfraFailure('count-objects failed: %s' % ex)
return None
def checkout(self, url, ref=None, dir_path=None, recursive=False,
submodules=True, submodule_update_force=False,
keep_paths=None, step_suffix=None,
curl_trace_file=None, can_fail_build=True,
set_got_revision=False, remote_name=None,
display_fetch_size=None, file_name=None,
submodule_update_recursive=True,
use_git_cache=False, progress=True):
"""Performs a full git checkout and returns sha1 of checked out revision.
Args:
url (str): url of remote repo to use as upstream
ref (str): ref to fetch and check out
dir_path (Path): optional directory to clone into
recursive (bool): whether to recursively fetch submodules or not
submodules (bool): whether to sync and update submodules or not
submodule_update_force (bool): whether to update submodules with --force
keep_paths (iterable of strings): paths to ignore during git-clean;
paths are gitignore-style patterns relative to checkout_path.
step_suffix (str): suffix to add to each step name
curl_trace_file (Path): if not None, dump GIT_CURL_VERBOSE=1 trace to that
file. Useful for debugging git issue reproducible only on bots. It has
a side effect of all stderr output of 'git fetch' going to that file.
can_fail_build (bool): if False, ignore errors during fetch or checkout.
set_got_revision (bool): if True, resolves HEAD and sets got_revision
property.
remote_name (str): name of the git remote to use
display_fetch_size (bool): if True, run `git count-objects` before and
after fetch and display delta. Adds two more steps. Defaults to False.
file_name (str): optional path to a single file to checkout.
submodule_update_recursive (bool): if True, updates submodules
recursively.
use_git_cache (bool): if True, git cache will be used for this checkout.
WARNING, this is EXPERIMENTAL!!! This wasn't tested with:
* submodules
* since origin url is modified
to a local path, may cause problem with scripts that do
"git fetch origin" or "git push origin".
* arbitrary refs such as refs/whatever/not-fetched-by-default-to-cache
progress (bool): whether to show progress for fetch or not
Returns: If the checkout was successful, this returns the commit hash of
the checked-out-repo. Otherwise this returns None.
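Usage sketch (illustrative; assumes this module is reachable as `api.git`
inside a recipe and that the URL and ref are valid):
revision = api.git.checkout('https://chromium.googlesource.com/foo.git',
ref='refs/heads/master', set_got_revision=True)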
"""
retVal = None
# TODO(robertocn): Break this function and refactor calls to it.
# The problem is that there are way too many unrelated use cases for
# it, and the function's signature is getting unwieldy and its body
# unreadable.
display_fetch_size = display_fetch_size or False
if not dir_path:
dir_path = url.rsplit('/', 1)[-1]
if dir_path.endswith('.git'): # ex: https://host/foobar.git
dir_path = dir_path[:-len('.git')]
# ex: ssh://host:repo/foobar/.git
dir_path = dir_path or dir_path.rsplit('/', 1)[-1]
dir_path = self.m.path['start_dir'].join(dir_path)
if 'checkout' not in self.m.path:
self.m.path['checkout'] = dir_path
git_setup_args = ['--path', dir_path, '--url', url]
if remote_name:
git_setup_args += ['--remote', remote_name]
else:
remote_name = 'origin'
step_suffix = '' if step_suffix is None else ' (%s)' % step_suffix
self.m.python(
'git setup%s' % step_suffix,
self.resource('git_setup.py'),
git_setup_args)
# Some of the commands below require depot_tools to be in PATH.
path = self.m.path.pathsep.join([
str(self.package_repo_resource()), '%(PATH)s'])
with self.m.context(cwd=dir_path):
if use_git_cache:
with self.m.context(env={'PATH': path}):
self('retry', 'cache', 'populate', '-c',
self.m.infra_paths.default_git_cache_dir, url,
name='populate cache',
can_fail_build=can_fail_build)
dir_cmd = self(
'cache', 'exists', '--quiet',
'--cache-dir', self.m.infra_paths.default_git_cache_dir, url,
can_fail_build=can_fail_build,
stdout=self.m.raw_io.output(),
step_test_data=lambda:
self.m.raw_io.test_api.stream_output('mirror_dir'))
mirror_dir = dir_cmd.stdout.strip()
self('remote', 'set-url', 'origin', mirror_dir,
can_fail_build=can_fail_build)
# There are five kinds of refs we can be handed:
# 0) None. In this case, we default to properties['branch'].
# 1) A 40-character SHA1 hash.
# 2) A fully-qualifed arbitrary ref, e.g. 'refs/foo/bar/baz'.
# 3) A fully qualified branch name, e.g. 'refs/heads/master'.
# Chop off 'refs/heads' and now it matches case (4).
# 4) A branch name, e.g. 'master'.
# Note that 'FETCH_HEAD' can be many things (and therefore not a valid
# checkout target) if many refs are fetched, but we only explicitly fetch
# one ref here, so this is safe.
fetch_args = []
if not ref: # Case 0
fetch_remote = remote_name
fetch_ref = self.m.properties.get('branch') or 'master'
checkout_ref = 'FETCH_HEAD'
elif self._GIT_HASH_RE.match(ref): # Case 1.
fetch_remote = remote_name
fetch_ref = ''
checkout_ref = ref
elif ref.startswith('refs/heads/'): # Case 3.
fetch_remote = remote_name
fetch_ref = ref[len('refs/heads/'):]
checkout_ref = 'FETCH_HEAD'
else: # Cases 2 and 4.
fetch_remote = remote_name
fetch_ref = ref
checkout_ref = 'FETCH_HEAD'
fetch_args = [x for x in (fetch_remote, fetch_ref) if x]
if recursive:
fetch_args.append('--recurse-submodules')
if progress:
fetch_args.append('--progress')
fetch_env = {'PATH': path}
fetch_stderr = None
if curl_trace_file:
fetch_env['GIT_CURL_VERBOSE'] = '1'
fetch_stderr = self.m.raw_io.output(leak_to=curl_trace_file)
fetch_step_name = 'git fetch%s' % step_suffix
if display_fetch_size:
count_objects_before_fetch = self.count_objects(
name='count-objects before %s' % fetch_step_name,
step_test_data=lambda: self.m.raw_io.test_api.stream_output(
self.test_api.count_objects_output(1000)))
with self.m.context(env=fetch_env):
self('retry', 'fetch', *fetch_args,
name=fetch_step_name,
stderr=fetch_stderr,
can_fail_build=can_fail_build)
if display_fetch_size:
self.count_objects(
name='count-objects after %s' % fetch_step_name,
previous_result=count_objects_before_fetch,
step_test_data=lambda: self.m.raw_io.test_api.stream_output(
self.test_api.count_objects_output(2000)))
if file_name:
self('checkout', '-f', checkout_ref, '--', file_name,
name='git checkout%s' % step_suffix,
can_fail_build=can_fail_build)
else:
self('checkout', '-f', checkout_ref,
name='git checkout%s' % step_suffix,
can_fail_build=can_fail_build)
rev_parse_step = self('rev-parse', 'HEAD',
name='read revision',
stdout=self.m.raw_io.output(),
can_fail_build=False,
step_test_data=lambda:
self.m.raw_io.test_api.stream_output('deadbeef'))
if rev_parse_step.presentation.status == 'SUCCESS':
sha = rev_parse_step.stdout.strip()
retVal = sha
rev_parse_step.presentation.step_text = "<br/>checked out %r<br/>" % sha
if set_got_revision:
rev_parse_step.presentation.properties['got_revision'] = sha
clean_args = list(itertools.chain(
*[('-e', path) for path in keep_paths or []]))
self('clean', '-f', '-d', '-x', *clean_args,
name='git clean%s' % step_suffix,
can_fail_build=can_fail_build)
if submodules:
self('submodule', 'sync',
name='submodule sync%s' % step_suffix,
can_fail_build=can_fail_build)
submodule_update = ['submodule', 'update', '--init']
if submodule_update_recursive:
submodule_update.append('--recursive')
if submodule_update_force:
submodule_update.append('--force')
self(*submodule_update,
name='submodule update%s' % step_suffix,
can_fail_build=can_fail_build)
return retVal
def get_timestamp(self, commit='HEAD', test_data=None, **kwargs):
"""Find and return the timestamp of the given commit."""
step_test_data = None
if test_data is not None:
step_test_data = lambda: self.m.raw_io.test_api.stream_output(test_data)
return self('show', commit, '--format=%at', '-s',
stdout=self.m.raw_io.output(),
step_test_data=step_test_data).stdout.rstrip()
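# Example (illustrative; 'api' is a hypothetical recipe api handle):
#   api.git.get_timestamp() returns the author timestamp of HEAD as a string
#   of epoch seconds, e.g. '1514764800'.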
def rebase(self, name_prefix, branch, dir_path, remote_name=None,
**kwargs):
"""Run rebase HEAD onto branch
Args:
name_prefix (str): a prefix used for the step names
branch (str): a branch name or a hash to rebase onto
dir_path (Path): directory of the existing checkout in which to rebase
remote_name (str): the remote name to rebase from if not origin
"""
remote_name = remote_name or 'origin'
with self.m.context(cwd=dir_path):
try:
self('rebase', '%s/master' % remote_name,
name="%s rebase" % name_prefix, **kwargs)
except self.m.step.StepFailure:
self('rebase', '--abort', name='%s rebase abort' % name_prefix,
**kwargs)
raise
def config_get(self, prop_name, **kwargs):
"""Returns: (str) The Git config output, or None if no output was generated.
Args:
prop_name: (str) The name of the config property to query.
kwargs: Forwarded to '__call__'.
"""
kwargs['name'] = kwargs.get('name', 'git config %s' % (prop_name,))
result = self('config', '--get', prop_name, stdout=self.m.raw_io.output(),
**kwargs)
value = result.stdout
if value:
value = value.strip()
result.presentation.step_text = value
return value
def get_remote_url(self, remote_name=None, **kwargs):
"""Returns: (str) The URL of the remote Git repository, or None.
Args:
remote_name: (str) The name of the remote to query, defaults to 'origin'.
kwargs: Forwarded to '__call__'.
"""
remote_name = remote_name or 'origin'
return self.config_get('remote.%s.url' % (remote_name,), **kwargs)
def bundle_create(self, bundle_path, rev_list_args=None, **kwargs):
"""Run 'git bundle create' on a Git repository.
Args:
bundle_path (Path): The path of the output bundle.
rev_list_args (list): Arguments passed to 'git rev-list' to select the refs
to include in the bundle. If None, all refs in the Git checkout will be
bundled.
kwargs: Forwarded to '__call__'.
"""
if not rev_list_args:
rev_list_args = ['--all']
self('bundle', 'create', bundle_path, *rev_list_args, **kwargs)
def new_branch(self, branch, name=None, upstream=None, **kwargs):
"""Runs git new-branch on a Git repository, to be used before git cl upload.
Args:
branch (str): new branch name, which must not yet exist.
name (str): step name.
upstream (str): upstream branch to track; defaults to origin/master.
kwargs: Forwarded to '__call__'.
"""
env = self.m.context.env
env['PATH'] = self.m.path.pathsep.join([
str(self.package_repo_resource()), '%(PATH)s'])
args = ['new-branch', branch]
if upstream:
args.extend(['--upstream', upstream])
if not name:
name = 'git new-branch %s' % branch
with self.m.context(env=env):
return self(*args, name=name, **kwargs)
|
|
from sympy import (Symbol, Rational, Order, exp, ln, log, nan, oo, O, pi, I,
S, Integral, sin, cos, sqrt, conjugate, expand, transpose, symbols,
Function, Add)
from sympy.utilities.pytest import raises
from sympy.abc import w, x, y, z
def test_caching_bug():
# needs to be the first test, so that all caches are clean
# cache it
e = O(w)
#and test that this won't raise an exception
O(w**(-1/x/log(3)*log(5)), w)
def test_free_symbols():
assert Order(1).free_symbols == set()
assert Order(x).free_symbols == set([x])
assert Order(1, x).free_symbols == set([x])
assert Order(x*y).free_symbols == set([x, y])
assert Order(x, x, y).free_symbols == set([x, y])
def test_simple_1():
o = Rational(0)
assert Order(2*x) == Order(x)
assert Order(x)*3 == Order(x)
assert -28*Order(x) == Order(x)
assert Order(Order(x)) == Order(x)
assert Order(Order(x), y) == Order(Order(x), x, y)
assert Order(-23) == Order(1)
assert Order(exp(x)) == Order(1, x)
assert Order(exp(1/x)).expr == exp(1/x)
assert Order(x*exp(1/x)).expr == x*exp(1/x)
assert Order(x**(o/3)).expr == x**(o/3)
assert Order(x**(5*o/3)).expr == x**(5*o/3)
assert Order(x**2 + x + y, x) == O(1, x)
assert Order(x**2 + x + y, y) == O(1, y)
raises(ValueError, lambda: Order(exp(x), x, x))
raises(TypeError, lambda: Order(x, 2 - x))
def test_simple_2():
assert Order(2*x)*x == Order(x**2)
assert Order(2*x)/x == Order(1, x)
assert Order(2*x)*x*exp(1/x) == Order(x**2*exp(1/x))
assert (Order(2*x)*x*exp(1/x)/ln(x)**3).expr == x**2*exp(1/x)*ln(x)**-3
def test_simple_3():
assert Order(x) + x == Order(x)
assert Order(x) + 2 == 2 + Order(x)
assert Order(x) + x**2 == Order(x)
assert Order(x) + 1/x == 1/x + Order(x)
assert Order(1/x) + 1/x**2 == 1/x**2 + Order(1/x)
assert Order(x) + exp(1/x) == Order(x) + exp(1/x)
def test_simple_4():
assert Order(x)**2 == Order(x**2)
def test_simple_5():
assert Order(x) + Order(x**2) == Order(x)
assert Order(x) + Order(x**-2) == Order(x**-2)
assert Order(x) + Order(1/x) == Order(1/x)
def test_simple_6():
assert Order(x) - Order(x) == Order(x)
assert Order(x) + Order(1) == Order(1)
assert Order(x) + Order(x**2) == Order(x)
assert Order(1/x) + Order(1) == Order(1/x)
assert Order(x) + Order(exp(1/x)) == Order(exp(1/x))
assert Order(x**3) + Order(exp(2/x)) == Order(exp(2/x))
assert Order(x**-3) + Order(exp(2/x)) == Order(exp(2/x))
def test_simple_7():
assert 1 + O(1) == O(1)
assert 2 + O(1) == O(1)
assert x + O(1) == O(1)
assert 1/x + O(1) == 1/x + O(1)
def test_simple_8():
assert O(sqrt(-x)) == O(sqrt(x))
assert O(x**2*sqrt(x)) == O(x**(S(5)/2))
assert O(x**3*sqrt(-(-x)**3)) == O(x**(S(9)/2))
assert O(x**(S(3)/2)*sqrt((-x)**3)) == O(x**3)
assert O(x*(-2*x)**(I/2)) == O(x*(-x)**(I/2))
def test_as_expr_variables():
assert Order(x).as_expr_variables(None) == (x, ((x, 0),))
assert Order(x).as_expr_variables((((x, 0),))) == (x, ((x, 0),))
assert Order(y).as_expr_variables(((x, 0),)) == (y, ((x, 0), (y, 0)))
assert Order(y).as_expr_variables(((x, 0), (y, 0))) == (y, ((x, 0), (y, 0)))
def test_contains_0():
assert Order(1, x).contains(Order(1, x))
assert Order(1, x).contains(Order(1))
assert Order(1).contains(Order(1, x)) is False
def test_contains_1():
assert Order(x).contains(Order(x))
assert Order(x).contains(Order(x**2))
assert not Order(x**2).contains(Order(x))
assert not Order(x).contains(Order(1/x))
assert not Order(1/x).contains(Order(exp(1/x)))
assert not Order(x).contains(Order(exp(1/x)))
assert Order(1/x).contains(Order(x))
assert Order(exp(1/x)).contains(Order(x))
assert Order(exp(1/x)).contains(Order(1/x))
assert Order(exp(1/x)).contains(Order(exp(1/x)))
assert Order(exp(2/x)).contains(Order(exp(1/x)))
assert not Order(exp(1/x)).contains(Order(exp(2/x)))
def test_contains_2():
assert Order(x).contains(Order(y)) is None
assert Order(x).contains(Order(y*x))
assert Order(y*x).contains(Order(x))
assert Order(y).contains(Order(x*y))
assert Order(x).contains(Order(y**2*x))
def test_contains_3():
assert Order(x*y**2).contains(Order(x**2*y)) is None
assert Order(x**2*y).contains(Order(x*y**2)) is None
def test_contains_4():
assert Order(sin(1/x**2)).contains(Order(cos(1/x**2))) is None
assert Order(cos(1/x**2)).contains(Order(sin(1/x**2))) is None
def test_contains():
assert Order(1, x) not in Order(1)
assert Order(1) in Order(1, x)
raises(TypeError, lambda: Order(x*y**2) in Order(x**2*y))
def test_add_1():
assert Order(x + x) == Order(x)
assert Order(3*x - 2*x**2) == Order(x)
assert Order(1 + x) == Order(1, x)
assert Order(1 + 1/x) == Order(1/x)
assert Order(ln(x) + 1/ln(x)) == Order(ln(x))
assert Order(exp(1/x) + x) == Order(exp(1/x))
assert Order(exp(1/x) + 1/x**20) == Order(exp(1/x))
def test_ln_args():
assert O(log(x)) + O(log(2*x)) == O(log(x))
assert O(log(x)) + O(log(x**3)) == O(log(x))
assert O(log(x*y)) + O(log(x) + log(y)) == O(log(x*y))
def test_multivar_0():
assert Order(x*y).expr == x*y
assert Order(x*y**2).expr == x*y**2
assert Order(x*y, x).expr == x
assert Order(x*y**2, y).expr == y**2
assert Order(x*y*z).expr == x*y*z
assert Order(x/y).expr == x/y
assert Order(x*exp(1/y)).expr == x*exp(1/y)
assert Order(exp(x)*exp(1/y)).expr == exp(1/y)
def test_multivar_0a():
assert Order(exp(1/x)*exp(1/y)).expr == exp(1/x + 1/y)
def test_multivar_1():
assert Order(x + y).expr == x + y
assert Order(x + 2*y).expr == x + y
assert (Order(x + y) + x).expr == (x + y)
assert (Order(x + y) + x**2) == Order(x + y)
assert (Order(x + y) + 1/x) == 1/x + Order(x + y)
assert Order(x**2 + y*x).expr == x**2 + y*x
def test_multivar_2():
assert Order(x**2*y + y**2*x, x, y).expr == x**2*y + y**2*x
def test_multivar_mul_1():
assert Order(x + y)*x == Order(x**2 + y*x, x, y)
def test_multivar_3():
assert (Order(x) + Order(y)).args in [
(Order(x), Order(y)),
(Order(y), Order(x))]
assert Order(x) + Order(y) + Order(x + y) == Order(x + y)
assert (Order(x**2*y) + Order(y**2*x)).args in [
(Order(x*y**2), Order(y*x**2)),
(Order(y*x**2), Order(x*y**2))]
assert (Order(x**2*y) + Order(y*x)) == Order(x*y)
def test_issue_3468():
y = Symbol('y', negative=True)
z = Symbol('z', complex=True)
# check that Order does not modify assumptions about symbols
Order(x)
Order(y)
Order(z)
assert x.is_positive is None
assert y.is_positive is False
assert z.is_positive is None
def test_leading_order():
assert (x + 1 + 1/x**5).extract_leading_order(x) == ((1/x**5, O(1/x**5)),)
assert (1 + 1/x).extract_leading_order(x) == ((1/x, O(1/x)),)
assert (1 + x).extract_leading_order(x) == ((1, O(1, x)),)
assert (1 + x**2).extract_leading_order(x) == ((1, O(1, x)),)
assert (2 + x**2).extract_leading_order(x) == ((2, O(1, x)),)
assert (x + x**2).extract_leading_order(x) == ((x, O(x)),)
def test_leading_order2():
assert set((2 + pi + x**2).extract_leading_order(x)) == set(((pi, O(1, x)),
(S(2), O(1, x))))
assert set((2*x + pi*x + x**2).extract_leading_order(x)) == set(((2*x, O(x)),
(x*pi, O(x))))
def test_order_leadterm():
assert O(x**2)._eval_as_leading_term(x) == O(x**2)
def test_order_symbols():
e = x*y*sin(x)*Integral(x, (x, 1, 2))
assert O(e) == O(x**2*y, x, y)
assert O(e, x) == O(x**2)
def test_nan():
assert O(nan) == nan
assert not O(x).contains(nan)
def test_O1():
assert O(1, x) * x == O(x)
assert O(1, y) * x == O(1, y)
def test_getn():
# other lines are tested incidentally by the suite
assert O(x).getn() == 1
assert O(x/log(x)).getn() == 1
assert O(x**2/log(x)**2).getn() == 2
assert O(x*log(x)).getn() == 1
raises(NotImplementedError, lambda: (O(x) + O(y)).getn())
def test_diff():
assert O(x**2).diff(x) == O(x)
def test_getO():
assert (x).getO() is None
assert (x).removeO() == x
assert (O(x)).getO() == O(x)
assert (O(x)).removeO() == 0
assert (z + O(x) + O(y)).getO() == O(x) + O(y)
assert (z + O(x) + O(y)).removeO() == z
raises(NotImplementedError, lambda: (O(x) + O(y)).getn())
def test_leading_term():
from sympy import digamma
assert O(1/digamma(1/x)) == O(1/log(x))
def test_eval():
assert Order(x).subs(Order(x), 1) == 1
assert Order(x).subs(x, y) == Order(y)
assert Order(x).subs(y, x) == Order(x)
assert Order(x).subs(x, x + y) == Order(x + y, (x, -y))
assert (O(1)**x).is_Pow
def test_issue_4279():
a, b = symbols('a b')
assert O(a, a, b) + O(1, a, b) == O(1, a, b)
assert O(b, a, b) + O(1, a, b) == O(1, a, b)
assert O(a + b, a, b) + O(1, a, b) == O(1, a, b)
assert O(1, a, b) + O(a, a, b) == O(1, a, b)
assert O(1, a, b) + O(b, a, b) == O(1, a, b)
assert O(1, a, b) + O(a + b, a, b) == O(1, a, b)
def test_issue_4855():
assert 1/O(1) != O(1)
assert 1/O(x) != O(1/x)
assert 1/O(x, (x, oo)) != O(1/x, (x, oo))
f = Function('f')
assert 1/O(f(x)) != O(1/x)
def test_order_conjugate_transpose():
x = Symbol('x', real=True)
y = Symbol('y', imaginary=True)
assert conjugate(Order(x)) == Order(conjugate(x))
assert conjugate(Order(y)) == Order(conjugate(y))
assert conjugate(Order(x**2)) == Order(conjugate(x)**2)
assert conjugate(Order(y**2)) == Order(conjugate(y)**2)
assert transpose(Order(x)) == Order(transpose(x))
assert transpose(Order(y)) == Order(transpose(y))
assert transpose(Order(x**2)) == Order(transpose(x)**2)
assert transpose(Order(y**2)) == Order(transpose(y)**2)
def test_order_noncommutative():
A = Symbol('A', commutative=False)
assert Order(A + A*x, x) == Order(1, x)
assert (A + A*x)*Order(x) == Order(x)
assert (A*x)*Order(x) == Order(x**2, x)
assert expand((1 + Order(x))*A*A*x) == A*A*x + Order(x**2, x)
assert expand((A*A + Order(x))*x) == A*A*x + Order(x**2, x)
assert expand((A + Order(x))*A*x) == A*A*x + Order(x**2, x)
def test_issue_6753():
assert (1 + x**2)**10000*O(x) == O(x)
def test_order_at_infinity():
assert Order(1 + x, (x, oo)) == Order(x, (x, oo))
assert Order(3*x, (x, oo)) == Order(x, (x, oo))
assert Order(x, (x, oo))*3 == Order(x, (x, oo))
assert -28*Order(x, (x, oo)) == Order(x, (x, oo))
assert Order(Order(x, (x, oo)), (x, oo)) == Order(x, (x, oo))
assert Order(Order(x, (x, oo)), (y, oo)) == Order(x, (x, oo), (y, oo))
assert Order(3, (x, oo)) == Order(1, (x, oo))
assert Order(x**2 + x + y, (x, oo)) == O(x**2, (x, oo))
assert Order(x**2 + x + y, (y, oo)) == O(y, (y, oo))
assert Order(2*x, (x, oo))*x == Order(x**2, (x, oo))
assert Order(2*x, (x, oo))/x == Order(1, (x, oo))
assert Order(2*x, (x, oo))*x*exp(1/x) == Order(x**2*exp(1/x), (x, oo))
assert Order(2*x, (x, oo))*x*exp(1/x)/ln(x)**3 == Order(x**2*exp(1/x)*ln(x)**-3, (x, oo))
assert Order(x, (x, oo)) + 1/x == 1/x + Order(x, (x, oo)) == Order(x, (x, oo))
assert Order(x, (x, oo)) + 1 == 1 + Order(x, (x, oo)) == Order(x, (x, oo))
assert Order(x, (x, oo)) + x == x + Order(x, (x, oo)) == Order(x, (x, oo))
assert Order(x, (x, oo)) + x**2 == x**2 + Order(x, (x, oo))
assert Order(1/x, (x, oo)) + 1/x**2 == 1/x**2 + Order(1/x, (x, oo)) == Order(1/x, (x, oo))
assert Order(x, (x, oo)) + exp(1/x) == exp(1/x) + Order(x, (x, oo))
assert Order(x, (x, oo))**2 == Order(x**2, (x, oo))
assert Order(x, (x, oo)) + Order(x**2, (x, oo)) == Order(x**2, (x, oo))
assert Order(x, (x, oo)) + Order(x**-2, (x, oo)) == Order(x, (x, oo))
assert Order(x, (x, oo)) + Order(1/x, (x, oo)) == Order(x, (x, oo))
assert Order(x, (x, oo)) - Order(x, (x, oo)) == Order(x, (x, oo))
assert Order(x, (x, oo)) + Order(1, (x, oo)) == Order(x, (x, oo))
assert Order(x, (x, oo)) + Order(x**2, (x, oo)) == Order(x**2, (x, oo))
assert Order(1/x, (x, oo)) + Order(1, (x, oo)) == Order(1, (x, oo))
assert Order(x, (x, oo)) + Order(exp(1/x), (x, oo)) == Order(x, (x, oo))
assert Order(x**3, (x, oo)) + Order(exp(2/x), (x, oo)) == Order(x**3, (x, oo))
assert Order(x**-3, (x, oo)) + Order(exp(2/x), (x, oo)) == Order(exp(2/x), (x, oo))
# issue 7207
assert Order(exp(x), (x, oo)).expr == Order(2*exp(x), (x, oo)).expr == exp(x)
assert Order(y**x, (x, oo)).expr == Order(2*y**x, (x, oo)).expr == exp(log(y)*x)
def test_mixing_order_at_zero_and_infinity():
assert (Order(x, (x, 0)) + Order(x, (x, oo))).is_Add
assert Order(x, (x, 0)) + Order(x, (x, oo)) == Order(x, (x, oo)) + Order(x, (x, 0))
assert Order(Order(x, (x, oo))) == Order(x, (x, oo))
# not supported (yet)
raises(NotImplementedError, lambda: Order(x, (x, 0))*Order(x, (x, oo)))
raises(NotImplementedError, lambda: Order(x, (x, oo))*Order(x, (x, 0)))
raises(NotImplementedError, lambda: Order(Order(x, (x, oo)), y))
raises(NotImplementedError, lambda: Order(Order(x), (x, oo)))
def test_order_at_some_point():
assert Order(x, (x, 1)) == Order(1, (x, 1))
assert Order(2*x - 2, (x, 1)) == Order(x - 1, (x, 1))
assert Order(-x + 1, (x, 1)) == Order(x - 1, (x, 1))
assert Order(x - 1, (x, 1))**2 == Order((x - 1)**2, (x, 1))
assert Order(x - 2, (x, 2)) - O(x - 2, (x, 2)) == Order(x - 2, (x, 2))
def test_order_subs_limits():
# issue 3333
assert (1 + Order(x)).subs(x, 1/x) == 1 + Order(1/x, (x, oo))
assert (1 + Order(x)).limit(x, 0) == 1
# issue 5769
assert ((x + Order(x**2))/x).limit(x, 0) == 1
assert Order(x**2).subs(x, y - 1) == Order((y - 1)**2, (y, 1))
assert Order(10*x**2, (x, 2)).subs(x, y - 1) == Order(1, (y, 3))
def test_issue_9192():
assert O(1)*O(1) == O(1)
assert O(1)**O(1) == O(1)
def test_performance_of_adding_order():
l = list(x**i for i in range(1000))
l.append(O(x**1001))
assert Add(*l).subs(x,1) == O(1)
|
|
# Copyright (C) 2018 and later: Unicode, Inc. and others.
# License & terms of use: http://www.unicode.org/copyright.html
# Python 2/3 Compatibility (ICU-20299)
# TODO(ICU-20301): Remove this.
from __future__ import print_function
from . import *
from .. import *
from .. import utils
from ..request_types import *
def get_gnumake_rules(build_dirs, requests, makefile_vars, **kwargs):
makefile_string = ""
# Common Variables
common_vars = kwargs["common_vars"]
for key, value in sorted(makefile_vars.items()):
makefile_string += "{KEY} = {VALUE}\n".format(
KEY = key,
VALUE = value
)
makefile_string += "\n"
# Directories
dirs_timestamp_file = "{TMP_DIR}/dirs.timestamp".format(**common_vars)
makefile_string += "DIRS = {TIMESTAMP_FILE}\n\n".format(
TIMESTAMP_FILE = dirs_timestamp_file
)
makefile_string += "{TIMESTAMP_FILE}:\n\t$(MKINSTALLDIRS) {ALL_DIRS}\n\techo timestamp > {TIMESTAMP_FILE}\n\n".format(
TIMESTAMP_FILE = dirs_timestamp_file,
ALL_DIRS = " ".join(build_dirs).format(**common_vars)
)
# Generate Rules
make_rules = []
for request in requests:
make_rules += get_gnumake_rules_helper(request, **kwargs)
# Main Commands
for rule in make_rules:
if isinstance(rule, MakeFilesVar):
makefile_string += "{NAME} = {FILE_LIST}\n\n".format(
NAME = rule.name,
FILE_LIST = files_to_makefile(rule.files, wrap = True, **kwargs),
)
continue
if isinstance(rule, MakeStringVar):
makefile_string += "define {NAME}\n{CONTENT}\nendef\nexport {NAME}\n\n".format(
NAME = rule.name,
CONTENT = rule.content
)
continue
assert isinstance(rule, MakeRule)
header_line = "{OUT_FILE}: {DEP_FILES} {DEP_LITERALS} | $(DIRS)".format(
OUT_FILE = files_to_makefile([rule.output_file], **kwargs),
DEP_FILES = files_to_makefile(rule.dep_files, wrap = True, **kwargs),
DEP_LITERALS = " ".join(rule.dep_literals)
)
if len(rule.cmds) == 0:
makefile_string += "%s\n\n" % header_line
continue
makefile_string += "{HEADER_LINE}\n{RULE_LINES}\n\n".format(
HEADER_LINE = header_line,
RULE_LINES = "\n".join("\t%s" % cmd for cmd in rule.cmds)
)
return makefile_string
def files_to_makefile(files, common_vars, wrap = False, **kwargs):
if len(files) == 0:
return ""
dirnames = [utils.dir_for(file).format(**common_vars) for file in files]
join_str = " \\\n\t\t" if wrap and len(files) > 2 else " "
if len(files) == 1:
return "%s/%s" % (dirnames[0], files[0].filename)
elif len(set(dirnames)) == 1:
return "$(addprefix %s/,%s)" % (dirnames[0], join_str.join(file.filename for file in files))
else:
return join_str.join("%s/%s" % (d, f.filename) for d,f in zip(dirnames, files))
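# Example outputs of files_to_makefile above (illustrative, made-up paths):
#   one file                 -> "out/root.res"
#   several, same directory  -> "$(addprefix out/,a.res b.res)"; with wrap=True and
#                               more than two files the names are joined with " \\\n\t\t"
#   mixed directories        -> "out/a.res tmp/b.lst"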
def get_gnumake_rules_helper(request, common_vars, **kwargs):
if isinstance(request, PrintFileRequest):
var_name = "%s_CONTENT" % request.name.upper()
return [
MakeStringVar(
name = var_name,
content = request.content
),
MakeRule(
name = request.name,
dep_literals = [],
dep_files = [],
output_file = request.output_file,
cmds = [
"echo \"$${VAR_NAME}\" > {MAKEFILENAME}".format(
VAR_NAME = var_name,
MAKEFILENAME = files_to_makefile([request.output_file], common_vars),
**common_vars
)
]
)
]
if isinstance(request, CopyRequest):
return [
MakeRule(
name = request.name,
dep_literals = [],
dep_files = [request.input_file],
output_file = request.output_file,
cmds = ["cp %s %s" % (
files_to_makefile([request.input_file], common_vars),
files_to_makefile([request.output_file], common_vars))
]
)
]
if isinstance(request, VariableRequest):
return [
MakeFilesVar(
name = request.name.upper(),
files = request.input_files
)
]
if request.tool.name == "make":
cmd_template = "$(MAKE) {ARGS}"
elif request.tool.name == "gentest":
cmd_template = "$(INVOKE) $(GENTEST) {ARGS}"
else:
assert isinstance(request.tool, IcuTool)
cmd_template = "$(INVOKE) $(TOOLBINDIR)/{TOOL} {{ARGS}}".format(
TOOL = request.tool.name
)
if isinstance(request, SingleExecutionRequest):
cmd = utils.format_single_request_command(request, cmd_template, common_vars)
dep_files = request.all_input_files()
if len(request.output_files) > 1:
# Special case for multiple output files: Makefile rules should have only one
# output file apiece. More information:
# https://www.gnu.org/software/automake/manual/html_node/Multiple-Outputs.html
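# Sketch of the generated Makefile pattern for this case (illustrative; the
# request name and output paths are made up):
#   REQNAME_ALL = $(TMP_DIR)/reqname.timestamp
#   $(TMP_DIR)/reqname.timestamp: <input deps> | $(DIRS)
#       <tool command>
#       echo timestamp > $(TMP_DIR)/reqname.timestamp
#   out/file1.res: $(REQNAME_ALL) | $(DIRS)
#   out/file2.res: $(REQNAME_ALL) | $(DIRS)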
timestamp_var_name = "%s_ALL" % request.name.upper()
timestamp_file = TmpFile("%s.timestamp" % request.name)
rules = [
MakeFilesVar(
name = timestamp_var_name,
files = [timestamp_file]
),
MakeRule(
name = "%s_all" % request.name,
dep_literals = [],
dep_files = dep_files,
output_file = timestamp_file,
cmds = [
cmd,
"echo timestamp > {MAKEFILENAME}".format(
MAKEFILENAME = files_to_makefile([timestamp_file], common_vars)
)
]
)
]
for i, file in enumerate(request.output_files):
rules += [
MakeRule(
name = "%s_%d" % (request.name, i),
dep_literals = ["$(%s)" % timestamp_var_name],
dep_files = [],
output_file = file,
cmds = []
)
]
return rules
elif len(dep_files) > 5:
# For nicer printing of long input lists, use a helper variable.
dep_var_name = "%s_DEPS" % request.name.upper()
return [
MakeFilesVar(
name = dep_var_name,
files = dep_files
),
MakeRule(
name = request.name,
dep_literals = ["$(%s)" % dep_var_name],
dep_files = [],
output_file = request.output_files[0],
cmds = [cmd]
)
]
else:
return [
MakeRule(
name = request.name,
dep_literals = [],
dep_files = dep_files,
output_file = request.output_files[0],
cmds = [cmd]
)
]
if isinstance(request, RepeatedExecutionRequest):
rules = []
dep_literals = []
# To keep from repeating the same dep files many times, make a variable.
if len(request.common_dep_files) > 0:
dep_var_name = "%s_DEPS" % request.name.upper()
dep_literals = ["$(%s)" % dep_var_name]
rules += [
MakeFilesVar(
name = dep_var_name,
files = request.common_dep_files
)
]
# Add a rule for each individual file.
for loop_vars in utils.repeated_execution_request_looper(request):
(_, specific_dep_files, input_file, output_file) = loop_vars
name_suffix = input_file.filename[input_file.filename.rfind("/")+1:input_file.filename.rfind(".")]
cmd = utils.format_repeated_request_command(
request,
cmd_template,
loop_vars,
common_vars
)
rules += [
MakeRule(
name = "%s_%s" % (request.name, name_suffix),
dep_literals = dep_literals,
dep_files = specific_dep_files + [input_file],
output_file = output_file,
cmds = [cmd]
)
]
return rules
assert False
|
|
class CodeConstructor:
"""Contains lists of variables and expressions to be written as code.
`CodeConstructor` objects contain:
1) An ordered list of atoms for the code to use
2) A PNCollection of PNSymbol objects
3) A PNCollection of expressions to be calculated
Once the `CodeConstructor` is initialized with these objects, it
can be used to construct various types of code. For example, the
`CppDeclarations` method will output a list of declarations of the
atoms. Similar methods are available for function input
arguments, class initializer lists, and the final evaluations
needed to calculate the input `Expressions`.
Support for other languages or constructions can be added by
adding more method functions to this class.
Note that it is generally necessary to obey a strict ordering for
defining variables. The functions of this class assume that the
ordering in which the variables were defined in python should
remain the same in the output code. Because of the structure of
the `PNCollection` objects, it should be hard to define the
variables out of order, so this should not require anything from
the user. (That is why `PNCollection` is a subclass of the basic
`OrderedDict` object.) However, if new functions are added
here, they must obey that ordering.
"""
def __init__(self, Variables, Expressions):
AtomSet = set([])
self.Variables = Variables
self.Expressions = Expressions
for Expression in self.Expressions:
try:
AtomSet.update(Expression.substitution_atoms)
except TypeError:
pass
LastAtomsLength = 0
while(len(AtomSet) != LastAtomsLength):
LastAtomsLength = len(AtomSet)
for Atom in list(AtomSet):
if (Atom.substitution_atoms):
AtomSet.update(Atom.substitution_atoms)
self.Atoms = []
for sym in self.Variables:
if sym in AtomSet:
self.Atoms.append(sym)
@staticmethod
def const(e):
if e.constant:
return 'const '
else:
return ''
@staticmethod
def dtype(e):
if e.datatype:
return e.datatype
else:
return 'double'
def AddDependencies(self, Expressions):
AtomSet = set([])
for Expression in Expressions:
if (Expression.substitution_atoms):
AtomSet.update(Expression.substitution_atoms)
LastAtomsLength = 0
while(len(AtomSet) != LastAtomsLength):
LastAtomsLength = len(AtomSet)
for Atom in list(AtomSet):
if (Atom.substitution_atoms):
AtomSet.update(Atom.substitution_atoms)
OldAtoms = self.Atoms[:]
self.Atoms = []
for sym in self.Variables:
if sym in AtomSet or sym in OldAtoms:
self.Atoms.append(sym)
# for Expression in Expressions:
# try:
# for sym in Expression.Variables:
# if sym in AtomSet or sym in OldAtoms:
# self.Atoms.append(sym)
# except:
# pass
def CppDeclarations(self, Indent=4):
"""Create declaration statements for C++
For example, if the `Variables` object contains atoms m1, m2,
t, and x referred to in the `Expressions` object, where m1 and
m2 are constant, and t and x are variables, the declaration
list should be
const double m1, m2;
double t, x;
The code knows which atoms need to be declared at the
beginning, and which ones should be `const`, for example. For
C++, the default datatype is `double`; if the atom was created
with a different datatype, that will be used appropriately.
"""
from textwrap import TextWrapper
wrapper = TextWrapper(width=120)
wrapper.initial_indent = ''
wrapper.subsequent_indent = ''
datatype = ''
Declarations = ''
Names = []
for atom in self.Atoms:
thisdatatype = CodeConstructor.const(atom) + CodeConstructor.dtype(atom) + ' '
if thisdatatype != datatype:
if Names:
Declarations += wrapper.fill(', '.join(Names)) + ";\n"
Names = []
datatype = thisdatatype
wrapper.initial_indent = ' '*Indent + thisdatatype
wrapper.subsequent_indent = ' '*len(wrapper.initial_indent)
Names.append(self.Variables[atom])
if Names:
Declarations += wrapper.fill(', '.join(Names)) + ";\n"
return Declarations.rstrip()
def CppInputArguments(self, Indent=12):
"""Create basic input arguments for C++
The fundamental variables are listed, along with their data
types and `const` if the variable is constant. This would be
an appropriate string to represent the input arguments for a
function or class constructor to calculate the `Expressions`
of this CodeConstructor object.
For example, if the `Variables` object contains atoms m1, m2,
t, and x referred to in the `Expressions` object, where m1 and
m2 are constant, and t and x are variables, the input argument
list should be
const double m1_i, const double m2_i, double t_i, double x_i
"""
from textwrap import TextWrapper
wrapper = TextWrapper(width=120)
wrapper.initial_indent = ' '*Indent
wrapper.subsequent_indent = wrapper.initial_indent
InputArguments = ['const {0} {1}_i'.format(self.dtype(atom), self.Variables[atom])
for atom in self.Atoms if atom.fundamental]
return wrapper.fill(', '.join(InputArguments)).lstrip()
def CppInitializations(self, Indent=4):
"""Create initialization list for C++
For example, if the `Variables` object contains atoms m1, m2,
t, and x referred to in the `Expressions` object, where m1 and
m2 are constant, and t and x are variables, the initialization
list should be
m1(m1_i), m2(m2_i), t(t_i), x(x_i)
The quantities m1_i, etc., appear in the input-argument list
output by the method `CppInputArguments`.
"""
from textwrap import TextWrapper
wrapper = TextWrapper(width=120)
wrapper.initial_indent = ' '*Indent
wrapper.subsequent_indent = wrapper.initial_indent
def Initialization(atom):
if atom.datatype and (atom.datatype=='std::vector<double>' or atom.datatype=='std::vector<std::complex<double> >'):
return '{0}({1})'.format(self.Variables[atom], len(atom.substitution))
if atom.fundamental:
return '{0}({0}_i)'.format(self.Variables[atom])
else:
return '{0}({1})'.format(self.Variables[atom], atom.ccode())
Initializations = [Initialization(atom) for atom in self.Atoms]
return wrapper.fill(', '.join(Initializations))
def CppEvaluations(self, Indent=4):
"""Evaluate all derived variables in C++
This function uses the `substitution` expressions for the
derived variables. This output is appropriate for updating
the values of the variables at each step of an integration,
for example.
"""
from textwrap import TextWrapper
wrapper = TextWrapper(width=120)
wrapper.initial_indent = ' '*Indent
wrapper.subsequent_indent = wrapper.initial_indent + ' '
def Evaluation(atom):
def Ccode(a) :
try:
return a.ccode()
except :
from sympy.printing import ccode
return ccode(a)
if atom.datatype and (atom.datatype=='std::vector<double>' or atom.datatype=='std::vector<std::complex<double> >') :
return '\n'.join([wrapper.fill('{0}[{1}] = {2};'.format(self.Variables[atom], i, Ccode(atom.substitution[i])))
for i in range(len(atom.substitution))])
else:
return wrapper.fill('{0} = {1};'.format(self.Variables[atom], atom.ccode()))
return '\n'.join([Evaluation(atom) for atom in self.Atoms if not atom.fundamental and not atom.constant])
def CppEvaluateExpressions(self, Indent=4, Expressions=None):
"""Declare and define the `Expressions` for C++
The output of this function declares and defines the
`Expressions` as individual variables. An optional dictionary
of expressions allows just a subset of this object's
expressions to be output; if this argument is not present, all
will be output.
"""
from textwrap import TextWrapper
wrapper = TextWrapper(width=120)
wrapper.initial_indent = ' '*Indent
wrapper.subsequent_indent = wrapper.initial_indent+' '
Evaluations = []
if not Expressions:
Expressions=self.Expressions
for Expression in Expressions:
try:
Evaluations.append(wrapper.fill('{0}{1} {2} = {3};'.format(self.const(Expression), self.dtype(Expression),
Expressions[Expression], Expression.ccode())))
except TypeError:
pass
return '\n'.join(Evaluations)
def CppExpressionsAsFunctions(self, Indent=4, Expressions=None):
"""Define functions to calculate the `Expressions` in C++
The output of this function gives C++ functions to calculate
the `Expressions`, assuming the functions are member methods
in a class, and so can access the atoms of the expression
without explicit arguments. An optional dictionary of
expressions allows just a subset of this object's expressions
to be output; if this argument is not present, all will be
output.
"""
def dtype(e):
if e.datatype:
return e.datatype
else:
return 'double'
from textwrap import TextWrapper
from PNObjects import PNCollection
wrapper = TextWrapper(width=120)
wrapper.initial_indent = ' '*Indent + ' return '
wrapper.subsequent_indent = ' '*Indent + ' '
Evaluations = []
if not Expressions:
Expressions=self.Expressions
for Expression in Expressions:
ExprColl = PNCollection()
for atom in Expression.substitution_atoms:
if atom not in self.Variables:
try:
ExprColl.AddDerivedVariable(str(atom), atom.substitution,
substitution_atoms=atom.substitution_atoms,
datatype=atom.datatype)
except TypeError:
pass
MiniConstructor = CodeConstructor(self.Variables, ExprColl)
Evaluations.append(
' '*Indent + dtype(Expression) + ' ' + Expressions[Expression] + '() {\n'
+ MiniConstructor.CppEvaluateExpressions(Indent+2) + '\n'
+ wrapper.fill(Expression.ccode())
+ ';\n' + ' '*Indent + '}'
)
return '\n'.join(Evaluations)
|
|
from abc import ABCMeta
from abonapp.models import Abon
from accounts_app.models import UserProfile
from django.conf import settings
from django.shortcuts import resolve_url
from django.test import TestCase, override_settings
from group_app.models import Group
from gw_app.models import NASModel
from gw_app.nas_managers import MikrotikTransmitter
class MyBaseTestCase(metaclass=ABCMeta):
def _client_get_check_login(self, url):
"""
Checks that the url is protected from unauthorized access.
:param url: url to request
:return: the response for an authorized (logged-in) request
"""
r = self.client.get(url)
self.assertRedirects(
r, "%s?next=%s" % (
getattr(settings, 'LOGIN_URL'), url
)
)
self.client.force_login(self.adminuser)
r = self.client.get(url)
self.assertEqual(r.status_code, 200)
return r
def setUp(self):
grp = Group.objects.create(title='Grp1')
a1 = Abon.objects.create_user(
telephone='+79781234567',
username='abon',
password='passw1'
)
a1.group = grp
a1.save(update_fields=('group',))
my_admin = UserProfile.objects.create_superuser(
'+79781234567',
'local_superuser', 'ps'
)
self.adminuser = my_admin
self.abon = a1
self.group = grp
class NASModelTestCase(MyBaseTestCase, TestCase):
def setUp(self):
super(NASModelTestCase, self).setUp()
nas = NASModel.objects.create(
title='Title',
ip_address='192.168.8.12',
ip_port=123,
auth_login='admin',
auth_passw='admin',
default=True,
nas_type='mktk'
)
self.nas = nas
@override_settings(LANGUAGE_CODE='en', LANGUAGES=(('en', 'English'),))
def test_create(self):
url = resolve_url('gw_app:add')
self._client_get_check_login(url)
# test successful nas creation
r = self.client.post(url, data={
'title': 'Test success nas',
'ip_address': '192.168.8.10',
'ip_port': 1254,
'auth_login': '_',
'auth_passw': '_',
'nas_type': 'mktk'
})
self.assertEqual(r.status_code, 302)
msg = r.cookies.get('messages')
self.assertIn('New NAS has been created', msg.output())
NASModel.objects.get(
title='Test success nas',
ip_address='192.168.8.10',
ip_port=1254, auth_login='_',
auth_passw='_'
)
# test error: ip_port exceeds the allowed range
r = self.client.post(url, data={
'title': 'New nas',
'ip_address': '192.168.8.13',
'ip_port': 8755877855798,
'auth_login': '_',
'auth_passw': '_'
})
self.assertEqual(r.status_code, 200)
self.assertFormError(
response=r, form='form', field='ip_port',
errors='Ensure this value is less than or equal to %(limit_value)s.' % {
'limit_value': 32767
})
# test get request
r = self.client.get(url)
self.assertEqual(r.status_code, 200)
# test error: duplicate title
r = self.client.post(url, data={
'title': 'Test success nas',
'ip_address': '192.168.8.14',
'ip_port': 2543,
'auth_login': '_w',
'auth_passw': '_v'
})
self.assertEqual(r.status_code, 200)
self.assertFormError(
response=r, form='form', field='title',
errors='%(model_name)s with this %(field_label)s already exists.' % {
'model_name': NASModel._meta.verbose_name,
'field_label': NASModel._meta.get_field('title').verbose_name
}
)
# test error: duplicate default gateway
r = self.client.post(url, data={
'title': 'New again nas',
'ip_address': '192.168.8.15',
'ip_port': 9873,
'auth_login': '_w',
'auth_passw': '_v',
'default': True
})
self.assertEqual(r.status_code, 200)
self.assertFormError(
response=r, form='form', field='default',
errors='Can be only one default gateway'
)
# test error: duplicate ip_address
r = self.client.post(url, data={
'title': 'New again nas2',
'ip_address': '192.168.8.10',
'ip_port': 1254,
'auth_login': '_w',
'auth_passw': '_v'
})
self.assertEqual(r.status_code, 200)
self.assertFormError(
response=r, form='form', field='ip_address',
errors='%(model_name)s with this %(field_label)s already exists.' % {
'model_name': NASModel._meta.verbose_name,
'field_label': NASModel._meta.get_field(
'ip_address'
).verbose_name
})
@override_settings(LANGUAGE_CODE='en', LANGUAGES=(('en', 'English'),))
def test_change(self):
url = resolve_url('gw_app:edit', self.nas.pk)
self._client_get_check_login(url)
# test get request
self.client.get(url)
# test successful change
r = self.client.post(url, data={
'title': 'New again nas2 changed',
'ip_address': '192.168.8.12',
'ip_port': 7865,
'auth_login': '_w_c',
'auth_passw': '_v_c',
'nas_type': 'mktk'
})
self.assertRedirects(r, resolve_url('gw_app:edit', self.nas.pk))
msg = r.cookies.get('messages')
self.assertIn('Update successfully', msg.output())
NASModel.objects.get(
title='New again nas2 changed', ip_address='192.168.8.12',
ip_port=7865, auth_login='_w_c', auth_passw='_v_c'
)
@override_settings(LANGUAGE_CODE='en', LANGUAGES=(('en', 'English'),))
def test_delete(self):
url = resolve_url('gw_app:add')
self._client_get_check_login(url)
r = self.client.post(url, data={
'title': 'Test success nas_2',
'ip_address': '192.168.8.11',
'ip_port': 1254,
'auth_login': '_',
'auth_passw': '_',
'nas_type': 'mktk'
})
self.assertEqual(r.status_code, 302)
o = NASModel.objects.get(
title='Test success nas_2', ip_address='192.168.8.11',
ip_port=1254, auth_login='_', auth_passw='_'
)
url = resolve_url('gw_app:del', o.pk)
# test get request
r = self.client.get(url)
self.assertEqual(r.status_code, 200)
# test deleting
r = self.client.post(url)
self.assertRedirects(r, resolve_url('gw_app:home'))
msg = r.cookies.get('messages')
self.assertIn('Server successfully removed', msg.output())
try:
NASModel.objects.get(title='Test success nas_2')
raise self.failureException("NAS not removed")
except NASModel.DoesNotExist:
pass
# try to remove default nas
nas_id = self.nas.pk
r = self.client.post(resolve_url('gw_app:del', nas_id))
# self.assertRedirects(
# r, expected_url=resolve_url('gw_app:edit', nas_id)
# )
msg = r.cookies.get('messages')
self.assertIn('You cannot remove default server', msg.output())
def test_get_nas_manager(self):
r = self.nas.get_nas_manager_klass()
self.assertIs(r, MikrotikTransmitter)
r = self.nas.get_nas_manager()
self.assertIsInstance(r, MikrotikTransmitter)
|
|
# Lint as: python3
# Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit test for utilities module."""
import json
import os
from googleapiclient import discovery
from googleapiclient import errors
from googleapiclient import http as googleapiclient_http
import httplib2
import mock
import tensorflow as tf
from tensorflow_cloud import version
from tensorflow_cloud.utils import google_api_client
class GoogleApiClientTest(tf.test.TestCase):
def setUp(self):
super(GoogleApiClientTest, self).setUp()
self.addCleanup(mock.patch.stopall)
# Setting wait time to 1 sec to speed up test execution.
google_api_client._POLL_INTERVAL_IN_SECONDS = 1
self._project_id = "project-a"
self._job_id = "job_id"
self.mock_discovery_build = mock.patch.object(
discovery, "build", autospec=True
).start()
self.mock_apiclient = mock.Mock()
self.mock_discovery_build.return_value = self.mock_apiclient
self.mock_request = mock.Mock()
self.mock_apiclient.projects().jobs(
).get.return_value = self.mock_request
self.mock_apiclient.projects().jobs(
).cancel.return_value = self.mock_request
self._local_config_path = os.path.join(
self.get_temp_dir(), "config.json")
google_api_client._LOCAL_CONFIG_PATH = self._local_config_path
# TODO(b/177023448) Remove mock on logging.error here and below.
def test_wait_for_aip_training_job_completion_non_blocking_success(self):
self.mock_request.execute.return_value = {
"state": "SUCCEEDED",
}
status = google_api_client.wait_for_aip_training_job_completion(
self._job_id, self._project_id)
self.assertTrue(status)
self.mock_request.execute.assert_called_once()
job_name = "projects/{}/jobs/{}".format(self._project_id, self._job_id)
self.mock_apiclient.projects().jobs().get.assert_called_with(
name=job_name)
def test_wait_for_aip_training_job_completion_non_blocking_cancelled(self):
self.mock_request.execute.return_value = {
"state": "CANCELLED",
}
status = google_api_client.wait_for_aip_training_job_completion(
self._job_id, self._project_id)
self.assertTrue(status)
self.mock_request.execute.assert_called_once()
job_name = "projects/{}/jobs/{}".format(self._project_id, self._job_id)
self.mock_apiclient.projects().jobs().get.assert_called_with(
name=job_name)
def test_wait_for_aip_training_job_completion_non_blocking_failed(self):
self.mock_request.execute.return_value = {
"state": "FAILED", "errorMessage": "test_error_message"}
status = google_api_client.wait_for_aip_training_job_completion(
self._job_id, self._project_id)
self.assertFalse(status)
self.mock_request.execute.assert_called_once()
def test_wait_for_aip_training_job_completion_multiple_checks_success(self):
self.mock_request.execute.side_effect = [
{"state": "PREPARING"},
{"state": "RUNNING"},
{"state": "SUCCEEDED"}
]
status = google_api_client.wait_for_aip_training_job_completion(
self._job_id, self._project_id)
self.assertTrue(status)
self.assertEqual(3, self.mock_request.execute.call_count)
def test_wait_for_aip_training_job_completion_multiple_checks_failed(self):
self.mock_request.execute.side_effect = [
{"state": "PREPARING"},
{"state": "RUNNING"},
{"state": "FAILED", "errorMessage": "test_error_message"}]
status = google_api_client.wait_for_aip_training_job_completion(
self._job_id, self._project_id)
self.assertFalse(status)
self.assertEqual(3, self.mock_request.execute.call_count)
def test_is_aip_training_job_running_with_completed_job(self):
self.mock_request.execute.side_effect = [
{"state": "SUCCEEDED"},
{"state": "CANCELLED"},
{"state": "FAILED", "errorMessage": "test_error_message"}]
succeeded_status = google_api_client.is_aip_training_job_running(
self._job_id, self._project_id)
self.assertFalse(succeeded_status)
job_name = "projects/{}/jobs/{}".format(self._project_id, self._job_id)
self.mock_apiclient.projects().jobs().get.assert_called_with(
name=job_name)
cancelled_status = google_api_client.is_aip_training_job_running(
self._job_id, self._project_id)
self.assertFalse(cancelled_status)
failed_status = google_api_client.is_aip_training_job_running(
self._job_id, self._project_id)
self.assertFalse(failed_status)
self.assertEqual(3, self.mock_request.execute.call_count)
def test_is_aip_training_job_running_with_running_job(self):
self.mock_request.execute.side_effect = [
{"state": "QUEUED"},
{"state": "PREPARING"},
{"state": "RUNNING"},
{"state": "CANCELLING"}]
queued_status = google_api_client.is_aip_training_job_running(
self._job_id, self._project_id)
self.assertTrue(queued_status)
job_name = "projects/{}/jobs/{}".format(self._project_id, self._job_id)
self.mock_apiclient.projects().jobs().get.assert_called_with(
name=job_name)
preparing_status = google_api_client.is_aip_training_job_running(
self._job_id, self._project_id)
self.assertTrue(preparing_status)
running_status = google_api_client.is_aip_training_job_running(
self._job_id, self._project_id)
self.assertTrue(running_status)
canceling_status = google_api_client.is_aip_training_job_running(
self._job_id, self._project_id)
self.assertTrue(canceling_status)
self.assertEqual(4, self.mock_request.execute.call_count)
def test_stop_aip_training_job_with_running_job(self):
self.mock_request.execute.return_value = {}
google_api_client.stop_aip_training_job(self._job_id, self._project_id)
job_name = "projects/{}/jobs/{}".format(self._project_id, self._job_id)
self.mock_apiclient.projects().jobs().cancel.assert_called_with(
name=job_name)
def test_stop_aip_training_job_with_completed_job(self):
self.mock_request.execute.side_effect = errors.HttpError(
httplib2.Response(info={"status": 400}), b""
)
google_api_client.stop_aip_training_job(self._job_id, self._project_id)
job_name = "projects/{}/jobs/{}".format(self._project_id, self._job_id)
self.mock_apiclient.projects().jobs().cancel.assert_called_with(
name=job_name)
def test_stop_aip_training_job_with_failing_request(self):
self.mock_request.execute.side_effect = errors.HttpError(
httplib2.Response(info={"status": 404}), b"")
job_name = "projects/{}/jobs/{}".format(self._project_id, self._job_id)
with self.assertRaises(errors.HttpError):
google_api_client.stop_aip_training_job(
self._job_id, self._project_id)
self.mock_apiclient.projects().jobs().cancel.assert_called_with(
name=job_name)
def test_get_client_environment_name_with_kaggle(self):
os.environ["KAGGLE_CONTAINER_NAME"] = "test_container_name"
self.assertEqual(
google_api_client.get_client_environment_name(),
google_api_client.ClientEnvironment.KAGGLE_NOTEBOOK.name)
def test_get_client_environment_name_with_hosted_notebook(self):
os.environ["DL_PATH"] = "test_dl_path"
os.environ["USER"] = "jupyter"
self.assertEqual(
google_api_client.get_client_environment_name(),
google_api_client.ClientEnvironment.HOSTED_NOTEBOOK.name)
def test_get_client_environment_name_with_hosted_dlvm(self):
os.environ["DL_PATH"] = "test_dl_path"
self.assertEqual(
google_api_client.get_client_environment_name(),
google_api_client.ClientEnvironment.DLVM.name)
@mock.patch.object(google_api_client, "_is_module_present", autospec=True)
@mock.patch.object(google_api_client, "_get_env_variable", autospec=True)
def test_get_client_environment_name_with_hosted_unknown(
self, mock_getenv, mock_modules):
mock_getenv.return_value = None
mock_modules.return_value = {}
self.assertEqual(
google_api_client.get_client_environment_name(),
google_api_client.ClientEnvironment.UNKNOWN.name)
@mock.patch.object(google_api_client, "_is_module_present", autospec=True)
@mock.patch.object(google_api_client, "_get_env_variable", autospec=True)
def test_get_client_environment_name_with_hosted_colab(
self, mock_getenv, mock_modules):
mock_getenv.return_value = None
mock_modules.return_value = True
self.assertEqual(
google_api_client.get_client_environment_name(),
google_api_client.ClientEnvironment.COLAB.name)
@mock.patch.object(google_api_client, "_is_module_present", autospec=True)
@mock.patch.object(google_api_client, "_get_env_variable", autospec=True)
def test_get_client_environment_name_with_hosted_dl_container(
self, mock_getenv, mock_modules):
mock_getenv.return_value = None
mock_modules.side_effect = [False, True]
self.assertEqual(
google_api_client.get_client_environment_name(),
google_api_client.ClientEnvironment.DL_CONTAINER.name)
def test_get_or_set_consent_status_rejected(self):
config_data = {}
config_data["telemetry_rejected"] = True
# Create the config path if it does not already exist
os.makedirs(os.path.dirname(self._local_config_path), exist_ok=True)
with open(self._local_config_path, "w") as config_json:
json.dump(config_data, config_json)
self.assertFalse(google_api_client.get_or_set_consent_status())
def test_get_or_set_consent_status_verified(self):
config_data = {}
config_data["notification_version"] = version.__version__
# Create the config path if it does not already exist
os.makedirs(os.path.dirname(self._local_config_path), exist_ok=True)
with open(self._local_config_path, "w") as config_json:
json.dump(config_data, config_json)
self.assertTrue(google_api_client.get_or_set_consent_status())
def test_get_or_set_consent_status_notify_user(self):
if os.path.exists(self._local_config_path):
os.remove(self._local_config_path)
self.assertTrue(google_api_client.get_or_set_consent_status())
with open(self._local_config_path) as config_json:
config_data = json.load(config_json)
self.assertDictContainsSubset(
config_data, {"notification_version": version.__version__})
@mock.patch.object(google_api_client,
"get_or_set_consent_status", autospec=True)
def test_TFCloudHttpRequest_with_rejected_consent(
self, mock_consent_status):
mock_consent_status.return_value = False
http_request = google_api_client.TFCloudHttpRequest(
googleapiclient_http.HttpMockSequence([({"status": "200"}, "{}")]),
object(),
"fake_uri",
)
self.assertIsInstance(http_request, googleapiclient_http.HttpRequest)
self.assertIn("user-agent", http_request.headers)
self.assertDictEqual(
{"user-agent": f"tf-cloud/{version.__version__} ()"},
http_request.headers)
@mock.patch.object(google_api_client,
"get_or_set_consent_status", autospec=True)
@mock.patch.object(google_api_client,
"get_client_environment_name", autospec=True)
def test_TFCloudHttpRequest_with_consent(
self, mock_get_env_name, mock_consent_status):
mock_consent_status.return_value = True
mock_get_env_name.return_value = "TEST_ENV"
google_api_client.TFCloudHttpRequest.set_telemetry_dict({})
http_request = google_api_client.TFCloudHttpRequest(
googleapiclient_http.HttpMockSequence([({"status": "200"}, "{}")]),
object(),
"fake_uri",
)
self.assertIsInstance(http_request, googleapiclient_http.HttpRequest)
self.assertIn("user-agent", http_request.headers)
header_comment = "client_environment:TEST_ENV;"
full_header = f"tf-cloud/{version.__version__} ({header_comment})"
self.assertDictEqual({"user-agent": full_header}, http_request.headers)
@mock.patch.object(google_api_client,
"get_or_set_consent_status", autospec=True)
@mock.patch.object(google_api_client,
"get_client_environment_name", autospec=True)
def test_TFCloudHttpRequest_with_additional_metrics(
self, mock_get_env_name, mock_consent_status):
google_api_client.TFCloudHttpRequest.set_telemetry_dict(
{"TEST_KEY1": "TEST_VALUE1"})
mock_consent_status.return_value = True
mock_get_env_name.return_value = "TEST_ENV"
http_request = google_api_client.TFCloudHttpRequest(
googleapiclient_http.HttpMockSequence([({"status": "200"}, "{}")]),
object(),
"fake_uri",
)
self.assertIsInstance(http_request, googleapiclient_http.HttpRequest)
self.assertIn("user-agent", http_request.headers)
header_comment = "TEST_KEY1:TEST_VALUE1;client_environment:TEST_ENV;"
full_header = f"tf-cloud/{version.__version__} ({header_comment})"
self.assertDictEqual({"user-agent": full_header}, http_request.headers)
# Verify when telemetry dict is refreshed it is used in new http request
google_api_client.TFCloudHttpRequest.set_telemetry_dict(
{"TEST_KEY2": "TEST_VALUE2"})
mock_consent_status.return_value = True
mock_get_env_name.return_value = "TEST_ENV"
http_request = google_api_client.TFCloudHttpRequest(
googleapiclient_http.HttpMockSequence([({"status": "200"}, "{}")]),
object(),
"fake_uri",
)
header_comment = "TEST_KEY2:TEST_VALUE2;client_environment:TEST_ENV;"
full_header = f"tf-cloud/{version.__version__} ({header_comment})"
self.assertDictEqual({"user-agent": full_header}, http_request.headers)
if __name__ == "__main__":
tf.test.main()
|
|
#!/usr/bin/python
import sys, struct, codecs
sys.stdout = codecs.getwriter('utf-8')(sys.stdout)
STRING_TABLE = 0x6F80 # FaithX
STRING_TABLE = 0x10b60 # Original
#STRING_TABLE = 0x108e0 + 12 # No idea
STRING_TABLE = 0x10b30 # Mine?
storage_map = {}
FORMS = [
("Exit", 0xC600),
#('Exit', 0xc640),
#('Boot', 0xc6f0),
#('Power', 0xc810),
#('Security',0xd140),
#('Advanced',0xd390),
#('Main', 0x106b0),
#('OEM', 0x10a20)
]
def fguid(s):
a, b, c, d = struct.unpack("<IHH8s", s)
ds = ''.join('%02x'%ord(c) for c in d)
return "%08x-%04x-%04x-%s-%s"%(a,b,c,ds[:4], ds[4:])
def hexdump(s):
return ' '.join('%02x'%ord(c) for c in s)
class HiiPack(object):
def __init__(self, data, offset):
self.offset = offset
#print " Constructing HiiPack at 0x%x" % offset
hdr = data[offset:offset+6]
#print " Has HDRlen: 0x%x DataLen: 0x%x" % (len(hdr), len(data))
#print repr(hdr)
self.length, self.type = struct.unpack("<IH", hdr)
#print " Real Length: 0x%x, type = 0x%x" % (self.length, self.type)
#print " Table end 0x%x" % (offset+len(hdr)+self.length)
self.end_offset = offset + len(hdr) + self.length
assert self.length + len(hdr) < len(data)
self.data = data[offset+6:offset+self.length]
class StringTable(HiiPack):
def __init__(self, data, offset):
#print "Constructing StringTable"
HiiPack.__init__(self, data, offset)
assert self.type == 0x2
self.strings = []
hdr = self.data[:16]
lnoff, plnoff, count, attributes = struct.unpack("<IIII", hdr)
#print type(hdr), len(hdr), len(self.data), count
offsets = struct.unpack("<%dI"%count, self.data[16:16+count*4])
self.name = self._getstring(lnoff)
self.printablename = self._getstring(plnoff)
for i in range(count):
self.strings.append(self._getstring(offsets[i]))
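# Note: offsets stored in the string pack are relative to the start of the pack
# header, but self.data begins 6 bytes in (after the length/type fields), hence
# the off-6 adjustment in _getstring below.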
def _getstring(self, off):
return self.data[off-6:].decode('utf-16le').split('\0')[0]
def __getitem__(self, a):
return self.strings.__getitem__(a)
def showinfo(self, ts=''):
print ts+"String table:"
print ts+" Language: %s (%s)"%(self.name, self.printablename)
print ts+" String count: %d"%len(self.strings)
class FormOp(object):
EFI_IFR_FORM_OP = 0x01
EFI_IFR_SUBTITLE_OP = 0x02
EFI_IFR_TEXT_OP = 0x03
EFI_IFR_GRAPHIC_OP = 0x04
EFI_IFR_ONE_OF_OP = 0x05
EFI_IFR_CHECKBOX_OP = 0x06
EFI_IFR_NUMERIC_OP = 0x07
EFI_IFR_PASSWORD_OP = 0x08
EFI_IFR_ONE_OF_OPTION_OP = 0x09 # ONEOF OPTION field
EFI_IFR_SUPPRESS_IF_OP = 0x0A
EFI_IFR_END_FORM_OP = 0x0B
EFI_IFR_HIDDEN_OP = 0x0C
EFI_IFR_END_FORM_SET_OP = 0x0D
EFI_IFR_FORM_SET_OP = 0x0E
EFI_IFR_REF_OP = 0x0F
EFI_IFR_END_ONE_OF_OP = 0x10
EFI_IFR_END_OP = EFI_IFR_END_ONE_OF_OP
EFI_IFR_INCONSISTENT_IF_OP = 0x11
EFI_IFR_EQ_ID_VAL_OP = 0x12
EFI_IFR_EQ_ID_ID_OP = 0x13
EFI_IFR_EQ_ID_LIST_OP = 0x14
EFI_IFR_AND_OP = 0x15
EFI_IFR_OR_OP = 0x16
EFI_IFR_NOT_OP = 0x17
EFI_IFR_END_IF_OP = 0x18 # for endif of
# inconsistentif,
# suppressif, grayoutif
EFI_IFR_GRAYOUT_IF_OP = 0x19
EFI_IFR_DATE_OP = 0x1A
EFI_IFR_TIME_OP = 0x1B
EFI_IFR_STRING_OP = 0x1C
EFI_IFR_LABEL_OP = 0x1D
EFI_IFR_SAVE_DEFAULTS_OP = 0x1E
EFI_IFR_RESTORE_DEFAULTS_OP= 0x1F
EFI_IFR_BANNER_OP = 0x20
EFI_IFR_INVENTORY_OP = 0x21
EFI_IFR_EQ_VAR_VAL_OP = 0x22
EFI_IFR_ORDERED_LIST_OP = 0x23
EFI_IFR_VARSTORE_OP = 0x24
EFI_IFR_VARSTORE_SELECT_OP = 0x25
EFI_IFR_VARSTORE_SELECT_PAIR_OP = 0x26
EFI_IFR_LAST_OPCODE = EFI_IFR_VARSTORE_SELECT_PAIR_OP
EFI_IFR_OEM_OP = 0xFE
EFI_IFR_NV_ACCESS_COMMAND = 0xFF
INDENTS = {
#0 : 0,
#0x73 : 0,
EFI_IFR_FORM_OP : 1,
EFI_IFR_SUBTITLE_OP : 0,
EFI_IFR_TEXT_OP : 0,
EFI_IFR_GRAPHIC_OP : 0,
EFI_IFR_ONE_OF_OP : 1,
EFI_IFR_CHECKBOX_OP : 0,
EFI_IFR_NUMERIC_OP : 0,
EFI_IFR_PASSWORD_OP : 0,
EFI_IFR_ONE_OF_OPTION_OP : 0,
EFI_IFR_SUPPRESS_IF_OP : 1,
EFI_IFR_END_FORM_OP : -1,
EFI_IFR_HIDDEN_OP : 0,
EFI_IFR_END_FORM_SET_OP : -1,
EFI_IFR_FORM_SET_OP : 1,
EFI_IFR_REF_OP : 0,
EFI_IFR_END_OP : -1,
EFI_IFR_INCONSISTENT_IF_OP : 0,
EFI_IFR_EQ_ID_VAL_OP : 0,
EFI_IFR_EQ_ID_ID_OP : 0,
EFI_IFR_EQ_ID_LIST_OP : 0,
EFI_IFR_AND_OP : 0,
EFI_IFR_OR_OP : 0,
EFI_IFR_NOT_OP : 0,
EFI_IFR_END_IF_OP : -1,
EFI_IFR_GRAYOUT_IF_OP : 1,
EFI_IFR_DATE_OP : 0,
EFI_IFR_TIME_OP : 0,
EFI_IFR_STRING_OP : 0,
EFI_IFR_LABEL_OP : 0,
EFI_IFR_SAVE_DEFAULTS_OP : 0,
EFI_IFR_RESTORE_DEFAULTS_OP: 0,
EFI_IFR_BANNER_OP : 0,
EFI_IFR_INVENTORY_OP : 0,
EFI_IFR_EQ_VAR_VAL_OP : 0,
EFI_IFR_ORDERED_LIST_OP : 0,
EFI_IFR_VARSTORE_OP : 0,
EFI_IFR_VARSTORE_SELECT_OP : 0,
EFI_IFR_VARSTORE_SELECT_PAIR_OP : 0,
EFI_IFR_LAST_OPCODE : 0,
EFI_IFR_OEM_OP : 0,
EFI_IFR_NV_ACCESS_COMMAND : 0,
}
def __init__(self, data, stable):
self.stable = stable
self.opcode, self.length = struct.unpack("<BB", data[:2])
self.payload = data[2:self.length]
if self.opcode not in self.INDENTS:
raise RuntimeError("Undefined opcode: 0x%x" % self.opcode)
self.indent = self.INDENTS[self.opcode]
def get_info(self):
guid, fsid, hid, cb, cls, subcls, nvsize = struct.unpack("<16sHHQHHH", self.payload)
return self.stable[fsid]
def showinfo(self, s, ts=''):
if self.opcode == self.EFI_IFR_FORM_OP:
id, title = struct.unpack("<HH", self.payload)
print ts+"Form ID:0x%04x Name:'%s'"%(id, s[title])
elif self.opcode == self.EFI_IFR_SUBTITLE_OP:
print ts+"Subtitle: '%s'"%s[struct.unpack("<H", self.payload)[0]]
elif self.opcode == self.EFI_IFR_TEXT_OP:
if len(self.payload) != 9:
print ts+"BROKEN TEXT OP %r"%self.payload
else:
hid, tid, t2id, flags, key=struct.unpack("<HHHBH", self.payload)
print ts+"Text: '%s','%s' Flags:0x%x Key:0x%x"%(s[tid],s[t2id],flags,key)
if s[hid] and s[hid] != ' ':
print ts+"\Help text: '%s'"%s[hid]
elif self.opcode == self.EFI_IFR_FORM_SET_OP:
guid, fsid, hid, cb, cls, subcls, nvsize = struct.unpack("<16sHHQHHH", self.payload)
print ts+"Form Set '%s' Class %d-%d NvSize 0x%x Callback 0x%x"%(s[fsid],cls, subcls, nvsize, cb)
print ts+"\GUID: %s"%fguid(guid)
if s[hid] and s[hid] != ' ':
print ts+"\Help text: '%s'"%s[hid]
elif self.opcode == self.EFI_IFR_END_FORM_SET_OP:
print ts+"End Form Set"
elif self.opcode == self.EFI_IFR_END_FORM_OP:
print ts+"End Form"
elif self.opcode == self.EFI_IFR_GRAYOUT_IF_OP:
print ts+"Grayout If"
elif self.opcode == self.EFI_IFR_SUPPRESS_IF_OP:
print ts+"Suppress If"
elif self.opcode == self.EFI_IFR_END_IF_OP:
print ts+"End If",hexdump(self.payload)
elif self.opcode == self.EFI_IFR_EQ_ID_VAL_OP:
qid, width, val = struct.unpack("<HBH", self.payload)
print ts+"EQ [0x%x<%d>] == 0x%x"%(qid, width, val)
elif self.opcode == self.EFI_IFR_EQ_ID_ID_OP:
qid, width, qid2, width2 = struct.unpack("<HBHB", self.payload)
print ts+"EQ [0x%x<%d>] == [0x%x.%d]"%(qid, width, qid2, width2)
elif self.opcode == self.EFI_IFR_EQ_ID_LIST_OP:
qid, width, length = struct.unpack("<HBH", self.payload[:5])
l = struct.unpack("<%dH"%length, self.payload[5:])
print ts+"LIST [0x%x<%d>] in (%s)"%(qid, width, ','.join(["0x%x"%i for i in l]))
elif self.opcode == self.EFI_IFR_AND_OP:
print ts+"AND"
elif self.opcode == self.EFI_IFR_OR_OP:
print ts+"OR"
elif self.opcode == self.EFI_IFR_NOT_OP:
print ts+"NOT"
elif self.opcode == self.EFI_IFR_ONE_OF_OP:
qid, width, pid, hid = struct.unpack("<HBHH", self.payload)
storage_map[qid] = s[pid]
print ts+"One Of [0x%x<%d>] '%s'"%(qid, width, s[pid])
if s[hid] and s[hid] != ' ':
print ts+"\Help text: '%s'"%s[hid]
elif self.opcode == self.EFI_IFR_ONE_OF_OPTION_OP:
oid, value, flags, key = struct.unpack("<HHBH", self.payload)
print ts+"Option '%s' = 0x%x Flags 0x%x Key 0x%x"%(s[oid], value, flags, key)
elif self.opcode == self.EFI_IFR_END_ONE_OF_OP:
print ts+"End One Of"
elif self.opcode == self.EFI_IFR_LABEL_OP:
lid = struct.unpack("<H", self.payload)[0]
print ts+"Label ID: 0x%x"%lid
elif self.opcode == self.EFI_IFR_REF_OP:
fid, pid, hid, flags, key = struct.unpack("<HHHBH", self.payload)
print ts+"Reference: '%s' Form ID 0x%x Flags 0x%x Key 0x%x"%(s[pid], fid, flags, key)
if s[hid] and s[hid] != ' ':
print ts+"\Help text: '%s'"%s[hid]
elif self.opcode in (self.EFI_IFR_TIME_OP, self.EFI_IFR_DATE_OP, self.EFI_IFR_NUMERIC_OP):
qid, width, pid, hid, flags, key, min, max, step, default = struct.unpack("<HBHHBHHHHH", self.payload)
t = {self.EFI_IFR_TIME_OP:'Time', self.EFI_IFR_DATE_OP:'Date', self.EFI_IFR_NUMERIC_OP:'Numeric'}[self.opcode]
print ts+"%s: '%s' [0x%x<%d>] %d-%d Step %d Default %d Flags 0x%x"%(t, s[pid], qid, width, min, max, step, default, flags)
if s[hid] and s[hid] != ' ':
print ts+"\Help text: '%s'"%s[hid]
elif self.opcode == self.EFI_IFR_PASSWORD_OP:
qid, width, pid, hid, flags, key, mins, maxs, encoding = struct.unpack("<HBHHBHBBH", self.payload)
storage_map[qid] = s[pid]
print ts+"Password: '%s' [0x%x<%d>] Flags 0x%x Key 0x%x Size %d-%d Encoding %d"%(s[pid], qid, width, flags, key, mins, maxs, encoding)
if s[hid] and s[hid] != ' ':
print ts+"\Help text: '%s'"%s[hid]
else:
print ts+"Opcode 0x%x (%d)"%(self.opcode, self.length),hexdump(self.payload)
class Form(HiiPack):
def __init__(self, data, offset, stable=None):
#print "Constructing form.."
HiiPack.__init__(self, data, offset)
data = self.data
self.opcodes = []
while len(data):
op = FormOp(data, stable)
#print "Created ", len(self.opcodes), op.length
assert op.length
data = data[op.length:]
self.opcodes.append(op)
def fetch_opcodes(self, opcode_wanted):
return filter(lambda x: x.opcode == opcode_wanted, self.opcodes)
def __repr__(self):
formset = self.fetch_opcodes(FormOp.EFI_IFR_FORM_SET_OP)
assert len(formset) == 1
return "<FormSet name=%s offset=0x%x>" % (formset[0].get_info(), self.offset)
return "%s" % formset
def locate_formset(self):
pass
def showinfo(self, stringtable, ts=''):
ind = 0
in_if = False
fstk = []
for op in self.opcodes:
if op.opcode == op.EFI_IFR_FORM_OP:
fstk.append(ind)
if op.indent < 0:
ind += op.indent
ots = ts+' '*ind
if in_if and op.opcode in (op.EFI_IFR_SUPPRESS_IF_OP, op.EFI_IFR_GRAYOUT_IF_OP):
ots = ts+' '*(ind-1)+'+'
try:
op.showinfo(stringtable, ots)
#except:
# print ts+"ERROR DECODING OPCODE 0x%x LEN 0x%x"%(op.opcode, op.length)
finally:
pass
if (not in_if or op.opcode not in (op.EFI_IFR_SUPPRESS_IF_OP, op.EFI_IFR_GRAYOUT_IF_OP) ) and op.indent > 0:
ind += op.indent
if op.opcode in (op.EFI_IFR_SUPPRESS_IF_OP, op.EFI_IFR_GRAYOUT_IF_OP):
in_if = True
elif op.opcode == op.EFI_IFR_END_IF_OP:
in_if = False
if op.opcode == op.EFI_IFR_END_FORM_OP:
xind = fstk.pop()
if xind != ind:
print "WARNING: Indentation mismatch"
ind = xind
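# Illustrative output of showinfo() above (a sketch; the names, IDs and flag values
# are made up, and GUID/help-text lines are omitted). Each opcode prints one line,
# nested one space deeper per INDENTS level:
#
#   Form Set 'Setup' Class 1-0 NvSize 0x100 Callback 0x0
#    Form ID:0x0001 Name:'Main'
#     One Of [0x10<1>] 'Quiet Boot'
#      Option 'Disabled' = 0x0 Flags 0x0 Key 0x0
#      Option 'Enabled' = 0x1 Flags 0x1 Key 0x0
#     End One Of
#    End Form
#   End Form Set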
#filename = "vt_bios.fd"
#filename = sys.argv[1]
#filename = "../test1/fv-00000010.bin"
#filename = "../test1/fe3542fe-c1d3-4ef8-7c65-8048606ff670-SetupUtility.sec0.sub2.pe"
#pe = open(filename, "rb").read()
def dump_setup(pe):
strings = StringTable(pe, STRING_TABLE)
strings.showinfo()
for fn, off in FORMS[0:]:
print
print "Reading form '%s'"%fn, off
f = Form(pe, off)
#f.showinfo(strings, ' ')
#print "Storage map:"
#for k in sorted(storage_map.keys()):
#print " 0x%x: %s"%(k,storage_map[k])
|
|
# Schema definitions for RNA browser database.
# @author Matthew Norris <matthew.norris@jic.ac.uk>
from sqlalchemy import Column, Integer, String, Text, Enum, Float, ForeignKey, ForeignKeyConstraint
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
from flask_sqlalchemy import SQLAlchemy
from flask_migrate import Migrate, MigrateCommand
from flask_script import Manager
from sqlalchemy.dialects import mysql
import database, settings
# gotta put the migrate stuff here so it can see the models
from app import app
app.config['SQLALCHEMY_DATABASE_URI'] = settings.database_uri
db = SQLAlchemy(app)
# Append a single value to a tab-separated values string. When used for structures,
# each value is the position that this particular position pairs with.
def values_str_add(str_in, value):
value = str(value)
if str_in == "":
str_in = value
else:
str_in += "\t"+value
return str_in
# Unpack a values_str string into a list of positions (entries are parsed as floats).
def values_str_unpack_int(str_in):
positions = list(map(float, str_in.split("\t")))
return positions
def values_str_unpack_float(str_in):
out = []
bits = str_in.split("\t")
for bit in bits:
out.append(None if bit == "None" else float(bit))
return out
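# Minimal round-trip sketch for the helpers above (illustrative values only).
# Measurements and structures are stored as tab-separated strings, so packing with
# values_str_add() and unpacking with the values_str_unpack_* helpers is symmetric
# (note that values_str_unpack_int currently parses entries as floats).
def _values_str_example():
    packed = ""
    for value in (0, 3, 0, 1):
        packed = values_str_add(packed, value)
    assert packed == "0\t3\t0\t1"
    assert values_str_unpack_int(packed) == [0.0, 3.0, 0.0, 1.0]
    assert values_str_unpack_float("1.5\tNone\t0.25") == [1.5, None, 0.25]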
# A Gene describes a locus identifier for a gene, plus any metadata associated with the locus.
# Genes are generic - they can be associated with multiple strains.
class Gene(db.Model):
__tablename__ = "gene"
id = Column(String(256), primary_key=True) # TAIR locus ID (e.g. AT1G01225)
def __init__(self, id=None):
self.id = id
def __repr__(self):
return "<Gene %r>" % (self.id)
# A Transcript is effectively an RNA sequence identifier, which can be shared amongst multiple strains.
# Sequences are mapped to Transcripts via the Feature entity.
class Transcript(db.Model):
__tablename__ = "transcript"
id = Column(String(256), primary_key=True) # TAIR transcript ID (e.g. AT1G01225.1)
gene_id = Column(String(256), ForeignKey("gene.id"), nullable=False)
def __init__(self, id=None, gene_id=None):
self.id = id
self.gene_id = gene_id
def __repr__(self):
return "<Transcript %r>" % (self.id)
# Retrieve sequences for a transcript, keyed by strain ID.
def get_sequences(self, strain_id=None):
if strain_id != None:
strain_sql = "AND feature.strain_id = :strain_id"
else:
strain_sql = ""
# given the transcript ID, fetch the feature sequences in the correct order.
sql = """
SELECT
feature.strain_id,
feature.direction,
SUBSTR(
chromosome.sequence,
feature.start,
feature.end - feature.start + 1
) seq
FROM chromosome, feature
WHERE
feature.strain_id = chromosome.strain_id AND
feature.chromosome_id = chromosome.chromosome_id AND
feature.type_id = 'exon'
AND feature.transcript_id = :transcript_id
{0}
ORDER BY feature.strain_id, start
"""
sql = sql.format(strain_sql)
sql_params = {"transcript_id": str(self.id)}
if strain_id != None:
sql_params["strain_id"] = strain_id
results = database.db_session.execute(sql, sql_params)
# collect data about the sequences
transcript_seqs = {}
for row in results:
strain_id = row["strain_id"]
# print("Found ["+strain_id+"]")
if strain_id not in transcript_seqs:
transcript_seqs[strain_id] = {}
transcript_seqs[strain_id]["seq"] = Seq("")
# APPEND the feature sequence
transcript_seqs[strain_id]["seq"] += row["seq"]
transcript_seqs[strain_id]["direction"] = row["direction"]
# make collection of SeqRecord objects.
seqs_out = {}
for strain_id in transcript_seqs:
seq = transcript_seqs[strain_id]["seq"]
# if direction is reverse, do reverse complement
if transcript_seqs[strain_id]["direction"] == "reverse":
seq = seq.reverse_complement()
seq = Seq(str(seq).replace("T", "U"))
seqs_out[strain_id] = SeqRecord(seq, id=strain_id, description="")
return seqs_out
# convenience method to fetch a single SeqRecord sequence.
# Sequence is always reverse complemented if it's a backwards gene
def get_sequence(self, strain_id=None):
if strain_id == None:
strain_id = settings.reference_strain_id
vals = list(self.get_sequences(strain_id).values())
if len(vals) > 0:
return vals[0]
else:
return None
def get_sequence_str(self, strain_id=None):
return str(self.get_sequence(strain_id).seq)
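# Illustrative use of the sequence helpers above (a sketch; "Col_0" is a placeholder
# strain ID, the transcript and gene IDs come from the examples in the comments
# above, and database.db_session must already be configured):
#
#   transcript = Transcript("AT1G01225.1", "AT1G01225")
#   record = transcript.get_sequence("Col_0")      # Bio.SeqRecord holding an RNA Seq
#   rna = transcript.get_sequence_str("Col_0")     # plain string such as "AUGGC..."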
# Describes a strain.
class Strain(db.Model):
__tablename__ = "strain"
id = Column(String(256), nullable=False, primary_key=True)
description = Column(Text, nullable=False)
def __init__(self, id=None, description=None):
self.id = id
self.description = description
def __repr__(self):
return "<Strain %r>" % (self.id)
# Describes the sequence of a chromosome for a particular strain. This is the only place
# where nucleotide sequence data is stored.
class Chromosome(db.Model):
__tablename__ = "chromosome"
strain_id = Column(String(256), ForeignKey("strain.id"), primary_key=True)
chromosome_id = Column(String(256), primary_key=True)
sequence = Column(Text(4294967295), nullable=False)
def __init__(self, strain_id=None, chromosome_id=None, sequence=None):
self.strain_id = strain_id
self.chromosome_id = chromosome_id
self.sequence = sequence
def __repr__(self):
return "<Chromosome "+self.strain_id+", "+self.chromosome_id+" >"
# A Feature is an annotation (e.g. an exon) of a Chromosome sequence for a particular
# strain. This is the main destination of all the *.gff3 data.
class Feature(db.Model):
__tablename__ = "feature"
# This constraint maps the Feature to a unique Chromosome entry.
__table_args__ = (
ForeignKeyConstraint(
["strain_id", "chromosome_id"],
["chromosome.strain_id", "chromosome.chromosome_id"]
),
)
# A unique identifier for this feature.
id = Column(Integer, primary_key=True)
# Transcript identifier - this is a string
transcript_id = Column(String(256), ForeignKey("transcript.id"), nullable=False)
# What kind of annotation is this? Maybe have a foreign key pointing to a special meta table?
# Or use an enum
type_id = Column(String(256), nullable=False)
# These properties describe where we can find the associated sequence.
strain_id = Column(String(256), ForeignKey("strain.id"), nullable=False)
chromosome_id = Column(String(256), nullable=False)
start = Column(Integer, nullable=False)
end = Column(Integer, nullable=False)
direction = Column(Enum("forward", "reverse"), nullable=False)
def __init__(self, transcript_id=None, type_id=None, strain_id=None, chromosome_id=None, start=None, end=None, direction=None):
self.transcript_id = transcript_id
self.type_id = type_id
self.strain_id = strain_id
self.chromosome_id = chromosome_id
self.start = start
self.end = end
self.direction = direction
def __repr__(self):
return "<Feature %r>" % (self.id)
# GeneLocation describes the location of a gene for a particular strain. This table is redundant
# since everything needed is already in the Feature table. But it is cached here for speed.
class GeneLocation(db.Model):
__tablename__ = "gene_location"
# This constraint maps the GeneLocation to a unique Chromosome entry.
__table_args__ = (
ForeignKeyConstraint(
["strain_id", "chromosome_id"],
["chromosome.strain_id", "chromosome.chromosome_id"]
),
)
gene_id = Column(String(256), ForeignKey("gene.id"), nullable=False, primary_key=True)
strain_id = Column(String(256), ForeignKey("strain.id"), nullable=False, primary_key=True)
chromosome_id = Column(String(256), nullable=False)
start = Column(Integer, nullable=False)
end = Column(Integer, nullable=False)
direction = Column(Enum("forward", "reverse"), nullable=False)
def __init__(self, gene_id=None, strain_id=None, chromosome_id=None, start=None, end=None, direction=None):
self.gene_id = gene_id
self.strain_id = strain_id
self.chromosome_id = chromosome_id
self.start = start
self.end = end
self.direction = direction
def __repr__(self):
return "<GeneLocation "+self.gene_id+", "+self.strain_id+">";
class NucleotideMeasurementRun(db.Model):
__tablename__ = "nucleotide_measurement_run"
id = Column(Integer, primary_key=True, autoincrement=False)
strain_id = Column(String(256), ForeignKey("strain.id"), primary_key=False)
description = Column(Text, nullable=False)
def __init__(self, id, strain_id, description):
self.id = id
self.strain_id = strain_id
self.description = description
def __repr__(self):
return "<NucleotideMeasurementRun %r>" % (self.id)
class StructurePredictionRun(db.Model):
__tablename__ = "structure_prediction_run"
id = Column(Integer, primary_key=True, autoincrement=False)
strain_id = Column(String(256), ForeignKey("strain.id"), primary_key=False)
description = Column(Text, nullable=False)
def __init__(self, id, strain_id, description):
self.id = id
self.strain_id = strain_id
self.description = description
def __repr__(self):
return "<StructurePredictionRun %r>" % (self.id)
# Represents the plus and minus library counts used to calculate reactivities, before
# normalisation. Despite the class name these are raw counts, not reactivities.
# TODO rename to counts
class RawReactivities(db.Model):
__tablename__ = "raw_reactivities"
id = Column(Integer, primary_key=True, autoincrement=True)
nucleotide_measurement_run_id = Column(Integer, ForeignKey("nucleotide_measurement_run.id"))
transcript_id = Column(String(256), ForeignKey("transcript.id"))
minus_values = Column(Text, nullable=False)
plus_values = Column(Text, nullable=False)
def __init__(
self,
nucleotide_measurement_run_id,
transcript_id,
minus_values,
plus_values):
self.nucleotide_measurement_run_id = nucleotide_measurement_run_id
self.transcript_id = transcript_id
self.minus_values = minus_values
self.plus_values = plus_values
def __repr__(self):
return "<RawReactivities %r>" % (self.id)
# Table that represents raw counts from each lane
# Lanes are identified by biological and technical replicate IDs
class RawReplicateCounts(db.Model):
__tablename__ = "raw_replicate_counts"
id = Column(Integer, primary_key=True, autoincrement=True)
nucleotide_measurement_run_id = Column(Integer, ForeignKey("nucleotide_measurement_run.id"))
transcript_id = Column(String(256), ForeignKey("transcript.id"))
minusplus_id = Column(String(256), nullable=False)
bio_replicate_id = Column(Integer, nullable=False)
tech_replicate_id = Column(Integer, nullable=False)
values = Column(Text, nullable=False)
def __init__(
self,
nucleotide_measurement_run_id,
transcript_id,
minusplus_id,
bio_replicate_id,
tech_replicate_id,
values):
self.nucleotide_measurement_run_id = nucleotide_measurement_run_id
self.transcript_id = transcript_id
self.minusplus_id = minusplus_id
self.bio_replicate_id = bio_replicate_id
self.tech_replicate_id = tech_replicate_id
self.values = values
def __repr__(self):
return "<RawReactivities %r>" % (self.id)
# Represents nucleotide specific measurements for a single transcript
# Generated from mapping
# Can represent normalised reactivities or alternatively ribosome profiling counts.
class NucleotideMeasurementSet(db.Model):
__tablename__ = "nucleotide_measurement_set"
id = Column(Integer, primary_key=True, autoincrement=True)
nucleotide_measurement_run_id = Column(Integer, ForeignKey("nucleotide_measurement_run.id"))
transcript_id = Column(String(256), ForeignKey("transcript.id"))
# Average number of mappings per base before any normalisation is applied
coverage = Column(Float, nullable=False)
values = Column(Text, nullable=False)
def __init__(
self,
nucleotide_measurement_run_id,
transcript_id,
coverage,
values):
self.nucleotide_measurement_run_id = nucleotide_measurement_run_id
self.transcript_id = transcript_id
self.coverage = coverage
self.values = values
def __repr__(self):
return "<NucleotideMeasurementSet %r>" % (self.id)
# Represents a structure prediction for a single RNA sequence
# The structure has base pairs and base pair probabilities stored in text fields
class Structure(db.Model):
__tablename__ = "structure"
id = Column(Integer, primary_key=True, autoincrement=True)
structure_prediction_run_id = Column(Integer, ForeignKey("structure_prediction_run.id"), nullable=False)
transcript_id = Column(String(256), ForeignKey("transcript.id"), nullable=False)
energy = Column(Float, nullable=False)
pc1 = Column(Float, nullable=False, default=0)
pc2 = Column(Float, nullable=False, default=0)
structure = Column(Text, nullable=False)
bpps = Column(Text, nullable=True)
def __init__(self, structure_prediction_run_id, transcript_id, energy, structure="", pc1=0, pc2=0):
self.structure_prediction_run_id = structure_prediction_run_id
self.transcript_id = transcript_id
self.energy = energy
self.pc1 = pc1
self.pc2 = pc2
self.structure = structure
def add_value(self, value):
self.structure = values_str_add(self.structure, value)
def get_values(self):
return values_str_unpack_int(self.structure)
def get_bpp_values(self):
if self.bpps == None:
return None
values = self.bpps.split("\t")
out = []
for value in values:
if value == "NA":
out.append(None)
else:
out.append(float(value))
return out
def __repr__(self):
return "<Structure %r>" % (self.id)
# Represents a base pair probability matrix. One per transcript at the moment
# In the future we might allow constrained BPPMs, which will be one per run ID or something
# Bppm is stored as a big text field
# This table is used when downloading the entire BPPM matrix as text
class Bppm(db.Model):
__tablename__ = "bppm"
id = Column(Integer, primary_key=True, autoincrement=True)
transcript_id = Column(String(256), ForeignKey("transcript.id"), nullable=False)
data = Column(mysql.LONGTEXT, nullable=False)
def __init__(self, transcript_id, data):
self.transcript_id = transcript_id
self.data = data # probabilities are log10 transformed
def __repr__(self):
return "<Bppm %r>" % (self.id)
# gotta put the migrate stuff here so it can see the models
migrate = Migrate(app, db)
manager = Manager(app)
manager.add_command('db', MigrateCommand)
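# Illustrative invocation of the migration commands registered above (a sketch: the
# "manage.py" name is hypothetical and assumes a separate entry-point script that
# imports this manager and calls manager.run(), which this module does not do):
#
#   python manage.py db init
#   python manage.py db migrate
#   python manage.py db upgrade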
|
|
# coding=utf-8
#
# Copyright 2014 Red Hat, Inc.
# Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
A driver wrapping the Ironic API, such that Nova may provision
bare metal resources.
"""
import base64
import gzip
import shutil
import tempfile
import time
from oslo_log import log as logging
from oslo_service import loopingcall
from oslo_utils import excutils
from oslo_utils import importutils
import six
from nova.api.metadata import base as instance_metadata
from nova.compute import arch
from nova.compute import hv_type
from nova.compute import power_state
from nova.compute import task_states
from nova.compute import vm_mode
from nova.compute import vm_states
import nova.conf
from nova import context as nova_context
from nova import exception
from nova.i18n import _
from nova.i18n import _LE
from nova.i18n import _LI
from nova.i18n import _LW
from nova import objects
from nova.virt import configdrive
from nova.virt import driver as virt_driver
from nova.virt import firewall
from nova.virt import hardware
from nova.virt.ironic import client_wrapper
from nova.virt.ironic import ironic_states
from nova.virt.ironic import patcher
ironic = None
LOG = logging.getLogger(__name__)
CONF = nova.conf.CONF
_POWER_STATE_MAP = {
ironic_states.POWER_ON: power_state.RUNNING,
ironic_states.NOSTATE: power_state.NOSTATE,
ironic_states.POWER_OFF: power_state.SHUTDOWN,
}
_UNPROVISION_STATES = (ironic_states.ACTIVE, ironic_states.DEPLOYFAIL,
ironic_states.ERROR, ironic_states.DEPLOYWAIT,
ironic_states.DEPLOYING)
_NODE_FIELDS = ('uuid', 'power_state', 'target_power_state', 'provision_state',
'target_provision_state', 'last_error', 'maintenance',
'properties', 'instance_uuid')
def map_power_state(state):
try:
return _POWER_STATE_MAP[state]
except KeyError:
LOG.warning(_LW("Power state %s not found."), state)
return power_state.NOSTATE
def _get_nodes_supported_instances(cpu_arch=None):
"""Return supported instances for a node."""
if not cpu_arch:
return []
return [(cpu_arch,
hv_type.BAREMETAL,
vm_mode.HVM)]
def _log_ironic_polling(what, node, instance):
power_state = (None if node.power_state is None else
'"%s"' % node.power_state)
tgt_power_state = (None if node.target_power_state is None else
'"%s"' % node.target_power_state)
prov_state = (None if node.provision_state is None else
'"%s"' % node.provision_state)
tgt_prov_state = (None if node.target_provision_state is None else
'"%s"' % node.target_provision_state)
LOG.debug('Still waiting for ironic node %(node)s to %(what)s: '
'power_state=%(power_state)s, '
'target_power_state=%(tgt_power_state)s, '
'provision_state=%(prov_state)s, '
'target_provision_state=%(tgt_prov_state)s',
dict(what=what,
node=node.uuid,
power_state=power_state,
tgt_power_state=tgt_power_state,
prov_state=prov_state,
tgt_prov_state=tgt_prov_state),
instance=instance)
class IronicDriver(virt_driver.ComputeDriver):
"""Hypervisor driver for Ironic - bare metal provisioning."""
capabilities = {"has_imagecache": False,
"supports_recreate": False,
"supports_migrate_to_same_host": False,
"supports_attach_interface": False
}
def __init__(self, virtapi, read_only=False):
super(IronicDriver, self).__init__(virtapi)
global ironic
if ironic is None:
ironic = importutils.import_module('ironicclient')
# NOTE(deva): work around a lack of symbols in the current version.
if not hasattr(ironic, 'exc'):
ironic.exc = importutils.import_module('ironicclient.exc')
if not hasattr(ironic, 'client'):
ironic.client = importutils.import_module(
'ironicclient.client')
self.firewall_driver = firewall.load_driver(
default='nova.virt.firewall.NoopFirewallDriver')
self.node_cache = {}
self.node_cache_time = 0
self.ironicclient = client_wrapper.IronicClientWrapper()
def _get_node(self, node_uuid):
"""Get a node by its UUID."""
return self.ironicclient.call('node.get', node_uuid,
fields=_NODE_FIELDS)
def _validate_instance_and_node(self, instance):
"""Get the node associated with the instance.
Check with the Ironic service that this instance is associated with a
node, and return the node.
"""
try:
return self.ironicclient.call('node.get_by_instance_uuid',
instance.uuid, fields=_NODE_FIELDS)
except ironic.exc.NotFound:
raise exception.InstanceNotFound(instance_id=instance.uuid)
def _node_resources_unavailable(self, node_obj):
"""Determine whether the node's resources are in an acceptable state.
Determines whether the node's resources should be presented
to Nova for use based on the current power, provision and maintenance
state. This is called after _node_resources_used, so any node that
is not used and not in AVAILABLE should be considered in a 'bad' state,
and unavailable for scheduling. Returns True if unacceptable.
"""
bad_power_states = [
ironic_states.ERROR, ironic_states.NOSTATE]
# keep NOSTATE around for compatibility
good_provision_states = [
ironic_states.AVAILABLE, ironic_states.NOSTATE]
return (node_obj.maintenance or
node_obj.power_state in bad_power_states or
node_obj.provision_state not in good_provision_states or
(node_obj.provision_state in good_provision_states and
node_obj.instance_uuid is not None))
def _node_resources_used(self, node_obj):
"""Determine whether the node's resources are currently used.
Determines whether the node's resources should be considered used
or not. A node is used when it is either in the process of putting
a new instance on the node, has an instance on the node, or is in
the process of cleaning up from a deleted instance. Returns True if
used.
If we report resources as consumed for a node that does not have an
instance on it, the resource tracker will notice there's no instances
consuming resources and try to correct us. So only nodes with an
instance attached should report as consumed here.
"""
return node_obj.instance_uuid is not None
def _parse_node_properties(self, node):
"""Helper method to parse the node's properties."""
properties = {}
for prop in ('cpus', 'memory_mb', 'local_gb'):
try:
properties[prop] = int(node.properties.get(prop, 0))
except (TypeError, ValueError):
LOG.warning(_LW('Node %(uuid)s has a malformed "%(prop)s". '
'It should be an integer.'),
{'uuid': node.uuid, 'prop': prop})
properties[prop] = 0
raw_cpu_arch = node.properties.get('cpu_arch', None)
try:
cpu_arch = arch.canonicalize(raw_cpu_arch)
except exception.InvalidArchitectureName:
cpu_arch = None
if not cpu_arch:
LOG.warning(_LW("cpu_arch not defined for node '%s'"), node.uuid)
properties['cpu_arch'] = cpu_arch
properties['raw_cpu_arch'] = raw_cpu_arch
properties['capabilities'] = node.properties.get('capabilities')
return properties
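# Illustrative input/output for _parse_node_properties above (a sketch; the values
# are made up). Given node.properties of
#     {'cpus': '8', 'memory_mb': '16384', 'local_gb': '100',
#      'cpu_arch': 'x86_64', 'capabilities': 'boot_mode:uefi'}
# the parsed result is
#     {'cpus': 8, 'memory_mb': 16384, 'local_gb': 100,
#      'cpu_arch': 'x86_64', 'raw_cpu_arch': 'x86_64',
#      'capabilities': 'boot_mode:uefi'}
# with malformed or missing numeric values falling back to 0.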
def _parse_node_instance_info(self, node, props):
"""Helper method to parse the node's instance info.
If a property cannot be looked up via instance_info, use the original
value from the properties dict. This is most likely to be correct;
it should only be incorrect if the properties were changed directly
in Ironic while an instance was deployed.
"""
instance_info = {}
# add this key because it's different in instance_info for some reason
props['vcpus'] = props['cpus']
for prop in ('vcpus', 'memory_mb', 'local_gb'):
original = props[prop]
try:
instance_info[prop] = int(node.instance_info.get(prop,
original))
except (TypeError, ValueError):
LOG.warning(_LW('Node %(uuid)s has a malformed "%(prop)s". '
'It should be an integer but its value '
'is "%(value)s".'),
{'uuid': node.uuid, 'prop': prop,
'value': node.instance_info.get(prop)})
instance_info[prop] = original
return instance_info
def _node_resource(self, node):
"""Helper method to create resource dict from node stats."""
properties = self._parse_node_properties(node)
vcpus = properties['cpus']
memory_mb = properties['memory_mb']
local_gb = properties['local_gb']
raw_cpu_arch = properties['raw_cpu_arch']
cpu_arch = properties['cpu_arch']
nodes_extra_specs = {}
# NOTE(deva): In Havana and Icehouse, the flavor was required to link
# to an arch-specific deploy kernel and ramdisk pair, and so the flavor
# also had to have extra_specs['cpu_arch'], which was matched against
# the ironic node.properties['cpu_arch'].
# With Juno, the deploy image(s) may be referenced directly by the
# node.driver_info, and a flavor no longer needs to contain any of
# these three extra specs, though the cpu_arch may still be used
# in a heterogeneous environment, if so desired.
# NOTE(dprince): we use the raw cpu_arch here because extra_specs
# filters aren't canonicalized
nodes_extra_specs['cpu_arch'] = raw_cpu_arch
# NOTE(gilliard): To assist with more precise scheduling, if the
# node.properties contains a key 'capabilities', we expect the value
# to be of the form "k1:v1,k2:v2,etc.." which we add directly as
# key/value pairs into the node_extra_specs to be used by the
# ComputeCapabilitiesFilter
capabilities = properties['capabilities']
if capabilities:
for capability in str(capabilities).split(','):
parts = capability.split(':')
if len(parts) == 2 and parts[0] and parts[1]:
nodes_extra_specs[parts[0].strip()] = parts[1]
else:
LOG.warning(_LW("Ignoring malformed capability '%s'. "
"Format should be 'key:val'."), capability)
vcpus_used = 0
memory_mb_used = 0
local_gb_used = 0
if self._node_resources_used(node):
# Node is in the process of deploying, is deployed, or is in
# the process of cleaning up from a deploy. Report all of its
# resources as in use.
instance_info = self._parse_node_instance_info(node, properties)
# Use instance_info instead of properties here because the
# properties of a deployed node can be changed, which would then
# be counted as available resources.
vcpus_used = vcpus = instance_info['vcpus']
memory_mb_used = memory_mb = instance_info['memory_mb']
local_gb_used = local_gb = instance_info['local_gb']
# Always checking allows us to catch the case where Nova thinks there
# are available resources on the Node, but Ironic does not (because it
# is not in a usable state): https://launchpad.net/bugs/1503453
if self._node_resources_unavailable(node):
# The node's current state is such that it should not present any
# of its resources to Nova
vcpus = 0
memory_mb = 0
local_gb = 0
dic = {
'hypervisor_hostname': str(node.uuid),
'hypervisor_type': self._get_hypervisor_type(),
'hypervisor_version': self._get_hypervisor_version(),
# The Ironic driver manages multiple hosts, so there are
# likely many different CPU models in use. As such it is
# impossible to provide any meaningful info on the CPU
# model of the "host"
'cpu_info': None,
'vcpus': vcpus,
'vcpus_used': vcpus_used,
'local_gb': local_gb,
'local_gb_used': local_gb_used,
'disk_available_least': local_gb - local_gb_used,
'memory_mb': memory_mb,
'memory_mb_used': memory_mb_used,
'supported_instances': _get_nodes_supported_instances(cpu_arch),
'stats': nodes_extra_specs,
'numa_topology': None,
}
return dic
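# Illustrative capabilities handling for _node_resource above (a sketch; the
# capability names are made up): a node whose properties contain
# capabilities == "boot_mode:uefi,raid_level:1" contributes
# {'boot_mode': 'uefi', 'raid_level': '1'} to nodes_extra_specs, which the
# ComputeCapabilitiesFilter can then match against flavor extra_specs.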
def _start_firewall(self, instance, network_info):
self.firewall_driver.setup_basic_filtering(instance, network_info)
self.firewall_driver.prepare_instance_filter(instance, network_info)
self.firewall_driver.apply_instance_filter(instance, network_info)
def _stop_firewall(self, instance, network_info):
self.firewall_driver.unfilter_instance(instance, network_info)
def _add_driver_fields(self, node, instance, image_meta, flavor,
preserve_ephemeral=None):
patch = patcher.create(node).get_deploy_patch(instance,
image_meta,
flavor,
preserve_ephemeral)
# Associate the node with an instance
patch.append({'path': '/instance_uuid', 'op': 'add',
'value': instance.uuid})
try:
# FIXME(lucasagomes): The "retry_on_conflict" parameter was added
# to basically causes the deployment to fail faster in case the
# node picked by the scheduler is already associated with another
# instance due bug #1341420.
self.ironicclient.call('node.update', node.uuid, patch,
retry_on_conflict=False)
except ironic.exc.BadRequest:
msg = (_("Failed to add deploy parameters on node %(node)s "
"when provisioning the instance %(instance)s")
% {'node': node.uuid, 'instance': instance.uuid})
LOG.error(msg)
raise exception.InstanceDeployFailure(msg)
def _cleanup_deploy(self, node, instance, network_info):
self._unplug_vifs(node, instance, network_info)
self._stop_firewall(instance, network_info)
def _wait_for_active(self, instance):
"""Wait for the node to be marked as ACTIVE in Ironic."""
instance.refresh()
if (instance.task_state == task_states.DELETING or
instance.vm_state in (vm_states.ERROR, vm_states.DELETED)):
raise exception.InstanceDeployFailure(
_("Instance %s provisioning was aborted") % instance.uuid)
node = self._validate_instance_and_node(instance)
if node.provision_state == ironic_states.ACTIVE:
# job is done
LOG.debug("Ironic node %(node)s is now ACTIVE",
dict(node=node.uuid), instance=instance)
raise loopingcall.LoopingCallDone()
if node.target_provision_state in (ironic_states.DELETED,
ironic_states.AVAILABLE):
# ironic is trying to delete it now
raise exception.InstanceNotFound(instance_id=instance.uuid)
if node.provision_state in (ironic_states.NOSTATE,
ironic_states.AVAILABLE):
# ironic already deleted it
raise exception.InstanceNotFound(instance_id=instance.uuid)
if node.provision_state == ironic_states.DEPLOYFAIL:
# ironic failed to deploy
msg = (_("Failed to provision instance %(inst)s: %(reason)s")
% {'inst': instance.uuid, 'reason': node.last_error})
raise exception.InstanceDeployFailure(msg)
_log_ironic_polling('become ACTIVE', node, instance)
def _wait_for_power_state(self, instance, message):
"""Wait for the node to complete a power state change."""
node = self._validate_instance_and_node(instance)
if node.target_power_state == ironic_states.NOSTATE:
raise loopingcall.LoopingCallDone()
_log_ironic_polling(message, node, instance)
def init_host(self, host):
"""Initialize anything that is necessary for the driver to function.
:param host: the hostname of the compute host.
"""
return
def _get_hypervisor_type(self):
"""Get hypervisor type."""
return 'ironic'
def _get_hypervisor_version(self):
"""Returns the version of the Ironic API service endpoint."""
return client_wrapper.IRONIC_API_VERSION[0]
def instance_exists(self, instance):
"""Checks the existence of an instance.
Checks the existence of an instance. This is an override of the
base method for efficiency.
:param instance: The instance object.
:returns: True if the instance exists. False if not.
"""
try:
self._validate_instance_and_node(instance)
return True
except exception.InstanceNotFound:
return False
def _get_node_list(self, **kwargs):
"""Helper function to return the list of nodes.
If unable to connect ironic server, an empty list is returned.
:returns: a list of raw node from ironic
"""
try:
node_list = self.ironicclient.call("node.list", **kwargs)
except exception.NovaException:
node_list = []
return node_list
def list_instances(self):
"""Return the names of all the instances provisioned.
:returns: a list of instance names.
"""
# NOTE(lucasagomes): limit == 0 is an indicator to continue
# pagination until there're no more values to be returned.
node_list = self._get_node_list(associated=True, limit=0)
context = nova_context.get_admin_context()
return [objects.Instance.get_by_uuid(context,
i.instance_uuid).name
for i in node_list]
def list_instance_uuids(self):
"""Return the UUIDs of all the instances provisioned.
:returns: a list of instance UUIDs.
"""
# NOTE(lucasagomes): limit == 0 is an indicator to continue
# pagination until there're no more values to be returned.
return list(n.instance_uuid
for n in self._get_node_list(associated=True, limit=0))
def node_is_available(self, nodename):
"""Confirms a Nova hypervisor node exists in the Ironic inventory.
:param nodename: The UUID of the node.
:returns: True if the node exists, False if not.
"""
# NOTE(comstud): We can cheat and use caching here. This method
# just needs to return True for nodes that exist. It doesn't
# matter if the data is stale. Sure, it's possible that removing a
# node from Ironic will cause this method to return True until
# the next call to 'get_available_nodes', but there shouldn't
# be much harm. There's already somewhat of a race.
if not self.node_cache:
# Empty cache, try to populate it.
self._refresh_cache()
if nodename in self.node_cache:
return True
# NOTE(comstud): Fallback and check Ironic. This case should be
# rare.
try:
self._get_node(nodename)
return True
except ironic.exc.NotFound:
return False
def _refresh_cache(self):
# NOTE(lucasagomes): limit == 0 is an indicator to continue
# pagination until there're no more values to be returned.
node_cache = {}
for node in self._get_node_list(detail=True, limit=0):
node_cache[node.uuid] = node
self.node_cache = node_cache
self.node_cache_time = time.time()
def get_available_nodes(self, refresh=False):
"""Returns the UUIDs of all nodes in the Ironic inventory.
:param refresh: Boolean value; If True run update first. Ignored by
this driver.
:returns: a list of UUIDs
"""
# NOTE(jroll) we refresh the cache every time this is called
# because it needs to happen in the resource tracker
# periodic task. This task doesn't pass refresh=True,
# unfortunately.
self._refresh_cache()
node_uuids = list(self.node_cache.keys())
LOG.debug("Returning %(num_nodes)s available node(s)",
dict(num_nodes=len(node_uuids)))
return node_uuids
def get_available_resource(self, nodename):
"""Retrieve resource information.
This method is called when nova-compute launches, and
as part of a periodic task that records the results in the DB.
:param nodename: the UUID of the node.
:returns: a dictionary describing resources.
"""
# NOTE(comstud): We can cheat and use caching here. This method is
# only called from a periodic task and right after the above
# get_available_nodes() call.
if not self.node_cache:
# Well, it's also called from init_host(), so if we have empty
# cache, let's try to populate it.
self._refresh_cache()
cache_age = time.time() - self.node_cache_time
if nodename in self.node_cache:
LOG.debug("Using cache for node %(node)s, age: %(age)s",
{'node': nodename, 'age': cache_age})
node = self.node_cache[nodename]
else:
LOG.debug("Node %(node)s not found in cache, age: %(age)s",
{'node': nodename, 'age': cache_age})
node = self._get_node(nodename)
return self._node_resource(node)
def get_info(self, instance):
"""Get the current state and resource usage for this instance.
If the instance is not found this method returns (a dictionary
with) NOSTATE and all resources == 0.
:param instance: the instance object.
:returns: a InstanceInfo object
"""
try:
node = self._validate_instance_and_node(instance)
except exception.InstanceNotFound:
return hardware.InstanceInfo(
state=map_power_state(ironic_states.NOSTATE))
properties = self._parse_node_properties(node)
memory_kib = properties['memory_mb'] * 1024
if memory_kib == 0:
LOG.warning(_LW("Warning, memory usage is 0 for "
"%(instance)s on baremetal node %(node)s."),
{'instance': instance.uuid,
'node': instance.node})
num_cpu = properties['cpus']
if num_cpu == 0:
LOG.warning(_LW("Warning, number of cpus is 0 for "
"%(instance)s on baremetal node %(node)s."),
{'instance': instance.uuid,
'node': instance.node})
return hardware.InstanceInfo(state=map_power_state(node.power_state),
max_mem_kb=memory_kib,
mem_kb=memory_kib,
num_cpu=num_cpu)
def deallocate_networks_on_reschedule(self, instance):
"""Does the driver want networks deallocated on reschedule?
:param instance: the instance object.
:returns: Boolean value. If True deallocate networks on reschedule.
"""
return True
def macs_for_instance(self, instance):
"""List the MAC addresses of an instance.
List of MAC addresses for the node which this instance is
associated with.
:param instance: the instance object.
:return: None, or a set of MAC ids (e.g. set(['12:34:56:78:90:ab'])).
None means 'no constraints', a set means 'these and only these
MAC addresses'.
"""
try:
node = self._get_node(instance.node)
except ironic.exc.NotFound:
return None
ports = self.ironicclient.call("node.list_ports", node.uuid)
return set([p.address for p in ports])
def _generate_configdrive(self, instance, node, network_info,
extra_md=None, files=None):
"""Generate a config drive.
:param instance: The instance object.
:param node: The node object.
:param network_info: Instance network information.
:param extra_md: Optional, extra metadata to be added to the
configdrive.
:param files: Optional, a list of paths to files to be added to
the configdrive.
"""
if not extra_md:
extra_md = {}
i_meta = instance_metadata.InstanceMetadata(instance,
content=files, extra_md=extra_md, network_info=network_info)
with tempfile.NamedTemporaryFile() as uncompressed:
with configdrive.ConfigDriveBuilder(instance_md=i_meta) as cdb:
cdb.make_drive(uncompressed.name)
with tempfile.NamedTemporaryFile() as compressed:
# compress config drive
with gzip.GzipFile(fileobj=compressed, mode='wb') as gzipped:
uncompressed.seek(0)
shutil.copyfileobj(uncompressed, gzipped)
# base64 encode config drive
compressed.seek(0)
return base64.b64encode(compressed.read())
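# Illustrative inverse of the encoding above (a sketch; not used by the driver):
# a consumer of the returned value base64-decodes and then gunzips it to recover
# the raw config drive image, e.g.
#   raw = gzip.GzipFile(fileobj=io.BytesIO(base64.b64decode(value))).read()
# (io is not imported in this module; the line is purely illustrative.)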
def spawn(self, context, instance, image_meta, injected_files,
admin_password, network_info=None, block_device_info=None):
"""Deploy an instance.
:param context: The security context.
:param instance: The instance object.
:param image_meta: Image dict returned by nova.image.glance
that defines the image from which to boot this instance.
:param injected_files: User files to inject into instance.
:param admin_password: Administrator password to set in
instance.
:param network_info: Instance network information.
:param block_device_info: Instance block device
information. Ignored by this driver.
"""
LOG.debug('Spawn called for instance', instance=instance)
# The compute manager is meant to know the node uuid, so missing uuid
# is a significant issue. It may mean we've been passed the wrong data.
node_uuid = instance.get('node')
if not node_uuid:
raise ironic.exc.BadRequest(
_("Ironic node uuid not supplied to "
"driver for instance %s.") % instance.uuid)
node = self._get_node(node_uuid)
flavor = instance.flavor
self._add_driver_fields(node, instance, image_meta, flavor)
# NOTE(Shrews): The default ephemeral device needs to be set for
# services (like cloud-init) that depend on it being returned by the
# metadata server. Addresses bug https://launchpad.net/bugs/1324286.
if flavor.ephemeral_gb:
instance.default_ephemeral_device = '/dev/sda1'
instance.save()
# validate we are ready to do the deploy
validate_chk = self.ironicclient.call("node.validate", node_uuid)
if (not validate_chk.deploy.get('result')
or not validate_chk.power.get('result')):
# something is wrong. undo what we have done
self._cleanup_deploy(node, instance, network_info)
raise exception.ValidationError(_(
"Ironic node: %(id)s failed to validate."
" (deploy: %(deploy)s, power: %(power)s)")
% {'id': node.uuid,
'deploy': validate_chk.deploy,
'power': validate_chk.power})
# prepare for the deploy
try:
self._plug_vifs(node, instance, network_info)
self._start_firewall(instance, network_info)
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Error preparing deploy for instance "
"%(instance)s on baremetal node %(node)s."),
{'instance': instance.uuid,
'node': node_uuid})
self._cleanup_deploy(node, instance, network_info)
# Config drive
configdrive_value = None
if configdrive.required_by(instance):
extra_md = {}
if admin_password:
extra_md['admin_pass'] = admin_password
try:
configdrive_value = self._generate_configdrive(
instance, node, network_info, extra_md=extra_md,
files=injected_files)
except Exception as e:
with excutils.save_and_reraise_exception():
msg = (_LE("Failed to build configdrive: %s") %
six.text_type(e))
LOG.error(msg, instance=instance)
self._cleanup_deploy(node, instance, network_info)
LOG.info(_LI("Config drive for instance %(instance)s on "
"baremetal node %(node)s created."),
{'instance': instance['uuid'], 'node': node_uuid})
# trigger the node deploy
try:
self.ironicclient.call("node.set_provision_state", node_uuid,
ironic_states.ACTIVE,
configdrive=configdrive_value)
except Exception as e:
with excutils.save_and_reraise_exception():
msg = (_LE("Failed to request Ironic to provision instance "
"%(inst)s: %(reason)s"),
{'inst': instance.uuid,
'reason': six.text_type(e)})
LOG.error(msg)
self._cleanup_deploy(node, instance, network_info)
timer = loopingcall.FixedIntervalLoopingCall(self._wait_for_active,
instance)
try:
timer.start(interval=CONF.ironic.api_retry_interval).wait()
LOG.info(_LI('Successfully provisioned Ironic node %s'),
node.uuid, instance=instance)
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Error deploying instance %(instance)s on "
"baremetal node %(node)s."),
{'instance': instance.uuid,
'node': node_uuid})
def _unprovision(self, instance, node):
"""This method is called from destroy() to unprovision
already provisioned node after required checks.
"""
try:
self.ironicclient.call("node.set_provision_state", node.uuid,
"deleted")
except Exception as e:
# if the node is already in a deprovisioned state, continue
# This should be fixed in Ironic.
# TODO(deva): This exception should be added to
# python-ironicclient and matched directly,
# rather than via __name__.
if getattr(e, '__name__', None) != 'InstanceDeployFailure':
raise
# using a dict because this is modified in the local method
data = {'tries': 0}
def _wait_for_provision_state():
try:
node = self._validate_instance_and_node(instance)
except exception.InstanceNotFound:
LOG.debug("Instance already removed from Ironic",
instance=instance)
raise loopingcall.LoopingCallDone()
if node.provision_state in (ironic_states.NOSTATE,
ironic_states.CLEANING,
ironic_states.CLEANWAIT,
ironic_states.CLEANFAIL,
ironic_states.AVAILABLE):
# From a user standpoint, the node is unprovisioned. If a node
# gets into CLEANFAIL state, it must be fixed in Ironic, but we
# can consider the instance unprovisioned.
LOG.debug("Ironic node %(node)s is in state %(state)s, "
"instance is now unprovisioned.",
dict(node=node.uuid, state=node.provision_state),
instance=instance)
raise loopingcall.LoopingCallDone()
if data['tries'] >= max(0, CONF.ironic.api_max_retries) + 1:
msg = (_("Error destroying the instance on node %(node)s. "
"Provision state still '%(state)s'.")
% {'state': node.provision_state,
'node': node.uuid})
LOG.error(msg)
raise exception.NovaException(msg)
else:
data['tries'] += 1
_log_ironic_polling('unprovision', node, instance)
# wait for the state transition to finish
timer = loopingcall.FixedIntervalLoopingCall(_wait_for_provision_state)
timer.start(interval=CONF.ironic.api_retry_interval).wait()
def destroy(self, context, instance, network_info,
block_device_info=None, destroy_disks=True, migrate_data=None):
"""Destroy the specified instance, if it can be found.
:param context: The security context.
:param instance: The instance object.
:param network_info: Instance network information.
:param block_device_info: Instance block device
information. Ignored by this driver.
:param destroy_disks: Indicates if disks should be
destroyed. Ignored by this driver.
:param migrate_data: implementation specific params.
Ignored by this driver.
"""
LOG.debug('Destroy called for instance', instance=instance)
try:
node = self._validate_instance_and_node(instance)
except exception.InstanceNotFound:
LOG.warning(_LW("Destroy called on non-existing instance %s."),
instance.uuid)
# NOTE(deva): if nova.compute.ComputeManager._delete_instance()
# is called on a non-existing instance, the only way
# to delete it is to return from this method
# without raising any exceptions.
return
if node.provision_state in _UNPROVISION_STATES:
self._unprovision(instance, node)
self._cleanup_deploy(node, instance, network_info)
LOG.info(_LI('Successfully unprovisioned Ironic node %s'),
node.uuid, instance=instance)
def reboot(self, context, instance, network_info, reboot_type,
block_device_info=None, bad_volumes_callback=None):
"""Reboot the specified instance.
NOTE: Ironic does not support soft-off, so this method
always performs a hard-reboot.
NOTE: Unlike the libvirt driver, this method does not delete
and recreate the instance; it preserves local state.
:param context: The security context.
:param instance: The instance object.
:param network_info: Instance network information. Ignored by
this driver.
:param reboot_type: Either a HARD or SOFT reboot. Ignored by
this driver.
:param block_device_info: Info pertaining to attached volumes.
Ignored by this driver.
:param bad_volumes_callback: Function to handle any bad volumes
encountered. Ignored by this driver.
"""
LOG.debug('Reboot called for instance', instance=instance)
node = self._validate_instance_and_node(instance)
self.ironicclient.call("node.set_power_state", node.uuid, 'reboot')
timer = loopingcall.FixedIntervalLoopingCall(
self._wait_for_power_state, instance, 'reboot')
timer.start(interval=CONF.ironic.api_retry_interval).wait()
LOG.info(_LI('Successfully rebooted Ironic node %s'),
node.uuid, instance=instance)
def power_off(self, instance, timeout=0, retry_interval=0):
"""Power off the specified instance.
NOTE: Ironic does not support soft-off, so this method ignores
timeout and retry_interval parameters.
NOTE: Unlike the libvirt driver, this method does not delete
and recreate the instance; it preserves local state.
:param instance: The instance object.
:param timeout: time to wait for node to shutdown. Ignored by
this driver.
:param retry_interval: How often to signal node while waiting
for it to shutdown. Ignored by this driver.
"""
LOG.debug('Power off called for instance', instance=instance)
node = self._validate_instance_and_node(instance)
self.ironicclient.call("node.set_power_state", node.uuid, 'off')
timer = loopingcall.FixedIntervalLoopingCall(
self._wait_for_power_state, instance, 'power off')
timer.start(interval=CONF.ironic.api_retry_interval).wait()
LOG.info(_LI('Successfully powered off Ironic node %s'),
node.uuid, instance=instance)
def power_on(self, context, instance, network_info,
block_device_info=None):
"""Power on the specified instance.
NOTE: Unlike the libvirt driver, this method does not delete
and recreate the instance; it preserves local state.
:param context: The security context.
:param instance: The instance object.
:param network_info: Instance network information. Ignored by
this driver.
:param block_device_info: Instance block device
information. Ignored by this driver.
"""
LOG.debug('Power on called for instance', instance=instance)
node = self._validate_instance_and_node(instance)
self.ironicclient.call("node.set_power_state", node.uuid, 'on')
timer = loopingcall.FixedIntervalLoopingCall(
self._wait_for_power_state, instance, 'power on')
timer.start(interval=CONF.ironic.api_retry_interval).wait()
LOG.info(_LI('Successfully powered on Ironic node %s'),
node.uuid, instance=instance)
def refresh_security_group_rules(self, security_group_id):
"""Refresh security group rules from data store.
Invoked when security group rules are updated.
:param security_group_id: The security group id.
"""
self.firewall_driver.refresh_security_group_rules(security_group_id)
def refresh_instance_security_rules(self, instance):
"""Refresh security group rules from data store.
Gets called when an instance gets added to or removed from
the security group the instance is a member of or if the
group gains or loses a rule.
:param instance: The instance object.
"""
self.firewall_driver.refresh_instance_security_rules(instance)
def ensure_filtering_rules_for_instance(self, instance, network_info):
"""Set up filtering rules.
:param instance: The instance object.
:param network_info: Instance network information.
"""
self.firewall_driver.setup_basic_filtering(instance, network_info)
self.firewall_driver.prepare_instance_filter(instance, network_info)
def unfilter_instance(self, instance, network_info):
"""Stop filtering instance.
:param instance: The instance object.
:param network_info: Instance network information.
"""
self.firewall_driver.unfilter_instance(instance, network_info)
def _plug_vifs(self, node, instance, network_info):
# NOTE(PhilDay): Accessing network_info will block if the thread
# it wraps hasn't finished, so do this ahead of time so that we
# don't block while holding the logging lock.
network_info_str = str(network_info)
LOG.debug("plug: instance_uuid=%(uuid)s vif=%(network_info)s",
{'uuid': instance.uuid,
'network_info': network_info_str})
# start by ensuring the ports are clear
self._unplug_vifs(node, instance, network_info)
ports = self.ironicclient.call("node.list_ports", node.uuid)
if len(network_info) > len(ports):
raise exception.VirtualInterfacePlugException(_(
"Ironic node: %(id)s virtual to physical interface count"
" missmatch"
" (Vif count: %(vif_count)d, Pif count: %(pif_count)d)")
% {'id': node.uuid,
'vif_count': len(network_info),
'pif_count': len(ports)})
if len(network_info) > 0:
# not needed if no vifs are defined
for vif in network_info:
for pif in ports:
if vif['address'] == pif.address:
# attach what neutron needs directly to the port
port_id = six.text_type(vif['id'])
patch = [{'op': 'add',
'path': '/extra/vif_port_id',
'value': port_id}]
self.ironicclient.call("port.update", pif.uuid, patch)
break
def _unplug_vifs(self, node, instance, network_info):
# NOTE(PhilDay): Accessing network_info will block if the thread
# it wraps hasn't finished, so do this ahead of time so that we
# don't block while holding the logging lock.
network_info_str = str(network_info)
LOG.debug("unplug: instance_uuid=%(uuid)s vif=%(network_info)s",
{'uuid': instance.uuid,
'network_info': network_info_str})
if network_info and len(network_info) > 0:
ports = self.ironicclient.call("node.list_ports", node.uuid,
detail=True)
# not needed if no vifs are defined
for pif in ports:
if 'vif_port_id' in pif.extra:
# we can not attach a dict directly
patch = [{'op': 'remove', 'path': '/extra/vif_port_id'}]
try:
self.ironicclient.call("port.update", pif.uuid, patch)
except ironic.exc.BadRequest:
pass
def plug_vifs(self, instance, network_info):
"""Plug VIFs into networks.
:param instance: The instance object.
:param network_info: Instance network information.
"""
node = self._get_node(instance.node)
self._plug_vifs(node, instance, network_info)
def unplug_vifs(self, instance, network_info):
"""Unplug VIFs from networks.
:param instance: The instance object.
:param network_info: Instance network information.
"""
node = self._get_node(instance.node)
self._unplug_vifs(node, instance, network_info)
def rebuild(self, context, instance, image_meta, injected_files,
admin_password, bdms, detach_block_devices,
attach_block_devices, network_info=None,
recreate=False, block_device_info=None,
preserve_ephemeral=False):
"""Rebuild/redeploy an instance.
This version of rebuild() allows for supporting the option to
preserve the ephemeral partition. We cannot call spawn() from
here because it will attempt to set the instance_uuid value
again, which is not allowed by the Ironic API. It also requires
the instance to not have an 'active' provision state, but we
cannot safely change that. Given that, we implement only the
portions of spawn() we need within rebuild().
:param context: The security context.
:param instance: The instance object.
:param image_meta: Image object returned by nova.image.glance
that defines the image from which to boot this instance. Ignored
by this driver.
:param injected_files: User files to inject into instance. Ignored
by this driver.
:param admin_password: Administrator password to set in
instance. Ignored by this driver.
:param bdms: block-device-mappings to use for rebuild. Ignored
by this driver.
:param detach_block_devices: function to detach block devices. See
nova.compute.manager.ComputeManager:_rebuild_default_impl for
usage. Ignored by this driver.
:param attach_block_devices: function to attach block devices. See
nova.compute.manager.ComputeManager:_rebuild_default_impl for
usage. Ignored by this driver.
:param network_info: Instance network information. Ignored by
this driver.
:param recreate: Boolean value; if True the instance is
recreated on a new hypervisor - all the cleanup of old state is
skipped. Ignored by this driver.
:param block_device_info: Instance block device
information. Ignored by this driver.
:param preserve_ephemeral: Boolean value; if True the ephemeral
must be preserved on rebuild.
"""
LOG.debug('Rebuild called for instance', instance=instance)
instance.task_state = task_states.REBUILD_SPAWNING
instance.save(expected_task_state=[task_states.REBUILDING])
node_uuid = instance.node
node = self._get_node(node_uuid)
self._add_driver_fields(node, instance, image_meta, instance.flavor,
preserve_ephemeral)
# Trigger the node rebuild/redeploy.
try:
self.ironicclient.call("node.set_provision_state",
node_uuid, ironic_states.REBUILD)
except (exception.NovaException, # Retry failed
ironic.exc.InternalServerError, # Validations
ironic.exc.BadRequest) as e: # Maintenance
msg = (_("Failed to request Ironic to rebuild instance "
"%(inst)s: %(reason)s") % {'inst': instance.uuid,
'reason': six.text_type(e)})
raise exception.InstanceDeployFailure(msg)
# Although the target provision state is REBUILD, it will actually go
# to ACTIVE once the redeploy is finished.
timer = loopingcall.FixedIntervalLoopingCall(self._wait_for_active,
instance)
timer.start(interval=CONF.ironic.api_retry_interval).wait()
LOG.info(_LI('Instance was successfully rebuilt'), instance=instance)
def network_binding_host_id(self, context, instance):
"""Get host ID to associate with network ports.
This defines the binding:host_id parameter to the port-create
calls for Neutron. If using a flat network, use the default behavior
and allow the port to bind immediately. If using separate networks
for the control plane and tenants, return None here to indicate
that the port should not yet be bound; Ironic will make a port-update
call to Neutron later to tell Neutron to bind the port.
:param context: request context
:param instance: nova.objects.instance.Instance that the network
ports will be associated with
:returns: a string representing the host ID
"""
node = self._get_node(instance.node)
if getattr(node, 'network_provider', 'none') == 'none':
# flat network, go ahead and allow the port to be bound
return super(IronicDriver, self).network_binding_host_id(
context, instance)
return None
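    # Illustrative summary (not part of the driver): the two possible outcomes
    # of network_binding_host_id(), assuming the node exposes a
    # 'network_provider' attribute as checked above:
    #   - 'none' (flat networking)  -> the usual host ID string, bind the port now
    #   - anything else             -> None, Ironic asks Neutron to bind later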
|
|
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'GroupMeta'
db.create_table(
'sentry_groupmeta', (
(
'id', self.gf('sentry.db.models.fields.bounded.BoundedBigAutoField')(
primary_key=True
)
), (
'group',
self.gf('sentry.db.models.fields.FlexibleForeignKey')(to=orm['sentry.Group'])
), ('key', self.gf('django.db.models.fields.CharField')(max_length=64)),
('value', self.gf('django.db.models.fields.TextField')()),
)
)
db.send_create_signal('sentry', ['GroupMeta'])
        # Adding unique constraint on 'GroupMeta', fields ['group', 'key']
db.create_unique('sentry_groupmeta', ['group_id', 'key'])
def backwards(self, orm):
        # Removing unique constraint on 'GroupMeta', fields ['group', 'key']
db.delete_unique('sentry_groupmeta', ['group_id', 'key'])
# Deleting model 'GroupMeta'
db.delete_table('sentry_groupmeta')
models = {
'sentry.user': {
'Meta': {
'object_name': 'User',
'db_table': "'auth_user'"
},
'date_joined':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'email':
('django.db.models.fields.EmailField', [], {
'max_length': '75',
'blank': 'True'
}),
'first_name':
('django.db.models.fields.CharField', [], {
'max_length': '30',
'blank': 'True'
}),
'id': ('django.db.models.fields.AutoField', [], {
'primary_key': 'True'
}),
'is_active': ('django.db.models.fields.BooleanField', [], {
'default': 'True'
}),
'is_staff': ('django.db.models.fields.BooleanField', [], {
'default': 'False'
}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {
'default': 'False'
}),
'last_login':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'last_name':
('django.db.models.fields.CharField', [], {
'max_length': '30',
'blank': 'True'
}),
'password': ('django.db.models.fields.CharField', [], {
'max_length': '128'
}),
'username':
('django.db.models.fields.CharField', [], {
'unique': 'True',
'max_length': '30'
})
},
'contenttypes.contenttype': {
'Meta': {
'ordering': "('name',)",
'unique_together': "(('app_label', 'model'),)",
'object_name': 'ContentType',
'db_table': "'django_content_type'"
},
'app_label': ('django.db.models.fields.CharField', [], {
'max_length': '100'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'model': ('django.db.models.fields.CharField', [], {
'max_length': '100'
}),
'name': ('django.db.models.fields.CharField', [], {
'max_length': '100'
})
},
'sentry.event': {
'Meta': {
'object_name': 'Event',
'db_table': "'sentry_message'"
},
'checksum':
('django.db.models.fields.CharField', [], {
'max_length': '32',
'db_index': 'True'
}),
'culprit': (
'django.db.models.fields.CharField', [], {
'max_length': '200',
'null': 'True',
'db_column': "'view'",
'blank': 'True'
}
),
'data': ('django.db.models.fields.TextField', [], {
'null': 'True',
'blank': 'True'
}),
'datetime': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'event_id': (
'django.db.models.fields.CharField', [], {
'max_length': '32',
'unique': 'True',
'null': 'True',
'db_column': "'message_id'"
}
),
'group': (
'sentry.db.models.fields.FlexibleForeignKey', [], {
'blank': 'True',
'related_name': "'event_set'",
'null': 'True',
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'level': (
'django.db.models.fields.PositiveIntegerField', [], {
'default': '40',
'db_index': 'True',
'blank': 'True'
}
),
'logger': (
'django.db.models.fields.CharField', [], {
'default': "'root'",
'max_length': '64',
'db_index': 'True',
'blank': 'True'
}
),
'message': ('django.db.models.fields.TextField', [], {}),
'project': (
'sentry.db.models.fields.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'null': 'True'
}
),
'server_name': (
'django.db.models.fields.CharField', [], {
'max_length': '128',
'null': 'True',
'db_index': 'True'
}
),
'site': (
'django.db.models.fields.CharField', [], {
'max_length': '128',
'null': 'True',
'db_index': 'True'
}
),
'time_spent': ('django.db.models.fields.FloatField', [], {
'null': 'True'
})
},
'sentry.filtervalue': {
'Meta': {
'unique_together': "(('project', 'key', 'value'),)",
'object_name': 'FilterValue'
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'project': (
'sentry.db.models.fields.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'null': 'True'
}
),
'value': ('django.db.models.fields.CharField', [], {
'max_length': '200'
})
},
'sentry.group': {
'Meta': {
'unique_together': "(('project', 'logger', 'culprit', 'checksum'),)",
'object_name': 'Group',
'db_table': "'sentry_groupedmessage'"
},
'checksum':
('django.db.models.fields.CharField', [], {
'max_length': '32',
'db_index': 'True'
}),
'culprit': (
'django.db.models.fields.CharField', [], {
'max_length': '200',
'null': 'True',
'db_column': "'view'",
'blank': 'True'
}
),
'data': ('django.db.models.fields.TextField', [], {
'null': 'True',
'blank': 'True'
}),
'first_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'last_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'level': (
'django.db.models.fields.PositiveIntegerField', [], {
'default': '40',
'db_index': 'True',
'blank': 'True'
}
),
'logger': (
'django.db.models.fields.CharField', [], {
'default': "'root'",
'max_length': '64',
'db_index': 'True',
'blank': 'True'
}
),
'message': ('django.db.models.fields.TextField', [], {}),
'project': (
'sentry.db.models.fields.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'null': 'True'
}
),
'score': ('django.db.models.fields.IntegerField', [], {
'default': '0'
}),
'status': (
'django.db.models.fields.PositiveIntegerField', [], {
'default': '0',
'db_index': 'True'
}
),
'time_spent_count': ('django.db.models.fields.IntegerField', [], {
'default': '0'
}),
'time_spent_total': ('django.db.models.fields.FloatField', [], {
'default': '0'
}),
'times_seen': (
'django.db.models.fields.PositiveIntegerField', [], {
'default': '1',
'db_index': 'True'
}
),
'views': (
'django.db.models.fields.related.ManyToManyField', [], {
'to': "orm['sentry.View']",
'symmetrical': 'False',
'blank': 'True'
}
)
},
'sentry.groupmeta': {
'Meta': {
'unique_together': "(('group', 'key', 'value'),)",
'object_name': 'GroupMeta'
},
'group':
('sentry.db.models.fields.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']"
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'value': ('django.db.models.fields.TextField', [], {})
},
'sentry.messagecountbyminute': {
'Meta': {
'unique_together': "(('project', 'group', 'date'),)",
'object_name': 'MessageCountByMinute'
},
'date': ('django.db.models.fields.DateTimeField', [], {}),
'group':
('sentry.db.models.fields.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']"
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'project': (
'sentry.db.models.fields.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'null': 'True'
}
),
'time_spent_count': ('django.db.models.fields.IntegerField', [], {
'default': '0'
}),
'time_spent_total': ('django.db.models.fields.FloatField', [], {
'default': '0'
}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {
'default': '0'
})
},
'sentry.messagefiltervalue': {
'Meta': {
'unique_together': "(('project', 'key', 'value', 'group'),)",
'object_name': 'MessageFilterValue'
},
'group':
('sentry.db.models.fields.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']"
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'project': (
'sentry.db.models.fields.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'null': 'True'
}
),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {
'default': '0'
}),
'value': ('django.db.models.fields.CharField', [], {
'max_length': '200'
})
},
'sentry.messageindex': {
'Meta': {
'unique_together': "(('column', 'value', 'object_id'),)",
'object_name': 'MessageIndex'
},
'column': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'value': ('django.db.models.fields.CharField', [], {
'max_length': '128'
})
},
'sentry.project': {
'Meta': {
'object_name': 'Project'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'name': ('django.db.models.fields.CharField', [], {
'max_length': '200'
}),
'owner': (
'sentry.db.models.fields.FlexibleForeignKey', [], {
'related_name': "'owned_project_set'",
'null': 'True',
'to': "orm['sentry.User']"
}
),
'public': ('django.db.models.fields.BooleanField', [], {
'default': 'False'
}),
'status': (
'django.db.models.fields.PositiveIntegerField', [], {
'default': '0',
'db_index': 'True'
}
)
},
'sentry.projectdomain': {
'Meta': {
'unique_together': "(('project', 'domain'),)",
'object_name': 'ProjectDomain'
},
'domain': ('django.db.models.fields.CharField', [], {
'max_length': '128'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'project': (
'sentry.db.models.fields.FlexibleForeignKey', [], {
'related_name': "'domain_set'",
'to': "orm['sentry.Project']"
}
)
},
'sentry.projectmember': {
'Meta': {
'unique_together': "(('project', 'user'),)",
'object_name': 'ProjectMember'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'project': (
'sentry.db.models.fields.FlexibleForeignKey', [], {
'related_name': "'member_set'",
'to': "orm['sentry.Project']"
}
),
'public_key': (
'django.db.models.fields.CharField', [], {
'max_length': '32',
'unique': 'True',
'null': 'True'
}
),
'secret_key': (
'django.db.models.fields.CharField', [], {
'max_length': '32',
'unique': 'True',
'null': 'True'
}
),
'type': ('django.db.models.fields.IntegerField', [], {
'default': '0'
}),
'user': (
'sentry.db.models.fields.FlexibleForeignKey', [], {
'related_name': "'project_set'",
'to': "orm['sentry.User']"
}
)
},
'sentry.projectoption': {
'Meta': {
'unique_together': "(('project', 'key', 'value'),)",
'object_name': 'ProjectOption',
'db_table': "'sentry_projectoptions'"
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'project':
('sentry.db.models.fields.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}),
'value': ('django.db.models.fields.CharField', [], {
'max_length': '200'
})
},
'sentry.view': {
'Meta': {
'object_name': 'View'
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'path':
('django.db.models.fields.CharField', [], {
'unique': 'True',
'max_length': '100'
}),
'verbose_name':
('django.db.models.fields.CharField', [], {
'max_length': '200',
'null': 'True'
}),
'verbose_name_plural':
('django.db.models.fields.CharField', [], {
'max_length': '200',
'null': 'True'
})
}
}
complete_apps = ['sentry']
|
|
######################################################################
# CliNER - documents.py #
# #
# Willie Boag wboag@cs.uml.edu #
# #
# Purpose: Build model for given training data. #
######################################################################
import string
import re
import nltk
import os
from tools import clean_text, normalize_tokens
labels = { 'O':0,
'B-problem':1, 'B-test':2, 'B-treatment':3,
'I-problem':4, 'I-test':5, 'I-treatment':6,
}
id2tag = { v:k for k,v in labels.items() }
class Document:
def __init__(self, txt, con=None):
# read data
retVal = read_i2b2(txt, con)
# Internal representation natural for i2b2 format
self._tok_sents = retVal[0]
# Store token labels
        if con:
            self._tok_concepts = retVal[1]
            self._labels = tok_concepts_to_labels(self._tok_sents,
                                                  self._tok_concepts)
        else:
            # No concept file: mark labels as absent so write() can detect it.
            self._tok_concepts = []
            self._labels = None
# save filename
self._filename = txt
def getName(self):
return os.path.basename(self._filename).split('.')[0]
def getExtension(self):
return 'con'
def getTokenizedSentences(self):
return self._tok_sents
def getTokenLabels(self):
return self._labels
def conlist(self):
return self._labels
def write(self, pred_labels=None):
"""
Purpose: Return the given concept label predictions in i2b2 format
@param pred_labels. <list-of-lists> of predicted_labels
@return <string> of i2b2-concept-file-formatted data
"""
# Return value
retStr = ''
# If given labels to write, use them. Default to classifications
        if pred_labels is not None:
            token_labels = pred_labels
        elif self._labels is not None:
token_labels = self._labels
else:
raise Exception('Cannot write concept file: must specify labels')
concept_tuples = tok_labels_to_concepts(self._tok_sents, token_labels)
# For each classification
for classification in concept_tuples:
            # Guard against 'none' classifications, which should never occur
            if classification[0] == 'none':
                raise DocumentException(
                    'Classification label "none" should never happen')
concept = classification[0]
lineno = classification[1]
start = classification[2]
end = classification[3]
# A list of words (corresponding line from the text file)
text = self._tok_sents[lineno-1]
#print("\n" + "-" * 80)
#print("classification: ", classification)
#print("lineno: ", lineno)
#print("start: ", start)
#print("end ", end)
#print("text: ", text)
#print('len(text): ', len(text))
#print("text[start]: ", text[start])
#print("concept: ", concept)
datum = text[start]
for j in range(start, end):
datum += " " + text[j+1]
datum = datum.lower()
#print('datum: ', datum)
# Line:TokenNumber of where the concept starts and ends
idx1 = "%d:%d" % (lineno, start)
idx2 = "%d:%d" % (lineno, end )
# Classification
label = concept
# Print format
retStr += "c=\"%s\" %s %s||t=\"%s\"\n" % (datum, idx1, idx2, label)
# return formatted data
return retStr.strip()
def read_i2b2(txt, con):
"""
read_i2b2()
@param txt. A file path for the tokenized medical record
@param con. A file path for the i2b2 annotated concepts for txt
"""
tokenized_sents = []
sent_tokenize = lambda text: text.split('\n')
word_tokenize = lambda text: text.split(' ')
# Read in the medical text
with open(txt) as f:
# Original text file
text = f.read().strip('\n')
# tokenize
sentences = sent_tokenize(text)
for sentence in sentences:
sent = clean_text(sentence.rstrip())
# lowercase
sent = sent.lower()
toks = word_tokenize(sent)
# normalize tokens
normed_toks = normalize_tokens(toks)
#for w in normed_toks:
# print(w)
#print()
tokenized_sents.append(normed_toks)
# If an accompanying concept file was specified, read it
tok_concepts = []
if con:
with open(con) as f:
for line in f.readlines():
# Empty line
if not line.strip():
continue
# parse concept line
                concept_regex = r'^c="(.*)" (\d+):(\d+) (\d+):(\d+)\|\|t="(.*)"$'
match = re.search(concept_regex, line.strip())
groups = match.groups()
# retrieve regex info
concept_text = groups[0]
start_lineno = int(groups[1])
start_tok_ind = int(groups[2])
end_lineno = int(groups[3])
end_tok_ind = int(groups[4])
concept_label = groups[5]
# pre-process text for error-check
#matching_line = tokenized_sents[start_lineno-1]
#matching_toks = matching_line[start_tok_ind:end_tok_ind+1]
#matching_text = ' '.join(matching_toks).lower()
#concept_text = ' '.join(word_tokenize(concept_text))
# error-check info
assert start_lineno==end_lineno, 'concept must span single line'
#assert concept_text==matching_text, 'something wrong with inds'
# add the concept info
tup = (concept_label, start_lineno, start_tok_ind, end_tok_ind)
tok_concepts.append(tup)
# Safe guard against concept file having duplicate entries
tok_concepts = list(set(tok_concepts))
# Concept file does not guarantee ordering by line number
tok_concepts = sorted(tok_concepts, key=lambda t:t[1:])
# Ensure no overlapping concepts (that would be bad)
for i in range(len(tok_concepts)-1):
c1 = tok_concepts[i]
c2 = tok_concepts[i+1]
if c1[1] == c2[1]:
if c1[2] <= c2[2] and c2[2] <= c1[3]:
fname = os.path.basename(con)
error1='%s has overlapping entities on line %d'%(fname,c1[1])
error2="It can't be processed until you remove one"
error3='Please modify this file: %s' % con
error4='\tentity 1: c="%s" %d:%d %d:%d||t="%s"'%(' '.join(tokenized_sents[c1[1]-1][c1[2]:c1[3]+1]),
c1[1], c1[2], c1[1], c1[3], c1[0])
error5='\tentity 2: c="%s" %d:%d %d:%d||t="%s"'%(' '.join(tokenized_sents[c2[1]-1][c2[2]:c2[3]+1]),
c2[1], c2[2], c2[1], c2[3], c2[0])
error_msg = '\n\n%s\n%s\n\n%s\n\n%s\n%s\n' % (error1,error2,error3,error4,error5)
raise DocumentException(error_msg)
return tokenized_sents, tok_concepts
def tok_concepts_to_labels(tokenized_sents, tok_concepts):
# parallel to tokens
labels = [ ['O' for tok in sent] for sent in tokenized_sents ]
# fill each concept's tokens appropriately
for concept in tok_concepts:
label,lineno,start_tok,end_tok = concept
labels[lineno-1][start_tok] = 'B-%s' % label
for i in range(start_tok+1,end_tok+1):
labels[lineno-1][i] = 'I-%s' % label
# test it out
'''
for i in range(len(tokenized_sents)):
assert len(tokenized_sents[i]) == len(labels[i])
for tok,lab in zip(tokenized_sents[i],labels[i]):
if lab != 'O': print( '\t',)
print (lab, tok)
print()
exit()
'''
return labels
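# Worked example for tok_concepts_to_labels (inputs are made up):
#   tokenized_sents = [['pt', 'denies', 'chest', 'pain', '.']]
#   tok_concepts    = [('problem', 1, 2, 3)]
# yields labels     = [['O', 'O', 'B-problem', 'I-problem', 'O']]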
def tok_labels_to_concepts(tokenized_sents, tok_labels):
'''
for gold,sent in zip(tok_labels, tokenized_sents):
print(gold)
print(sent)
print()
'''
# convert 'B-treatment' into ('B','treatment') and 'O' into ('O',None)
def split_label(label):
if label == 'O':
iob,tag = 'O', None
else:
iob,tag = label.split('-')
return iob, tag
# preprocess predictions to "correct" starting Is into Bs
corrected = []
for lineno,labels in enumerate(tok_labels):
corrected_line = []
for i in range(len(labels)):
#'''
# is this a candidate for error?
iob,tag = split_label(labels[i])
if iob == 'I':
# beginning of line has no previous
if i == 0:
print( 'CORRECTING! A')
new_label = 'B' + labels[i][1:]
else:
# ensure either its outside OR mismatch type
prev_iob,prev_tag = split_label(labels[i-1])
if prev_iob == 'O' or prev_tag != tag:
print( 'CORRECTING! B')
new_label = 'B' + labels[i][1:]
else:
new_label = labels[i]
else:
new_label = labels[i]
#'''
corrected_line.append(new_label)
corrected.append( corrected_line )
'''
for i,(trow,crow) in enumerate(zip(tok_labels, corrected)):
if trow != crow:
for j,(t,c) in enumerate(zip(trow,crow)):
if t != c:
print('lineno: ', i)
print (t, '\t', c)
print()
print()
exit()
'''
tok_labels = corrected
concepts = []
for i,labs in enumerate(tok_labels):
N = len(labs)
begins = [ j for j,lab in enumerate(labs) if (lab[0] == 'B') ]
for start in begins:
# "B-test" --> "-test"
label = labs[start][1:]
# get ending token index
end = start
while (end < N-1) and tok_labels[i][end+1].startswith('I') and tok_labels[i][end+1][1:] == label:
end += 1
# concept tuple
concept_tuple = (label[1:], i+1, start, end)
concepts.append(concept_tuple)
'''
# test it out
for i in range(len(tokenized_sents)):
assert len(tokenized_sents[i]) == len(tok_labels[i])
for tok,lab in zip(tokenized_sents[i],tok_labels[i]):
if lab != 'O': print( '\t',)
print (lab, tok)
print()
exit()
'''
# test it out
test_tok_labels = tok_concepts_to_labels(tokenized_sents, concepts)
#'''
for lineno,(test,gold,sent) in enumerate(zip(test_tok_labels, tok_labels, tokenized_sents)):
for i,(a,b) in enumerate(zip(test,gold)):
#'''
if not ((a == b)or(a[0]=='B' and b[0]=='I' and a[1:]==b[1:])):
print()
print( 'lineno: ', lineno)
print()
print( 'generated: ', test[i-3:i+4])
print( 'predicted: ', gold[i-3:i+4])
print( sent[i-3:i+4])
print( 'a[0]: ', a[0])
print( 'b[0]: ', b[0])
print( 'a[1:]: ', a[1:])
print( 'b[1:]: ', b[1:])
print( 'a[1:] == b[a:]: ', a[1:] == b[1:])
print()
#'''
assert (a == b) or (a[0]=='B' and b[0]=='I' and a[1:]==b[1:])
i += 1
#'''
assert test_tok_labels == tok_labels
return concepts
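# Worked example for tok_labels_to_concepts (inputs are made up), the inverse
# of the mapping illustrated above:
#   tok_labels = [['O', 'B-test', 'I-test', 'O']]
# yields concepts = [('test', 1, 1, 2)]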
class DocumentException(Exception):
pass
|
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""An extensible ASCII table reader and writer.
ui.py:
Provides the main user functions for reading and writing tables.
:Copyright: Smithsonian Astrophysical Observatory (2010)
:Author: Tom Aldcroft (aldcroft@head.cfa.harvard.edu)
"""
import re
import os
import sys
import copy
import time
import warnings
import contextlib
from io import StringIO
import numpy as np
from . import core
from . import basic
from . import cds
from . import daophot
from . import ecsv
from . import sextractor
from . import ipac
from . import latex
from . import html
from . import rst
from . import fastbasic
from . import cparser
from . import fixedwidth
from astropy.table import Table, vstack, MaskedColumn
from astropy.utils.data import get_readable_fileobj
from astropy.utils.exceptions import AstropyWarning, AstropyDeprecationWarning
_read_trace = []
try:
import yaml # pylint: disable=W0611
HAS_YAML = True
except ImportError:
HAS_YAML = False
# Default setting for guess parameter in read()
_GUESS = True
def _probably_html(table, maxchars=100000):
"""
Determine if ``table`` probably contains HTML content. See PR #3693 and issue
#3691 for context.
"""
if not isinstance(table, str):
try:
# If table is an iterable (list of strings) then take the first
# maxchars of these. Make sure this is something with random
# access to exclude a file-like object
table[0]
table[:1]
size = 0
for i, line in enumerate(table):
size += len(line)
if size > maxchars:
break
table = os.linesep.join(table[:i+1])
except Exception:
pass
if isinstance(table, str):
# Look for signs of an HTML table in the first maxchars characters
table = table[:maxchars]
# URL ending in .htm or .html
if re.match(r'( http[s]? | ftp | file ) :// .+ \.htm[l]?$', table,
re.IGNORECASE | re.VERBOSE):
return True
# Filename ending in .htm or .html which exists
if re.search(r'\.htm[l]?$', table[-5:], re.IGNORECASE) and os.path.exists(table):
return True
# Table starts with HTML document type declaration
if re.match(r'\s* <! \s* DOCTYPE \s* HTML', table, re.IGNORECASE | re.VERBOSE):
return True
# Look for <TABLE .. >, <TR .. >, <TD .. > tag openers.
if all(re.search(fr'< \s* {element} [^>]* >', table, re.IGNORECASE | re.VERBOSE)
for element in ('table', 'tr', 'td')):
return True
return False
def set_guess(guess):
"""
Set the default value of the ``guess`` parameter for read()
Parameters
----------
guess : bool
New default ``guess`` value (e.g., True or False)
"""
global _GUESS
_GUESS = guess
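# Illustrative use (assuming the usual public astropy.io.ascii exports, where
# this function is exposed): disable guessing globally so that read() requires
# an explicit format:
#     from astropy.io import ascii
#     ascii.set_guess(False)
#     dat = ascii.read('table.csv', format='csv')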
def get_reader(Reader=None, Inputter=None, Outputter=None, **kwargs):
"""
Initialize a table reader allowing for common customizations. Most of the
default behavior for various parameters is determined by the Reader class.
Parameters
----------
Reader : `~astropy.io.ascii.BaseReader`
Reader class (DEPRECATED). Default is :class:`Basic`.
Inputter : `~astropy.io.ascii.BaseInputter`
Inputter class
Outputter : `~astropy.io.ascii.BaseOutputter`
Outputter class
delimiter : str
Column delimiter string
comment : str
Regular expression defining a comment line in table
quotechar : str
One-character string to quote fields containing special characters
header_start : int
Line index for the header line not counting comment or blank lines.
A line with only whitespace is considered blank.
data_start : int
Line index for the start of data not counting comment or blank lines.
A line with only whitespace is considered blank.
data_end : int
Line index for the end of data not counting comment or blank lines.
This value can be negative to count from the end.
converters : dict
Dictionary of converters.
data_Splitter : `~astropy.io.ascii.BaseSplitter`
Splitter class to split data columns.
header_Splitter : `~astropy.io.ascii.BaseSplitter`
Splitter class to split header columns.
names : list
List of names corresponding to each data column.
include_names : list, optional
List of names to include in output.
exclude_names : list
List of names to exclude from output (applied after ``include_names``).
fill_values : dict
Specification of fill values for bad or missing table values.
fill_include_names : list
List of names to include in fill_values.
fill_exclude_names : list
List of names to exclude from fill_values (applied after ``fill_include_names``).
Returns
-------
reader : `~astropy.io.ascii.BaseReader` subclass
ASCII format reader instance
"""
# This function is a light wrapper around core._get_reader to provide a
# public interface with a default Reader.
if Reader is None:
# Default reader is Basic unless fast reader is forced
fast_reader = _get_fast_reader_dict(kwargs)
if fast_reader['enable'] == 'force':
Reader = fastbasic.FastBasic
else:
Reader = basic.Basic
reader = core._get_reader(Reader, Inputter=Inputter, Outputter=Outputter, **kwargs)
return reader
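# Illustrative use (file name is made up): build a reader for pipe-delimited
# data with '#' comments, then read a file:
#     reader = get_reader(Reader=basic.Basic, delimiter='|', comment='#')
#     dat = reader.read('my_table.txt')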
def _get_format_class(format, ReaderWriter, label):
if format is not None and ReaderWriter is not None:
raise ValueError(f'Cannot supply both format and {label} keywords')
if format is not None:
if format in core.FORMAT_CLASSES:
ReaderWriter = core.FORMAT_CLASSES[format]
else:
raise ValueError('ASCII format {!r} not in allowed list {}'
.format(format, sorted(core.FORMAT_CLASSES)))
return ReaderWriter
def _get_fast_reader_dict(kwargs):
"""Convert 'fast_reader' key in kwargs into a dict if not already and make sure
'enable' key is available.
"""
fast_reader = copy.deepcopy(kwargs.get('fast_reader', True))
if isinstance(fast_reader, dict):
fast_reader.setdefault('enable', 'force')
else:
fast_reader = {'enable': fast_reader}
return fast_reader
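# For illustration, given the logic above:
#     {'fast_reader': True}                   -> {'enable': True}
#     {'fast_reader': False}                  -> {'enable': False}
#     {'fast_reader': {'chunk_size': 100000}} -> {'enable': 'force',
#                                                 'chunk_size': 100000}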
def read(table, guess=None, **kwargs):
# Docstring defined below
del _read_trace[:]
# Downstream readers might munge kwargs
kwargs = copy.deepcopy(kwargs)
# Convert 'fast_reader' key in kwargs into a dict if not already and make sure
# 'enable' key is available.
fast_reader = _get_fast_reader_dict(kwargs)
kwargs['fast_reader'] = fast_reader
if fast_reader['enable'] and fast_reader.get('chunk_size'):
return _read_in_chunks(table, **kwargs)
if 'fill_values' not in kwargs:
kwargs['fill_values'] = [('', '0')]
# If an Outputter is supplied in kwargs that will take precedence.
if 'Outputter' in kwargs: # user specified Outputter, not supported for fast reading
fast_reader['enable'] = False
format = kwargs.get('format')
# Dictionary arguments are passed by reference per default and thus need
# special protection:
new_kwargs = copy.deepcopy(kwargs)
kwargs['fast_reader'] = copy.deepcopy(fast_reader)
# Get the Reader class based on possible format and Reader kwarg inputs.
Reader = _get_format_class(format, kwargs.get('Reader'), 'Reader')
if Reader is not None:
new_kwargs['Reader'] = Reader
format = Reader._format_name
# Remove format keyword if there, this is only allowed in read() not get_reader()
if 'format' in new_kwargs:
del new_kwargs['format']
if guess is None:
guess = _GUESS
if guess:
# If ``table`` is probably an HTML file then tell guess function to add
# the HTML reader at the top of the guess list. This is in response to
# issue #3691 (and others) where libxml can segfault on a long non-HTML
# file, thus prompting removal of the HTML reader from the default
# guess list.
new_kwargs['guess_html'] = _probably_html(table)
# If `table` is a filename or readable file object then read in the
# file now. This prevents problems in Python 3 with the file object
# getting closed or left at the file end. See #3132, #3013, #3109,
# #2001. If a `readme` arg was passed that implies CDS format, in
# which case the original `table` as the data filename must be left
# intact.
if 'readme' not in new_kwargs:
encoding = kwargs.get('encoding')
try:
with get_readable_fileobj(table, encoding=encoding) as fileobj:
table = fileobj.read()
except ValueError: # unreadable or invalid binary file
raise
except Exception:
pass
else:
# Ensure that `table` has at least one \r or \n in it
# so that the core.BaseInputter test of
# ('\n' not in table and '\r' not in table)
# will fail and so `table` cannot be interpreted there
# as a filename. See #4160.
if not re.search(r'[\r\n]', table):
table = table + os.linesep
# If the table got successfully read then look at the content
# to see if is probably HTML, but only if it wasn't already
# identified as HTML based on the filename.
if not new_kwargs['guess_html']:
new_kwargs['guess_html'] = _probably_html(table)
# Get the table from guess in ``dat``. If ``dat`` comes back as None
# then there was just one set of kwargs in the guess list so fall
# through below to the non-guess way so that any problems result in a
# more useful traceback.
dat = _guess(table, new_kwargs, format, fast_reader)
if dat is None:
guess = False
if not guess:
if format is None:
reader = get_reader(**new_kwargs)
format = reader._format_name
# Try the fast reader version of `format` first if applicable. Note that
# if user specified a fast format (e.g. format='fast_basic') this test
# will fail and the else-clause below will be used.
if fast_reader['enable'] and f'fast_{format}' in core.FAST_CLASSES:
fast_kwargs = copy.deepcopy(new_kwargs)
fast_kwargs['Reader'] = core.FAST_CLASSES[f'fast_{format}']
fast_reader_rdr = get_reader(**fast_kwargs)
try:
dat = fast_reader_rdr.read(table)
_read_trace.append({'kwargs': copy.deepcopy(fast_kwargs),
'Reader': fast_reader_rdr.__class__,
'status': 'Success with fast reader (no guessing)'})
except (core.ParameterError, cparser.CParserError, UnicodeEncodeError) as err:
# special testing value to avoid falling back on the slow reader
if fast_reader['enable'] == 'force':
raise core.InconsistentTableError(
'fast reader {} exception: {}'
.format(fast_reader_rdr.__class__, err))
# If the fast reader doesn't work, try the slow version
reader = get_reader(**new_kwargs)
dat = reader.read(table)
_read_trace.append({'kwargs': copy.deepcopy(new_kwargs),
'Reader': reader.__class__,
'status': 'Success with slow reader after failing'
' with fast (no guessing)'})
else:
reader = get_reader(**new_kwargs)
dat = reader.read(table)
_read_trace.append({'kwargs': copy.deepcopy(new_kwargs),
'Reader': reader.__class__,
'status': 'Success with specified Reader class '
'(no guessing)'})
return dat
read.__doc__ = core.READ_DOCSTRING
def _guess(table, read_kwargs, format, fast_reader):
"""
Try to read the table using various sets of keyword args. Start with the
standard guess list and filter to make it unique and consistent with
user-supplied read keyword args. Finally, if none of those work then
try the original user-supplied keyword args.
Parameters
----------
table : str, file-like, list
Input table as a file name, file-like object, list of strings, or
single newline-separated string.
read_kwargs : dict
Keyword arguments from user to be supplied to reader
format : str
Table format
fast_reader : dict
Options for the C engine fast reader. See read() function for details.
Returns
-------
dat : `~astropy.table.Table` or None
Output table or None if only one guess format was available
"""
# Keep a trace of all failed guesses kwarg
failed_kwargs = []
# Get an ordered list of read() keyword arg dicts that will be cycled
# through in order to guess the format.
full_list_guess = _get_guess_kwargs_list(read_kwargs)
# If a fast version of the reader is available, try that before the slow version
if (fast_reader['enable'] and format is not None and f'fast_{format}' in
core.FAST_CLASSES):
fast_kwargs = copy.deepcopy(read_kwargs)
fast_kwargs['Reader'] = core.FAST_CLASSES[f'fast_{format}']
full_list_guess = [fast_kwargs] + full_list_guess
else:
fast_kwargs = None
# Filter the full guess list so that each entry is consistent with user kwarg inputs.
# This also removes any duplicates from the list.
filtered_guess_kwargs = []
fast_reader = read_kwargs.get('fast_reader')
for guess_kwargs in full_list_guess:
# If user specified slow reader then skip all fast readers
if (fast_reader['enable'] is False and
guess_kwargs['Reader'] in core.FAST_CLASSES.values()):
_read_trace.append({'kwargs': copy.deepcopy(guess_kwargs),
'Reader': guess_kwargs['Reader'].__class__,
'status': 'Disabled: reader only available in fast version',
'dt': '{:.3f} ms'.format(0.0)})
continue
# If user required a fast reader then skip all non-fast readers
if (fast_reader['enable'] == 'force' and
guess_kwargs['Reader'] not in core.FAST_CLASSES.values()):
_read_trace.append({'kwargs': copy.deepcopy(guess_kwargs),
'Reader': guess_kwargs['Reader'].__class__,
'status': 'Disabled: no fast version of reader available',
'dt': '{:.3f} ms'.format(0.0)})
continue
guess_kwargs_ok = True # guess_kwargs are consistent with user_kwargs?
for key, val in read_kwargs.items():
# Do guess_kwargs.update(read_kwargs) except that if guess_args has
# a conflicting key/val pair then skip this guess entirely.
if key not in guess_kwargs:
guess_kwargs[key] = copy.deepcopy(val)
elif val != guess_kwargs[key] and guess_kwargs != fast_kwargs:
guess_kwargs_ok = False
break
if not guess_kwargs_ok:
# User-supplied kwarg is inconsistent with the guess-supplied kwarg, e.g.
# user supplies delimiter="|" but the guess wants to try delimiter=" ",
# so skip the guess entirely.
continue
# Add the guess_kwargs to filtered list only if it is not already there.
if guess_kwargs not in filtered_guess_kwargs:
filtered_guess_kwargs.append(guess_kwargs)
# If there are not at least two formats to guess then return no table
# (None) to indicate that guessing did not occur. In that case the
# non-guess read() will occur and any problems will result in a more useful
# traceback.
if len(filtered_guess_kwargs) <= 1:
return None
# Define whitelist of exceptions that are expected from readers when
# processing invalid inputs. Note that OSError must fall through here
# so one cannot simply catch any exception.
guess_exception_classes = (core.InconsistentTableError, ValueError, TypeError,
AttributeError, core.OptionalTableImportError,
core.ParameterError, cparser.CParserError)
# Now cycle through each possible reader and associated keyword arguments.
# Try to read the table using those args, and if an exception occurs then
# keep track of the failed guess and move on.
for guess_kwargs in filtered_guess_kwargs:
t0 = time.time()
try:
# If guessing will try all Readers then use strict req'ts on column names
if 'Reader' not in read_kwargs:
guess_kwargs['strict_names'] = True
reader = get_reader(**guess_kwargs)
reader.guessing = True
dat = reader.read(table)
_read_trace.append({'kwargs': copy.deepcopy(guess_kwargs),
'Reader': reader.__class__,
'status': 'Success (guessing)',
'dt': '{:.3f} ms'.format((time.time() - t0) * 1000)})
return dat
except guess_exception_classes as err:
_read_trace.append({'kwargs': copy.deepcopy(guess_kwargs),
'status': '{}: {}'.format(err.__class__.__name__,
str(err)),
'dt': '{:.3f} ms'.format((time.time() - t0) * 1000)})
failed_kwargs.append(guess_kwargs)
else:
# Failed all guesses, try the original read_kwargs without column requirements
try:
reader = get_reader(**read_kwargs)
dat = reader.read(table)
_read_trace.append({'kwargs': copy.deepcopy(read_kwargs),
'Reader': reader.__class__,
'status': 'Success with original kwargs without strict_names '
'(guessing)'})
return dat
except guess_exception_classes as err:
_read_trace.append({'kwargs': copy.deepcopy(guess_kwargs),
'status': '{}: {}'.format(err.__class__.__name__,
str(err))})
failed_kwargs.append(read_kwargs)
lines = ['\nERROR: Unable to guess table format with the guesses listed below:']
for kwargs in failed_kwargs:
sorted_keys = sorted([x for x in sorted(kwargs)
if x not in ('Reader', 'Outputter')])
reader_repr = repr(kwargs.get('Reader', basic.Basic))
keys_vals = ['Reader:' + re.search(r"\.(\w+)'>", reader_repr).group(1)]
kwargs_sorted = ((key, kwargs[key]) for key in sorted_keys)
keys_vals.extend([f'{key}: {val!r}' for key, val in kwargs_sorted])
lines.append(' '.join(keys_vals))
msg = ['',
'************************************************************************',
'** ERROR: Unable to guess table format with the guesses listed above. **',
'** **',
'** To figure out why the table did not read, use guess=False and **',
'** fast_reader=False, along with any appropriate arguments to read(). **',
'** In particular specify the format and any known attributes like the **',
'** delimiter. **',
'************************************************************************']
lines.extend(msg)
raise core.InconsistentTableError('\n'.join(lines))
def _get_guess_kwargs_list(read_kwargs):
"""
Get the full list of reader keyword argument dicts that are the basis
for the format guessing process. The returned full list will then be:
- Filtered to be consistent with user-supplied kwargs
- Cleaned to have only unique entries
- Used one by one to try reading the input table
Note that the order of the guess list has been tuned over years of usage.
Maintainers need to be very careful about any adjustments as the
reasoning may not be immediately evident in all cases.
This list can (and usually does) include duplicates. This is a result
of the order tuning, but these duplicates get removed later.
Parameters
----------
read_kwargs : dict
User-supplied read keyword args
Returns
-------
guess_kwargs_list : list
List of read format keyword arg dicts
"""
guess_kwargs_list = []
# If the table is probably HTML based on some heuristics then start with the
# HTML reader.
if read_kwargs.pop('guess_html', None):
guess_kwargs_list.append(dict(Reader=html.HTML))
# Start with ECSV because an ECSV file will be read by Basic. This format
# has very specific header requirements and fails out quickly.
guess_kwargs_list.append(dict(Reader=ecsv.Ecsv))
# Now try readers that accept the user-supplied keyword arguments
# (actually include all here - check for compatibility of arguments later).
# FixedWidthTwoLine would also be read by Basic, so it needs to come first;
# same for RST.
for reader in (fixedwidth.FixedWidthTwoLine, rst.RST,
fastbasic.FastBasic, basic.Basic,
fastbasic.FastRdb, basic.Rdb,
fastbasic.FastTab, basic.Tab,
cds.Cds, daophot.Daophot, sextractor.SExtractor,
ipac.Ipac, latex.Latex, latex.AASTex):
guess_kwargs_list.append(dict(Reader=reader))
# Cycle through the basic-style readers using all combinations of delimiter
# and quotechar.
for Reader in (fastbasic.FastCommentedHeader, basic.CommentedHeader,
fastbasic.FastBasic, basic.Basic,
fastbasic.FastNoHeader, basic.NoHeader):
for delimiter in ("|", ",", " ", r"\s"):
for quotechar in ('"', "'"):
guess_kwargs_list.append(dict(
Reader=Reader, delimiter=delimiter, quotechar=quotechar))
return guess_kwargs_list
def _read_in_chunks(table, **kwargs):
"""
For fast_reader read the ``table`` in chunks and vstack to create
a single table, OR return a generator of chunk tables.
"""
fast_reader = kwargs['fast_reader']
chunk_size = fast_reader.pop('chunk_size')
chunk_generator = fast_reader.pop('chunk_generator', False)
fast_reader['parallel'] = False # No parallel with chunks
tbl_chunks = _read_in_chunks_generator(table, chunk_size, **kwargs)
if chunk_generator:
return tbl_chunks
tbl0 = next(tbl_chunks)
masked = tbl0.masked
# Numpy won't allow resizing the original so make a copy here.
out_cols = {col.name: col.data.copy() for col in tbl0.itercols()}
str_kinds = ('S', 'U')
for tbl in tbl_chunks:
masked |= tbl.masked
for name, col in tbl.columns.items():
# Concatenate current column data and new column data
# If one of the inputs is string-like and the other is not, then
# convert the non-string to a string. In a perfect world this would
# be handled by numpy, but as of numpy 1.13 this results in a string
# dtype that is too long (https://github.com/numpy/numpy/issues/10062).
col1, col2 = out_cols[name], col.data
if col1.dtype.kind in str_kinds and col2.dtype.kind not in str_kinds:
col2 = np.array(col2.tolist(), dtype=col1.dtype.kind)
elif col2.dtype.kind in str_kinds and col1.dtype.kind not in str_kinds:
col1 = np.array(col1.tolist(), dtype=col2.dtype.kind)
# Choose either masked or normal concatenation
concatenate = np.ma.concatenate if masked else np.concatenate
out_cols[name] = concatenate([col1, col2])
# Make final table from numpy arrays, converting dict to list
out_cols = [out_cols[name] for name in tbl0.colnames]
out = tbl0.__class__(out_cols, names=tbl0.colnames, meta=tbl0.meta,
copy=False)
return out
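# Illustrative call from user code (file name and sizes are made up): read a
# large CSV in roughly 100 MB chunks with the fast reader, stacking the chunks
# into a single Table; passing 'chunk_generator': True instead yields an
# iterator of per-chunk Tables:
#     dat = read('big_table.csv', format='csv',
#                fast_reader={'chunk_size': 100000000})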
def _read_in_chunks_generator(table, chunk_size, **kwargs):
"""
For fast_reader read the ``table`` in chunks and return a generator
of tables for each chunk.
"""
@contextlib.contextmanager
def passthrough_fileobj(fileobj, encoding=None):
"""Stub for get_readable_fileobj, which does not seem to work in Py3
for input File-like object, see #6460"""
yield fileobj
# Set up to coerce `table` input into a readable file object by selecting
# an appropriate function.
# Convert table-as-string to a File object. Finding a newline implies
# that the string is not a filename.
if (isinstance(table, str) and ('\n' in table or '\r' in table)):
table = StringIO(table)
fileobj_context = passthrough_fileobj
elif hasattr(table, 'read') and hasattr(table, 'seek'):
fileobj_context = passthrough_fileobj
else:
# string filename or pathlib
fileobj_context = get_readable_fileobj
# Set up for iterating over chunks
kwargs['fast_reader']['return_header_chars'] = True
header = '' # Table header (up to start of data)
prev_chunk_chars = '' # Chars from previous chunk after last newline
first_chunk = True # True for the first chunk, False afterward
with fileobj_context(table, encoding=kwargs.get('encoding')) as fh:
while True:
chunk = fh.read(chunk_size)
# Got fewer chars than requested, must be end of file
final_chunk = len(chunk) < chunk_size
# If this is the last chunk and there is only whitespace then break
if final_chunk and not re.search(r'\S', chunk):
break
# Step backwards from last character in chunk and find first newline
for idx in range(len(chunk) - 1, -1, -1):
if final_chunk or chunk[idx] == '\n':
break
else:
raise ValueError('no newline found in chunk (chunk_size too small?)')
# Stick on the header to the chunk part up to (and including) the
# last newline. Make sure the small strings are concatenated first.
complete_chunk = (header + prev_chunk_chars) + chunk[:idx + 1]
prev_chunk_chars = chunk[idx + 1:]
# Now read the chunk as a complete table
tbl = read(complete_chunk, guess=False, **kwargs)
# For the first chunk pop the meta key which contains the header
# characters (everything up to the start of data) then fix kwargs
# so it doesn't return that in meta any more.
if first_chunk:
header = tbl.meta.pop('__ascii_fast_reader_header_chars__')
first_chunk = False
yield tbl
if final_chunk:
break
extra_writer_pars = ('delimiter', 'comment', 'quotechar', 'formats',
'names', 'include_names', 'exclude_names', 'strip_whitespace')
def get_writer(Writer=None, fast_writer=True, **kwargs):
"""
Initialize a table writer allowing for common customizations. Most of the
default behavior for various parameters is determined by the Writer class.
Parameters
----------
Writer : ``Writer``
Writer class (DEPRECATED). Defaults to :class:`Basic`.
delimiter : str
Column delimiter string
comment : str
String defining a comment line in table
quotechar : str
One-character string to quote fields containing special characters
formats : dict
Dictionary of format specifiers or formatting functions
strip_whitespace : bool
Strip surrounding whitespace from column values.
names : list
List of names corresponding to each data column
include_names : list
List of names to include in output.
exclude_names : list
List of names to exclude from output (applied after ``include_names``)
fast_writer : bool
Whether to use the fast Cython writer.
Returns
-------
writer : `~astropy.io.ascii.BaseReader` subclass
ASCII format writer instance
"""
if Writer is None:
Writer = basic.Basic
if 'strip_whitespace' not in kwargs:
kwargs['strip_whitespace'] = True
writer = core._get_writer(Writer, fast_writer, **kwargs)
# Handle the corner case of wanting to disable writing table comments for the
# commented_header format. This format *requires* a string for `write_comment`
# because that is used for the header column row, so it is not possible to
# set the input `comment` to None. Without adding a new keyword or assuming
# a default comment character, there is no other option but to tell user to
# simply remove the meta['comments'].
if (isinstance(writer, (basic.CommentedHeader, fastbasic.FastCommentedHeader))
and not isinstance(kwargs.get('comment', ''), str)):
raise ValueError("for the commented_header writer you must supply a string\n"
"value for the `comment` keyword. In order to disable writing\n"
"table comments use `del t.meta['comments']` prior to writing.")
return writer
def write(table, output=None, format=None, Writer=None, fast_writer=True, *,
overwrite=None, **kwargs):
# Docstring inserted below
if isinstance(output, str):
if os.path.lexists(output):
if overwrite is None:
warnings.warn(
"{} already exists. "
"Automatically overwriting ASCII files is deprecated. "
"Use the argument 'overwrite=True' in the future.".format(
output), AstropyDeprecationWarning)
elif not overwrite:
raise OSError(f"{output} already exists")
if output is None:
output = sys.stdout
# Ensure that `table` is a Table subclass.
names = kwargs.get('names')
if isinstance(table, Table):
# Note that making a copy of the table here is inefficient but
# without this copy a number of tests break (e.g. in test_fixedwidth).
# See #7605.
new_tbl = table.__class__(table, names=names)
# This makes a copy of the table columns. This is subject to a
# corner-case problem if writing a table with masked columns to ECSV
# where serialize_method is set to 'data_mask'. In this case that
# attribute gets dropped in the copy, so do the copy here. This
# should be removed when `info` really contains all the attributes
# (#6720).
for new_col, col in zip(new_tbl.itercols(), table.itercols()):
if isinstance(col, MaskedColumn):
new_col.info.serialize_method = col.info.serialize_method
table = new_tbl
else:
table = Table(table, names=names)
table0 = table[:0].copy()
core._apply_include_exclude_names(table0, kwargs.get('names'),
kwargs.get('include_names'), kwargs.get('exclude_names'))
diff_format_with_names = set(kwargs.get('formats', [])) - set(table0.colnames)
if diff_format_with_names:
warnings.warn(
            'The keys {} specified in the formats argument do not match a column name.'
.format(diff_format_with_names), AstropyWarning)
if table.has_mixin_columns:
fast_writer = False
Writer = _get_format_class(format, Writer, 'Writer')
writer = get_writer(Writer=Writer, fast_writer=fast_writer, **kwargs)
if writer._format_name in core.FAST_CLASSES:
writer.write(table, output)
return
lines = writer.write(table)
# Write the lines to output
outstr = os.linesep.join(lines)
if not hasattr(output, 'write'):
# NOTE: we need to specify newline='', otherwise the default
# behavior is for Python to translate \r\n (which we write because
# of os.linesep) into \r\r\n. Specifying newline='' disables any
# auto-translation.
output = open(output, 'w', newline='')
output.write(outstr)
output.write(os.linesep)
output.close()
else:
output.write(outstr)
output.write(os.linesep)
write.__doc__ = core.WRITE_DOCSTRING
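# Illustrative use of write() (assuming t is an astropy Table; the file name is
# made up): write t to stdout as CSV, then to a file, overwriting any existing
# file:
#     write(t, format='csv')
#     write(t, 'out.csv', format='csv', overwrite=True)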
def get_read_trace():
"""
Return a traceback of the attempted read formats for the last call to
`~astropy.io.ascii.read` where guessing was enabled. This is primarily for
debugging.
The return value is a list of dicts, where each dict includes the keyword
args ``kwargs`` used in the read call and the returned ``status``.
Returns
-------
trace : list of dicts
Ordered list of format guesses and status
"""
return copy.deepcopy(_read_trace)
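# Illustrative debugging session (file name is made up):
#     dat = read('mystery_table.dat')        # guessing enabled by default
#     for entry in get_read_trace():
#         print(entry['status'])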
|
|
import os
import re
from runpy import run_path
import sys
import warnings
import click
try:
from pygments import highlight
from pygments.lexers import PythonLexer
from pygments.formatters import TerminalFormatter
PYGMENTS = True
except ImportError:
PYGMENTS = False
from toolz import valfilter, concatv
from zipline.algorithm import TradingAlgorithm
from zipline.data.bundles.core import load
from zipline.data.data_portal import DataPortal
from zipline.finance.trading import TradingEnvironment
from zipline.pipeline.data import USEquityPricing
from zipline.pipeline.loaders import USEquityPricingLoader
from zipline.utils.calendars import get_calendar
from zipline.utils.factory import create_simulation_parameters
import zipline.utils.paths as pth
class _RunAlgoError(click.ClickException, ValueError):
"""Signal an error that should have a different message if invoked from
the cli.
Parameters
----------
pyfunc_msg : str
The message that will be shown when called as a python function.
cmdline_msg : str
The message that will be shown on the command line.
"""
exit_code = 1
def __init__(self, pyfunc_msg, cmdline_msg):
super(_RunAlgoError, self).__init__(cmdline_msg)
self.pyfunc_msg = pyfunc_msg
def __str__(self):
return self.pyfunc_msg
def _run(handle_data,
initialize,
before_trading_start,
analyze,
algofile,
algotext,
defines,
data_frequency,
capital_base,
data,
bundle,
bundle_timestamp,
start,
end,
output,
print_algo,
local_namespace,
environ):
"""Run a backtest for the given algorithm.
This is shared between the cli and :func:`zipline.run_algo`.
"""
if algotext is not None:
if local_namespace:
ip = get_ipython() # noqa
namespace = ip.user_ns
else:
namespace = {}
for assign in defines:
try:
name, value = assign.split('=', 2)
except ValueError:
raise ValueError(
'invalid define %r, should be of the form name=value' %
assign,
)
try:
                    # evaluate in the same namespace so names may refer to
                    # each other
namespace[name] = eval(value, namespace)
except Exception as e:
raise ValueError(
'failed to execute definition for name %r: %s' % (name, e),
)
elif defines:
raise _RunAlgoError(
'cannot pass define without `algotext`',
"cannot pass '-D' / '--define' without '-t' / '--algotext'",
)
else:
namespace = {}
if algofile is not None:
algotext = algofile.read()
if print_algo:
if PYGMENTS:
highlight(
algotext,
PythonLexer(),
TerminalFormatter(),
outfile=sys.stdout,
)
else:
click.echo(algotext)
if bundle is not None:
bundle_data = load(
bundle,
environ,
bundle_timestamp,
)
prefix, connstr = re.split(
r'sqlite:///',
str(bundle_data.asset_finder.engine.url),
maxsplit=1,
)
if prefix:
raise ValueError(
"invalid url %r, must begin with 'sqlite:///'" %
str(bundle_data.asset_finder.engine.url),
)
env = TradingEnvironment(asset_db_path=connstr)
first_trading_day =\
bundle_data.equity_minute_bar_reader.first_trading_day
data = DataPortal(
env.asset_finder, get_calendar("TSE"),
first_trading_day=first_trading_day,
equity_minute_reader=bundle_data.equity_minute_bar_reader,
equity_daily_reader=bundle_data.equity_daily_bar_reader,
adjustment_reader=bundle_data.adjustment_reader,
)
pipeline_loader = USEquityPricingLoader(
bundle_data.equity_daily_bar_reader,
bundle_data.adjustment_reader,
)
def choose_loader(column):
if column in USEquityPricing.columns:
return pipeline_loader
raise ValueError(
"No PipelineLoader registered for column %s." % column
)
else:
env = None
choose_loader = None
perf = TradingAlgorithm(
namespace=namespace,
capital_base=capital_base,
env=env,
get_pipeline_loader=choose_loader,
sim_params=create_simulation_parameters(
start=start,
end=end,
capital_base=capital_base,
data_frequency=data_frequency,
),
**{
'initialize': initialize,
'handle_data': handle_data,
'before_trading_start': before_trading_start,
'analyze': analyze,
} if algotext is None else {
'algo_filename': getattr(algofile, 'name', '<algorithm>'),
'script': algotext,
}
).run(
data,
overwrite_sim_params=False,
)
if output == '-':
click.echo(str(perf))
elif output != os.devnull: # make the zipline magic not write any data
perf.to_pickle(output)
return perf
# All of the loaded extensions. We don't want to load an extension twice.
_loaded_extensions = set()
def load_extensions(default, extensions, strict, environ, reload=False):
"""Load all of the given extensions. This should be called by run_algo
or the cli.
Parameters
----------
default : bool
        Load the default extension (~/.zipline/extension.py)?
    extensions : iterable[str]
The paths to the extensions to load. If the path ends in ``.py`` it is
treated as a script and executed. If it does not end in ``.py`` it is
treated as a module to be imported.
strict : bool
        Should a failure to load an extension raise an error? If this is
        false, a warning will be issued instead.
environ : mapping
The environment to use to find the default extension path.
reload : bool, optional
Reload any extensions that have already been loaded.
"""
if default:
default_extension_path = pth.default_extension(environ=environ)
pth.ensure_file(default_extension_path)
# put the default extension first so other extensions can depend on
# the order they are loaded
extensions = concatv([default_extension_path], extensions)
for ext in extensions:
if ext in _loaded_extensions and not reload:
continue
try:
            # load all of the zipline extensions
if ext.endswith('.py'):
run_path(ext, run_name='<extension>')
else:
__import__(ext)
except Exception as e:
if strict:
# if `strict` we should raise the actual exception and fail
raise
# without `strict` we should just log the failure
warnings.warn(
'Failed to load extension: %r\n%s' % (ext, e),
stacklevel=2
)
else:
_loaded_extensions.add(ext)
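# Illustrative call (the paths are made up): load the default extension plus a
# module and a script, warning rather than failing if one cannot be loaded:
#     load_extensions(default=True,
#                     extensions=['my_pkg.my_ext', '/tmp/extra_ext.py'],
#                     strict=False, environ=os.environ)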
def run_algorithm(start,
end,
initialize,
capital_base,
handle_data=None,
before_trading_start=None,
analyze=None,
data_frequency='daily',
data=None,
bundle=None,
bundle_timestamp=None,
default_extension=True,
extensions=(),
strict_extensions=True,
environ=os.environ):
"""Run a trading algorithm.
Parameters
----------
start : datetime
The start date of the backtest.
end : datetime
        The end date of the backtest.
initialize : callable[context -> None]
The initialize function to use for the algorithm. This is called once
        at the very beginning of the backtest and should be used to set up
any state needed by the algorithm.
capital_base : float
The starting capital for the backtest.
handle_data : callable[(context, BarData) -> None], optional
The handle_data function to use for the algorithm. This is called
every minute when ``data_frequency == 'minute'`` or every day
when ``data_frequency == 'daily'``.
before_trading_start : callable[(context, BarData) -> None], optional
The before_trading_start function for the algorithm. This is called
once before each trading day (after initialize on the first day).
analyze : callable[(context, pd.DataFrame) -> None], optional
The analyze function to use for the algorithm. This function is called
once at the end of the backtest and is passed the context and the
performance data.
data_frequency : {'daily', 'minute'}, optional
The data frequency to run the algorithm at.
data : pd.DataFrame, pd.Panel, or DataPortal, optional
The ohlcv data to run the backtest with.
This argument is mutually exclusive with:
``bundle``
``bundle_timestamp``
bundle : str, optional
The name of the data bundle to use to load the data to run the backtest
with. This defaults to 'quantopian-quandl'.
This argument is mutually exclusive with ``data``.
bundle_timestamp : datetime, optional
The datetime to lookup the bundle data for. This defaults to the
current time.
This argument is mutually exclusive with ``data``.
default_extension : bool, optional
Should the default zipline extension be loaded. This is found at
``$ZIPLINE_ROOT/extension.py``
extensions : iterable[str], optional
The names of any other extensions to load. Each element may either be
a dotted module path like ``a.b.c`` or a path to a python file ending
in ``.py`` like ``a/b/c.py``.
strict_extensions : bool, optional
Should the run fail if any extensions fail to load. If this is false,
a warning will be raised instead.
environ : mapping[str -> str], optional
The os environment to use. Many extensions use this to get parameters.
This defaults to ``os.environ``.
Returns
-------
perf : pd.DataFrame
The daily performance of the algorithm.
See Also
--------
zipline.data.bundles.bundles : The available data bundles.
"""
load_extensions(default_extension, extensions, strict_extensions, environ)
non_none_data = valfilter(bool, {
'data': data is not None,
'bundle': bundle is not None,
})
if not non_none_data:
# if neither data nor bundle are passed use 'quantopian-quandl'
bundle = 'quantopian-quandl'
elif len(non_none_data) != 1:
raise ValueError(
'must specify one of `data`, `data_portal`, or `bundle`,'
' got: %r' % non_none_data,
)
if 'bundle' not in non_none_data and bundle_timestamp is not None:
raise ValueError(
'cannot specify `bundle_timestamp` without passing `bundle`',
)
return _run(
handle_data=handle_data,
initialize=initialize,
before_trading_start=before_trading_start,
analyze=analyze,
algofile=None,
algotext=None,
defines=(),
data_frequency=data_frequency,
capital_base=capital_base,
data=data,
bundle=bundle,
bundle_timestamp=bundle_timestamp,
start=start,
end=end,
output=os.devnull,
print_algo=False,
local_namespace=False,
environ=environ,
)
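# A minimal usage sketch (illustrative only; assumes the 'quantopian-quandl'
# bundle has been ingested and that 'AAPL' resolves in that bundle):
#
#     import pandas as pd
#     from zipline.api import order_target_percent, symbol
#
#     def initialize(context):
#         context.asset = symbol('AAPL')
#
#     def handle_data(context, data):
#         order_target_percent(context.asset, 1.0)
#
#     perf = run_algorithm(
#         start=pd.Timestamp('2014-01-01', tz='utc'),
#         end=pd.Timestamp('2014-12-31', tz='utc'),
#         initialize=initialize,
#         handle_data=handle_data,
#         capital_base=100000,
#         bundle='quantopian-quandl',
#     )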
|
|
# Copyright 2014: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import json
import six
from rally.task.processing import histogram as histo
from rally.task.processing import utils
from rally.ui import utils as ui_utils
def _prepare_data(data):
durations = []
idle_durations = []
atomic_durations = {}
output = {}
output_errors = []
output_stacked = []
errors = []
# NOTE(maretskiy): We need this extra iteration to collect the full set
# of atomic action and scenario output names before processing the
# iteration data below.
atomic_names = set()
output_names = set()
for r in data["result"]:
atomic_names.update(r["atomic_actions"].keys())
output_names.update(r["scenario_output"]["data"].keys())
for idx, r in enumerate(data["result"]):
# NOTE(maretskiy): Some iterations may be missing data for certain
# actions or outputs, so we keep the data consistent by filling in
# zero values
if len(r["atomic_actions"]) < len(atomic_names):
for atomic_name in atomic_names:
r["atomic_actions"].setdefault(atomic_name, 0)
if len(r["scenario_output"]["data"]) < len(output_names):
for output_name in output_names:
r["scenario_output"]["data"].setdefault(output_name, 0)
if r["scenario_output"]["errors"]:
output_errors.append((idx, r["scenario_output"]["errors"]))
for param, value in r["scenario_output"]["data"].items():
try:
output[param].append(value)
except KeyError:
output[param] = [value]
if r["error"]:
type_, message, traceback = r["error"]
errors.append({"iteration": idx,
"type": type_,
"message": message,
"traceback": traceback})
# NOTE(maretskiy): Reset failed durations (no sense to display)
r["duration"] = 0
r["idle_duration"] = 0
durations.append(r["duration"])
idle_durations.append(r["idle_duration"])
for met, duration in r["atomic_actions"].items():
try:
atomic_durations[met].append(duration)
except KeyError:
atomic_durations[met] = [duration]
for k, v in six.iteritems(output):
output_stacked.append({"key": k, "values": utils.compress(v)})
for k, v in six.iteritems(atomic_durations):
atomic_durations[k] = utils.compress(v)
return {
"total_durations": {
"duration": utils.compress(durations),
"idle_duration": utils.compress(idle_durations)},
"atomic_durations": atomic_durations,
"output": output_stacked,
"output_errors": output_errors,
"errors": errors,
"sla": data["sla"],
"load_duration": data["load_duration"],
"full_duration": data["full_duration"],
}
def _process_main_duration(result, data):
histogram_data = [r["duration"] for r in result["result"]
if not r["error"]]
histograms = []
if histogram_data:
hvariety = histo.hvariety(histogram_data)
for i in range(len(hvariety)):
histograms.append(histo.Histogram(histogram_data,
hvariety[i]["number_of_bins"],
hvariety[i]["method"]))
stacked_area = []
for key in "duration", "idle_duration":
stacked_area.append({
"key": key,
"values": [(i, round(d, 2))
for i, d in data["total_durations"][key]],
})
return {
"pie": [
{"key": "success", "value": len(histogram_data)},
{"key": "errors", "value": len(data["errors"])},
],
"iter": stacked_area,
"histogram": [
{
"key": "task",
"method": histogram.method,
"values": [{"x": round(x, 2), "y": float(y)}
for x, y in zip(histogram.x_axis, histogram.y_axis)]
} for histogram in histograms
],
}
def _process_atomic(result, data):
def avg(lst, key=None):
lst = lst if not key else map(lambda x: x[key], lst)
return utils.mean(lst)
# NOTE(boris-42): Each entry in result["result"] has the structure:
#     {"error": NoneOrDict,
#      "atomic_actions": {
#          "action1": <duration>,
#          "action2": <duration>
#      }
#     }
# Our goal is to build the structure:
#     [{"key": $atomic_actions.action,
#       "values": [[order, $atomic_actions.duration
#                   if not $error else 0], ...]}]
#
# The order of actions in "atomic_actions" is the same for all
# iterations, so we take the first non-"error" iteration and build the
# initial stacked_area list: [{"key": "action", "values": []}]
stacked_area = []
for row in result["result"]:
if not row["error"] and "atomic_actions" in row:
stacked_area = [{"key": a, "values": []}
for a in row["atomic_actions"]]
break
# NOTE(boris-42): pie is similar to stacked_area; the only difference is
# the structure of its values. For an $error iteration nothing is added
# to pie; for a successful iteration we add just $atomic_actions.duration
# (without the order index)
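# Illustrative example (assumed data): given two successful iterations with
# atomic_actions {"act1": 1.2, "act2": 0.3} and {"act1": 1.0, "act2": 0.4},
# the loop below fills pie as
#     [{"key": "act1", "values": [1.2, 1.0]},
#      {"key": "act2", "values": [0.3, 0.4]}]
# while a failed iteration at index i appends [i + 1, 0.0] to every
# stacked_area entry instead.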
pie = []
histogram_data = []
if stacked_area:
pie = copy.deepcopy(stacked_area)
histogram_data = copy.deepcopy(stacked_area)
for i, res in enumerate(result["result"]):
# in case of error put (order, 0.0) to all actions of stacked area
if res["error"]:
for k in range(len(stacked_area)):
stacked_area[k]["values"].append([i + 1, 0.0])
continue
# in case of non error put real durations to pie and stacked area
for j, action in enumerate(res["atomic_actions"].keys()):
# in case any single atomic action failed, put 0
action_duration = res["atomic_actions"][action] or 0.0
pie[j]["values"].append(action_duration)
histogram_data[j]["values"].append(action_duration)
# filter out empty action lists in pie / histogram to avoid errors
pie = [x for x in pie if x["values"]]
histogram_data = [x for x in histogram_data if x["values"]]
histograms = [[] for atomic_action in range(len(histogram_data))]
for i, atomic_action in enumerate(histogram_data):
hvariety = histo.hvariety(atomic_action["values"])
for v in range(len(hvariety)):
histograms[i].append(histo.Histogram(atomic_action["values"],
hvariety[v]["number_of_bins"],
hvariety[v]["method"],
atomic_action["key"]))
stacked_area = []
for name, durations in six.iteritems(data["atomic_durations"]):
stacked_area.append({
"key": name,
"values": [(i, round(d, 2)) for i, d in durations],
})
return {
"histogram": [[
{
"key": action.key,
"disabled": i,
"method": action.method,
"values": [{"x": round(x, 2), "y": y}
for x, y in zip(action.x_axis, action.y_axis)]
} for action in atomic_action_list]
for i, atomic_action_list in enumerate(histograms)
],
"iter": stacked_area,
"pie": [{"key": x["key"], "value": avg(x["values"])} for x in pie]
}
def _get_atomic_action_durations(result):
raw = result.get("result", [])
actions_data = utils.get_atomic_actions_data(raw)
table = []
total = []
for action in actions_data:
durations = actions_data[action]
if durations:
data = [action,
round(min(durations), 3),
round(utils.median(durations), 3),
round(utils.percentile(durations, 0.90), 3),
round(utils.percentile(durations, 0.95), 3),
round(max(durations), 3),
round(utils.mean(durations), 3),
"%.1f%%" % (len(durations) * 100.0 / len(raw)),
len(raw)]
else:
data = [action, None, None, None, None, None, None, 0, len(raw)]
# Save 'total' - it must be appended last
if action == "total":
total = data
continue
table.append(data)
if total:
table.append(total)
return table
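# Illustrative row (assumed durations): each entry of the returned table has
# the form
#     ["action_name", min, median, 90%ile, 95%ile, max, avg, "100.0%", count]
# which matches the "table_cols" headers defined in _process_results below.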
def _process_results(results):
output = []
source_dict = {}
for result in results:
table_cols = ["Action",
"Min (sec)",
"Median (sec)",
"90%ile (sec)",
"95%ile (sec)",
"Max (sec)",
"Avg (sec)",
"Success",
"Count"]
table_rows = _get_atomic_action_durations(result)
scenario_name, kw, pos = (result["key"]["name"],
result["key"]["kw"], result["key"]["pos"])
data = _prepare_data(result)
cls = scenario_name.split(".")[0]
met = scenario_name.split(".")[1]
name = "%s%s" % (met, (pos and " [%d]" % (int(pos) + 1) or ""))
try:
source_dict[scenario_name].append(kw)
except KeyError:
source_dict[scenario_name] = [kw]
output.append({
"cls": cls,
"met": met,
"pos": int(pos),
"name": name,
"runner": kw["runner"]["type"],
"config": json.dumps({scenario_name: [kw]}, indent=2),
"iterations": _process_main_duration(result, data),
"atomic": _process_atomic(result, data),
"table_cols": table_cols,
"table_rows": table_rows,
"output": data["output"],
"output_errors": data["output_errors"],
"errors": data["errors"],
"load_duration": data["load_duration"],
"full_duration": data["full_duration"],
"sla": data["sla"],
"sla_success": all([sla["success"] for sla in data["sla"]]),
"iterations_num": len(result["result"]),
})
source = json.dumps(source_dict, indent=2, sort_keys=True)
scenarios = sorted(output, key=lambda r: "%s%s" % (r["cls"], r["name"]))
return source, scenarios
def plot(results):
template = ui_utils.get_template("task/report.mako")
source, scenarios = _process_results(results)
return template.render(data=json.dumps(scenarios),
source=json.dumps(source))
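# A minimal usage sketch (illustrative; assumes `results` is a list of Rally
# task result dicts, each carrying "key", "result", "sla", "load_duration"
# and "full_duration" fields):
#
#     html = plot(results)
#     with open("report.html", "w") as f:
#         f.write(html)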
|
|
from __future__ import absolute_import, print_function, division
import os
import urwid
import urwid.util
import six
import netlib
from mitmproxy import utils
from mitmproxy.console import signals
from mitmproxy.flow import export
from netlib import human
try:
import pyperclip
except ImportError:
pyperclip = False
VIEW_FLOW_REQUEST = 0
VIEW_FLOW_RESPONSE = 1
METHOD_OPTIONS = [
("get", "g"),
("post", "p"),
("put", "u"),
("head", "h"),
("trace", "t"),
("delete", "d"),
("options", "o"),
("edit raw", "e"),
]
def is_keypress(k):
"""
Is this input event a keypress?
"""
if isinstance(k, six.string_types):
return True
def highlight_key(str, key, textattr="text", keyattr="key"):
l = []
parts = str.split(key, 1)
if parts[0]:
l.append((textattr, parts[0]))
l.append((keyattr, key))
if parts[1]:
l.append((textattr, parts[1]))
return l
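# Illustrative example (assumed markup attributes): highlight_key("quit", "q")
# returns [("key", "q"), ("text", "uit")], i.e. the shortcut letter rendered
# with the "key" attribute and the rest with "text".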
KEY_MAX = 30
def format_keyvals(lst, key="key", val="text", indent=0):
"""
Format a list of (key, value) tuples.
If key is None, it's treated specially:
- We assume a sub-value, and add an extra indent.
- The value is treated as a pre-formatted list of directives.
"""
ret = []
if lst:
maxk = min(max(len(i[0]) for i in lst if i and i[0]), KEY_MAX)
for i, kv in enumerate(lst):
if kv is None:
ret.append(urwid.Text(""))
else:
if isinstance(kv[1], urwid.Widget):
v = kv[1]
elif kv[1] is None:
v = urwid.Text("")
else:
v = urwid.Text([(val, kv[1])])
ret.append(
urwid.Columns(
[
("fixed", indent, urwid.Text("")),
(
"fixed",
maxk,
urwid.Text([(key, kv[0] or "")])
),
v
],
dividechars = 2
)
)
return ret
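# Illustrative usage (assumed data): format_keyvals([("host", "example.com"),
# ("path", "/index")]) returns one urwid.Columns row per pair, with the keys
# padded to a shared width capped at KEY_MAX.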
def shortcuts(k):
if k == " ":
k = "page down"
elif k == "ctrl f":
k = "page down"
elif k == "ctrl b":
k = "page up"
elif k == "j":
k = "down"
elif k == "k":
k = "up"
return k
def fcol(s, attr):
s = six.text_type(s)
return (
"fixed",
len(s),
urwid.Text(
[
(attr, s)
]
)
)
if urwid.util.detected_encoding:
SYMBOL_REPLAY = u"\u21ba"
SYMBOL_RETURN = u"\u2190"
SYMBOL_MARK = u"\u25cf"
else:
SYMBOL_REPLAY = u"[r]"
SYMBOL_RETURN = u"<-"
SYMBOL_MARK = "[m]"
# Save file to disk
def save_data(path, data):
if not path:
return
try:
if isinstance(data, bytes):
mode = "wb"
else:
mode = "w"
with open(path, mode) as f:
f.write(data)
except IOError as v:
signals.status_message.send(message=v.strerror)
def ask_save_overwrite(path, data):
if not path:
return
path = os.path.expanduser(path)
if os.path.exists(path):
def save_overwrite(k):
if k == "y":
save_data(path, data)
signals.status_prompt_onekey.send(
prompt = "'" + path + "' already exists. Overwrite?",
keys = (
("yes", "y"),
("no", "n"),
),
callback = save_overwrite
)
else:
save_data(path, data)
def ask_save_path(data, prompt="File path"):
signals.status_prompt_path.send(
prompt = prompt,
callback = ask_save_overwrite,
args = (data, )
)
def ask_scope_and_callback(flow, cb, *args):
request_has_content = flow.request and flow.request.raw_content
response_has_content = flow.response and flow.response.raw_content
if request_has_content and response_has_content:
signals.status_prompt_onekey.send(
prompt = "Save",
keys = (
("request", "q"),
("response", "s"),
("both", "b"),
),
callback = cb,
args = (flow,) + args
)
elif response_has_content:
cb("s", flow, *args)
else:
cb("q", flow, *args)
def copy_to_clipboard_or_prompt(data):
# pyperclip calls encode('utf-8') on the data to be copied without checking;
# if the data is already encoded that way, a UnicodeDecodeError is thrown.
if isinstance(data, bytes):
toclip = data.decode("utf8", "replace")
else:
toclip = data
try:
pyperclip.copy(toclip)
except (RuntimeError, UnicodeDecodeError, AttributeError, TypeError):
def save(k):
if k == "y":
ask_save_path(data, "Save data")
signals.status_prompt_onekey.send(
prompt = "Cannot copy data to clipboard. Save as file?",
keys = (
("yes", "y"),
("no", "n"),
),
callback = save
)
def format_flow_data(key, scope, flow):
data = b""
if scope in ("q", "b"):
request = flow.request.copy()
request.decode(strict=False)
if request.content is None:
return None, "Request content is missing"
if key == "h":
data += netlib.http.http1.assemble_request(request)
elif key == "c":
data += request.get_content(strict=False)
else:
raise ValueError("Unknown key: {}".format(key))
if scope == "b" and flow.request.raw_content and flow.response:
# Add padding between request and response
data += b"\r\n" * 2
if scope in ("s", "b") and flow.response:
response = flow.response.copy()
response.decode(strict=False)
if response.content is None:
return None, "Response content is missing"
if key == "h":
data += netlib.http.http1.assemble_response(response)
elif key == "c":
data += response.get_content(strict=False)
else:
raise ValueError("Unknown key: {}".format(key))
return data, False
def handle_flow_data(scope, flow, key, writer):
"""
key: _c_ontent, _h_eaders+content, _u_rl
scope: re_q_uest, re_s_ponse, _b_oth
writer: copy_to_clipboard_or_prompt, ask_save_path
"""
data, err = format_flow_data(key, scope, flow)
if err:
signals.status_message.send(message=err)
return
if not data:
if scope == "q":
signals.status_message.send(message="No request content.")
elif scope == "s":
signals.status_message.send(message="No response content.")
else:
signals.status_message.send(message="No content.")
return
writer(data)
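# Illustrative call (assumes `flow` is an HTTP flow with request content):
# copy the assembled request headers and body to the clipboard with
#
#     handle_flow_data("q", flow, "h", copy_to_clipboard_or_prompt)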
def ask_save_body(scope, flow):
"""
Save either the request or the response body to disk.
scope: re_q_uest, re_s_ponse, _b_oth, None (ask user if necessary)
"""
request_has_content = flow.request and flow.request.raw_content
response_has_content = flow.response and flow.response.raw_content
if scope is None:
ask_scope_and_callback(flow, ask_save_body)
elif scope == "q" and request_has_content:
ask_save_path(
flow.request.get_content(strict=False),
"Save request content to"
)
elif scope == "s" and response_has_content:
ask_save_path(
flow.response.get_content(strict=False),
"Save response content to"
)
elif scope == "b" and request_has_content and response_has_content:
ask_save_path(
(flow.request.get_content(strict=False) + b"\n" +
flow.response.get_content(strict=False)),
"Save request & response content to"
)
else:
signals.status_message.send(message="No content.")
def export_to_clip_or_file(key, scope, flow, writer):
"""
Export selected flow to clipboard or a file.
key: _c_ontent, _h_eaders+content, _u_rl,
cu_r_l_command, _p_ython_code,
_l_ocust_code, locust_t_ask
scope: None, _a_ll, re_q_uest, re_s_ponse
writer: copy_to_clipboard_or_prompt, ask_save_path
"""
for _, exp_key, exporter in export.EXPORTERS:
if key == exp_key:
if exporter is None: # 'c' & 'h'
if scope is None:
ask_scope_and_callback(flow, handle_flow_data, key, writer)
else:
handle_flow_data(scope, flow, key, writer)
else: # other keys
writer(exporter(flow))
flowcache = utils.LRUCache(800)
def raw_format_flow(f, focus, extended):
f = dict(f)
pile = []
req = []
if extended:
req.append(
fcol(
human.format_timestamp(f["req_timestamp"]),
"highlight"
)
)
else:
req.append(fcol(">>" if focus else " ", "focus"))
if f["marked"]:
req.append(fcol(SYMBOL_MARK, "mark"))
if f["req_is_replay"]:
req.append(fcol(SYMBOL_REPLAY, "replay"))
req.append(fcol(f["req_method"], "method"))
preamble = sum(i[1] for i in req) + len(req) - 1
if f["intercepted"] and not f["acked"]:
uc = "intercept"
elif "resp_code" in f or "err_msg" in f:
uc = "text"
else:
uc = "title"
url = f["req_url"]
if f["req_http_version"] not in ("HTTP/1.0", "HTTP/1.1"):
url += " " + f["req_http_version"]
req.append(
urwid.Text([(uc, url)])
)
pile.append(urwid.Columns(req, dividechars=1))
resp = []
resp.append(
("fixed", preamble, urwid.Text(""))
)
if "resp_code" in f:
codes = {
2: "code_200",
3: "code_300",
4: "code_400",
5: "code_500",
}
ccol = codes.get(f["resp_code"] // 100, "code_other")
resp.append(fcol(SYMBOL_RETURN, ccol))
if f["resp_is_replay"]:
resp.append(fcol(SYMBOL_REPLAY, "replay"))
resp.append(fcol(f["resp_code"], ccol))
if extended:
resp.append(fcol(f["resp_reason"], ccol))
if f["intercepted"] and f["resp_code"] and not f["acked"]:
rc = "intercept"
else:
rc = "text"
if f["resp_ctype"]:
resp.append(fcol(f["resp_ctype"], rc))
resp.append(fcol(f["resp_clen"], rc))
resp.append(fcol(f["roundtrip"], rc))
elif f["err_msg"]:
resp.append(fcol(SYMBOL_RETURN, "error"))
resp.append(
urwid.Text([
(
"error",
f["err_msg"]
)
])
)
pile.append(urwid.Columns(resp, dividechars=1))
return urwid.Pile(pile)
def format_flow(f, focus, extended=False, hostheader=False):
d = dict(
intercepted = f.intercepted,
acked = f.reply.acked,
req_timestamp = f.request.timestamp_start,
req_is_replay = f.request.is_replay,
req_method = f.request.method,
req_url = f.request.pretty_url if hostheader else f.request.url,
req_http_version = f.request.http_version,
err_msg = f.error.msg if f.error else None,
marked = f.marked,
)
if f.response:
if f.response.raw_content:
contentdesc = human.pretty_size(len(f.response.raw_content))
elif f.response.raw_content is None:
contentdesc = "[content missing]"
else:
contentdesc = "[no content]"
duration = 0
if f.response.timestamp_end and f.request.timestamp_start:
duration = f.response.timestamp_end - f.request.timestamp_start
roundtrip = human.pretty_duration(duration)
d.update(dict(
resp_code = f.response.status_code,
resp_reason = f.response.reason,
resp_is_replay = f.response.is_replay,
resp_clen = contentdesc,
roundtrip = roundtrip,
))
t = f.response.headers.get("content-type")
if t:
d["resp_ctype"] = t.split(";")[0]
else:
d["resp_ctype"] = ""
return flowcache.get(
raw_format_flow,
tuple(sorted(d.items())), focus, extended
)
|
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Python front-end supports for functions.
NOTE: At this time, functions are experimental and subject to change!. Proceed
with caution.
"""
import collections
import hashlib
from tensorflow.core.framework import attr_value_pb2
from tensorflow.core.framework import function_pb2
from tensorflow.python.client import pywrap_tf_session as c_api
from tensorflow.python.eager import context
from tensorflow.python.framework import c_api_util
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import graph_to_function_def
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.util import compat
from tensorflow.python.util import function_utils
from tensorflow.python.util import tf_contextlib
from tensorflow.python.util import tf_inspect
class Defun(object):
"""Decorator used to define TensorFlow functions.
Use this decorator to make a Python function usable directly as a TensorFlow
function.
The decorated function must add ops to the default graph and return zero or
more `Tensor` objects. Call the decorator with named arguments, one for each
argument of the function to decorate, with the expected type of the argument
as value.
For example if the function to decorate accepts two `tf.float32` arguments
named `x` and `y`, call the decorator with:
@Defun(tf.float32, tf.float32)
def foo(x, y):
...
When you call the decorated function, it adds the `call` ops to the
default graph. In addition, it adds the definition of the function into the
default graph. Because the addition of the function into the graph
is deferred, the decorator can be used anywhere in the program.
Any variables created inside of the function are hoisted into the outer graph.
Note that the variables are created in the variable scope that was active
during the first call to the function. Subsequent function calls will refer to
the same set of variables.
Definitions of functions in a graph are frozen as soon as the graph is used to
create a session. However, new functions and new calls to existing functions
may be added to the graph, with the new functions themselves becoming
immediately frozen.
Example, but also see the [How To on functions](link_needed).
```python
# Defining the function.
@tf.Defun(tf.float32, tf.float32)
def MyFunc(x, y):
return x + y, x - y
# Building the graph.
a = tf.constant([1.0])
b = tf.constant([2.0])
c, d = MyFunc(a, b, name='mycall')
```
"""
def __init__(self, *input_types, **kwargs):
"""Create a `Defun` decorator.
Args:
*input_types: A list of `tf.DType`
**kwargs: Optional keyword arguments, including
func_name - (optional). A python string, the name to use to
declare this `Function` in the graph.
grad_func - (optional). A function implementing the gradient
of the function-to-register. This must be a
`_DefinedFunction` object. The gradient
function must satisfy the criterion defined in
function.proto:GradientDef.
python_grad_func - (optional). A function implementing the
gradient of the function python-side. This function must
take the current op and the gradients w.r.t. its outputs,
and return the gradients w.r.t. the inputs. That is, it must
implement the interface expected by `tf.RegisterGradient`.
This will be called by tf.gradients to add the gradient ops
to the graph. At most one of grad_func and python_grad_func
can be specified.
out_names - (optional). A list of strings, one per output
tensor.
shape_func - (optional). A function taking the op and returning a list
of static shapes to set for the function's outputs.
"""
self._input_types = input_types
self._func_name = kwargs.pop("func_name", None)
self._grad_func = kwargs.pop("grad_func", None)
self._python_grad_func = kwargs.pop("python_grad_func", None)
self._out_names = kwargs.pop("out_names", None)
self._extra_kwargs = kwargs
def __call__(self, func):
# Various sanity checks on the callable func.
if not callable(func):
raise ValueError(f"Function {func} must be a callable.")
# Func should not use kwargs and defaults.
argspec = tf_inspect.getargspec(func)
if argspec.keywords or argspec.defaults:
raise ValueError(
"Functions with argument defaults or keywords arguments are not "
f"supported. {func} has defaults {argspec.defaults} and keywords "
f"{argspec.keywords}.")
# Computes how many arguments 'func' has.
min_args = len(argspec.args)
max_args = min_args
if argspec.varargs:
max_args = 1000000
argnames = argspec.args
if tf_inspect.ismethod(func):
# 1st argument is the "class" type.
min_args -= 1
argnames = argnames[1:]
if self._input_types:
# If Defun is given a list of types for the inputs, the number
# of input types should be compatible with 'func'.
num = len(self._input_types)
if num < min_args or num > max_args:
raise ValueError(
"The number of tf.function input types is not compatible with the "
f"allowed arguments of {func}. The tf.function have {num} input "
f"types, while the python function allows minimum {min_args} and "
f"maximum {max_args} arguments.")
return _DefinedFunction(
func,
argnames,
self._input_types,
self._func_name,
self._grad_func,
self._python_grad_func,
out_names=self._out_names,
**self._extra_kwargs)
# 'func' expects no arguments and input types is an empty list.
if min_args == 0 and max_args == 0:
return _DefinedFunction(
func, [], [],
self._func_name,
self._grad_func,
self._python_grad_func,
out_names=self._out_names,
**self._extra_kwargs)
# Input types are unknown. It's an overloaded function and hence
# its definition needs to be deferred until it's called.
return _OverloadedFunction(
func,
argnames,
self._func_name,
self._grad_func,
self._python_grad_func,
out_names=self._out_names,
**self._extra_kwargs)
class _DefinedFunctionDeleter(object):
"""Unregister function from eager context."""
__slots__ = ["name"]
def __init__(self, name):
self.name = name
def __del__(self):
try:
context.remove_function(self.name)
except TypeError:
# Suppress some exceptions, mainly for the case when we're running on
# module deletion. Things that can go wrong include the context module
# already being unloaded, self._handle._handle_data no longer being
# valid, and so on. Printing warnings in these cases is silly
# (exceptions raised from __del__ are printed as warnings to stderr).
pass # 'NoneType' object is not callable when the handle has been
# partially unloaded.
except AttributeError:
pass # 'NoneType' object has no attribute 'eager_mode' when context has
# been unloaded. Will catch other module unloads as well.
class _DefinedFunction(object):
"""_DefinedFunction encapsulates a function definition and its properties.
Attributes:
name: The function name.
definition: The definition of this function. A FunctionDef proto.
grad_func_name: If not None, the name of this function's gradient function.
python_grad_func: A python callable implementing the gradient of
the function python-side.
"""
def __init__(self,
func,
argnames,
input_types,
func_name=None,
grad_func=None,
python_grad_func=None,
out_names=None,
shape_func=None,
capture_by_value=False,
allowlisted_stateful_ops=None,
capture_resource_var_by_value=True,
**kwargs):
"""Creates _DefinedFunction.
Args:
func: A python callable which constructs a tf function body.
argnames: A list of strings for function argument names.
input_types: The function's argument types. Can be a tuple or list of
tf data types.
func_name: The function name. Defaults to None, in which case the name
is derived from 'func'.
grad_func: This function's gradient function, if not None. Defaults
to None.
python_grad_func: A python callable implementing the gradient of
the function python-side.
out_names: An optional list of strings for the function return value
names.
shape_func: An optional function mapping an op to a list of static
output shapes.
capture_by_value: Boolean (defaults to False). If True, captured values
will be copied into the function body.
allowlisted_stateful_ops: A set of ops that if stateful we ignore and
copy into the function body, when `capture_by_value` is True.
capture_resource_var_by_value: Boolean (defaults to True). If False,
captured resource variable returns the handle instead of value.
**kwargs: The keyword arguments. **kwargs is passed to every call
site of this function.
Raises:
ValueError: The function definition is invalid.
"""
self._func = func
self._input_types = input_types
self._func_name = func_name
self._grad_func = grad_func
self._python_grad_func = python_grad_func
self._out_names = out_names
self._shape_func = shape_func
self._capture_by_value = capture_by_value
self._allowlisted_stateful_ops = allowlisted_stateful_ops
if self._allowlisted_stateful_ops is None:
self._allowlisted_stateful_ops = set()
self._capture_resource_var_by_value = capture_resource_var_by_value
self._extra_kwargs = kwargs
# Constructed only when C API is disabled, lazily
self._definition = None
# Constructed only when C API is enabled, lazily
self._c_func = None
self._function_deleter = None
self._sub_functions = {} # Constructed with _definition or _c_func
# pylint: disable=protected-access
device_funcs = ops.get_default_graph()._device_functions_outer_to_inner
# pylint: enable=protected-access
# Get the innermost device if possible.
self._caller_device = device_funcs[-1] if device_funcs else None
# Cached OpDef for this function. When C API is enabled, this is
# the only part of FunctionDef that we cache in Python. When C API
# is disabled the whole _definition is available and this is simply
# another reference to _definition.signature
self._op_def = None
assert isinstance(input_types, (list, tuple))
self._arg_types = input_types
self._arg_names = [argnames[i] if i < len(argnames) else ("arg%d" % i)
for i in range(len(input_types))]
@property
def name(self):
"""Function name."""
self._create_definition_if_needed()
return self._func_name
@property
def definition(self):
"""Function definition proto."""
self._create_definition_if_needed()
if self._c_func:
with c_api_util.tf_buffer() as buf:
c_api.TF_FunctionToFunctionDef(self._c_func.func, buf)
fdef = function_pb2.FunctionDef()
proto_data = c_api.TF_GetBuffer(buf)
fdef.ParseFromString(compat.as_bytes(proto_data))
with ops.init_scope():
if context.executing_eagerly():
context.add_function(self._c_func.func)
self._function_deleter = _DefinedFunctionDeleter(
fdef.signature.name)
return fdef
return self._definition
@property
def _signature(self):
self._create_definition_if_needed()
return self._op_def
def set_grad_func(self, grad_func):
"""Specifies the gradient function of this function."""
assert not self._grad_func
assert isinstance(grad_func, _DefinedFunction)
self._grad_func = grad_func
@property
def grad_func_name(self):
"""Returns the name of the gradient function."""
return self._grad_func.name if self._grad_func else None
@property
def python_grad_func(self):
"""Python gradient function callable."""
return self._python_grad_func
@property
def declared_input_types(self):
"""Returns the list of data types of explicit declared inputs."""
return self._input_types
@property
def captured_inputs(self):
"""Returns the list of implicitly captured inputs."""
self._create_definition_if_needed()
return self._extra_inputs
@property
def stateful_ops(self):
"""Returns the list of stateful ops in function definition.
Returns:
A list of (op.name, op.type) pairs.
"""
self._create_definition_if_needed()
return self._stateful_ops
def _create_definition_if_needed(self):
"""Creates the function definition if it's not created yet."""
with context.graph_mode():
self._create_definition_if_needed_impl()
def _create_definition_if_needed_impl(self):
"""This is not what you want, see _create_definition_if_needed."""
if self._definition is not None or self._c_func is not None:
return
# Copy variable collections (by reference) from the parent graph such that
# name based variable sharing (e.g. via tf.make_template) works between the
# func graph and parent graph.
variable_keys = []
variable_keys.extend(ops.GraphKeys._VARIABLE_COLLECTIONS) # pylint: disable=protected-access
variable_keys.append(vs._VARSTORE_KEY) # pylint: disable=protected-access
parent_graph = ops.get_default_graph()
collections_ref = {
key: parent_graph.get_collection_ref(key) for key in variable_keys}
temp_graph = func_graph_from_py_func(
self._func,
self._arg_names,
self._arg_types,
self._func_name,
self._capture_by_value,
self._caller_device,
collections_ref=collections_ref,
allowlisted_stateful_ops=self._allowlisted_stateful_ops,
capture_resource_var_by_value=self._capture_resource_var_by_value)
self._extra_inputs = temp_graph.extra_inputs
# pylint: disable=protected-access
self._sub_functions = temp_graph._functions
# pylint: enable=protected-access
# Extra kwargs are treated as attrs on the function def.
if self._func_name:
base_func_name = self._func_name
else:
base_func_name = function_utils.get_func_name(self._func)
if self._grad_func:
base_func_name += ("_%s" % self._grad_func.name)
kwargs_attr = _parse_kwargs_as_attrs(base_func_name, **self._extra_kwargs)
if not temp_graph._c_graph: # pylint: disable=protected-access
# Build the FunctionDef
self._definition = graph_to_function_def.graph_to_function_def(
temp_graph,
temp_graph.get_operations(),
temp_graph.inputs,
temp_graph.outputs,
out_names=self._out_names)
for k in kwargs_attr:
self._definition.attr[k].CopyFrom(kwargs_attr[k])
# Hash the definition and its dependencies.
self._hash_str = self._create_hash_str(
self._definition.signature.input_arg,
self._definition.signature.output_arg, self._definition.node_def)
# Finally, we decide the function name to use. If not specified,
# make up something which is almost certainly unique (but deterministic).
if not self._func_name:
self._func_name = "_".join([base_func_name, self._hash_str])
self._definition.signature.name = self._func_name
if self._func.__doc__:
self._definition.signature.description = self._func.__doc__
self._op_def = self._definition.signature
else: # C API is enabled
output_names = ([compat.as_bytes(x) for x in self._out_names]
if self._out_names else [])
description = self._func.__doc__ or None
# pylint: disable=protected-access
c_func = c_api.TF_GraphToFunction_wrapper(
temp_graph._c_graph,
base_func_name,
self._func_name is None, # append_hash_to_fn_name
None, # opers
[t._as_tf_output() for t in temp_graph.inputs],
[t._as_tf_output() for t in temp_graph.outputs],
output_names,
[], # control_outputs
[], # control_output_names
None, # opts
description)
self._c_func = c_api_util.ScopedTFFunction(c_func)
# pylint: enable=protected-access
self._set_c_attrs(kwargs_attr)
# Set cached fields: _op_def and _func_name (if not already set)
self._op_def = self.definition.signature
if self._func_name:
assert self._func_name == self._op_def.name
else:
self._func_name = compat.as_str(self._op_def.name)
self._stateful_ops = [(op.name, op.type)
for op in temp_graph.get_operations()
if op._is_stateful] # pylint: disable=protected-access
def _set_c_attrs(self, attrs):
"""Sets `attrs` as attributes of self._c_func.
Requires that self._c_func is not None.
Args:
attrs: a dictionary from attribute name to attribute proto value
"""
for name, attr_value in attrs.items():
serialized = attr_value.SerializeToString()
# TODO(skyewm): this creates and deletes a new TF_Status for every attr.
# It might be worth creating a convenient way to re-use the same status.
c_api.TF_FunctionSetAttrValueProto(self._c_func.func, compat.as_str(name),
serialized)
def _create_hash_str(self, input_arg, output_arg, node_def):
"""Creates an 8-character string unique to this input.
Args:
input_arg: the input_arg field of an OpDef
(e.g. self._definition.signature.input_arg)
output_arg: the output_arg field of an OpDef
(e.g. self._definition.signature.output_arg)
node_def: the node_def field of a FunctionDef
(e.g. self._definition.node_def)
Returns:
The unique string for this input
"""
hasher = hashlib.sha1()
def update_num(n):
hasher.update(compat.as_bytes("%x" % n))
def update_str(s):
update_num(len(s))
hasher.update(compat.as_bytes(s))
def update_strs(slist):
update_num(len(slist))
for s in slist:
update_str(s)
for adef in input_arg:
update_str(adef.SerializeToString())
for adef in output_arg:
update_str(adef.SerializeToString())
for n in sorted(node_def, key=lambda n: n.name):
update_str(n.name)
update_str(n.op)
update_strs(n.input)
update_num(len(n.attr))
# NOTE: protobuf map serialization does not guarantee ordering.
for k in sorted(n.attr):
update_str(k)
update_str(n.attr[k].SerializeToString())
return hasher.hexdigest()[:8]
def add_to_graph(self, g):
"""Adds this function into the graph g."""
self._create_definition_if_needed()
# Adds this function into 'g'.
# pylint: disable=protected-access
if context.executing_eagerly():
context.context().add_function_def(self.definition)
else:
g._add_function(self)
# pylint: enable=protected-access
# Ensures related sub-routines are defined in 'g', too.
for f in self._sub_functions.values():
f.add_to_graph(g)
# Adds its gradient function, too.
if self._grad_func:
self._grad_func.add_to_graph(g)
def __call__(self, *args, **kwargs):
self.add_to_graph(ops.get_default_graph())
args = [ops.convert_to_tensor(_) for _ in args] + self._extra_inputs
ret, op = _call(self._signature, *args, **kwargs)
# Set a hidden attr in 'op' so that gradients_impl can refer back
# to this _DefinedFunction instance to access python_grad_func.
assert isinstance(op, ops.Operation)
setattr(op, "__defun", self)
if self._shape_func is not None:
shapes = self._shape_func(op)
if len(shapes) != len(op.outputs):
raise ValueError(f"shape_func {self._shape_func} produced "
f"{len(shapes):d} shapes, which does not match "
f"{len(op.outputs)} outputs.")
for (t, shape) in zip(op.outputs, shapes):
t.set_shape(shape)
return ret
class _OverloadedFunction(object):
"""_OverloadedFunction encapsulates an overloaded function.
_OverloadedFunction maintains a mapping from input types to
instantiated _DefinedFunction in self._overload.
"""
def __init__(self,
func,
argnames,
func_name=None,
grad_func=None,
python_grad_func=None,
out_names=None,
**kwargs):
"""Creates _DefinedFunction.
Args:
func: A python callable which constructs a tf function body.
argnames: A list of strings for function argument names.
func_name: The function name. Defaults to None, in which case the name
is derived from 'func'.
grad_func: This function's gradient function, if not None. Defaults
to None.
python_grad_func: A python callable implementing the gradient of
the function python-side.
out_names: A list of strings for the function return value names.
**kwargs: The keyword arguments. **kwargs is passed to every call
site of this function.
Raises:
ValueError: The function definition is invalid.
"""
self._func = func
self._argnames = argnames
self._func_name = func_name
assert grad_func is None or isinstance(grad_func, _OverloadedFunction)
self._grad_func = grad_func
self._python_grad_func = python_grad_func
self._out_names = out_names
self._extra_kwargs = kwargs
self._overload = {}
def instantiate(self, input_types):
"""Instantiate this function given input argument types.
Args:
input_types: A list of data types for the inputs.
Returns:
_DefinedFunction for the given input types.
"""
# Stringify the type list.
key = _type_list_to_str(input_types)
defined = self._overload.get(key)
if not defined:
# If not defined yet, define the function given the input types.
name = self._func_name
if name is not None:
name = "_".join([name, key])
defined = _DefinedFunction(
self._func,
self._argnames,
input_types,
name,
None,
self._python_grad_func,
out_names=self._out_names,
**self._extra_kwargs)
_ = defined.name # Fully instantiate the function definition.
if self._grad_func:
# If _grad_func is given, it is another
# _OverloadedFunction. We need to instantiate it with the
# right input types.
output_types = [
dtypes.DType(_.type) for _ in defined._signature.output_arg # pylint: disable=protected-access
]
# pylint: disable=protected-access
defined._grad_func = self._grad_func.instantiate(input_types +
output_types)
# pylint: enable=protected-access
self._overload[key] = defined
return defined
def __call__(self, *args, **kwargs):
input_types = []
args = list(args)
for (i, x) in enumerate(args):
x = ops.convert_to_tensor(x)
if not isinstance(x, ops.Tensor):
raise ValueError(f"Expected a Tensor but got {x} with type {type(x)}.")
input_types.append(x.dtype)
args[i] = x
return self.instantiate(input_types)(*args, **kwargs)
class _FuncGraph(ops.Graph):
"""A helper for constructing a function.
_FuncGraph overrides ops.Graph's create_op() so that we can keep
track of all inputs into every op created inside the function. If
any input is from other graphs, we keep track of it in self.capture
and substitute the input with a place holder.
Each captured input's corresponding place holder is converted into a
function argument and the caller passes in the captured tensor.
"""
def __init__(self, name, capture_by_value, allowlisted_stateful_ops,
capture_resource_var_by_value, *args, **kwargs):
super(_FuncGraph, self).__init__(*args, **kwargs)
self._capture_by_value = capture_by_value
self._allowlisted_stateful_ops = allowlisted_stateful_ops
self._capture_resource_var_by_value = capture_resource_var_by_value
self._building_function = True
self._outer_graph = ops.get_default_graph()
self._vscope = vs.get_variable_scope()
self._old_custom_getter = self._vscope.custom_getter
# The name of the function.
self.name = name
# Placeholder tensors representing the inputs to this function. The tensors
# are in this _FuncGraph.
self.inputs = []
# Tensors that will be returned by this function. The tensors are in this
# _FuncGraph.
self.outputs = []
# Maps external tensor -> internal tensor (e.g. input placeholder).
self._captured = {}
# The external tensors that have been captured as inputs and must be passed
# to this function (empty if capturing by value, otherwise these are the
# keys of _captured).
self.extra_inputs = []
# Input placeholders that have been added for captured values (empty if capturing
# by value).
self.extra_args = []
# Captured variables.
# TODO(skyewm): is this needed?
self.extra_vars = []
# pylint: disable=g-doc-return-or-yield
@property
def outer_graph(self):
"""The graph active when this _FuncGraph was created."""
return self._outer_graph
@tf_contextlib.contextmanager
def container(self, container_name):
"""Returns a context manager that specifies the resource container to use.
Overridden from `tf.Graph` to update both the init_scope container
and the present inner container. This is necessary to make sure setting
containers applies correctly both to created variables and to stateful
ops.
Args:
container_name: container name string.
Returns:
A context manager for defining resource containers for stateful ops,
yields the container name.
"""
original_container = self._container
# pylint: disable=protected-access
with ops.init_scope():
original_init_container = ops.get_default_graph()._container
try:
self._container = container_name
with ops.init_scope():
ops.get_default_graph()._container = container_name
yield self._container
finally:
self._container = original_container
with ops.init_scope():
ops.get_default_graph()._container = original_init_container
# pylint: enable=protected-access
# pylint: enable=g-doc-return-or-yield
def getvar(
self,
getter,
name,
shape=None,
dtype=None,
initializer=None,
reuse=None,
trainable=True,
collections=None, # pylint: disable=redefined-outer-name
use_resource=None,
**kwargs):
"""A custom variable getter."""
# Here, we switch the default graph to the outer graph and ask the
# variable scope in which the function is defined to give us the
# variable. The variable is stashed in extra_vars and returned to
# the caller.
#
# We capture these variables so that the variable definition is
# hoisted upward to the outer most graph.
with self._outer_graph.as_default():
# pylint: disable=protected-access
var = self._vscope.get_variable(
vs._get_default_variable_store(),
name,
shape=shape,
dtype=dtype,
initializer=initializer,
reuse=reuse,
trainable=trainable,
collections=collections,
use_resource=use_resource)
self.extra_vars.append(var)
if (isinstance(var, resource_variable_ops.BaseResourceVariable) and
self._capture_resource_var_by_value):
# For resource-based variables read the variable outside the function
# and pass in the value. This ensures that the function is pure and
# differentiable. TODO(apassos) this may have performance problems if
# the function will only do embedding lookups on the variable.
return var.value()
return var
def _create_op_internal(
self,
op_type,
inputs,
dtypes=None, # pylint: disable=redefined-outer-name
input_types=None,
name=None,
attrs=None,
op_def=None,
compute_device=True):
for i, x in enumerate(inputs):
if isinstance(x, ops.EagerTensor) or x.graph is not self:
inputs[i] = self.capture(x)
return super(_FuncGraph, self)._create_op_internal(
op_type,
inputs,
dtypes=dtypes,
input_types=input_types,
name=name,
attrs=attrs,
op_def=op_def,
compute_device=compute_device)
def capture(self, tensor, name=None):
"""Adds the given tensor to this graph and returns the captured tensor."""
if tensor.ref() in self._captured:
# Captured already.
return self._captured[tensor.ref()]
elif self._capture_by_value:
return self._add_tensor_and_parents(tensor)
else:
return self._capture_tensor_as_extra_input(tensor, name)
@property
def captures(self):
"""Pairs of tensors and captured tensor."""
return [(k.deref(), v) for k, v in self._captured.items()]
def _capture_tensor_as_extra_input(self, tensor, name=None):
# Substitute with a placeholder.
self.extra_inputs.append(tensor)
# Hoist the new input placeholder out of any control flow context
# we're currently in.
with ops.control_dependencies(None):
ph = array_ops.placeholder(
tensor.dtype, shape=tensor.get_shape(), name=name)
# pylint: disable=protected-access
if isinstance(tensor, ops.EagerTensor):
handle_data = tensor._handle_data
if handle_data:
handle_data = handle_data.SerializeToString()
else:
handle_data = c_api.GetHandleShapeAndType(tensor.graph._c_graph,
tensor._as_tf_output())
if handle_data:
c_api.SetHandleShapeAndType(ph.graph._c_graph, ph._as_tf_output(),
compat.as_bytes(handle_data))
# pylint: enable=protected-access
self.inputs.append(ph)
self._captured[tensor.ref()] = ph
self.extra_args.append(ph)
if _is_guaranteed_const(tensor):
with ops.control_dependencies(None):
return array_ops.guarantee_const(ph)
else:
return ph
def _add_tensor_and_parents(self, tensor):
op = self._add_op_and_parents(tensor.op)
return op.outputs[tensor.value_index]
def _add_op_and_parents(self, op):
# pylint: disable=protected-access
op_def = graph_to_function_def._get_op_def(op)
if op._is_stateful and op not in self._allowlisted_stateful_ops:
raise ValueError(f"Cannot capture a stateful node (name:{op.name}, "
f"type:{op.type}) by value.")
elif op.type in ("Placeholder", "PlaceholderV2"):
raise ValueError(f"Cannot capture a placeholder (name:{op.name}, "
f"type:{op.type}) by value.")
# pylint: enable=protected-access
captured_inputs = [self._add_tensor_and_parents(x) for x in op.inputs]
captured_op = self._create_op_internal(
op.type,
captured_inputs, [o.dtype for o in op.outputs],
name=op.name,
attrs=op.node_def.attr,
op_def=op_def)
for t, captured_t in zip(op.outputs, captured_op.outputs):
self._captured[t.ref()] = captured_t
return captured_op
def func_graph_from_py_func(func,
arg_names,
arg_types,
name=None,
capture_by_value=False,
device=None,
colocation_stack=None,
container=None,
collections_ref=None,
arg_shapes=None,
allowlisted_stateful_ops=None,
capture_resource_var_by_value=True):
"""Returns a _FuncGraph generated from `func`.
Args:
func: A Python callable which constructs a TF function body. The arguments
must correspond to `arg_types`. Returns a value or list/tuple of values.
No returned value can be None.
arg_names: A sequence of strings for the function argument names.
arg_types: A sequence of the function's argument types.
name: The function name. If None, the name is derived from `func`.
capture_by_value: boolean. If True, captured values will be copied into the
function body.
device: device name or function.
colocation_stack: A colocation stack (list) the _FuncGraph should use.
container: A container name the _FuncGraph should start with.
collections_ref: A reference to a collections dict the _FuncGraph should
use internally.
arg_shapes: A sequence of the function's argument shapes.
allowlisted_stateful_ops: A set of ops that if stateful we ignore and
re-create.
capture_resource_var_by_value: Boolean (defaults to True). If False,
captured resource variable returns the handle instead of value.
Returns:
A _FuncGraph.
Raises:
ValueError: if func returns None.
"""
if not name:
name = function_utils.get_func_name(func)
func_graph = _FuncGraph(name, capture_by_value, allowlisted_stateful_ops,
capture_resource_var_by_value)
with func_graph.as_default(), ops.device(device):
# pylint: disable=protected-access
if collections_ref is not None:
func_graph._collections = collections_ref
if container is not None:
func_graph._container = container
if colocation_stack is not None:
func_graph._colocation_stack = colocation_stack
# pylint: enable=protected-access
if arg_shapes is None:
arg_shapes = [None] * len(arg_types)
# Create placeholders for the function arguments.
for (argname, argtype, argshape) in zip(arg_names, arg_types, arg_shapes):
argholder = array_ops.placeholder(argtype, shape=argshape, name=argname)
func_graph.inputs.append(argholder)
# Call func and gather the output tensors.
with vs.variable_scope("", custom_getter=func_graph.getvar):
outputs = func(*func_graph.inputs)
# There is no way of distinguishing between a function not returning
# anything and a function returning None in Python.
# We need to allow the former and ideally want to forbid the latter as
# it is most likely user error.
# TODO(iga): Consider adding a @NoOutput decorator on top of @Defun to
# allow users to explicitly mark the function as not returning anything.
# For now, we allow a single None return and interpret it as a function
# with no output.
if outputs is None:
outputs = []
else:
# If func only returned one value, make it a tuple.
if not isinstance(outputs, (list, tuple)):
outputs = (outputs,)
if any(_ is None for _ in outputs):
raise ValueError(f"Function {name} can not return None.")
# Ensures each output is a Tensor in the function graph.
outputs = [ops.convert_to_tensor(t) for t in outputs]
outputs = [func_graph.capture(t) if t.graph is not func_graph else t
for t in outputs]
func_graph.outputs = outputs
return func_graph
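# A minimal sketch of how this helper is used (illustrative; assumes graph
# mode and float32 arguments):
#
#     def my_func(x, y):
#       return x + y
#
#     fg = func_graph_from_py_func(my_func, ["x", "y"],
#                                  [dtypes.float32, dtypes.float32])
#     # fg.inputs holds the placeholders created for x and y;
#     # fg.outputs holds the captured result of my_func.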
def _is_guaranteed_const(tensor):
"""Determines whether `tensor` is guaranteed to be a constant.
A tensor is guaranteed to be a constant if either it was produced by
a `GuaranteeConst` op or if all of its children are guaranteed to be
constants.
Args:
tensor: The tensor for which to determine const-ness.
Returns:
True if `tensor` is guaranteed to be a constant, False otherwise.
"""
if isinstance(tensor, ops.EagerTensor):
return False
class Work(object):
def __init__(self, op, leaving):
self.op = op
self.leaving = leaving
is_guaranteed_const = lambda op: op.node_def.op == "GuaranteeConst"
constants = set([])
def all_inputs_const(op):
# If all inputs of an op are guaranteed constants, then we can infer that
# the op produces a constant as well.
return op.inputs and all(inp.op in constants for inp in op.inputs)
visited = set([])
stack = [Work(tensor.op, leaving=False)]
while stack:
work = stack.pop()
if work.leaving:
if all_inputs_const(work.op):
constants.add(work.op)
continue
visited.add(work.op)
if is_guaranteed_const(work.op):
constants.add(work.op)
continue
# This op will be revisited after all its inputs are checked for const-ness.
stack.append(Work(work.op, leaving=True))
for inp in work.op.inputs:
if inp.op not in visited:
stack.append(Work(inp.op, leaving=False))
return tensor.op in constants
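# Illustrative behavior (assumed): a tensor produced by
# array_ops.guarantee_const(...) is reported as guaranteed const, while an
# ordinary placeholder feeding the graph is not.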
def _call(sig, *inputs, **kwargs):
"""Adds a node calling a function.
This adds a `call` op to the default graph that calls the function
of signature `sig`, passing the tensors in `inputs` as arguments.
It returns the outputs of the call, which are one or more tensors.
`sig` is the OpDef signature obtained from a `_DefinedFunction` object.
You can pass an optional keyword parameter `name=string` to name the
added operation.
You can pass an optional keyword parameter `noinline=True|False` to
instruct the runtime not to inline the function body into the call
site.
Args:
sig: OpDef. The signature of the function.
*inputs: arguments to the function.
**kwargs: Optional keyword arguments. Can only contain 'name' or
'noinline'.
Returns:
A 2-element tuple. First element: a Tensor if the function returns a single
value; a list of Tensors if the function returns multiple values; the
Operation if the function returns no values. Second element: the Operation.
Raises:
ValueError: if the arguments are invalid.
"""
if len(inputs) != len(sig.input_arg):
raise ValueError(f"Expected {len(sig.input_arg):d} arguments, got "
f"{len(inputs):d}.")
name = kwargs.pop("name", None)
g = ops.get_default_graph()
func_name = sig.name
if name is None:
name = func_name
attrs = _parse_kwargs_as_attrs(func_name, **kwargs)
output_types = [dtypes.DType(x.type) for x in sig.output_arg]
op = g._create_op_internal( # pylint: disable=protected-access
func_name, list(inputs), output_types, name=name, attrs=attrs, op_def=sig)
if op.outputs:
if len(op.outputs) == 1:
ret = op.outputs[0]
else:
ret = tuple(op.outputs)
else:
ret = op
return ret, op
def _from_definition(fdef, grad_func=None):
"""Creates a _DefinedFunction initialized from a FunctionDef proto.
Args:
fdef: a FunctionDef
grad_func: a _DefinedFunction or None
Returns:
A _DefinedFunction representing fdef
"""
# TODO(iga): This method does major surgery on _DefinedFunction.
# Make it a named constructor using @classmethod of _DefinedFunction.
# The Python callable is only needed to create a FunctionDef. Since we have
# the FunctionDef here, we don't need to set _DefinedFunction._func (nor do we
# have access to such a callable here).
func = None
argnames = [arg.name for arg in fdef.signature.input_arg]
input_types = tuple(
dtypes.as_dtype(arg.type) for arg in fdef.signature.input_arg)
func_name = fdef.signature.name
# Note: FunctionDefs do not include python gradient functions, so if the
# original _DefinedFunction included one it will not be reflected here.
python_grad_func = None
out_names = [arg.name for arg in fdef.signature.output_arg]
result = _DefinedFunction(func, argnames, input_types, func_name, grad_func,
python_grad_func, out_names)
# pylint: disable=protected-access
serialized = fdef.SerializeToString()
c_func = c_api.TF_FunctionImportFunctionDef(serialized)
result._c_func = c_api_util.ScopedTFFunction(c_func)
result._extra_inputs = []
result._op_def = fdef.signature
# pylint: enable=protected-access
return result
def from_library(lib):
"""Creates _DefinedFunctions initialized from a FunctionDefLibrary proto.
This method handles assigning the correct gradient functions to each
function.
Args:
lib: a FunctionDefLibrary
Returns:
A list of _DefinedFunctions
Raises:
ValueError: `lib` is invalid
"""
if not lib.function and not lib.gradient:
return []
# function name -> FunctionDef proto
funcs = {fdef.signature.name: fdef for fdef in lib.function}
# Validate that all referenced function names have function defs
for g in lib.gradient:
if g.function_name not in funcs:
raise ValueError(f"FunctionDefLibrary missing '{g.function_name}' "
f"FunctionDef\n{lib}")
if g.gradient_func not in funcs:
raise ValueError(f"FunctionDefLibrary missing '{g.gradient_func}' "
f"FunctionDef\n{lib}")
# function name -> gradient function name
func_to_grad = collections.defaultdict(lambda: None)
# gradient function name -> names of functions having that grad function
grad_to_funcs = collections.defaultdict(list)
for gdef in lib.gradient:
func_to_grad[gdef.function_name] = gdef.gradient_func
grad_to_funcs[gdef.gradient_func].append(gdef.function_name)
# Start with functions without gradients
ready = [
fdef for fdef in lib.function if func_to_grad[fdef.signature.name] is None
]
if not ready:
raise ValueError(
f"FunctionDefLibrary contains cyclic gradient functions!\n{lib}")
# function name -> _DefinedFunction
initialized = {}
while ready:
fdef = ready.pop()
name = fdef.signature.name
grad = initialized.get(func_to_grad[name])
if func_to_grad[name]:
assert grad
defined_func = _from_definition(fdef, grad_func=grad)
initialized[name] = defined_func
ready.extend(funcs[f] for f in grad_to_funcs[name])
return initialized.values()
def _get_experimental_kwarg_as_attr(attr_name, value):
"""Creates an AttrValue for a python object."""
if isinstance(value, bool):
return attr_value_pb2.AttrValue(b=value)
elif isinstance(value, int):
return attr_value_pb2.AttrValue(i=value)
elif isinstance(value, float):
return attr_value_pb2.AttrValue(f=value)
elif isinstance(value, str):
return attr_value_pb2.AttrValue(s=compat.as_bytes(value))
else:
raise ValueError(f"Attribute {attr_name} must be bool, int, float, or "
f"str. Got {type(value)}.")
def _get_kwarg_as_str_attr(attr_name, value):
"""Creates an AttrValue for a python object."""
if isinstance(value, str):
return attr_value_pb2.AttrValue(s=compat.as_bytes(value))
else:
raise ValueError(f"Attribute {attr_name} must be str. Got {type(value)}.")
def _parse_kwargs_as_attrs(func_name, **kwargs):
"""Parses **kwargs into a node's attributes."""
attrs = {}
noinline = kwargs.pop("noinline", None)
if noinline is not None:
attrs["_noinline"] = attr_value_pb2.AttrValue(b=bool(noinline))
# For compatibility with previous behavior, Defun does not perform shape
# inference through its function call operations.
attrs["_disable_call_shape_inference"] = attr_value_pb2.AttrValue(b=True)
compiled = kwargs.pop("compiled", None)
separate_compiled_gradients = kwargs.pop("separate_compiled_gradients", None)
if compiled is not None:
attrs["_XlaCompile"] = attr_value_pb2.AttrValue(b=bool(compiled))
attrs["_XlaSeparateCompiledGradients"] = attr_value_pb2.AttrValue(
b=bool(separate_compiled_gradients))
# Forward _XlaScope from enclosing context (if set), otherwise create new.
# pylint: disable=protected-access
if "_XlaScope" in ops.get_default_graph()._attr_scope_map:
attrs["_XlaScope"] = ops.get_default_graph()._attr_scope_map["_XlaScope"]
else:
attrs["_XlaScope"] = attr_value_pb2.AttrValue(
s=("function_%s" % func_name).encode())
# pylint: enable=protected-access
kwargs_keys = list(kwargs.keys())
for key in kwargs_keys:
if key.startswith("experimental_"):
attrs[key] = _get_experimental_kwarg_as_attr(key, kwargs[key])
del kwargs[key]
# Support for https://github.com/tensorflow/community/pull/113/files.
elif key == "_implements" or key == "_reference":
attrs[key] = _get_kwarg_as_str_attr(key, kwargs[key])
del kwargs[key]
if kwargs:
raise ValueError(f"Unknown keyword arguments: {kwargs.keys()}.")
return attrs
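# Illustrative sketch (an assumption, not original source text) of the attrs
# produced above: a Defun named "MyFunc" declared with noinline=True,
# compiled=True and experimental_tag="v1" would yield node attributes roughly
# like
#
#   {"_noinline": AttrValue(b=True),
#    "_disable_call_shape_inference": AttrValue(b=True),
#    "_XlaCompile": AttrValue(b=True),
#    "_XlaSeparateCompiledGradients": AttrValue(b=False),
#    "_XlaScope": AttrValue(s=b"function_MyFunc"),
#    "experimental_tag": AttrValue(s=b"v1")}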
def get_extra_vars():
"""Returns the captured variables by the function.
Returns:
If the default graph is being used to define a function, the
returned list of variables contains those created inside the function
body so far. Otherwise, returns an empty list.
"""
g = ops.get_default_graph()
if isinstance(g, _FuncGraph):
return g.extra_vars
else:
return []
def get_extra_inputs():
"""Returns the captured input tensors by the function.
Returns:
If the default graph is being used to define a function, the
returned list of tensors contains those accessed inside the function body
but defined outside the function body so far. Otherwise, returns an
empty list.
"""
g = ops.get_default_graph()
if isinstance(g, _FuncGraph):
return g.extra_inputs
else:
return []
def get_extra_args():
"""Returns the corresponding function arguments for the captured inputs.
Returns:
If the default graph is being used to define a function, the
returned list of placeholders contains those used inside the function
body, corresponding to those returned by get_extra_inputs(). Otherwise,
returns an empty list.
"""
g = ops.get_default_graph()
if isinstance(g, _FuncGraph):
return g.extra_args
else:
return []
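# Hedged example (not in the original file) of how the three helpers above
# behave while a function body is being traced into a _FuncGraph; `x` is a
# hypothetical tensor created outside the function:
#
#   x = constant_op.constant(1.0)
#
#   @Defun(dtypes.float32)
#   def Foo(y):
#     _ = y + x                     # references `x`, so it gets captured
#     # get_extra_inputs() -> [x]   (outer tensors accessed in the body)
#     # get_extra_args()   -> [placeholder standing in for x inside the body]
#     # get_extra_vars()   -> []    (no variables created in the body)
#     return y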
def _type_list_to_str(types):
if any(_ not in _DTYPE_TO_STR for _ in types):
unsupported_types = [type_ for type_ in types if type_ not in _DTYPE_TO_STR]
raise ValueError(f"Unsupported dtypes {unsupported_types} in "
"`types`. Supported dtypes are "
f"{_DTYPE_TO_STR.keys()}.")
return "".join(_DTYPE_TO_STR[_] for _ in types)
# NOTE: The list needs to be extended when more data types are added.
_DTYPE_TO_STR = {
dtypes.float16: "f16",
dtypes.float32: "f32",
dtypes.float64: "f64",
dtypes.int32: "i32",
dtypes.uint8: "i8",
dtypes.uint16: "u16",
dtypes.uint32: "u32",
dtypes.uint64: "u64",
dtypes.int16: "i16",
dtypes.int8: "i8",
dtypes.string: "s",
dtypes.complex64: "c64",
dtypes.complex128: "c128",
dtypes.int64: "i64",
dtypes.bool: "b",
dtypes.qint8: "qi8",
dtypes.quint8: "qu8",
dtypes.qint16: "qi16",
dtypes.quint16: "qu16",
dtypes.qint32: "qi32",
dtypes.bfloat16: "b16"
}
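# For illustration (an assumption, not original source text): the helper above
# yields compact dtype suffixes, e.g.
#
#   _type_list_to_str([dtypes.float32, dtypes.int64, dtypes.bool]) == "f32i64b"
#
# Such strings are used when deriving names for functions instantiated with a
# particular input signature.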
def function_def_from_tf_function(c_func):
"""Converts a SWIG-wrapped TF_Function* to a FunctionDef proto."""
with c_api_util.tf_buffer() as buf:
c_api.TF_FunctionToFunctionDef(c_func, buf)
data = c_api.TF_GetBuffer(buf)
fdef = function_pb2.FunctionDef()
fdef.ParseFromString(compat.as_bytes(data))
return fdef
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
import warnings
from pyspark import since, keyword_only
from pyspark.ml.util import *
from pyspark.ml.wrapper import JavaEstimator, JavaModel, JavaParams, JavaWrapper
from pyspark.ml.param.shared import *
from pyspark.ml.common import inherit_doc
from pyspark.sql import DataFrame
__all__ = ['BisectingKMeans', 'BisectingKMeansModel', 'BisectingKMeansSummary',
'KMeans', 'KMeansModel',
'GaussianMixture', 'GaussianMixtureModel', 'GaussianMixtureSummary',
'LDA', 'LDAModel', 'LocalLDAModel', 'DistributedLDAModel', 'PowerIterationClustering']
class ClusteringSummary(JavaWrapper):
"""
.. note:: Experimental
Clustering results for a given model.
.. versionadded:: 2.1.0
"""
@property
@since("2.1.0")
def predictionCol(self):
"""
Name for column of predicted clusters in `predictions`.
"""
return self._call_java("predictionCol")
@property
@since("2.1.0")
def predictions(self):
"""
DataFrame produced by the model's `transform` method.
"""
return self._call_java("predictions")
@property
@since("2.1.0")
def featuresCol(self):
"""
Name for column of features in `predictions`.
"""
return self._call_java("featuresCol")
@property
@since("2.1.0")
def k(self):
"""
The number of clusters the model was trained with.
"""
return self._call_java("k")
@property
@since("2.1.0")
def cluster(self):
"""
DataFrame of predicted cluster centers for each training data point.
"""
return self._call_java("cluster")
@property
@since("2.1.0")
def clusterSizes(self):
"""
Size of (number of data points in) each cluster.
"""
return self._call_java("clusterSizes")
@property
@since("2.4.0")
def numIter(self):
"""
Number of iterations.
"""
return self._call_java("numIter")
class GaussianMixtureModel(JavaModel, JavaMLWritable, JavaMLReadable):
"""
Model fitted by GaussianMixture.
.. versionadded:: 2.0.0
"""
@property
@since("2.0.0")
def weights(self):
"""
Weight for each Gaussian distribution in the mixture.
This is a multinomial probability distribution over the k Gaussians,
where weights[i] is the weight for Gaussian i, and weights sum to 1.
"""
return self._call_java("weights")
@property
@since("2.0.0")
def gaussiansDF(self):
"""
Retrieve Gaussian distributions as a DataFrame.
Each row represents a Gaussian Distribution.
The DataFrame has two columns: mean (Vector) and cov (Matrix).
"""
return self._call_java("gaussiansDF")
@property
@since("2.1.0")
def hasSummary(self):
"""
Indicates whether a training summary exists for this model
instance.
"""
return self._call_java("hasSummary")
@property
@since("2.1.0")
def summary(self):
"""
Gets summary (e.g. cluster assignments, cluster sizes) of the model trained on the
training set. An exception is thrown if no summary exists.
"""
if self.hasSummary:
return GaussianMixtureSummary(self._call_java("summary"))
else:
raise RuntimeError("No training summary available for this %s" %
self.__class__.__name__)
@inherit_doc
class GaussianMixture(JavaEstimator, HasFeaturesCol, HasPredictionCol, HasMaxIter, HasTol, HasSeed,
HasProbabilityCol, JavaMLWritable, JavaMLReadable):
"""
GaussianMixture clustering.
This class performs expectation maximization for multivariate Gaussian
Mixture Models (GMMs). A GMM represents a composite distribution of
independent Gaussian distributions with associated "mixing" weights
specifying each component's contribution to the composite.
Given a set of sample points, this class will maximize the log-likelihood
for a mixture of k Gaussians, iterating until the log-likelihood changes by
less than convergenceTol, or until it has reached the max number of iterations.
While this process is generally guaranteed to converge, it is not guaranteed
to find a global optimum.
.. note:: For high-dimensional data (with many features), this algorithm may perform poorly.
This is due to high-dimensional data (a) making it difficult to cluster at all
(based on statistical/theoretical arguments) and (b) numerical issues with
Gaussian distributions.
>>> from pyspark.ml.linalg import Vectors
>>> data = [(Vectors.dense([-0.1, -0.05 ]),),
... (Vectors.dense([-0.01, -0.1]),),
... (Vectors.dense([0.9, 0.8]),),
... (Vectors.dense([0.75, 0.935]),),
... (Vectors.dense([-0.83, -0.68]),),
... (Vectors.dense([-0.91, -0.76]),)]
>>> df = spark.createDataFrame(data, ["features"])
>>> gm = GaussianMixture(k=3, tol=0.0001,
... maxIter=10, seed=10)
>>> model = gm.fit(df)
>>> model.hasSummary
True
>>> summary = model.summary
>>> summary.k
3
>>> summary.clusterSizes
[2, 2, 2]
>>> summary.logLikelihood
8.14636...
>>> weights = model.weights
>>> len(weights)
3
>>> model.gaussiansDF.select("mean").head()
Row(mean=DenseVector([0.825, 0.8675]))
>>> model.gaussiansDF.select("cov").head()
Row(cov=DenseMatrix(2, 2, [0.0056, -0.0051, -0.0051, 0.0046], False))
>>> transformed = model.transform(df).select("features", "prediction")
>>> rows = transformed.collect()
>>> rows[4].prediction == rows[5].prediction
True
>>> rows[2].prediction == rows[3].prediction
True
>>> gmm_path = temp_path + "/gmm"
>>> gm.save(gmm_path)
>>> gm2 = GaussianMixture.load(gmm_path)
>>> gm2.getK()
3
>>> model_path = temp_path + "/gmm_model"
>>> model.save(model_path)
>>> model2 = GaussianMixtureModel.load(model_path)
>>> model2.hasSummary
False
>>> model2.weights == model.weights
True
>>> model2.gaussiansDF.select("mean").head()
Row(mean=DenseVector([0.825, 0.8675]))
>>> model2.gaussiansDF.select("cov").head()
Row(cov=DenseMatrix(2, 2, [0.0056, -0.0051, -0.0051, 0.0046], False))
.. versionadded:: 2.0.0
"""
k = Param(Params._dummy(), "k", "Number of independent Gaussians in the mixture model. " +
"Must be > 1.", typeConverter=TypeConverters.toInt)
@keyword_only
def __init__(self, featuresCol="features", predictionCol="prediction", k=2,
probabilityCol="probability", tol=0.01, maxIter=100, seed=None):
"""
__init__(self, featuresCol="features", predictionCol="prediction", k=2, \
probabilityCol="probability", tol=0.01, maxIter=100, seed=None)
"""
super(GaussianMixture, self).__init__()
self._java_obj = self._new_java_obj("org.apache.spark.ml.clustering.GaussianMixture",
self.uid)
self._setDefault(k=2, tol=0.01, maxIter=100)
kwargs = self._input_kwargs
self.setParams(**kwargs)
def _create_model(self, java_model):
return GaussianMixtureModel(java_model)
@keyword_only
@since("2.0.0")
def setParams(self, featuresCol="features", predictionCol="prediction", k=2,
probabilityCol="probability", tol=0.01, maxIter=100, seed=None):
"""
setParams(self, featuresCol="features", predictionCol="prediction", k=2, \
probabilityCol="probability", tol=0.01, maxIter=100, seed=None)
Sets params for GaussianMixture.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
@since("2.0.0")
def setK(self, value):
"""
Sets the value of :py:attr:`k`.
"""
return self._set(k=value)
@since("2.0.0")
def getK(self):
"""
Gets the value of `k`
"""
return self.getOrDefault(self.k)
class GaussianMixtureSummary(ClusteringSummary):
"""
.. note:: Experimental
Gaussian mixture clustering results for a given model.
.. versionadded:: 2.1.0
"""
@property
@since("2.1.0")
def probabilityCol(self):
"""
Name for column of predicted probability of each cluster in `predictions`.
"""
return self._call_java("probabilityCol")
@property
@since("2.1.0")
def probability(self):
"""
DataFrame of probabilities of each cluster for each training data point.
"""
return self._call_java("probability")
@property
@since("2.2.0")
def logLikelihood(self):
"""
Total log-likelihood for this model on the given data.
"""
return self._call_java("logLikelihood")
class KMeansSummary(ClusteringSummary):
"""
.. note:: Experimental
Summary of KMeans.
.. versionadded:: 2.1.0
"""
@property
@since("2.4.0")
def trainingCost(self):
"""
K-means cost (sum of squared distances to the nearest centroid for all points in the
training dataset). This is equivalent to sklearn's inertia.
"""
return self._call_java("trainingCost")
class KMeansModel(JavaModel, JavaMLWritable, JavaMLReadable):
"""
Model fitted by KMeans.
.. versionadded:: 1.5.0
"""
@since("1.5.0")
def clusterCenters(self):
"""Get the cluster centers, represented as a list of NumPy arrays."""
return [c.toArray() for c in self._call_java("clusterCenters")]
@since("2.0.0")
def computeCost(self, dataset):
"""
Return the K-means cost (sum of squared distances of points to their nearest center)
for this model on the given data.
.. note:: Deprecated in 2.4.0. It will be removed in 3.0.0. Use ClusteringEvaluator instead.
You can also get the cost on the training dataset in the summary.
"""
warnings.warn("Deprecated in 2.4.0. It will be removed in 3.0.0. Use ClusteringEvaluator "
"instead. You can also get the cost on the training dataset in the summary.",
DeprecationWarning)
return self._call_java("computeCost", dataset)
@property
@since("2.1.0")
def hasSummary(self):
"""
Indicates whether a training summary exists for this model instance.
"""
return self._call_java("hasSummary")
@property
@since("2.1.0")
def summary(self):
"""
Gets summary (e.g. cluster assignments, cluster sizes) of the model trained on the
training set. An exception is thrown if no summary exists.
"""
if self.hasSummary:
return KMeansSummary(self._call_java("summary"))
else:
raise RuntimeError("No training summary available for this %s" %
self.__class__.__name__)
@inherit_doc
class KMeans(JavaEstimator, HasDistanceMeasure, HasFeaturesCol, HasPredictionCol, HasMaxIter,
HasTol, HasSeed, JavaMLWritable, JavaMLReadable):
"""
K-means clustering with a k-means++ like initialization mode
(the k-means|| algorithm by Bahmani et al).
>>> from pyspark.ml.linalg import Vectors
>>> data = [(Vectors.dense([0.0, 0.0]),), (Vectors.dense([1.0, 1.0]),),
... (Vectors.dense([9.0, 8.0]),), (Vectors.dense([8.0, 9.0]),)]
>>> df = spark.createDataFrame(data, ["features"])
>>> kmeans = KMeans(k=2, seed=1)
>>> model = kmeans.fit(df)
>>> centers = model.clusterCenters()
>>> len(centers)
2
>>> model.computeCost(df)
2.000...
>>> transformed = model.transform(df).select("features", "prediction")
>>> rows = transformed.collect()
>>> rows[0].prediction == rows[1].prediction
True
>>> rows[2].prediction == rows[3].prediction
True
>>> model.hasSummary
True
>>> summary = model.summary
>>> summary.k
2
>>> summary.clusterSizes
[2, 2]
>>> summary.trainingCost
2.000...
>>> kmeans_path = temp_path + "/kmeans"
>>> kmeans.save(kmeans_path)
>>> kmeans2 = KMeans.load(kmeans_path)
>>> kmeans2.getK()
2
>>> model_path = temp_path + "/kmeans_model"
>>> model.save(model_path)
>>> model2 = KMeansModel.load(model_path)
>>> model2.hasSummary
False
>>> model.clusterCenters()[0] == model2.clusterCenters()[0]
array([ True, True], dtype=bool)
>>> model.clusterCenters()[1] == model2.clusterCenters()[1]
array([ True, True], dtype=bool)
.. versionadded:: 1.5.0
"""
k = Param(Params._dummy(), "k", "The number of clusters to create. Must be > 1.",
typeConverter=TypeConverters.toInt)
initMode = Param(Params._dummy(), "initMode",
"The initialization algorithm. This can be either \"random\" to " +
"choose random points as initial cluster centers, or \"k-means||\" " +
"to use a parallel variant of k-means++",
typeConverter=TypeConverters.toString)
initSteps = Param(Params._dummy(), "initSteps", "The number of steps for k-means|| " +
"initialization mode. Must be > 0.", typeConverter=TypeConverters.toInt)
@keyword_only
def __init__(self, featuresCol="features", predictionCol="prediction", k=2,
initMode="k-means||", initSteps=2, tol=1e-4, maxIter=20, seed=None,
distanceMeasure="euclidean"):
"""
__init__(self, featuresCol="features", predictionCol="prediction", k=2, \
initMode="k-means||", initSteps=2, tol=1e-4, maxIter=20, seed=None, \
distanceMeasure="euclidean")
"""
super(KMeans, self).__init__()
self._java_obj = self._new_java_obj("org.apache.spark.ml.clustering.KMeans", self.uid)
self._setDefault(k=2, initMode="k-means||", initSteps=2, tol=1e-4, maxIter=20,
distanceMeasure="euclidean")
kwargs = self._input_kwargs
self.setParams(**kwargs)
def _create_model(self, java_model):
return KMeansModel(java_model)
@keyword_only
@since("1.5.0")
def setParams(self, featuresCol="features", predictionCol="prediction", k=2,
initMode="k-means||", initSteps=2, tol=1e-4, maxIter=20, seed=None,
distanceMeasure="euclidean"):
"""
setParams(self, featuresCol="features", predictionCol="prediction", k=2, \
initMode="k-means||", initSteps=2, tol=1e-4, maxIter=20, seed=None, \
distanceMeasure="euclidean")
Sets params for KMeans.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
@since("1.5.0")
def setK(self, value):
"""
Sets the value of :py:attr:`k`.
"""
return self._set(k=value)
@since("1.5.0")
def getK(self):
"""
Gets the value of `k`
"""
return self.getOrDefault(self.k)
@since("1.5.0")
def setInitMode(self, value):
"""
Sets the value of :py:attr:`initMode`.
"""
return self._set(initMode=value)
@since("1.5.0")
def getInitMode(self):
"""
Gets the value of `initMode`
"""
return self.getOrDefault(self.initMode)
@since("1.5.0")
def setInitSteps(self, value):
"""
Sets the value of :py:attr:`initSteps`.
"""
return self._set(initSteps=value)
@since("1.5.0")
def getInitSteps(self):
"""
Gets the value of `initSteps`
"""
return self.getOrDefault(self.initSteps)
@since("2.4.0")
def setDistanceMeasure(self, value):
"""
Sets the value of :py:attr:`distanceMeasure`.
"""
return self._set(distanceMeasure=value)
@since("2.4.0")
def getDistanceMeasure(self):
"""
Gets the value of `distanceMeasure`
"""
return self.getOrDefault(self.distanceMeasure)
class BisectingKMeansModel(JavaModel, JavaMLWritable, JavaMLReadable):
"""
Model fitted by BisectingKMeans.
.. versionadded:: 2.0.0
"""
@since("2.0.0")
def clusterCenters(self):
"""Get the cluster centers, represented as a list of NumPy arrays."""
return [c.toArray() for c in self._call_java("clusterCenters")]
@since("2.0.0")
def computeCost(self, dataset):
"""
Computes the sum of squared distances between the input points
and their corresponding cluster centers.
"""
return self._call_java("computeCost", dataset)
@property
@since("2.1.0")
def hasSummary(self):
"""
Indicates whether a training summary exists for this model instance.
"""
return self._call_java("hasSummary")
@property
@since("2.1.0")
def summary(self):
"""
Gets summary (e.g. cluster assignments, cluster sizes) of the model trained on the
training set. An exception is thrown if no summary exists.
"""
if self.hasSummary:
return BisectingKMeansSummary(self._call_java("summary"))
else:
raise RuntimeError("No training summary available for this %s" %
self.__class__.__name__)
@inherit_doc
class BisectingKMeans(JavaEstimator, HasDistanceMeasure, HasFeaturesCol, HasPredictionCol,
HasMaxIter, HasSeed, JavaMLWritable, JavaMLReadable):
"""
A bisecting k-means algorithm based on the paper "A comparison of document clustering
techniques" by Steinbach, Karypis, and Kumar, with modification to fit Spark.
The algorithm starts from a single cluster that contains all points.
Iteratively it finds divisible clusters on the bottom level and bisects each of them using
k-means, until there are `k` leaf clusters in total or no leaf clusters are divisible.
The bisecting steps of clusters on the same level are grouped together to increase parallelism.
If bisecting all divisible clusters on the bottom level would result in more than `k` leaf
clusters, larger clusters get higher priority.
>>> from pyspark.ml.linalg import Vectors
>>> data = [(Vectors.dense([0.0, 0.0]),), (Vectors.dense([1.0, 1.0]),),
... (Vectors.dense([9.0, 8.0]),), (Vectors.dense([8.0, 9.0]),)]
>>> df = spark.createDataFrame(data, ["features"])
>>> bkm = BisectingKMeans(k=2, minDivisibleClusterSize=1.0)
>>> model = bkm.fit(df)
>>> centers = model.clusterCenters()
>>> len(centers)
2
>>> model.computeCost(df)
2.000...
>>> model.hasSummary
True
>>> summary = model.summary
>>> summary.k
2
>>> summary.clusterSizes
[2, 2]
>>> transformed = model.transform(df).select("features", "prediction")
>>> rows = transformed.collect()
>>> rows[0].prediction == rows[1].prediction
True
>>> rows[2].prediction == rows[3].prediction
True
>>> bkm_path = temp_path + "/bkm"
>>> bkm.save(bkm_path)
>>> bkm2 = BisectingKMeans.load(bkm_path)
>>> bkm2.getK()
2
>>> bkm2.getDistanceMeasure()
'euclidean'
>>> model_path = temp_path + "/bkm_model"
>>> model.save(model_path)
>>> model2 = BisectingKMeansModel.load(model_path)
>>> model2.hasSummary
False
>>> model.clusterCenters()[0] == model2.clusterCenters()[0]
array([ True, True], dtype=bool)
>>> model.clusterCenters()[1] == model2.clusterCenters()[1]
array([ True, True], dtype=bool)
.. versionadded:: 2.0.0
"""
k = Param(Params._dummy(), "k", "The desired number of leaf clusters. Must be > 1.",
typeConverter=TypeConverters.toInt)
minDivisibleClusterSize = Param(Params._dummy(), "minDivisibleClusterSize",
"The minimum number of points (if >= 1.0) or the minimum " +
"proportion of points (if < 1.0) of a divisible cluster.",
typeConverter=TypeConverters.toFloat)
@keyword_only
def __init__(self, featuresCol="features", predictionCol="prediction", maxIter=20,
seed=None, k=4, minDivisibleClusterSize=1.0, distanceMeasure="euclidean"):
"""
__init__(self, featuresCol="features", predictionCol="prediction", maxIter=20, \
seed=None, k=4, minDivisibleClusterSize=1.0, distanceMeasure="euclidean")
"""
super(BisectingKMeans, self).__init__()
self._java_obj = self._new_java_obj("org.apache.spark.ml.clustering.BisectingKMeans",
self.uid)
self._setDefault(maxIter=20, k=4, minDivisibleClusterSize=1.0)
kwargs = self._input_kwargs
self.setParams(**kwargs)
@keyword_only
@since("2.0.0")
def setParams(self, featuresCol="features", predictionCol="prediction", maxIter=20,
seed=None, k=4, minDivisibleClusterSize=1.0, distanceMeasure="euclidean"):
"""
setParams(self, featuresCol="features", predictionCol="prediction", maxIter=20, \
seed=None, k=4, minDivisibleClusterSize=1.0, distanceMeasure="euclidean")
Sets params for BisectingKMeans.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
@since("2.0.0")
def setK(self, value):
"""
Sets the value of :py:attr:`k`.
"""
return self._set(k=value)
@since("2.0.0")
def getK(self):
"""
Gets the value of `k` or its default value.
"""
return self.getOrDefault(self.k)
@since("2.0.0")
def setMinDivisibleClusterSize(self, value):
"""
Sets the value of :py:attr:`minDivisibleClusterSize`.
"""
return self._set(minDivisibleClusterSize=value)
@since("2.0.0")
def getMinDivisibleClusterSize(self):
"""
Gets the value of `minDivisibleClusterSize` or its default value.
"""
return self.getOrDefault(self.minDivisibleClusterSize)
@since("2.4.0")
def setDistanceMeasure(self, value):
"""
Sets the value of :py:attr:`distanceMeasure`.
"""
return self._set(distanceMeasure=value)
@since("2.4.0")
def getDistanceMeasure(self):
"""
Gets the value of `distanceMeasure` or its default value.
"""
return self.getOrDefault(self.distanceMeasure)
def _create_model(self, java_model):
return BisectingKMeansModel(java_model)
class BisectingKMeansSummary(ClusteringSummary):
"""
.. note:: Experimental
Bisecting KMeans clustering results for a given model.
.. versionadded:: 2.1.0
"""
pass
@inherit_doc
class LDAModel(JavaModel):
"""
Latent Dirichlet Allocation (LDA) model.
This abstraction permits different underlying representations,
including local and distributed data structures.
.. versionadded:: 2.0.0
"""
@since("2.0.0")
def isDistributed(self):
"""
Indicates whether this instance is of type DistributedLDAModel
"""
return self._call_java("isDistributed")
@since("2.0.0")
def vocabSize(self):
"""Vocabulary size (number of terms or words in the vocabulary)"""
return self._call_java("vocabSize")
@since("2.0.0")
def topicsMatrix(self):
"""
Inferred topics, where each topic is represented by a distribution over terms.
This is a matrix of size vocabSize x k, where each column is a topic.
No guarantees are given about the ordering of the topics.
WARNING: If this model is actually a :py:class:`DistributedLDAModel` instance produced by
the Expectation-Maximization ("em") `optimizer`, then this method could involve
collecting a large amount of data to the driver (on the order of vocabSize x k).
"""
return self._call_java("topicsMatrix")
@since("2.0.0")
def logLikelihood(self, dataset):
"""
Calculates a lower bound on the log likelihood of the entire corpus.
See Equation (16) in the Online LDA paper (Hoffman et al., 2010).
WARNING: If this model is an instance of :py:class:`DistributedLDAModel` (produced when
:py:attr:`optimizer` is set to "em"), this involves collecting a large
:py:func:`topicsMatrix` to the driver. This implementation may be changed in the future.
"""
return self._call_java("logLikelihood", dataset)
@since("2.0.0")
def logPerplexity(self, dataset):
"""
Calculate an upper bound on perplexity. (Lower is better.)
See Equation (16) in the Online LDA paper (Hoffman et al., 2010).
WARNING: If this model is an instance of :py:class:`DistributedLDAModel` (produced when
:py:attr:`optimizer` is set to "em"), this involves collecting a large
:py:func:`topicsMatrix` to the driver. This implementation may be changed in the future.
"""
return self._call_java("logPerplexity", dataset)
@since("2.0.0")
def describeTopics(self, maxTermsPerTopic=10):
"""
Return the topics described by their top-weighted terms.
"""
return self._call_java("describeTopics", maxTermsPerTopic)
@since("2.0.0")
def estimatedDocConcentration(self):
"""
Value for :py:attr:`LDA.docConcentration` estimated from data.
If Online LDA was used and :py:attr:`LDA.optimizeDocConcentration` was set to false,
then this returns the fixed (given) value for the :py:attr:`LDA.docConcentration` parameter.
"""
return self._call_java("estimatedDocConcentration")
@inherit_doc
class DistributedLDAModel(LDAModel, JavaMLReadable, JavaMLWritable):
"""
Distributed model fitted by :py:class:`LDA`.
This type of model is currently only produced by Expectation-Maximization (EM).
This model stores the inferred topics, the full training dataset, and the topic distribution
for each training document.
.. versionadded:: 2.0.0
"""
@since("2.0.0")
def toLocal(self):
"""
Convert this distributed model to a local representation. This discards info about the
training dataset.
WARNING: This involves collecting a large :py:func:`topicsMatrix` to the driver.
"""
model = LocalLDAModel(self._call_java("toLocal"))
# SPARK-10931: Temporary fix to be removed once LDAModel defines Params
model._create_params_from_java()
model._transfer_params_from_java()
return model
@since("2.0.0")
def trainingLogLikelihood(self):
"""
Log likelihood of the observed tokens in the training set,
given the current parameter estimates:
log P(docs | topics, topic distributions for docs, Dirichlet hyperparameters)
Notes:
- This excludes the prior; for that, use :py:func:`logPrior`.
- Even with :py:func:`logPrior`, this is NOT the same as the data log likelihood given
the hyperparameters.
- This is computed from the topic distributions computed during training. If you call
:py:func:`logLikelihood` on the same training dataset, the topic distributions
will be computed again, possibly giving different results.
"""
return self._call_java("trainingLogLikelihood")
@since("2.0.0")
def logPrior(self):
"""
Log probability of the current parameter estimate:
log P(topics, topic distributions for docs | alpha, eta)
"""
return self._call_java("logPrior")
@since("2.0.0")
def getCheckpointFiles(self):
"""
If using checkpointing and :py:attr:`LDA.keepLastCheckpoint` is set to true, then there may
be saved checkpoint files. This method is provided so that users can manage those files.
.. note:: Removing the checkpoints can cause failures if a partition is lost and is needed
by certain :py:class:`DistributedLDAModel` methods. Reference counting will clean up
the checkpoints when this model and derivative data go out of scope.
:return: List of checkpoint files from training
"""
return self._call_java("getCheckpointFiles")
@inherit_doc
class LocalLDAModel(LDAModel, JavaMLReadable, JavaMLWritable):
"""
Local (non-distributed) model fitted by :py:class:`LDA`.
This model stores the inferred topics only; it does not store info about the training dataset.
.. versionadded:: 2.0.0
"""
pass
@inherit_doc
class LDA(JavaEstimator, HasFeaturesCol, HasMaxIter, HasSeed, HasCheckpointInterval,
JavaMLReadable, JavaMLWritable):
"""
Latent Dirichlet Allocation (LDA), a topic model designed for text documents.
Terminology:
- "term" = "word": an element of the vocabulary
- "token": instance of a term appearing in a document
- "topic": multinomial distribution over terms representing some concept
- "document": one piece of text, corresponding to one row in the input data
Original LDA paper (journal version):
Blei, Ng, and Jordan. "Latent Dirichlet Allocation." JMLR, 2003.
Input data (featuresCol):
LDA is given a collection of documents as input data, via the featuresCol parameter.
Each document is specified as a :py:class:`Vector` of length vocabSize, where each entry is the
count for the corresponding term (word) in the document. Feature transformers such as
:py:class:`pyspark.ml.feature.Tokenizer` and :py:class:`pyspark.ml.feature.CountVectorizer`
can be useful for converting text to word count vectors.
>>> from pyspark.ml.linalg import Vectors, SparseVector
>>> from pyspark.ml.clustering import LDA
>>> df = spark.createDataFrame([[1, Vectors.dense([0.0, 1.0])],
... [2, SparseVector(2, {0: 1.0})],], ["id", "features"])
>>> lda = LDA(k=2, seed=1, optimizer="em")
>>> model = lda.fit(df)
>>> model.isDistributed()
True
>>> localModel = model.toLocal()
>>> localModel.isDistributed()
False
>>> model.vocabSize()
2
>>> model.describeTopics().show()
+-----+-----------+--------------------+
|topic|termIndices| termWeights|
+-----+-----------+--------------------+
| 0| [1, 0]|[0.50401530077160...|
| 1| [0, 1]|[0.50401530077160...|
+-----+-----------+--------------------+
...
>>> model.topicsMatrix()
DenseMatrix(2, 2, [0.496, 0.504, 0.504, 0.496], 0)
>>> lda_path = temp_path + "/lda"
>>> lda.save(lda_path)
>>> sameLDA = LDA.load(lda_path)
>>> distributed_model_path = temp_path + "/lda_distributed_model"
>>> model.save(distributed_model_path)
>>> sameModel = DistributedLDAModel.load(distributed_model_path)
>>> local_model_path = temp_path + "/lda_local_model"
>>> localModel.save(local_model_path)
>>> sameLocalModel = LocalLDAModel.load(local_model_path)
.. versionadded:: 2.0.0
"""
k = Param(Params._dummy(), "k", "The number of topics (clusters) to infer. Must be > 1.",
typeConverter=TypeConverters.toInt)
optimizer = Param(Params._dummy(), "optimizer",
"Optimizer or inference algorithm used to estimate the LDA model. "
"Supported: online, em", typeConverter=TypeConverters.toString)
learningOffset = Param(Params._dummy(), "learningOffset",
"A (positive) learning parameter that downweights early iterations."
" Larger values make early iterations count less",
typeConverter=TypeConverters.toFloat)
learningDecay = Param(Params._dummy(), "learningDecay", "Learning rate, set as an "
"exponential decay rate. This should be between (0.5, 1.0] to "
"guarantee asymptotic convergence.", typeConverter=TypeConverters.toFloat)
subsamplingRate = Param(Params._dummy(), "subsamplingRate",
"Fraction of the corpus to be sampled and used in each iteration "
"of mini-batch gradient descent, in range (0, 1].",
typeConverter=TypeConverters.toFloat)
optimizeDocConcentration = Param(Params._dummy(), "optimizeDocConcentration",
"Indicates whether the docConcentration (Dirichlet parameter "
"for document-topic distribution) will be optimized during "
"training.", typeConverter=TypeConverters.toBoolean)
docConcentration = Param(Params._dummy(), "docConcentration",
"Concentration parameter (commonly named \"alpha\") for the "
"prior placed on documents' distributions over topics (\"theta\").",
typeConverter=TypeConverters.toListFloat)
topicConcentration = Param(Params._dummy(), "topicConcentration",
"Concentration parameter (commonly named \"beta\" or \"eta\") for "
"the prior placed on topic' distributions over terms.",
typeConverter=TypeConverters.toFloat)
topicDistributionCol = Param(Params._dummy(), "topicDistributionCol",
"Output column with estimates of the topic mixture distribution "
"for each document (often called \"theta\" in the literature). "
"Returns a vector of zeros for an empty document.",
typeConverter=TypeConverters.toString)
keepLastCheckpoint = Param(Params._dummy(), "keepLastCheckpoint",
"(For EM optimizer) If using checkpointing, this indicates whether"
" to keep the last checkpoint. If false, then the checkpoint will be"
" deleted. Deleting the checkpoint can cause failures if a data"
" partition is lost, so set this bit with care.",
TypeConverters.toBoolean)
@keyword_only
def __init__(self, featuresCol="features", maxIter=20, seed=None, checkpointInterval=10,
k=10, optimizer="online", learningOffset=1024.0, learningDecay=0.51,
subsamplingRate=0.05, optimizeDocConcentration=True,
docConcentration=None, topicConcentration=None,
topicDistributionCol="topicDistribution", keepLastCheckpoint=True):
"""
__init__(self, featuresCol="features", maxIter=20, seed=None, checkpointInterval=10,\
k=10, optimizer="online", learningOffset=1024.0, learningDecay=0.51,\
subsamplingRate=0.05, optimizeDocConcentration=True,\
docConcentration=None, topicConcentration=None,\
topicDistributionCol="topicDistribution", keepLastCheckpoint=True)
"""
super(LDA, self).__init__()
self._java_obj = self._new_java_obj("org.apache.spark.ml.clustering.LDA", self.uid)
self._setDefault(maxIter=20, checkpointInterval=10,
k=10, optimizer="online", learningOffset=1024.0, learningDecay=0.51,
subsamplingRate=0.05, optimizeDocConcentration=True,
topicDistributionCol="topicDistribution", keepLastCheckpoint=True)
kwargs = self._input_kwargs
self.setParams(**kwargs)
def _create_model(self, java_model):
if self.getOptimizer() == "em":
return DistributedLDAModel(java_model)
else:
return LocalLDAModel(java_model)
@keyword_only
@since("2.0.0")
def setParams(self, featuresCol="features", maxIter=20, seed=None, checkpointInterval=10,
k=10, optimizer="online", learningOffset=1024.0, learningDecay=0.51,
subsamplingRate=0.05, optimizeDocConcentration=True,
docConcentration=None, topicConcentration=None,
topicDistributionCol="topicDistribution", keepLastCheckpoint=True):
"""
setParams(self, featuresCol="features", maxIter=20, seed=None, checkpointInterval=10,\
k=10, optimizer="online", learningOffset=1024.0, learningDecay=0.51,\
subsamplingRate=0.05, optimizeDocConcentration=True,\
docConcentration=None, topicConcentration=None,\
topicDistributionCol="topicDistribution", keepLastCheckpoint=True)
Sets params for LDA.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
@since("2.0.0")
def setK(self, value):
"""
Sets the value of :py:attr:`k`.
>>> algo = LDA().setK(10)
>>> algo.getK()
10
"""
return self._set(k=value)
@since("2.0.0")
def getK(self):
"""
Gets the value of :py:attr:`k` or its default value.
"""
return self.getOrDefault(self.k)
@since("2.0.0")
def setOptimizer(self, value):
"""
Sets the value of :py:attr:`optimizer`.
Currently only 'em' and 'online' are supported.
>>> algo = LDA().setOptimizer("em")
>>> algo.getOptimizer()
'em'
"""
return self._set(optimizer=value)
@since("2.0.0")
def getOptimizer(self):
"""
Gets the value of :py:attr:`optimizer` or its default value.
"""
return self.getOrDefault(self.optimizer)
@since("2.0.0")
def setLearningOffset(self, value):
"""
Sets the value of :py:attr:`learningOffset`.
>>> algo = LDA().setLearningOffset(100)
>>> algo.getLearningOffset()
100.0
"""
return self._set(learningOffset=value)
@since("2.0.0")
def getLearningOffset(self):
"""
Gets the value of :py:attr:`learningOffset` or its default value.
"""
return self.getOrDefault(self.learningOffset)
@since("2.0.0")
def setLearningDecay(self, value):
"""
Sets the value of :py:attr:`learningDecay`.
>>> algo = LDA().setLearningDecay(0.1)
>>> algo.getLearningDecay()
0.1...
"""
return self._set(learningDecay=value)
@since("2.0.0")
def getLearningDecay(self):
"""
Gets the value of :py:attr:`learningDecay` or its default value.
"""
return self.getOrDefault(self.learningDecay)
@since("2.0.0")
def setSubsamplingRate(self, value):
"""
Sets the value of :py:attr:`subsamplingRate`.
>>> algo = LDA().setSubsamplingRate(0.1)
>>> algo.getSubsamplingRate()
0.1...
"""
return self._set(subsamplingRate=value)
@since("2.0.0")
def getSubsamplingRate(self):
"""
Gets the value of :py:attr:`subsamplingRate` or its default value.
"""
return self.getOrDefault(self.subsamplingRate)
@since("2.0.0")
def setOptimizeDocConcentration(self, value):
"""
Sets the value of :py:attr:`optimizeDocConcentration`.
>>> algo = LDA().setOptimizeDocConcentration(True)
>>> algo.getOptimizeDocConcentration()
True
"""
return self._set(optimizeDocConcentration=value)
@since("2.0.0")
def getOptimizeDocConcentration(self):
"""
Gets the value of :py:attr:`optimizeDocConcentration` or its default value.
"""
return self.getOrDefault(self.optimizeDocConcentration)
@since("2.0.0")
def setDocConcentration(self, value):
"""
Sets the value of :py:attr:`docConcentration`.
>>> algo = LDA().setDocConcentration([0.1, 0.2])
>>> algo.getDocConcentration()
[0.1..., 0.2...]
"""
return self._set(docConcentration=value)
@since("2.0.0")
def getDocConcentration(self):
"""
Gets the value of :py:attr:`docConcentration` or its default value.
"""
return self.getOrDefault(self.docConcentration)
@since("2.0.0")
def setTopicConcentration(self, value):
"""
Sets the value of :py:attr:`topicConcentration`.
>>> algo = LDA().setTopicConcentration(0.5)
>>> algo.getTopicConcentration()
0.5...
"""
return self._set(topicConcentration=value)
@since("2.0.0")
def getTopicConcentration(self):
"""
Gets the value of :py:attr:`topicConcentration` or its default value.
"""
return self.getOrDefault(self.topicConcentration)
@since("2.0.0")
def setTopicDistributionCol(self, value):
"""
Sets the value of :py:attr:`topicDistributionCol`.
>>> algo = LDA().setTopicDistributionCol("topicDistributionCol")
>>> algo.getTopicDistributionCol()
'topicDistributionCol'
"""
return self._set(topicDistributionCol=value)
@since("2.0.0")
def getTopicDistributionCol(self):
"""
Gets the value of :py:attr:`topicDistributionCol` or its default value.
"""
return self.getOrDefault(self.topicDistributionCol)
@since("2.0.0")
def setKeepLastCheckpoint(self, value):
"""
Sets the value of :py:attr:`keepLastCheckpoint`.
>>> algo = LDA().setKeepLastCheckpoint(False)
>>> algo.getKeepLastCheckpoint()
False
"""
return self._set(keepLastCheckpoint=value)
@since("2.0.0")
def getKeepLastCheckpoint(self):
"""
Gets the value of :py:attr:`keepLastCheckpoint` or its default value.
"""
return self.getOrDefault(self.keepLastCheckpoint)
@inherit_doc
class PowerIterationClustering(HasMaxIter, HasWeightCol, JavaParams, JavaMLReadable,
JavaMLWritable):
"""
.. note:: Experimental
Power Iteration Clustering (PIC), a scalable graph clustering algorithm developed by
`Lin and Cohen <http://www.icml2010.org/papers/387.pdf>`_. From the abstract:
PIC finds a very low-dimensional embedding of a dataset using truncated power
iteration on a normalized pair-wise similarity matrix of the data.
This class is not yet an Estimator/Transformer; use the :py:func:`assignClusters` method
to run the PowerIterationClustering algorithm.
.. seealso:: `Wikipedia on Spectral clustering
<http://en.wikipedia.org/wiki/Spectral_clustering>`_
>>> data = [(1, 0, 0.5),
... (2, 0, 0.5), (2, 1, 0.7),
... (3, 0, 0.5), (3, 1, 0.7), (3, 2, 0.9),
... (4, 0, 0.5), (4, 1, 0.7), (4, 2, 0.9), (4, 3, 1.1),
... (5, 0, 0.5), (5, 1, 0.7), (5, 2, 0.9), (5, 3, 1.1), (5, 4, 1.3)]
>>> df = spark.createDataFrame(data).toDF("src", "dst", "weight")
>>> pic = PowerIterationClustering(k=2, maxIter=40, weightCol="weight")
>>> assignments = pic.assignClusters(df)
>>> assignments.sort(assignments.id).show(truncate=False)
+---+-------+
|id |cluster|
+---+-------+
|0 |1 |
|1 |1 |
|2 |1 |
|3 |1 |
|4 |1 |
|5 |0 |
+---+-------+
...
>>> pic_path = temp_path + "/pic"
>>> pic.save(pic_path)
>>> pic2 = PowerIterationClustering.load(pic_path)
>>> pic2.getK()
2
>>> pic2.getMaxIter()
40
.. versionadded:: 2.4.0
"""
k = Param(Params._dummy(), "k",
"The number of clusters to create. Must be > 1.",
typeConverter=TypeConverters.toInt)
initMode = Param(Params._dummy(), "initMode",
"The initialization algorithm. This can be either " +
"'random' to use a random vector as vertex properties, or 'degree' to use " +
"a normalized sum of similarities with other vertices. Supported options: " +
"'random' and 'degree'.",
typeConverter=TypeConverters.toString)
srcCol = Param(Params._dummy(), "srcCol",
"Name of the input column for source vertex IDs.",
typeConverter=TypeConverters.toString)
dstCol = Param(Params._dummy(), "dstCol",
"Name of the input column for destination vertex IDs.",
typeConverter=TypeConverters.toString)
@keyword_only
def __init__(self, k=2, maxIter=20, initMode="random", srcCol="src", dstCol="dst",
weightCol=None):
"""
__init__(self, k=2, maxIter=20, initMode="random", srcCol="src", dstCol="dst",\
weightCol=None)
"""
super(PowerIterationClustering, self).__init__()
self._java_obj = self._new_java_obj(
"org.apache.spark.ml.clustering.PowerIterationClustering", self.uid)
self._setDefault(k=2, maxIter=20, initMode="random", srcCol="src", dstCol="dst")
kwargs = self._input_kwargs
self.setParams(**kwargs)
@keyword_only
@since("2.4.0")
def setParams(self, k=2, maxIter=20, initMode="random", srcCol="src", dstCol="dst",
weightCol=None):
"""
setParams(self, k=2, maxIter=20, initMode="random", srcCol="src", dstCol="dst",\
weightCol=None)
Sets params for PowerIterationClustering.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
@since("2.4.0")
def setK(self, value):
"""
Sets the value of :py:attr:`k`.
"""
return self._set(k=value)
@since("2.4.0")
def getK(self):
"""
Gets the value of :py:attr:`k` or its default value.
"""
return self.getOrDefault(self.k)
@since("2.4.0")
def setInitMode(self, value):
"""
Sets the value of :py:attr:`initMode`.
"""
return self._set(initMode=value)
@since("2.4.0")
def getInitMode(self):
"""
Gets the value of :py:attr:`initMode` or its default value.
"""
return self.getOrDefault(self.initMode)
@since("2.4.0")
def setSrcCol(self, value):
"""
Sets the value of :py:attr:`srcCol`.
"""
return self._set(srcCol=value)
@since("2.4.0")
def getSrcCol(self):
"""
Gets the value of :py:attr:`srcCol` or its default value.
"""
return self.getOrDefault(self.srcCol)
@since("2.4.0")
def setDstCol(self, value):
"""
Sets the value of :py:attr:`dstCol`.
"""
return self._set(dstCol=value)
@since("2.4.0")
def getDstCol(self):
"""
Gets the value of :py:attr:`dstCol` or its default value.
"""
return self.getOrDefault(self.dstCol)
@since("2.4.0")
def assignClusters(self, dataset):
"""
Run the PIC algorithm and returns a cluster assignment for each input vertex.
:param dataset:
A dataset with columns src, dst, weight representing the affinity matrix,
which is the matrix A in the PIC paper. Suppose the src column value is i,
the dst column value is j, the weight column value is similarity s(i, j),
which must be nonnegative. This is a symmetric matrix and hence
s(i, j) = s(j, i). For any (i, j) with nonzero similarity, there should be
either (i, j, s(i, j)) or (j, i, s(j, i)) in the input. Rows with i = j are
ignored, because we assume s(i, j) = 0.0.
:return:
A dataset that contains columns of vertex id and the corresponding cluster for
the id. The schema of it will be:
- id: Long
- cluster: Int
.. versionadded:: 2.4.0
"""
self._transfer_params_to_java()
jdf = self._java_obj.assignClusters(dataset._jdf)
return DataFrame(jdf, dataset.sql_ctx)
if __name__ == "__main__":
import doctest
import numpy
import pyspark.ml.clustering
from pyspark.sql import SparkSession
try:
# Numpy 1.14+ changed its string format.
numpy.set_printoptions(legacy='1.13')
except TypeError:
pass
globs = pyspark.ml.clustering.__dict__.copy()
# The small batch size here ensures that we see multiple batches,
# even in these small test examples:
spark = SparkSession.builder\
.master("local[2]")\
.appName("ml.clustering tests")\
.getOrCreate()
sc = spark.sparkContext
globs['sc'] = sc
globs['spark'] = spark
import tempfile
temp_path = tempfile.mkdtemp()
globs['temp_path'] = temp_path
try:
(failure_count, test_count) = doctest.testmod(globs=globs, optionflags=doctest.ELLIPSIS)
spark.stop()
finally:
from shutil import rmtree
try:
rmtree(temp_path)
except OSError:
pass
if failure_count:
sys.exit(-1)
|
|
from sympy import Symbol, Wild, sin, cos, exp, sqrt, pi, Function, Derivative,\
abc, Integer, Eq, symbols, Add, I, Real, log, Rational, Lambda, atan2
def test_subs():
n3=Rational(3)
n2=Rational(2)
n6=Rational(6)
x=Symbol("x")
c=Symbol("c")
e=x
e=e.subs(x,n3)
assert e == Rational(3)
e=2*x
assert e == 2*x
e=e.subs(x,n3)
assert e == Rational(6)
def test_trigonometric():
x = Symbol('x')
n3 = Rational(3)
e=(sin(x)**2).diff(x)
assert e == 2*sin(x)*cos(x)
e=e.subs(x,n3)
assert e == 2*cos(n3)*sin(n3)
e=(sin(x)**2).diff(x)
assert e == 2*sin(x)*cos(x)
e=e.subs(sin(x),cos(x))
assert e == 2*cos(x)**2
assert exp(pi).subs(exp, sin) == 0
assert cos(exp(pi)).subs(exp, sin) == 1
def test_powers():
x = Symbol('x')
assert sqrt(1 - sqrt(x)).subs(x, 4) == I
assert (sqrt(1-x**2)**3).subs(x, 2) == - 3 * I * sqrt(3)
assert (x ** Rational(1,3)).subs(x, 27) == 3
assert (x ** Rational(1,3)).subs(x, -27) == 3 * (-1) ** Rational(1,3)
assert ((-x) ** Rational(1,3)).subs(x, 27) == 3 * (-1) ** Rational(1,3)
def test_logexppow(): # no eval()
x = Symbol("x")
w = Symbol("dummy :)")
e = (3**(1+x)+2**(1+x))/(3**x+2**x)
assert e.subs(2**x, w) != e
assert e.subs(exp(x*log(Rational(2))),w) != e
def test_bug():
x1=Symbol("x1")
x2=Symbol("x2")
y=x1*x2
y.subs(x1,Real(3.0))
def test_subbug1():
x=Symbol("x")
e=(x**x).subs(x,1)
e=(x**x).subs(x,1.0)
def test_subbug2():
# Ensure this does not cause infinite recursion
x = Symbol('x')
assert Real(7.7).epsilon_eq(abs(x).subs(x, -7.7))
def test_dict():
x = Symbol('x')
a,b,c = map(Wild, 'abc')
f = 3*cos(4*x)
r = f.match(a*cos(b*x))
assert r == {a: 3, b: 4}
e = a/b * sin(b*x)
assert e._subs_dict(r) == r[a]/r[b] * sin(r[b]*x)
assert e._subs_dict(r) == 3 * sin(4*x) / 4
def test_dict_ambigous(): # see #467
x = Symbol('x')
y = Symbol('y')
z = Symbol('z')
f = x*exp(x)
g = z*exp(z)
df= {x:y, exp(x): y}
dg= {z:y, exp(z): y}
assert f._subs_dict(df) == y**2
assert g._subs_dict(dg) == y**2
# and this is how order can affect the result
assert f .subs(x,y) .subs(exp(x),y) == y*exp(y)
assert f .subs(exp(x),y) .subs(x,y) == y**2
def test_deriv_sub_bug3():
x = Symbol("x")
y = Symbol("y")
f = Function("f")
pat = Derivative(f(x), x, x)
assert pat.subs(y, y**2) == Derivative(f(x), x, x)
assert pat.subs(y, y**2) != Derivative(f(x), x)
def test_equality_subs1():
f = Function("f")
x = abc.x
eq = Eq(f(x)**2, x)
res = Eq(Integer(16), x)
assert eq.subs(f(x), 4) == res
def test_equality_subs2():
f = Function("f")
x = abc.x
eq = Eq(f(x)**2, 16)
assert bool(eq.subs(f(x), 3)) == False
assert bool(eq.subs(f(x), 4)) == True
def test_issue643():
x = Symbol('x')
y = Symbol('y')
e = sqrt(x)*exp(y)
assert e.subs(sqrt(x), 1) == exp(y)
def test_subs_dict1():
x, y = symbols('xy')
assert (1+x*y).subs(x, pi) == 1 + pi*y
assert (1+x*y).subs({x:pi, y:2}) == 1 + 2*pi
c2,c3,q1p,q2p,c1,s1,s2,s3= symbols('c2 c3 q1p q2p c1 s1 s2 s3')
test=c2**2*q2p*c3 + c1**2*s2**2*q2p*c3 + s1**2*s2**2*q2p*c3 \
- c1**2*q1p*c2*s3 - s1**2*q1p*c2*s3
assert test.subs({c1**2 : 1-s1**2, c2**2 : 1-s2**2, c3**3: 1-s3**2}) \
== c3*q2p*(1 - s2**2) + c3*q2p*s2**2*(1 - s1**2) - c2*q1p*s3*(1 - s1**2) \
+ c3*q2p*s1**2*s2**2 - c2*q1p*s3*s1**2
def test_subs_dict2():
x = Symbol('x')
a,b,c = map(Wild, 'abc')
f = 3*cos(4*x)
r = f.match(a*cos(b*x))
assert r == {a: 3, b: 4}
e = a/b * sin(b*x)
assert e.subs(r) == r[a]/r[b] * sin(r[b]*x)
assert e.subs(r) == 3 * sin(4*x) / 4
def test_mul():
x, y, z = map(Symbol, 'xyz')
assert (x*y*z).subs(z*x,y) == y**2
#assert (2*x*y).subs(5*x*y,z) == 2*z/5
def test_subs_simple():
# Define symbols
a = symbols('a', commutative = True)
x = symbols('x', commutative = False)
""" SIMPLE TESTS """
assert (2*a ).subs(1,3) == 2*a
assert (2*a ).subs(2,3) == 3*a
assert (2*a ).subs(a,3) == 6
assert sin(2).subs(1,3) == sin(2)
assert sin(2).subs(2,3) == sin(3)
assert sin(a).subs(a,3) == sin(3)
assert (2*x ).subs(1,3) == 2*x
assert (2*x ).subs(2,3) == 3*x
assert (2*x ).subs(x,3) == 6
assert sin(x).subs(x,3) == sin(3)
def test_subs_constants():
# Define symbols
a,b = symbols('ab', commutative = True)
x,y = symbols('xy', commutative = False)
""" CONSTANTS TESTS """
assert (a*b ).subs(2*a,1) == a*b
assert (1.5*a*b).subs(a,1) == 1.5*b
assert (2*a*b).subs(2*a,1) == b
assert (2*a*b).subs(4*a,1) == 2*a*b
assert (x*y ).subs(2*x,1) == x*y
assert (1.5*x*y).subs(x,1) == 1.5*y
assert (2*x*y).subs(2*x,1) == y
assert (2*x*y).subs(4*x,1) == 2*x*y
def test_subs_commutative():
# Define symbols
a,b,c,d,K = symbols('abcdK', commutative = True)
""" COMMUTATIVE TESTS """
assert (a*b ).subs(a*b,K) == K
assert (a*b*a*b).subs(a*b,K) == K**2
assert (a*a*b*b).subs(a*b,K) == K**2
assert (a*b*c*d).subs(a*b*c,K) == d*K
assert (a*b**c ).subs(a,K) == K*b**c
assert (a*b**c ).subs(b,K) == a*K**c
assert (a*b**c ).subs(c,K) == a*b**K
assert (a*b*c*b*a ).subs(a*b,K) == c*K**2
assert (a**3*b**2*a).subs(a*b,K) == a**2*K**2
def test_subs_noncommutative():
# Define symbols
w,x,y,z,L = symbols('wxyzL', commutative = False)
""" NONCOMMUTATIVE TESTS """
assert (x*y ).subs(x*y,L) == L
assert (w*y*x ).subs(x*y,L) == w*y*x
assert (w*x*y*z).subs(x*y,L) == w*L*z
assert (x*y*x*y).subs(x*y,L) == L**2
assert (x*x*y ).subs(x*y,L) == x*L
assert (x*x*y*y).subs(x*y,L) == x*L*y
assert (w*x*y ).subs(x*y*z,L) == w*x*y
assert (x*y**z ).subs(x,L) == L*y**z
assert (x*y**z ).subs(y,L) == x*L**z
assert (x*y**z ).subs(z,L) == x*y**L
assert (w*x*y*z*x*y).subs(x*y*z,L) == w*L*x*y
assert (w*x*y*y*w*x*x*y*x*y*y*x*y).subs(x*y,L) == w*L*y*w*x*L**2*y*L
def test_subs_basic_funcs():
# Define symbols
a,b,c,d,K = symbols('abcdK', commutative = True)
w,x,y,z,L = symbols('wxyzL', commutative = False)
""" OTHER OPERATION TESTS"""
assert (x+y ).subs(x+y,L) == L
assert (x-y ).subs(x-y,L) == L
assert (x/y ).subs(x,L) == L/y
assert (x**y ).subs(x,L) == L**y
assert (x**y ).subs(y,L) == x**L
assert ( (a-c)/b ).subs(b,K) == (a-c)/K
assert (exp(x*y-z)).subs(x*y,L) == exp(L-z)
assert (a*exp(x*y-w*z)+b*exp(x*y+w*z)).subs(z,0) == a*exp(x*y)+b*exp(x*y)
assert ((a-b)/(c*d-a*b)).subs(c*d-a*b,K) == (a-b)/K
assert (w*exp(a*b-c)*x*y/4).subs(x*y,L) == w*exp(a*b-c)*L/4
#assert (a/(b*c)).subs(b*c,K) == a/K,'Failed'; print '.' #FAILS DIVISION
def test_subs_wild():
# Define symbols
R = Wild('R'); S = Wild('S'); T = Wild('T'); U = Wild('U')
""" WILD TESTS """
assert (R*S ).subs(R*S,T) == T
assert (S*R ).subs(R*S,T) == T
assert (R+S ).subs(R+S,T) == T
assert (R**S).subs(R,T) == T**S
assert (R**S).subs(S,T) == R**T
assert (R*S**T).subs(R,U) == U*S**T
assert (R*S**T).subs(S,U) == R*U**T
assert (R*S**T).subs(T,U) == R*S**U
def test_subs_mixed():
# Define symbols
a,b,c,d,K = symbols('abcdK', commutative = True)
w,x,y,z,L = symbols('wxyzL', commutative = False)
R,S,T,U = Wild('R'), Wild('S'), Wild('T'), Wild('U')
""" MIXED TESTS """
assert ( a*x*y ).subs(x*y,L) == a*L
assert ( a*b*x*y*x ).subs(x*y,L) == a*b*L*x
assert (R*x*y*exp(x*y)).subs(x*y,L) == R*L*exp(L)
assert ( a*x*y*y*x-x*y*z*exp(a*b) ).subs(x*y,L) == a*L*y*x-L*z*exp(a*b)
assert (c*y*x*y*x**(R*S-a*b)-T*(a*R*b*S)).subs(x*y,L).subs(a*b,K).subs(R*S,U) == c*y*L*x**(U-K)-T*(U*K)
def test_division():
a,b,c = symbols('abc', commutative = True)
x,y,z = symbols('xyz', commutative = True)
assert ( 1/a ).subs(a,c) == 1/c
assert ( 1/a**2 ).subs(a,c) == 1/c**2
assert ( 1/a**2 ).subs(a,-2) == Rational(1,4)
assert ( -(1/a**2)).subs(a,-2) == -Rational(1,4)
assert ( 1/x ).subs(x,z) == 1/z
assert ( 1/x**2 ).subs(x,z) == 1/z**2
assert ( 1/x**2 ).subs(x,-2) == Rational(1,4)
assert ( -(1/x**2)).subs(x,-2) == -Rational(1,4)
def test_add():
a, b, c, d, x = abc.a, abc.b, abc.c, abc.d, abc.x
assert (a**2 - b - c).subs(a**2 - b, d) in [d - c, a**2 - b - c]
assert (a**2 - c).subs(a**2 - c, d) == d
assert (a**2 - b - c).subs(a**2 - c, d) in [d - b, a**2 - b - c]
assert (a**2 - x - c).subs(a**2 - c, d) in [d - x, a**2 - x - c]
assert (a**2 - b - sqrt(a)).subs(a**2 - sqrt(a), c) == c - b
assert (a+b+exp(a+b)).subs(a+b,c) == c + exp(c)
assert (c+b+exp(c+b)).subs(c+b,a) == a + exp(a)
# this should work every time:
e = a**2 - b - c
assert e.subs(Add(*e.args[:2]), d) == d + e.args[2]
assert e.subs(a**2 - c, d) == d - b
def test_subs_issue910():
assert (I*Symbol("a")).subs(1, 2) == I*Symbol("a")
def test_functions_subs():
x, y = map(Symbol, 'xy')
f, g = map(Function, 'fg')
l = Lambda(x, y, sin(x) + y)
assert (g(y, x)+cos(x)).subs(g, l) == sin(y) + x + cos(x)
assert (f(x)**2).subs(f, sin) == sin(x)**2
assert (f(x,y)).subs(f,log) == log(x,y)
assert (f(x,y)).subs(f,sin) == f(x,y)
assert (sin(x)+atan2(x,y)).subs([[atan2,f],[sin,g]]) == f(x,y) + g(x)
assert (g(f(x+y, x))).subs([[f, l], [g, exp]]) == exp(x + sin(x + y))
|
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Section.version_parent_pk'
db.add_column('repchantest_section', 'version_parent_pk',
self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='repchantest_section_parentpk', null=True, to=orm['repchantest.Section']),
keep_default=False)
# Adding field 'Section.version_parent_rev_pk'
db.add_column('repchantest_section', 'version_parent_rev_pk',
self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='repchantest_section_parentverpk', null=True, to=orm['repchantest.Section']),
keep_default=False)
# Adding field 'Section.version_have_children'
db.add_column('repchantest_section', 'version_have_children',
self.gf('django.db.models.fields.BooleanField')(default=False),
keep_default=False)
# Adding field 'Section.version_date'
db.add_column('repchantest_section', 'version_date',
self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, null=True, blank=True),
keep_default=False)
# Adding field 'Section.version_hash'
db.add_column('repchantest_section', 'version_hash',
self.gf('django.db.models.fields.CharField')(max_length=512, null=True, blank=True),
keep_default=False)
# Adding field 'Section.version_in_trash'
db.add_column('repchantest_section', 'version_in_trash',
self.gf('django.db.models.fields.BooleanField')(default=False),
keep_default=False)
# Adding field 'AuthorAlias.version_parent_pk'
db.add_column('repchantest_authoralias', 'version_parent_pk',
self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='repchantest_authoralias_parentpk', null=True, to=orm['repchantest.AuthorAlias']),
keep_default=False)
# Adding field 'AuthorAlias.version_parent_rev_pk'
db.add_column('repchantest_authoralias', 'version_parent_rev_pk',
self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='repchantest_authoralias_parentverpk', null=True, to=orm['repchantest.AuthorAlias']),
keep_default=False)
# Adding field 'AuthorAlias.version_have_children'
db.add_column('repchantest_authoralias', 'version_have_children',
self.gf('django.db.models.fields.BooleanField')(default=False),
keep_default=False)
# Adding field 'AuthorAlias.version_date'
db.add_column('repchantest_authoralias', 'version_date',
self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, null=True, blank=True),
keep_default=False)
# Adding field 'AuthorAlias.version_hash'
db.add_column('repchantest_authoralias', 'version_hash',
self.gf('django.db.models.fields.CharField')(max_length=512, null=True, blank=True),
keep_default=False)
# Adding field 'AuthorAlias.version_in_trash'
db.add_column('repchantest_authoralias', 'version_in_trash',
self.gf('django.db.models.fields.BooleanField')(default=False),
keep_default=False)
# Adding field 'Page.version_parent_pk'
db.add_column('repchantest_page', 'version_parent_pk',
self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='repchantest_page_parentpk', null=True, to=orm['repchantest.Page']),
keep_default=False)
# Adding field 'Page.version_parent_rev_pk'
db.add_column('repchantest_page', 'version_parent_rev_pk',
self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='repchantest_page_parentverpk', null=True, to=orm['repchantest.Page']),
keep_default=False)
# Adding field 'Page.version_have_children'
db.add_column('repchantest_page', 'version_have_children',
self.gf('django.db.models.fields.BooleanField')(default=False),
keep_default=False)
# Adding field 'Page.version_date'
db.add_column('repchantest_page', 'version_date',
self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, null=True, blank=True),
keep_default=False)
# Adding field 'Page.version_hash'
db.add_column('repchantest_page', 'version_hash',
self.gf('django.db.models.fields.CharField')(max_length=512, null=True, blank=True),
keep_default=False)
# Adding field 'Page.version_in_trash'
db.add_column('repchantest_page', 'version_in_trash',
self.gf('django.db.models.fields.BooleanField')(default=False),
keep_default=False)
# Adding field 'Book.version_parent_pk'
db.add_column('repchantest_book', 'version_parent_pk',
self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='repchantest_book_parentpk', null=True, to=orm['repchantest.Book']),
keep_default=False)
# Adding field 'Book.version_parent_rev_pk'
db.add_column('repchantest_book', 'version_parent_rev_pk',
self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='repchantest_book_parentverpk', null=True, to=orm['repchantest.Book']),
keep_default=False)
# Adding field 'Book.version_have_children'
db.add_column('repchantest_book', 'version_have_children',
self.gf('django.db.models.fields.BooleanField')(default=False),
keep_default=False)
# Adding field 'Book.version_date'
db.add_column('repchantest_book', 'version_date',
self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, null=True, blank=True),
keep_default=False)
# Adding field 'Book.version_hash'
db.add_column('repchantest_book', 'version_hash',
self.gf('django.db.models.fields.CharField')(max_length=512, null=True, blank=True),
keep_default=False)
# Adding field 'Book.version_in_trash'
db.add_column('repchantest_book', 'version_in_trash',
self.gf('django.db.models.fields.BooleanField')(default=False),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Section.version_parent_pk'
db.delete_column('repchantest_section', 'version_parent_pk_id')
# Deleting field 'Section.version_parent_rev_pk'
db.delete_column('repchantest_section', 'version_parent_rev_pk_id')
# Deleting field 'Section.version_have_children'
db.delete_column('repchantest_section', 'version_have_children')
# Deleting field 'Section.version_date'
db.delete_column('repchantest_section', 'version_date')
# Deleting field 'Section.version_hash'
db.delete_column('repchantest_section', 'version_hash')
# Deleting field 'Section.version_in_trash'
db.delete_column('repchantest_section', 'version_in_trash')
# Deleting field 'AuthorAlias.version_parent_pk'
db.delete_column('repchantest_authoralias', 'version_parent_pk_id')
# Deleting field 'AuthorAlias.version_parent_rev_pk'
db.delete_column('repchantest_authoralias', 'version_parent_rev_pk_id')
# Deleting field 'AuthorAlias.version_have_children'
db.delete_column('repchantest_authoralias', 'version_have_children')
# Deleting field 'AuthorAlias.version_date'
db.delete_column('repchantest_authoralias', 'version_date')
# Deleting field 'AuthorAlias.version_hash'
db.delete_column('repchantest_authoralias', 'version_hash')
# Deleting field 'AuthorAlias.version_in_trash'
db.delete_column('repchantest_authoralias', 'version_in_trash')
# Deleting field 'Page.version_parent_pk'
db.delete_column('repchantest_page', 'version_parent_pk_id')
# Deleting field 'Page.version_parent_rev_pk'
db.delete_column('repchantest_page', 'version_parent_rev_pk_id')
# Deleting field 'Page.version_have_children'
db.delete_column('repchantest_page', 'version_have_children')
# Deleting field 'Page.version_date'
db.delete_column('repchantest_page', 'version_date')
# Deleting field 'Page.version_hash'
db.delete_column('repchantest_page', 'version_hash')
# Deleting field 'Page.version_in_trash'
db.delete_column('repchantest_page', 'version_in_trash')
# Deleting field 'Book.version_parent_pk'
db.delete_column('repchantest_book', 'version_parent_pk_id')
# Deleting field 'Book.version_parent_rev_pk'
db.delete_column('repchantest_book', 'version_parent_rev_pk_id')
# Deleting field 'Book.version_have_children'
db.delete_column('repchantest_book', 'version_have_children')
# Deleting field 'Book.version_date'
db.delete_column('repchantest_book', 'version_date')
# Deleting field 'Book.version_hash'
db.delete_column('repchantest_book', 'version_hash')
# Deleting field 'Book.version_in_trash'
db.delete_column('repchantest_book', 'version_in_trash')
models = {
'repchantest.author': {
'Meta': {'unique_together': "(('name', 'surname'),)", 'object_name': 'Author'},
'author_alias': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['repchantest.AuthorAlias']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'surname': ('django.db.models.fields.CharField', [], {'max_length': '150'})
},
'repchantest.authoralias': {
'Meta': {'object_name': 'AuthorAlias'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50'}),
'version_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'version_hash': ('django.db.models.fields.CharField', [], {'max_length': '512', 'null': 'True', 'blank': 'True'}),
'version_have_children': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'version_in_trash': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'version_parent_pk': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'repchantest_authoralias_parentpk'", 'null': 'True', 'to': "orm['repchantest.AuthorAlias']"}),
'version_parent_rev_pk': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'repchantest_authoralias_parentverpk'", 'null': 'True', 'to': "orm['repchantest.AuthorAlias']"})
},
'repchantest.book': {
'Meta': {'object_name': 'Book'},
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['repchantest.Author']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'log': ('django.db.models.fields.CharField', [], {'max_length': '5', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'version_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'version_hash': ('django.db.models.fields.CharField', [], {'max_length': '512', 'null': 'True', 'blank': 'True'}),
'version_have_children': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'version_in_trash': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'version_parent_pk': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'repchantest_book_parentpk'", 'null': 'True', 'to': "orm['repchantest.Book']"}),
'version_parent_rev_pk': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'repchantest_book_parentverpk'", 'null': 'True', 'to': "orm['repchantest.Book']"})
},
'repchantest.chapter': {
'Meta': {'object_name': 'Chapter'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '150'})
},
'repchantest.page': {
'Meta': {'unique_together': "(('book', 'page_number'),)", 'object_name': 'Page'},
'book': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['repchantest.Book']"}),
'chapter': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['repchantest.Chapter']"}),
'contents': ('django.db.models.fields.TextField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'notes': ('django.db.models.fields.TextField', [], {}),
'page_number': ('django.db.models.fields.IntegerField', [], {}),
'section': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['repchantest.Section']"}),
'version_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'version_hash': ('django.db.models.fields.CharField', [], {'max_length': '512', 'null': 'True', 'blank': 'True'}),
'version_have_children': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'version_in_trash': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'version_parent_pk': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'repchantest_page_parentpk'", 'null': 'True', 'to': "orm['repchantest.Page']"}),
'version_parent_rev_pk': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'repchantest_page_parentverpk'", 'null': 'True', 'to': "orm['repchantest.Page']"})
},
'repchantest.section': {
'Meta': {'object_name': 'Section'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '150'}),
'version_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'version_hash': ('django.db.models.fields.CharField', [], {'max_length': '512', 'null': 'True', 'blank': 'True'}),
'version_have_children': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'version_in_trash': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'version_parent_pk': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'repchantest_section_parentpk'", 'null': 'True', 'to': "orm['repchantest.Section']"}),
'version_parent_rev_pk': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'repchantest_section_parentverpk'", 'null': 'True', 'to': "orm['repchantest.Section']"})
}
}
complete_apps = ['repchantest']
|
|
import urllib.request
import urllib.parse
import xml.dom.minidom
def test_latest(name, **query):
revision = get_revision(name)
output = get_output(name, revision, query)
return output
def get_output(name, revision, query):
string = run_utility('xml', name, revision, query)
middle = string.split('<', 1)[1].rsplit('>', 1)[0]
dom = xml.dom.minidom.parseString('<' + middle + '>')
value = extract_output(dom)
dom.unlink()
return value
def run_utility(api, name, revision, query):
url = 'http://utilitymill.com/api/{0}/utility/{1}/{2}/run?{3}'
params = urllib.parse.urlencode(query)
file = urllib.request.urlopen(url.format(api, name, revision, params))
# dom = xml.dom.minidom.parse(file)
return file.read().decode()
def extract_output(dom):
elements = dom.getElementsByTagName('output')
assert len(elements) == 1, 'XML Error'
output = elements[0]
assert len(output.childNodes) == 1, 'XML Error'
child = output.childNodes[0]
assert child.nodeType == child.CDATA_SECTION_NODE, 'XML Error'
return child.nodeValue
def get_revision(name):
string = info_utility('xml', name)
middle = string.split('<', 1)[1].rsplit('>', 1)[0]
dom = xml.dom.minidom.parseString('<' + middle + '>')
value = extract_number(dom)
dom.unlink()
return value
def info_utility(api, name):
url = 'http://utilitymill.com/api/{0}/utility/{1}/info'
file = urllib.request.urlopen(url.format(api, name))
return file.read().decode()
def extract_number(dom):
elements = dom.getElementsByTagName('number')
assert len(elements) == 1, 'XML Error'
number = elements[0]
assert len(number.childNodes) == 1, 'XML Error'
child = number.childNodes[0]
assert child.nodeType == child.TEXT_NODE, 'XML Error'
return child.nodeValue
################################################################################
import random
import spice
import time
# Python 2.5 Hack
__builtins__.xrange = __builtins__.range
def hack1(*args):
return list(xrange(*args))
__builtins__.range = hack1
__builtins__.xmap = __builtins__.map
def hack2(*args):
return list(xmap(*args))
__builtins__.map = hack2
def _decode(string, map_1, map_2):
'Private module function.'
cache = ''
iterator = iter(string)
for byte in iterator:
bits_12 = map_1[ord(byte)] << 6
bits_34 = map_1[ord(next(iterator))] << 4
bits_56 = map_1[ord(next(iterator))] << 2
bits_78 = map_1[ord(next(iterator))]
cache += map_2[bits_12 + bits_34 + bits_56 + bits_78]
return cache
spice._decode = _decode
# END
def main():
try:
while True:
print('Testing', end=' ')
choice = random.randrange(3)
if choice == 1:
# Test "Create Keys" Action
print('Create', end=' ')
choice = random.randrange(4)
if choice == 1:
# Test No Names
print('No Names', end=' ... ')
test_create()
elif choice == 2:
# Test Major Name
print('Major Name', end=' ... ')
n1 = verse()
major, minor = test_create(MAJOR_NAME=n1)
assert major == spice.named_major(n1)
elif choice == 3:
# Test Minor Name
print('Minor Name', end=' ... ')
n2 = verse()
major, minor = test_create(MINOR_NAME=n2)
assert minor == spice.named_minor(n2)
else:
# Test Both Names
print('Both Names', end=' ... ')
n1, n2 = verse(), verse()
major, minor = test_create(MAJOR_NAME=n1, MINOR_NAME=n2)
assert major == spice.named_major(n1)
assert minor == spice.named_minor(n2)
elif choice == 2:
# Test "Encode Input" Action
print('Encode', end=' ... ')
major = spice.crypt_major()
minor = spice.crypt_minor()
data = verse()
encoded = test_encode(major, minor, data)
decoded = spice.decode_string(encoded, major, minor)
assert decoded == data
else:
# Test "Decode Input" Action
print('Decode', end=' ... ')
major = spice.crypt_major()
minor = spice.crypt_minor()
data = verse()
encoded = spice.encode_string(data, major, minor)
decoded = test_decode(major, minor, encoded)
assert decoded == data
print('PASS')
time.sleep(60)
except:
print('FAIL')
def test_create(**query):
output = test_latest('SPICE_Text', ACTION='Create Keys', **query)
x, x, major, x, x, x, minor = output.split('\n')
major = hex2bin(major)
minor = hex2bin(minor)
spice._check_major(major)
spice._check_minor(minor)
return major, minor
def test_encode(major, minor, data):
hma = bin2hex(major)
hmi = bin2hex(minor)
output = test_latest('SPICE_Text', ACTION='Encode Input',
MAJOR_KEY=hma, MINOR_KEY=hmi, INPUT=data)
encoded = hex2bin(output.replace('\r', '').replace('\n', ''))
return encoded
def test_decode(major, minor, data):
hma = bin2hex(major)
hmi = bin2hex(minor)
hda = bin2hex(data)
decoded = test_latest('SPICE_Text', ACTION='Decode Input',
MAJOR_KEY=hma, MINOR_KEY=hmi, INPUT=hda)
return decoded
def bin2hex(x):
return ''.join('%02X' % ord(y) for y in x)
def hex2bin(x):
return ''.join(chr(int(x[y:y+2], 16)) for y in range(0, len(x), 2))
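# Illustrative sanity check, not part of the original utility: bin2hex and
# hex2bin above are intended to be inverses, so a round trip through both
# should reproduce the input string of 8-bit characters unchanged. The helper
# name below is an assumption made for this sketch only.
def _hex_roundtrip_ok(sample='spice'):
    return hex2bin(bin2hex(sample)) == sample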
################################################################################
def verse():
return random.choice(random.choice(random.choice(BIBLE)))
def load_bible():
global BIBLE
try:
bible = open('bible13.txt', 'r').read()
except:
bible = get_bible()
open('bible13.txt', 'w').write(bible)
win_fix = bible.replace('\r\n', '\n')
mac_fix = win_fix.replace('\r', '\n')
BIBLE = parse_bible(mac_fix)
def get_bible():
url = 'http://www.gutenberg.org/dirs/etext92/bible13.txt'
file = urllib.request.urlopen(url)
return file.read().decode()
def parse_bible(string):
'Parse Bible and return 3D array.'
book = chap = vers = 1
form = '%02u:%03u:%03u'
book_s, chap_s, vers_s = [], [], []
start = 0
while True:
try:
start = string.index(form % (book, chap, vers), start) + 11
end = string.index('\n\n', start)
vers_s.append(' '.join(string[start:end].replace('\n', '').split()))
start = end
vers += 1
except:
if vers != 1:
chap_s.append(vers_s)
vers_s = []
chap += 1
vers = 1
elif chap != 1:
book_s.append(chap_s)
chap_s = []
book += 1
chap = 1
elif book != 1:
return book_s
else:
raise EOFError
################################################################################
if __name__ == '__main__':
load_bible()
main()
|
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Ftrl-proximal for TensorFlow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.keras.optimizer_v2 import optimizer_v2
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.training import training_ops
from tensorflow.python.util.tf_export import tf_export
@tf_export('keras.optimizers.Ftrl')
class Ftrl(optimizer_v2.OptimizerV2):
"""Optimizer that implements the FTRL algorithm.
See this [paper](
https://www.eecs.tufts.edu/~dsculley/papers/ad-click-prediction.pdf).
This version has support for both online L2 (the L2 penalty given in the paper
above) and shrinkage-type L2 (which is the addition of an L2 penalty to the
loss function).
"""
def __init__(self,
learning_rate,
learning_rate_power=-0.5,
initial_accumulator_value=0.1,
l1_regularization_strength=0.0,
l2_regularization_strength=0.0,
name='Ftrl',
l2_shrinkage_regularization_strength=0.0,
**kwargs):
r"""Construct a new FTRL optimizer.
Args:
learning_rate: A float value or a constant float `Tensor`.
      learning_rate_power: A float value, must be less than or equal to zero.
Controls how the learning rate decreases during training. Use zero for
a fixed learning rate.
initial_accumulator_value: The starting value for accumulators.
Only zero or positive values are allowed.
l1_regularization_strength: A float value, must be greater than or
equal to zero.
l2_regularization_strength: A float value, must be greater than or
equal to zero.
name: Optional name prefix for the operations created when applying
gradients. Defaults to "Ftrl".
l2_shrinkage_regularization_strength: A float value, must be greater than
or equal to zero. This differs from L2 above in that the L2 above is a
stabilization penalty, whereas this L2 shrinkage is a magnitude penalty.
The FTRL formulation can be written as:
w_{t+1} = argmin_w(\hat{g}_{1:t}w + L1*||w||_1 + L2*||w||_2^2), where
\hat{g} = g + (2*L2_shrinkage*w), and g is the gradient of the loss
function w.r.t. the weights w.
Specifically, in the absence of L1 regularization, it is equivalent to
the following update rule:
w_{t+1} = w_t - lr_t / (1 + 2*L2*lr_t) * g_t -
2*L2_shrinkage*lr_t / (1 + 2*L2*lr_t) * w_t
where lr_t is the learning rate at t.
        When input is sparse, shrinkage will only happen on the active weights.
**kwargs: keyword arguments. Allowed to be {`decay`}
Raises:
ValueError: If one of the arguments is invalid.
References
See [paper]
(https://www.eecs.tufts.edu/~dsculley/papers/ad-click-prediction.pdf)
"""
super(Ftrl, self).__init__(name, **kwargs)
if initial_accumulator_value < 0.0:
raise ValueError(
'initial_accumulator_value %f needs to be positive or zero' %
initial_accumulator_value)
if learning_rate_power > 0.0:
raise ValueError('learning_rate_power %f needs to be negative or zero' %
learning_rate_power)
if l1_regularization_strength < 0.0:
raise ValueError(
'l1_regularization_strength %f needs to be positive or zero' %
l1_regularization_strength)
if l2_regularization_strength < 0.0:
raise ValueError(
'l2_regularization_strength %f needs to be positive or zero' %
l2_regularization_strength)
if l2_shrinkage_regularization_strength < 0.0:
raise ValueError(
'l2_shrinkage_regularization_strength %f needs to be positive'
' or zero' % l2_shrinkage_regularization_strength)
self._set_hyper('learning_rate', learning_rate)
self._set_hyper('decay', self._initial_decay)
self._set_hyper('learning_rate_power', learning_rate_power)
self._set_hyper('l1_regularization_strength', l1_regularization_strength)
self._set_hyper('l2_regularization_strength', l2_regularization_strength)
self._initial_accumulator_value = initial_accumulator_value
self._l2_shrinkage_regularization_strength = (
l2_shrinkage_regularization_strength)
def _create_slots(self, var_list):
# Create the "accum" and "linear" slots.
for var in var_list:
dtype = var.dtype.base_dtype
init = init_ops.constant_initializer(
self._initial_accumulator_value, dtype=dtype)
self.add_slot(var, 'accumulator', init)
self.add_slot(var, 'linear')
def _resource_apply_dense(self, grad, var):
var_dtype = var.dtype.base_dtype
lr_t = self._decayed_lr(var_dtype)
learning_rate_power = self._get_hyper('learning_rate_power', var_dtype)
l1_regularization_strength = self._get_hyper('l1_regularization_strength',
var_dtype)
l2_regularization_strength = self._get_hyper('l2_regularization_strength',
var_dtype)
accum = self.get_slot(var, 'accumulator')
linear = self.get_slot(var, 'linear')
if self._l2_shrinkage_regularization_strength <= 0.0:
return training_ops.resource_apply_ftrl(
var.handle,
accum.handle,
linear.handle,
grad,
lr_t,
l1_regularization_strength,
l2_regularization_strength,
learning_rate_power,
use_locking=self._use_locking)
else:
return training_ops.resource_apply_ftrl_v2(
var.handle,
accum.handle,
linear.handle,
grad,
lr_t,
l1_regularization_strength,
l2_regularization_strength,
math_ops.cast(self._l2_shrinkage_regularization_strength, var_dtype),
learning_rate_power,
use_locking=self._use_locking)
def _resource_apply_sparse(self, grad, var, indices):
var_dtype = var.dtype.base_dtype
lr_t = self._decayed_lr(var_dtype)
learning_rate_power = self._get_hyper('learning_rate_power', var_dtype)
l1_regularization_strength = self._get_hyper('l1_regularization_strength',
var_dtype)
l2_regularization_strength = self._get_hyper('l2_regularization_strength',
var_dtype)
accum = self.get_slot(var, 'accumulator')
linear = self.get_slot(var, 'linear')
if self._l2_shrinkage_regularization_strength <= 0.0:
return training_ops.resource_sparse_apply_ftrl(
var.handle,
accum.handle,
linear.handle,
grad,
indices,
lr_t,
l1_regularization_strength,
l2_regularization_strength,
learning_rate_power,
use_locking=self._use_locking)
else:
return training_ops.resource_sparse_apply_ftrl_v2(
var.handle,
accum.handle,
linear.handle,
grad,
indices,
lr_t,
l1_regularization_strength,
l2_regularization_strength,
math_ops.cast(self._l2_shrinkage_regularization_strength, var_dtype),
learning_rate_power,
use_locking=self._use_locking)
def get_config(self):
config = super(Ftrl, self).get_config()
config.update({
'learning_rate':
self._serialize_hyperparameter('learning_rate'),
'decay':
self._serialize_hyperparameter('decay'),
'initial_accumulator_value':
self._initial_accumulator_value,
'learning_rate_power':
self._serialize_hyperparameter('learning_rate_power'),
'l1_regularization_strength':
            self._serialize_hyperparameter('l1_regularization_strength'),
'l2_regularization_strength':
            self._serialize_hyperparameter('l2_regularization_strength'),
'l2_shrinkage_regularization_strength':
self._l2_shrinkage_regularization_strength,
})
return config
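# A minimal plain-Python sketch (not part of the TensorFlow API; the helper
# name and single-scalar-weight framing are illustrative assumptions) of the
# dense update rule quoted in the Ftrl docstring for the case without L1:
#   w_{t+1} = w_t - lr_t / (1 + 2*L2*lr_t) * g_t
#                 - 2*L2_shrinkage*lr_t / (1 + 2*L2*lr_t) * w_t
def _ftrl_dense_step_sketch(w, g, lr, l2, l2_shrinkage):
  denom = 1.0 + 2.0 * l2 * lr  # common denominator shared by both terms
  return w - (lr / denom) * g - (2.0 * l2_shrinkage * lr / denom) * w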
|
|
# Copyright 2014 PressLabs SRL
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from stat import S_IFDIR, S_IFREG
import pytest
from mock import MagicMock, patch
from pygit2 import GIT_FILEMODE_TREE
from fuse import FuseOSError
from gitfs.views.commit import CommitView
class TestCommitView(object):
def test_readdir_without_tree_name(self):
mocked_repo = MagicMock()
mocked_commit = MagicMock()
mocked_entry = MagicMock()
mocked_entry.name = "entry"
mocked_commit.tree = [mocked_entry]
mocked_repo.revparse_single.return_value = mocked_commit
view = CommitView(repo=mocked_repo, commit_sha1="sha1")
with patch('gitfs.views.commit.os') as mocked_os:
mocked_os.path.split.return_value = [None, None]
dirs = [entry for entry in view.readdir("/path", 0)]
assert dirs == [".", "..", "entry"]
mocked_os.path.split.assert_called_once_with("/path")
def test_readdir_with_tree_name(self):
mocked_repo = MagicMock()
mocked_commit = MagicMock()
mocked_entry = MagicMock()
mocked_entry.name = "entry"
mocked_commit.tree = "tree"
mocked_repo.revparse_single.return_value = mocked_commit
mocked_repo.get_git_object.return_value = [mocked_entry]
view = CommitView(repo=mocked_repo, commit_sha1="sha1")
with patch('gitfs.views.commit.os') as mocked_os:
mocked_os.path.split.return_value = [None, True]
dirs = [entry for entry in view.readdir("/path", 0)]
assert dirs == [".", "..", "entry"]
mocked_os.path.split.assert_called_once_with("/path")
mocked_repo.get_git_object.assert_called_once_with("tree", "/path")
def test_access_with_missing_relative_path(self):
mocked_repo = MagicMock()
mocked_commit = MagicMock()
mocked_repo.revparse_single.return_value = mocked_commit
view = CommitView(repo=mocked_repo, commit_sha1="sha1")
assert view.access("path", "mode") == 0
def test_access_with_invalid_relative_path(self):
mocked_repo = MagicMock()
mocked_commit = MagicMock()
mocked_repo.revparse_single.return_value = mocked_commit
view = CommitView(repo=mocked_repo, commit_sha1="sha1")
view.relative_path = "/"
assert view.access("path", "mode") == 0
def test_access_with_invalid_path(self):
mocked_repo = MagicMock()
mocked_validation = MagicMock()
mocked_commit = MagicMock()
mocked_commit.tree = "tree"
mocked_repo.revparse_single.return_value = mocked_commit
mocked_validation.return_value = False
with patch("gitfs.views.commit.split_path_into_components") as split:
split.return_value = "elements"
view = CommitView(repo=mocked_repo, commit_sha1="sha1")
view._validate_commit_path = mocked_validation
view.relative_path = "relative_path"
with pytest.raises(FuseOSError):
view.access("path", "mode")
split.assert_called_once_with("relative_path")
mocked_validation.assert_called_once_with("tree", "elements")
def test_getattr_with_no_path(self):
mocked_repo = MagicMock()
mocked_commit = MagicMock()
mocked_commit.tree = "tree"
mocked_repo.revparse_single.return_value = mocked_commit
view = CommitView(repo=mocked_repo, commit_sha1="sha1")
assert view.getattr(False, 1) is None
def test_getattr_with_simple_path(self):
mocked_repo = MagicMock()
mocked_commit = MagicMock()
stats = {
'st_mode': S_IFDIR | 0555,
'st_nlink': 2
}
mocked_commit.tree = "tree"
mocked_commit.commit_time = "now+1"
mocked_repo.revparse_single.return_value = mocked_commit
mocked_repo.get_git_object_default_stats.return_value = stats
view = CommitView(repo=mocked_repo, commit_sha1="sha1",
mount_time="now", uid=1, gid=1)
result = view.getattr("/", 1)
asserted_result = {
'st_uid': 1,
'st_gid': 1,
'st_mtime': "now+1",
'st_ctime': "now+1",
'st_mode': S_IFDIR | 0555,
'st_nlink': 2
}
assert result == asserted_result
def test_getattr_with_invalid_object_type(self):
mocked_repo = MagicMock()
mocked_commit = MagicMock()
mocked_commit.tree = "tree"
mocked_commit.commit_time = "now+1"
mocked_repo.revparse_single.return_value = mocked_commit
mocked_repo.get_git_object_default_stats.return_value = None
view = CommitView(repo=mocked_repo, commit_sha1="sha1",
mount_time="now", uid=1, gid=1)
with pytest.raises(FuseOSError):
view.getattr("/path", 1)
args = ("tree", "/path")
mocked_repo.get_git_object_default_stats.assert_called_once_with(*args)
def test_getattr_for_a_valid_file(self):
mocked_repo = MagicMock()
mocked_commit = MagicMock()
mocked_commit.tree = "tree"
mocked_commit.commit_time = "now+1"
mocked_repo.revparse_single.return_value = mocked_commit
mocked_repo.get_git_object_default_stats.return_value = {
'st_mode': S_IFREG | 0444,
'st_size': 10
}
view = CommitView(repo=mocked_repo, commit_sha1="sha1",
mount_time="now", uid=1, gid=1)
result = view.getattr("/path", 1)
asserted_result = {
'st_uid': 1,
'st_gid': 1,
'st_mtime': "now+1",
'st_ctime': "now+1",
'st_mode': S_IFREG | 0444,
'st_size': 10
}
assert result == asserted_result
args = ("tree", "/path")
mocked_repo.get_git_object_default_stats.assert_called_once_with(*args)
def test_readlink(self):
mocked_repo = MagicMock()
mocked_commit = MagicMock()
mocked_commit.tree = "tree"
mocked_repo.revparse_single.return_value = mocked_commit
mocked_repo.get_blob_data.return_value = "link value"
with patch('gitfs.views.commit.os') as mocked_os:
mocked_os.path.split.return_value = ["name", "another_name"]
view = CommitView(repo=mocked_repo, commit_sha1="sha1",
mount_time="now", uid=1, gid=1)
assert view.readlink("/path") == "link value"
mocked_os.path.split.assert_called_once_with("/path")
mocked_repo.get_blob_data.assert_called_once_with("tree",
"another_name")
def test_read(self):
mocked_repo = MagicMock()
mocked_commit = MagicMock()
mocked_commit.tree = "tree"
mocked_repo.revparse_single.return_value = mocked_commit
mocked_repo.get_blob_data.return_value = [1, 1, 1]
view = CommitView(repo=mocked_repo, commit_sha1="sha1",
mount_time="now", uid=1, gid=1)
assert view.read("/path", 1, 1, 0) == [1]
mocked_repo.get_blob_data.assert_called_once_with("tree", "/path")
def test_validate_commit_path_with_no_entries(self):
mocked_repo = MagicMock()
mocked_commit = MagicMock()
mocked_commit.tree = "tree"
mocked_repo.revparse_single.return_value = mocked_commit
view = CommitView(repo=mocked_repo, commit_sha1="sha1",
mount_time="now", uid=1, gid=1)
assert view._validate_commit_path([], "") is False
def test_validate_commit_path_with_trees(self):
mocked_repo = MagicMock()
mocked_commit = MagicMock()
mocked_entry = MagicMock()
mocked_commit.tree = "tree"
mocked_repo.revparse_single.return_value = mocked_commit
mocked_entry.name = "simple_entry"
mocked_entry.filemode = GIT_FILEMODE_TREE
view = CommitView(repo=mocked_repo, commit_sha1="sha1",
mount_time="now", uid=1, gid=1)
result = view._validate_commit_path([mocked_entry], ["simple_entry"])
assert result is True
def test_validate_commit_path_with_more_than_one_entry(self):
mocked_repo = MagicMock()
mocked_commit = MagicMock()
mocked_entry = MagicMock()
mocked_second_entry = MagicMock()
mocked_commit.tree = "tree"
mocked_repo.revparse_single.return_value = mocked_commit
mocked_second_entry.id = 1
mocked_second_entry.name = "complex_entry"
mocked_second_entry.filemode = GIT_FILEMODE_TREE
mocked_entry.name = "simple_entry"
mocked_entry.filemode = GIT_FILEMODE_TREE
mocked_repo.__getitem__.return_value = [mocked_entry]
view = CommitView(repo=mocked_repo, commit_sha1="sha1",
mount_time="now", uid=1, gid=1)
result = view._validate_commit_path([mocked_second_entry,
mocked_entry],
["complex_entry",
"simple_entry"])
assert result is True
mocked_repo.__getitem__.assert_called_once_with(1)
def test_init_with_invalid_commit_sha1(self):
mocked_repo = MagicMock()
mocked_repo.revparse_single.side_effect = KeyError
with pytest.raises(FuseOSError):
CommitView(repo=mocked_repo, commit_sha1="sha1")
mocked_repo.revparse_single.assert_called_once_with("sha1")
|
|
# -*- coding: utf-8 -*-
"""
sphinx.writers.manpage
~~~~~~~~~~~~~~~~~~~~~~
Manual page writer, extended for Sphinx custom nodes.
:copyright: Copyright 2007-2013 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from docutils import nodes
try:
from docutils.writers.manpage import MACRO_DEF, Writer, \
Translator as BaseTranslator
has_manpage_writer = True
except ImportError:
# define the classes in any case, sphinx.application needs it
Writer = BaseTranslator = object
has_manpage_writer = False
from sphinx import addnodes
from sphinx.locale import admonitionlabels, _
from sphinx.util.osutil import ustrftime
from sphinx.util.compat import docutils_version
class ManualPageWriter(Writer):
def __init__(self, builder):
Writer.__init__(self)
self.builder = builder
def translate(self):
visitor = ManualPageTranslator(self.builder, self.document)
self.visitor = visitor
self.document.walkabout(visitor)
self.output = visitor.astext()
class ManualPageTranslator(BaseTranslator):
"""
Custom translator.
"""
def __init__(self, builder, *args, **kwds):
BaseTranslator.__init__(self, *args, **kwds)
self.builder = builder
self.in_productionlist = 0
# first title is the manpage title
self.section_level = -1
# docinfo set by man_pages config value
self._docinfo['title'] = self.document.settings.title
self._docinfo['subtitle'] = self.document.settings.subtitle
if self.document.settings.authors:
# don't set it if no author given
self._docinfo['author'] = self.document.settings.authors
self._docinfo['manual_section'] = self.document.settings.section
# docinfo set by other config values
self._docinfo['title_upper'] = self._docinfo['title'].upper()
if builder.config.today:
self._docinfo['date'] = builder.config.today
else:
self._docinfo['date'] = ustrftime(builder.config.today_fmt
or _('%B %d, %Y'))
self._docinfo['copyright'] = builder.config.copyright
self._docinfo['version'] = builder.config.version
self._docinfo['manual_group'] = builder.config.project
# In docutils < 0.11 self.append_header() was never called
if docutils_version < (0, 11):
self.body.append(MACRO_DEF)
# Overwrite admonition label translations with our own
for label, translation in admonitionlabels.items():
self.language.labels[label] = self.deunicode(translation)
# overwritten -- added quotes around all .TH arguments
def header(self):
tmpl = (".TH \"%(title_upper)s\" \"%(manual_section)s\""
" \"%(date)s\" \"%(version)s\" \"%(manual_group)s\"\n"
".SH NAME\n"
"%(title)s \- %(subtitle)s\n")
return tmpl % self._docinfo
def visit_start_of_file(self, node):
pass
def depart_start_of_file(self, node):
pass
def visit_desc(self, node):
self.visit_definition_list(node)
def depart_desc(self, node):
self.depart_definition_list(node)
def visit_desc_signature(self, node):
self.visit_definition_list_item(node)
self.visit_term(node)
def depart_desc_signature(self, node):
self.depart_term(node)
def visit_desc_addname(self, node):
pass
def depart_desc_addname(self, node):
pass
def visit_desc_type(self, node):
pass
def depart_desc_type(self, node):
pass
def visit_desc_returns(self, node):
self.body.append(' -> ')
def depart_desc_returns(self, node):
pass
def visit_desc_name(self, node):
pass
def depart_desc_name(self, node):
pass
def visit_desc_parameterlist(self, node):
self.body.append('(')
self.first_param = 1
def depart_desc_parameterlist(self, node):
self.body.append(')')
def visit_desc_parameter(self, node):
if not self.first_param:
self.body.append(', ')
else:
self.first_param = 0
def depart_desc_parameter(self, node):
pass
def visit_desc_optional(self, node):
self.body.append('[')
def depart_desc_optional(self, node):
self.body.append(']')
def visit_desc_annotation(self, node):
pass
def depart_desc_annotation(self, node):
pass
def visit_desc_content(self, node):
self.visit_definition(node)
def depart_desc_content(self, node):
self.depart_definition(node)
def visit_versionmodified(self, node):
self.visit_paragraph(node)
def depart_versionmodified(self, node):
self.depart_paragraph(node)
def visit_termsep(self, node):
self.body.append(', ')
raise nodes.SkipNode
# overwritten -- we don't want source comments to show up
def visit_comment(self, node):
raise nodes.SkipNode
# overwritten -- added ensure_eol()
def visit_footnote(self, node):
self.ensure_eol()
BaseTranslator.visit_footnote(self, node)
# overwritten -- handle footnotes rubric
def visit_rubric(self, node):
self.ensure_eol()
if len(node.children) == 1:
rubtitle = node.children[0].astext()
if rubtitle in ('Footnotes', _('Footnotes')):
self.body.append('.SH ' + self.deunicode(rubtitle).upper() +
'\n')
raise nodes.SkipNode
else:
self.body.append('.sp\n')
def depart_rubric(self, node):
pass
def visit_seealso(self, node):
self.visit_admonition(node, 'seealso')
def depart_seealso(self, node):
self.depart_admonition(node)
def visit_productionlist(self, node):
self.ensure_eol()
names = []
self.in_productionlist += 1
self.body.append('.sp\n.nf\n')
for production in node:
names.append(production['tokenname'])
maxlen = max(len(name) for name in names)
lastname = None
for production in node:
if production['tokenname']:
lastname = production['tokenname'].ljust(maxlen)
self.body.append(self.defs['strong'][0])
self.body.append(self.deunicode(lastname))
self.body.append(self.defs['strong'][1])
self.body.append(' ::= ')
elif lastname is not None:
self.body.append('%s ' % (' '*len(lastname)))
production.walkabout(self)
self.body.append('\n')
self.body.append('\n.fi\n')
self.in_productionlist -= 1
raise nodes.SkipNode
def visit_production(self, node):
pass
def depart_production(self, node):
pass
# overwritten -- don't emit a warning for images
def visit_image(self, node):
if 'alt' in node.attributes:
self.body.append(_('[image: %s]') % node['alt'] + '\n')
self.body.append(_('[image]') + '\n')
raise nodes.SkipNode
# overwritten -- don't visit inner marked up nodes
def visit_reference(self, node):
self.body.append(self.defs['reference'][0])
self.visit_Text(node) # avoid repeating escaping code... fine since
# visit_Text calls astext() and only works
# on that afterwards
self.body.append(self.defs['reference'][1])
uri = node.get('refuri', '')
if uri.startswith('mailto:') or uri.startswith('http:') or \
uri.startswith('https:') or uri.startswith('ftp:'):
# if configured, put the URL after the link
if self.builder.config.man_show_urls and \
node.astext() != uri:
if uri.startswith('mailto:'):
uri = uri[7:]
self.body.extend([
' <',
self.defs['strong'][0], uri, self.defs['strong'][1],
'>'])
raise nodes.SkipNode
def visit_centered(self, node):
self.ensure_eol()
self.body.append('.sp\n.ce\n')
def depart_centered(self, node):
self.body.append('\n.ce 0\n')
def visit_compact_paragraph(self, node):
pass
def depart_compact_paragraph(self, node):
pass
def visit_highlightlang(self, node):
pass
def depart_highlightlang(self, node):
pass
def visit_download_reference(self, node):
pass
def depart_download_reference(self, node):
pass
def visit_toctree(self, node):
raise nodes.SkipNode
def visit_index(self, node):
raise nodes.SkipNode
def visit_tabular_col_spec(self, node):
raise nodes.SkipNode
def visit_glossary(self, node):
pass
def depart_glossary(self, node):
pass
def visit_acks(self, node):
self.ensure_eol()
self.body.append(', '.join(n.astext()
for n in node.children[0].children) + '.')
self.body.append('\n')
raise nodes.SkipNode
def visit_hlist(self, node):
self.visit_bullet_list(node)
def depart_hlist(self, node):
self.depart_bullet_list(node)
def visit_hlistcol(self, node):
pass
def depart_hlistcol(self, node):
pass
def visit_literal_emphasis(self, node):
return self.visit_emphasis(node)
def depart_literal_emphasis(self, node):
return self.depart_emphasis(node)
def visit_abbreviation(self, node):
pass
def depart_abbreviation(self, node):
pass
# overwritten: handle section titles better than in 0.6 release
def visit_title(self, node):
if isinstance(node.parent, addnodes.seealso):
self.body.append('.IP "')
return
elif isinstance(node.parent, nodes.section):
if self.section_level == 0:
# skip the document title
raise nodes.SkipNode
elif self.section_level == 1:
self.body.append('.SH %s\n' %
self.deunicode(node.astext().upper()))
raise nodes.SkipNode
return BaseTranslator.visit_title(self, node)
def depart_title(self, node):
if isinstance(node.parent, addnodes.seealso):
self.body.append('"\n')
return
return BaseTranslator.depart_title(self, node)
def visit_raw(self, node):
if 'manpage' in node.get('format', '').split():
self.body.append(node.astext())
raise nodes.SkipNode
def visit_meta(self, node):
raise nodes.SkipNode
def visit_inline(self, node):
pass
def depart_inline(self, node):
pass
def visit_math(self, node):
self.builder.warn('using "math" markup without a Sphinx math extension '
'active, please use one of the math extensions '
'described at http://sphinx-doc.org/ext/math.html')
raise nodes.SkipNode
visit_math_block = visit_math
def unknown_visit(self, node):
raise NotImplementedError('Unknown node: ' + node.__class__.__name__)
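# Illustrative only: with hypothetical docinfo values such as
# title='sphinx-build', title_upper='SPHINX-BUILD', manual_section='1',
# date='July 24, 2013', version='1.2', manual_group='Sphinx' and
# subtitle='Sphinx documentation generator tool', the overridden header()
# above would emit roughly:
#   .TH "SPHINX-BUILD" "1" "July 24, 2013" "1.2" "Sphinx"
#   .SH NAME
#   sphinx-build \- Sphinx documentation generator tool
# i.e. every .TH argument is wrapped in double quotes, which is what the
# "added quotes around all .TH arguments" override is for.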
|
|
"""Support for Australian BOM (Bureau of Meteorology) weather service."""
import datetime
import ftplib
import gzip
import io
import json
import logging
import os
import re
import zipfile
import requests
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
import homeassistant.util.dt as dt_util
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (
CONF_MONITORED_CONDITIONS,
TEMP_CELSIUS,
CONF_NAME,
ATTR_ATTRIBUTION,
CONF_LATITUDE,
CONF_LONGITUDE,
)
from homeassistant.helpers.entity import Entity
from homeassistant.util import Throttle
_RESOURCE = "http://www.bom.gov.au/fwo/{}/{}.{}.json"
_LOGGER = logging.getLogger(__name__)
ATTR_LAST_UPDATE = "last_update"
ATTR_SENSOR_ID = "sensor_id"
ATTR_STATION_ID = "station_id"
ATTR_STATION_NAME = "station_name"
ATTR_ZONE_ID = "zone_id"
ATTRIBUTION = "Data provided by the Australian Bureau of Meteorology"
CONF_STATION = "station"
CONF_ZONE_ID = "zone_id"
CONF_WMO_ID = "wmo_id"
MIN_TIME_BETWEEN_UPDATES = datetime.timedelta(seconds=60)
SENSOR_TYPES = {
"wmo": ["wmo", None],
"name": ["Station Name", None],
"history_product": ["Zone", None],
"local_date_time": ["Local Time", None],
"local_date_time_full": ["Local Time Full", None],
"aifstime_utc": ["UTC Time Full", None],
"lat": ["Lat", None],
"lon": ["Long", None],
"apparent_t": ["Feels Like C", TEMP_CELSIUS],
"cloud": ["Cloud", None],
"cloud_base_m": ["Cloud Base", None],
"cloud_oktas": ["Cloud Oktas", None],
"cloud_type_id": ["Cloud Type ID", None],
"cloud_type": ["Cloud Type", None],
"delta_t": ["Delta Temp C", TEMP_CELSIUS],
"gust_kmh": ["Wind Gust kmh", "km/h"],
"gust_kt": ["Wind Gust kt", "kt"],
"air_temp": ["Air Temp C", TEMP_CELSIUS],
"dewpt": ["Dew Point C", TEMP_CELSIUS],
"press": ["Pressure mb", "mbar"],
"press_qnh": ["Pressure qnh", "qnh"],
"press_msl": ["Pressure msl", "msl"],
"press_tend": ["Pressure Tend", None],
"rain_trace": ["Rain Today", "mm"],
"rel_hum": ["Relative Humidity", "%"],
"sea_state": ["Sea State", None],
"swell_dir_worded": ["Swell Direction", None],
"swell_height": ["Swell Height", "m"],
"swell_period": ["Swell Period", None],
"vis_km": ["Visability km", "km"],
"weather": ["Weather", None],
"wind_dir": ["Wind Direction", None],
"wind_spd_kmh": ["Wind Speed kmh", "km/h"],
"wind_spd_kt": ["Wind Speed kt", "kt"],
}
def validate_station(station):
"""Check that the station ID is well-formed."""
if station is None:
return
station = station.replace(".shtml", "")
if not re.fullmatch(r"ID[A-Z]\d\d\d\d\d\.\d\d\d\d\d", station):
raise vol.error.Invalid("Malformed station ID")
return station
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Inclusive(CONF_ZONE_ID, "Deprecated partial station ID"): cv.string,
vol.Inclusive(CONF_WMO_ID, "Deprecated partial station ID"): cv.string,
vol.Optional(CONF_NAME): cv.string,
vol.Optional(CONF_STATION): validate_station,
vol.Required(CONF_MONITORED_CONDITIONS, default=[]): vol.All(
cv.ensure_list, [vol.In(SENSOR_TYPES)]
),
}
)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the BOM sensor."""
station = config.get(CONF_STATION)
zone_id, wmo_id = config.get(CONF_ZONE_ID), config.get(CONF_WMO_ID)
if station is not None:
if zone_id and wmo_id:
_LOGGER.warning(
"Using config %s, not %s and %s for BOM sensor",
CONF_STATION,
CONF_ZONE_ID,
CONF_WMO_ID,
)
elif zone_id and wmo_id:
station = f"{zone_id}.{wmo_id}"
else:
station = closest_station(
config.get(CONF_LATITUDE),
config.get(CONF_LONGITUDE),
hass.config.config_dir,
)
if station is None:
_LOGGER.error("Could not get BOM weather station from lat/lon")
return
bom_data = BOMCurrentData(station)
try:
bom_data.update()
except ValueError as err:
_LOGGER.error("Received error from BOM Current: %s", err)
return
add_entities(
[
BOMCurrentSensor(bom_data, variable, config.get(CONF_NAME))
for variable in config[CONF_MONITORED_CONDITIONS]
]
)
class BOMCurrentSensor(Entity):
"""Implementation of a BOM current sensor."""
def __init__(self, bom_data, condition, stationname):
"""Initialize the sensor."""
self.bom_data = bom_data
self._condition = condition
self.stationname = stationname
@property
def name(self):
"""Return the name of the sensor."""
if self.stationname is None:
return "BOM {}".format(SENSOR_TYPES[self._condition][0])
return "BOM {} {}".format(self.stationname, SENSOR_TYPES[self._condition][0])
@property
def state(self):
"""Return the state of the sensor."""
return self.bom_data.get_reading(self._condition)
@property
def device_state_attributes(self):
"""Return the state attributes of the device."""
attr = {
ATTR_ATTRIBUTION: ATTRIBUTION,
ATTR_LAST_UPDATE: self.bom_data.last_updated,
ATTR_SENSOR_ID: self._condition,
ATTR_STATION_ID: self.bom_data.latest_data["wmo"],
ATTR_STATION_NAME: self.bom_data.latest_data["name"],
ATTR_ZONE_ID: self.bom_data.latest_data["history_product"],
}
return attr
@property
def unit_of_measurement(self):
"""Return the units of measurement."""
return SENSOR_TYPES[self._condition][1]
def update(self):
"""Update current conditions."""
self.bom_data.update()
class BOMCurrentData:
"""Get data from BOM."""
def __init__(self, station_id):
"""Initialize the data object."""
self._zone_id, self._wmo_id = station_id.split(".")
self._data = None
self.last_updated = None
def _build_url(self):
"""Build the URL for the requests."""
url = _RESOURCE.format(self._zone_id, self._zone_id, self._wmo_id)
_LOGGER.debug("BOM URL: %s", url)
return url
@property
def latest_data(self):
"""Return the latest data object."""
if self._data:
return self._data[0]
return None
def get_reading(self, condition):
"""Return the value for the given condition.
BOM weather publishes condition readings for weather (and a few other
conditions) at intervals throughout the day. To avoid a `-` value in
the frontend for these conditions, we traverse the historical data
for the latest value that is not `-`.
Iterators are used in this method to avoid iterating needlessly
through the entire BOM provided dataset.
"""
condition_readings = (entry[condition] for entry in self._data)
return next((x for x in condition_readings if x != "-"), None)
def should_update(self):
"""Determine whether an update should occur.
BOM provides updated data every 30 minutes. We manually define
refreshing logic here rather than a throttle to keep updates
in lock-step with BOM.
If 35 minutes has passed since the last BOM data update, then
an update should be done.
"""
if self.last_updated is None:
# Never updated before, therefore an update should occur.
return True
now = dt_util.utcnow()
update_due_at = self.last_updated + datetime.timedelta(minutes=35)
return now > update_due_at
@Throttle(MIN_TIME_BETWEEN_UPDATES)
def update(self):
"""Get the latest data from BOM."""
if not self.should_update():
_LOGGER.debug(
"BOM was updated %s minutes ago, skipping update as"
" < 35 minutes, Now: %s, LastUpdate: %s",
(dt_util.utcnow() - self.last_updated),
dt_util.utcnow(),
self.last_updated,
)
return
try:
result = requests.get(self._build_url(), timeout=10).json()
self._data = result["observations"]["data"]
# set lastupdate using self._data[0] as the first element in the
# array is the latest date in the json
self.last_updated = dt_util.as_utc(
datetime.datetime.strptime(
str(self._data[0]["local_date_time_full"]), "%Y%m%d%H%M%S"
)
)
return
except ValueError as err:
_LOGGER.error("Check BOM %s", err.args)
self._data = None
raise
def _get_bom_stations():
"""Return {CONF_STATION: (lat, lon)} for all stations, for auto-config.
This function does several MB of internet requests, so please use the
caching version to minimise latency and hit-count.
"""
latlon = {}
with io.BytesIO() as file_obj:
with ftplib.FTP("ftp.bom.gov.au") as ftp:
ftp.login()
ftp.cwd("anon2/home/ncc/metadata/sitelists")
ftp.retrbinary("RETR stations.zip", file_obj.write)
file_obj.seek(0)
with zipfile.ZipFile(file_obj) as zipped:
with zipped.open("stations.txt") as station_txt:
for _ in range(4):
station_txt.readline() # skip header
while True:
line = station_txt.readline().decode().strip()
if len(line) < 120:
break # end while loop, ignoring any footer text
wmo, lat, lon = (
line[a:b].strip() for a, b in [(128, 134), (70, 78), (79, 88)]
)
if wmo != "..":
latlon[wmo] = (float(lat), float(lon))
zones = {}
pattern = (
r'<a href="/products/(?P<zone>ID[A-Z]\d\d\d\d\d)/'
r'(?P=zone)\.(?P<wmo>\d\d\d\d\d).shtml">'
)
for state in ("nsw", "vic", "qld", "wa", "tas", "nt"):
url = "http://www.bom.gov.au/{0}/observations/{0}all.shtml".format(state)
for zone_id, wmo_id in re.findall(pattern, requests.get(url).text):
zones[wmo_id] = zone_id
return {"{}.{}".format(zones[k], k): latlon[k] for k in set(latlon) & set(zones)}
def bom_stations(cache_dir):
"""Return {CONF_STATION: (lat, lon)} for all stations, for auto-config.
Results from internet requests are cached as compressed JSON, making
subsequent calls very much faster.
"""
cache_file = os.path.join(cache_dir, ".bom-stations.json.gz")
if not os.path.isfile(cache_file):
stations = _get_bom_stations()
with gzip.open(cache_file, "wt") as cache:
json.dump(stations, cache, sort_keys=True)
return stations
with gzip.open(cache_file, "rt") as cache:
return {k: tuple(v) for k, v in json.load(cache).items()}
def closest_station(lat, lon, cache_dir):
"""Return the ZONE_ID.WMO_ID of the closest station to our lat/lon."""
if lat is None or lon is None or not os.path.isdir(cache_dir):
return
stations = bom_stations(cache_dir)
def comparable_dist(wmo_id):
"""Create a psudeo-distance from latitude/longitude."""
station_lat, station_lon = stations[wmo_id]
return (lat - station_lat) ** 2 + (lon - station_lon) ** 2
return min(stations, key=comparable_dist)
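# Minimal sketch of the generator idiom used in BOMCurrentData.get_reading
# above: walk the observation history (newest first) and return the first
# reading that is not the "-" placeholder. The sample data and helper name
# are illustrative assumptions, not real BOM output.
def _latest_reading_sketch():
    observations = [{"rain_trace": "-"}, {"rain_trace": "0.2"}, {"rain_trace": "0.0"}]
    readings = (entry["rain_trace"] for entry in observations)
    return next((value for value in readings if value != "-"), None)  # -> "0.2"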
|
|
# -*- coding: utf-8 -*-
'''
Module to provide MS SQL Server compatibility to salt.
:depends: - FreeTDS
- pymssql Python module
:configuration: In order to connect to MS SQL Server, certain configuration is
required in minion configs/pillars on the relevant minions. Some sample
pillars might look like::
mssql.server: 'localhost'
mssql.port: 1433
mssql.user: 'sysdba'
mssql.password: 'Some preferable complex password'
mssql.database: ''
The default for the port is '1433' and for the database is '' (empty string);
in most cases they can be left at the default setting.
Options that are directly passed into functions will overwrite options from
configs or pillars.
'''
# Import python libs
from __future__ import absolute_import
from json import JSONEncoder, loads
try:
import pymssql
HAS_ALL_IMPORTS = True
except ImportError:
HAS_ALL_IMPORTS = False
_DEFAULTS = {
'server': 'localhost',
'port': 1433,
'user': 'sysdba',
'password': '',
'database': '',
'as_dict': False
}
def __virtual__():
'''
    Only load this module if all imports succeeded.
'''
if HAS_ALL_IMPORTS:
return True
return False
def _get_connection(**kwargs):
connection_args = {}
for arg in ('server', 'port', 'user', 'password', 'database', 'as_dict'):
if arg in kwargs:
connection_args[arg] = kwargs[arg]
else:
connection_args[arg] = __salt__['config.option']('mssql.'+arg, _DEFAULTS.get(arg, None))
return pymssql.connect(**connection_args)
class _MssqlEncoder(JSONEncoder):
# E0202: 68:_MssqlEncoder.default: An attribute inherited from JSONEncoder hide this method
def default(self, o): # pylint: disable=E0202
return str(o)
def tsql_query(query, **kwargs):
'''
Run a SQL query and return query result as list of tuples, or a list of dictionaries if as_dict was passed, or an empty list if no data is available.
CLI Example:
.. code-block:: bash
salt minion mssql.tsql_query 'SELECT @@version as version' as_dict=True
'''
try:
cur = _get_connection(**kwargs).cursor()
cur.execute(query)
# Making sure the result is JSON serializable
return loads(_MssqlEncoder().encode({'resultset': cur.fetchall()}))['resultset']
except Exception as e:
# Trying to look like the output of cur.fetchall()
return (('Could not run the query', ), (str(e), ))
def version(**kwargs):
'''
Return the version of a MS SQL server.
CLI Example:
.. code-block:: bash
salt minion mssql.version
'''
return tsql_query('SELECT @@version', **kwargs)
def db_list(**kwargs):
'''
    Return the database list created on a MS SQL server.
CLI Example:
.. code-block:: bash
salt minion mssql.db_list
'''
return [row[0] for row in tsql_query('SELECT name FROM sys.databases', as_dict=False, **kwargs)]
def db_exists(database_name, **kwargs):
'''
Find if a specific database exists on the MS SQL server.
CLI Example:
.. code-block:: bash
salt minion mssql.db_exists database_name='DBNAME'
'''
# We should get one, and only one row
return len(tsql_query("SELECT database_id FROM sys.databases WHERE NAME='{0}'".format(database_name), **kwargs)) == 1
def db_remove(database_name, **kwargs):
'''
Drops a specific database from the MS SQL server.
It will not drop any of 'master', 'model', 'msdb' or 'tempdb'.
CLI Example:
.. code-block:: bash
salt minion mssql.db_remove database_name='DBNAME'
'''
try:
if db_exists(database_name) and database_name not in ['master', 'model', 'msdb', 'tempdb']:
conn = _get_connection(**kwargs)
conn.autocommit(True)
cur = conn.cursor()
cur.execute('ALTER DATABASE {0} SET SINGLE_USER WITH ROLLBACK IMMEDIATE'.format(database_name))
cur.execute('DROP DATABASE {0}'.format(database_name))
conn.autocommit(False)
conn.close()
return True
else:
return False
except Exception as e:
return 'Could not find the database: {0}'.format(e)
def role_list(**kwargs):
'''
Lists database roles.
CLI Example:
.. code-block:: bash
salt minion mssql.role_list
'''
return tsql_query(query='sp_helprole', as_dict=True, **kwargs)
def role_exists(role, **kwargs):
'''
Checks if a role exists.
CLI Example:
.. code-block:: bash
salt minion mssql.role_exists db_owner
'''
# We should get one, and only one row
return len(tsql_query(query='sp_helprole "{0}"'.format(role), as_dict=True, **kwargs)) == 1
def role_create(role, owner=None, **kwargs):
'''
Creates a new database role.
If no owner is specified, the role will be owned by the user that
executes CREATE ROLE, which is the user argument or mssql.user option.
CLI Example:
.. code-block:: bash
salt minion mssql.role_create role=product01 owner=sysdba
'''
try:
conn = _get_connection(**kwargs)
conn.autocommit(True)
cur = conn.cursor()
if owner:
cur.execute('CREATE ROLE {0} AUTHORIZATION {1}'.format(role, owner))
else:
cur.execute('CREATE ROLE {0}'.format(role))
        conn.autocommit(False)
conn.close()
return True
except Exception as e:
return 'Could not create the role: {0}'.format(e)
def role_remove(role, **kwargs):
'''
Remove a database role.
CLI Example:
.. code-block:: bash
        salt minion mssql.role_remove role=test_role01
'''
try:
conn = _get_connection(**kwargs)
conn.autocommit(True)
cur = conn.cursor()
cur.execute('DROP ROLE {0}'.format(role))
        conn.autocommit(False)
conn.close()
return True
except Exception as e:
        return 'Could not remove the role: {0}'.format(e)
def login_exists(login, **kwargs):
'''
Find if a login exists in the MS SQL server.
CLI Example:
.. code-block:: bash
salt minion mssql.login_exists 'LOGIN'
'''
try:
# We should get one, and only one row
return len(tsql_query(query="SELECT name FROM sys.syslogins WHERE name='{0}'".format(login), **kwargs)) == 1
except Exception as e:
return 'Could not find the login: {0}'.format(e)
def user_exists(username, **kwargs):
'''
    Find if a user exists in a specific database on the MS SQL server.
Note:
*database* argument is mandatory
CLI Example:
.. code-block:: bash
salt minion mssql.user_exists 'USERNAME' [database='DBNAME']
'''
# 'database' argument is mandatory
if 'database' not in kwargs:
return False
# We should get one, and only one row
return len(tsql_query(query="SELECT name FROM sysusers WHERE name='{0}'".format(username), **kwargs)) == 1
def user_list(**kwargs):
'''
Get the user list for a specific database on the MS SQL server.
CLI Example:
.. code-block:: bash
salt minion mssql.user_list [database='DBNAME']
'''
return [row[0] for row in tsql_query("SELECT name FROM sysusers where issqluser=1 or isntuser=1", as_dict=False, **kwargs)]
def user_create(username, new_login_password=None, **kwargs):
'''
Creates a new user.
If new_login_password is not specified, the user will be created without a login.
CLI Example:
.. code-block:: bash
salt minion mssql.user_create USERNAME database=DBNAME [new_login_password=PASSWORD]
'''
# 'database' argument is mandatory
if 'database' not in kwargs:
return False
if user_exists(username, **kwargs):
return False
try:
conn = _get_connection(**kwargs)
conn.autocommit(True)
cur = conn.cursor()
if new_login_password:
if login_exists(username, **kwargs):
conn.close()
return False
cur.execute("CREATE LOGIN {0} WITH PASSWORD='{1}',check_policy = off".format(username, new_login_password))
cur.execute("CREATE USER {0} FOR LOGIN {1}".format(username, username))
else: # new_login_password is not specified
cur.execute("CREATE USER {0} WITHOUT LOGIN".format(username))
conn.autocommit(False)
conn.close()
return True
except Exception as e:
return 'Could not create the user: {0}'.format(e)
def user_remove(username, **kwargs):
'''
    Removes a user.
CLI Example:
.. code-block:: bash
salt minion mssql.user_remove USERNAME database=DBNAME
'''
# 'database' argument is mandatory
if 'database' not in kwargs:
return False
try:
conn = _get_connection(**kwargs)
conn.autocommit(True)
cur = conn.cursor()
cur.execute("DROP USER {0}".format(username))
conn.autocommit(False)
conn.close()
return True
except Exception as e:
        return 'Could not remove the user: {0}'.format(e)
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class ServiceEndpointPoliciesOperations:
"""ServiceEndpointPoliciesOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2021_02_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def _delete_initial(
self,
resource_group_name: str,
service_endpoint_policy_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-02-01"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceEndpointPolicyName': self._serialize.url("service_endpoint_policy_name", service_endpoint_policy_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/serviceEndpointPolicies/{serviceEndpointPolicyName}'} # type: ignore
async def begin_delete(
self,
resource_group_name: str,
service_endpoint_policy_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Deletes the specified service endpoint policy.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param service_endpoint_policy_name: The name of the service endpoint policy.
:type service_endpoint_policy_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
service_endpoint_policy_name=service_endpoint_policy_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceEndpointPolicyName': self._serialize.url("service_endpoint_policy_name", service_endpoint_policy_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/serviceEndpointPolicies/{serviceEndpointPolicyName}'} # type: ignore
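    # Illustrative note: the ``polling`` keyword above accepts three shapes --
    # True (build the default AsyncARMPolling), False (send the initial
    # request and return without polling), or a ready-made AsyncPollingMethod
    # (``ops`` stands for an instance of this class):
    #
    #     poller = await ops.begin_delete("rg", "policy", polling=False)
    #     poller = await ops.begin_delete(
    #         "rg", "policy", polling=AsyncARMPolling(timeout=5))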
async def get(
self,
resource_group_name: str,
service_endpoint_policy_name: str,
expand: Optional[str] = None,
**kwargs: Any
) -> "_models.ServiceEndpointPolicy":
"""Gets the specified service Endpoint Policies in a specified resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param service_endpoint_policy_name: The name of the service endpoint policy.
:type service_endpoint_policy_name: str
:param expand: Expands referenced resources.
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ServiceEndpointPolicy, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2021_02_01.models.ServiceEndpointPolicy
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ServiceEndpointPolicy"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-02-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceEndpointPolicyName': self._serialize.url("service_endpoint_policy_name", service_endpoint_policy_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ServiceEndpointPolicy', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/serviceEndpointPolicies/{serviceEndpointPolicyName}'} # type: ignore
async def _create_or_update_initial(
self,
resource_group_name: str,
service_endpoint_policy_name: str,
parameters: "_models.ServiceEndpointPolicy",
**kwargs: Any
) -> "_models.ServiceEndpointPolicy":
cls = kwargs.pop('cls', None) # type: ClsType["_models.ServiceEndpointPolicy"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-02-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceEndpointPolicyName': self._serialize.url("service_endpoint_policy_name", service_endpoint_policy_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'ServiceEndpointPolicy')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('ServiceEndpointPolicy', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('ServiceEndpointPolicy', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/serviceEndpointPolicies/{serviceEndpointPolicyName}'} # type: ignore
async def begin_create_or_update(
self,
resource_group_name: str,
service_endpoint_policy_name: str,
parameters: "_models.ServiceEndpointPolicy",
**kwargs: Any
) -> AsyncLROPoller["_models.ServiceEndpointPolicy"]:
"""Creates or updates a service Endpoint Policies.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param service_endpoint_policy_name: The name of the service endpoint policy.
:type service_endpoint_policy_name: str
:param parameters: Parameters supplied to the create or update service endpoint policy
operation.
:type parameters: ~azure.mgmt.network.v2021_02_01.models.ServiceEndpointPolicy
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either ServiceEndpointPolicy or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2021_02_01.models.ServiceEndpointPolicy]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.ServiceEndpointPolicy"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
service_endpoint_policy_name=service_endpoint_policy_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('ServiceEndpointPolicy', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceEndpointPolicyName': self._serialize.url("service_endpoint_policy_name", service_endpoint_policy_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/serviceEndpointPolicies/{serviceEndpointPolicyName}'} # type: ignore
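    # Illustrative usage (``ops`` stands for an instance of this class; the
    # ``location`` keyword is an assumption inherited from the Resource base
    # model):
    #
    #     policy = _models.ServiceEndpointPolicy(location="westus")
    #     poller = await ops.begin_create_or_update("rg", "policy", policy)
    #     result = await poller.result()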
async def update_tags(
self,
resource_group_name: str,
service_endpoint_policy_name: str,
parameters: "_models.TagsObject",
**kwargs: Any
) -> "_models.ServiceEndpointPolicy":
"""Updates tags of a service endpoint policy.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param service_endpoint_policy_name: The name of the service endpoint policy.
:type service_endpoint_policy_name: str
:param parameters: Parameters supplied to update service endpoint policy tags.
:type parameters: ~azure.mgmt.network.v2021_02_01.models.TagsObject
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ServiceEndpointPolicy, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2021_02_01.models.ServiceEndpointPolicy
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ServiceEndpointPolicy"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-02-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.update_tags.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceEndpointPolicyName': self._serialize.url("service_endpoint_policy_name", service_endpoint_policy_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'TagsObject')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ServiceEndpointPolicy', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/serviceEndpointPolicies/{serviceEndpointPolicyName}'} # type: ignore
def list(
self,
**kwargs: Any
) -> AsyncIterable["_models.ServiceEndpointPolicyListResult"]:
"""Gets all the service endpoint policies in a subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ServiceEndpointPolicyListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2021_02_01.models.ServiceEndpointPolicyListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ServiceEndpointPolicyListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-02-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('ServiceEndpointPolicyListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/ServiceEndpointPolicies'} # type: ignore
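    # Illustrative usage: the AsyncItemPaged returned above flattens pages,
    # so callers iterate items, not pages (``ops`` is an instance of this
    # class):
    #
    #     async for policy in ops.list():
    #         print(policy.name)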
def list_by_resource_group(
self,
resource_group_name: str,
**kwargs: Any
) -> AsyncIterable["_models.ServiceEndpointPolicyListResult"]:
"""Gets all service endpoint Policies in a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ServiceEndpointPolicyListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2021_02_01.models.ServiceEndpointPolicyListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ServiceEndpointPolicyListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-02-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_resource_group.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('ServiceEndpointPolicyListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/serviceEndpointPolicies'} # type: ignore
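# Illustrative end-to-end sketch, not generated code. It assumes the
# surrounding SDK packages azure-identity and azure-mgmt-network, whose
# async client wires this operations class up as ``service_endpoint_policies``.
if __name__ == '__main__':
    import asyncio
    from azure.identity.aio import DefaultAzureCredential
    from azure.mgmt.network.aio import NetworkManagementClient

    async def _demo():
        async with DefaultAzureCredential() as credential:
            async with NetworkManagementClient(
                    credential, '<subscription-id>') as client:
                poller = await client.service_endpoint_policies.begin_delete(
                    '<resource-group>', '<policy-name>')
                await poller.result()  # block until the LRO completes

    asyncio.run(_demo())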
|
|
import logging
import os
import shutil
from hashlib import sha256
class FileStore:
"""
A file based implementation of a key value store.
"""
def __init__(self,
dbDir,
dbName,
isLineNoKey: bool=False,
storeContentHash: bool=True,
ensureDurability: bool=True,
delimiter="\t",
lineSep="\r\n",
defaultFile=None):
"""
:param dbDir: The directory where the file storing the data would be
present
:param dbName: The name of the file that is used to store the data
        :param isLineNoKey: If False, each line stores the key followed by
        the delimiter followed by the value; if True, the 1-based line
        number acts as the key
        :param storeContentHash: Whether to store a hash of the value or
        not. Storing the hash makes it very fast to compare values for
        equality
        :param ensureDurability: Should the file be fsynced after every
        write. This can ensure durability in most cases, but makes writes
        extremely slow. See testMeasureWriteTime. For frequent writes, it
        makes sense to disable flush and fsync on every write
:param delimiter: delimiter between key and value
:param lineSep: line separator - defaults to \r\n
:param defaultFile: file or dir to use for initialization
"""
self.delimiter = delimiter
self.lineSep = lineSep
self.isLineNoKey = isLineNoKey
self.storeContentHash = storeContentHash
self.ensureDurability = ensureDurability
self._defaultFile = defaultFile
def _prepareFiles(self, dbDir, dbName, defaultFile):
if not defaultFile:
return
if not os.path.exists(defaultFile):
errMessage = "File that should be used for " \
"initialization does not exist: {}"\
.format(defaultFile)
logging.warning(errMessage)
raise ValueError(errMessage)
dataLocation = os.path.join(self.dbDir, dbName)
copy = shutil.copy if os.path.isfile(defaultFile) else shutil.copytree
copy(defaultFile, dataLocation)
def _prepareDBLocation(self, dbDir, dbName):
self.dbDir = dbDir
self.dbName = dbName
if not os.path.exists(self.dbDir):
os.makedirs(self.dbDir)
if not os.path.exists(os.path.join(dbDir, dbName)):
self._prepareFiles(dbDir, dbName, self._defaultFile)
def _initDB(self, dbDir, dbName):
self._prepareDBLocation(dbDir, dbName)
# noinspection PyUnresolvedReferences
def put(self, value, key=None):
# If line no is not treated as key then write the key and then the
# delimiter
if not self.isLineNoKey:
if key is None:
raise ValueError("Key must be provided for storing the value")
self.dbFile.write(key)
self.dbFile.write(self.delimiter)
self.dbFile.write(value)
if self.storeContentHash:
self.dbFile.write(self.delimiter)
if isinstance(value, str):
value = value.encode()
hexedHash = sha256(value).hexdigest()
self.dbFile.write(hexedHash)
self.dbFile.write(self.lineSep)
# A little bit smart strategy like flush every 2 seconds
# or every 10 writes or every 1 KB may be a better idea
# Make sure data get written to the disk
# Even flush slows down writes significantly
self.dbFile.flush()
if self.ensureDurability:
# fsync takes too much time on Windows.
# This is the reason of test_merkle_proof tests slowness on Windows.
# Even on Linux using fsync slows down the test by at least 2
# orders of magnitude. See testMeasureWriteTime
os.fsync(self.dbFile.fileno())
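        # Illustrative alternative (per the comment above; the attribute is
        # hypothetical and would need initialising in __init__): amortise the
        # flush over several writes instead of flushing unconditionally --
        #
        #     self._writes_since_flush += 1
        #     if self._writes_since_flush >= 10:
        #         self.dbFile.flush()
        #         self._writes_since_flush = 0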
def get(self, key):
for k, v in self.iterator():
if k == key:
return v
def _keyIterator(self, lines, prefix=None):
return self._baseIterator(lines, prefix, True, False)
def _valueIterator(self, lines, prefix=None):
return self._baseIterator(lines, prefix, False, True)
def _keyValueIterator(self, lines, prefix=None):
return self._baseIterator(lines, prefix, True, True)
def _parse_line(self, line, prefix=None, returnKey: bool=True,
returnValue: bool=True, key=None):
if self.isLineNoKey:
k = key
v = line
else:
k, v = line.split(self.delimiter, 1)
if returnValue:
if self.storeContentHash:
value, _ = v.rsplit(self.delimiter, 1)
else:
value = v
if not prefix or k.startswith(prefix):
if returnKey and returnValue:
return k, value
elif returnKey:
return k
elif returnValue:
return value
# noinspection PyUnresolvedReferences
def _baseIterator(self, lines, prefix, returnKey: bool, returnValue: bool):
i = 1
for line in lines:
k = str(i)
yield self._parse_line(line, prefix, returnKey, returnValue, k)
if self.isLineNoKey:
i += 1
def _lines(self):
raise NotImplementedError()
# noinspection PyUnresolvedReferences
def iterator(self, includeKey=True, includeValue=True, prefix=None):
if not (includeKey or includeValue):
raise ValueError("At least one of includeKey or includeValue "
"should be true")
# Move to the beginning of file
self.dbFile.seek(0)
lines = self._lines()
if includeKey and includeValue:
return self._keyValueIterator(lines, prefix=prefix)
elif includeValue:
return self._valueIterator(lines, prefix=prefix)
else:
return self._keyIterator(lines, prefix=prefix)
def is_valid_range(self, start=None, end=None):
assert self.isLineNoKey
if start and end:
assert start <= end
def get_range(self, start=None, end=None):
self.is_valid_range(start, end)
for k, value in self.iterator():
k = int(k)
if (start is None or k >= start) and (end is None or k <= end):
yield k, value
if end is not None and k > end:
break
@property
def lastKey(self):
        # TODO: seek to the end and scan backwards until the 2nd newline
        # (the 1st newline is hit immediately, unless the file is blank),
        # then read ahead to the delimiter, or split the text read so far
        # on the delimiter -- more efficient than iterating every line
k = None
for k, v in self.iterator():
pass
return k
def appendNewLineIfReq(self):
try:
logging.debug("new line check for file: {}".format(self.dbPath))
with open(self.dbPath, 'a+b') as f:
size = f.tell()
if size > 0:
                    f.seek(-len(self.lineSep), 2)  # seek to where the last line separator should start
if f.read().decode() != self.lineSep:
linesep = self.lineSep if isinstance(self.lineSep, bytes) else self.lineSep.encode()
f.write(linesep)
logging.debug(
"new line added for file: {}".format(self.dbPath))
except FileNotFoundError:
pass
@property
def numKeys(self):
return sum(1 for l in self.iterator())
# noinspection PyUnresolvedReferences
def close(self):
self.dbFile.close()
# noinspection PyUnresolvedReferences
@property
def closed(self):
return self.dbFile.closed
# noinspection PyUnresolvedReferences
def reset(self):
self.dbFile.truncate(0)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
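# Illustrative sketch, not part of the original module: FileStore leaves
# ``dbFile``/``dbPath``/``_lines`` to concrete subclasses (the original
# project ships its own text and binary stores). This minimal subclass and
# guarded demo only show how the pieces fit together; the POSIX /tmp path is
# for brevity.
class SimpleTextFileStore(FileStore):
    def __init__(self, dbDir, dbName, **kwargs):
        super().__init__(dbDir, dbName, **kwargs)
        self._initDB(dbDir, dbName)
        self.dbPath = os.path.join(dbDir, dbName)
        self.dbFile = open(self.dbPath, mode='a+')

    def _lines(self):
        # Keys and values never contain the line separator, so stripping it
        # from each raw line is safe.
        return (line.rstrip(self.lineSep) for line in self.dbFile)


if __name__ == '__main__':
    store = SimpleTextFileStore('/tmp', 'kv-demo.txt', storeContentHash=False,
                                ensureDurability=False)
    store.put(key='alpha', value='1')
    assert store.get('alpha') == '1'
    store.close()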
|
|
"""Debug Plugin for EasyEngine"""
from cement.core.controller import CementBaseController, expose
from cement.core import handler, hook
from ee.core.shellexec import *
from ee.core.mysql import EEMysql
from ee.core.services import EEService
from ee.core.logging import Log
from ee.cli.plugins.site_functions import logwatch
from ee.core.variables import EEVariables
from ee.core.fileutils import EEFileUtils
from pynginxconfig import NginxConfig
import os
import configparser
import glob
import signal
import subprocess
def ee_debug_hook(app):
# do something with the ``app`` object here.
pass
class EEDebugController(CementBaseController):
class Meta:
label = 'debug'
description = 'Used for server level debugging'
stacked_on = 'base'
stacked_type = 'nested'
arguments = [
(['--stop'],
dict(help='Stop debug', action='store_true')),
(['--start'],
dict(help='Start debug', action='store_true')),
(['--import-slow-log'],
dict(help='Import MySQL slow log to Anemometer database',
action='store_true')),
            (['--nginx'],
                dict(help='start/stop debugging nginx server '
                     'configuration for site',
                     action='store', choices=('on', 'off'),
                     const='on', nargs='?')),
            (['--php'],
                dict(help='start/stop debugging server php configuration',
                     action='store', choices=('on', 'off'),
                     const='on', nargs='?')),
            (['--fpm'],
                dict(help='start/stop debugging fastcgi configuration',
                     action='store', choices=('on', 'off'),
                     const='on', nargs='?')),
            (['--mysql'],
                dict(help='start/stop debugging mysql server',
                     action='store', choices=('on', 'off'),
                     const='on', nargs='?')),
            (['--wp'],
                dict(help='start/stop wordpress debugging for site',
                     action='store', choices=('on', 'off'),
                     const='on', nargs='?')),
            (['--rewrite'],
                dict(help='start/stop debugging nginx rewrite rules for site',
                     action='store', choices=('on', 'off'),
                     const='on', nargs='?')),
            (['--all'],
                dict(help='start/stop debugging all server parameters',
                     action='store', choices=('on', 'off'),
                     const='on', nargs='?')),
(['-i', '--interactive'],
dict(help='Interactive debug', action='store_true')),
(['--import-slow-log-interval'],
dict(help='Import MySQL slow log to Anemometer',
action='store', dest='interval')),
(['site_name'],
dict(help='Website Name', nargs='?', default=None))
]
usage = "ee debug [<site_name>] [options] "
@expose(hide=True)
def debug_nginx(self):
"""Start/Stop Nginx debug"""
# start global debug
if (self.app.pargs.nginx == 'on' and not self.app.pargs.site_name):
try:
debug_address = (self.app.config.get('stack', 'ip-address')
.split())
except Exception as e:
debug_address = ['0.0.0.0/0']
# Check if IP address is 127.0.0.1 then enable debug globally
if debug_address == ['127.0.0.1'] or debug_address == []:
debug_address = ['0.0.0.0/0']
for ip_addr in debug_address:
if not ("debug_connection "+ip_addr in open('/etc/nginx/'
'nginx.conf', encoding='utf-8').read()):
Log.info(self, "Setting up Nginx debug connection"
" for "+ip_addr)
EEShellExec.cmd_exec(self, "sed -i \"/events {{/a\\ \\ \\ "
"\\ $(echo debug_connection "
"{ip}\;)\" /etc/nginx/"
"nginx.conf".format(ip=ip_addr))
self.trigger_nginx = True
if not self.trigger_nginx:
Log.info(self, "Nginx debug connection already enabled")
self.msg = self.msg + ["/var/log/nginx/*.error.log"]
# stop global debug
elif (self.app.pargs.nginx == 'off' and not self.app.pargs.site_name):
if "debug_connection " in open('/etc/nginx/nginx.conf',
encoding='utf-8').read():
Log.info(self, "Disabling Nginx debug connections")
EEShellExec.cmd_exec(self, "sed -i \"/debug_connection.*/d\""
" /etc/nginx/nginx.conf")
self.trigger_nginx = True
else:
Log.info(self, "Nginx debug connection already disabled")
# start site specific debug
        elif (self.app.pargs.nginx == 'on' and self.app.pargs.site_name):
config_path = ("/etc/nginx/sites-available/{0}"
.format(self.app.pargs.site_name))
if os.path.isfile(config_path):
if not EEShellExec.cmd_exec(self, "grep \"error.log debug\" "
"{0}".format(config_path)):
Log.info(self, "Starting NGINX debug connection for "
"{0}".format(self.app.pargs.site_name))
EEShellExec.cmd_exec(self, "sed -i \"s/error.log;/"
"error.log "
"debug;/\" {0}".format(config_path))
self.trigger_nginx = True
else:
Log.info(self, "Nginx debug for site already enabled")
self.msg = self.msg + ['{0}{1}/logs/error.log'
.format(EEVariables.ee_webroot,
self.app.pargs.site_name)]
else:
Log.info(self, "{0} domain not valid"
.format(self.app.pargs.site_name))
# stop site specific debug
elif (self.app.pargs.nginx == 'off' and self.app.pargs.site_name):
config_path = ("/etc/nginx/sites-available/{0}"
.format(self.app.pargs.site_name))
if os.path.isfile(config_path):
if EEShellExec.cmd_exec(self, "grep \"error.log debug\" {0}"
.format(config_path)):
Log.info(self, "Stoping NGINX debug connection for {0}"
.format(self.app.pargs.site_name))
EEShellExec.cmd_exec(self, "sed -i \"s/error.log debug;/"
"error.log;/\" {0}"
.format(config_path))
self.trigger_nginx = True
else:
Log.info(self, "Nginx debug for site already disabled")
else:
Log.info(self, "{0} domain not valid"
.format(self.app.pargs.site_name))
@expose(hide=True)
def debug_php(self):
"""Start/Stop PHP debug"""
# PHP global debug start
if (self.app.pargs.php == 'on' and not self.app.pargs.site_name):
if not (EEShellExec.cmd_exec(self, "sed -n \"/upstream php"
"{/,/}/p \" /etc/nginx/"
"conf.d/upstream.conf "
"| grep 9001")):
Log.info(self, "Enabling PHP debug")
# Change upstream.conf
nc = NginxConfig()
nc.loadf('/etc/nginx/conf.d/upstream.conf')
nc.set([('upstream','php',), 'server'], '127.0.0.1:9001')
if os.path.isfile("/etc/nginx/common/wpfc-hhvm.conf"):
nc.set([('upstream','hhvm',), 'server'], '127.0.0.1:9001')
nc.savef('/etc/nginx/conf.d/upstream.conf')
# Enable xdebug
EEFileUtils.searchreplace(self, "/etc/php5/mods-available/"
"xdebug.ini",
";zend_extension",
"zend_extension")
# Fix slow log is not enabled default in PHP5.6
config = configparser.ConfigParser()
config.read('/etc/php5/fpm/pool.d/debug.conf')
config['debug']['slowlog'] = '/var/log/php5/slow.log'
config['debug']['request_slowlog_timeout'] = '10s'
                with open('/etc/php5/fpm/pool.d/debug.conf',
                          encoding='utf-8', mode='w') as configfile:
                    Log.debug(self, "Writing debug.conf configuration into "
                              "/etc/php5/fpm/pool.d/debug.conf")
                    config.write(configfile)
self.trigger_php = True
self.trigger_nginx = True
else:
Log.info(self, "PHP debug is already enabled")
self.msg = self.msg + ['/var/log/php5/slow.log']
# PHP global debug stop
elif (self.app.pargs.php == 'off' and not self.app.pargs.site_name):
if EEShellExec.cmd_exec(self, " sed -n \"/upstream php {/,/}/p\" "
"/etc/nginx/conf.d/upstream.conf "
"| grep 9001"):
Log.info(self, "Disabling PHP debug")
# Change upstream.conf
nc = NginxConfig()
nc.loadf('/etc/nginx/conf.d/upstream.conf')
nc.set([('upstream','php',), 'server'], '127.0.0.1:9000')
if os.path.isfile("/etc/nginx/common/wpfc-hhvm.conf"):
nc.set([('upstream','hhvm',), 'server'], '127.0.0.1:8000')
nc.savef('/etc/nginx/conf.d/upstream.conf')
# Disable xdebug
EEFileUtils.searchreplace(self, "/etc/php5/mods-available/"
"xdebug.ini",
"zend_extension",
";zend_extension")
self.trigger_php = True
self.trigger_nginx = True
else:
Log.info(self, "PHP debug is already disabled")
@expose(hide=True)
def debug_fpm(self):
"""Start/Stop PHP5-FPM debug"""
# PHP5-FPM start global debug
if (self.app.pargs.fpm == 'on' and not self.app.pargs.site_name):
if not EEShellExec.cmd_exec(self, "grep \"log_level = debug\" "
"/etc/php5/fpm/php-fpm.conf"):
Log.info(self, "Setting up PHP5-FPM log_level = debug")
config = configparser.ConfigParser()
config.read('/etc/php5/fpm/php-fpm.conf')
config.remove_option('global', 'include')
config['global']['log_level'] = 'debug'
config['global']['include'] = '/etc/php5/fpm/pool.d/*.conf'
with open('/etc/php5/fpm/php-fpm.conf',
encoding='utf-8', mode='w') as configfile:
Log.debug(self, "Writting php5-FPM configuration into "
"/etc/php5/fpm/php-fpm.conf")
config.write(configfile)
self.trigger_php = True
else:
Log.info(self, "PHP5-FPM log_level = debug already setup")
self.msg = self.msg + ['/var/log/php5/fpm.log']
# PHP5-FPM stop global debug
elif (self.app.pargs.fpm == 'off' and not self.app.pargs.site_name):
if EEShellExec.cmd_exec(self, "grep \"log_level = debug\" "
"/etc/php5/fpm/php-fpm.conf"):
Log.info(self, "Disabling PHP5-FPM log_level = debug")
config = configparser.ConfigParser()
config.read('/etc/php5/fpm/php-fpm.conf')
config.remove_option('global', 'include')
config['global']['log_level'] = 'notice'
config['global']['include'] = '/etc/php5/fpm/pool.d/*.conf'
with open('/etc/php5/fpm/php-fpm.conf',
encoding='utf-8', mode='w') as configfile:
Log.debug(self, "writting php5 configuration into "
"/etc/php5/fpm/php-fpm.conf")
config.write(configfile)
self.trigger_php = True
else:
Log.info(self, "PHP5-FPM log_level = debug already disabled")
@expose(hide=True)
def debug_mysql(self):
"""Start/Stop MySQL debug"""
# MySQL start global debug
if (self.app.pargs.mysql == 'on' and not self.app.pargs.site_name):
if not EEShellExec.cmd_exec(self, "mysql -e \"show variables like"
" \'slow_query_log\';\" | "
"grep ON"):
Log.info(self, "Setting up MySQL slow log")
EEMysql.execute(self, "set global slow_query_log = "
"\'ON\';")
EEMysql.execute(self, "set global slow_query_log_file = "
"\'/var/log/mysql/mysql-slow.log\';")
EEMysql.execute(self, "set global long_query_time = 2;")
EEMysql.execute(self, "set global log_queries_not_using"
"_indexes = \'ON\';")
else:
Log.info(self, "MySQL slow log is already enabled")
self.msg = self.msg + ['/var/log/mysql/mysql-slow.log']
# MySQL stop global debug
elif (self.app.pargs.mysql == 'off' and not self.app.pargs.site_name):
if EEShellExec.cmd_exec(self, "mysql -e \"show variables like \'"
"slow_query_log\';\" | grep ON"):
Log.info(self, "Disabling MySQL slow log")
EEMysql.execute(self, "set global slow_query_log = \'OFF\';")
EEMysql.execute(self, "set global slow_query_log_file = \'"
"/var/log/mysql/mysql-slow.log\';")
EEMysql.execute(self, "set global long_query_time = 10;")
EEMysql.execute(self, "set global log_queries_not_using_index"
"es = \'OFF\';")
EEShellExec.cmd_exec(self, "crontab -l | sed \'/#EasyEngine "
"start/,/#EasyEngine end/d\' | crontab -")
else:
Log.info(self, "MySQL slow log already disabled")
@expose(hide=True)
def debug_wp(self):
"""Start/Stop WordPress debug"""
if (self.app.pargs.wp == 'on' and self.app.pargs.site_name):
wp_config = ("{0}/{1}/wp-config.php"
.format(EEVariables.ee_webroot,
self.app.pargs.site_name))
webroot = "{0}{1}".format(EEVariables.ee_webroot,
self.app.pargs.site_name)
# Check wp-config.php file into htdocs folder
if not os.path.isfile(wp_config):
wp_config = ("{0}/{1}/htdocs/wp-config.php"
.format(EEVariables.ee_webroot,
self.app.pargs.site_name))
if os.path.isfile(wp_config):
if not EEShellExec.cmd_exec(self, "grep \"\'WP_DEBUG\'\" {0} |"
" grep true".format(wp_config)):
Log.info(self, "Starting WordPress debug")
open("{0}/htdocs/wp-content/debug.log".format(webroot),
encoding='utf-8', mode='a').close()
EEShellExec.cmd_exec(self, "chown {1}: {0}/htdocs/wp-"
"content/debug.log"
"".format(webroot,
EEVariables.ee_php_user))
EEShellExec.cmd_exec(self, "sed -i \"s/define(\'WP_DEBUG\'"
".*/define(\'WP_DEBUG\', true);\\n"
"define(\'WP_DEBUG_DISPLAY\', false);"
"\\ndefine(\'WP_DEBUG_LOG\', true);"
"\\ndefine(\'SAVEQUERIES\', true);/\""
" {0}".format(wp_config))
EEShellExec.cmd_exec(self, "cd {0}/htdocs/ && wp"
" plugin --allow-root install "
"developer query-monitor"
.format(webroot))
EEShellExec.cmd_exec(self, "chown -R {1}: {0}/htdocs/"
"wp-content/plugins"
.format(webroot,
EEVariables.ee_php_user))
self.msg = self.msg + ['{0}{1}/htdocs/wp-content'
'/debug.log'
.format(EEVariables.ee_webroot,
self.app.pargs.site_name)]
else:
Log.info(self, "Unable to find wp-config.php for site: {0}"
.format(self.app.pargs.site_name))
elif (self.app.pargs.wp == 'off' and self.app.pargs.site_name):
wp_config = ("{0}{1}/wp-config.php"
.format(EEVariables.ee_webroot,
self.app.pargs.site_name))
webroot = "{0}{1}".format(EEVariables.ee_webroot,
self.app.pargs.site_name)
# Check wp-config.php file into htdocs folder
if not os.path.isfile(wp_config):
wp_config = ("{0}/{1}/htdocs/wp-config.php"
.format(EEVariables.ee_webroot,
self.app.pargs.site_name))
if os.path.isfile(wp_config):
if EEShellExec.cmd_exec(self, "grep \"\'WP_DEBUG\'\" {0} | "
"grep true".format(wp_config)):
Log.info(self, "Disabling WordPress debug")
EEShellExec.cmd_exec(self, "sed -i \"s/define(\'WP_DEBUG\'"
", true);/define(\'WP_DEBUG\', "
"false);/\" {0}".format(wp_config))
EEShellExec.cmd_exec(self, "sed -i \"/define(\'"
"WP_DEBUG_DISPLAY\', false);/d\" {0}"
.format(wp_config))
EEShellExec.cmd_exec(self, "sed -i \"/define(\'"
"WP_DEBUG_LOG\', true);/d\" {0}"
.format(wp_config))
EEShellExec.cmd_exec(self, "sed -i \"/define(\'"
"SAVEQUERIES\', "
"true);/d\" {0}".format(wp_config))
else:
Log.info(self, "WordPress debug all already disabled")
else:
Log.error(self, "Missing argument site name")
@expose(hide=True)
def debug_rewrite(self):
"""Start/Stop Nginx rewrite rules debug"""
# Start Nginx rewrite debug globally
if (self.app.pargs.rewrite == 'on' and not self.app.pargs.site_name):
if not EEShellExec.cmd_exec(self, "grep \"rewrite_log on;\" "
"/etc/nginx/nginx.conf"):
Log.info(self, "Setting up Nginx rewrite logs")
EEShellExec.cmd_exec(self, "sed -i \'/http {/a \\\\t"
"rewrite_log on;\' /etc/nginx/nginx.conf")
self.trigger_nginx = True
else:
Log.info(self, "Nginx rewrite logs already enabled")
if '/var/log/nginx/*.error.log' not in self.msg:
self.msg = self.msg + ['/var/log/nginx/*.error.log']
# Stop Nginx rewrite debug globally
elif (self.app.pargs.rewrite == 'off'
and not self.app.pargs.site_name):
if EEShellExec.cmd_exec(self, "grep \"rewrite_log on;\" "
"/etc/nginx/nginx.conf"):
Log.info(self, "Disabling Nginx rewrite logs")
EEShellExec.cmd_exec(self, "sed -i \"/rewrite_log.*/d\""
" /etc/nginx/nginx.conf")
self.trigger_nginx = True
else:
Log.info(self, "Nginx rewrite logs already disabled")
# Start Nginx rewrite for site
elif (self.app.pargs.rewrite == 'on' and self.app.pargs.site_name):
config_path = ("/etc/nginx/sites-available/{0}"
.format(self.app.pargs.site_name))
if not EEShellExec.cmd_exec(self, "grep \"rewrite_log on;\" {0}"
.format(config_path)):
Log.info(self, "Setting up Nginx rewrite logs for {0}"
.format(self.app.pargs.site_name))
EEShellExec.cmd_exec(self, "sed -i \"/access_log/i \\\\\\t"
"rewrite_log on;\" {0}"
.format(config_path))
self.trigger_nginx = True
else:
Log.info(self, "Nginx rewrite logs for {0} already setup"
.format(self.app.pargs.site_name))
if ('{0}{1}/logs/error.log'.format(EEVariables.ee_webroot,
self.app.pargs.site_name)
not in self.msg):
self.msg = self.msg + ['{0}{1}/logs/error.log'
.format(EEVariables.ee_webroot,
self.app.pargs.site_name)]
# Stop Nginx rewrite for site
elif (self.app.pargs.rewrite == 'off' and self.app.pargs.site_name):
config_path = ("/etc/nginx/sites-available/{0}"
.format(self.app.pargs.site_name))
if EEShellExec.cmd_exec(self, "grep \"rewrite_log on;\" {0}"
.format(config_path)):
Log.info(self, "Disabling Nginx rewrite logs for {0}"
.format(self.app.pargs.site_name))
EEShellExec.cmd_exec(self, "sed -i \"/rewrite_log.*/d\" {0}"
.format(config_path))
self.trigger_nginx = True
else:
Log.info(self, "Nginx rewrite logs for {0} already "
" disabled".format(self.app.pargs.site_name))
@expose(hide=True)
def signal_handler(self, signal, frame):
"""Handle Ctrl+c hevent for -i option of debug"""
self.start = False
if self.app.pargs.nginx:
self.app.pargs.nginx = 'off'
self.debug_nginx()
if self.app.pargs.php:
self.app.pargs.php = 'off'
self.debug_php()
if self.app.pargs.fpm:
self.app.pargs.fpm = 'off'
self.debug_fpm()
if self.app.pargs.mysql:
# MySQL debug will not work for remote MySQL
if EEVariables.ee_mysql_host is "localhost":
self.app.pargs.mysql = 'off'
self.debug_mysql()
else:
Log.warn(self, "Remote MySQL found, EasyEngine will not "
"enable remote debug")
if self.app.pargs.wp:
self.app.pargs.wp = 'off'
self.debug_wp()
if self.app.pargs.rewrite:
self.app.pargs.rewrite = 'off'
self.debug_rewrite()
# Reload Nginx
if self.trigger_nginx:
EEService.reload_service(self, 'nginx')
# Reload PHP
if self.trigger_php:
EEService.reload_service(self, 'php5-fpm')
self.app.close(0)
@expose(hide=True)
def default(self):
"""Default function of debug"""
# self.start = True
self.interactive = False
self.msg = []
self.trigger_nginx = False
self.trigger_php = False
if ((not self.app.pargs.nginx) and (not self.app.pargs.php)
and (not self.app.pargs.fpm) and (not self.app.pargs.mysql)
and (not self.app.pargs.wp) and (not self.app.pargs.rewrite)
and (not self.app.pargs.all)
and (not self.app.pargs.site_name)
and (not self.app.pargs.import_slow_log)
and (not self.app.pargs.interval)):
if self.app.pargs.stop or self.app.pargs.start:
print("--start/stop option is deprecated since ee3.0.5")
self.app.args.print_help()
else:
self.app.args.print_help()
if self.app.pargs.import_slow_log:
self.import_slow_log()
if self.app.pargs.interval:
try:
cron_time = int(self.app.pargs.interval)
except Exception as e:
cron_time = 5
try:
if not EEShellExec.cmd_exec(self, "crontab -l | grep "
"'ee debug --import-slow-log'"):
                    if cron_time != 0:
Log.info(self, "setting up crontab entry,"
" please wait...")
EEShellExec.cmd_exec(self, "/bin/bash -c \"crontab -l "
"2> /dev/null | {{ cat; echo -e"
" \\\"#EasyEngine start MySQL "
"slow log \\n*/{0} * * * * "
"/usr/local/bin/ee debug"
" --import-slow-log\\n"
"#EasyEngine end MySQL slow log"
"\\\"; }} | crontab -\""
.format(cron_time))
else:
                    if cron_time != 0:
Log.info(self, "updating crontab entry,"
" please wait...")
if not EEShellExec.cmd_exec(self, "/bin/bash -c "
"\"crontab "
"-l | sed '/EasyEngine "
"start MySQL slow "
"log/!b;n;c\*\/{0} "
"\* \* \* "
"\* \/usr"
"\/local\/bin\/ee debug "
"--import\-slow\-log' "
"| crontab -\""
.format(cron_time)):
Log.error(self, "failed to update crontab entry")
else:
Log.info(self, "removing crontab entry,"
" please wait...")
if not EEShellExec.cmd_exec(self, "/bin/bash -c "
"\"crontab "
"-l | sed '/EasyEngine "
"start MySQL slow "
"log/,+2d'"
"| crontab -\""
.format(cron_time)):
Log.error(self, "failed to remove crontab entry")
except CommandExecutionError as e:
Log.debug(self, str(e))
if self.app.pargs.all == 'on':
if self.app.pargs.site_name:
self.app.pargs.wp = 'on'
self.app.pargs.nginx = 'on'
self.app.pargs.php = 'on'
self.app.pargs.fpm = 'on'
self.app.pargs.mysql = 'on'
self.app.pargs.rewrite = 'on'
if self.app.pargs.all == 'off':
if self.app.pargs.site_name:
self.app.pargs.wp = 'off'
self.app.pargs.nginx = 'off'
self.app.pargs.php = 'off'
self.app.pargs.fpm = 'off'
self.app.pargs.mysql = 'off'
self.app.pargs.rewrite = 'off'
if ((not self.app.pargs.nginx) and (not self.app.pargs.php)
and (not self.app.pargs.fpm) and (not self.app.pargs.mysql)
and (not self.app.pargs.wp) and (not self.app.pargs.rewrite)
and self.app.pargs.site_name):
self.app.args.print_help()
# self.app.pargs.nginx = 'on'
# self.app.pargs.wp = 'on'
# self.app.pargs.rewrite = 'on'
if self.app.pargs.nginx:
self.debug_nginx()
if self.app.pargs.php:
self.debug_php()
if self.app.pargs.fpm:
self.debug_fpm()
if self.app.pargs.mysql:
# MySQL debug will not work for remote MySQL
if EEVariables.ee_mysql_host is "localhost":
self.debug_mysql()
else:
Log.warn(self, "Remote MySQL found, EasyEngine will not "
"enable remote debug")
if self.app.pargs.wp:
self.debug_wp()
if self.app.pargs.rewrite:
self.debug_rewrite()
if self.app.pargs.interactive:
self.interactive = True
# Reload Nginx
if self.trigger_nginx:
EEService.reload_service(self, 'nginx')
# Reload PHP
if self.trigger_php:
EEService.restart_service(self, 'php5-fpm')
if len(self.msg) > 0:
if not self.app.pargs.interactive:
disp_msg = ' '.join(self.msg)
Log.info(self, "Use following command to check debug logs:\n"
+ Log.ENDC + "tail -f {0}".format(disp_msg))
else:
signal.signal(signal.SIGINT, self.signal_handler)
watch_list = []
for w_list in self.msg:
watch_list = watch_list + glob.glob(w_list)
logwatch(self, watch_list)
@expose(hide=True)
def import_slow_log(self):
"""Default function for import slow log"""
if os.path.isdir("{0}22222/htdocs/db/anemometer"
.format(EEVariables.ee_webroot)):
if os.path.isfile("/var/log/mysql/mysql-slow.log"):
# Get Anemometer user name and password
Log.info(self, "Importing MySQL slow log to Anemometer")
host = os.popen("grep -e \"\'host\'\" {0}22222/htdocs/"
.format(EEVariables.ee_webroot)
+ "db/anemometer/conf/config.inc.php "
"| head -1 | cut -d\\\' -f4 | "
"tr -d '\n'").read()
user = os.popen("grep -e \"\'user\'\" {0}22222/htdocs/"
.format(EEVariables.ee_webroot)
+ "db/anemometer/conf/config.inc.php "
"| head -1 | cut -d\\\' -f4 | "
"tr -d '\n'").read()
password = os.popen("grep -e \"\'password\'\" {0}22222/"
.format(EEVariables.ee_webroot)
+ "htdocs/db/anemometer/conf"
"/config.inc.php "
"| head -1 | cut -d\\\' -f4 | "
"tr -d '\n'").read()
# Import slow log Anemometer using pt-query-digest
try:
EEShellExec.cmd_exec(self, "pt-query-digest --user={0} "
"--password={1} "
"--review D=slow_query_log,"
"t=global_query_review "
"--history D=slow_query_log,t="
"global_query_review_history "
"--no-report --limit=0% "
"--filter=\" \\$event->{{Bytes}} = "
"length(\\$event->{{arg}}) "
"and \\$event->{{hostname}}=\\\""
"{2}\\\"\" "
"/var/log/mysql/mysql-slow.log"
.format(user, password, host))
except CommandExecutionError as e:
Log.debug(self, str(e))
Log.error(self, "MySQL slow log import failed.")
else:
Log.error(self, "MySQL slow log file not found,"
" so not imported slow logs")
else:
Log.error(self, "Anemometer is not installed." +
Log.ENDC + "\nYou can install Anemometer with "
"this command "
+ Log.BOLD + "\n `ee stack install --utils`"
+ Log.ENDC)
def load(app):
# register the plugin class.. this only happens if the plugin is enabled
handler.register(EEDebugController)
# register a hook (function) to run after arguments are parsed.
hook.register('post_argument_parsing', ee_debug_hook)
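# Illustrative sketch, not part of the plugin: the crontab handling in
# EEDebugController.default() is done through shell pipelines
# (crontab -l | sed ... | crontab -). This pure-Python equivalent shows the
# transformation those pipelines perform on the crontab text; the helper
# name is hypothetical.
def _toggle_slow_log_cron(crontab_text, interval_minutes):
    """Replace, append, or (interval 0) remove the EasyEngine slow-log block."""
    lines = crontab_text.splitlines()
    start = '#EasyEngine start MySQL slow log'
    end = '#EasyEngine end MySQL slow log'
    if start in lines and end in lines:
        del lines[lines.index(start):lines.index(end) + 1]
    if interval_minutes:
        lines += [start,
                  '*/{0} * * * * /usr/local/bin/ee debug --import-slow-log'
                  .format(interval_minutes),
                  end]
    return '\n'.join(lines)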
|
|
# Copyright (c) 2001, Stanford University
# All rights reserved.
#
# See the file LICENSE.txt for information on redistributing this software.
import sys
sys.path.append( '../packer' )
sys.path.append( '../glapi_parser' )
from pack_currenttypes import *
import apiutil
apiutil.CopyrightC()
print '''
#include "state/cr_currentpointers.h"
#include "state.h"
#include <stdio.h>
#ifdef WINDOWS
#pragma warning( disable : 4127 )
#endif
typedef void (*convert_func) (GLfloat *, const unsigned char *);
'''
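# Editor's note: this file is a Python 2 code generator -- it emits a C
# source file on stdout. A typical (hypothetical file/target names)
# invocation:
#
#     python2 current.py > state_current.c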
import convert
for k in current_fns.keys():
	name = '%s%s' % (k[:1].lower(),k[1:])
ucname = k.upper()
num_members = len(current_fns[k]['default']) + 1
print '#define VPINCH_CONVERT_%s(op,data,dst) \\' % ucname
print '{\\'
print '\tGLfloat vdata[%d] = {' % num_members,
	# Initialize vdata with this function's default component values
	i = 0
for defaultvar in current_fns[k]['default']:
print '%d' % defaultvar,
if i != num_members:
print ',',
i += 1
print '};\\'
print '\tswitch (op) { \\'
for type in current_fns[k]['types']:
if type[0:1] == "N":
normalize = 1
type = type[1:]
else:
normalize = 0
for size in current_fns[k]['sizes']:
uctype = type.upper()
if ucname == 'EDGEFLAG':
print '\tcase CR_%s_OPCODE: \\' % ucname
else:
print '\tcase CR_%s%d%s_OPCODE: \\' % (ucname,size,uctype)
if (ucname == 'COLOR' or ucname == 'NORMAL' or ucname == 'SECONDARYCOLOR' or normalize) and type != 'f' and type != 'd':
print '\t\t__convert_rescale_%s%d (vdata, (%s *) (data)); \\' % (type,size,gltypes[type]['type'])
else:
print '\t\t__convert_%s%d (vdata, (%s *) (data)); \\' % (type,size,gltypes[type]['type'])
print '\t\tbreak; \\'
print '\tdefault: \\'
print '\t\tcrSimpleError ( "Unknown opcode in VPINCH_CONVERT_%s" ); \\' % ucname
print '\t}\\'
i = 0
for member in current_fns[k]['members']:
print '\t(dst).%s = vdata[%d];\\' % (member,i)
i += 1
print '}\n'
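# Illustrative note: each iteration above emits one C macro of roughly this
# shape (abridged; the exact defaults, opcodes and members come from the
# tables in pack_currenttypes):
#
#   #define VPINCH_CONVERT_<NAME>(op,data,dst) \
#   {\
#       GLfloat vdata[N] = { <defaults> ,};\
#       switch (op) { \
#       case CR_<NAME><size><type>_OPCODE: \
#           __convert_<type><size> (vdata, (<gltype> *) (data)); \
#           break; \
#       default: \
#           crSimpleError ( "Unknown opcode in VPINCH_CONVERT_<NAME>" ); \
#       }\
#       (dst).<member> = vdata[i];\
#   }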
print '''
void crStateCurrentRecover( void )
{
const unsigned char *v;
convert_func convert=NULL;
CRContext *g = GetCurrentContext();
CRCurrentState *c = &(g->current);
CRStateBits *sb = GetCurrentBits();
CRCurrentBits *cb = &(sb->current);
static const GLfloat color_default[4] = {0.0f, 0.0f, 0.0f, 1.0f};
static const GLfloat secondaryColor_default[4] = {0.0f, 0.0f, 0.0f, 0.0f};
static const GLfloat texCoord_default[4] = {0.0f, 0.0f, 0.0f, 1.0f};
static const GLfloat normal_default[4] = {0.0f, 0.0f, 0.0f, 1.0f};
static const GLfloat index_default = 0.0f;
static const GLboolean edgeFlag_default = GL_TRUE;
static const GLfloat vertexAttrib_default[4] = {0.0f, 0.0f, 0.0f, 1.0f};
static const GLfloat fogCoord_default = 0.0f;
GLnormal_p *normal = &(c->current->c.normal);
GLcolor_p *color = &(c->current->c.color);
GLsecondarycolor_p *secondaryColor = &(c->current->c.secondaryColor);
GLtexcoord_p *texCoord = &(c->current->c.texCoord);
GLindex_p *index = &(c->current->c.index);
GLedgeflag_p *edgeFlag = &(c->current->c.edgeFlag);
GLvertexattrib_p *vertexAttrib = &(c->current->c.vertexAttrib);
GLfogcoord_p *fogCoord = &(c->current->c.fogCoord);
unsigned int i;
CRbitvalue nbitID[CR_MAX_BITARRAY];
/*
* If the calling SPU hasn't called crStateSetCurrentPointers()
* we can't recover anything, so abort now. (i.e. we don't have
* a pincher, oh, and just emit the message once).
*/
if (!c->current) {
static int donewarning = 0;
if (!donewarning)
crWarning("No pincher, please call crStateSetCurrentPointers() in your SPU");
donewarning = 1;
return; /* never get here */
}
c->attribsUsedMask = c->current->attribsUsedMask;
/* silence warnings */
(void) __convert_b1;
(void) __convert_b2;
(void) __convert_b3;
(void) __convert_b4;
(void) __convert_ui1;
(void) __convert_ui2;
(void) __convert_ui3;
(void) __convert_ui4;
(void) __convert_l1;
(void) __convert_l2;
(void) __convert_l3;
(void) __convert_l4;
(void) __convert_us1;
(void) __convert_us2;
(void) __convert_us3;
(void) __convert_us4;
(void) __convert_ub1;
(void) __convert_ub2;
(void) __convert_ub3;
(void) __convert_ub4;
(void) __convert_rescale_s1;
(void) __convert_rescale_s2;
(void) __convert_rescale_b1;
(void) __convert_rescale_b2;
(void) __convert_rescale_ui1;
(void) __convert_rescale_ui2;
(void) __convert_rescale_i1;
(void) __convert_rescale_i2;
(void) __convert_rescale_us1;
(void) __convert_rescale_us2;
(void) __convert_rescale_ub1;
(void) __convert_rescale_ub2;
(void) __convert_Ni1;
(void) __convert_Ni2;
(void) __convert_Ni3;
(void) __convert_Ni4;
(void) __convert_Nb1;
(void) __convert_Nb2;
(void) __convert_Nb3;
(void) __convert_Nb4;
(void) __convert_Nus1;
(void) __convert_Nus2;
(void) __convert_Nus3;
(void) __convert_Nus4;
(void) __convert_Nui1;
(void) __convert_Nui2;
(void) __convert_Nui3;
(void) __convert_Nui4;
(void) __convert_Ns1;
(void) __convert_Ns2;
(void) __convert_Ns3;
(void) __convert_Ns4;
(void) __convert_Nub1;
(void) __convert_Nub2;
(void) __convert_Nub3;
(void) __convert_Nub4;
DIRTY(nbitID, g->neg_bitid);
/* Save pre state */
for (i = 0; i < CR_MAX_VERTEX_ATTRIBS; i++) {
COPY_4V(c->vertexAttribPre[i] , c->vertexAttrib[i]);
}
c->edgeFlagPre = c->edgeFlag;
c->colorIndexPre = c->colorIndex;
'''
for k in current_fns.keys():
print '\t/* %s */' % k
print '\tv=NULL;'
name = '%s%s' % (k[:1].lower(),k[1:])
indent = ""
if current_fns[k].has_key( 'array' ):
print '\tfor (i = 0 ; i < %s ; i++)' % current_fns[k]['array']
print '\t{'
indent += "\t"
for type in current_fns[k]['types']:
if type[0:1] == "N":
normalized = 1
type2 = type[1:]
else:
normalized = 0
type2 = type
for size in current_fns[k]['sizes']:
ptr = '%s->%s%d' % (name, type, size )
if current_fns[k].has_key( 'array' ):
ptr += "[i]"
print '%s\tif (v < %s)' % (indent, ptr)
print '%s\t{' % indent
print '%s\t\tv = %s;' % (indent, ptr)
if (k == 'Color' or k == 'Normal' or k == 'SecondaryColor' or normalized) and type != 'f' and type != 'd' and type != 'l':
print '%s\t\tconvert = (convert_func) __convert_rescale_%s%d;' % (indent,type,size)
else:
print '%s\t\tconvert = (convert_func) __convert_%s%d;' % (indent,type,size)
print '%s\t}' % indent
print ''
print '%s\tif (v != NULL) {' % indent
if current_fns[k].has_key( 'array' ):
if k == 'TexCoord':
print '%s\t\tCOPY_4V(c->vertexAttrib[VERT_ATTRIB_TEX0 + i], %s_default);' % (indent,name)
else:
print '%s\t\tCOPY_4V(c->%s[i], %s_default);' % (indent,name,name)
else:
if k == 'Normal':
print '%s\t\tCOPY_4V(c->vertexAttrib[VERT_ATTRIB_NORMAL], %s_default);' % (indent,name)
elif k == 'FogCoord':
print '%s\t\tc->vertexAttrib[VERT_ATTRIB_FOG][0] = %s_default;' % (indent,name)
elif k == 'Color':
print '%s\t\tCOPY_4V(c->vertexAttrib[VERT_ATTRIB_COLOR0], %s_default);' % (indent,name)
elif k == 'SecondaryColor':
print '%s\t\tCOPY_4V(c->vertexAttrib[VERT_ATTRIB_COLOR1], %s_default);' % (indent,name)
elif k == 'TexCoord':
print '%s\t\tCOPY_4V(c->vertexAttrib[VERT_ATTRIB_TEX0], %s_default);' % (indent,name)
elif k == 'Index':
print '%s\t\tc->colorIndex = %s_default;' % (indent,name)
elif k == 'EdgeFlag':
print '%s\t\tc->edgeFlag = %s_default;' % (indent,name)
else:
print '%s\t\tc->%s = %s_default;' % (indent,name,name)
if k == 'EdgeFlag':
print '%s\t\t__convert_boolean (&c->edgeFlag, v);' % (indent)
dirtyVar = 'cb->edgeFlag'
elif k == 'Normal':
print '%s\t\tconvert(&(c->vertexAttrib[VERT_ATTRIB_NORMAL][0]), v);' % (indent)
dirtyVar = 'cb->vertexAttrib[VERT_ATTRIB_NORMAL]'
elif k == 'TexCoord':
print '%s\t\tconvert(&(c->vertexAttrib[VERT_ATTRIB_TEX0 + i][0]), v);' % (indent)
dirtyVar = 'cb->vertexAttrib[VERT_ATTRIB_TEX0 + i]'
elif k == 'Color':
print '%s\t\tconvert(&(c->vertexAttrib[VERT_ATTRIB_COLOR0][0]), v);' % (indent)
dirtyVar = 'cb->vertexAttrib[VERT_ATTRIB_COLOR0]'
elif k == 'Index':
print '%s\t\tconvert(&(c->colorIndex), v);' % (indent)
dirtyVar = 'cb->colorIndex'
elif k == 'SecondaryColor':
print '%s\t\tconvert(&(c->vertexAttrib[VERT_ATTRIB_COLOR1][0]), v);' % (indent)
dirtyVar = 'cb->vertexAttrib[VERT_ATTRIB_COLOR1]'
elif k == 'FogCoord':
print '%s\t\tconvert(&(c->vertexAttrib[VERT_ATTRIB_FOG][0]), v);' % (indent)
dirtyVar = 'cb->vertexAttrib[VERT_ATTRIB_FOG]'
elif k == 'VertexAttrib':
print '%s\t\tconvert(&(c->vertexAttrib[i][0]), v);' % (indent)
dirtyVar = 'cb->vertexAttrib[i]'
else:
assert 0 # should never get here
print '%s\t\tDIRTY(%s, nbitID);' % (indent, dirtyVar)
# if current_fns[k].has_key( 'array' ):
# print '%s\t\tDIRTY(cb->%s[i], nbitID);' % (indent,name)
# else:
# print '%s\t\tDIRTY(cb->%s, nbitID);' % (indent,name)
print '%s\t\tDIRTY(cb->dirty, nbitID);' % indent
print '%s\t}' % indent
if current_fns[k].has_key( 'array' ):
print '%s\t%s->ptr[i] = v;' % (indent, name )
else:
print '%s\t%s->ptr = v;' % (indent, name )
if current_fns[k].has_key( 'array' ):
print '\t}'
print '}'
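# Illustrative sketch of the recovery code emitted by the loop above for a
# single non-array entry such as Color (placeholder type/size pairs; the real
# ones come from the convert module):
#
#   /* Color */
#   v=NULL;
#   if (v < color->ub4)
#   {
#       v = color->ub4;
#       convert = (convert_func) __convert_rescale_ub4;
#   }
#   /* ... one such block per (type, size), keeping the highest pointer ... */
#   if (v != NULL) {
#       COPY_4V(c->vertexAttrib[VERT_ATTRIB_COLOR0], color_default);
#       convert(&(c->vertexAttrib[VERT_ATTRIB_COLOR0][0]), v);
#       DIRTY(cb->vertexAttrib[VERT_ATTRIB_COLOR0], nbitID);
#       DIRTY(cb->dirty, nbitID);
#   }
#   color->ptr = v;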
|
|
#!/usr/bin/python
#
# Copyright (C) 2012, 2013 Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Script for testing qa.qa_config"""
import unittest
import tempfile
import shutil
import os
from ganeti import utils
from ganeti import serializer
from ganeti import constants
from ganeti import compat
from qa import qa_config
from qa import qa_error
import testutils
class TestTestEnabled(unittest.TestCase):
def testSimple(self):
for name in ["test", ["foobar"], ["a", "b"]]:
self.assertTrue(qa_config.TestEnabled(name, _cfg={}))
for default in [False, True]:
self.assertFalse(qa_config.TestEnabled("foo", _cfg={
"tests": {
"default": default,
"foo": False,
},
}))
self.assertTrue(qa_config.TestEnabled("bar", _cfg={
"tests": {
"default": default,
"bar": True,
},
}))
def testEitherWithDefault(self):
names = qa_config.Either("one")
self.assertTrue(qa_config.TestEnabled(names, _cfg={
"tests": {
"default": True,
},
}))
self.assertFalse(qa_config.TestEnabled(names, _cfg={
"tests": {
"default": False,
},
}))
def testEither(self):
names = [qa_config.Either(["one", "two"]),
qa_config.Either("foo"),
"hello",
["bar", "baz"]]
self.assertTrue(qa_config.TestEnabled(names, _cfg={
"tests": {
"default": True,
},
}))
self.assertFalse(qa_config.TestEnabled(names, _cfg={
"tests": {
"default": False,
},
}))
for name in ["foo", "bar", "baz", "hello"]:
self.assertFalse(qa_config.TestEnabled(names, _cfg={
"tests": {
"default": True,
name: False,
},
}))
self.assertFalse(qa_config.TestEnabled(names, _cfg={
"tests": {
"default": True,
"one": False,
"two": False,
},
}))
self.assertTrue(qa_config.TestEnabled(names, _cfg={
"tests": {
"default": True,
"one": False,
"two": True,
},
}))
self.assertFalse(qa_config.TestEnabled(names, _cfg={
"tests": {
"default": True,
"one": True,
"two": True,
"foo": False,
},
}))
def testEitherNestedWithAnd(self):
names = qa_config.Either([["one", "two"], "foo"])
self.assertTrue(qa_config.TestEnabled(names, _cfg={
"tests": {
"default": True,
},
}))
for name in ["one", "two"]:
self.assertFalse(qa_config.TestEnabled(names, _cfg={
"tests": {
"default": True,
"foo": False,
name: False,
},
}))
def testCallable(self):
self.assertTrue(qa_config.TestEnabled([lambda: True], _cfg={}))
for value in [None, False, "", 0]:
self.assertFalse(qa_config.TestEnabled(lambda: value, _cfg={}))
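# As exercised by the cases above, TestEnabled combines plain names and nested
# lists with AND, while qa_config.Either(...) groups are satisfied if any of
# their members is enabled; names without an explicit setting fall back to the
# "default" flag. Illustrative example:
#
#   qa_config.TestEnabled(["a", qa_config.Either(["b", "c"])], _cfg=cfg)
#
# is only true when "a" is enabled and at least one of "b" or "c" is enabled.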
class TestQaConfigLoad(unittest.TestCase):
def setUp(self):
self.tmpdir = tempfile.mkdtemp()
def tearDown(self):
shutil.rmtree(self.tmpdir)
def testLoadNonExistent(self):
filename = utils.PathJoin(self.tmpdir, "does.not.exist")
self.assertRaises(EnvironmentError, qa_config._QaConfig.Load, filename)
@staticmethod
def _WriteConfig(filename, data):
utils.WriteFile(filename, data=serializer.DumpJson(data))
def _CheckLoadError(self, filename, data, expected):
self._WriteConfig(filename, data)
try:
qa_config._QaConfig.Load(filename)
except qa_error.Error, err:
self.assertTrue(str(err).startswith(expected))
else:
self.fail("Exception was not raised")
def testFailsValidation(self):
filename = utils.PathJoin(self.tmpdir, "qa.json")
testconfig = {}
check_fn = compat.partial(self._CheckLoadError, filename, testconfig)
# No cluster name
check_fn("Cluster name is required")
testconfig["name"] = "cluster.example.com"
# No nodes
check_fn("Need at least one node")
testconfig["nodes"] = [
{
"primary": "xen-test-0",
"secondary": "192.0.2.1",
},
]
# No instances
check_fn("Need at least one instance")
testconfig["instances"] = [
{
"name": "xen-test-inst1",
},
]
# Missing "disk" and "disk-growth"
check_fn("Config option 'disks'")
testconfig["disks"] = []
# Minimal accepted configuration
self._WriteConfig(filename, testconfig)
result = qa_config._QaConfig.Load(filename)
self.assertTrue(result.get("nodes"))
# Non-existent instance check script
testconfig[qa_config._INSTANCE_CHECK_KEY] = \
utils.PathJoin(self.tmpdir, "instcheck")
check_fn("Can't find instance check script")
del testconfig[qa_config._INSTANCE_CHECK_KEY]
# No enabled hypervisor
testconfig[qa_config._ENABLED_HV_KEY] = None
check_fn("No hypervisor is enabled")
# Unknown hypervisor
testconfig[qa_config._ENABLED_HV_KEY] = ["#unknownhv#"]
check_fn("Unknown hypervisor(s) enabled:")
del testconfig[qa_config._ENABLED_HV_KEY]
# Invalid path for virtual cluster base directory
testconfig[qa_config._VCLUSTER_MASTER_KEY] = "value"
testconfig[qa_config._VCLUSTER_BASEDIR_KEY] = "./not//normalized/"
check_fn("Path given in option 'vcluster-basedir' must be")
# Inconsistent virtual cluster settings
testconfig.pop(qa_config._VCLUSTER_MASTER_KEY)
testconfig[qa_config._VCLUSTER_BASEDIR_KEY] = "/tmp"
check_fn("All or none of the")
testconfig[qa_config._VCLUSTER_MASTER_KEY] = "master.example.com"
testconfig.pop(qa_config._VCLUSTER_BASEDIR_KEY)
check_fn("All or none of the")
# Accepted virtual cluster settings
testconfig[qa_config._VCLUSTER_MASTER_KEY] = "master.example.com"
testconfig[qa_config._VCLUSTER_BASEDIR_KEY] = "/tmp"
self._WriteConfig(filename, testconfig)
result = qa_config._QaConfig.Load(filename)
self.assertEqual(result.GetVclusterSettings(),
("master.example.com", "/tmp"))
class TestQaConfigWithSampleConfig(unittest.TestCase):
"""Tests using C{qa-sample.json}.
This test case serves two purposes:
- Ensure shipped C{qa-sample.json} file is considered a valid QA
configuration
- Test some functions of L{qa_config._QaConfig} without having to
mock a whole configuration file
"""
def setUp(self):
filename = "%s/qa/qa-sample.json" % testutils.GetSourceDir()
self.config = qa_config._QaConfig.Load(filename)
def testGetEnabledHypervisors(self):
self.assertEqual(self.config.GetEnabledHypervisors(),
[constants.DEFAULT_ENABLED_HYPERVISOR])
def testGetDefaultHypervisor(self):
self.assertEqual(self.config.GetDefaultHypervisor(),
constants.DEFAULT_ENABLED_HYPERVISOR)
def testGetInstanceCheckScript(self):
self.assertTrue(self.config.GetInstanceCheckScript() is None)
def testGetAndGetItem(self):
self.assertEqual(self.config["nodes"], self.config.get("nodes"))
def testGetMasterNode(self):
self.assertEqual(self.config.GetMasterNode(), self.config["nodes"][0])
def testGetVclusterSettings(self):
# Shipped default settings should be to not use a virtual cluster
self.assertEqual(self.config.GetVclusterSettings(), (None, None))
self.assertFalse(qa_config.UseVirtualCluster(_cfg=self.config))
class TestQaConfig(unittest.TestCase):
def setUp(self):
filename = \
testutils.TestDataFilename("qa-minimal-nodes-instances-only.json")
self.config = qa_config._QaConfig.Load(filename)
def testExclusiveStorage(self):
self.assertRaises(AssertionError, self.config.GetExclusiveStorage)
for value in [False, True, 0, 1, 30804, ""]:
self.config.SetExclusiveStorage(value)
self.assertEqual(self.config.GetExclusiveStorage(), bool(value))
def testIsTemplateSupported(self):
enabled_dts = self.config.GetEnabledDiskTemplates()
for e_s in [False, True]:
self.config.SetExclusiveStorage(e_s)
for template in constants.DISK_TEMPLATES:
if (template not in enabled_dts or
e_s and template not in constants.DTS_EXCL_STORAGE):
self.assertFalse(self.config.IsTemplateSupported(template))
else:
self.assertTrue(self.config.IsTemplateSupported(template))
def testInstanceConversion(self):
self.assertTrue(isinstance(self.config["instances"][0],
qa_config._QaInstance))
def testNodeConversion(self):
self.assertTrue(isinstance(self.config["nodes"][0],
qa_config._QaNode))
def testAcquireAndReleaseInstance(self):
self.assertFalse(compat.any(i.used for i in self.config["instances"]))
inst = qa_config.AcquireInstance(_cfg=self.config)
self.assertTrue(inst.used)
self.assertTrue(inst.disk_template is None)
inst.Release()
self.assertFalse(inst.used)
self.assertTrue(inst.disk_template is None)
self.assertFalse(compat.any(i.used for i in self.config["instances"]))
def testAcquireInstanceTooMany(self):
# Acquire all instances
for _ in range(len(self.config["instances"])):
inst = qa_config.AcquireInstance(_cfg=self.config)
self.assertTrue(inst.used)
self.assertTrue(inst.disk_template is None)
# The next acquisition must fail
self.assertRaises(qa_error.OutOfInstancesError,
qa_config.AcquireInstance, _cfg=self.config)
def testAcquireNodeNoneAdded(self):
self.assertFalse(compat.any(n.added for n in self.config["nodes"]))
# First call must return master node
node = qa_config.AcquireNode(_cfg=self.config)
self.assertEqual(node, self.config.GetMasterNode())
# Next call with exclusion list fails
self.assertRaises(qa_error.OutOfNodesError, qa_config.AcquireNode,
exclude=[node], _cfg=self.config)
def testAcquireNodeTooMany(self):
# Mark all nodes as added (master excluded)
for node in self.config["nodes"]:
if node != self.config.GetMasterNode():
node.MarkAdded()
nodecount = len(self.config["nodes"])
self.assertTrue(nodecount > 1)
acquired = []
for _ in range(nodecount):
node = qa_config.AcquireNode(exclude=acquired, _cfg=self.config)
if node == self.config.GetMasterNode():
self.assertFalse(node.added)
else:
self.assertTrue(node.added)
self.assertEqual(node.use_count, 1)
acquired.append(node)
self.assertRaises(qa_error.OutOfNodesError, qa_config.AcquireNode,
exclude=acquired, _cfg=self.config)
def testAcquireNodeOrder(self):
# Mark all nodes as added (master excluded)
for node in self.config["nodes"]:
if node != self.config.GetMasterNode():
node.MarkAdded()
nodecount = len(self.config["nodes"])
for iterations in [0, 1, 3, 100, 127, 7964]:
acquired = []
for i in range(iterations):
node = qa_config.AcquireNode(_cfg=self.config)
self.assertTrue(node.use_count > 0)
self.assertEqual(node.use_count, (i / nodecount + 1))
acquired.append((node.use_count, node.primary, node))
# Check if returned nodes were in correct order
key_fn = lambda (a, b, c): (a, utils.NiceSortKey(b), c)
self.assertEqual(acquired, sorted(acquired, key=key_fn))
# Release previously acquired nodes
qa_config.ReleaseManyNodes([a[2] for a in acquired])
# Check if nodes were actually released
for node in self.config["nodes"]:
self.assertEqual(node.use_count, 0)
self.assertTrue(node.added or node == self.config.GetMasterNode())
class TestRepresentation(unittest.TestCase):
def _Check(self, target, part):
self.assertTrue(part in repr(target).split())
def testQaInstance(self):
inst = qa_config._QaInstance("inst1.example.com", [])
self._Check(inst, "name=inst1.example.com")
self._Check(inst, "nicmac=[]")
# Default values
self._Check(inst, "disk_template=None")
self._Check(inst, "used=None")
# Use instance
inst.Use()
self._Check(inst, "used=True")
# Disk template
inst.SetDiskTemplate(constants.DT_DRBD8)
self._Check(inst, "disk_template=%s" % constants.DT_DRBD8)
# Release instance
inst.Release()
self._Check(inst, "used=False")
self._Check(inst, "disk_template=None")
def testQaNode(self):
node = qa_config._QaNode("primary.example.com", "192.0.2.1")
self._Check(node, "primary=primary.example.com")
self._Check(node, "secondary=192.0.2.1")
self._Check(node, "added=False")
self._Check(node, "use_count=0")
# Mark as added
node.MarkAdded()
self._Check(node, "added=True")
# Use node
for i in range(1, 5):
node.Use()
self._Check(node, "use_count=%s" % i)
# Release node
for i in reversed(range(1, 5)):
node.Release()
self._Check(node, "use_count=%s" % (i - 1))
self._Check(node, "use_count=0")
# Mark as removed
node.MarkRemoved()
self._Check(node, "added=False")
if __name__ == "__main__":
testutils.GanetiTestProgram()
|
|
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import netaddr
from django.conf import settings
from django.core.urlresolvers import reverse
from django.core import validators
from django.forms import ValidationError # noqa
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon import messages
from horizon.utils import validators as utils_validators
from openstack_dashboard import api
from openstack_dashboard.utils import filters
class CreateGroup(forms.SelfHandlingForm):
name = forms.CharField(label=_("Name"),
max_length=255,
error_messages={
'required': _('This field is required.'),
'invalid': _("The string may only contain"
" ASCII characters and numbers.")},
validators=[validators.validate_slug])
description = forms.CharField(label=_("Description"))
def handle(self, request, data):
try:
sg = api.network.security_group_create(request,
data['name'],
data['description'])
messages.success(request,
_('Successfully created security group: %s')
% data['name'])
return sg
except Exception:
redirect = reverse("horizon:project:access_and_security:index")
exceptions.handle(request,
_('Unable to create security group.'),
redirect=redirect)
class UpdateGroup(forms.SelfHandlingForm):
id = forms.CharField(widget=forms.HiddenInput())
name = forms.CharField(label=_("Name"),
max_length=255,
error_messages={
'required': _('This field is required.'),
'invalid': _("The string may only contain"
" ASCII characters and numbers.")},
validators=[validators.validate_slug])
description = forms.CharField(label=_("Description"))
def handle(self, request, data):
try:
sg = api.network.security_group_update(request,
data['id'],
data['name'],
data['description'])
messages.success(request,
_('Successfully updated security group: %s')
% data['name'])
return sg
except Exception:
redirect = reverse("horizon:project:access_and_security:index")
exceptions.handle(request,
_('Unable to update security group.'),
redirect=redirect)
class AddRule(forms.SelfHandlingForm):
id = forms.CharField(widget=forms.HiddenInput())
rule_menu = forms.ChoiceField(label=_('Rule'),
widget=forms.Select(attrs={
'class': 'switchable',
'data-slug': 'rule_menu'}))
# "direction" field is enabled only when custom mode.
# It is because most common rules in local_settings.py is meaningful
# when its direction is 'ingress'.
direction = forms.ChoiceField(
label=_('Direction'),
required=False,
widget=forms.Select(attrs={
'class': 'switched',
'data-switch-on': 'rule_menu',
'data-rule_menu-tcp': _('Direction'),
'data-rule_menu-udp': _('Direction'),
'data-rule_menu-icmp': _('Direction'),
'data-rule_menu-custom': _('Direction'),
'data-rule_menu-all_tcp': _('Direction'),
'data-rule_menu-all_udp': _('Direction'),
'data-rule_menu-all_icmp': _('Direction'),
}))
ip_protocol = forms.IntegerField(
label=_('IP Protocol'), required=False,
help_text=_("Enter an integer value between 0 and 255 "
"(or -1 which means wildcard)."),
validators=[utils_validators.validate_ip_protocol],
widget=forms.TextInput(attrs={
'class': 'switched',
'data-switch-on': 'rule_menu',
'data-rule_menu-custom': _('IP Protocol')}))
port_or_range = forms.ChoiceField(
label=_('Open Port'),
choices=[('port', _('Port')),
('range', _('Port Range'))],
widget=forms.Select(attrs={
'class': 'switchable switched',
'data-slug': 'range',
'data-switch-on': 'rule_menu',
'data-rule_menu-tcp': _('Open Port'),
'data-rule_menu-udp': _('Open Port')}))
port = forms.IntegerField(label=_("Port"),
required=False,
help_text=_("Enter an integer value "
"between 1 and 65535."),
widget=forms.TextInput(attrs={
'class': 'switched',
'data-switch-on': 'range',
'data-range-port': _('Port')}),
validators=[
utils_validators.validate_port_range])
from_port = forms.IntegerField(label=_("From Port"),
required=False,
help_text=_("Enter an integer value "
"between 1 and 65535."),
widget=forms.TextInput(attrs={
'class': 'switched',
'data-switch-on': 'range',
'data-range-range': _('From Port')}),
validators=[
utils_validators.validate_port_range])
to_port = forms.IntegerField(label=_("To Port"),
required=False,
help_text=_("Enter an integer value "
"between 1 and 65535."),
widget=forms.TextInput(attrs={
'class': 'switched',
'data-switch-on': 'range',
'data-range-range': _('To Port')}),
validators=[
utils_validators.validate_port_range])
icmp_type = forms.IntegerField(label=_("Type"),
required=False,
help_text=_("Enter a value for ICMP type "
"in the range (-1: 255)"),
widget=forms.TextInput(attrs={
'class': 'switched',
'data-switch-on': 'rule_menu',
'data-rule_menu-icmp': _('Type')}),
validators=[
utils_validators.validate_port_range])
icmp_code = forms.IntegerField(label=_("Code"),
required=False,
help_text=_("Enter a value for ICMP code "
"in the range (-1: 255)"),
widget=forms.TextInput(attrs={
'class': 'switched',
'data-switch-on': 'rule_menu',
'data-rule_menu-icmp': _('Code')}),
validators=[
utils_validators.validate_port_range])
remote = forms.ChoiceField(label=_('Remote'),
choices=[('cidr', _('CIDR')),
('sg', _('Security Group'))],
help_text=_('To specify an allowed IP '
'range, select "CIDR". To '
'allow access from all '
'members of another security '
'group select "Security '
'Group".'),
widget=forms.Select(attrs={
'class': 'switchable',
'data-slug': 'remote'}))
cidr = forms.IPField(label=_("CIDR"),
required=False,
initial="0.0.0.0/0",
help_text=_("Classless Inter-Domain Routing "
"(e.g. 192.168.0.0/24)"),
version=forms.IPv4 | forms.IPv6,
mask=True,
widget=forms.TextInput(
attrs={'class': 'switched',
'data-switch-on': 'remote',
'data-remote-cidr': _('CIDR')}))
security_group = forms.ChoiceField(label=_('Security Group'),
required=False,
widget=forms.Select(attrs={
'class': 'switched',
'data-switch-on': 'remote',
'data-remote-sg': _('Security '
'Group')}))
# When a CIDR is given, the ethertype is determined from its IP version.
# When a source group is used, the ethertype needs to be specified explicitly.
ethertype = forms.ChoiceField(label=_('Ether Type'),
required=False,
choices=[('IPv4', _('IPv4')),
('IPv6', _('IPv6'))],
widget=forms.Select(attrs={
'class': 'switched',
'data-slug': 'ethertype',
'data-switch-on': 'remote',
'data-remote-sg': _('Ether Type')}))
def __init__(self, *args, **kwargs):
sg_list = kwargs.pop('sg_list', [])
super(AddRule, self).__init__(*args, **kwargs)
# Determine if there are security groups available for the
# remote group option; add the choices and enable the option if so.
if sg_list:
security_groups_choices = sg_list
else:
security_groups_choices = [("", _("No security groups available"))]
self.fields['security_group'].choices = security_groups_choices
backend = api.network.security_group_backend(self.request)
rules_dict = getattr(settings, 'SECURITY_GROUP_RULES', [])
common_rules = [(k, rules_dict[k]['name'])
for k in rules_dict
if rules_dict[k].get('backend', backend) == backend]
common_rules.sort()
custom_rules = [('tcp', _('Custom TCP Rule')),
('udp', _('Custom UDP Rule')),
('icmp', _('Custom ICMP Rule'))]
if backend == 'neutron':
custom_rules.append(('custom', _('Other Protocol')))
self.fields['rule_menu'].choices = custom_rules + common_rules
self.rules = rules_dict
if backend == 'neutron':
self.fields['direction'].choices = [('ingress', _('Ingress')),
('egress', _('Egress'))]
else:
# direction and ethertype are not supported in Nova secgroup.
self.fields['direction'].widget = forms.HiddenInput()
self.fields['ethertype'].widget = forms.HiddenInput()
# The ip_protocol field is used to specify an arbitrary protocol number
# and is available only for neutron security groups.
self.fields['ip_protocol'].widget = forms.HiddenInput()
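# The rule templates used above come from settings.SECURITY_GROUP_RULES.
# Judging by the keys read in __init__() and clean(), an entry looks roughly
# like the following (illustrative sketch, not necessarily the shipped
# defaults):
#
#   SECURITY_GROUP_RULES = {
#       'all_tcp': {
#           'name': 'ALL TCP',
#           'ip_protocol': 'tcp',
#           'from_port': '1',
#           'to_port': '65535',
#           # optional: 'backend': 'neutron', 'direction': 'ingress'
#       },
#   }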
def clean(self):
cleaned_data = super(AddRule, self).clean()
def update_cleaned_data(key, value):
cleaned_data[key] = value
self.errors.pop(key, None)
rule_menu = cleaned_data.get('rule_menu')
port_or_range = cleaned_data.get("port_or_range")
remote = cleaned_data.get("remote")
icmp_type = cleaned_data.get("icmp_type", None)
icmp_code = cleaned_data.get("icmp_code", None)
from_port = cleaned_data.get("from_port", None)
to_port = cleaned_data.get("to_port", None)
port = cleaned_data.get("port", None)
if rule_menu == 'icmp':
update_cleaned_data('ip_protocol', rule_menu)
if icmp_type is None:
msg = _('The ICMP type is invalid.')
raise ValidationError(msg)
if icmp_code is None:
msg = _('The ICMP code is invalid.')
raise ValidationError(msg)
if icmp_type not in range(-1, 256):
msg = _('The ICMP type not in range (-1, 255)')
raise ValidationError(msg)
if icmp_code not in range(-1, 256):
msg = _('The ICMP code not in range (-1, 255)')
raise ValidationError(msg)
update_cleaned_data('from_port', icmp_type)
update_cleaned_data('to_port', icmp_code)
update_cleaned_data('port', None)
elif rule_menu == 'tcp' or rule_menu == 'udp':
update_cleaned_data('ip_protocol', rule_menu)
update_cleaned_data('icmp_code', None)
update_cleaned_data('icmp_type', None)
if port_or_range == "port":
update_cleaned_data('from_port', port)
update_cleaned_data('to_port', port)
if port is None:
msg = _('The specified port is invalid.')
raise ValidationError(msg)
else:
update_cleaned_data('port', None)
if from_port is None:
msg = _('The "from" port number is invalid.')
raise ValidationError(msg)
if to_port is None:
msg = _('The "to" port number is invalid.')
raise ValidationError(msg)
if to_port < from_port:
msg = _('The "to" port number must be greater than '
'or equal to the "from" port number.')
raise ValidationError(msg)
elif rule_menu == 'custom':
pass
else:
cleaned_data['ip_protocol'] = self.rules[rule_menu]['ip_protocol']
cleaned_data['from_port'] = int(self.rules[rule_menu]['from_port'])
cleaned_data['to_port'] = int(self.rules[rule_menu]['to_port'])
if rule_menu not in ['all_tcp', 'all_udp', 'all_icmp']:
direction = self.rules[rule_menu].get('direction')
cleaned_data['direction'] = direction
# NOTE(amotoki): There are two cases where cleaned_data['direction']
# is empty: (1) a Nova security group is used; since "direction" is a
# HiddenInput, the field exists but its value is ''. (2) a rule template
# other than all_* is used; in this case the default value is None.
# To make sure the 'direction' field is 'ingress' or 'egress', fill it
# in here if it is not specified.
if not cleaned_data['direction']:
cleaned_data['direction'] = 'ingress'
if remote == "cidr":
update_cleaned_data('security_group', None)
else:
update_cleaned_data('cidr', None)
# If cleaned_data does not contain 'cidr', the field has already been marked
# as invalid, so skip further validation for cidr.
# Note that cleaned_data['cidr'] being None means a source group is used.
if 'cidr' in cleaned_data and cleaned_data['cidr'] is not None:
cidr = cleaned_data['cidr']
if not cidr:
msg = _('CIDR must be specified.')
self._errors['cidr'] = self.error_class([msg])
else:
# If cidr is specified, ethertype is determined from IP address
# version. It is used only when Neutron is enabled.
ip_ver = netaddr.IPNetwork(cidr).version
cleaned_data['ethertype'] = 'IPv6' if ip_ver == 6 else 'IPv4'
return cleaned_data
def handle(self, request, data):
try:
rule = api.network.security_group_rule_create(
request,
filters.get_int_or_uuid(data['id']),
data['direction'],
data['ethertype'],
data['ip_protocol'],
data['from_port'],
data['to_port'],
data['cidr'],
data['security_group'])
messages.success(request,
_('Successfully added rule: %s') % unicode(rule))
return rule
except Exception:
redirect = reverse("horizon:project:access_and_security:"
"security_groups:detail", args=[data['id']])
exceptions.handle(request,
_('Unable to add rule to security group.'),
redirect=redirect)
|
|
"""Support for interface with a Gree climate systems."""
from __future__ import annotations
import logging
from greeclimate.device import (
TEMP_MAX,
TEMP_MAX_F,
TEMP_MIN,
TEMP_MIN_F,
FanSpeed,
HorizontalSwing,
Mode,
TemperatureUnits,
VerticalSwing,
)
from homeassistant.components.climate import ClimateEntity
from homeassistant.components.climate.const import (
FAN_AUTO,
FAN_HIGH,
FAN_LOW,
FAN_MEDIUM,
HVAC_MODE_AUTO,
HVAC_MODE_COOL,
HVAC_MODE_DRY,
HVAC_MODE_FAN_ONLY,
HVAC_MODE_HEAT,
HVAC_MODE_OFF,
PRESET_AWAY,
PRESET_BOOST,
PRESET_ECO,
PRESET_NONE,
PRESET_SLEEP,
SUPPORT_FAN_MODE,
SUPPORT_PRESET_MODE,
SUPPORT_SWING_MODE,
SUPPORT_TARGET_TEMPERATURE,
SWING_BOTH,
SWING_HORIZONTAL,
SWING_OFF,
SWING_VERTICAL,
)
from homeassistant.const import (
ATTR_TEMPERATURE,
PRECISION_WHOLE,
TEMP_CELSIUS,
TEMP_FAHRENHEIT,
)
from homeassistant.core import callback
from homeassistant.helpers.device_registry import CONNECTION_NETWORK_MAC
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.helpers.entity import DeviceInfo
from homeassistant.helpers.update_coordinator import CoordinatorEntity
from .const import (
COORDINATORS,
DISPATCH_DEVICE_DISCOVERED,
DISPATCHERS,
DOMAIN,
FAN_MEDIUM_HIGH,
FAN_MEDIUM_LOW,
TARGET_TEMPERATURE_STEP,
)
_LOGGER = logging.getLogger(__name__)
HVAC_MODES = {
Mode.Auto: HVAC_MODE_AUTO,
Mode.Cool: HVAC_MODE_COOL,
Mode.Dry: HVAC_MODE_DRY,
Mode.Fan: HVAC_MODE_FAN_ONLY,
Mode.Heat: HVAC_MODE_HEAT,
}
HVAC_MODES_REVERSE = {v: k for k, v in HVAC_MODES.items()}
PRESET_MODES = [
PRESET_ECO, # Power saving mode
PRESET_AWAY, # Steady heat, or 8C mode on gree units
PRESET_BOOST, # Turbo mode
PRESET_NONE, # Default operating mode
PRESET_SLEEP, # Sleep mode
]
FAN_MODES = {
FanSpeed.Auto: FAN_AUTO,
FanSpeed.Low: FAN_LOW,
FanSpeed.MediumLow: FAN_MEDIUM_LOW,
FanSpeed.Medium: FAN_MEDIUM,
FanSpeed.MediumHigh: FAN_MEDIUM_HIGH,
FanSpeed.High: FAN_HIGH,
}
FAN_MODES_REVERSE = {v: k for k, v in FAN_MODES.items()}
SWING_MODES = [SWING_OFF, SWING_VERTICAL, SWING_HORIZONTAL, SWING_BOTH]
SUPPORTED_FEATURES = (
SUPPORT_TARGET_TEMPERATURE
| SUPPORT_FAN_MODE
| SUPPORT_PRESET_MODE
| SUPPORT_SWING_MODE
)
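# SUPPORTED_FEATURES is a bitmask of the SUPPORT_* flags, so a single
# capability can be checked with a bitwise AND, e.g.:
#
#   has_swing = bool(SUPPORTED_FEATURES & SUPPORT_SWING_MODE)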
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up the Gree HVAC device from a config entry."""
@callback
def init_device(coordinator):
"""Register the device."""
async_add_entities([GreeClimateEntity(coordinator)])
for coordinator in hass.data[DOMAIN][COORDINATORS]:
init_device(coordinator)
hass.data[DOMAIN][DISPATCHERS].append(
async_dispatcher_connect(hass, DISPATCH_DEVICE_DISCOVERED, init_device)
)
class GreeClimateEntity(CoordinatorEntity, ClimateEntity):
"""Representation of a Gree HVAC device."""
def __init__(self, coordinator):
"""Initialize the Gree device."""
super().__init__(coordinator)
self._name = coordinator.device.device_info.name
self._mac = coordinator.device.device_info.mac
@property
def name(self) -> str:
"""Return the name of the device."""
return self._name
@property
def unique_id(self) -> str:
"""Return a unique id for the device."""
return self._mac
@property
def device_info(self) -> DeviceInfo:
"""Return device specific attributes."""
return DeviceInfo(
connections={(CONNECTION_NETWORK_MAC, self._mac)},
identifiers={(DOMAIN, self._mac)},
manufacturer="Gree",
name=self._name,
)
@property
def temperature_unit(self) -> str:
"""Return the temperature units for the device."""
units = self.coordinator.device.temperature_units
return TEMP_CELSIUS if units == TemperatureUnits.C else TEMP_FAHRENHEIT
@property
def precision(self) -> float:
"""Return the precision of temperature for the device."""
return PRECISION_WHOLE
@property
def current_temperature(self) -> float:
"""Return the reported current temperature for the device."""
return self.coordinator.device.current_temperature
@property
def target_temperature(self) -> float:
"""Return the target temperature for the device."""
return self.coordinator.device.target_temperature
async def async_set_temperature(self, **kwargs):
"""Set new target temperature."""
if ATTR_TEMPERATURE not in kwargs:
raise ValueError(f"Missing parameter {ATTR_TEMPERATURE}")
temperature = kwargs[ATTR_TEMPERATURE]
_LOGGER.debug(
"Setting temperature to %d for %s",
temperature,
self._name,
)
self.coordinator.device.target_temperature = round(temperature)
await self.coordinator.push_state_update()
self.async_write_ha_state()
@property
def min_temp(self) -> float:
"""Return the minimum temperature supported by the device."""
return TEMP_MIN if self.temperature_unit == TEMP_CELSIUS else TEMP_MIN_F
@property
def max_temp(self) -> float:
"""Return the maximum temperature supported by the device."""
return TEMP_MAX if self.temperature_unit == TEMP_CELSIUS else TEMP_MAX_F
@property
def target_temperature_step(self) -> float:
"""Return the target temperature step support by the device."""
return TARGET_TEMPERATURE_STEP
@property
def hvac_mode(self) -> str:
"""Return the current HVAC mode for the device."""
if not self.coordinator.device.power:
return HVAC_MODE_OFF
return HVAC_MODES.get(self.coordinator.device.mode)
async def async_set_hvac_mode(self, hvac_mode) -> None:
"""Set new target hvac mode."""
if hvac_mode not in self.hvac_modes:
raise ValueError(f"Invalid hvac_mode: {hvac_mode}")
_LOGGER.debug(
"Setting HVAC mode to %s for device %s",
hvac_mode,
self._name,
)
if hvac_mode == HVAC_MODE_OFF:
self.coordinator.device.power = False
await self.coordinator.push_state_update()
self.async_write_ha_state()
return
if not self.coordinator.device.power:
self.coordinator.device.power = True
self.coordinator.device.mode = HVAC_MODES_REVERSE.get(hvac_mode)
await self.coordinator.push_state_update()
self.async_write_ha_state()
async def async_turn_on(self) -> None:
"""Turn on the device."""
_LOGGER.debug("Turning on HVAC for device %s", self._name)
self.coordinator.device.power = True
await self.coordinator.push_state_update()
self.async_write_ha_state()
async def async_turn_off(self) -> None:
"""Turn off the device."""
_LOGGER.debug("Turning off HVAC for device %s", self._name)
self.coordinator.device.power = False
await self.coordinator.push_state_update()
self.async_write_ha_state()
@property
def hvac_modes(self) -> list[str]:
"""Return the HVAC modes support by the device."""
modes = [*HVAC_MODES_REVERSE]
modes.append(HVAC_MODE_OFF)
return modes
@property
def preset_mode(self) -> str:
"""Return the current preset mode for the device."""
if self.coordinator.device.steady_heat:
return PRESET_AWAY
if self.coordinator.device.power_save:
return PRESET_ECO
if self.coordinator.device.sleep:
return PRESET_SLEEP
if self.coordinator.device.turbo:
return PRESET_BOOST
return PRESET_NONE
async def async_set_preset_mode(self, preset_mode):
"""Set new preset mode."""
if preset_mode not in PRESET_MODES:
raise ValueError(f"Invalid preset mode: {preset_mode}")
_LOGGER.debug(
"Setting preset mode to %s for device %s",
preset_mode,
self._name,
)
self.coordinator.device.steady_heat = False
self.coordinator.device.power_save = False
self.coordinator.device.turbo = False
self.coordinator.device.sleep = False
if preset_mode == PRESET_AWAY:
self.coordinator.device.steady_heat = True
elif preset_mode == PRESET_ECO:
self.coordinator.device.power_save = True
elif preset_mode == PRESET_BOOST:
self.coordinator.device.turbo = True
elif preset_mode == PRESET_SLEEP:
self.coordinator.device.sleep = True
await self.coordinator.push_state_update()
self.async_write_ha_state()
@property
def preset_modes(self) -> list[str]:
"""Return the preset modes support by the device."""
return PRESET_MODES
@property
def fan_mode(self) -> str:
"""Return the current fan mode for the device."""
speed = self.coordinator.device.fan_speed
return FAN_MODES.get(speed)
async def async_set_fan_mode(self, fan_mode):
"""Set new target fan mode."""
if fan_mode not in FAN_MODES_REVERSE:
raise ValueError(f"Invalid fan mode: {fan_mode}")
self.coordinator.device.fan_speed = FAN_MODES_REVERSE.get(fan_mode)
await self.coordinator.push_state_update()
self.async_write_ha_state()
@property
def fan_modes(self) -> list[str]:
"""Return the fan modes support by the device."""
return [*FAN_MODES_REVERSE]
@property
def swing_mode(self) -> str:
"""Return the current swing mode for the device."""
h_swing = self.coordinator.device.horizontal_swing == HorizontalSwing.FullSwing
v_swing = self.coordinator.device.vertical_swing == VerticalSwing.FullSwing
if h_swing and v_swing:
return SWING_BOTH
if h_swing:
return SWING_HORIZONTAL
if v_swing:
return SWING_VERTICAL
return SWING_OFF
async def async_set_swing_mode(self, swing_mode):
"""Set new target swing operation."""
if swing_mode not in SWING_MODES:
raise ValueError(f"Invalid swing mode: {swing_mode}")
_LOGGER.debug(
"Setting swing mode to %s for device %s",
swing_mode,
self._name,
)
self.coordinator.device.horizontal_swing = HorizontalSwing.Center
self.coordinator.device.vertical_swing = VerticalSwing.FixedMiddle
if swing_mode in (SWING_BOTH, SWING_HORIZONTAL):
self.coordinator.device.horizontal_swing = HorizontalSwing.FullSwing
if swing_mode in (SWING_BOTH, SWING_VERTICAL):
self.coordinator.device.vertical_swing = VerticalSwing.FullSwing
await self.coordinator.push_state_update()
self.async_write_ha_state()
@property
def swing_modes(self) -> list[str]:
"""Return the swing modes currently supported for this device."""
return SWING_MODES
@property
def supported_features(self) -> int:
"""Return the supported features for this device integration."""
return SUPPORTED_FEATURES
|
|
"""Let's Encrypt main entry point."""
from __future__ import print_function
import atexit
import functools
import logging.handlers
import os
import sys
import time
import traceback
import zope.component
from acme import jose
import letsencrypt
from letsencrypt import account
from letsencrypt import client
from letsencrypt import cli
from letsencrypt import crypto_util
from letsencrypt import colored_logging
from letsencrypt import configuration
from letsencrypt import constants
from letsencrypt import errors
from letsencrypt import hooks
from letsencrypt import interfaces
from letsencrypt import le_util
from letsencrypt import log
from letsencrypt import reporter
from letsencrypt import renewal
from letsencrypt import storage
from letsencrypt.display import util as display_util, ops as display_ops
from letsencrypt.plugins import disco as plugins_disco
from letsencrypt.plugins import selection as plug_sel
logger = logging.getLogger(__name__)
def _suggest_donation_if_appropriate(config, action):
"""Potentially suggest a donation to support Let's Encrypt."""
if config.staging or config.verb == "renew":
# --dry-run implies --staging
return
if action not in ["renew", "newcert"]:
return
reporter_util = zope.component.getUtility(interfaces.IReporter)
msg = ("If you like Let's Encrypt, please consider supporting our work by:\n\n"
"Donating to ISRG / Let's Encrypt: https://letsencrypt.org/donate\n"
"Donating to EFF: https://eff.org/donate-le\n\n")
reporter_util.add_message(msg, reporter_util.LOW_PRIORITY)
def _report_successful_dry_run(config):
reporter_util = zope.component.getUtility(interfaces.IReporter)
if config.verb != "renew":
reporter_util.add_message("The dry run was successful.",
reporter_util.HIGH_PRIORITY, on_crash=False)
def _auth_from_domains(le_client, config, domains, lineage=None):
"""Authenticate and enroll certificate."""
# Note: This can raise errors... caught above us though. This is now
# a three-way case: reinstall (which results in a no-op here because
# although there is a relevant lineage, we don't do anything to it
# inside this function -- we don't obtain a new certificate), renew
# (which results in treating the request as a renewal), or newcert
# (which results in treating the request as a new certificate request).
# If lineage is specified, use that one instead of looking around for
# a matching one.
if lineage is None:
# This will find a relevant matching lineage that exists
action, lineage = _treat_as_renewal(config, domains)
else:
# Renewal, where we already know the specific lineage we're
# interested in
action = "renew"
if action == "reinstall":
# The lineage already exists; allow the caller to try installing
# it without getting a new certificate at all.
return lineage, "reinstall"
hooks.pre_hook(config)
try:
if action == "renew":
renewal.renew_cert(config, domains, le_client, lineage)
elif action == "newcert":
# TREAT AS NEW REQUEST
lineage = le_client.obtain_and_enroll_certificate(domains)
if lineage is False:
raise errors.Error("Certificate could not be obtained")
finally:
hooks.post_hook(config)
if not config.dry_run and not config.verb == "renew":
_report_new_cert(lineage.cert, lineage.fullchain)
return lineage, action
def _handle_subset_cert_request(config, domains, cert):
"""Figure out what to do if a previous cert had a subset of the names now requested
:param storage.RenewableCert cert:
:returns: Tuple of (str action, cert_or_None) as per _treat_as_renewal
action can be: "newcert" | "renew" | "reinstall"
:rtype: tuple
"""
existing = ", ".join(cert.names())
question = (
"You have an existing certificate that contains a portion of "
"the domains you requested (ref: {0}){br}{br}It contains these "
"names: {1}{br}{br}You requested these names for the new "
"certificate: {2}.{br}{br}Do you want to expand and replace this existing "
"certificate with the new certificate?"
).format(cert.configfile.filename,
existing,
", ".join(domains),
br=os.linesep)
if config.expand or config.renew_by_default or zope.component.getUtility(
interfaces.IDisplay).yesno(question, "Expand", "Cancel",
cli_flag="--expand"):
return "renew", cert
else:
reporter_util = zope.component.getUtility(interfaces.IReporter)
reporter_util.add_message(
"To obtain a new certificate that contains these names without "
"replacing your existing certificate for {0}, you must use the "
"--duplicate option.{br}{br}"
"For example:{br}{br}{1} --duplicate {2}".format(
existing,
sys.argv[0], " ".join(sys.argv[1:]),
br=os.linesep
),
reporter_util.HIGH_PRIORITY)
raise errors.Error(
"User chose to cancel the operation and may "
"reinvoke the client.")
def _handle_identical_cert_request(config, cert):
"""Figure out what to do if a cert has the same names as a previously obtained one
:param storage.RenewableCert cert:
:returns: Tuple of (str action, cert_or_None) as per _treat_as_renewal
action can be: "newcert" | "renew" | "reinstall"
:rtype: tuple
"""
if renewal.should_renew(config, cert):
return "renew", cert
if config.reinstall:
# Set with --reinstall, force an identical certificate to be
# reinstalled without further prompting.
return "reinstall", cert
question = (
"You have an existing certificate that contains exactly the same "
"domains you requested and isn't close to expiry."
"{br}(ref: {0}){br}{br}What would you like to do?"
).format(cert.configfile.filename, br=os.linesep)
if config.verb == "run":
keep_opt = "Attempt to reinstall this existing certificate"
elif config.verb == "certonly":
keep_opt = "Keep the existing certificate for now"
choices = [keep_opt,
"Renew & replace the cert (limit ~5 per 7 days)"]
display = zope.component.getUtility(interfaces.IDisplay)
response = display.menu(question, choices, "OK", "Cancel", default=0)
if response[0] == display_util.CANCEL:
# TODO: Add notification related to command-line options for
# skipping the menu for this case.
raise errors.Error(
"User chose to cancel the operation and may "
"reinvoke the client.")
elif response[1] == 0:
return "reinstall", cert
elif response[1] == 1:
return "renew", cert
else:
assert False, "This is impossible"
def _treat_as_renewal(config, domains):
"""Determine whether there are duplicated names and how to handle
them (renew, reinstall, newcert, or raising an error to stop
the client run if the user chooses to cancel the operation when
prompted).
:returns: Two-element tuple containing desired new-certificate behavior as
a string token ("reinstall", "renew", or "newcert"), plus either
a RenewableCert instance or None if renewal shouldn't occur.
:raises .Error: If the user would like to rerun the client again.
"""
# Considering the possibility that the requested certificate is
# related to an existing certificate. (config.duplicate, which
# is set with --duplicate, skips all of this logic and forces any
# kind of certificate to be obtained with renewal = False.)
if config.duplicate:
return "newcert", None
# TODO: Also address superset case
ident_names_cert, subset_names_cert = _find_duplicative_certs(config, domains)
# XXX ^ schoen is not sure whether that correctly reads the systemwide
# configuration file.
if ident_names_cert is None and subset_names_cert is None:
return "newcert", None
if ident_names_cert is not None:
return _handle_identical_cert_request(config, ident_names_cert)
elif subset_names_cert is not None:
return _handle_subset_cert_request(config, domains, subset_names_cert)
def _find_duplicative_certs(config, domains):
"""Find existing certs that duplicate the request."""
identical_names_cert, subset_names_cert = None, None
cli_config = configuration.RenewerConfiguration(config)
configs_dir = cli_config.renewal_configs_dir
# Verify the directory is there
le_util.make_or_verify_dir(configs_dir, mode=0o755, uid=os.geteuid())
for renewal_file in renewal.renewal_conf_files(cli_config):
try:
candidate_lineage = storage.RenewableCert(renewal_file, cli_config)
except (errors.CertStorageError, IOError):
logger.warning("Renewal conf file %s is broken. Skipping.", renewal_file)
logger.debug("Traceback was:\n%s", traceback.format_exc())
continue
# TODO: Handle these differently depending on whether they are
# expired or still valid?
candidate_names = set(candidate_lineage.names())
if candidate_names == set(domains):
identical_names_cert = candidate_lineage
elif candidate_names.issubset(set(domains)):
# This logic finds and returns the largest subset-names cert
# in the case where there are several available.
if subset_names_cert is None:
subset_names_cert = candidate_lineage
elif len(candidate_names) > len(subset_names_cert.names()):
subset_names_cert = candidate_lineage
return identical_names_cert, subset_names_cert
def _find_domains(config, installer):
if config.domains:
domains = config.domains
else:
domains = display_ops.choose_names(installer)
if not domains:
raise errors.Error("Please specify --domains, or --installer that "
"will help in domain names autodiscovery")
return domains
def _report_new_cert(cert_path, fullchain_path):
"""Reports the creation of a new certificate to the user.
:param str cert_path: path to cert
:param str fullchain_path: path to full chain
"""
expiry = crypto_util.notAfter(cert_path).date()
reporter_util = zope.component.getUtility(interfaces.IReporter)
if fullchain_path:
# Print the path to fullchain.pem because that's what modern webservers
# (Nginx and Apache2.4) will want.
and_chain = "and chain have"
path = fullchain_path
else:
# Unless we're in .csr mode and there really isn't one
and_chain = "has "
path = cert_path
# XXX Perhaps one day we could detect the presence of known old webservers
# and say something more informative here.
msg = ("Congratulations! Your certificate {0} been saved at {1}."
" Your cert will expire on {2}. To obtain a new version of the "
"certificate in the future, simply run Let's Encrypt again."
.format(and_chain, path, expiry))
reporter_util.add_message(msg, reporter_util.MEDIUM_PRIORITY)
def _determine_account(config):
"""Determine which account to use.
In order to make the renewer (configuration de/serialization) happy,
if ``config.account`` is ``None``, it will be updated based on the
user input. Same for ``config.email``.
:param argparse.Namespace config: CLI arguments
:param letsencrypt.interface.IConfig config: Configuration object
:param .AccountStorage account_storage: Account storage.
:returns: Account and optionally ACME client API (byproduct of new
registration).
:rtype: `tuple` of `letsencrypt.account.Account` and
`acme.client.Client`
"""
account_storage = account.AccountFileStorage(config)
acme = None
if config.account is not None:
acc = account_storage.load(config.account)
else:
accounts = account_storage.find_all()
if len(accounts) > 1:
acc = display_ops.choose_account(accounts)
elif len(accounts) == 1:
acc = accounts[0]
else: # no account registered yet
if config.email is None and not config.register_unsafely_without_email:
config.namespace.email = display_ops.get_email()
def _tos_cb(regr):
if config.tos:
return True
msg = ("Please read the Terms of Service at {0}. You "
"must agree in order to register with the ACME "
"server at {1}".format(
regr.terms_of_service, config.server))
obj = zope.component.getUtility(interfaces.IDisplay)
return obj.yesno(msg, "Agree", "Cancel", cli_flag="--agree-tos")
try:
acc, acme = client.register(
config, account_storage, tos_cb=_tos_cb)
except errors.MissingCommandlineFlag:
raise
except errors.Error as error:
logger.debug(error, exc_info=True)
raise errors.Error(
"Unable to register an account with ACME server")
config.namespace.account = acc.id
return acc, acme
def _init_le_client(config, authenticator, installer):
if authenticator is not None:
# if authenticator was given, then we will need account...
acc, acme = _determine_account(config)
logger.debug("Picked account: %r", acc)
# XXX
#crypto_util.validate_key_csr(acc.key)
else:
acc, acme = None, None
return client.Client(config, acc, authenticator, installer, acme=acme)
def install(config, plugins):
"""Install a previously obtained cert in a server."""
# XXX: Update for renewer/RenewableCert
# FIXME: be consistent about whether errors are raised or returned from
# this function ...
try:
installer, _ = plug_sel.choose_configurator_plugins(config, plugins, "install")
except errors.PluginSelectionError as e:
return e.message
domains = _find_domains(config, installer)
le_client = _init_le_client(config, authenticator=None, installer=installer)
assert config.cert_path is not None # required=True in the subparser
le_client.deploy_certificate(
domains, config.key_path, config.cert_path, config.chain_path,
config.fullchain_path)
le_client.enhance_config(domains, config)
def plugins_cmd(config, plugins): # TODO: Use IDisplay rather than print
"""List server software plugins."""
logger.debug("Expected interfaces: %s", config.ifaces)
ifaces = [] if config.ifaces is None else config.ifaces
filtered = plugins.visible().ifaces(ifaces)
logger.debug("Filtered plugins: %r", filtered)
if not config.init and not config.prepare:
print(str(filtered))
return
filtered.init(config)
verified = filtered.verify(ifaces)
logger.debug("Verified plugins: %r", verified)
if not config.prepare:
print(str(verified))
return
verified.prepare()
available = verified.available()
logger.debug("Prepared plugins: %s", available)
print(str(available))
def rollback(config, plugins):
"""Rollback server configuration changes made during install."""
client.rollback(config.installer, config.checkpoints, config, plugins)
def config_changes(config, unused_plugins):
"""Show changes made to server config during installation
View checkpoints and associated configuration changes.
"""
client.view_config_changes(config, num=config.num)
def revoke(config, unused_plugins): # TODO: coop with renewal config
"""Revoke a previously obtained certificate."""
# For user-agent construction
config.namespace.installer = config.namespace.authenticator = "None"
if config.key_path is not None: # revocation by cert key
logger.debug("Revoking %s using cert key %s",
config.cert_path[0], config.key_path[0])
key = jose.JWK.load(config.key_path[1])
else: # revocation by account key
logger.debug("Revoking %s using Account Key", config.cert_path[0])
acc, _ = _determine_account(config)
key = acc.key
acme = client.acme_from_config_key(config, key)
cert = crypto_util.pyopenssl_load_certificate(config.cert_path[1])[0]
acme.revoke(jose.ComparableX509(cert))
def run(config, plugins): # pylint: disable=too-many-branches,too-many-locals
"""Obtain a certificate and install."""
# TODO: Make run as close to auth + install as possible
# Possible difficulties: config.csr was hacked into auth
try:
installer, authenticator = plug_sel.choose_configurator_plugins(config, plugins, "run")
except errors.PluginSelectionError as e:
return e.message
domains = _find_domains(config, installer)
# TODO: Handle errors from _init_le_client?
le_client = _init_le_client(config, authenticator, installer)
lineage, action = _auth_from_domains(le_client, config, domains)
le_client.deploy_certificate(
domains, lineage.privkey, lineage.cert,
lineage.chain, lineage.fullchain)
le_client.enhance_config(domains, config)
if len(lineage.available_versions("cert")) == 1:
display_ops.success_installation(domains)
else:
display_ops.success_renewal(domains, action)
_suggest_donation_if_appropriate(config, action)
def _csr_obtain_cert(config, le_client):
"""Obtain a cert using a user-supplied CSR
This works differently in the CSR case (for now) because we don't
have the privkey, and therefore can't construct the files for a lineage.
So we just save the cert & chain to disk :/
"""
csr, typ = config.actual_csr
certr, chain = le_client.obtain_certificate_from_csr(config.domains, csr, typ)
if config.dry_run:
logger.info(
"Dry run: skipping saving certificate to %s", config.cert_path)
else:
cert_path, _, cert_fullchain = le_client.save_certificate(
certr, chain, config.cert_path, config.chain_path, config.fullchain_path)
_report_new_cert(cert_path, cert_fullchain)
def obtain_cert(config, plugins, lineage=None):
"""Authenticate & obtain cert, but do not install it.
This implements the 'certonly' subcommand, and is also called from within the
'renew' command."""
# SETUP: Select plugins and construct a client instance
try:
# installers are used in auth mode to determine domain names
installer, auth = plug_sel.choose_configurator_plugins(config, plugins, "certonly")
except errors.PluginSelectionError as e:
logger.info("Could not choose appropriate plugin: %s", e)
raise
le_client = _init_le_client(config, auth, installer)
# SHOWTIME: Possibly obtain/renew a cert, and set action to renew | newcert | reinstall
if config.csr is None: # the common case
domains = _find_domains(config, installer)
_, action = _auth_from_domains(le_client, config, domains, lineage)
else:
assert lineage is None, "Did not expect a CSR with a RenewableCert"
_csr_obtain_cert(config, le_client)
action = "newcert"
# POSTPRODUCTION: Cleanup, deployment & reporting
notify = zope.component.getUtility(interfaces.IDisplay).notification
if config.dry_run:
_report_successful_dry_run(config)
elif config.verb == "renew":
if installer is None:
notify("new certificate deployed without reload, fullchain is {0}".format(
lineage.fullchain), pause=False)
else:
# In case of a renewal, reload server to pick up new certificate.
# In principle we could have a configuration option to inhibit this
# from happening.
installer.restart()
notify("new certificate deployed with reload of {0} server; fullchain is {1}".format(
config.installer, lineage.fullchain), pause=False)
elif action == "reinstall" and config.verb == "certonly":
notify("Certificate not yet due for renewal; no action taken.")
_suggest_donation_if_appropriate(config, action)
def renew(config, unused_plugins):
"""Renew previously-obtained certificates."""
try:
renewal.renew_all_lineages(config)
finally:
hooks.post_hook(config, final=True)
def setup_log_file_handler(config, logfile, fmt):
"""Setup file debug logging."""
log_file_path = os.path.join(config.logs_dir, logfile)
handler = logging.handlers.RotatingFileHandler(
log_file_path, maxBytes=2 ** 20, backupCount=10)
    # Rotate the log on each invocation. Rollover only happens when both
    # maxBytes and backupCount are nonzero, so maxBytes is set large enough
    # (1MB) that a single CLI invocation should not overrun it.
handler.doRollover() # TODO: creates empty letsencrypt.log.1 file
handler.setLevel(logging.DEBUG)
handler_formatter = logging.Formatter(fmt=fmt)
handler_formatter.converter = time.gmtime # don't use localtime
handler.setFormatter(handler_formatter)
return handler, log_file_path
def _cli_log_handler(config, level, fmt):
if config.text_mode or config.noninteractive_mode or config.verb == "renew":
handler = colored_logging.StreamHandler()
handler.setFormatter(logging.Formatter(fmt))
else:
handler = log.DialogHandler()
        # the dialog box is small, so display as little as possible
handler.setFormatter(logging.Formatter("%(message)s"))
handler.setLevel(level)
return handler
def setup_logging(config, cli_handler_factory, logfile):
"""Setup logging."""
fmt = "%(asctime)s:%(levelname)s:%(name)s:%(message)s"
level = -config.verbose_count * 10
file_handler, log_file_path = setup_log_file_handler(
config, logfile=logfile, fmt=fmt)
cli_handler = cli_handler_factory(config, level, fmt)
# TODO: use fileConfig?
root_logger = logging.getLogger()
root_logger.setLevel(logging.DEBUG) # send all records to handlers
root_logger.addHandler(cli_handler)
root_logger.addHandler(file_handler)
logger.debug("Root logging level set at %d", level)
logger.info("Saving debug log to %s", log_file_path)
def _handle_exception(exc_type, exc_value, trace, config):
"""Logs exceptions and reports them to the user.
Config is used to determine how to display exceptions to the user. In
general, if config.debug is True, then the full exception and traceback is
shown to the user, otherwise it is suppressed. If config itself is None,
then the traceback and exception is attempted to be written to a logfile.
If this is successful, the traceback is suppressed, otherwise it is shown
to the user. sys.exit is always called with a nonzero status.
"""
logger.debug(
"Exiting abnormally:%s%s",
os.linesep,
"".join(traceback.format_exception(exc_type, exc_value, trace)))
if issubclass(exc_type, Exception) and (config is None or not config.debug):
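        # Ordinary exceptions get a condensed report unless config.debug is
        # set; KeyboardInterrupt and SystemExit do not subclass Exception, so
        # they fall through to the full-traceback branch at the bottom.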
if config is None:
logfile = "letsencrypt.log"
try:
with open(logfile, "w") as logfd:
traceback.print_exception(
exc_type, exc_value, trace, file=logfd)
except: # pylint: disable=bare-except
sys.exit("".join(
traceback.format_exception(exc_type, exc_value, trace)))
if issubclass(exc_type, errors.Error):
sys.exit(exc_value)
else:
# Here we're passing a client or ACME error out to the client at the shell
# Tell the user a bit about what happened, without overwhelming
# them with a full traceback
err = traceback.format_exception_only(exc_type, exc_value)[0]
# Typical error from the ACME module:
# acme.messages.Error: urn:acme:error:malformed :: The request message was
# malformed :: Error creating new registration :: Validation of contact
# mailto:none@longrandomstring.biz failed: Server failure at resolver
if (("urn:acme" in err and ":: " in err and
config.verbose_count <= cli.flag_default("verbose_count"))):
# prune ACME error code, we have a human description
_code, _sep, err = err.partition(":: ")
msg = "An unexpected error occurred:\n" + err + "Please see the "
if config is None:
msg += "logfile '{0}' for more details.".format(logfile)
else:
msg += "logfiles in {0} for more details.".format(config.logs_dir)
sys.exit(msg)
else:
sys.exit("".join(
traceback.format_exception(exc_type, exc_value, trace)))
def main(cli_args=sys.argv[1:]):
"""Command line argument parsing and main script execution."""
sys.excepthook = functools.partial(_handle_exception, config=None)
plugins = plugins_disco.PluginsRegistry.find_all()
# note: arg parser internally handles --help (and exits afterwards)
args = cli.prepare_and_parse_args(plugins, cli_args)
config = configuration.NamespaceConfig(args)
zope.component.provideUtility(config)
# Setup logging ASAP, otherwise "No handlers could be found for
# logger ..." TODO: this should be done before plugins discovery
for directory in config.config_dir, config.work_dir:
le_util.make_or_verify_dir(
directory, constants.CONFIG_DIRS_MODE, os.geteuid(),
"--strict-permissions" in cli_args)
# TODO: logs might contain sensitive data such as contents of the
# private key! #525
le_util.make_or_verify_dir(
config.logs_dir, 0o700, os.geteuid(), "--strict-permissions" in cli_args)
setup_logging(config, _cli_log_handler, logfile='letsencrypt.log')
logger.debug("letsencrypt version: %s", letsencrypt.__version__)
# do not log `config`, as it contains sensitive data (e.g. revoke --key)!
logger.debug("Arguments: %r", cli_args)
logger.debug("Discovered plugins: %r", plugins)
sys.excepthook = functools.partial(_handle_exception, config=config)
# Displayer
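    # config.quiet silences output (devnull); noninteractive and renew runs
    # write plainly to stdout; text mode uses FileDisplay; otherwise the
    # interactive ncurses UI is used.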
if config.quiet:
config.noninteractive_mode = True
displayer = display_util.NoninteractiveDisplay(open(os.devnull, "w"))
elif config.noninteractive_mode:
displayer = display_util.NoninteractiveDisplay(sys.stdout)
elif config.text_mode:
displayer = display_util.FileDisplay(sys.stdout)
elif config.verb == "renew":
config.noninteractive_mode = True
displayer = display_util.NoninteractiveDisplay(sys.stdout)
else:
displayer = display_util.NcursesDisplay()
zope.component.provideUtility(displayer)
# Reporter
report = reporter.Reporter(config)
zope.component.provideUtility(report)
atexit.register(report.atexit_print_messages)
return config.func(config, plugins)
if __name__ == "__main__":
err_string = main()
if err_string:
logger.warn("Exiting with message %s", err_string)
sys.exit(err_string) # pragma: no cover
|
|
# -*- coding: utf-8 -*-
from subprocess import Popen, PIPE
from os.path import join, isfile
from re import search, DOTALL
from tests.functional_tests import isolate, run_tuttle_file, tuttle_invalidate
from tuttle.invalidation import BROTHER_INVALID
class TestCommands:
@isolate(['A'])
def test_command_invalidate(self):
""" Should display a message if there is no tuttlefile in the current directory"""
project = """file://B <- file://A
echo A creates B
echo A creates B > B
"""
rcode, output = run_tuttle_file(project)
assert rcode == 0
assert isfile('B')
rcode, output = tuttle_invalidate(urls=['file://B'])
assert rcode == 0, output
assert output.find('* file://B') >= 0, output
assert not isfile('B'), output
@isolate(['A'])
def test_command_invalidate_with_dependencies(self):
""" Should display a message if there is no tuttlefile in the current directory"""
project = """file://B <- file://A
echo A creates B
echo A creates B > B
file://C <- file://B
echo A creates C
echo A creates C > C
"""
rcode, output = run_tuttle_file(project)
assert rcode == 0
assert isfile('B')
assert isfile('C')
rcode, output = tuttle_invalidate(urls=['file://B'])
assert rcode == 0, output
assert output.find('* file://B') >= 0, output
assert output.find('* file://C') >= 0, output
assert not isfile('B'), output
assert not isfile('C'), output
@isolate(['A'])
def test_duration(self):
""" Should display a message if there is no tuttlefile in the current directory"""
project = """file://B <- file://A
echo A creates B
python -c "import time; time.sleep(1)"
echo A creates B > B
file://C <- file://B
echo A creates C
python -c "import time; time.sleep(1.2)"
echo A creates C > C
"""
rcode, output = run_tuttle_file(project)
assert rcode == 0, output
assert isfile('B')
assert isfile('C')
rcode, output = tuttle_invalidate(urls=['file://B'])
assert rcode == 0, output
assert output.find('* file://B') >= 0, output
assert output.find('* file://C') >= 0, output
assert output.find('s of processing will be lost') >= 0, output
assert output.find('\n0s') == -1, output
assert not isfile('B'), output
assert not isfile('C'), output
@isolate
def test_invalidate_no_tuttle_file(self):
""" Should display a message when launching invalidate and there is tuttlefile in the current directory"""
proc = Popen(['tuttle', 'invalidate', 'file://B'], stdout=PIPE)
output = proc.stdout.read()
rcode = proc.wait()
assert rcode == 2, output
assert output.find('No tuttlefile') >= 0, output
@isolate
def test_invalidate_nothing_have_run(self):
""" Should display a message when launching invalidate and tuttle hasn't been run before :
nothing to invalidate """
project = """file://B <- file://A
echo A creates B
echo A creates B > B
"""
rcode, output = tuttle_invalidate(project=project)
assert rcode == 2, output
assert output.find("Tuttle has not run yet ! It has produced nothing, "
"so there is nothing to invalidate.") >= 0, output
@isolate(['A'])
def test_try_invalidate_bad_project(self):
""" Should display a message if the tuttlefile is incorrect"""
project = """file://B <- file://A
echo A produces B
echo A produces B > B
"""
rcode, output = run_tuttle_file(project)
assert rcode == 0
bad_project = """file://B <- file://A bad
echo A produces B
echo A produces B > B
"""
rcode, output = tuttle_invalidate(project=bad_project, urls=['file://B'])
assert rcode == 2, output
assert output.find('Invalidation has failed because tuttlefile is has errors') >= 0, output
@isolate(['A'])
def test_invalidate_no_urls(self):
""" Should remove everything that is not in the last version of the tuttlefile """
project = """file://B <- file://A
echo A produces B
echo A produces B > B
file://C <- file://B
echo B produces C
echo B produces C > C
"""
rcode, output = run_tuttle_file(project)
assert rcode == 0
new_project = """file://B <- file://A
echo A produces B
echo A produces B > B
"""
rcode, output = tuttle_invalidate(project=new_project)
assert rcode == 0, output
assert output.find('* file://C') >= 0, output
assert output.find('no longer created') >= 0, output
@isolate(['A'])
def test_invalid_url_should_fail(self):
""" Should display an error if the url passed in parameter is not valid or unknown scheme """
project = """file://B <- file://A
echo A produces B
echo A produces B > B
"""
rcode, output = run_tuttle_file(project)
assert rcode == 0
rcode, output = tuttle_invalidate(urls=['error://B'])
assert rcode == 2, output
assert output.find("'error://B'") >= 0, output
@isolate(['A'])
def test_unknown_resource_should_be_ignored(self):
""" Should display a message if there is no tuttlefile in the current directory"""
project = """file://B <- file://A
echo A produces B
echo A produces B > B
"""
rcode, output = run_tuttle_file(project)
assert rcode == 0
rcode, output = tuttle_invalidate(urls=['file://C'])
assert rcode == 0, output
assert output.find("Ignoring file://C") >= 0, output
@isolate(['A'])
def test_not_produced_resource_should_be_ignored(self):
""" Should display a message if there is no tuttlefile in the current directory"""
project = """file://B <- file://A
echo A produces B
echo A produces B > B
"""
rcode, output = run_tuttle_file(project)
assert rcode == 0
project = """file://B <- file://A
echo A produces B
echo A produces B > B
file://C <- file://B
echo B produces C
echo B produces C > C
"""
rcode, output = tuttle_invalidate(project=project, urls=['file://C'])
assert rcode == 0, output
assert output.find("Ignoring file://C : this resource has not been produced yet") >= 0, output
@isolate(['A'])
def test_invalidate_an_output_should_invalidate_all_outputs(self):
""" Should invalidate all outputs if one is invalidated """
project = """file://B file://C <- file://A
echo A produces B
echo A produces B > B
echo A produces C
echo A produces C > C
"""
rcode, output = run_tuttle_file(project)
assert rcode == 0
rcode, output = tuttle_invalidate(urls=['file://C'])
assert rcode == 0, output
assert output.find("* file://B") >= 0, output
assert output.find("* file://C") >= 0, output
assert output.find(BROTHER_INVALID.format("file://C")) >= 0, output
@isolate(['A'])
def test_new_primary_resources_should_not_be_invalidated(self):
""" A primary resource that was produced with previous workflow shouldn't invalidate dependencies
if it hasn't changed"""
project = """file://B <- file://A
echo A produces B
echo A produces B > B
file://C <- file://B
echo B produces C
echo B produces C > C
"""
rcode, output = run_tuttle_file(project)
print output
assert rcode == 0, output
project = """
file://C <- file://B
echo B produces C
echo B produces C > C
"""
rcode, output = tuttle_invalidate(project=project)
assert rcode == 0, output
assert output.find("Report has been updated to reflect") >= 0, output
@isolate(['A'])
def test_modified_new_primary_resources_should_invalidate_dependencies(self):
""" If a resource has become a primary resource, but signature has not changed
that was produced with previous workflow shouldn't invalidate dependencies
if it hasn't changed"""
project = """file://B <- file://A
echo A produces B
echo A produces B > B
file://C <- file://B
echo B produces C
echo B produces C > C
"""
rcode, output = run_tuttle_file(project)
print output
assert rcode == 0, output
with open('B', "w") as f:
f.write("Another B")
project = """
file://C <- file://B
echo B produces C
echo B produces C > C
"""
rcode, output = tuttle_invalidate(project=project)
assert rcode == 0, output
assert output.find("file://C") >= 0, output
@isolate(['A'])
def test_not_modified_new_primary_resources_should_not_invalidate_dependencies(self):
""" If a resource has become a primary resource, but signature has not changed
that was produced with previous workflow shouldn't invalidate dependencies
if it hasn't changed"""
project = """file://B <- file://A
echo A produces B
echo A produces B > B
file://C <- file://B
echo B produces C
echo B produces C > C
"""
rcode, output = run_tuttle_file(project)
print output
assert rcode == 0, output
project = """
file://C <- file://B
echo B produces C
echo B produces C > C
"""
rcode, output = tuttle_invalidate(project=project)
assert rcode == 0, output
assert output.find("Report has been updated to reflect") >= 0, output
@isolate(['A'])
def test_adding_an_output_invalidates_process(self):
""" Adding an ingutput to a process that have succeeded should invalidate the whole process,
thus invalidate all other resources """
# TODO : Really ?
project = """file://B <- file://A
echo A produces B
echo A produces B > B
echo A produces C
echo A produces C > C
"""
rcode, output = run_tuttle_file(project)
print output
assert rcode == 0, output
project = """file://B file://C<- file://A
echo A produces B
echo A produces B > B
echo A produces C
echo A produces C > C
"""
rcode, output = run_tuttle_file(project)
assert rcode == 0, output
assert output.find("file://B") >= 0, output
assert output.find("A produces B") >= 0, output
assert output.find("A produces C") >= 0, output
@isolate(['A', 'B'])
def test_removing_an_output_invalidates_process(self):
""" Removing an output to a process that have succeeded should invalidate the whole process,
thus invalidating all resources """
project = """file://B file://C <- file://A
echo A produces B
echo A produces B > B
echo A produces C
echo A produces C > C
"""
rcode, output = run_tuttle_file(project)
print output
assert rcode == 0, output
project = """file://B <- file://A
echo A produces B
echo A produces B > B
echo A produces C
echo A produces C > C
"""
rcode, output = run_tuttle_file(project)
assert rcode == 0, output
assert output.find("file://B") >= 0, output
assert output.find("file://C") >= 0, output
assert output.find("A produces B") >= 0, output
assert output.find("A produces C") >= 0, output
@isolate(['A'])
def test_workflow_must_be_run_after_resource_invalidation(self):
""" After invalidation of a resource, tuttle run should re-produce this resource """
project = """file://B <- file://A
echo A produces B
echo A produces B > B
file://C <- file://B
echo B produces C
echo B produces C > C
"""
rcode, output = run_tuttle_file(project)
assert rcode == 0, output
rcode, output = tuttle_invalidate(urls=["file://C"])
assert rcode == 0, output
assert output.find("file://C") >= 0, output
rcode, output = run_tuttle_file(project)
assert output.find("Nothing to do") == -1, output
assert output.find("B produces C") >= 0, output
@isolate(['A'])
def test_workflow_must_run_after_invalidation_because_of_an_error(self):
""" If a process fails, it can be invalidated then run again (from bug) """
project = """file://B <- file://A
echo A produces B
echo A produces B > B
file://C <- file://B
ERROR
"""
rcode, output = run_tuttle_file(project)
assert rcode == 2, output
rcode, output = tuttle_invalidate()
assert rcode == 0, output
rcode, output = run_tuttle_file(project)
        # If we get here, it means no exception was raised
assert rcode == 2, output
@isolate(['A'])
def test_workflow_must_be_run_after_resource_invalidation_in_cascade(self):
""" After invalidation of a resource, tuttle run should re-produce this resource and the dependencies"""
project = """file://B <- file://A
echo A produces B
echo A produces B > B
"""
rcode, output = run_tuttle_file(project)
assert rcode == 0, output
rcode, output = tuttle_invalidate(urls=["file://A"])
assert rcode == 0, output
assert output.find("Ignoring file://A") >= 0, output
@isolate(['A'])
def test_process_in_error_should_be_invalidated(self):
""" If a process failed, its dependencies should be invalidated """
project = """file://B <- file://A
echo A produces B
echo A produces B > B
an error
"""
rcode, output = run_tuttle_file(project)
print output
assert rcode == 2, output
assert isfile('B')
rcode, output = tuttle_invalidate(project=project)
assert rcode == 0, output
assert output.find("file://B") >= 0, output
assert not isfile('B'), output
@isolate(['A'])
def test_a_failing_process_without_output_should_be_invalidated(self):
""" When a process fail, Tuttle should exit with status code 2, even if the process has no outputs"""
project = """file://B <- file://A
echo A produces B
echo B > B
<- file://B
error
echo This should not be written
echo C > C
"""
rcode, output = run_tuttle_file(project)
assert rcode == 2
assert isfile('B')
assert not isfile('C')
report_path = join('.tuttle', 'report.html')
assert isfile(report_path)
report = open(report_path).read()
title_match_failure = search(r'<h1>.*Failure.*</h1>', report, DOTALL)
assert title_match_failure, report
rcode, output = tuttle_invalidate()
assert rcode == 0
report = open(report_path).read()
title_match_failure = search(r'<h1>.*Failure.*</h1>', report, DOTALL)
assert not title_match_failure, title_match_failure.group()
@isolate(['A', 'B'])
def test_dont_invalidate_outputless_process(self):
""" Don't invalidate a successful process without outputs(from bug) """
first = """file://C <- file://A
echo A produces C > C
<- file://B
echo Action after B is created
"""
rcode, output = run_tuttle_file(first)
assert rcode == 0, output
rcode, output = tuttle_invalidate()
assert rcode == 0, output
rcode, output = run_tuttle_file(first)
assert rcode == 0
assert output.find("Nothing to do") >= 0, output
assert output.find("Action") == -1, output
@isolate(['A'])
def test_changes_in_the_graph_without_removing_resource(self):
""" If the graph changes without removing resource tuttle should display a message
event if the removed resource is used elsewhere (from bug) """
first = """ <- file://A
echo Action after A is created.
file://B <- file://A
echo B > B
file://C <- file://B
echo C > C
"""
rcode, output = run_tuttle_file(first)
print output
assert rcode == 0, output
second = """ <- file://A
echo Action after A is created.
file://C <- file://B
echo C > C
"""
rcode, output = tuttle_invalidate(project=second)
assert rcode == 0, output
assert output.find("Report has been updated to reflect") >= 0, output
@isolate(['A', 'B'])
def test_dont_mess_up_with_outputless_process(self):
""" Successful outputless process must not run again, even if some other
process have the same input (from bug) """
first = """file://C <- file://A
echo A produces C > C
<- file://A
echo Action from A
"""
rcode, output = run_tuttle_file(first)
assert rcode == 0, output
rcode, output = run_tuttle_file(first)
assert rcode == 0
assert output.find("Nothing to do") >= 0, output
assert output.find("Action") == -1, output
@isolate(['A'])
def test_changes_in_a_process_invalidates_depending_failing_process_on_run(self):
""" If a process failed, changing a process that it depends on should
invalidate it before running (from bug) """
first = """ file://B <- file://A
echo A produces B > B
file://C <- file://B
echo B produces invalid C
echo B produces invalid C > C
error
"""
rcode, output = run_tuttle_file(first)
print output
assert rcode == 2, output
second = """file://B <- file://A
echo A produces another B > B
file://C <- file://B
echo B produces invalid C
echo B produces invalid C > C
error
"""
rcode, output = run_tuttle_file(second)
assert rcode == 2, output
assert output.find("* file://B") >= 0, output
assert output.find("B produces invalid C") >= 0, output
@isolate(['A'])
def test_changes_in_a_process_invalidates_depending_failing_process_on_invalidate(self):
""" If a process failed, changing a process that it depends on should
invalidate it before running (from bug) """
first = """ file://B <- file://A
echo A produces B > B
file://C <- file://B
echo B produces invalid C
echo B produces invalid C > C
error
"""
rcode, output = run_tuttle_file(first)
print output
assert rcode == 2, output
second = """file://B <- file://A
echo A produces another B > B
file://C <- file://B
echo B produces invalid C
echo B produces invalid C > C
error
"""
rcode, output = run_tuttle_file(second)
assert rcode == 2, output
assert output.find("* file://B") >= 0, output
assert output.find("* file://C") >= 0, output
@isolate(['A'])
def test_changes_in_resource_invalidates_failing_process(self):
""" If a process failed, changing input should invalidate it before running (from bug) """
project = """ file://B <- file://A
echo A produces B > B
error
"""
rcode, output = run_tuttle_file(project)
print output
assert rcode == 2, output
        open('A', 'w').write('modified')
rcode, output = run_tuttle_file(project)
assert rcode == 2, output
assert output.find("* file://B") >= 0, output
assert output.find("Fix the process and run tuttle again") == -1, output
|
|
'''Tokens and tokenization
Created on 24 August 2012
@author: F. Peschanski
'''
from copy import deepcopy
from collections import defaultdict
from popparser.llparser import ParsePosition
class Token:
def __init__(self, token_type, value, start_pos, end_pos):
self.token_type = token_type
self.value = value
self.start_pos = start_pos
self.end_pos = end_pos
@property
def iseof(self):
return False
@property
def iserror(self):
return False
def __str__(self):
return str(self.value)
def __repr__(self):
return "Token(token_type='{0}', value='{1}', "\
"start_pos={2}, end_pos={3})"\
.format(self.token_type, self.value,
self.start_pos, self.end_pos)
class EOFToken(Token):
def __init__(self, pos):
Token.__init__(self, '<<EOF>>', '<<EOF>>', pos, pos)
@property
def iseof(self):
return True
@property
def pos(self):
return self.start_pos
def __str__(self):
return '<<EOF>>'
def __repr__(self):
return "EOFToken(start_pos={0}, end_pos={1})"\
.format(self.start_pos, self.end_pos)
class ErrorToken(Token):
def __init__(self, message, pos):
Token.__init__(self, '<<ERROR>>', message, pos, pos)
@property
def message(self):
return str(self.value)
@property
def iserror(self):
return True
def __str__(self):
return "Token error: " + self.message
def __repr__(self):
return "ErrorToken(token_type='{0}', msg='{1}', "\
"start_pos={2}, end_pos={3})"\
.format(self.token_type, self.value,
self.start_pos, self.end_pos)
class Tokenizer:
def __init__(self):
self.__token_rules = {} # dict[str,List[TokenRule]]
self.__none_rules = [] # rules with no lookup available
self.__backend = None
self.reset()
def reset(self):
self.pos = ParsePosition()
self.lines = defaultdict() # dict[int,ParsePosition]
@property
def backend(self):
return self.__backend
@backend.setter
def backend(self, backend_):
        self.__backend = backend_  # assign the attribute directly; "self.backend = ..." would recurse
def from_string(self, string):
self.__backend = StrTokenizer(self, string)
self.reset()
@property
def position(self):
return self.pos
def add_rule(self, token_rule):
if token_rule.lookups is None:
self.__none_rules.append(token_rule)
else:
for lookup in token_rule.lookups:
if lookup not in self.__token_rules:
rules = []
self.__token_rules[lookup] = rules
else:
rules = self.__token_rules[lookup]
rules.append(token_rule)
def forward(self):
char = self.peek_char()
if char is None:
return False # cannot advance forward
if char == '\n':
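            # remember the position of this end-of-line so that backward()
            # can later step back across the line boundary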
self.lines[self.pos.offset] = self.pos
self.pos = self.pos.next_line()
else:
self.pos = self.pos.next_char()
return True
def forwards(self, nb):
if nb < 0:
return self.backwards(-nb)
saved_pos = self.pos
saved_lines = deepcopy(self.lines)
for _ in range(nb):
moved = self.forward()
if not moved:
self.pos = saved_pos
self.lines = saved_lines
return False
# end of for
return True
def backward(self):
if self.pos.offset == 0:
return False
try:
prev = self.lines[self.pos.offset - 1]
self.pos = prev
del self.lines[self.pos.offset]
return True
except KeyError:
# not an end of line
self.pos = self.pos.prev_char()
return True
def backwards(self, nb):
if nb < 0:
return self.forwards(-nb)
saved_pos = self.pos
saved_lines = deepcopy(self.lines)
for _ in range(nb):
moved = self.backward()
if not moved:
self.pos = saved_pos
self.lines = saved_lines
return False
# end of for
return True
def peek_char(self):
if not self.__backend:
raise NotImplementedError("No backend")
else:
return self.__backend.peek_char()
def peek_line(self):
if not self.__backend:
raise NotImplementedError("No backend")
else:
return self.__backend.peek_line()
@property
def at_eof(self):
return self.peek_char() is None
def next_char(self):
char = self.peek_char()
if char is None:
return None
self.forward()
return char
def consume(self, string):
saved_pos = self.pos
saved_lines = deepcopy(self.lines)
for char in string:
next_char = self.peek_char()
if (next_char is None) or (next_char != char):
self.pos = saved_pos
self.lines = saved_lines
return False
# ok, same char
self.forward()
# end of for, the string has been consumed
return True
def put_back(self, token):
self.backwards(token.end_pos.offset - token.start_pos.offset)
#XXX: check needed ?
#if self.peek() != token:
# raise ValueError("Wrong token to put back")
def next(self):
'''Return the next token.
'''
lookup = self.peek_char()
if lookup is None:
return EOFToken(self.pos)
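        # try the rules registered for this lookahead character first,
        # then fall back to the rules that declared no lookup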
if lookup in self.__token_rules:
rules = self.__token_rules[lookup] + self.__none_rules
else:
rules = self.__none_rules
for rule in rules:
token = rule.recognize(self)
if token is not None:
return token
return ErrorToken(repr(lookup), self.pos)
def peek(self):
saved_pos = self.pos
saved_lines = deepcopy(self.lines)
token = self.next()
self.pos = saved_pos
self.lines = saved_lines
return token
def substring(self, start_offset, end_offset):
return self.__backend.substring(start_offset, end_offset)
def __str__(self):
msg = ""
msg += str(self.pos.line_pos)
msg += ": "
if self.pos.line_pos in self.lines:
start_offset = self.lines[self.pos.line_pos]
else:
start_offset = 0
end_offset = self.pos.offset
msg += self.substring(start_offset, end_offset)
msg += "_"
msg += self.peek_line()
return msg
def __repr__(self):
return "<Tokenizer: " + str(self) + ">"
class TokenizerBackend:
pass
class StrTokenizer(TokenizerBackend):
def __init__(self, tokenizer, string):
self.tokenizer = tokenizer
self.string = string
def peek_char(self):
if self.tokenizer.pos.offset > len(self.string) - 1:
return None
return self.string[self.tokenizer.pos.offset]
def peek_line(self):
line = None
offset = self.tokenizer.pos.offset
lenstr = len(self.string)
while True:
if offset >= lenstr:
break
if line is None:
line = ""
char = self.string[offset]
if char == '\n':
break
line += char
offset += 1
# at the end of the line
return line
def substring(self, start_offset, end_offset):
return self.string[start_offset:end_offset]
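# A minimal usage sketch of the character-level cursor API above. It is not
# part of the tokenizer proper: it only exercises from_string / peek_char /
# consume / peek_line / forwards / backwards, and it assumes that
# popparser.llparser.ParsePosition starts at offset 0 and supports next_char,
# next_line and prev_char as the methods above expect. Token rules
# (TokenRule subclasses) are defined elsewhere and are not used here.
if __name__ == '__main__':
    tokenizer = Tokenizer()
    tokenizer.from_string("let x = 1\nlet y = 2\n")
    assert tokenizer.peek_char() == 'l'
    assert tokenizer.consume("let ")         # advance past a known prefix
    assert tokenizer.peek_line() == "x = 1"  # rest of the current line
    assert tokenizer.forwards(6)             # step over "x = 1\n"
    assert tokenizer.backwards(6)            # and back again
    assert tokenizer.peek_line() == "x = 1"
    print("tokenizer cursor sketch OK")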
|
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Plot loss curves from saved CSV files for the influence balancing experiment.
Example:
--------
python plot_toy_regression.py
"""
import os
import csv
import pdb
import pickle as pkl
from collections import defaultdict
import numpy as np
import scipy.ndimage
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.colors as colors
import seaborn as sns
sns.set_style('white')
sns.set_palette('bright')
# Lighter colors
mycolors = [
'#FF349B',
'#18DF29',
'#674DEA',
'#FF8031',
'#02D4F9',
'#4F4C4B',
]
sns.set_palette(mycolors)
sns.palplot(sns.color_palette())
figure_dir = 'figures/toy_regression'
if not os.path.exists(figure_dir):
os.makedirs(figure_dir)
# Plotting from saved CSV files
def load_log(exp_dir, log_filename='iteration.csv'):
result_dict = defaultdict(list)
with open(os.path.join(exp_dir, log_filename), newline='') as csvfile:
reader = csv.DictReader(csvfile)
for row in reader:
for key in row:
try:
if key in ['global_iteration', 'iteration', 'epoch']:
result_dict[key].append(int(row[key]))
else:
result_dict[key].append(float(row[key]))
except:
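          # silently skip fields that are empty or not parseable as numbers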
pass
return result_dict
def plot_heatmap(pkl_path,
xlabel,
ylabel,
smoothed=False,
sigma=5.0,
cmap=plt.cm.viridis,
colorbar=True,
figsize=(10, 8)):
with open(pkl_path, 'rb') as f:
heatmap_data = pkl.load(f)
if smoothed:
smoothed_F_grid = scipy.ndimage.gaussian_filter(
heatmap_data['L_grid'], sigma=sigma)
best_smoothed_theta = np.unravel_index(smoothed_F_grid.argmin(),
smoothed_F_grid.shape)
best_smoothed_x = heatmap_data['xv'][best_smoothed_theta]
best_smoothed_y = heatmap_data['yv'][best_smoothed_theta]
plt.figure(figsize=figsize)
cmesh = plt.pcolormesh(
heatmap_data['xv'],
heatmap_data['yv'],
heatmap_data['L_grid'],
vmin=5e2,
vmax=1e4,
norm=colors.LogNorm(),
cmap=cmap)
if colorbar:
cbar = plt.colorbar(cmesh)
cbar.ax.tick_params(labelsize=18)
plt.xticks(fontsize=20)
plt.yticks(fontsize=20)
plt.xlabel(xlabel, fontsize=22)
plt.ylabel(ylabel, fontsize=22)
else:
plt.figure(figsize=figsize)
cmesh = plt.pcolormesh(
heatmap_data['xv'],
heatmap_data['yv'],
heatmap_data['L_grid'],
vmin=5e2,
vmax=1e4,
norm=colors.LogNorm(),
cmap=cmap,
linewidth=0,
rasterized=True)
if colorbar:
cbar = plt.colorbar(cmesh)
cbar.ax.tick_params(labelsize=18)
plt.xticks(fontsize=20)
plt.yticks(fontsize=20)
plt.xlabel(xlabel, fontsize=22)
plt.ylabel(ylabel, fontsize=22)
tbptt_k10 = load_log(
'saves/toy_regression/tbptt-sum-lr:linear--4.0,-4.0-optim:adam-lr:0.01-T:100-K:10-Nc:1-Npc:100-sigma:0.3-seed:3'
)
rtrl_k10 = load_log(
'saves/toy_regression/rtrl-sum-lr:linear--4.0,-4.0-optim:adam-lr:0.01-T:100-K:10-Nc:1-Npc:100-sigma:0.3-seed:3'
)
uoro_k10 = load_log(
'saves/toy_regression/uoro-sum-lr:linear--4.0,-4.0-optim:adam-lr:0.01-T:100-K:10-Nc:1-Npc:100-sigma:0.3-seed:3'
)
es_k10 = load_log(
'saves/toy_regression/es-sum-lr:linear--4.0,-4.0-optim:adam-lr:0.01-T:100-K:10-Nc:1-Npc:100-sigma:1.0-seed:3'
)
pes_k10 = load_log(
'saves/toy_regression/pes-sum-lr:linear--4.0,-4.0-optim:adam-lr:0.01-T:100-K:10-Nc:1-Npc:100-sigma:0.3-seed:3'
)
plot_heatmap(
'saves/toy_regression/sgd_lr:linear_sum_T_100_N_2000_grid.pkl',
xlabel='Initial Log-LR',
ylabel='Final Log-LR',
smoothed=False,
cmap=plt.cm.Purples_r,
colorbar=True,
figsize=(8, 5))
plt.plot(
np.array(tbptt_k10['theta0']),
np.array(tbptt_k10['theta1']),
linewidth=3,
label='TBPTT')
plt.plot(
np.array(uoro_k10['theta0']),
np.array(uoro_k10['theta1']),
linewidth=3,
label='UORO')
plt.plot(
np.array(rtrl_k10['theta0']),
np.array(rtrl_k10['theta1']),
linewidth=3,
label='RTRL')
plt.plot(
np.array(es_k10['theta0']),
np.array(es_k10['theta1']),
linewidth=3,
label='ES')
plt.plot(
np.array(pes_k10['theta0']),
np.array(pes_k10['theta1']),
linewidth=3,
label='PES')
plt.legend(fontsize=18, fancybox=True, framealpha=0.3, loc='upper left')
plt.savefig(
os.path.join(figure_dir, 'toy_regression_heatmap.png'),
bbox_inches='tight',
pad_inches=0,
dpi=300)
plt.savefig(
os.path.join(figure_dir, 'toy_regression_heatmap.pdf'),
bbox_inches='tight',
pad_inches=0)
# =============================================================================
plt.figure(figsize=(6,4))
plt.plot(
tbptt_k10['inner_problem_steps'],
tbptt_k10['L'],
linewidth=3,
label='TBPTT')
plt.plot(
uoro_k10['inner_problem_steps'], uoro_k10['L'], linewidth=3, label='UORO')
plt.plot(
rtrl_k10['inner_problem_steps'], rtrl_k10['L'], linewidth=3, label='RTRL')
plt.plot(es_k10['inner_problem_steps'], es_k10['L'], linewidth=3, label='ES')
plt.plot(pes_k10['inner_problem_steps'], pes_k10['L'], linewidth=3, label='PES')
plt.xscale('log')
plt.xticks(fontsize=18)
plt.yticks([500, 1000, 1500, 2000, 2500], fontsize=18)
plt.xlabel('Inner Iterations', fontsize=20)
plt.ylabel('Meta Objective', fontsize=20)
plt.legend(fontsize=18, fancybox=True, framealpha=0.3)
sns.despine()
plt.savefig(
os.path.join(figure_dir, 'toy_regression_meta_obj.png'),
bbox_inches='tight',
pad_inches=0)
plt.savefig(
os.path.join(figure_dir, 'toy_regression_meta_obj.pdf'),
bbox_inches='tight',
pad_inches=0)
|
|
# Copyright (C) 2009 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import codecs
import os
import tempfile
import unittest
from StringIO import StringIO
from webkitpy.common.checkout.changelog import *
class ChangeLogTest(unittest.TestCase):
_example_entry = u'''2009-08-17 Peter Kasting <pkasting@google.com>
Reviewed by Tor Arne Vestb\xf8.
https://bugs.webkit.org/show_bug.cgi?id=27323
Only add Cygwin to the path when it isn't already there. This avoids
causing problems for people who purposefully have non-Cygwin versions of
executables like svn in front of the Cygwin ones in their paths.
* DumpRenderTree/win/DumpRenderTree.vcproj:
* DumpRenderTree/win/ImageDiff.vcproj:
* DumpRenderTree/win/TestNetscapePlugin/TestNetscapePlugin.vcproj:
'''
_rolled_over_footer = '== Rolled over to ChangeLog-2009-06-16 =='
# More example text than we need. Eventually we need to support parsing this all and write tests for the parsing.
_example_changelog = u"""2009-08-17 Tor Arne Vestb\xf8 <vestbo@webkit.org>
<http://webkit.org/b/28393> check-webkit-style: add check for use of std::max()/std::min() instead of MAX()/MIN()
Reviewed by David Levin.
* Scripts/modules/cpp_style.py:
(_ERROR_CATEGORIES): Added 'runtime/max_min_macros'.
(check_max_min_macros): Added. Returns level 4 error when MAX()
and MIN() macros are used in header files and C++ source files.
(check_style): Added call to check_max_min_macros().
* Scripts/modules/cpp_style_unittest.py: Added unit tests.
(test_max_macro): Added.
(test_min_macro): Added.
2009-08-16 David Kilzer <ddkilzer@apple.com>
Backed out r47343 which was mistakenly committed
* Scripts/bugzilla-tool:
* Scripts/modules/scm.py:
2009-06-18 Darin Adler <darin@apple.com>
Rubber stamped by Mark Rowe.
* DumpRenderTree/mac/DumpRenderTreeWindow.mm:
(-[DumpRenderTreeWindow close]): Resolved crashes seen during regression
tests. The close method can be called on a window that's already closed
so we can't assert here.
2011-11-04 Benjamin Poulain <bpoulain@apple.com>
[Mac] ResourceRequest's nsURLRequest() does not differentiate null and empty URLs with CFNetwork
https://bugs.webkit.org/show_bug.cgi?id=71539
Reviewed by David Kilzer.
In order to have CFURL and NSURL to be consistent when both are used on Mac,
KURL::createCFURL() is changed to support empty URL values.
* This change log entry is made up to test _parse_entry:
* a list of things
* platform/cf/KURLCFNet.cpp:
(WebCore::createCFURLFromBuffer):
(WebCore::KURL::createCFURL):
* platform/mac/KURLMac.mm :
(WebCore::KURL::operator NSURL *):
(WebCore::KURL::createCFURL):
* WebCoreSupport/ChromeClientEfl.cpp:
(WebCore::ChromeClientEfl::closeWindowSoon): call new function and moves its
previous functionality there.
* ewk/ewk_private.h:
* ewk/ewk_view.cpp:
2011-03-02 Carol Szabo <carol.szabo@nokia.com>
Reviewed by David Hyatt <hyatt@apple.com>
content property doesn't support quotes
https://bugs.webkit.org/show_bug.cgi?id=6503
Added full support for quotes as defined by CSS 2.1.
Tests: fast/css/content/content-quotes-01.html
fast/css/content/content-quotes-02.html
fast/css/content/content-quotes-03.html
fast/css/content/content-quotes-04.html
fast/css/content/content-quotes-05.html
fast/css/content/content-quotes-06.html
2011-03-31 Brent Fulgham <bfulgham@webkit.org>
Reviewed Adam Roben.
[WinCairo] Implement Missing drawWindowsBitmap method.
https://bugs.webkit.org/show_bug.cgi?id=57409
2011-03-28 Dirk Pranke <dpranke@chromium.org>
RS=Tony Chang.
r81977 moved FontPlatformData.h from
WebCore/platform/graphics/cocoa to platform/graphics. This
change updates the chromium build accordingly.
https://bugs.webkit.org/show_bug.cgi?id=57281
* platform/graphics/chromium/CrossProcessFontLoading.mm:
2011-05-04 Alexis Menard <alexis.menard@openbossa.org>
Unreviewed warning fix.
The variable is just used in the ASSERT macro. Let's use ASSERT_UNUSED to avoid
a warning in Release build.
* accessibility/AccessibilityRenderObject.cpp:
(WebCore::lastChildConsideringContinuation):
2011-10-11 Antti Koivisto <antti@apple.com>
Resolve regular and visited link style in a single pass
https://bugs.webkit.org/show_bug.cgi?id=69838
Reviewed by Darin Adler
We can simplify and speed up selector matching by removing the recursive matching done
to generate the style for the :visited pseudo selector. Both regular and visited link style
can be generated in a single pass through the style selector.
== Rolled over to ChangeLog-2009-06-16 ==
"""
def test_parse_bug_id_from_changelog(self):
commit_text = '''
2011-03-23 Ojan Vafai <ojan@chromium.org>
Add failing result for WebKit2. All tests that require
focus fail on WebKit2. See https://bugs.webkit.org/show_bug.cgi?id=56988.
* platform/mac-wk2/fast/css/pseudo-any-expected.txt: Added.
'''
self.assertEqual(56988, parse_bug_id_from_changelog(commit_text))
commit_text = '''
2011-03-23 Ojan Vafai <ojan@chromium.org>
Add failing result for WebKit2. All tests that require
focus fail on WebKit2. See https://bugs.webkit.org/show_bug.cgi?id=56988.
https://bugs.webkit.org/show_bug.cgi?id=12345
* platform/mac-wk2/fast/css/pseudo-any-expected.txt: Added.
'''
self.assertEqual(12345, parse_bug_id_from_changelog(commit_text))
commit_text = '''
2011-03-31 Adam Roben <aroben@apple.com>
Quote the executable path we pass to ::CreateProcessW
This will ensure that spaces in the path will be interpreted correctly.
Fixes <http://webkit.org/b/57569> Web process sometimes fails to launch when there are
spaces in its path
Reviewed by Steve Falkenburg.
* UIProcess/Launcher/win/ProcessLauncherWin.cpp:
(WebKit::ProcessLauncher::launchProcess): Surround the executable path in quotes.
'''
self.assertEqual(57569, parse_bug_id_from_changelog(commit_text))
commit_text = '''
2011-03-29 Timothy Hatcher <timothy@apple.com>
Update WebCore Localizable.strings to contain WebCore, WebKit/mac and WebKit2 strings.
https://webkit.org/b/57354
Reviewed by Sam Weinig.
* English.lproj/Localizable.strings: Updated.
* StringsNotToBeLocalized.txt: Removed. To hard to maintain in WebCore.
* platform/network/cf/LoaderRunLoopCF.h: Remove a single quote in an #error so
extract-localizable-strings does not complain about unbalanced single quotes.
'''
self.assertEqual(57354, parse_bug_id_from_changelog(commit_text))
def test_parse_log_entries_from_changelog(self):
changelog_file = StringIO(self._example_changelog)
parsed_entries = list(ChangeLog.parse_entries_from_file(changelog_file))
self.assertEqual(len(parsed_entries), 9)
self.assertEqual(parsed_entries[0].reviewer_text(), "David Levin")
self.assertEqual(parsed_entries[1].author_email(), "ddkilzer@apple.com")
self.assertEqual(parsed_entries[2].reviewer_text(), "Mark Rowe")
self.assertEqual(parsed_entries[2].touched_files(), ["DumpRenderTree/mac/DumpRenderTreeWindow.mm"])
self.assertEqual(parsed_entries[3].author_name(), "Benjamin Poulain")
self.assertEqual(parsed_entries[3].touched_files(), ["platform/cf/KURLCFNet.cpp", "platform/mac/KURLMac.mm",
"WebCoreSupport/ChromeClientEfl.cpp", "ewk/ewk_private.h", "ewk/ewk_view.cpp"])
self.assertEqual(parsed_entries[4].reviewer_text(), "David Hyatt")
self.assertEqual(parsed_entries[5].reviewer_text(), "Adam Roben")
self.assertEqual(parsed_entries[6].reviewer_text(), "Tony Chang")
self.assertEqual(parsed_entries[7].reviewer_text(), None)
self.assertEqual(parsed_entries[8].reviewer_text(), 'Darin Adler')
def test_parse_log_entries_from_annotated_file(self):
# Note that there are trailing spaces on some of the lines intentionally.
changelog_file = StringIO(u"100000 ossy@webkit.org 2011-11-11 Csaba Osztrogon\u00e1c <ossy@webkit.org>\n"
u"100000 ossy@webkit.org\n"
u"100000 ossy@webkit.org 100,000 !!!\n"
u"100000 ossy@webkit.org \n"
u"100000 ossy@webkit.org Reviewed by Zoltan Herczeg.\n"
u"100000 ossy@webkit.org \n"
u"100000 ossy@webkit.org * ChangeLog: Point out revision 100,000.\n"
u"100000 ossy@webkit.org \n"
u"93798 ap@apple.com 2011-08-25 Alexey Proskuryakov <ap@apple.com>\n"
u"93798 ap@apple.com \n"
u"93798 ap@apple.com Fix build when GCC 4.2 is not installed.\n"
u"93798 ap@apple.com \n"
u"93798 ap@apple.com * gtest/xcode/Config/CompilerVersion.xcconfig: Copied from Source/WebCore/Configurations/CompilerVersion.xcconfig.\n"
u"93798 ap@apple.com * gtest/xcode/Config/General.xcconfig:\n"
u"93798 ap@apple.com Use the same compiler version as other projects do.\n"
u"93798 ap@apple.com\n"
u"99491 andreas.kling@nokia.com 2011-11-03 Andreas Kling <kling@webkit.org>\n"
u"99491 andreas.kling@nokia.com \n"
u"99190 andreas.kling@nokia.com Unreviewed build fix, sigh.\n"
u"99190 andreas.kling@nokia.com \n"
u"99190 andreas.kling@nokia.com * css/CSSFontFaceRule.h:\n"
u"99190 andreas.kling@nokia.com * css/CSSMutableStyleDeclaration.h:\n"
u"99190 andreas.kling@nokia.com\n"
u"99190 andreas.kling@nokia.com 2011-11-03 Andreas Kling <kling@webkit.org>\n"
u"99190 andreas.kling@nokia.com \n"
u"99187 andreas.kling@nokia.com Unreviewed build fix, out-of-line StyleSheet::parentStyleSheet()\n"
u"99187 andreas.kling@nokia.com again since there's a cycle in the includes between CSSRule/StyleSheet.\n"
u"99187 andreas.kling@nokia.com \n"
u"99187 andreas.kling@nokia.com * css/StyleSheet.cpp:\n"
u"99187 andreas.kling@nokia.com (WebCore::StyleSheet::parentStyleSheet):\n"
u"99187 andreas.kling@nokia.com * css/StyleSheet.h:\n"
u"99187 andreas.kling@nokia.com \n")
parsed_entries = list(ChangeLog.parse_entries_from_file(changelog_file))
self.assertEqual(parsed_entries[0].revision(), 100000)
self.assertEqual(parsed_entries[0].reviewer_text(), "Zoltan Herczeg")
self.assertEqual(parsed_entries[0].author_name(), u"Csaba Osztrogon\u00e1c")
self.assertEqual(parsed_entries[0].author_email(), "ossy@webkit.org")
self.assertEqual(parsed_entries[1].revision(), 93798)
self.assertEqual(parsed_entries[1].author_name(), "Alexey Proskuryakov")
self.assertEqual(parsed_entries[2].revision(), 99190)
self.assertEqual(parsed_entries[2].author_name(), "Andreas Kling")
self.assertEqual(parsed_entries[3].revision(), 99187)
self.assertEqual(parsed_entries[3].author_name(), "Andreas Kling")
def _assert_parse_reviewer_text_and_list(self, text, expected_reviewer_text, expected_reviewer_text_list=None):
reviewer_text, reviewer_text_list = ChangeLogEntry._parse_reviewer_text(text)
self.assertEqual(reviewer_text, expected_reviewer_text)
if expected_reviewer_text_list:
self.assertEqual(reviewer_text_list, expected_reviewer_text_list)
else:
self.assertEqual(reviewer_text_list, [expected_reviewer_text])
def _assert_parse_reviewer_text_list(self, text, expected_reviewer_text_list):
reviewer_text, reviewer_text_list = ChangeLogEntry._parse_reviewer_text(text)
self.assertEqual(reviewer_text_list, expected_reviewer_text_list)
def test_parse_reviewer_text(self):
self._assert_parse_reviewer_text_and_list(' reviewed by Ryosuke Niwa, Oliver Hunt, and Dimitri Glazkov',
'Ryosuke Niwa, Oliver Hunt, and Dimitri Glazkov', ['Ryosuke Niwa', 'Oliver Hunt', 'Dimitri Glazkov'])
self._assert_parse_reviewer_text_and_list('Reviewed by Brady Eidson and David Levin, landed by Brady Eidson',
'Brady Eidson and David Levin', ['Brady Eidson', 'David Levin'])
self._assert_parse_reviewer_text_and_list('Reviewed by Simon Fraser. Committed by Beth Dakin.', 'Simon Fraser')
self._assert_parse_reviewer_text_and_list('Reviewed by Geoff Garen. V8 fixes courtesy of Dmitry Titov.', 'Geoff Garen')
self._assert_parse_reviewer_text_and_list('Reviewed by Adam Roben&Dirk Schulze', 'Adam Roben&Dirk Schulze', ['Adam Roben', 'Dirk Schulze'])
self._assert_parse_reviewer_text_and_list('Rubber stamps by Darin Adler & Sam Weinig.', 'Darin Adler & Sam Weinig', ['Darin Adler', 'Sam Weinig'])
self._assert_parse_reviewer_text_and_list('Reviewed by adam,andy and andy adam, andy smith',
'adam,andy and andy adam, andy smith', ['adam', 'andy', 'andy adam', 'andy smith'])
self._assert_parse_reviewer_text_and_list('rubber stamped by Oliver Hunt (oliver@apple.com) and Darin Adler (darin@apple.com)',
'Oliver Hunt and Darin Adler', ['Oliver Hunt', 'Darin Adler'])
self._assert_parse_reviewer_text_and_list('rubber Stamped by David Hyatt <hyatt@apple.com>', 'David Hyatt')
self._assert_parse_reviewer_text_and_list('Rubber-stamped by Antti Koivisto.', 'Antti Koivisto')
self._assert_parse_reviewer_text_and_list('Rubberstamped by Dan Bernstein.', 'Dan Bernstein')
self._assert_parse_reviewer_text_and_list('Reviews by Ryosuke Niwa', 'Ryosuke Niwa')
self._assert_parse_reviewer_text_and_list('Reviews Ryosuke Niwa', 'Ryosuke Niwa')
self._assert_parse_reviewer_text_and_list('Rubberstamp Ryosuke Niwa', 'Ryosuke Niwa')
self._assert_parse_reviewer_text_and_list('Typed and reviewed by Alexey Proskuryakov.', 'Alexey Proskuryakov')
self._assert_parse_reviewer_text_and_list('Reviewed and landed by Brady Eidson', 'Brady Eidson')
self._assert_parse_reviewer_text_and_list('Reviewed by rniwa@webkit.org.', 'rniwa@webkit.org')
self._assert_parse_reviewer_text_and_list('Reviewed by Dirk Schulze / Darin Adler.', 'Dirk Schulze / Darin Adler', ['Dirk Schulze', 'Darin Adler'])
self._assert_parse_reviewer_text_and_list('Reviewed by Sam Weinig + Oliver Hunt.', 'Sam Weinig + Oliver Hunt', ['Sam Weinig', 'Oliver Hunt'])
self._assert_parse_reviewer_text_list('Reviewed by Sam Weinig, and given a good once-over by Jeff Miller.', ['Sam Weinig', 'Jeff Miller'])
self._assert_parse_reviewer_text_list(' Reviewed by Sam Weinig, even though this is just a...', ['Sam Weinig'])
self._assert_parse_reviewer_text_list('Rubber stamped by by Gustavo Noronha Silva', ['Gustavo Noronha Silva'])
self._assert_parse_reviewer_text_list('Rubberstamped by Noam Rosenthal, who wrote the original code.', ['Noam Rosenthal'])
self._assert_parse_reviewer_text_list('Reviewed by Dan Bernstein (relanding of r47157)', ['Dan Bernstein'])
self._assert_parse_reviewer_text_list('Reviewed by Geoffrey "Sean/Shawn/Shaun" Garen', ['Geoffrey Garen'])
self._assert_parse_reviewer_text_list('Reviewed by Dave "Messy" Hyatt.', ['Dave Hyatt'])
self._assert_parse_reviewer_text_list('Reviewed by Sam \'The Belly\' Weinig', ['Sam Weinig'])
self._assert_parse_reviewer_text_list('Rubber-stamped by David "I\'d prefer not" Hyatt.', ['David Hyatt'])
self._assert_parse_reviewer_text_list('Reviewed by Mr. Geoffrey Garen.', ['Geoffrey Garen'])
self._assert_parse_reviewer_text_list('Reviewed by Darin (ages ago)', ['Darin'])
self._assert_parse_reviewer_text_list('Reviewed by Sam Weinig (except for a few comment and header tweaks).', ['Sam Weinig'])
self._assert_parse_reviewer_text_list('Reviewed by Sam Weinig (all but the FormDataListItem rename)', ['Sam Weinig'])
self._assert_parse_reviewer_text_list('Reviewed by Darin Adler, tweaked and landed by Beth.', ['Darin Adler'])
self._assert_parse_reviewer_text_list('Reviewed by Sam Weinig with no hesitation', ['Sam Weinig'])
self._assert_parse_reviewer_text_list('Reviewed by Oliver Hunt, okayed by Darin Adler.', ['Oliver Hunt'])
self._assert_parse_reviewer_text_list('Reviewed by Darin Adler).', ['Darin Adler'])
        # For now, we let unofficial reviewers be recognized as reviewers
self._assert_parse_reviewer_text_list('Reviewed by Sam Weinig, Anders Carlsson, and (unofficially) Adam Barth.',
['Sam Weinig', 'Anders Carlsson', 'Adam Barth'])
self._assert_parse_reviewer_text_list('Reviewed by NOBODY.', None)
self._assert_parse_reviewer_text_list('Reviewed by NOBODY - Build Fix.', None)
self._assert_parse_reviewer_text_list('Reviewed by NOBODY, layout tests fix.', None)
self._assert_parse_reviewer_text_list('Reviewed by NOBODY (Qt build fix pt 2).', None)
self._assert_parse_reviewer_text_list('Reviewed by NOBODY(rollout)', None)
self._assert_parse_reviewer_text_list('Reviewed by NOBODY (Build fix, forgot to svn add this file)', None)
self._assert_parse_reviewer_text_list('Reviewed by nobody (trivial follow up fix), Joseph Pecoraro LGTM-ed.', None)
def _entry_with_author(self, author_text):
return ChangeLogEntry('''2009-08-19 AUTHOR_TEXT
Reviewed by Ryosuke Niwa
* Scripts/bugzilla-tool:
'''.replace("AUTHOR_TEXT", author_text))
def _entry_with_reviewer(self, reviewer_line):
return ChangeLogEntry('''2009-08-19 Eric Seidel <eric@webkit.org>
REVIEW_LINE
* Scripts/bugzilla-tool:
'''.replace("REVIEW_LINE", reviewer_line))
def _contributors(self, names):
return [CommitterList().contributor_by_name(name) for name in names]
def _assert_fuzzy_reviewer_match(self, reviewer_text, expected_text_list, expected_contributors):
unused, reviewer_text_list = ChangeLogEntry._parse_reviewer_text(reviewer_text)
self.assertEqual(reviewer_text_list, expected_text_list)
self.assertEqual(self._entry_with_reviewer(reviewer_text).reviewers(), self._contributors(expected_contributors))
def test_fuzzy_reviewer_match__none(self):
self._assert_fuzzy_reviewer_match('Reviewed by BUILD FIX', ['BUILD FIX'], [])
self._assert_fuzzy_reviewer_match('Reviewed by Mac build fix', ['Mac build fix'], [])
def test_fuzzy_reviewer_match_adam_barth(self):
self._assert_fuzzy_reviewer_match('Reviewed by Adam Barth.:w', ['Adam Barth.:w'], ['Adam Barth'])
def test_fuzzy_reviewer_match_darin_adler_et_al(self):
self._assert_fuzzy_reviewer_match('Reviewed by Darin Adler in <https://bugs.webkit.org/show_bug.cgi?id=47736>.', ['Darin Adler in'], ['Darin Adler'])
self._assert_fuzzy_reviewer_match('Reviewed by Darin Adler, Dan Bernstein, Adele Peterson, and others.',
['Darin Adler', 'Dan Bernstein', 'Adele Peterson', 'others'], ['Darin Adler', 'Dan Bernstein', 'Adele Peterson'])
def test_fuzzy_reviewer_match_dimitri_glazkov(self):
self._assert_fuzzy_reviewer_match('Reviewed by Dimitri Glazkov, build fix', ['Dimitri Glazkov', 'build fix'], ['Dimitri Glazkov'])
def test_fuzzy_reviewer_match_george_staikos(self):
self._assert_fuzzy_reviewer_match('Reviewed by George Staikos (and others)', ['George Staikos', 'others'], ['George Staikos'])
def test_fuzzy_reviewer_match_mark_rowe(self):
self._assert_fuzzy_reviewer_match('Reviewed by Mark Rowe, but Dan Bernstein also reviewed and asked thoughtful questions.',
['Mark Rowe', 'but Dan Bernstein also reviewed', 'asked thoughtful questions'], ['Mark Rowe'])
def test_fuzzy_reviewer_match_initial(self):
self._assert_fuzzy_reviewer_match('Reviewed by Alejandro G. Castro.',
['Alejandro G. Castro'], ['Alejandro G. Castro'])
self._assert_fuzzy_reviewer_match('Reviewed by G. Alejandro G. Castro and others.',
['G. Alejandro G. Castro', 'others'], ['Alejandro G. Castro'])
        # If a reviewer's name ends with an initial, the regular expression
        # will incorrectly trim the last period, but the result will still
        # match fuzzily to the full reviewer name.
self._assert_fuzzy_reviewer_match('Reviewed by G. Alejandro G. G. Castro G.',
['G. Alejandro G. G. Castro G'], ['Alejandro G. Castro'])
def _assert_parse_authors(self, author_text, expected_contributors):
parsed_authors = [(author['name'], author['email']) for author in self._entry_with_author(author_text).authors()]
self.assertEqual(parsed_authors, expected_contributors)
def test_parse_authors(self):
self._assert_parse_authors(u'Aaron Colwell <acolwell@chromium.org>', [(u'Aaron Colwell', u'acolwell@chromium.org')])
self._assert_parse_authors('Eric Seidel <eric@webkit.org>, Ryosuke Niwa <rniwa@webkit.org>',
[('Eric Seidel', 'eric@webkit.org'), ('Ryosuke Niwa', 'rniwa@webkit.org')])
self._assert_parse_authors('Zan Dobersek <zandobersek@gmail.com> and Philippe Normand <pnormand@igalia.com>',
[('Zan Dobersek', 'zandobersek@gmail.com'), ('Philippe Normand', 'pnormand@igalia.com')])
self._assert_parse_authors('New Contributor <new@webkit.org> and Noob <noob@webkit.org>',
[('New Contributor', 'new@webkit.org'), ('Noob', 'noob@webkit.org')])
self._assert_parse_authors('Adam Barth <abarth@webkit.org> && Benjamin Poulain <bpoulain@apple.com>',
[('Adam Barth', 'abarth@webkit.org'), ('Benjamin Poulain', 'bpoulain@apple.com')])
def _assert_has_valid_reviewer(self, reviewer_line, expected):
self.assertEqual(self._entry_with_reviewer(reviewer_line).has_valid_reviewer(), expected)
def test_has_valid_reviewer(self):
self._assert_has_valid_reviewer("Reviewed by Eric Seidel.", True)
self._assert_has_valid_reviewer("Reviewed by Eric Seidel", True) # Not picky about the '.'
self._assert_has_valid_reviewer("Reviewed by Eric.", False)
self._assert_has_valid_reviewer("Reviewed by Eric C Seidel.", False)
self._assert_has_valid_reviewer("Rubber-stamped by Eric.", False)
self._assert_has_valid_reviewer("Rubber-stamped by Eric Seidel.", True)
self._assert_has_valid_reviewer("Rubber stamped by Eric.", False)
self._assert_has_valid_reviewer("Rubber stamped by Eric Seidel.", True)
self._assert_has_valid_reviewer("Unreviewed build fix.", True)
def test_latest_entry_parse(self):
changelog_contents = u"%s\n%s" % (self._example_entry, self._example_changelog)
changelog_file = StringIO(changelog_contents)
latest_entry = ChangeLog.parse_latest_entry_from_file(changelog_file)
self.assertEqual(latest_entry.contents(), self._example_entry)
self.assertEqual(latest_entry.author_name(), "Peter Kasting")
self.assertEqual(latest_entry.author_email(), "pkasting@google.com")
self.assertEqual(latest_entry.reviewer_text(), u"Tor Arne Vestb\xf8")
self.assertEqual(latest_entry.touched_files(), ["DumpRenderTree/win/DumpRenderTree.vcproj", "DumpRenderTree/win/ImageDiff.vcproj", "DumpRenderTree/win/TestNetscapePlugin/TestNetscapePlugin.vcproj"])
self.assertTrue(latest_entry.reviewer()) # Make sure that our UTF8-based lookup of Tor works.
def test_latest_entry_parse_single_entry(self):
changelog_contents = u"%s\n%s" % (self._example_entry, self._rolled_over_footer)
changelog_file = StringIO(changelog_contents)
latest_entry = ChangeLog.parse_latest_entry_from_file(changelog_file)
self.assertEqual(latest_entry.contents(), self._example_entry)
self.assertEqual(latest_entry.author_name(), "Peter Kasting")
@staticmethod
def _write_tmp_file_with_contents(byte_array):
assert(isinstance(byte_array, str))
(file_descriptor, file_path) = tempfile.mkstemp() # NamedTemporaryFile always deletes the file on close in python < 2.6
with os.fdopen(file_descriptor, "w") as file:
file.write(byte_array)
return file_path
@staticmethod
def _read_file_contents(file_path, encoding):
with codecs.open(file_path, "r", encoding) as file:
return file.read()
# FIXME: We really should be getting this from prepare-ChangeLog itself.
_new_entry_boilerplate = '''2009-08-19 Eric Seidel <eric@webkit.org>
Need a short description (OOPS!).
Need the bug URL (OOPS!).
Reviewed by NOBODY (OOPS!).
* Scripts/bugzilla-tool:
'''
_new_entry_boilerplate_with_bugurl = '''2009-08-19 Eric Seidel <eric@webkit.org>
Need a short description (OOPS!).
https://bugs.webkit.org/show_bug.cgi?id=12345
Reviewed by NOBODY (OOPS!).
* Scripts/bugzilla-tool:
'''
_new_entry_boilerplate_with_multiple_bugurl = '''2009-08-19 Eric Seidel <eric@webkit.org>
Need a short description (OOPS!).
https://bugs.webkit.org/show_bug.cgi?id=12345
http://webkit.org/b/12345
Reviewed by NOBODY (OOPS!).
* Scripts/bugzilla-tool:
'''
_new_entry_boilerplate_without_reviewer_line = '''2009-08-19 Eric Seidel <eric@webkit.org>
Need a short description (OOPS!).
https://bugs.webkit.org/show_bug.cgi?id=12345
* Scripts/bugzilla-tool:
'''
_new_entry_boilerplate_without_reviewer_multiple_bugurl = '''2009-08-19 Eric Seidel <eric@webkit.org>
Need a short description (OOPS!).
https://bugs.webkit.org/show_bug.cgi?id=12345
http://webkit.org/b/12345
* Scripts/bugzilla-tool:
'''
def test_set_reviewer(self):
changelog_contents = u"%s\n%s" % (self._new_entry_boilerplate_with_bugurl, self._example_changelog)
changelog_path = self._write_tmp_file_with_contents(changelog_contents.encode("utf-8"))
reviewer_name = 'Test Reviewer'
ChangeLog(changelog_path).set_reviewer(reviewer_name)
actual_contents = self._read_file_contents(changelog_path, "utf-8")
expected_contents = changelog_contents.replace('NOBODY (OOPS!)', reviewer_name)
os.remove(changelog_path)
self.assertEqual(actual_contents.splitlines(), expected_contents.splitlines())
changelog_contents_without_reviewer_line = u"%s\n%s" % (self._new_entry_boilerplate_without_reviewer_line, self._example_changelog)
changelog_path = self._write_tmp_file_with_contents(changelog_contents_without_reviewer_line.encode("utf-8"))
ChangeLog(changelog_path).set_reviewer(reviewer_name)
actual_contents = self._read_file_contents(changelog_path, "utf-8")
os.remove(changelog_path)
self.assertEqual(actual_contents.splitlines(), expected_contents.splitlines())
changelog_contents_without_reviewer_line = u"%s\n%s" % (self._new_entry_boilerplate_without_reviewer_multiple_bugurl, self._example_changelog)
changelog_path = self._write_tmp_file_with_contents(changelog_contents_without_reviewer_line.encode("utf-8"))
ChangeLog(changelog_path).set_reviewer(reviewer_name)
actual_contents = self._read_file_contents(changelog_path, "utf-8")
changelog_contents = u"%s\n%s" % (self._new_entry_boilerplate_with_multiple_bugurl, self._example_changelog)
expected_contents = changelog_contents.replace('NOBODY (OOPS!)', reviewer_name)
os.remove(changelog_path)
self.assertEqual(actual_contents.splitlines(), expected_contents.splitlines())
def test_set_short_description_and_bug_url(self):
changelog_contents = u"%s\n%s" % (self._new_entry_boilerplate_with_bugurl, self._example_changelog)
changelog_path = self._write_tmp_file_with_contents(changelog_contents.encode("utf-8"))
short_description = "A short description"
bug_url = "http://example.com/b/2344"
ChangeLog(changelog_path).set_short_description_and_bug_url(short_description, bug_url)
actual_contents = self._read_file_contents(changelog_path, "utf-8")
expected_message = "%s\n %s" % (short_description, bug_url)
expected_contents = changelog_contents.replace("Need a short description (OOPS!).", expected_message)
os.remove(changelog_path)
self.assertEqual(actual_contents.splitlines(), expected_contents.splitlines())
changelog_contents = u"%s\n%s" % (self._new_entry_boilerplate, self._example_changelog)
changelog_path = self._write_tmp_file_with_contents(changelog_contents.encode("utf-8"))
short_description = "A short description 2"
bug_url = "http://example.com/b/2345"
ChangeLog(changelog_path).set_short_description_and_bug_url(short_description, bug_url)
actual_contents = self._read_file_contents(changelog_path, "utf-8")
expected_message = "%s\n %s" % (short_description, bug_url)
expected_contents = changelog_contents.replace("Need a short description (OOPS!).\n Need the bug URL (OOPS!).", expected_message)
os.remove(changelog_path)
self.assertEqual(actual_contents.splitlines(), expected_contents.splitlines())
|
|
# -*- coding: utf-8 -*-
from __future__ import print_function
import boto3
import botocore
from builtins import input
import click
import prettytable
import sys
# All EBS-backed images, not instance-store. All latest Ubuntu 14.04, but the
# exact version is not pinned. May need to refresh these IDs from Amazon.
images = {
'ap-southeast-1': {'ubuntu14': 'ami-21d30f42'}, # Singapore
'ap-south-1': {'ubuntu14': 'ami-4a90fa25'}, # Mumbai
'us-east-1': {'ubuntu14': 'ami-2d39803a'}, # nvirginia
'us-west-1': {'ubuntu14': 'ami-48db9d28'}, # northcalif
'us-west-2': {'ubuntu14': 'ami-d732f0b7'}, # oregon
'eu-west-1': {'ubuntu14': 'ami-ed82e39e'}, # ireland
'eu-central-1': {'ubuntu14': 'ami-26c43149'}, # frankfurt
'ap-northeast-1': {'ubuntu14': 'ami-a21529cc'}, # tokyo
'ap-northeast-2': {'ubuntu14': 'ami-09dc1267'}, # seoul
'ap-southeast-2': {'ubuntu14': 'ami-ba3e14d9'}, # sydney
    'sa-east-1': {'ubuntu14': 'ami-dc48dcb0'},          # saopaulo
}
def get_connection():
"""Ensures that the AWS is configured properly.
If not, tell how to configure it.
Returns connection object if configured properly, else None.
"""
try:
ec2 = boto3.resource('ec2')
except (botocore.exceptions.NoRegionError,
botocore.exceptions.NoCredentialsError) as e:
        # TODO(rushiagr): instead of telling people to run 'aws configure', ask
        # for credentials here itself
print('Credentials and region not configured? Run "aws configure" to configure it.')
# TODO(rushiagr): let people provide singapore, and guess region name from
# that.
print('Provide region as "ap-southeast-1" for Singapore.')
return None
return ec2
def get_region_specific_ami_id(distro):
region = boto3.session.Session().region_name
    return images.get(region, {}).get(distro)
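# Illustrative sketch (not part of the original tool): how the AMI lookup above
# is expected to behave. With the empty-dict fallback, a region missing from
# the `images` table yields None, so callers can print a friendly error instead
# of crashing:
#
#   images.get('ap-southeast-1', {}).get('ubuntu14')  # -> 'ami-21d30f42'
#   images.get('eu-west-3', {}).get('ubuntu14')       # -> None (region unknown)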
def abort_if_false(ctx, param, value):
if not value:
ctx.abort()
@click.command()
@click.option('-s', 'show_vol_info', flag_value=True,
help='Show VM disk sizes (GBs), starting with root disk')
@click.option('-n', 'filter_name',
help='Show only VMs which matches given string (case-insensitive)')
def lsvm(show_vol_info, filter_name):
'''List all EC2 VMs. '''
ec2 = get_connection()
if not ec2:
return
filter_name = filter_name.lower() if filter_name else None
if show_vol_info:
table = prettytable.PrettyTable(
['ID', 'Name', 'Status', 'Flavor', 'IP', 'Vols(GB)'])
else:
table = prettytable.PrettyTable(
['ID', 'Name', 'Status', 'Flavor', 'IP', 'Vols'])
table.left_padding_width=0
table.right_padding_width=1
table.border=False
instances = ec2.instances.all()
instances_to_print = []
if not filter_name:
instances_to_print = instances
else:
for i in instances:
if i.tags is not None and len(i.tags) > 0:
for tag in i.tags:
if(tag['Key'] == 'Name' and
tag['Value'].lower().find(filter_name) > -1):
instances_to_print.append(i)
break
for i in instances_to_print:
row = [
i.id,
            i.tags[0]['Value'] if i.tags else '',
i.state['Name'],
i.instance_type,
i.public_ip_address]
if show_vol_info:
row.append([vol.size for vol in i.volumes.all()])
else:
row.append(len(i.block_device_mappings))
table.add_row(row)
print(table.get_string(sortby='Status'))
@click.command()
def mkvm():
ec2 = get_connection()
if not ec2:
return
flavor_names = ['t1.micro', 'm1.small', 'm1.medium', 'm1.large',
'm1.xlarge', 'm3.medium', 'm3.large', 'm3.xlarge', 'm3.2xlarge',
'm4.large', 'm4.xlarge', 'm4.2xlarge', 'm4.4xlarge', 'm4.10xlarge',
't2.micro', 't2.small', 't2.medium', 't2.large', 'm2.xlarge',
'm2.2xlarge', 'm2.4xlarge', 'cr1.8xlarge', 'i2.xlarge',
'i2.2xlarge', 'i2.4xlarge', 'i2.8xlarge', 'hi1.4xlarge',
'hs1.8xlarge', 'c1.medium', 'c1.xlarge', 'c3.large', 'c3.xlarge',
'c3.2xlarge', 'c3.4xlarge', 'c3.8xlarge', 'c4.large', 'c4.xlarge',
'c4.2xlarge', 'c4.4xlarge', 'c4.8xlarge', 'cc1.4xlarge',
'cc2.8xlarge', 'g2.2xlarge', 'cg1.4xlarge', 'r3.large',
'r3.xlarge', 'r3.2xlarge', 'r3.4xlarge', 'r3.8xlarge', 'd2.xlarge',
'd2.2xlarge', 'd2.4xlarge', 'd2.8xlarge']
    print('Only Ubuntu images are supported as of now')
print('Available flavors:', ' '.join(flavor_names))
selected_flavor=''
while True:
sys.stdout.write("Select flavor ['l' to list]: ")
flavor=input()
if flavor.lower() == 'l':
print(flavor_names)
continue
elif flavor in flavor_names:
selected_flavor=flavor
break
else:
print('Invalid flavor name.')
keypairs = ec2.key_pairs.all()
keypair_names = [kp.name for kp in keypairs]
print('Available key pairs:', ' '.join(keypair_names))
sys.stdout.write("Select keypair: ")
selected_keypair=input()
secgroups = list(ec2.security_groups.all())
secgroup_name_id_dict = {}
for sg in secgroups:
secgroup_name_id_dict[sg.group_name] = sg.id
secgroup_names = [sg.group_name for sg in secgroups]
print('Available security groups:\n ', '\t'.join(secgroup_names))
sys.stdout.write("Select security group [empty for no security group]: ")
selected_security_group_name=input()
sys.stdout.write("Enter root volume size in GBs: ")
selected_vol_size=input()
ami_id = get_region_specific_ami_id('ubuntu14')
if ami_id is None:
        print('We do not have an Ubuntu image for this region')
return
if not selected_security_group_name:
ec2.create_instances(DryRun=False, ImageId=ami_id, MinCount=1,
MaxCount=1, KeyName=selected_keypair, InstanceType=flavor,
BlockDeviceMappings=[{'DeviceName': '/dev/sda1',
'Ebs': {"VolumeSize": int(selected_vol_size)}}])
else:
ec2.create_instances(DryRun=False, ImageId=ami_id, MinCount=1,
MaxCount=1, KeyName=selected_keypair, InstanceType=flavor,
BlockDeviceMappings=[{'DeviceName': '/dev/sda1',
'Ebs': {"VolumeSize": int(selected_vol_size)}}],
SecurityGroupIds=[
secgroup_name_id_dict[selected_security_group_name]])
@click.command()
def lskp():
ec2 = get_connection()
if not ec2:
return
keypairs = ec2.key_pairs.all()
keypair_names = [kp.name for kp in keypairs]
print('Available keypairs:\n ', '\n '.join(keypair_names))
@click.command()
def lsimg():
ec2 = get_connection()
if not ec2:
return
client = boto3.client('ec2')
images = client.describe_images(Owners=['self'])
image_id_names = [i['ImageId']+' '+i['Name'] for i in images['Images']]
print('Images:\n ', '\n '.join(image_id_names))
@click.command()
@click.option('-a', 'is_detail', flag_value=True,
help='Show security group rules.')
def lssg(is_detail):
ec2 = get_connection()
if not ec2:
return
secgroups = list(ec2.security_groups.all())
if not is_detail:
secgroup_names = [sg.group_name for sg in secgroups]
print('Available security groups:\n ', '\n '.join(secgroup_names))
print('\nExecute "lssg -a" for viewing security group rules')
else:
for sg in secgroups:
print('\nSecurity group: Name:', sg.group_name, 'ID:', sg.id,
'Description:', sg.description)
ip_permissions = sg.ip_permissions
print(' Protocol\t IP\t\tfrom\tto')
for perm in ip_permissions:
if perm['IpRanges']:
print(' tcp\t' + perm['IpRanges'][0]['CidrIp'] + '\t' +
str(perm['FromPort']) + '\t' + str(perm['ToPort']))
@click.command()
@click.argument('vm_ids', nargs=-1, required=True)
@click.option('--yes', is_flag=True, callback=abort_if_false,
expose_value=False,
prompt='Are you sure you want to stop and terminate the VM/VMs?'
' You can stop the VM by using "stpvm" command.')
def rmvm(vm_ids):
# TODO(rushiagr): not required as we're already checking 'required=True'
if len(vm_ids) == 0:
print('No VM IDs provided. Aborting')
return
print('Stopping and terminating VMs with IDs: ', vm_ids)
# TODO(rushiagr): use re.match('i-[0-9a-f]+', 'i-abcd1334') to confirm
# it's an ID
ec2 = get_connection()
if not ec2:
return
ec2.instances.filter(InstanceIds=vm_ids).stop()
ec2.instances.filter(InstanceIds=vm_ids).terminate()
@click.command()
@click.argument('vm_ids', nargs=-1, required=True)
@click.option('--yes', is_flag=True, callback=abort_if_false,
expose_value=False,
prompt='Are you sure you want to stop the VM?')
def stpvm(vm_ids):
print('Stopping (but not terminating) VMs with IDs: ', vm_ids)
# TODO(rushiagr): not required as we're already checking 'required=True'
if len(vm_ids) == 0:
print('No VM IDs provided. Aborting')
return
# TODO(rushiagr): use re.match('i-[0-9a-f]+', 'i-abcd1334') to confirm
# it's an ID
ec2 = get_connection()
if not ec2:
return
ec2.instances.filter(InstanceIds=vm_ids).stop()
@click.command()
def mkkp():
ec2 = get_connection()
if not ec2:
return
sys.stdout.write("Keypair name (required): ")
keypair_name=input()
kp = ec2.create_key_pair(KeyName=keypair_name)
print('Keypair', keypair_name, 'created. Private key:')
print(kp.key_material)
@click.command()
@click.argument('keypair_name', required=False)
@click.option('--yes', is_flag=True, callback=abort_if_false,
expose_value=False,
prompt='Are you sure you want to delete the keypair?')
def rmkp(keypair_name):
ec2 = get_connection()
if not ec2:
return
if keypair_name is None:
sys.stdout.write("Keypair name (required): ")
keypair_name=input()
kp = ec2.KeyPair(keypair_name)
kp.delete()
print('Keypair', keypair_name, 'deleted.')
@click.command()
def mksg():
ec2 = get_connection()
if not ec2:
return
sys.stdout.write("Note that only TCP rules are supported as of now.\n")
sys.stdout.write("Security group name (required): ")
sg_name=input()
sys.stdout.write("Security group description (required): ")
sg_description=input()
ip_portrange_tuples = []
while True:
sys.stdout.write("Add security group rule? [y/n]: ")
bool_inp = input()
if bool_inp.lower().startswith('y'):
sys.stdout.write("IP (e.g. 0.0.0.0/0): ")
ip = input()
sys.stdout.write("Port or port range (e.g. '8080' or '8000-8999': ")
port_range = input()
if port_range.find('-') > -1:
start_port, end_port = port_range.split('-')
else:
start_port = end_port = port_range
start_port, end_port = int(start_port), int(end_port)
if start_port > end_port:
start_port, end_port = end_port, start_port
ip_portrange_tuples.append((ip, start_port, end_port))
else:
break
mysg = ec2.create_security_group(GroupName=sg_name,
Description=sg_description)
for ip, start_port, end_port in ip_portrange_tuples:
mysg.authorize_ingress(IpProtocol="tcp", CidrIp=ip,
FromPort=start_port, ToPort=end_port)
print('Security group', sg_name, 'created')
@click.command()
@click.argument('secgroup_name', required=False)
@click.option('--yes', is_flag=True, callback=abort_if_false,
expose_value=False,
prompt='Are you sure you want to delete the security group?')
def rmsg(secgroup_name):
ec2 = get_connection()
if not ec2:
return
if secgroup_name is None:
sys.stdout.write("Security group name (required): ")
secgroup_name=input()
sg = [sg for sg in ec2.security_groups.filter(GroupNames=[secgroup_name])][0]
sg.delete()
print('Security group', secgroup_name, 'deleted.')
|
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import unittest
from telemetry import decorators
from telemetry.internal.platform.tracing_agent import (
chrome_devtools_tracing_backend)
from telemetry.internal.platform.tracing_agent import chrome_tracing_agent
from telemetry.timeline import tracing_category_filter
from telemetry.timeline import tracing_config
from telemetry.timeline import tracing_options
from pylib.device import device_utils
class FakePlatformBackend(object):
pass
class FakeAndroidPlatformBackend(FakePlatformBackend):
def __init__(self):
devices = device_utils.DeviceUtils.HealthyDevices()
self.device = devices[0]
def GetOSName(self):
return 'android'
class FakeLinuxPlatformBackend(FakePlatformBackend):
def GetOSName(self):
return 'linux'
class FakeMacPlatformBackend(FakePlatformBackend):
def GetOSName(self):
return 'mac'
class FakeWinPlatformBackend(FakePlatformBackend):
def GetOSName(self):
return 'win'
class FakeDevtoolsClient(object):
def __init__(self, remote_port):
self.is_alive = True
self.tracing_started = False
self.remote_port = remote_port
self.will_raise_exception_in_stop_tracing = False
def IsAlive(self):
return self.is_alive
def StartChromeTracing(self, _trace_options, _filter_string, _timeout=10):
self.tracing_started = True
def StopChromeTracing(self, _trace_data_builder):
self.tracing_started = False
if self.will_raise_exception_in_stop_tracing:
raise Exception
def IsChromeTracingSupported(self):
return True
class FakeTraceOptions(object):
def __init__(self):
self.enable_chrome_trace = True
class FakeCategoryFilter(object):
def __init__(self):
self.filter_string = 'foo'
class ChromeTracingAgentUnittest(unittest.TestCase):
def setUp(self):
self.platform1 = FakePlatformBackend()
self.platform2 = FakePlatformBackend()
self.platform3 = FakePlatformBackend()
def StartTracing(self, platform_backend, enable_chrome_trace=True):
assert chrome_tracing_agent.ChromeTracingAgent.IsSupported(platform_backend)
agent = chrome_tracing_agent.ChromeTracingAgent(platform_backend)
trace_options = FakeTraceOptions()
trace_options.enable_chrome_trace = enable_chrome_trace
agent.Start(trace_options, FakeCategoryFilter(), 10)
return agent
def StopTracing(self, tracing_agent):
tracing_agent.Stop(None)
def testRegisterDevtoolsClient(self):
chrome_tracing_agent.ChromeTracingAgent.RegisterDevToolsClient(
FakeDevtoolsClient(1), self.platform1)
chrome_tracing_agent.ChromeTracingAgent.RegisterDevToolsClient(
FakeDevtoolsClient(2), self.platform1)
chrome_tracing_agent.ChromeTracingAgent.RegisterDevToolsClient(
FakeDevtoolsClient(3), self.platform1)
tracing_agent_of_platform1 = self.StartTracing(self.platform1)
with self.assertRaises(
chrome_devtools_tracing_backend.ChromeTracingStartedError):
chrome_tracing_agent.ChromeTracingAgent.RegisterDevToolsClient(
FakeDevtoolsClient(4), self.platform1)
chrome_tracing_agent.ChromeTracingAgent.RegisterDevToolsClient(
FakeDevtoolsClient(5), self.platform2)
self.StopTracing(tracing_agent_of_platform1)
chrome_tracing_agent.ChromeTracingAgent.RegisterDevToolsClient(
FakeDevtoolsClient(6), self.platform1)
def testIsSupport(self):
self.assertFalse(
chrome_tracing_agent.ChromeTracingAgent.IsSupported(self.platform1))
self.assertFalse(
chrome_tracing_agent.ChromeTracingAgent.IsSupported(self.platform2))
self.assertFalse(
chrome_tracing_agent.ChromeTracingAgent.IsSupported(self.platform3))
devtool1 = FakeDevtoolsClient(1)
devtool2 = FakeDevtoolsClient(2)
chrome_tracing_agent.ChromeTracingAgent.RegisterDevToolsClient(
devtool1, self.platform1)
chrome_tracing_agent.ChromeTracingAgent.RegisterDevToolsClient(
devtool2, self.platform2)
devtool2.is_alive = False
# Chrome tracing is only supported on platform 1 since only platform 1 has
# an alive devtool.
self.assertTrue(
chrome_tracing_agent.ChromeTracingAgent.IsSupported(self.platform1))
self.assertFalse(
chrome_tracing_agent.ChromeTracingAgent.IsSupported(self.platform2))
self.assertFalse(
chrome_tracing_agent.ChromeTracingAgent.IsSupported(self.platform3))
def testStartAndStopTracing(self):
devtool1 = FakeDevtoolsClient(1)
devtool2 = FakeDevtoolsClient(2)
devtool3 = FakeDevtoolsClient(3)
devtool4 = FakeDevtoolsClient(2)
# Register devtools 1, 2, 3 on platform1 and devtool 4 on platform 2
chrome_tracing_agent.ChromeTracingAgent.RegisterDevToolsClient(
devtool1, self.platform1)
chrome_tracing_agent.ChromeTracingAgent.RegisterDevToolsClient(
devtool2, self.platform1)
chrome_tracing_agent.ChromeTracingAgent.RegisterDevToolsClient(
devtool3, self.platform1)
chrome_tracing_agent.ChromeTracingAgent.RegisterDevToolsClient(
devtool4, self.platform2)
devtool2.is_alive = False
tracing_agent1 = self.StartTracing(self.platform1)
with self.assertRaises(
chrome_devtools_tracing_backend.ChromeTracingStartedError):
self.StartTracing(self.platform1)
self.assertTrue(devtool1.tracing_started)
self.assertFalse(devtool2.tracing_started)
self.assertTrue(devtool3.tracing_started)
# Devtool 4 shouldn't have tracing started although it has the same remote
# port as devtool 2
self.assertFalse(devtool4.tracing_started)
self.StopTracing(tracing_agent1)
self.assertFalse(devtool1.tracing_started)
self.assertFalse(devtool2.tracing_started)
self.assertFalse(devtool3.tracing_started)
self.assertFalse(devtool4.tracing_started)
# Test that it should be ok to start & stop tracing on platform1 again.
tracing_agent1 = self.StartTracing(self.platform1)
self.StopTracing(tracing_agent1)
tracing_agent2 = self.StartTracing(self.platform2)
self.assertTrue(devtool4.tracing_started)
self.StopTracing(tracing_agent2)
self.assertFalse(devtool4.tracing_started)
def testExceptionRaisedInStopTracing(self):
devtool1 = FakeDevtoolsClient(1)
devtool2 = FakeDevtoolsClient(2)
# Register devtools 1, 2 on platform 1
chrome_tracing_agent.ChromeTracingAgent.RegisterDevToolsClient(
devtool1, self.platform1)
chrome_tracing_agent.ChromeTracingAgent.RegisterDevToolsClient(
devtool2, self.platform1)
tracing_agent1 = self.StartTracing(self.platform1)
self.assertTrue(devtool1.tracing_started)
self.assertTrue(devtool2.tracing_started)
devtool2.will_raise_exception_in_stop_tracing = True
with self.assertRaises(
chrome_devtools_tracing_backend.ChromeTracingStoppedError):
self.StopTracing(tracing_agent1)
devtool1.is_alive = False
devtool2.is_alive = False
    # Registering devtool 3 on platform 1 should not raise any exception.
devtool3 = FakeDevtoolsClient(3)
chrome_tracing_agent.ChromeTracingAgent.RegisterDevToolsClient(
devtool3, self.platform1)
# Start & Stop tracing on platform 1 should work just fine.
tracing_agent2 = self.StartTracing(self.platform1)
self.StopTracing(tracing_agent2)
@decorators.Enabled('android')
@decorators.Isolated
def testCreateAndRemoveTraceConfigFileOnAndroid(self):
platform_backend = FakeAndroidPlatformBackend()
agent = chrome_tracing_agent.ChromeTracingAgent(platform_backend)
config = tracing_config.TracingConfig(
tracing_options.TracingOptions(),
tracing_category_filter.TracingCategoryFilter())
config_file_path = os.path.join(
chrome_tracing_agent._CHROME_TRACE_CONFIG_DIR_ANDROID,
chrome_tracing_agent._CHROME_TRACE_CONFIG_FILE_NAME)
agent._CreateTraceConfigFileOnAndroid(config)
config_file_str = platform_backend.device.ReadFile(config_file_path,
as_root=True)
self.assertTrue(platform_backend.device.PathExists(config_file_path))
self.assertEqual(config.GetTraceConfigJsonString(),
config_file_str.strip())
agent._RemoveTraceConfigFileOnAndroid()
self.assertFalse(platform_backend.device.PathExists(config_file_path))
# robust to multiple file removal
agent._RemoveTraceConfigFileOnAndroid()
self.assertFalse(platform_backend.device.PathExists(config_file_path))
def CreateAndRemoveTraceConfigFileOnDesktop(self, platform_backend):
agent = chrome_tracing_agent.ChromeTracingAgent(platform_backend)
config = tracing_config.TracingConfig(
tracing_options.TracingOptions(),
tracing_category_filter.TracingCategoryFilter())
config_file_path = os.path.join(
chrome_tracing_agent._CHROME_TRACE_CONFIG_DIR_DESKTOP,
chrome_tracing_agent._CHROME_TRACE_CONFIG_FILE_NAME)
agent._CreateTraceConfigFileOnDesktop(config)
self.assertTrue(os.path.exists(config_file_path))
with open(config_file_path, 'r') as f:
config_file_str = f.read()
self.assertEqual(config.GetTraceConfigJsonString(),
config_file_str.strip())
agent._RemoveTraceConfigFileOnDesktop()
self.assertFalse(os.path.exists(config_file_path))
# robust to multiple file removal
agent._RemoveTraceConfigFileOnDesktop()
self.assertFalse(os.path.exists(config_file_path))
@decorators.Enabled('linux')
@decorators.Isolated
def testCreateAndRemoveTraceConfigFileOnLinux(self):
self.CreateAndRemoveTraceConfigFileOnDesktop(FakeLinuxPlatformBackend())
@decorators.Enabled('mac')
@decorators.Isolated
def testCreateAndRemoveTraceConfigFileOnMac(self):
self.CreateAndRemoveTraceConfigFileOnDesktop(FakeMacPlatformBackend())
@decorators.Enabled('win')
@decorators.Isolated
def testCreateAndRemoveTraceConfigFileOnWin(self):
self.CreateAndRemoveTraceConfigFileOnDesktop(FakeWinPlatformBackend())
|
|
from JumpScale import j
from JumpScale.servers.serverbase import returnCodes
from JumpScale.core.errorhandling.JSExceptions import BaseJSException
import inspect
import time
class Session:
def __init__(self, ddict):
self.__dict__ = ddict
if not hasattr(self, 'nid'):
self.nid = None
def __repr__(self):
return str(self.__dict__)
def updateNodeId(self, new_node_id):
"""
Sets the session's NID attribute to the provided new_node_id.
"""
self.nid = new_node_id
__str__ = __repr__
class DaemonCMDS:
def __init__(self, daemon):
self.daemon = daemon
def authenticate(self, session):
        return True  # will authenticate all (is standard)
def registerpubkey(self, organization, user, pubkey, session):
self.daemon.keystor.setPubKey(organization, user, pubkey)
return ""
def listCategories(self, session):
return list(self.daemon.cmdsInterfaces.keys())
def getpubkeyserver(self, session):
return self.daemon.keystor.getPubKey(self.daemon.sslorg, self.daemon.ssluser, returnAsString=True)
def registersession(self, sessiondata, ssl, session):
"""
@param sessiondata is encrypted data (SSL)
"""
# ser=j.data.serializer.serializers.getMessagePack()
# sessiondictstr=ser.loads(data)
print(("register session:%s " % session))
# for k, v in list(sessiondata.items()):
# if isinstance(k, bytes):
# sessiondata.pop(k)
# k = k.decode('utf-8', 'ignore')
# if isinstance(v, bytes):
# v = v.decode('utf-8', 'ignore')
# sessiondata[k] = v
session = Session(sessiondata)
if ssl:
session.encrkey = self.daemon.decrypt(session.encrkey, session)
session.passwd = self.daemon.decrypt(session.passwd, session)
if not self.authenticate(session):
raise j.exceptions.RuntimeError("Cannot Authenticate User:%s" % session.user)
self.daemon.sessions[session.id] = session
print("OK")
return "OK"
def logeco(self, eco, session):
"""
log eco object (as dict)
"""
eco["epoch"] = self.daemon.now
eco = j.errorconditionhandler.getErrorConditionObject(ddict=eco)
self.daemon.eventhandlingTE.executeV2(eco=eco, history=self.daemon.eventsMemLog)
def introspect(self, cat, session=None):
methods = {}
interface = self.daemon.cmdsInterfaces[cat]
for name, method in inspect.getmembers(interface, inspect.ismethod):
if name.startswith('_'):
continue
args = inspect.getargspec(method)
# Remove the 'session' parameter
if 'session' in args.args:
session_index = args.args.index('session')
if session_index != len(args.args) - 1:
raise j.exceptions.RuntimeError(
"session arg needs to be last argument of method. Cat:%s Method:%s \nArgs:%s" % (cat, name, args))
del args.args[session_index]
if args.defaults:
session_default_index = session_index - len(args.args) - 1
defaults = list(args.defaults)
del defaults[session_default_index]
args = inspect.ArgSpec(args.args, args.varargs, args.keywords, defaults)
methods[name] = {'args': args, 'doc': inspect.getdoc(method)}
return methods
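# Illustrative sketch (not part of the original module): introspect("core")
# returns a dict keyed by method name, with the trailing 'session' parameter
# already stripped from each ArgSpec, roughly:
#
#   {'listCategories': {'args': ArgSpec(args=['self'], varargs=None,
#                                       keywords=None, defaults=None),
#                       'doc': None},
#    ...}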
class Daemon:
def __init__(self, name=None):
        j.application.interactive = False  # make sure errorhandler does not require input, since we are a daemon
self.name = name
self._command_handlers = {} # A cache used by command_handler()
self.cmds = {}
self.cmdsInterfaces = {}
self.cmdsInterfacesProxy = {}
self._now = 0
self.sessions = {}
self.key = ""
self.errorconditionserializer = j.data.serializer.serializers.getSerializerType("m")
self.addCMDsInterface(DaemonCMDS, "core")
def getTime(self):
        # can be overridden, e.g. in gevent set the time once per second (using self._now), which uses fewer resources
return int(time.time())
def decrypt(self, message, session):
if session.encrkey:
return self.keystor.decrypt(orgsender=session.organization, sender=session.user,
orgreader=self.sslorg, reader=self.ssluser,
message=message[0], signature=message[1])
else:
return message
def notifyOfNewNode(self, node, session_id):
"""
Notifies this daemon about a newly-registered node.
Args:
node: metadata about the new node.
session_id (str): the ID of the session the new node is involved in.
"""
if hasattr(node, 'id'):
# Let's use this opportunity to update the associated session with the new NID
self.sessions[session_id].updateNodeId(node.id)
def encrypt(self, message, session):
if session and session.encrkey:
if not hasattr(session, 'publickey'):
session.publickey = self.keystor.getPubKey(
user=session.user, organization=session.organization, returnAsString=True)
return self.keystor.encrypt(self.sslorg, self.ssluser, "", "", message,
False, pubkeyReader=session.publickey)[0]
else:
return message
def addCMDsInterface(self, cmdInterfaceClass, category, proxy=False):
if category not in self.cmdsInterfaces:
self.cmdsInterfaces[category] = []
if proxy is False:
obj = cmdInterfaceClass(self)
else:
obj = cmdInterfaceClass()
self.cmdsInterfacesProxy[category] = obj
self.cmdsInterfaces[category] = obj
def command_handler(self, command_category, command):
"""
Looks up the callable function responsible for handling the specified command.
Returns:
A callable function or None if the method could not be found.
"""
cache_key = "%s_%s" % (command_category, command)
if cache_key not in self._command_handlers:
command_interface = self.cmdsInterfaces.get(command_category, None)
self._command_handlers[cache_key] = getattr(command_interface, command, None)
return self._command_handlers.get(cache_key, None)
def processRPC(self, cmd, data, returnformat, session, category=""):
"""
@return (resultcode,returnformat,result)
item 0=cmd, item 1=returnformat (str), item 2=args (dict)
resultcode
0=ok
1= not authenticated
2= method not found
2+ any other error
"""
inputisdict = isinstance(data, dict)
ffunction = self.command_handler(command_category=category, command=cmd)
if not ffunction:
return returnCodes.METHOD_NOT_FOUND, returnformat, ''
try:
if inputisdict:
# for k, v in list(data.items()):
# if isinstance(k, bytes):
# data.pop(k)
# k = k.decode('utf-8', 'ignore')
# if isinstance(v, bytes):
# v = v.decode('utf-8', 'ignore')
# data[k] = v
if "_agentid" in data:
if data["_agentid"] != 0:
cmds = self.cmdsInterfaces["agent"]
gid = j.application.whoAmI.gid
nid = int(data["_agentid"])
data.pop("_agentid")
category2 = category.replace("processmanager_", "")
scriptid = "%s_%s" % (category2, cmd)
job = cmds.scheduleCmd(gid, nid, cmdcategory=category2, jscriptid=scriptid, cmdname=cmd,
args=data, queue="internal", log=False, timeout=60, roles=[], session=session, wait=True)
jobqueue = cmds._getJobQueue(job["guid"])
jobr = jobqueue.get(True, 60)
if not jobr:
eco = j.errorconditionhandler.getErrorConditionObject(
msg="Command %s.%s with args: %s timeout" % (category2, cmd, data))
return returnCodes.ERROR, returnformat, eco.__dict__
jobr = j.data.serializer.json.loads(jobr)
if jobr["state"] != "OK":
return jobr["resultcode"], returnformat, jobr["result"]
else:
return returnCodes.OK, returnformat, jobr["result"]
else:
data.pop("_agentid")
data['session'] = session
result = ffunction(**data)
else:
result = ffunction(data, session=session)
except Exception as e:
if isinstance(e, BaseJSException):
return returnCodes.ERROR, returnformat, e.eco
eco = j.errorconditionhandler.parsePythonExceptionObject(e)
eco.level = 2
eco.data = data
# print eco
# eco.errormessage += "\nfunction arguments were:%s\n" % str(inspect.getargspec(ffunction).args)
data.pop('session', None)
if len(str(data)) > 1024:
data = "too much data to show."
eco.errormessage = \
"ERROR IN RPC CALL %s: %s. (Session:%s)\nData:%s\n" % (cmd, eco.errormessage, session, data)
eco.process()
eco.__dict__.pop("tb", None)
eco.tb = None
errorres = eco.__dict__
return returnCodes.ERROR, returnformat, errorres
return returnCodes.OK, returnformat, result
def getSession(self, cmd, sessionid):
if sessionid in self.sessions:
session = self.sessions[sessionid]
else:
# if isinstance(cmd, bytes):
# cmd = cmd.decode('utf-8', 'ignore')
if cmd in ["registerpubkey", "getpubkeyserver", "registersession"]:
session = None
else:
error = "Authentication or Session error, session not known with id:%s" % sessionid
eco = j.errorconditionhandler.getErrorConditionObject(msg=error)
return returnCodes.AUTHERROR, "m", self.errorconditionserializer.dumps(eco.__dict__)
return session
def processRPCUnSerialized(self, cmd, informat, returnformat, data, sessionid, category=""):
"""
@return (resultcode,returnformat,result)
item 0=cmd, item 1=returnformat (str), item 2=args (dict)
resultcode
0=ok
1= not authenticated
2= method not found
2+ any other error
"""
session = self.getSession(cmd, sessionid)
if isinstance(session, tuple):
return session
try:
if informat != "":
# if isinstance(informat, bytes):
# informat = informat.decode('utf-8', 'ignore')
ser = j.data.serializer.serializers.get(informat, key=self.key)
data = ser.loads(data)
except Exception as e:
eco = j.errorconditionhandler.parsePythonExceptionObject(e)
eco.tb = ""
return returnCodes.SERIALIZATIONERRORIN, "m", self.errorconditionserializer.dumps(eco.__dict__)
parts = self.processRPC(cmd, data, returnformat=returnformat, session=session, category=category)
returnformat = parts[1] # return format as comes back from processRPC
# if isinstance(returnformat, bytes):
# returnformat = returnformat.decode('utf-8', 'ignore')
        if returnformat != "":
returnser = j.data.serializer.serializers.get(returnformat, key=session.encrkey)
error = 0
try:
data = self.encrypt(returnser.dumps(parts[2]), session)
except Exception as e:
error = 1
if error == 1:
try:
data = self.encrypt(returnser.dumps(parts[2].__dict__), session)
except:
eco = j.errorconditionhandler.getErrorConditionObject(
msg="could not serialize result from %s" % cmd)
return returnCodes.SERIALIZATIONERROROUT, "m", self.errorconditionserializer.dumps(eco.__dict__)
else:
data = parts[2]
if data is None:
data = ""
return (parts[0], parts[1], data)
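# Illustrative usage sketch (not part of the original module; it assumes a
# working JumpScale `j` runtime). The interface class and method names below
# are hypothetical; the pattern is: register a command interface under a
# category, then dispatch calls through processRPC.
#
#   class PingCMDS:
#       def __init__(self, daemon):
#           self.daemon = daemon
#       def ping(self, session):
#           return "pong"
#
#   daemon = Daemon(name="demo")
#   daemon.addCMDsInterface(PingCMDS, "demo")
#   code, fmt, result = daemon.processRPC("ping", {}, returnformat="",
#                                         session=None, category="demo")
#   # code == returnCodes.OK and result == "pong" on success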
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Helper functions for creating partitioned variables.
This is a convenient abstraction to partition a large variable across
multiple smaller variables that can be assigned to different devices.
The full variable can be reconstructed by concatenating the smaller variables.
Using partitioned variables instead of a single variable is mostly a
performance choice. It however also has an impact on:
1. Random initialization, as the random number generator is called once per
slice
2. Updates, as they happen in parallel across slices
A key design goal is to allow a different graph to repartition a variable
with the same name but different slicings, including possibly no partitions.
TODO(touts): If an initializer provides a seed, the seed must be changed
deterministically for each slice, maybe by adding one to it, otherwise each
slice will use the same values. Maybe this can be done by passing the
slice offsets to the initializer functions.
Typical usage:
```python
# Create a list of partitioned variables with:
vs = create_partitioned_variables(
<shape>, <slicing>, <initializer>, name=<optional-name>)
# Pass the list as inputs to embedding_lookup for sharded, parallel lookup:
y = embedding_lookup(vs, ids, partition_strategy="div")
# Or fetch the variables in parallel to speed up large matmuls:
z = matmul(x, concat(slice_dim, vs))
```
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import variable_scope
from tensorflow.python.util import deprecation
from tensorflow.python.util.tf_export import tf_export
__all__ = [
"create_partitioned_variables",
"variable_axis_size_partitioner",
"min_max_variable_partitioner",
"fixed_size_partitioner",
]
@tf_export(v1=["variable_axis_size_partitioner"])
def variable_axis_size_partitioner(
max_shard_bytes, axis=0, bytes_per_string_element=16, max_shards=None):
"""Get a partitioner for VariableScope to keep shards below `max_shard_bytes`.
This partitioner will shard a Variable along one axis, attempting to keep
the maximum shard size below `max_shard_bytes`. In practice, this is not
always possible when sharding along only one axis. When this happens,
this axis is sharded as much as possible (i.e., every dimension becomes
a separate shard).
If the partitioner hits the `max_shards` limit, then each shard may end up
larger than `max_shard_bytes`. By default `max_shards` equals `None` and no
limit on the number of shards is enforced.
One reasonable value for `max_shard_bytes` is `(64 << 20) - 1`, or almost
`64MB`, to keep below the protobuf byte limit.
Args:
max_shard_bytes: The maximum size any given shard is allowed to be.
axis: The axis to partition along. Default: outermost axis.
bytes_per_string_element: If the `Variable` is of type string, this provides
an estimate of how large each scalar in the `Variable` is.
    max_shards: The maximum number of shards (an int) to create; takes
      precedence over `max_shard_bytes`.
Returns:
A partition function usable as the `partitioner` argument to
`variable_scope` and `get_variable`.
Raises:
ValueError: If any of the byte counts are non-positive.
"""
if max_shard_bytes < 1 or bytes_per_string_element < 1:
raise ValueError(
"Both max_shard_bytes and bytes_per_string_element must be positive. "
f"Currently, max_shard_bytes is {max_shard_bytes} and"
f"bytes_per_string_element is {bytes_per_string_element}")
if max_shards and max_shards < 1:
raise ValueError(
"max_shards must be positive.")
def _partitioner(shape, dtype):
"""Partitioner that partitions shards to have max_shard_bytes total size.
Args:
shape: A `TensorShape`.
dtype: A `DType`.
Returns:
A tuple representing how much to slice each axis in shape.
Raises:
ValueError: If shape is not a fully defined `TensorShape` or dtype is not
a `DType`.
"""
if not isinstance(shape, tensor_shape.TensorShape):
raise ValueError(f"shape is not a TensorShape: {shape}")
if not shape.is_fully_defined():
raise ValueError(f"shape is not fully defined: {shape}")
if not isinstance(dtype, dtypes.DType):
raise ValueError(f"dtype is not a DType: {dtype}")
if dtype.base_dtype == dtypes.string:
element_size = bytes_per_string_element
else:
element_size = dtype.size
partitions = [1] * shape.ndims
bytes_per_slice = 1.0 * (
shape.num_elements() / shape.dims[axis].value) * element_size
# How many slices can we fit on one shard of size at most max_shard_bytes?
# At least one slice is required.
slices_per_shard = max(1, math.floor(max_shard_bytes / bytes_per_slice))
# How many shards do we need for axis given that each shard fits
# slices_per_shard slices from a total of shape[axis] slices?
axis_shards = int(math.ceil(
1.0 * shape.dims[axis].value / slices_per_shard))
if max_shards:
axis_shards = min(max_shards, axis_shards)
partitions[axis] = axis_shards
return partitions
return _partitioner
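# Illustrative arithmetic sketch (not part of the original module): how the
# shard count above falls out for a concrete float32 variable partitioned
# along axis 0.
#
#   shape = [1024, 1024], dtype = float32 (4 bytes per element)
#   bytes_per_slice  = (1024 * 1024 / 1024) * 4 = 4096
#   slices_per_shard = max(1, floor(max_shard_bytes / 4096))
#   axis_shards      = ceil(1024 / slices_per_shard)
#
# With the commonly used max_shard_bytes = (64 << 20) - 1 this yields a single
# shard, while max_shard_bytes = 1 << 20 yields ceil(1024 / 256) = 4 shards.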
@tf_export(v1=["min_max_variable_partitioner"])
def min_max_variable_partitioner(max_partitions=1, axis=0,
min_slice_size=256 << 10,
bytes_per_string_element=16):
"""Partitioner to allocate minimum size per slice.
Returns a partitioner that partitions the variable of given shape and dtype
such that each partition has a minimum of `min_slice_size` slice of the
variable. The maximum number of such partitions (upper bound) is given by
`max_partitions`.
Args:
max_partitions: Upper bound on the number of partitions. Defaults to 1.
axis: Axis along which to partition the variable. Defaults to 0.
min_slice_size: Minimum size of the variable slice per partition. Defaults
to 256K.
bytes_per_string_element: If the `Variable` is of type string, this provides
an estimate of how large each scalar in the `Variable` is.
Returns:
A partition function usable as the `partitioner` argument to
`variable_scope` and `get_variable`.
"""
def _partitioner(shape, dtype):
"""Partitioner that partitions list for a variable of given shape and type.
Ex: Consider partitioning a variable of type float32 with
shape=[1024, 1024].
If `max_partitions` >= 16, this function would return
[(1024 * 1024 * 4) / (256 * 1024), 1] = [16, 1].
If `max_partitions` < 16, this function would return
[`max_partitions`, 1].
Args:
shape: Shape of the variable.
dtype: Type of the variable.
Returns:
List of partitions for each axis (currently only one axis can be
partitioned).
Raises:
ValueError: If axis to partition along does not exist for the variable.
"""
if axis >= len(shape):
raise ValueError(
f"Cannot partition variable along axis {axis} when shape is "
f"only {shape}")
if dtype.base_dtype == dtypes.string:
bytes_per_element = bytes_per_string_element
else:
bytes_per_element = dtype.size
total_size_bytes = shape.num_elements() * bytes_per_element
partitions = total_size_bytes / min_slice_size
partitions_list = [1] * len(shape)
# We can not partition the variable beyond what its shape or
# `max_partitions` allows.
partitions_list[axis] = max(1, min(shape.dims[axis].value,
max_partitions,
int(math.ceil(partitions))))
return partitions_list
return _partitioner
@tf_export(v1=["fixed_size_partitioner"])
def fixed_size_partitioner(num_shards, axis=0):
"""Partitioner to specify a fixed number of shards along given axis.
Args:
num_shards: `int`, number of shards to partition variable.
axis: `int`, axis to partition on.
Returns:
A partition function usable as the `partitioner` argument to
`variable_scope` and `get_variable`.
"""
def _partitioner(shape, **unused_args):
partitions_list = [1] * len(shape)
partitions_list[axis] = min(num_shards, shape.dims[axis].value)
return partitions_list
return _partitioner
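# Illustrative usage sketch (not part of the original module; variable names
# are hypothetical): splitting a variable into a fixed number of shards along
# axis 0 via the variable_scope API imported above.
#
#   partitioner = fixed_size_partitioner(num_shards=4)
#   with variable_scope.variable_scope("embed", partitioner=partitioner):
#     weights = variable_scope.get_variable(
#         "weights", shape=[1024, 128], dtype=dtypes.float32)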
@tf_export(v1=["create_partitioned_variables"])
@deprecation.deprecated(
date=None,
instructions="Use `tf.get_variable` with a partitioner set.")
def create_partitioned_variables(
shape, slicing, initializer, dtype=dtypes.float32,
trainable=True, collections=None, name=None, reuse=None):
"""Create a list of partitioned variables according to the given `slicing`.
Currently only one dimension of the full variable can be sliced, and the
full variable can be reconstructed by the concatenation of the returned
list along that dimension.
Args:
shape: List of integers. The shape of the full variable.
slicing: List of integers. How to partition the variable.
Must be of the same length as `shape`. Each value
indicate how many slices to create in the corresponding
dimension. Presently only one of the values can be more than 1;
that is, the variable can only be sliced along one dimension.
      For convenience, the requested number of partitions does not have to
divide the corresponding dimension evenly. If it does not, the
shapes of the partitions are incremented by 1 starting from partition
0 until all slack is absorbed. The adjustment rules may change in the
future, but as you can save/restore these variables with different
slicing specifications this should not be a problem.
initializer: A `Tensor` of shape `shape` or a variable initializer
function. If a function, it will be called once for each slice,
passing the shape and data type of the slice as parameters. The
function must return a tensor with the same shape as the slice.
dtype: Type of the variables. Ignored if `initializer` is a `Tensor`.
trainable: If True also add all the variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES`.
collections: List of graph collections keys to add the variables to.
Defaults to `[GraphKeys.GLOBAL_VARIABLES]`.
name: Optional name for the full variable. Defaults to
`"PartitionedVariable"` and gets uniquified automatically.
    reuse: Boolean or `None`; if `True` and name is set, it will reuse
      previously created variables. If `False`, it will create new variables.
      If `None`, it will inherit the parent scope reuse.
Returns:
A list of Variables corresponding to the slicing.
Raises:
ValueError: If any of the arguments is malformed.
"""
if len(shape) != len(slicing):
raise ValueError(
"The 'shape' and 'slicing' of a partitioned Variable "
f"must have the length: shape: {shape}, slicing: {slicing}")
if len(shape) < 1:
raise ValueError("A partitioned Variable must have rank at least 1: "
f"shape: {shape}")
# Legacy: we are provided the slicing directly, so just pass it to
# the partitioner.
partitioner = lambda **unused_kwargs: slicing
with variable_scope.variable_scope(
name, "PartitionedVariable", reuse=reuse):
# pylint: disable=protected-access
partitioned_var = variable_scope._get_partitioned_variable(
name=None,
shape=shape,
dtype=dtype,
initializer=initializer,
trainable=trainable,
partitioner=partitioner,
collections=collections)
return list(partitioned_var)
# pylint: enable=protected-access
|
|
# Copyright (c) 2009, Joseph Lisee
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of StatePy nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY Joseph Lisee ''AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL <copyright holder> BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Author: Joseph Lisee
# File: statepy/state.py
# STD Imports
import inspect
import types
# Project Imports
class Event(object):
"""
    The action that caused a state transition; it has a type and any other
    data you wish to tag along with it.
@type type: str
@ivar type: The type of the event, used in the state transition tables
"""
def __init__(self, etype = '', **kwargs):
"""
Initialize the event with its type and associated data
@type etype: str
@param etype: The type of the event, used in the state transition tables
"""
self.type = etype
def declareEventType(name):
"""
Defines an event type in a manner which will avoid collisions
It defines it in the following format: <file>:<line> <EVENT>
    @note All spaces in the string will be replaced with '_'
@rtype : str
@return: The new event type
"""
stack = inspect.stack()
try:
frame = stack[1][0]
line = frame.f_lineno
fileName = frame.f_code.co_filename
# Make .py vs .pyc files have the same event names
if fileName.endswith('.pyc'):
            fileName = fileName[:-1]
return '%s:%d %s' % (fileName, line, name.replace(' ', '_'))
finally:
del stack
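# Illustrative sketch (not part of the original module): declaring an event
# type at module level keeps it unique per file and line, e.g.
#
#   DOOR_OPENED = declareEventType('DOOR OPENED')
#   # -> something like '/path/to/doors.py:12 DOOR_OPENED'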
class State(object):
"""
    Basic state class; it provides empty implementations for all the needed
    methods of a state
"""
def __init__(self, **statevars):
for name, statevar in statevars.iteritems():
setattr(self, name, statevar)
# TODO: check my own transition table to make sure none of the
# transitions are actual member functions
@staticmethod
def transitions():
"""
Returns a map of eventTypes -> resulting states, loopbacks are allowed
"""
return {}
# TODO: Config functionality not yet ported
# @staticmethod
# def getattr():
# """
# Returns the possible config values of the state
# """
# return set([])
def enter(self):
"""
Called when the state is entered, loopbacks don't count
"""
pass
def exit(self):
"""
Called when the state is exited, loopbacks don't count
"""
pass
def publish(self, eventType, event):
"""
Publish an event, with the owning Machine object as publisher
@warning: Only valid when the object is created by a Machine object
"""
        raise NotImplementedError("publish is only valid when created by a Machine")
class End(State):
"""
    State which marks a valid end point for a state machine.
    It ensures that any "dead ends" (states with no out transitions) are
    actually intended to be that way.
"""
def __init__(self, config = None, **kwargs):
# TODO: config functionality not yet ported
#State.__init__(self, config, **kwargs)
State.__init__(self, **kwargs)
class Branch(object):
"""
    A marker class indicating that we branch the state machine
"""
def __init__(self, state, branchingEvent = None):
"""
@type state: ram.ai.state.State
@param state: The state to branch to
@type branchingEvent: Event
@param branchingEvent: The event that caused the branch, if any
"""
self.state = state
self.branchingEvent = branchingEvent
class Machine(object):
"""
An event based finite state machine.
    This machine works with a graph of statepy.State classes. This graph
    represents a state machine. There can only be one current state at a time.
    When events are injected into the state machine, the current state's
    transition table determines which state to advance to next.
@type _root: statepy.State
@ivar _root: The first state of the Machine
@type _currentState: statepy.State
@ivar _currentState: The current state which is processing events
@type _started: boolean
@ivar _started: The Machine will not process events unless started
    @type _complete: boolean
    @ivar _complete: True when the Machine is complete (ie. currentState = None)
@type _previousEvent: Event
@ivar _previousEvent: The last event injected into the state machine
@todo statevars, branches
"""
STATE_ENTERED = declareEventType('STATE_ENTERED')
STATE_EXITED = declareEventType('STATE_EXITED')
COMPLETE = declareEventType('COMPLETE')
def __init__(self, statevars = None):
"""
The constructor for the Machine class.
@type statevars: dict
@param statevars: A dictionary of the object variables given to states
"""
if statevars is None:
statevars = {}
# Set default instance values
self._root = None
self._currentState = None
self._started = False
self._complete = False
self._previousEvent = Event()
self._statevars = {}
self._startStatevars = {}
self._branches = {}
# Load up the arguments
self._statevars = statevars
def currentState(self):
return self._currentState
def start(self, startState, statevars = None):
"""
Starts or branches the state machine with the given state
If the given state is really a branch, it will branch to that state
instead.
@type startState: State
@param startState: The first state for the machine to enter
@type statevars: dict
@param statevars: An additional dictionary of variables for the State
"""
# Remove the previous startStatevars from our list of variables
for key in self._startStatevars.iterkeys():
del self._statevars[key]
if statevars is not None:
# Ensure there is no overlap
currentVars = set(self._statevars.keys())
newVars = set(statevars.keys())
intersection = currentVars.intersection(newVars)
if len(intersection) != 0:
msg = "ERROR: statevars already contains: %s" % interection
raise statepy.StatePyException(msg)
self._startStatevars = statevars
# Merge the statevars
self._statevars.update(statevars)
if Branch == type(startState):
# Determine if we are branching
branching = True
self._branchToState(startState.state, startState.branchingEvent)
else:
self._root = startState
self._started = True
self._complete = False
self._enterState(startState)
def stop(self):
"""
Exits the current state, and stops if from responding to any more
events. Also stops all branches
"""
if self._currentState is not None:
self._exitState()
self._started = False
        self._previousEvent = Event()
self._root = None
self._currentState = None
self._complete = False
for branch in self._branches.itervalues():
branch.stop()
self._branches = {}
def stopBranch(self, stateType):
"""
        Stops just the desired branch and removes it from the active branches
"""
self._branches[stateType].stop()
del self._branches[stateType]
def injectEvent(self, rawEvent, _sendToBranches = False):
"""
Sends an event into the state machine
        If the current state's transition table has an entry for events of this
        type, this will cause a transition
        @type rawEvent: Event or str
        @param rawEvent: A new event for the state machine to process
@type _sendToBranches: bool
@param _sendToBranches: Use only for testing, injects events into
branched state machines
"""
# If the state we just entered transitions on same kind of event that
# caused the transition, we can be notified again with the same event!
# This check here prevents that event from causing an unwanted
# transition.
if rawEvent == self._previousEvent:
return
# Make sure the event is of the right class
if isinstance(rawEvent, Event):
event = rawEvent
else:
event = Event(rawEvent)
if not self._started:
raise Exception("Machine must be started")
transitionTable = self._currentState.transitions()
nextState = transitionTable.get(event.type, None)
if nextState is not None:
# Determine if we are branching
branching = False
if Branch == type(nextState):
branching = True
nextState = nextState.state
            # Determine if we are in a loopback
loopback = False
if nextState == type(self._currentState):
loopback = True
            # For loopbacks or branches we don't re-enter or exit our state,
            # we just call the transition function
leaveState = False
if (not branching) and (not loopback):
leaveState = True
# We are leaving the current state
currentState = self._currentState
if leaveState:
self._exitState()
# Call the function for the transitions
transFunc = self._getTransitionFunc(event.type, currentState)
if transFunc is not None:
transFunc(event)
# Notify that we are entering the next state
if (not loopback) and (not branching):
# Create an instance of the next state's class
self._enterState(nextState)
elif branching:
self._branchToState(nextState, branchingEvent = event)
# Record previous event
self._previousEvent = event
if _sendToBranches:
for branch in self._branches.itervalues():
branch.injectEvent(event)
@property
def complete(self):
"""Returns true when """
return self._complete
def _enterState(self, newStateClass):
"""
Does all the house keeping when entering a new state
"""
        # CONFIG LOOKUP USED TO HAPPEN HERE
# Create state instance from class, make sure to pass all subsystems
# along as well
newState = newStateClass(**self._statevars)
# Subscribe to every event of the desired type
        # <EVENT SUBSCRIPTION USED TO HAPPEN HERE>
transitionTable = newState.transitions()
        # Actually enter the state and record it as our new current state
self._currentState = newState
self._currentState.enter()
# Notify everyone we just entered the state
#fullClassName = '%s.%s' % (self._currentState.__class__.__module__,
# self._currentState.__class__.__name__)
# NOTIFY STATE BEING ENTERED
# If we are in a state with no way out, exit the state and mark ourself
# complete
if 0 == len(transitionTable):
self._exitState()
self._complete = True
# NOTIFY MACHINE BEING COMPLETE
def _exitState(self):
"""
Does all the house keeping for when you are exiting an old state
"""
self._currentState.exit()
# NOTIFY STATE BEING EXIT
#fullClassName = '%s.%s' % (self._currentState.__class__.__module__,
# self._currentState.__class__.__name__)
self._currentState = None
def _branchToState(self, nextState, branchingEvent = None):
if self._branches.has_key(nextState):
raise Exception("Already branched to this state")
# Create new state machine
branchedMachine = Machine(self._statevars)
# Start it up with the proper state
branchedMachine.start(nextState)
# Set the previous state to avoid unwanted transitions caused by
        # the event that led us here, triggering a transition in the newly
# created state machine
branchedMachine._previousEvent = branchingEvent
# Store new state machine
self._branches[nextState] = branchedMachine
def _getTransitionFunc(self, etype, obj):
"""
        Determines which function to call during a transition between states
        This uses the event type of the event which caused the transition to
        determine which member function of the self._currentState to call.
"""
# Trim etype of namespace stuff
etype = etype.split(' ')[-1]
# Grab all member functions
members = inspect.getmembers(obj, inspect.ismethod)
# See if we have a matching method
matches = [func for name,func in members if name == etype]
# We found it
assert len(matches) < 2
if len(matches) > 0:
return matches[0]
@property
def branches(self):
return self._branches
@staticmethod
def writeStateGraph(fileobj, startState, ordered = False, noLoops = False):
"""
Write the graph of the state machine starting at the given state to
the fileobj.
@type fileobj: a file like object
@param fileobj: The object to write the result graph to (ie:
fileobject.write(graphtext))
@type startState: ram.ai.state.State
@param startState: The state to start the graph at
@type ordered: boolean
@param ordered: Whether or not to alphabetize the states
"""
fileobj.write("digraph aistate {\n")
stateTransitionList = []
traversedStates = []
Machine._traverse(startState, stateTransitionList, traversedStates,
noLoops)
# Sort list for determinism
if ordered:
stateTransitionList.sort()
# Output Labels in Simple format
traversedStates.sort(key = Machine._dottedName)
for state in traversedStates:
fullName = Machine._dottedName(state)
shortName = state.__name__
            # Shape denotes "end" states with a "stop sign" style shape
shape = 'ellipse'
if 0 == len(state.transitions()):
shape = 'doubleoctagon'
fileobj.write('%s [label=%s,shape=%s]\n' % \
(fullName, shortName, shape))
for item in stateTransitionList:
fileobj.write(item + "\n")
fileobj.write("}")
fileobj.flush() # Push data to file
@staticmethod
def _traverse(currentState,stateList,traversedList,noLoops=False):
if 0 == len(currentState.transitions()):
if not currentState in traversedList:
traversedList.append(currentState)
else:
for aiEvent,aiState in currentState.transitions().iteritems():
eventName = str(aiEvent).split(' ')[-1]
                # Style is determined by whether or not we are branching
style = "solid"
if type(aiState) is Branch:
style = "dotted"
aiState = aiState.state
# Determine state names
startName = Machine._dottedName(currentState)
endName = Machine._dottedName(aiState)
if (not noLoops) or (startName != endName):
strStruct = "%s -> %s [label=%s,style=%s]" % \
(startName, endName, eventName, style)
stateList.append(strStruct)
if not currentState in traversedList:
traversedList.append(currentState)
                # Don't recurse on a state we have already seen
if not aiState in traversedList:
Machine._traverse(aiState, stateList,
traversedList, noLoops)
@staticmethod
def _dottedName(cls):
return cls.__module__.replace('.','_') + '_' + cls.__name__
|
|
from __future__ import print_function, division, absolute_import
import sys
# unittest.TestCase.subTest() was only added in Python 3.4
if sys.version_info < (3, 4):
    import unittest2 as unittest
else:
    import unittest
# unittest.mock is not available in 2.7 (though unittest2 might contain it?)
try:
import unittest.mock as mock
except ImportError:
import mock
import numpy as np
import six.moves as sm
import imgaug as ia
# TODO add tests for:
# hooks is_activated
# hooks is_propagating
# hooks preprocess
# hooks postprocess
# HeatmapsOnImage.__init__()
# HeatmapsOnImage.get_arr()
# HeatmapsOnImage.to_uint8()
# HeatmapsOnImage.from_0to1()
# HeatmapsOnImage.copy()
# HeatmapsOnImage.deepcopy()
class TestHeatmapsOnImage_draw(unittest.TestCase):
def test_basic_functionality(self):
heatmaps_arr = np.float32([
[0.5, 0.0, 0.0, 0.5],
[0.0, 1.0, 1.0, 0.0],
[0.0, 1.0, 1.0, 0.0],
[0.5, 0.0, 0.0, 0.5],
])
heatmaps = ia.HeatmapsOnImage(heatmaps_arr, shape=(4, 4, 3))
heatmaps_drawn = heatmaps.draw()[0]
assert heatmaps_drawn.shape == (4, 4, 3)
v1 = heatmaps_drawn[0, 1]
v2 = heatmaps_drawn[0, 0]
v3 = heatmaps_drawn[1, 1]
v1_coords = [(0, 1), (0, 2), (1, 0), (1, 3), (2, 0), (2, 3), (3, 1),
(3, 2)]
v2_coords = [(0, 0), (0, 3), (3, 0), (3, 3)]
v3_coords = [(1, 1), (1, 2), (2, 1), (2, 2)]
for y, x in v1_coords:
assert np.allclose(heatmaps_drawn[y, x], v1)
for y, x in v2_coords:
assert np.allclose(heatmaps_drawn[y, x], v2)
for y, x in v3_coords:
assert np.allclose(heatmaps_drawn[y, x], v3)
def test_use_size_arg_with_different_shape_than_heatmap_arr_shape(self):
# size differs from heatmap array size
heatmaps_arr = np.float32([
[0.0, 1.0],
[0.0, 1.0]
])
heatmaps = ia.HeatmapsOnImage(heatmaps_arr, shape=(2, 2, 3))
heatmaps_drawn = heatmaps.draw(size=(4, 4))[0]
assert heatmaps_drawn.shape == (4, 4, 3)
v1 = heatmaps_drawn[0, 0]
v2 = heatmaps_drawn[0, -1]
for y in sm.xrange(4):
for x in sm.xrange(2):
assert np.allclose(heatmaps_drawn[y, x], v1)
for y in sm.xrange(4):
for x in sm.xrange(2, 4):
assert np.allclose(heatmaps_drawn[y, x], v2)
# TODO test other cmaps
class TestHeatmapsOnImage_draw_on_image(unittest.TestCase):
@property
def heatmaps(self):
heatmaps_arr = np.float32([
[0.0, 1.0],
[0.0, 1.0]
])
return ia.HeatmapsOnImage(heatmaps_arr, shape=(2, 2, 3))
def test_cmap_is_none(self):
heatmaps = self.heatmaps
image = np.uint8([
[0, 0, 0, 255],
[0, 0, 0, 255],
[0, 0, 0, 255],
[0, 0, 0, 255]
])
image = np.tile(image[..., np.newaxis], (1, 1, 3))
heatmaps_drawn = heatmaps.draw_on_image(image, alpha=0.5, cmap=None)[0]
assert heatmaps_drawn.shape == (4, 4, 3)
assert np.all(heatmaps_drawn[0:4, 0:2, :] == 0)
assert (
np.all(heatmaps_drawn[0:4, 2:3, :] == 128)
or np.all(heatmaps_drawn[0:4, 2:3, :] == 127))
assert (
np.all(heatmaps_drawn[0:4, 3:4, :] == 255)
or np.all(heatmaps_drawn[0:4, 3:4, :] == 254))
def test_cmap_is_none_and_resize_is_image(self):
heatmaps = self.heatmaps
image = np.uint8([
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0]
])
image = np.tile(image[..., np.newaxis], (1, 1, 3))
heatmaps_drawn = heatmaps.draw_on_image(
image, alpha=0.5, resize="image", cmap=None)[0]
assert heatmaps_drawn.shape == (2, 2, 3)
assert np.all(heatmaps_drawn[0:2, 0, :] == 0)
assert (
np.all(heatmaps_drawn[0:2, 1, :] == 128)
or np.all(heatmaps_drawn[0:2, 1, :] == 127))
class TestHeatmapsOnImage_invert(unittest.TestCase):
@property
def heatmaps_arr(self):
return np.float32([
[0.0, 5.0, 10.0],
[-1.0, -2.0, 7.5]
])
@property
def expected_arr(self):
return np.float32([
[8.0, 3.0, -2.0],
[9.0, 10.0, 0.5]
])
def test_with_2d_input_array(self):
# (H, W)
heatmaps_arr = self.heatmaps_arr
expected = self.expected_arr
heatmaps = ia.HeatmapsOnImage(heatmaps_arr,
shape=(2, 3),
min_value=-2.0,
max_value=10.0)
assert np.allclose(heatmaps.get_arr(), heatmaps_arr)
assert np.allclose(heatmaps.invert().get_arr(), expected)
def test_with_3d_input_array(self):
# (H, W, 1)
heatmaps_arr = self.heatmaps_arr
expected = self.expected_arr
heatmaps = ia.HeatmapsOnImage(heatmaps_arr[..., np.newaxis],
shape=(2, 3),
min_value=-2.0,
max_value=10.0)
assert np.allclose(heatmaps.get_arr(),
heatmaps_arr[..., np.newaxis])
assert np.allclose(heatmaps.invert().get_arr(),
expected[..., np.newaxis])
class TestHeatmapsOnImage_pad(unittest.TestCase):
@property
def heatmaps(self):
heatmaps_arr = np.float32([
[0.0, 1.0],
[0.0, 1.0]
])
return ia.HeatmapsOnImage(heatmaps_arr, shape=(2, 2, 3))
def test_defaults(self):
heatmaps = self.heatmaps
heatmaps_padded = heatmaps.pad(top=1, right=2, bottom=3, left=4)
assert heatmaps_padded.arr_0to1.shape == (2+(1+3), 2+(4+2), 1)
assert np.allclose(
heatmaps_padded.arr_0to1[:, :, 0],
np.float32([
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
])
)
def test_mode_constant_with_cval_050(self):
heatmaps = self.heatmaps
heatmaps_padded = heatmaps.pad(top=1, right=2, bottom=3, left=4,
cval=0.5)
assert heatmaps_padded.arr_0to1.shape == (2+(1+3), 2+(4+2), 1)
assert np.allclose(
heatmaps_padded.arr_0to1[:, :, 0],
np.float32([
[0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5],
[0.5, 0.5, 0.5, 0.5, 0.0, 1.0, 0.5, 0.5],
[0.5, 0.5, 0.5, 0.5, 0.0, 1.0, 0.5, 0.5],
[0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5],
[0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5],
[0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5]
])
)
def test_mode_edge(self):
heatmaps = self.heatmaps
heatmaps_padded = heatmaps.pad(top=1, right=2, bottom=3, left=4,
mode="edge")
assert heatmaps_padded.arr_0to1.shape == (2+(1+3), 2+(4+2), 1)
assert np.allclose(
heatmaps_padded.arr_0to1[:, :, 0],
np.float32([
[0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0]
])
)
class TestHeatmapsOnImage_pad_to_aspect_ratio(unittest.TestCase):
@property
def heatmaps(self):
heatmaps_arr = np.float32([
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0]
])
return ia.HeatmapsOnImage(heatmaps_arr, shape=(2, 2, 3))
def test_square_ratio_with_default_mode_and_cval(self):
heatmaps = self.heatmaps
heatmaps_padded = heatmaps.pad_to_aspect_ratio(1.0)
assert heatmaps_padded.arr_0to1.shape == (3, 3, 1)
assert np.allclose(
heatmaps_padded.arr_0to1[:, :, 0],
np.float32([
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 0.0]
])
)
def test_square_ratio_with_cval_050(self):
heatmaps = self.heatmaps
heatmaps_padded = heatmaps.pad_to_aspect_ratio(1.0, cval=0.5)
assert heatmaps_padded.arr_0to1.shape == (3, 3, 1)
assert np.allclose(
heatmaps_padded.arr_0to1[:, :, 0],
np.float32([
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.5, 0.5, 0.5]
])
)
def test_square_ratio_with_edge_mode(self):
heatmaps = self.heatmaps
heatmaps_padded = heatmaps.pad_to_aspect_ratio(1.0, mode="edge")
assert heatmaps_padded.arr_0to1.shape == (3, 3, 1)
assert np.allclose(
heatmaps_padded.arr_0to1[:, :, 0],
np.float32([
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0]
])
)
def test_wider_than_high_ratio_with_cval_010(self):
heatmaps = self.heatmaps
heatmaps_padded = heatmaps.pad_to_aspect_ratio(2.0, cval=0.1)
assert heatmaps_padded.arr_0to1.shape == (2, 4, 1)
assert np.allclose(
heatmaps_padded.arr_0to1[:, :, 0],
np.float32([
[0.0, 0.0, 1.0, 0.1],
[0.0, 0.0, 1.0, 0.1]
])
)
def test_higher_than_wide_ratio_with_cval_010(self):
heatmaps = self.heatmaps
heatmaps_padded = heatmaps.pad_to_aspect_ratio(0.25, cval=0.1)
assert heatmaps_padded.arr_0to1.shape == (12, 3, 1)
assert np.allclose(
heatmaps_padded.arr_0to1[:, :, 0],
np.float32([
[0.1, 0.1, 0.1],
[0.1, 0.1, 0.1],
[0.1, 0.1, 0.1],
[0.1, 0.1, 0.1],
[0.1, 0.1, 0.1],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.1, 0.1, 0.1],
[0.1, 0.1, 0.1],
[0.1, 0.1, 0.1],
[0.1, 0.1, 0.1],
[0.1, 0.1, 0.1]
])
)
class TestHeatmapsOnImage_avg_pool(unittest.TestCase):
def test_with_kernel_size_2(self):
heatmaps_arr = np.float32([
[0.0, 0.0, 0.5, 1.0],
[0.0, 0.0, 0.5, 1.0],
[0.0, 0.0, 0.5, 1.0],
[0.0, 0.0, 0.5, 1.0]
])
heatmaps = ia.HeatmapsOnImage(heatmaps_arr, shape=(4, 4, 3))
heatmaps_pooled = heatmaps.avg_pool(2)
assert heatmaps_pooled.arr_0to1.shape == (2, 2, 1)
assert np.allclose(
heatmaps_pooled.arr_0to1[:, :, 0],
np.float32([[0.0, 0.75],
[0.0, 0.75]])
)
class TestHeatmapsOnImage_max_pool(unittest.TestCase):
def test_with_kernel_size_2(self):
heatmaps_arr = np.float32([
[0.0, 0.0, 0.5, 1.0],
[0.0, 0.0, 0.5, 1.0],
[0.0, 0.0, 0.5, 1.0],
[0.0, 0.0, 0.5, 1.0]
])
heatmaps = ia.HeatmapsOnImage(heatmaps_arr, shape=(4, 4, 3))
heatmaps_pooled = heatmaps.max_pool(2)
assert heatmaps_pooled.arr_0to1.shape == (2, 2, 1)
assert np.allclose(
heatmaps_pooled.arr_0to1[:, :, 0],
np.float32([[0.0, 1.0],
[0.0, 1.0]])
)
class TestHeatmapsOnImage_resize(unittest.TestCase):
def test_resize_to_exact_shape(self):
heatmaps_arr = np.float32([
[0.0, 1.0]
])
heatmaps = ia.HeatmapsOnImage(heatmaps_arr, shape=(4, 4, 3))
heatmaps_scaled = heatmaps.resize((4, 4), interpolation="nearest")
assert heatmaps_scaled.arr_0to1.shape == (4, 4, 1)
assert heatmaps_scaled.arr_0to1.dtype.name == "float32"
assert np.allclose(
heatmaps_scaled.arr_0to1[:, :, 0],
np.float32([
[0.0, 0.0, 1.0, 1.0],
[0.0, 0.0, 1.0, 1.0],
[0.0, 0.0, 1.0, 1.0],
[0.0, 0.0, 1.0, 1.0]
])
)
def test_resize_to_twice_the_size(self):
heatmaps_arr = np.float32([
[0.0, 1.0]
])
heatmaps = ia.HeatmapsOnImage(heatmaps_arr, shape=(4, 4, 3))
heatmaps_scaled = heatmaps.resize(2.0, interpolation="nearest")
assert heatmaps_scaled.arr_0to1.shape == (2, 4, 1)
assert heatmaps_scaled.arr_0to1.dtype.name == "float32"
assert np.allclose(
heatmaps_scaled.arr_0to1[:, :, 0],
np.float32([
[0.0, 0.0, 1.0, 1.0],
[0.0, 0.0, 1.0, 1.0]
])
)
class TestHeatmapsOnImage_from_uint8(unittest.TestCase):
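    # The tests below assume that from_uint8() maps uint8 values linearly into
    # the internal [0.0, 1.0] representation (arr_0to1 = arr_uint8 / 255),
    # independent of the min_value/max_value metadata.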
def test_3d_uint8_array(self):
hm = ia.HeatmapsOnImage.from_uint8(
np.uint8([
[0, 128, 255],
[255, 128, 0]
])[..., np.newaxis],
(20, 30, 3)
)
assert hm.shape == (20, 30, 3)
assert hm.arr_0to1.shape == (2, 3, 1)
assert np.allclose(hm.arr_0to1[..., 0], np.float32([
[0, 128/255, 1.0],
[1.0, 128/255, 0]
]))
def test_2d_uint8_array(self):
hm = ia.HeatmapsOnImage.from_uint8(
np.uint8([
[0, 128, 255],
[255, 128, 0]
]),
(20, 30, 3)
)
assert hm.shape == (20, 30, 3)
assert hm.arr_0to1.shape == (2, 3, 1)
assert np.allclose(hm.arr_0to1[..., 0], np.float32([
[0, 128/255, 1.0],
[1.0, 128/255, 0]
]))
def test_min_value_and_max_value(self):
# min_value, max_value
hm = ia.HeatmapsOnImage.from_uint8(
np.uint8([
[0, 128, 255],
[255, 128, 0]
])[..., np.newaxis],
(20, 30, 3),
min_value=-1.0,
max_value=2.0
)
assert hm.shape == (20, 30, 3)
assert hm.arr_0to1.shape == (2, 3, 1)
assert np.allclose(hm.arr_0to1[..., 0], np.float32([
[0, 128/255, 1.0],
[1.0, 128/255, 0]
]))
assert np.allclose(hm.min_value, -1.0)
assert np.allclose(hm.max_value, 2.0)
class TestHeatmapsOnImage_change_normalization(unittest.TestCase):
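    # The tests below assume change_normalization() rescales linearly:
    #     out = (arr - src_min) / (src_max - src_min) * (dst_max - dst_min) + dst_min
    # e.g. 0.5 in the range (0.0, 1.0) maps to 1.0 in the range (0.0, 2.0).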
def test_increase_max_value(self):
# (0.0, 1.0) -> (0.0, 2.0)
arr = np.float32([
[0.0, 0.5, 1.0],
[1.0, 0.5, 0.0]
])
observed = ia.HeatmapsOnImage.change_normalization(
arr, (0.0, 1.0), (0.0, 2.0))
expected = np.float32([
[0.0, 1.0, 2.0],
[2.0, 1.0, 0.0]
])
assert np.allclose(observed, expected)
def test_decrease_min_and_max_value(self):
# (0.0, 1.0) -> (-1.0, 0.0)
arr = np.float32([
[0.0, 0.5, 1.0],
[1.0, 0.5, 0.0]
])
observed = ia.HeatmapsOnImage.change_normalization(
arr, (0.0, 1.0), (-1.0, 0.0))
expected = np.float32([
[-1.0, -0.5, 0.0],
[0.0, -0.5, -1.0]
])
assert np.allclose(observed, expected)
def test_increase_min_and_max_value__non_standard_source(self):
# (-1.0, 1.0) -> (1.0, 3.0)
arr = np.float32([
[-1.0, 0.0, 1.0],
[1.0, 0.0, -1.0]
])
observed = ia.HeatmapsOnImage.change_normalization(
arr, (-1.0, 1.0), (1.0, 3.0))
expected = np.float32([
[1.0, 2.0, 3.0],
[3.0, 2.0, 1.0]
])
assert np.allclose(observed, expected)
def test_value_ranges_given_as_heatmaps_on_image(self):
# (-1.0, 1.0) -> (1.0, 3.0)
# value ranges given as HeatmapsOnImage
arr = np.float32([
[-1.0, 0.0, 1.0],
[1.0, 0.0, -1.0]
])
source = ia.HeatmapsOnImage(
np.float32([[0.0]]), min_value=-1.0, max_value=1.0, shape=(1, 1, 3))
target = ia.HeatmapsOnImage(
np.float32([[1.0]]), min_value=1.0, max_value=3.0, shape=(1, 1, 3))
observed = ia.HeatmapsOnImage.change_normalization(arr, source, target)
expected = np.float32([
[1.0, 2.0, 3.0],
[3.0, 2.0, 1.0]
])
assert np.allclose(observed, expected)
|
|
#!/usr/bin/env python
'''
Oryol texture export functions
'''
import sys
import os
import platform
import subprocess
import tempfile
ProjectDirectory = os.path.dirname(os.path.abspath(__file__)) + '/..'
TexSrcDirectory = ProjectDirectory + '/data'
TexDstDirectory = ProjectDirectory + '/build/webpage'
# NOTE: PVRTexTool supports many more formats!
PVRFormats = ['PVRTC1_2', 'PVRTC1_4', 'PVRTC1_2_RGB', 'PVRTC1_4_RGB', 'PVRTC2_2', 'PVRTC2_4']
ETCFormats = ['ETC1', 'ETC2']
#-------------------------------------------------------------------------------
def error(msg) :
print "ERROR: {}".format(msg)
sys.exit(10)
#-------------------------------------------------------------------------------
def configure(projDir, texSrcDir, texDstDir) :
'''
Configure the directories of the texture export module
'''
global ProjectDirectory
global TexSrcDirectory
global TexDstDirectory
ProjectDirectory = projDir
TexSrcDirectory = texSrcDir
TexDstDirectory = texDstDir
#-------------------------------------------------------------------------------
def getToolsBinPath() :
path = os.path.dirname(os.path.abspath(__file__))
if platform.system() == 'Windows' :
path += '/win32/'
elif platform.system() == 'Darwin' :
path += '/osx/'
elif platform.system() == 'Linux' :
path += '/linux/'
else :
error("Unknown host system {}".format(platform.system()))
    return path
#-------------------------------------------------------------------------------
def ensureDstDirectory() :
if not os.path.exists(TexDstDirectory) :
os.makedirs(TexDstDirectory)
#-------------------------------------------------------------------------------
def toDDS(srcFilename, dstFilename, linearGamma, fmt, rgbFmt=None) :
'''
Convert a file to DDS format
'''
ensureDstDirectory()
ddsTool = getToolsBinPath() + 'nvcompress'
srcPath = TexSrcDirectory + '/' + srcFilename
dstPath = TexDstDirectory + '/' + dstFilename
print '=== toDDS: {} => {}:'.format(srcPath, dstPath)
cmdLine = [ddsTool, '-'+fmt]
if rgbFmt != None :
cmdLine.append('-rgbfmt')
cmdLine.append(rgbFmt)
if linearGamma :
cmdLine.append('-tolineargamma')
cmdLine.append(srcPath)
cmdLine.append(dstPath)
subprocess.call(args=cmdLine)
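# Illustrative example: toDDS('lok256.jpg', 'lok_dxt1.dds', False, 'bc1')
# builds a command line roughly equivalent to
#   <tools>/nvcompress -bc1 <TexSrcDirectory>/lok256.jpg <TexDstDirectory>/lok_dxt1.dds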
#-------------------------------------------------------------------------------
def toCubeDDS(srcDir, srcExt, dstFilename, linearGamma, fmt, rgbFmt=None) :
'''
Generate a cube map and convert to dds.
'''
ensureDstDirectory()
nvassemble = getToolsBinPath() + 'nvassemble'
ddsTool = getToolsBinPath() + 'nvcompress'
srcFiles = ['posx', 'negx', 'posy', 'negy', 'posz', 'negz']
dstPath = TexDstDirectory + '/' + dstFilename
print '=== toCubeDDS: {}/{}/[posx,negx,posy,negy,posz,negz].{} => {}'.format(TexSrcDirectory, srcDir, srcExt, dstPath)
# call nvassemble to generate an uncompressed cube map...
cmdLine = [nvassemble, '-cube']
for src in srcFiles :
cmdLine.append(TexSrcDirectory + '/' + srcDir + '/' + src + '.' + srcExt)
cmdLine.append('-o')
cmdLine.append(dstPath)
subprocess.call(args=cmdLine)
# ...and compress/convert to the desired format
cmdLine = [ddsTool, '-'+fmt]
if rgbFmt != None :
cmdLine.append('-rgbfmt')
cmdLine.append(rgbFmt)
if linearGamma :
cmdLine.append('-tolineargamma')
cmdLine.append(dstPath)
cmdLine.append(dstPath)
subprocess.call(args=cmdLine)
#-------------------------------------------------------------------------------
def toPVR(srcFilename, dstFilename, format) :
'''
Convert a file to PVR format
'''
if format not in PVRFormats :
error('invalid PVR texture format {}!'.format(format))
ensureDstDirectory()
pvrTool = getToolsBinPath() + 'PVRTexToolCLI'
srcPath = TexSrcDirectory + '/' + srcFilename
dstPath = TexDstDirectory + '/' + dstFilename
print '=== toPVR: {} => {}:'.format(srcPath, dstPath)
cmdLine = [pvrTool, '-i', srcPath, '-o', dstPath, '-square', '+', '-pot', '+', '-m', '-mfilter', 'cubic', '-f', format ]
subprocess.call(args=cmdLine)
#-------------------------------------------------------------------------------
def toCubePVR(srcDir, srcExt, dstFilename, format) :
'''
Generate a cube map and convert to PVR
'''
if format not in PVRFormats :
error('invalid PVR texture format {}!'.format(format))
ensureDstDirectory()
pvrTool = getToolsBinPath() + 'PVRTexToolCLI'
srcFiles = ['posx', 'negx', 'posy', 'negy', 'posz', 'negz']
dstPath = TexDstDirectory + '/' + dstFilename
print '=== toCubePVR: {}/{}/[posx,negx,posy,negy,posz,negz].{} => {}'.format(TexSrcDirectory, srcDir, srcExt, dstPath)
cmdLine = [pvrTool, '-i']
inputFiles = ''
for src in srcFiles :
inputFiles += TexSrcDirectory + '/' + srcDir + '/' + src + '.' + srcExt + ','
inputFiles = inputFiles[:-1]
cmdLine.append(inputFiles)
cmdLine.append('-o')
cmdLine.append(dstPath)
cmdLine.append('-cube')
cmdLine.append('-m')
cmdLine.append('-mfilter')
cmdLine.append('cubic')
cmdLine.append('-f')
cmdLine.append(format)
subprocess.call(args=cmdLine)
#-------------------------------------------------------------------------------
def toETC(srcFilename, dstFilename, format) :
'''
    Convert a file to ETC1/ETC2 in a KTX container file.
FIXME: alpha channel support
'''
if format not in ETCFormats :
error('invalid ETC texture format {}!'.format(format))
ensureDstDirectory()
tmpFilename, ext = os.path.splitext(dstFilename)
tmpFilename += '.ppm'
convTool = getToolsBinPath() + 'convert'
etcTool = getToolsBinPath() + 'etcpack'
srcPath = TexSrcDirectory + '/' + srcFilename
dstPath = TexDstDirectory + '/' + dstFilename
tmpPath = tempfile.gettempdir() + '/' + tmpFilename
print '=== toETC2: {} => {} => {}:'.format(srcPath, tmpPath, dstPath)
# first convert file to PPM format
subprocess.call(args=[convTool, srcPath, tmpPath])
cmd = [etcTool, tmpPath, TexDstDirectory, '-mipmaps', '-ktx', '-c']
    if format == 'ETC1' :
cmd.append('etc1')
else :
cmd.append('etc2')
subprocess.call(args=cmd)
os.unlink(tmpPath)
#-------------------------------------------------------------------------------
def exportSampleTextures() :
# default gamma 2.2
toDDS('lok256.jpg', 'lok_dxt1.dds', False, 'bc1')
toDDS('lok256.jpg', 'lok_dxt3.dds', False, 'bc2')
toDDS('lok256.jpg', 'lok_dxt5.dds', False, 'bc3')
toDDS('lok256.jpg', 'lok_bgra8.dds', False, 'rgb', 'bgra8')
toDDS('lok256.jpg', 'lok_rgba8.dds', False, 'rgb', 'rgba8')
toDDS('lok256.jpg', 'lok_bgr8.dds', False, 'rgb', 'bgr8')
toDDS('lok256.jpg', 'lok_rgb8.dds', False, 'rgb', 'rgb8')
toDDS('lok256.jpg', 'lok_argb4.dds', False, 'rgb', 'argb4')
toDDS('lok256.jpg', 'lok_abgr4.dds', False, 'rgb', 'abgr4')
toDDS('lok256.jpg', 'lok_rgb565.dds', False, 'rgb', 'rgb565')
toDDS('lok256.jpg', 'lok_bgr565.dds', False, 'rgb', 'bgr565')
toDDS('lok256.jpg', 'lok_argb1555.dds', False, 'rgb', 'argb1555')
toDDS('lok256.jpg', 'lok_abgr1555.dds', False, 'rgb', 'abgr1555')
toCubeDDS('RomeChurch', 'jpg', 'romechurch_dxt1.dds', False, 'bc1')
# linear gamma
toDDS('lok256.jpg', 'lok_linear_dxt1.dds', True, 'bc1')
toDDS('lok256.jpg', 'lok_linear_dxt3.dds', True, 'bc2')
toDDS('lok256.jpg', 'lok_linear_dxt5.dds', True, 'bc3')
toDDS('lok256.jpg', 'lok_linear_bgra8.dds', True, 'rgb', 'bgra8')
toDDS('lok256.jpg', 'lok_linear_rgba8.dds', True, 'rgb', 'rgba8')
toDDS('lok256.jpg', 'lok_linear_bgr8.dds', True, 'rgb', 'bgr8')
toDDS('lok256.jpg', 'lok_linear_rgb8.dds', True, 'rgb', 'rgb8')
toDDS('lok256.jpg', 'lok_linear_argb4.dds', True, 'rgb', 'argb4')
toDDS('lok256.jpg', 'lok_linear_abgr4.dds', True, 'rgb', 'abgr4')
toDDS('lok256.jpg', 'lok_linear_rgb565.dds', True, 'rgb', 'rgb565')
toDDS('lok256.jpg', 'lok_linear_bgr565.dds', True, 'rgb', 'bgr565')
toDDS('lok256.jpg', 'lok_linear_argb1555.dds', True, 'rgb', 'argb1555')
toDDS('lok256.jpg', 'lok_linear_abgr1555.dds', True, 'rgb', 'abgr1555')
toCubeDDS('RomeChurch', 'jpg', 'romechurch_linear_dxt1.dds', True, 'bc1')
# PVRTC
toPVR('lok256.jpg', 'lok_bpp2.pvr', 'PVRTC1_2')
toPVR('lok256.jpg', 'lok_bpp4.pvr', 'PVRTC1_4')
toCubePVR('RomeChurch', 'jpg', 'romechurch_bpp2.pvr', 'PVRTC1_2')
# ETC1/2
toETC('lok256.jpg', 'lok_etc1.ktx', 'ETC1')
toETC('lok256.jpg', 'lok_etc2.ktx', 'ETC2')
#-------------------------------------------------------------------------------
if __name__ == '__main__' :
print "{}".format(__file__)
exportSampleTextures()
|
|
import math
import json
import scoresheetsHtml
import pickle
import csv
################################################################################
# Printing Instructions
def printIntro():
'''
Prints program name and instructions for the user
'''
print('CubeToaster - Heat Generator')
print('Takes in a JSON or CSV file with the competition data and outputs an HTML website with all the score sheets for the competition sorted by heat number')
print('Type Ctrl-C or Ctrl-Z (whichever one works) to quit the program if something goes wrong.')
def printEnding():
'''
Prints instructions for what to do after everything is done
'''
print('GROUPS HAVE BEEN GENERATED')
print('1. Open printableGroups.csv to view group assignments.')
print('2. Make sure people with the same first name (or similar names) are not in the same group. You can edit the assignments in the csv file before generating scorecards and still have correct scorecards.')
print('3. To generate scorecards, run sheetGenerator.py and follow prompts')
################################################################################
# Input validation
def validateInt(prompt):
'''
Prompts users until they input an int
'''
while True:
try:
response = int(input(prompt).strip())
break
        except ValueError:
continue
return response
def validateYesNo(prompt):
'''
Prompts users until they enter y or n
'''
response = ''
acceptedList = ['y', 'yes', 'n', 'no']
    while response not in acceptedList:
response = input(prompt).strip().lower()
if response == 'y' or response == 'yes':
return 'y'
else:
return 'n'
def validateInputFile():
print('Fill out inputData.json (you can leave as many things blank as you want)')
print('There is a recommended number of groups already listed. You can change it if you want to.')
print('Everything is based on numGroups (changing numPeople or peoplePerGroup will not change the number of people in each group.)')
if validateYesNo('Type y when done. ') == 'y':
f = open('inputData.json', 'r')
inputData = json.loads(f.read())
f.close()
print()
return inputData
################################################################################
# Data Retrieval
def getJSONDataFile():
'''
Gets file name from user and reads in the JSON file
'''
fileName = input('Enter file name (json): ').strip()
if not fileName.endswith('.json'):
fileName = fileName + '.json'
f = open(fileName, 'r')
# read json data
fileData = json.loads(f.read())
f.close()
return fileData
def getCompEvents(header, eventsDict):
'''
Gets events list from header of csv file
'''
events = {}
for event in header:
if event in eventsDict:
events[event] = eventsDict[event]
return events
def getCSVDataFile():
'''
Gets file name from user
'''
fileName = input('Enter file name (csv): ').strip()
if not fileName.endswith('.csv'):
fileName = fileName + '.csv'
return fileName
def readCSVDataFile(fileName, eventsList):
'''
Read CSV file and make data structure
'''
# initialize data structure
fileData = {}
with open(fileName, 'r', newline = '') as input_file:
dataReader = csv.DictReader(input_file, delimiter=',', quotechar ="'")
compEventsDict = getCompEvents(dataReader.fieldnames, eventsList)
# predict cubecomps ID
predictedID = 1
for row in dataReader:
# use competitor's name as the key
personName = row['Name'].strip()
fileData[personName] = row
fileData[personName]['firstName'] = personName.split(' ')[0]
fileData[personName]['ID'] = str(predictedID)
fileData[personName]['Staff'] = 0
predictedID += 1
# delete unneeded data
del fileData[personName]['IP']
del fileData[personName]['Country']
del fileData[personName]['WCA ID']
del fileData[personName]['Status']
del fileData[personName]['Email']
del fileData[personName]['Birth Date']
del fileData[personName]['Guests']
del fileData[personName]['Gender']
return [fileData, compEventsDict]
def getStaffList(personList, fileType):
'''
Makes staff.txt with a list of all the competitors in the competition so
user can specify staff members
'''
with open('staff.txt', 'w') as f:
with open('competitorList.txt', 'w') as comp:
if fileType == 'json':
for person in personList:
print(person['name'], file=f)
print(person['name'], file=comp)
else:
for person in personList:
print(person, file=f)
print(person, file=comp)
print('Open staff.txt and delete anyone that is NOT on staff')
if validateYesNo('Type y when done. ') == 'y':
return readStaffList()
def getCompetitionData(jsonFileData):
'''
Parses JSON into a data structure
Returns (competitionId, compData)
'''
# let user choose competition name
compName = input('Input competition name (this is the name that will appear on all score cards): ')
jsonFileData['competitionId'] = compName
competitionId = jsonFileData['competitionId']
persons = {}
compData = {}
staffList = getStaffList(jsonFileData['persons'], 'json')
for person in jsonFileData['persons']:
# remove unnecessary data (WCA ID, country, gender, and dob)
del person['wcaId']
del person['countryId']
del person['gender']
del person['dob']
# make sure name is in title case
person['name'] = person['name'].title()
# initialize heat number
person['heat'] = 0
if person['name'] in staffList:
person['staff'] = 1
else:
person['staff'] = 0
# put person data into a dictionary with id number as key
persons[person['id']] = person
for event in jsonFileData['events']:
results = []
# replace id in events part of JSON with the person's data
for person in event['rounds'][0]['results']:
try:
results.append(persons[person['personId']].copy())
            except KeyError:
print('POSSIBLE ERROR: Make sure all registered competitors are in competitors.txt')
results.sort(key=lambda x: (-x['staff'], x['name'].lower()))
event['rounds'][0]['results'] = results
compData[event['eventId']] = event
return (competitionId, compData)
def readStaffList():
staff = []
with open('staff.txt', 'r') as f:
for line in f:
staff.append(line.strip())
return staff
################################################################################
# Everything related to making/calculating heats
def calcNumHeats(compData, inputData, dataType):
'''
    Gets the number of heats for each event from the user-edited inputData.
    (Recommended group counts are derived from the number of timing stations
    in createInputFile.)
'''
numHeatsPerEvent = {}
if dataType == 'json':
for event in compData[1]:
numPeople = len(compData[1][event]['rounds'][0]['results'])
if event == '333fm' or event == '333mbf':
numHeatsPerEvent[event] = 1
else:
numHeatsPerEvent[event] = inputData[event]['numGroups']
if numHeatsPerEvent[event] == 1:
print('There will be 1 group for {0} for {1} people'.format(event, numPeople))
else:
print('There will be {0} groups for {1} for {2} people'.format(numHeatsPerEvent[event], event, numPeople))
else:
for event in compData:
numPeople = compData[event]
if event == '333fm' or event == '333mbf':
numHeatsPerEvent[event] = 1
else:
numHeatsPerEvent[event] = inputData[event]['numGroups']
if numHeatsPerEvent[event] == 1:
print('There will be 1 group for {0} for {1} people'.format(event, numPeople))
else:
print('There will be {0} groups for {1} for {2} people'.format(numHeatsPerEvent[event], event, numPeople))
return numHeatsPerEvent
def easyHeats(compData, numHeatsPerEvent, numPeopleDict, dataType):
'''
Goes straight down list of competitors from 1 to numPeopleInHeats
'''
''' TODO: later
staff = False
# assumes that number of people on staff is always less than 60% of the number of people in 3x3x3
if len(staffList) < len(compData[1][event]['rounds'][0]['results']) * 0.6:
staff = True
'''
# print("numHeatsPerEvent", numHeatsPerEvent)
if dataType == 'json':
for event in numHeatsPerEvent:
for i, person in enumerate(compData[1][event]['rounds'][0]['results']):
if (numHeatsPerEvent[event] != 0):
person['heat'] = (i % numHeatsPerEvent[event]) + 1
else:
for event in numHeatsPerEvent:
# event has more than 1 heat
if numHeatsPerEvent[event] > 1:
participantNumber = 0
for competitor in compData:
# competitor is registered for this event
if event in competitor:
competitor[event] = (participantNumber % numHeatsPerEvent[event]) + 1
participantNumber += 1
return compData
def makeCompetitorList(jsonFile):
'''
Creates txt file with all of the competitors at the competition
'''
# print to file
with open('competitors.txt', 'w') as f:
for person in jsonFile['persons']:
print(person['name'], file=f)
def addStaffData(compData, staffList):
for person in staffList:
compData[person]['Staff'] = 1
def numPeoplePerEvent(compData, eventsList):
# initialize dictionary
peoplePerEvent = {}
for event in eventsList:
peoplePerEvent[event] = 0
# count number of people in each event
for person in compData:
for event in eventsList:
if compData[person][event] == '1':
peoplePerEvent[event] += 1
else:
del compData[person][event]
return peoplePerEvent
################################################################################
# Create output files
def createInputFile(compData, dataType):
'''
creates json file for users to edit number of people per heat and cutoffs
'''
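    # Worked example (illustrative): with 8 timing stations and 40 competitors
    # in a non-"fast" event such as 444, the suggestion below is
    # round(40 / (1.5 * 8)) = 3 groups, i.e. roughly 13-14 people per group.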
data = {}
fastEvents = ['222', '333', '333oh', 'skewb', 'pyram']
numStations = validateInt('How many timing stations will you be using per stage? ')
if dataType == 'json':
for event in compData[1]:
inputData = {}
numPeople = len(compData[1][event]['rounds'][0]['results'])
if event in fastEvents:
recommendNumHeats = round(numPeople / (1.7 * numStations))
else:
recommendNumHeats = round(numPeople / (1.5 * numStations))
if recommendNumHeats < 1:
recommendNumHeats = 1
inputData['numPeople'] = numPeople
inputData['numGroups'] = recommendNumHeats
inputData['cutoff'] = ''
inputData['timeLimit'] = ''
inputData['peoplePerGroup'] = numPeople / recommendNumHeats
# inputData['usePsychSheet?'] = 'no'
data[event] = inputData
else:
for event in compData:
inputData = {}
numPeople = compData[event]
if event in fastEvents:
recommendNumHeats = round(numPeople / (1.7 * numStations))
else:
recommendNumHeats = round(numPeople / (1.5 * numStations))
if recommendNumHeats < 1:
recommendNumHeats = 1
inputData['numPeople'] = numPeople
inputData['numGroups'] = recommendNumHeats
inputData['cutoff'] = ''
inputData['timeLimit'] = ''
inputData['peoplePerGroup'] = numPeople / recommendNumHeats
# inputData['usePsychSheet?'] = 'no'
data[event] = inputData
with open('inputData.json', 'w') as f:
print(json.dumps(data, indent=4), file=f)
def makePrintableHeatSheet(assignedHeats, inputFile, eventsDict, dataType):
'''
Gets all the heats for each competitor and turns it into a printable format
Makes a list of [name, [list of events with heat numbers]]
sorted by name (with events sorted by alphabetical order)
outputs:
file with everyone's heat numbers on it
file with everyone's names on it (for use in staff stuff later)
'''
if dataType == 'json':
# make list of dictionaries with person name and all the events
competitorHeats = []
for person in inputFile['persons']:
tempDict = {}
tempDict['name'] = person['name']
for event in assignedHeats[1]:
for personData in assignedHeats[1][event]['rounds'][0]['results']:
if person['name'] == personData['name']:
heatNum = personData['heat']
tempDict[event] = heatNum
if tempDict not in competitorHeats:
competitorHeats.append(tempDict)
# make competitorHeats sortable by event name
printableHeats = []
for competitor in competitorHeats:
tempList = []
for key, value in competitor.items():
if key != 'name':
tempTuple = ()
tempTuple = (key, value)
tempList.append(tempTuple)
printableHeats.append([competitor['name'], tempList])
for person in printableHeats:
person[1].sort(key=lambda x: x[0])
printableHeats.sort()
# sort by first name
competitorHeats.sort(key=lambda x: x['name'])
# print heat sheet to csv file
with open('printableGroups.csv', 'w', newline='') as f:
columnNames = ['name'] + list(eventsDict.keys())
heatWriter = csv.DictWriter(f, fieldnames=columnNames, delimiter=',')
heatWriter.writeheader()
for person in competitorHeats:
heatWriter.writerow(person)
# REMOVE WHEN CUBECOMPS TAKES JSON STUFF
newIDs = {}
newNum = 1
for person in printableHeats:
newIDs[person[0]] = str(newNum)
newNum += 1
with open('testCompetitorID.txt', 'w') as f:
for person in printableHeats:
print(person[0], newIDs[person[0]], file=f)
return newIDs
else:
# sort by first name
assignedHeats.sort(key=lambda x: x['Name'])
# REMOVE WHEN CUBECOMPS TAKES JSON STUFF
newIDs = {}
newNum = 1
for person in assignedHeats:
newIDs[person['Name']] = str(person['ID'])
with open('testCompetitorID.txt', 'w') as f:
for person in assignedHeats:
                print(person['Name'], newIDs[person['Name']], file=f)
# print heats by person
with open('printableGroups.txt', 'w', newline="") as f:
for person in assignedHeats:
print(person['Name'], file=f)
for event in eventsDict:
if event in person:
print(event, '-', person[event], file=f)
print(file=f)
# print heat sheet to csv file
with open('printableGroups.csv', 'w', newline='') as f:
columnNames = ['Name', 'ID'] + list(eventsDict.keys())
heatWriter = csv.DictWriter(f, fieldnames=columnNames, delimiter=',')
heatWriter.writeheader()
for person in assignedHeats:
del person['firstName']
del person['Staff']
# del person['ID']
heatWriter.writerow(person)
return newIDs
################################################################################
def jsonHeats(allEventsDict, dataType):
jsonFileData = getJSONDataFile()
compData = getCompetitionData(jsonFileData)
# get user input to make heats
createInputFile(compData, dataType)
print()
inputData = validateInputFile()
# figure out how many heats there will be for each event
numHeatsPerEvent = calcNumHeats(compData, inputData, dataType)
# assign heats
assignedHeats = easyHeats(compData, numHeatsPerEvent, None, dataType)
# make output files for heats
# CHANGE WHEN CUBECOMPS TAKES JSON STUFF
newIDs = makePrintableHeatSheet(assignedHeats, jsonFileData, allEventsDict, dataType)
# Save variables to files for sheetGenerator.py to read. Done with help from http://stackoverflow.com/questions/6568007/how-do-i-save-and-restore-multiple-variables-in-python
with open('objs.pickle', 'wb') as f:
pickle.dump([assignedHeats[0], allEventsDict, inputData, newIDs, dataType], f)
def csvHeats(allEventsDict, dataType):
fileName = getCSVDataFile()
[compData, compEventsDict] = readCSVDataFile(fileName, allEventsDict)
# let user choose competition name
compName = input('Input competition name (this is the name that will appear on all score cards): ')
# get list of staff members
staffList = getStaffList(compData, dataType)
addStaffData(compData, staffList)
# figure out how many people are in each event
numPeopleDict = numPeoplePerEvent(compData, compEventsDict)
# print("numPeopleDict:", numPeopleDict)
# get user input to make heats
createInputFile(numPeopleDict, dataType)
print()
inputData = validateInputFile()
# figure out how many heats there will be for each event
numHeatsPerEvent = calcNumHeats(numPeopleDict, inputData, dataType)
# convert compData into list of dictionaries so it is sortable
compDataList = []
for person in compData:
compDataList.append(compData[person])
# sort data so that staff members are first
compDataList.sort(key=lambda x: (-x['Staff'], x['firstName'].lower()))
# assign heats
assignedHeats = easyHeats(compDataList, numHeatsPerEvent, numPeopleDict, dataType)
# make output files for heats
# CHANGE WHEN CUBECOMPS TAKES JSON STUFF
newIDs = makePrintableHeatSheet(assignedHeats, fileName, allEventsDict, dataType)
# Save variables to files for sheetGenerator.py to read. Done with help from http://stackoverflow.com/questions/6568007/how-do-i-save-and-restore-multiple-variables-in-python
with open('objs.pickle', 'wb') as f:
pickle.dump([compName, allEventsDict, inputData, newIDs, dataType], f)
################################################################################
# Main
def main():
printIntro()
print()
allEventsDict = {"222" : "2x2 Cube",
"333" : "Rubik's Cube",
"333oh" : "Rubik's Cube: One-Handed",
"333bf" : "Rubik's Cube: Blindfolded",
"333fm" : "Rubik's Cube: Fewest moves",
"333ft" : "Rubik's Cube: With feet",
"333mbf": "Rubik's Cube: Multiple Blindfolded",
"444" : "4x4 Cube",
"444bf" : "4x4 Cube: Blindfolded",
"555" : "5x5 Cube",
"555bf" : "5x5 Cube: Blindfolded",
"666" : "6x6 Cube",
"777" : "7x7 Cube",
"clock" : "Rubik's Clock",
"minx" : "Megaminx",
"pyram" : "Pyraminx",
"skewb" : "Skewb",
"sq1" : "Square-1"}
if validateYesNo('Are you using a json file? (y or n) ') == 'y':
dataType = 'json'
jsonHeats(allEventsDict, dataType)
else:
dataType = 'csv'
csvHeats(allEventsDict, dataType)
print()
printEnding()
if __name__ == '__main__':
main()
|
|
"""Config flow to configure homekit_controller."""
import os
import json
import logging
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.core import callback
from .const import DOMAIN, KNOWN_DEVICES
from .connection import get_bridge_information, get_accessory_name
HOMEKIT_IGNORE = ["Home Assistant Bridge"]
HOMEKIT_DIR = ".homekit"
PAIRING_FILE = "pairing.json"
_LOGGER = logging.getLogger(__name__)
def load_old_pairings(hass):
"""Load any old pairings from on-disk json fragments."""
old_pairings = {}
data_dir = os.path.join(hass.config.path(), HOMEKIT_DIR)
pairing_file = os.path.join(data_dir, PAIRING_FILE)
    # Find any pairings created with HA 0.85 / 0.86
if os.path.exists(pairing_file):
        with open(pairing_file) as pairing_fp:
            old_pairings.update(json.load(pairing_fp))
# Find any pairings created in HA <= 0.84
if os.path.exists(data_dir):
for device in os.listdir(data_dir):
if not device.startswith("hk-"):
continue
alias = device[3:]
if alias in old_pairings:
continue
with open(os.path.join(data_dir, device)) as pairing_data_fp:
old_pairings[alias] = json.load(pairing_data_fp)
return old_pairings
@callback
def find_existing_host(hass, serial):
"""Return a set of the configured hosts."""
for entry in hass.config_entries.async_entries(DOMAIN):
if entry.data["AccessoryPairingID"] == serial:
return entry
@config_entries.HANDLERS.register(DOMAIN)
class HomekitControllerFlowHandler(config_entries.ConfigFlow):
"""Handle a HomeKit config flow."""
VERSION = 1
CONNECTION_CLASS = config_entries.CONN_CLASS_LOCAL_POLL
def __init__(self):
"""Initialize the homekit_controller flow."""
import homekit # pylint: disable=import-error
self.model = None
self.hkid = None
self.devices = {}
self.controller = homekit.Controller()
self.finish_pairing = None
async def async_step_user(self, user_input=None):
"""Handle a flow start."""
errors = {}
if user_input is not None:
key = user_input["device"]
self.hkid = self.devices[key]["id"]
self.model = self.devices[key]["md"]
return await self.async_step_pair()
all_hosts = await self.hass.async_add_executor_job(self.controller.discover, 5)
self.devices = {}
for host in all_hosts:
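            # Per the HomeKit spec, bit 0 of the status flags ("sf") is set
            # while an accessory is unpaired, so a cleared bit means it
            # already has a pairing and is skipped here.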
status_flags = int(host["sf"])
paired = not status_flags & 0x01
if paired:
continue
self.devices[host["name"]] = host
if not self.devices:
return self.async_abort(reason="no_devices")
return self.async_show_form(
step_id="user",
errors=errors,
data_schema=vol.Schema(
{vol.Required("device"): vol.In(self.devices.keys())}
),
)
async def async_step_zeroconf(self, discovery_info):
"""Handle a discovered HomeKit accessory.
This flow is triggered by the discovery component.
"""
# Normalize properties from discovery
# homekit_python has code to do this, but not in a form we can
# easily use, so do the bare minimum ourselves here instead.
properties = {
key.lower(): value for (key, value) in discovery_info["properties"].items()
}
# The hkid is a unique random number that looks like a pairing code.
# It changes if a device is factory reset.
hkid = properties["id"]
model = properties["md"]
name = discovery_info["name"].replace("._hap._tcp.local.", "")
status_flags = int(properties["sf"])
paired = not status_flags & 0x01
_LOGGER.debug("Discovered device %s (%s - %s)", name, model, hkid)
# pylint: disable=unsupported-assignment-operation
self.context["hkid"] = hkid
self.context["title_placeholders"] = {"name": name}
        # If multiple HomekitControllerFlowHandlers end up getting created
        # for the same accessory, don't let duplicates hang around
active_flows = self._async_in_progress()
if any(hkid == flow["context"]["hkid"] for flow in active_flows):
return self.async_abort(reason="already_in_progress")
# The configuration number increases every time the characteristic map
# needs updating. Some devices use a slightly off-spec name so handle
# both cases.
try:
config_num = int(properties["c#"])
except KeyError:
_LOGGER.warning(
"HomeKit device %s: c# not exposed, in violation of spec", hkid
)
config_num = None
if paired:
if hkid in self.hass.data.get(KNOWN_DEVICES, {}):
# The device is already paired and known to us
# According to spec we should monitor c# (config_num) for
# changes. If it changes, we check for new entities
conn = self.hass.data[KNOWN_DEVICES][hkid]
if conn.config_num != config_num:
_LOGGER.debug(
"HomeKit info %s: c# incremented, refreshing entities", hkid
)
self.hass.async_create_task(
conn.async_refresh_entity_map(config_num)
)
return self.async_abort(reason="already_configured")
old_pairings = await self.hass.async_add_executor_job(
load_old_pairings, self.hass
)
if hkid in old_pairings:
return await self.async_import_legacy_pairing(
properties, old_pairings[hkid]
)
# Device is paired but not to us - ignore it
_LOGGER.debug("HomeKit device %s ignored as already paired", hkid)
return self.async_abort(reason="already_paired")
        # Devices in HOMEKIT_IGNORE have native local integrations - users
        # should be encouraged to use the native integration and not be
        # confused by an alternative HomeKit API.
if model in HOMEKIT_IGNORE:
return self.async_abort(reason="ignored_model")
# Device isn't paired with us or anyone else.
# But we have a 'complete' config entry for it - that is probably
# invalid. Remove it automatically.
existing = find_existing_host(self.hass, hkid)
if existing:
await self.hass.config_entries.async_remove(existing.entry_id)
self.model = model
self.hkid = hkid
# We want to show the pairing form - but don't call async_step_pair
# directly as it has side effects (will ask the device to show a
# pairing code)
return self._async_step_pair_show_form()
async def async_import_legacy_pairing(self, discovery_props, pairing_data):
"""Migrate a legacy pairing to config entries."""
from homekit.controller.ip_implementation import IpPairing
hkid = discovery_props["id"]
existing = find_existing_host(self.hass, hkid)
if existing:
_LOGGER.info(
(
"Legacy configuration for homekit accessory %s"
"not loaded as already migrated"
),
hkid,
)
return self.async_abort(reason="already_configured")
_LOGGER.info(
(
"Legacy configuration %s for homekit"
"accessory migrated to config entries"
),
hkid,
)
pairing = IpPairing(pairing_data)
return await self._entry_from_accessory(pairing)
async def async_step_pair(self, pair_info=None):
"""Pair with a new HomeKit accessory."""
import homekit # pylint: disable=import-error
# If async_step_pair is called with no pairing code then we do the M1
# phase of pairing. If this is successful the device enters pairing
# mode.
# If it doesn't have a screen then the pin is static.
# If it has a display it will display a pin on that display. In
# this case the code is random. So we have to call the start_pairing
# API before the user can enter a pin. But equally we don't want to
# call start_pairing when the device is discovered, only when they
# click on 'Configure' in the UI.
# start_pairing will make the device show its pin and return a
# callable. We call the callable with the pin that the user has typed
# in.
errors = {}
if pair_info:
code = pair_info["pairing_code"]
try:
await self.hass.async_add_executor_job(self.finish_pairing, code)
pairing = self.controller.pairings.get(self.hkid)
if pairing:
return await self._entry_from_accessory(pairing)
errors["pairing_code"] = "unable_to_pair"
except homekit.AuthenticationError:
# PairSetup M4 - SRP proof failed
# PairSetup M6 - Ed25519 signature verification failed
# PairVerify M4 - Decryption failed
# PairVerify M4 - Device not recognised
# PairVerify M4 - Ed25519 signature verification failed
errors["pairing_code"] = "authentication_error"
except homekit.UnknownError:
# An error occurred on the device whilst performing this
# operation.
errors["pairing_code"] = "unknown_error"
except homekit.MaxPeersError:
# The device can't pair with any more accessories.
errors["pairing_code"] = "max_peers_error"
except homekit.AccessoryNotFoundError:
# Can no longer find the device on the network
return self.async_abort(reason="accessory_not_found_error")
except Exception: # pylint: disable=broad-except
_LOGGER.exception("Pairing attempt failed with an unhandled exception")
errors["pairing_code"] = "pairing_failed"
start_pairing = self.controller.start_pairing
try:
self.finish_pairing = await self.hass.async_add_executor_job(
start_pairing, self.hkid, self.hkid
)
except homekit.BusyError:
# Already performing a pair setup operation with a different
# controller
errors["pairing_code"] = "busy_error"
except homekit.MaxTriesError:
# The accessory has received more than 100 unsuccessful auth
# attempts.
errors["pairing_code"] = "max_tries_error"
except homekit.UnavailableError:
# The accessory is already paired - cannot try to pair again.
return self.async_abort(reason="already_paired")
except homekit.AccessoryNotFoundError:
# Can no longer find the device on the network
return self.async_abort(reason="accessory_not_found_error")
except Exception: # pylint: disable=broad-except
_LOGGER.exception("Pairing attempt failed with an unhandled exception")
errors["pairing_code"] = "pairing_failed"
return self._async_step_pair_show_form(errors)
def _async_step_pair_show_form(self, errors=None):
return self.async_show_form(
step_id="pair",
errors=errors or {},
data_schema=vol.Schema(
{vol.Required("pairing_code"): vol.All(str, vol.Strip)}
),
)
async def _entry_from_accessory(self, pairing):
"""Return a config entry from an initialized bridge."""
# The bulk of the pairing record is stored on the config entry.
# A specific exception is the 'accessories' key. This is more
# volatile. We do cache it, but not against the config entry.
# So copy the pairing data and mutate the copy.
pairing_data = pairing.pairing_data.copy()
# Use the accessories data from the pairing operation if it is
# available. Otherwise request a fresh copy from the API.
# This removes the 'accessories' key from pairing_data at
# the same time.
accessories = pairing_data.pop("accessories", None)
if not accessories:
accessories = await self.hass.async_add_executor_job(
pairing.list_accessories_and_characteristics
)
bridge_info = get_bridge_information(accessories)
name = get_accessory_name(bridge_info)
return self.async_create_entry(title=name, data=pairing_data)
|
|
import pickle
from collections import Counter
import shutil
import tempfile
import copy
import numpy as np
import os
import sys
import time
import unittest
from unittest.mock import patch
import ray
from ray.rllib import _register_all
from ray import tune
from ray.tune import (DurableTrainable, Trainable, TuneError, Stopper,
EarlyStopping, run)
from ray.tune import register_env, register_trainable, run_experiments
from ray.tune.durable_trainable import durable
from ray.tune.schedulers import (TrialScheduler, FIFOScheduler,
AsyncHyperBandScheduler)
from ray.tune.stopper import MaximumIterationStopper, TrialPlateauStopper
from ray.tune.sync_client import CommandBasedClient
from ray.tune.trial import Trial
from ray.tune.result import (TIMESTEPS_TOTAL, DONE, HOSTNAME, NODE_IP, PID,
EPISODES_TOTAL, TRAINING_ITERATION,
TIMESTEPS_THIS_ITER, TIME_THIS_ITER_S,
TIME_TOTAL_S, TRIAL_ID, EXPERIMENT_TAG)
from ray.tune.logger import Logger
from ray.tune.experiment import Experiment
from ray.tune.resources import Resources
from ray.tune.suggest import BasicVariantGenerator, grid_search
from ray.tune.suggest.hyperopt import HyperOptSearch
from ray.tune.suggest.ax import AxSearch
from ray.tune.suggest._mock import _MockSuggestionAlgorithm
from ray.tune.utils import (flatten_dict, get_pinned_object,
pin_in_object_store)
from ray.tune.utils.mock import mock_storage_client, MOCK_REMOTE_DIR
class TrainableFunctionApiTest(unittest.TestCase):
def setUp(self):
ray.init(num_cpus=4, num_gpus=0, object_store_memory=150 * 1024 * 1024)
self.tmpdir = tempfile.mkdtemp()
def tearDown(self):
ray.shutdown()
_register_all() # re-register the evicted objects
shutil.rmtree(self.tmpdir)
def checkAndReturnConsistentLogs(self, results, sleep_per_iter=None):
"""Checks logging is the same between APIs.
Ignore "DONE" for logging but checks that the
scheduler is notified properly with the last result.
"""
class_results = copy.deepcopy(results)
function_results = copy.deepcopy(results)
class_output = []
function_output = []
scheduler_notif = []
class MockScheduler(FIFOScheduler):
def on_trial_complete(self, runner, trial, result):
scheduler_notif.append(result)
class ClassAPILogger(Logger):
def on_result(self, result):
class_output.append(result)
class FunctionAPILogger(Logger):
def on_result(self, result):
function_output.append(result)
class _WrappedTrainable(Trainable):
def setup(self, config):
del config
self._result_iter = copy.deepcopy(class_results)
def step(self):
if sleep_per_iter:
time.sleep(sleep_per_iter)
res = self._result_iter.pop(0) # This should not fail
if not self._result_iter: # Mark "Done" for last result
res[DONE] = True
return res
def _function_trainable(config, reporter):
for result in function_results:
if sleep_per_iter:
time.sleep(sleep_per_iter)
reporter(**result)
class_trainable_name = "class_trainable"
register_trainable(class_trainable_name, _WrappedTrainable)
[trial1] = run(
_function_trainable,
loggers=[FunctionAPILogger],
raise_on_failed_trial=False,
scheduler=MockScheduler()).trials
[trial2] = run(
class_trainable_name,
loggers=[ClassAPILogger],
raise_on_failed_trial=False,
scheduler=MockScheduler()).trials
trials = [trial1, trial2]
# Ignore these fields
NO_COMPARE_FIELDS = {
HOSTNAME,
NODE_IP,
TRIAL_ID,
EXPERIMENT_TAG,
PID,
TIME_THIS_ITER_S,
TIME_TOTAL_S,
DONE, # This is ignored because FunctionAPI has different handling
"timestamp",
"time_since_restore",
"experiment_id",
"date",
}
self.assertEqual(len(class_output), len(results))
self.assertEqual(len(function_output), len(results))
def as_comparable_result(result):
return {
k: v
for k, v in result.items() if k not in NO_COMPARE_FIELDS
}
function_comparable = [
as_comparable_result(result) for result in function_output
]
class_comparable = [
as_comparable_result(result) for result in class_output
]
self.assertEqual(function_comparable, class_comparable)
self.assertEqual(sum(t.get(DONE) for t in scheduler_notif), 2)
self.assertEqual(
as_comparable_result(scheduler_notif[0]),
as_comparable_result(scheduler_notif[1]))
# Make sure the last result is the same.
self.assertEqual(
as_comparable_result(trials[0].last_result),
as_comparable_result(trials[1].last_result))
return function_output, trials
def testPinObject(self):
X = pin_in_object_store("hello")
@ray.remote
def f():
return get_pinned_object(X)
self.assertEqual(ray.get(f.remote()), "hello")
def testFetchPinned(self):
X = pin_in_object_store("hello")
def train(config, reporter):
get_pinned_object(X)
reporter(timesteps_total=100, done=True)
register_trainable("f1", train)
[trial] = run_experiments({
"foo": {
"run": "f1",
}
})
self.assertEqual(trial.status, Trial.TERMINATED)
self.assertEqual(trial.last_result[TIMESTEPS_TOTAL], 100)
def testRegisterEnv(self):
register_env("foo", lambda: None)
self.assertRaises(TypeError, lambda: register_env("foo", 2))
def testRegisterEnvOverwrite(self):
def train(config, reporter):
reporter(timesteps_total=100, done=True)
def train2(config, reporter):
reporter(timesteps_total=200, done=True)
register_trainable("f1", train)
register_trainable("f1", train2)
[trial] = run_experiments({
"foo": {
"run": "f1",
}
})
self.assertEqual(trial.status, Trial.TERMINATED)
self.assertEqual(trial.last_result[TIMESTEPS_TOTAL], 200)
def testRegisterTrainable(self):
def train(config, reporter):
pass
class A:
pass
class B(Trainable):
pass
register_trainable("foo", train)
Experiment("test", train)
register_trainable("foo", B)
Experiment("test", B)
self.assertRaises(TypeError, lambda: register_trainable("foo", B()))
self.assertRaises(TuneError, lambda: Experiment("foo", B()))
self.assertRaises(TypeError, lambda: register_trainable("foo", A))
self.assertRaises(TypeError, lambda: Experiment("foo", A))
def testTrainableCallable(self):
def dummy_fn(config, reporter, steps):
reporter(timesteps_total=steps, done=True)
from functools import partial
steps = 500
register_trainable("test", partial(dummy_fn, steps=steps))
[trial] = run_experiments({
"foo": {
"run": "test",
}
})
self.assertEqual(trial.status, Trial.TERMINATED)
self.assertEqual(trial.last_result[TIMESTEPS_TOTAL], steps)
[trial] = tune.run(partial(dummy_fn, steps=steps)).trials
self.assertEqual(trial.status, Trial.TERMINATED)
self.assertEqual(trial.last_result[TIMESTEPS_TOTAL], steps)
def testBuiltInTrainableResources(self):
os.environ["TUNE_PLACEMENT_GROUP_AUTO_DISABLED"] = "1"
class B(Trainable):
@classmethod
def default_resource_request(cls, config):
return Resources(cpu=config["cpu"], gpu=config["gpu"])
def step(self):
return {"timesteps_this_iter": 1, "done": True}
register_trainable("B", B)
def f(cpus, gpus, queue_trials):
return run_experiments(
{
"foo": {
"run": "B",
"config": {
"cpu": cpus,
"gpu": gpus,
},
}
},
queue_trials=queue_trials)[0]
# Should all succeed
self.assertEqual(f(0, 0, False).status, Trial.TERMINATED)
self.assertEqual(f(1, 0, True).status, Trial.TERMINATED)
self.assertEqual(f(1, 0, True).status, Trial.TERMINATED)
# Too large resource request
self.assertRaises(TuneError, lambda: f(100, 100, False))
self.assertRaises(TuneError, lambda: f(0, 100, False))
self.assertRaises(TuneError, lambda: f(100, 0, False))
# TODO(ekl) how can we test this is queued (hangs)?
# f(100, 0, True)
def testRewriteEnv(self):
def train(config, reporter):
reporter(timesteps_total=1)
register_trainable("f1", train)
[trial] = run_experiments({
"foo": {
"run": "f1",
"env": "CartPole-v0",
}
})
self.assertEqual(trial.config["env"], "CartPole-v0")
def testConfigPurity(self):
def train(config, reporter):
assert config == {"a": "b"}, config
reporter(timesteps_total=1)
register_trainable("f1", train)
run_experiments({
"foo": {
"run": "f1",
"config": {
"a": "b"
},
}
})
def testLogdir(self):
def train(config, reporter):
assert os.path.join(ray._private.utils.get_user_temp_dir(),
"logdir", "foo") in os.getcwd(), os.getcwd()
reporter(timesteps_total=1)
register_trainable("f1", train)
run_experiments({
"foo": {
"run": "f1",
"local_dir": os.path.join(
ray._private.utils.get_user_temp_dir(), "logdir"),
"config": {
"a": "b"
},
}
})
def testLogdirStartingWithTilde(self):
local_dir = "~/ray_results/local_dir"
def train(config, reporter):
cwd = os.getcwd()
assert cwd.startswith(os.path.expanduser(local_dir)), cwd
assert not cwd.startswith("~"), cwd
reporter(timesteps_total=1)
register_trainable("f1", train)
run_experiments({
"foo": {
"run": "f1",
"local_dir": local_dir,
"config": {
"a": "b"
},
}
})
def testLongFilename(self):
def train(config, reporter):
assert os.path.join(ray._private.utils.get_user_temp_dir(),
"logdir", "foo") in os.getcwd(), os.getcwd()
reporter(timesteps_total=1)
register_trainable("f1", train)
run_experiments({
"foo": {
"run": "f1",
"local_dir": os.path.join(
ray._private.utils.get_user_temp_dir(), "logdir"),
"config": {
"a" * 50: tune.sample_from(lambda spec: 5.0 / 7),
"b" * 50: tune.sample_from(lambda spec: "long" * 40),
},
}
})
def testBadParams(self):
def f():
run_experiments({"foo": {}})
self.assertRaises(TuneError, f)
def testBadParams2(self):
def f():
run_experiments({
"foo": {
"run": "asdf",
"bah": "this param is not allowed",
}
})
self.assertRaises(TuneError, f)
def testBadParams3(self):
def f():
run_experiments({
"foo": {
"run": grid_search("invalid grid search"),
}
})
self.assertRaises(TuneError, f)
def testBadParams4(self):
def f():
run_experiments({
"foo": {
"run": "asdf",
}
})
self.assertRaises(TuneError, f)
def testBadParams5(self):
def f():
run_experiments({"foo": {"run": "PPO", "stop": {"asdf": 1}}})
self.assertRaises(TuneError, f)
def testBadParams6(self):
def f():
run_experiments({
"foo": {
"run": "PPO",
"resources_per_trial": {
"asdf": 1
}
}
})
self.assertRaises(TuneError, f)
def testBadStoppingReturn(self):
def train(config, reporter):
reporter()
register_trainable("f1", train)
def f():
run_experiments({
"foo": {
"run": "f1",
"stop": {
"time": 10
},
}
})
self.assertRaises(TuneError, f)
def testNestedStoppingReturn(self):
def train(config, reporter):
for i in range(10):
reporter(test={"test1": {"test2": i}})
with self.assertRaises(TuneError):
[trial] = tune.run(
train, stop={
"test": {
"test1": {
"test2": 6
}
}
}).trials
[trial] = tune.run(train, stop={"test/test1/test2": 6}).trials
self.assertEqual(trial.last_result["training_iteration"], 7)
def testStoppingFunction(self):
def train(config, reporter):
for i in range(10):
reporter(test=i)
def stop(trial_id, result):
return result["test"] > 6
[trial] = tune.run(train, stop=stop).trials
self.assertEqual(trial.last_result["training_iteration"], 8)
def testStoppingMemberFunction(self):
def train(config, reporter):
for i in range(10):
reporter(test=i)
class Stopclass:
def stop(self, trial_id, result):
return result["test"] > 6
[trial] = tune.run(train, stop=Stopclass().stop).trials
self.assertEqual(trial.last_result["training_iteration"], 8)
def testStopper(self):
def train(config, reporter):
for i in range(10):
reporter(test=i)
class CustomStopper(Stopper):
def __init__(self):
self._count = 0
def __call__(self, trial_id, result):
print("called")
self._count += 1
return result["test"] > 6
def stop_all(self):
return self._count > 5
trials = tune.run(train, num_samples=5, stop=CustomStopper()).trials
self.assertTrue(all(t.status == Trial.TERMINATED for t in trials))
self.assertTrue(
any(
t.last_result.get("training_iteration") is None
for t in trials))
def testEarlyStopping(self):
def train(config, reporter):
reporter(test=0)
top = 3
with self.assertRaises(ValueError):
EarlyStopping("test", top=0)
with self.assertRaises(ValueError):
EarlyStopping("test", top="0")
with self.assertRaises(ValueError):
EarlyStopping("test", std=0)
with self.assertRaises(ValueError):
EarlyStopping("test", patience=-1)
with self.assertRaises(ValueError):
EarlyStopping("test", std="0")
with self.assertRaises(ValueError):
EarlyStopping("test", mode="0")
stopper = EarlyStopping("test", top=top, mode="min")
analysis = tune.run(train, num_samples=10, stop=stopper)
self.assertTrue(
all(t.status == Trial.TERMINATED for t in analysis.trials))
self.assertTrue(
len(analysis.dataframe(metric="test", mode="max")) <= top)
patience = 5
stopper = EarlyStopping("test", top=top, mode="min", patience=patience)
analysis = tune.run(train, num_samples=20, stop=stopper)
self.assertTrue(
all(t.status == Trial.TERMINATED for t in analysis.trials))
self.assertTrue(
len(analysis.dataframe(metric="test", mode="max")) <= patience)
stopper = EarlyStopping("test", top=top, mode="min")
analysis = tune.run(train, num_samples=10, stop=stopper)
self.assertTrue(
all(t.status == Trial.TERMINATED for t in analysis.trials))
self.assertTrue(
len(analysis.dataframe(metric="test", mode="max")) <= top)
def testBadStoppingFunction(self):
def train(config, reporter):
for i in range(10):
reporter(test=i)
class CustomStopper:
def stop(self, result):
return result["test"] > 6
def stop(result):
return result["test"] > 6
with self.assertRaises(TuneError):
tune.run(train, stop=CustomStopper().stop)
with self.assertRaises(TuneError):
tune.run(train, stop=stop)
def testMaximumIterationStopper(self):
def train(config):
for i in range(10):
tune.report(it=i)
stopper = MaximumIterationStopper(max_iter=6)
out = tune.run(train, stop=stopper)
self.assertEqual(out.trials[0].last_result[TRAINING_ITERATION], 6)
def testTrialPlateauStopper(self):
def train(config):
tune.report(10.0)
tune.report(11.0)
tune.report(12.0)
for i in range(10):
tune.report(20.0)
# num_results = 4, no other constraints --> early stop after 7
stopper = TrialPlateauStopper(metric="_metric", num_results=4)
out = tune.run(train, stop=stopper)
self.assertEqual(out.trials[0].last_result[TRAINING_ITERATION], 7)
# num_results = 4, grace period 9 --> early stop after 9
stopper = TrialPlateauStopper(
metric="_metric", num_results=4, grace_period=9)
out = tune.run(train, stop=stopper)
self.assertEqual(out.trials[0].last_result[TRAINING_ITERATION], 9)
# num_results = 4, metric_threshold = 22.0 --> full 13 iterations
stopper = TrialPlateauStopper(
metric="_metric", num_results=4, metric_threshold=22.0, mode="max")
out = tune.run(train, stop=stopper)
self.assertEqual(out.trials[0].last_result[TRAINING_ITERATION], 13)
def testCustomTrialDir(self):
def train(config):
for i in range(10):
tune.report(test=i)
custom_name = "TRAIL_TRIAL"
def custom_trial_dir(trial):
return custom_name
trials = tune.run(
train,
config={
"t1": tune.grid_search([1, 2, 3])
},
trial_dirname_creator=custom_trial_dir,
local_dir=self.tmpdir).trials
logdirs = {t.logdir for t in trials}
assert len(logdirs) == 3
assert all(custom_name in dirpath for dirpath in logdirs)
def testTrialDirRegression(self):
def train(config, reporter):
for i in range(10):
reporter(test=i)
trials = tune.run(
train,
config={
"t1": tune.grid_search([1, 2, 3])
},
local_dir=self.tmpdir).trials
logdirs = {t.logdir for t in trials}
for i in [1, 2, 3]:
assert any(f"t1={i}" in dirpath for dirpath in logdirs)
for t in trials:
assert any(t.trainable_name in dirpath for dirpath in logdirs)
def testEarlyReturn(self):
def train(config, reporter):
reporter(timesteps_total=100, done=True)
time.sleep(99999)
register_trainable("f1", train)
[trial] = run_experiments({
"foo": {
"run": "f1",
}
})
self.assertEqual(trial.status, Trial.TERMINATED)
self.assertEqual(trial.last_result[TIMESTEPS_TOTAL], 100)
def testReporterNoUsage(self):
def run_task(config, reporter):
print("hello")
experiment = Experiment(run=run_task, name="ray_crash_repro")
[trial] = ray.tune.run(experiment).trials
print(trial.last_result)
self.assertEqual(trial.last_result[DONE], True)
def testRerun(self):
tmpdir = tempfile.mkdtemp()
self.addCleanup(lambda: shutil.rmtree(tmpdir))
def test(config):
tid = config["id"]
fail = config["fail"]
marker = os.path.join(tmpdir, f"t{tid}-{fail}.log")
if not os.path.exists(marker) and fail:
open(marker, "w").close()
raise ValueError
for i in range(10):
time.sleep(0.1)
tune.report(hello=123)
config = dict(
name="hi-2",
config={
"fail": tune.grid_search([True, False]),
"id": tune.grid_search(list(range(5)))
},
verbose=1,
local_dir=tmpdir,
loggers=None)
trials = tune.run(test, raise_on_failed_trial=False, **config).trials
self.assertEqual(Counter(t.status for t in trials)["ERROR"], 5)
new_trials = tune.run(test, resume="ERRORED_ONLY", **config).trials
self.assertEqual(Counter(t.status for t in new_trials)["ERROR"], 0)
self.assertTrue(
all(t.last_result.get("hello") == 123 for t in new_trials))
def testTrialInfoAccess(self):
class TestTrainable(Trainable):
def step(self):
result = {"name": self.trial_name, "trial_id": self.trial_id}
print(result)
return result
analysis = tune.run(TestTrainable, stop={TRAINING_ITERATION: 1})
trial = analysis.trials[0]
self.assertEqual(trial.last_result.get("name"), str(trial))
self.assertEqual(trial.last_result.get("trial_id"), trial.trial_id)
def testTrialInfoAccessFunction(self):
def train(config, reporter):
reporter(name=reporter.trial_name, trial_id=reporter.trial_id)
analysis = tune.run(train, stop={TRAINING_ITERATION: 1})
trial = analysis.trials[0]
self.assertEqual(trial.last_result.get("name"), str(trial))
self.assertEqual(trial.last_result.get("trial_id"), trial.trial_id)
def track_train(config):
tune.report(
name=tune.get_trial_name(), trial_id=tune.get_trial_id())
analysis = tune.run(track_train, stop={TRAINING_ITERATION: 1})
trial = analysis.trials[0]
self.assertEqual(trial.last_result.get("name"), str(trial))
self.assertEqual(trial.last_result.get("trial_id"), trial.trial_id)
@patch("ray.tune.ray_trial_executor.TRIAL_CLEANUP_THRESHOLD", 3)
def testLotsOfStops(self):
class TestTrainable(Trainable):
def step(self):
result = {"name": self.trial_name, "trial_id": self.trial_id}
return result
def cleanup(self):
time.sleep(0.3)
open(os.path.join(self.logdir, "marker"), "a").close()
return 1
analysis = tune.run(
TestTrainable, num_samples=10, stop={TRAINING_ITERATION: 1})
for trial in analysis.trials:
path = os.path.join(trial.logdir, "marker")
assert os.path.exists(path)
def testReportTimeStep(self):
# Test that no timestep counts are logged if the Trainable never
# returns any.
results1 = [dict(mean_accuracy=5, done=i == 99) for i in range(100)]
logs1, _ = self.checkAndReturnConsistentLogs(results1)
self.assertTrue(all(log[TIMESTEPS_TOTAL] is None for log in logs1))
# Test that no timesteps_this_iter are logged if only timesteps_total
# are returned.
results2 = [dict(timesteps_total=5, done=i == 9) for i in range(10)]
logs2, _ = self.checkAndReturnConsistentLogs(results2)
# Re-run the same trials but with added delay. This is to catch some
# inconsistent timestep counting that was present in the multi-threaded
# FunctionRunner. This part of the test can be removed once the
# multi-threaded FunctionRunner is removed from ray/tune.
# TODO: remove once the multi-threaded function runner is gone.
logs2, _ = self.checkAndReturnConsistentLogs(results2, 0.5)
# check all timesteps_total report the same value
self.assertTrue(all(log[TIMESTEPS_TOTAL] == 5 for log in logs2))
# check that none of the logs report timesteps_this_iter
self.assertFalse(
any(hasattr(log, TIMESTEPS_THIS_ITER) for log in logs2))
# Test that timesteps_total and episodes_total are reported even when
# timesteps_this_iter and episodes_this_iter only return zeros.
results3 = [
dict(timesteps_this_iter=0, episodes_this_iter=0)
for i in range(10)
]
logs3, _ = self.checkAndReturnConsistentLogs(results3)
self.assertTrue(all(log[TIMESTEPS_TOTAL] == 0 for log in logs3))
self.assertTrue(all(log[EPISODES_TOTAL] == 0 for log in logs3))
# Test that timesteps_total and episodes_total are properly counted
# when timesteps_this_iter and episodes_this_iter report non-zero
# values.
results4 = [
dict(timesteps_this_iter=3, episodes_this_iter=i)
for i in range(10)
]
logs4, _ = self.checkAndReturnConsistentLogs(results4)
# The last reported result should not be double-logged.
self.assertEqual(logs4[-1][TIMESTEPS_TOTAL], 30)
self.assertNotEqual(logs4[-2][TIMESTEPS_TOTAL],
logs4[-1][TIMESTEPS_TOTAL])
self.assertEqual(logs4[-1][EPISODES_TOTAL], 45)
self.assertNotEqual(logs4[-2][EPISODES_TOTAL],
logs4[-1][EPISODES_TOTAL])
def testAllValuesReceived(self):
results1 = [
dict(timesteps_total=(i + 1), my_score=i**2, done=i == 4)
for i in range(5)
]
logs1, _ = self.checkAndReturnConsistentLogs(results1)
# check if the correct number of results were reported
self.assertEqual(len(logs1), len(results1))
def check_no_missing(reported_result, result):
common_results = [reported_result[k] == result[k] for k in result]
return all(common_results)
# check that no result was dropped or modified
complete_results = [
check_no_missing(log, result)
for log, result in zip(logs1, results1)
]
self.assertTrue(all(complete_results))
# check if done was logged exactly once
self.assertEqual(len([r for r in logs1 if r.get("done")]), 1)
def testNoDoneReceived(self):
# repeat same test but without explicitly reporting done=True
results1 = [
dict(timesteps_total=(i + 1), my_score=i**2) for i in range(5)
]
logs1, trials = self.checkAndReturnConsistentLogs(results1)
# check if the correct number of results were reported.
self.assertEqual(len(logs1), len(results1))
def check_no_missing(reported_result, result):
common_results = [reported_result[k] == result[k] for k in result]
return all(common_results)
# check that no result was dropped or modified
complete_results1 = [
check_no_missing(log, result)
for log, result in zip(logs1, results1)
]
self.assertTrue(all(complete_results1))
def _testDurableTrainable(self, trainable, function=False, cleanup=True):
sync_client = mock_storage_client()
mock_get_client = "ray.tune.durable_trainable.get_cloud_sync_client"
with patch(mock_get_client) as mock_get_cloud_sync_client:
mock_get_cloud_sync_client.return_value = sync_client
test_trainable = trainable(remote_checkpoint_dir=MOCK_REMOTE_DIR)
result = test_trainable.train()
self.assertEqual(result["metric"], 1)
checkpoint_path = test_trainable.save()
result = test_trainable.train()
self.assertEqual(result["metric"], 2)
result = test_trainable.train()
self.assertEqual(result["metric"], 3)
result = test_trainable.train()
self.assertEqual(result["metric"], 4)
if not function:
test_trainable.state["hi"] = 2
test_trainable.restore(checkpoint_path)
self.assertEqual(test_trainable.state["hi"], 1)
else:
# Cannot re-use function trainable, create new
tune.session.shutdown()
test_trainable = trainable(
remote_checkpoint_dir=MOCK_REMOTE_DIR)
test_trainable.restore(checkpoint_path)
result = test_trainable.train()
self.assertEqual(result["metric"], 2)
if cleanup:
self.addCleanup(shutil.rmtree, MOCK_REMOTE_DIR)
def testDurableTrainableClass(self):
class TestTrain(DurableTrainable):
def setup(self, config):
self.state = {"hi": 1, "iter": 0}
def step(self):
self.state["iter"] += 1
return {
"timesteps_this_iter": 1,
"metric": self.state["iter"],
"done": self.state["iter"] > 3
}
def save_checkpoint(self, path):
return self.state
def load_checkpoint(self, state):
self.state = state
self._testDurableTrainable(TestTrain)
def testDurableTrainableWrapped(self):
class TestTrain(Trainable):
def setup(self, config):
self.state = {"hi": 1, "iter": 0}
def step(self):
self.state["iter"] += 1
return {
"timesteps_this_iter": 1,
"metric": self.state["iter"],
"done": self.state["iter"] > 3
}
def save_checkpoint(self, path):
return self.state
def load_checkpoint(self, state):
self.state = state
self._testDurableTrainable(durable(TestTrain), cleanup=False)
tune.register_trainable("test_train", TestTrain)
self._testDurableTrainable(durable("test_train"))
def testDurableTrainableFunction(self):
def test_train(config, checkpoint_dir=None):
state = {"hi": 1, "iter": 0}
if checkpoint_dir:
with open(os.path.join(checkpoint_dir, "ckpt.pkl"),
"rb") as fp:
state = pickle.load(fp)
for i in range(4):
state["iter"] += 1
with tune.checkpoint_dir(step=state["iter"]) as dir:
with open(os.path.join(dir, "ckpt.pkl"), "wb") as fp:
pickle.dump(state, fp)
tune.report(
**{
"timesteps_this_iter": 1,
"metric": state["iter"],
"done": state["iter"] > 3
})
self._testDurableTrainable(durable(test_train), function=True)
def testDurableTrainableSyncFunction(self):
"""Check custom sync functions in durable trainables"""
class TestDurable(DurableTrainable):
def __init__(self, *args, **kwargs):
# Mock distutils.spawn.find_executable
# so `aws` command is found
import distutils.spawn
distutils.spawn.find_executable = lambda *_, **__: True
super(TestDurable, self).__init__(*args, **kwargs)
def check(self):
return bool(self.sync_function_tpl) and isinstance(
self.storage_client, CommandBasedClient
) and "aws" not in self.storage_client.sync_up_template
class TestTplDurable(TestDurable):
_sync_function_tpl = "echo static sync {source} {target}"
upload_dir = "s3://test-bucket/path"
def _create_remote_actor(trainable_cls, sync_to_cloud):
"""Create a remote trainable actor from an experiment"""
exp = Experiment(
name="test_durable_sync",
run=trainable_cls,
sync_to_cloud=sync_to_cloud,
sync_to_driver=False,
upload_dir=upload_dir)
searchers = BasicVariantGenerator()
searchers.add_configurations([exp])
trial = searchers.next_trial()
cls = trial.get_trainable_cls()
actor = ray.remote(cls).remote(
remote_checkpoint_dir=upload_dir,
sync_function_tpl=trial.sync_to_cloud)
return actor
# This actor should create a default aws syncer, so check should fail
actor1 = _create_remote_actor(TestDurable, None)
self.assertFalse(ray.get(actor1.check.remote()))
# This actor should create a custom syncer, so check should pass
actor2 = _create_remote_actor(TestDurable,
"echo test sync {source} {target}")
self.assertTrue(ray.get(actor2.check.remote()))
# This actor should create a custom syncer, so check should pass
actor3 = _create_remote_actor(TestTplDurable, None)
self.assertTrue(ray.get(actor3.check.remote()))
def testCheckpointDict(self):
class TestTrain(Trainable):
def setup(self, config):
self.state = {"hi": 1}
def step(self):
return {"timesteps_this_iter": 1, "done": True}
def save_checkpoint(self, path):
return self.state
def load_checkpoint(self, state):
self.state = state
test_trainable = TestTrain()
result = test_trainable.save()
test_trainable.state["hi"] = 2
test_trainable.restore(result)
self.assertEqual(test_trainable.state["hi"], 1)
trials = run_experiments({
"foo": {
"run": TestTrain,
"checkpoint_at_end": True
}
})
for trial in trials:
self.assertEqual(trial.status, Trial.TERMINATED)
self.assertTrue(trial.has_checkpoint())
def testMultipleCheckpoints(self):
class TestTrain(Trainable):
def setup(self, config):
self.state = {"hi": 1, "iter": 0}
def step(self):
self.state["iter"] += 1
return {"timesteps_this_iter": 1, "done": True}
def save_checkpoint(self, path):
return self.state
def load_checkpoint(self, state):
self.state = state
test_trainable = TestTrain()
checkpoint_1 = test_trainable.save()
test_trainable.train()
checkpoint_2 = test_trainable.save()
self.assertNotEqual(checkpoint_1, checkpoint_2)
test_trainable.restore(checkpoint_2)
self.assertEqual(test_trainable.state["iter"], 1)
test_trainable.restore(checkpoint_1)
self.assertEqual(test_trainable.state["iter"], 0)
trials = run_experiments({
"foo": {
"run": TestTrain,
"checkpoint_at_end": True
}
})
for trial in trials:
self.assertEqual(trial.status, Trial.TERMINATED)
self.assertTrue(trial.has_checkpoint())
def testLogToFile(self):
def train(config, reporter):
import sys
from ray import logger
for i in range(10):
reporter(timesteps_total=i)
print("PRINT_STDOUT")
print("PRINT_STDERR", file=sys.stderr)
logger.info("LOG_STDERR")
register_trainable("f1", train)
# Do not log to file
[trial] = tune.run("f1", log_to_file=False).trials
self.assertFalse(os.path.exists(os.path.join(trial.logdir, "stdout")))
self.assertFalse(os.path.exists(os.path.join(trial.logdir, "stderr")))
# Log to default files
[trial] = tune.run("f1", log_to_file=True).trials
self.assertTrue(os.path.exists(os.path.join(trial.logdir, "stdout")))
self.assertTrue(os.path.exists(os.path.join(trial.logdir, "stderr")))
with open(os.path.join(trial.logdir, "stdout"), "rt") as fp:
content = fp.read()
self.assertIn("PRINT_STDOUT", content)
with open(os.path.join(trial.logdir, "stderr"), "rt") as fp:
content = fp.read()
self.assertIn("PRINT_STDERR", content)
self.assertIn("LOG_STDERR", content)
# Log to one file
[trial] = tune.run("f1", log_to_file="combined").trials
self.assertFalse(os.path.exists(os.path.join(trial.logdir, "stdout")))
self.assertFalse(os.path.exists(os.path.join(trial.logdir, "stderr")))
self.assertTrue(os.path.exists(os.path.join(trial.logdir, "combined")))
with open(os.path.join(trial.logdir, "combined"), "rt") as fp:
content = fp.read()
self.assertIn("PRINT_STDOUT", content)
self.assertIn("PRINT_STDERR", content)
self.assertIn("LOG_STDERR", content)
# Log to two files
[trial] = tune.run(
"f1", log_to_file=("alt.stdout", "alt.stderr")).trials
self.assertFalse(os.path.exists(os.path.join(trial.logdir, "stdout")))
self.assertFalse(os.path.exists(os.path.join(trial.logdir, "stderr")))
self.assertTrue(
os.path.exists(os.path.join(trial.logdir, "alt.stdout")))
self.assertTrue(
os.path.exists(os.path.join(trial.logdir, "alt.stderr")))
with open(os.path.join(trial.logdir, "alt.stdout"), "rt") as fp:
content = fp.read()
self.assertIn("PRINT_STDOUT", content)
with open(os.path.join(trial.logdir, "alt.stderr"), "rt") as fp:
content = fp.read()
self.assertIn("PRINT_STDERR", content)
self.assertIn("LOG_STDERR", content)
def testTimeout(self):
from ray.tune.stopper import TimeoutStopper
import datetime
def train(config):
for i in range(20):
tune.report(metric=i)
time.sleep(1)
register_trainable("f1", train)
start = time.time()
tune.run("f1", time_budget_s=5)
diff = time.time() - start
self.assertLess(diff, 10)
# Metric should fire first
start = time.time()
tune.run("f1", stop={"metric": 3}, time_budget_s=7)
diff = time.time() - start
self.assertLess(diff, 7)
# Timeout should fire first
start = time.time()
tune.run("f1", stop={"metric": 10}, time_budget_s=5)
diff = time.time() - start
self.assertLess(diff, 10)
# Combined stopper. Shorter timeout should win.
start = time.time()
tune.run(
"f1",
stop=TimeoutStopper(10),
time_budget_s=datetime.timedelta(seconds=3))
diff = time.time() - start
self.assertLess(diff, 9)
def testInfiniteTrials(self):
def train(config):
time.sleep(0.5)
tune.report(np.random.uniform(-10., 10.))
start = time.time()
out = tune.run(train, num_samples=-1, time_budget_s=10)
taken = time.time() - start
# Allow for init time overhead
self.assertLessEqual(taken, 20.)
self.assertGreaterEqual(len(out.trials), 0)
status = dict(Counter([trial.status for trial in out.trials]))
self.assertGreaterEqual(status["TERMINATED"], 1)
self.assertLessEqual(status.get("PENDING", 0), 1)
def testMetricCheckingEndToEnd(self):
def train(config):
tune.report(val=4, second=8)
def train2(config):
return
os.environ["TUNE_DISABLE_STRICT_METRIC_CHECKING"] = "0"
# `acc` is not reported, should raise
with self.assertRaises(TuneError):
# The trial runner raises a ValueError, but the experiment fails
# with a TuneError
tune.run(train, metric="acc")
# `val` is reported, should not raise
tune.run(train, metric="val")
# Run does not report anything, should not raise
tune.run(train2, metric="val")
# Only the scheduler requires a metric
with self.assertRaises(TuneError):
tune.run(
train,
scheduler=AsyncHyperBandScheduler(metric="acc", mode="max"))
tune.run(
train, scheduler=AsyncHyperBandScheduler(metric="val", mode="max"))
# Only the search alg requires a metric
with self.assertRaises(TuneError):
tune.run(
train,
config={"a": tune.choice([1, 2])},
search_alg=HyperOptSearch(metric="acc", mode="max"))
# Metric is passed
tune.run(
train,
config={"a": tune.choice([1, 2])},
search_alg=HyperOptSearch(metric="val", mode="max"))
os.environ["TUNE_DISABLE_STRICT_METRIC_CHECKING"] = "1"
# With strict metric checking disabled, this should not raise
tune.run(train, metric="acc")
def testTrialDirCreation(self):
def test_trial_dir(config):
return 1.0
# By default, the directory should be named `test_trial_dir_{date}`
with tempfile.TemporaryDirectory() as tmp_dir:
tune.run(test_trial_dir, local_dir=tmp_dir)
subdirs = list(os.listdir(tmp_dir))
self.assertNotIn("test_trial_dir", subdirs)
found = False
for subdir in subdirs:
if subdir.startswith("test_trial_dir_"): # Date suffix
found = True
break
self.assertTrue(found)
# If we set an explicit name, no date should be appended
with tempfile.TemporaryDirectory() as tmp_dir:
tune.run(test_trial_dir, local_dir=tmp_dir, name="my_test_exp")
subdirs = list(os.listdir(tmp_dir))
self.assertIn("my_test_exp", subdirs)
found = False
for subdir in subdirs:
if subdir.startswith("my_test_exp_"): # Date suffix
found = True
break
self.assertFalse(found)
# Don't append date if we set the env variable
os.environ["TUNE_DISABLE_DATED_SUBDIR"] = "1"
with tempfile.TemporaryDirectory() as tmp_dir:
tune.run(test_trial_dir, local_dir=tmp_dir)
subdirs = list(os.listdir(tmp_dir))
self.assertIn("test_trial_dir", subdirs)
found = False
for subdir in subdirs:
if subdir.startswith("test_trial_dir_"): # Date suffix
found = True
break
self.assertFalse(found)
def testWithParameters(self):
class Data:
def __init__(self):
self.data = [0] * 500_000
data = Data()
data.data[100] = 1
class TestTrainable(Trainable):
def setup(self, config, data):
self.data = data.data
self.data[101] = 2 # Changes are local
def step(self):
return dict(
metric=len(self.data), hundred=self.data[100], done=True)
trial_1, trial_2 = tune.run(
tune.with_parameters(TestTrainable, data=data),
num_samples=2).trials
self.assertEqual(data.data[101], 0)
self.assertEqual(trial_1.last_result["metric"], 500_000)
self.assertEqual(trial_1.last_result["hundred"], 1)
self.assertEqual(trial_2.last_result["metric"], 500_000)
self.assertEqual(trial_2.last_result["hundred"], 1)
self.assertTrue(str(trial_1).startswith("TestTrainable"))
def testWithParameters2(self):
class Data:
def __init__(self):
import numpy as np
self.data = np.random.rand((2 * 1024 * 1024))
class TestTrainable(Trainable):
def setup(self, config, data):
self.data = data.data
def step(self):
return dict(metric=len(self.data), done=True)
trainable = tune.with_parameters(TestTrainable, data=Data())
# ray.cloudpickle will crash for some reason
import cloudpickle as cp
dumped = cp.dumps(trainable)
assert sys.getsizeof(dumped) < 100 * 1024
def testWithParameters3(self):
class Data:
def __init__(self):
import numpy as np
self.data = np.random.rand((2 * 1024 * 1024))
class TestTrainable(Trainable):
def setup(self, config, data):
self.data = data.data
def step(self):
return dict(metric=len(self.data), done=True)
new_data = Data()
ref = ray.put(new_data)
trainable = tune.with_parameters(TestTrainable, data=ref)
# ray.cloudpickle will crash for some reason
import cloudpickle as cp
dumped = cp.dumps(trainable)
assert sys.getsizeof(dumped) < 100 * 1024
class SerializabilityTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
ray.init(local_mode=True)
@classmethod
def tearDownClass(cls):
ray.shutdown()
def tearDown(self):
if "RAY_PICKLE_VERBOSE_DEBUG" in os.environ:
del os.environ["RAY_PICKLE_VERBOSE_DEBUG"]
def testNotRaisesNonserializable(self):
import threading
lock = threading.Lock()
def train(config):
print(lock)
tune.report(val=4, second=8)
with self.assertRaisesRegex(TypeError, "RAY_PICKLE_VERBOSE_DEBUG"):
# Serializing the trainable fails because of the captured lock; the
# error message should point the user at RAY_PICKLE_VERBOSE_DEBUG
tune.run(train, metric="acc")
def testRaisesNonserializable(self):
os.environ["RAY_PICKLE_VERBOSE_DEBUG"] = "1"
import threading
lock = threading.Lock()
def train(config):
print(lock)
tune.report(val=4, second=8)
with self.assertRaises(TypeError) as cm:
# With verbose pickle debugging enabled, the error message names the
# offending object (the lock) instead of the env variable hint
tune.run(train, metric="acc")
msg = cm.exception.args[0]
assert "RAY_PICKLE_VERBOSE_DEBUG" not in msg
assert "thread.lock" in msg
class ShimCreationTest(unittest.TestCase):
def testCreateScheduler(self):
kwargs = {"metric": "metric_foo", "mode": "min"}
scheduler = "async_hyperband"
shim_scheduler = tune.create_scheduler(scheduler, **kwargs)
real_scheduler = AsyncHyperBandScheduler(**kwargs)
assert type(shim_scheduler) is type(real_scheduler)
def testCreateSearcher(self):
kwargs = {"metric": "metric_foo", "mode": "min"}
searcher_ax = "ax"
shim_searcher_ax = tune.create_searcher(searcher_ax, **kwargs)
real_searcher_ax = AxSearch(space=[], **kwargs)
assert type(shim_searcher_ax) is type(real_searcher_ax)
searcher_hyperopt = "hyperopt"
shim_searcher_hyperopt = tune.create_searcher(searcher_hyperopt,
**kwargs)
real_searcher_hyperopt = HyperOptSearch({}, **kwargs)
assert type(shim_searcher_hyperopt) is type(real_searcher_hyperopt)
def testExtraParams(self):
kwargs = {"metric": "metric_foo", "mode": "min", "extra_param": "test"}
scheduler = "async_hyperband"
tune.create_scheduler(scheduler, **kwargs)
searcher_ax = "ax"
tune.create_searcher(searcher_ax, **kwargs)
class ApiTestFast(unittest.TestCase):
@classmethod
def setUpClass(cls):
ray.init(
num_cpus=4, num_gpus=0, local_mode=True, include_dashboard=False)
@classmethod
def tearDownClass(cls):
ray.shutdown()
_register_all()
def setUp(self):
self.tmpdir = tempfile.mkdtemp()
def tearDown(self):
shutil.rmtree(self.tmpdir)
def testNestedResults(self):
def create_result(i):
return {"test": {"1": {"2": {"3": i, "4": False}}}}
flattened_keys = list(flatten_dict(create_result(0)))
class _MockScheduler(FIFOScheduler):
results = []
def on_trial_result(self, trial_runner, trial, result):
self.results += [result]
return TrialScheduler.CONTINUE
def on_trial_complete(self, trial_runner, trial, result):
self.complete_result = result
def train(config, reporter):
for i in range(100):
reporter(**create_result(i))
algo = _MockSuggestionAlgorithm()
scheduler = _MockScheduler()
[trial] = tune.run(
train,
scheduler=scheduler,
search_alg=algo,
stop={
"test/1/2/3": 20
}).trials
self.assertEqual(trial.status, Trial.TERMINATED)
self.assertEqual(trial.last_result["test"]["1"]["2"]["3"], 20)
self.assertEqual(trial.last_result["test"]["1"]["2"]["4"], False)
self.assertEqual(trial.last_result[TRAINING_ITERATION], 21)
self.assertEqual(len(scheduler.results), 20)
self.assertTrue(
all(
set(result) >= set(flattened_keys)
for result in scheduler.results))
self.assertTrue(set(scheduler.complete_result) >= set(flattened_keys))
self.assertEqual(len(algo.results), 20)
self.assertTrue(
all(set(result) >= set(flattened_keys) for result in algo.results))
with self.assertRaises(TuneError):
[trial] = tune.run(train, stop={"1/2/3": 20})
with self.assertRaises(TuneError):
[trial] = tune.run(train, stop={"test": 1}).trials
def testIterationCounter(self):
def train(config, reporter):
for i in range(100):
reporter(itr=i, timesteps_this_iter=1)
register_trainable("exp", train)
config = {
"my_exp": {
"run": "exp",
"config": {
"iterations": 100,
},
"stop": {
"timesteps_total": 100
},
}
}
[trial] = run_experiments(config)
self.assertEqual(trial.status, Trial.TERMINATED)
self.assertEqual(trial.last_result[TRAINING_ITERATION], 100)
self.assertEqual(trial.last_result["itr"], 99)
def testErrorReturn(self):
def train(config, reporter):
raise Exception("uh oh")
register_trainable("f1", train)
def f():
run_experiments({
"foo": {
"run": "f1",
}
})
self.assertRaises(TuneError, f)
def testSuccess(self):
def train(config, reporter):
for i in range(100):
reporter(timesteps_total=i)
register_trainable("f1", train)
[trial] = run_experiments({
"foo": {
"run": "f1",
}
})
self.assertEqual(trial.status, Trial.TERMINATED)
self.assertEqual(trial.last_result[TIMESTEPS_TOTAL], 99)
def testNoRaiseFlag(self):
def train(config, reporter):
raise Exception()
register_trainable("f1", train)
[trial] = run_experiments(
{
"foo": {
"run": "f1",
}
}, raise_on_failed_trial=False)
self.assertEqual(trial.status, Trial.ERROR)
def testReportInfinity(self):
def train(config, reporter):
for _ in range(100):
reporter(mean_accuracy=float("inf"))
register_trainable("f1", train)
[trial] = run_experiments({
"foo": {
"run": "f1",
}
})
self.assertEqual(trial.status, Trial.TERMINATED)
self.assertEqual(trial.last_result["mean_accuracy"], float("inf"))
if __name__ == "__main__":
import pytest
sys.exit(pytest.main(["-v", __file__]))
|
|
######################################################################
#
# File: bstat.py
#
# Copyright 2013 Brian Beach, All Rights Reserved.
#
######################################################################
import itertools
import math
import scipy.special
import scipy.stats
import unittest
def percentile(v, p):
v = sorted(v)
position = (p / 100.0) * (len(v) - 1)
index = int(position)
next_index = min(index + 1, len(v) - 1)
# Guard the case where the position lands exactly on the last element; the
# two interpolation weights would otherwise both be zero.
if index == next_index: return v[index]
return v[index] * (next_index - position) + v[next_index] * (position - index)
def trimean(v):
return (percentile(v, 25) + 2 * percentile(v, 50) + percentile(v, 75)) / 4.0
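# Illustrative sketch (hand-picked values, not taken from the test suite below):
# percentile() linearly interpolates between the two nearest order statistics,
# and trimean() averages the quartiles with double weight on the median.
#
#   >>> percentile([1, 2, 3, 4], 50)
#   2.5
#   >>> trimean([1, 2, 3, 4])
#   2.5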
def mean(v):
return float(sum(v)) / float(len(v))
def sum_of_squares(v):
return sum(x * x for x in v)
def standard_deviation(v):
m = mean(v)
variance = sum_of_squares(x - m for x in v) / (len(v) - 1.0)
return math.sqrt(variance)
def interquartile_range(v):
q25 = percentile(v, 25)
q75 = percentile(v, 75)
return q75 - q25
def group_pairs(seq):
for (i, x) in enumerate(seq):
if i % 2 == 0:
prev = x
else:
yield (prev, x)
def unzip(pairs):
return zip(*pairs) # not very efficient
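# Illustrative sketch (hand-picked values): group_pairs() pairs up consecutive
# elements of a flat sequence, and unzip() transposes a list of pairs back into
# parallel sequences.
#
#   >>> list(group_pairs([1, 'a', 2, 'b']))
#   [(1, 'a'), (2, 'b')]
#   >>> list(unzip([(1, 'a'), (2, 'b')]))
#   [(1, 2), ('a', 'b')]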
def deviations(v):
m = mean(v)
return [x - m for x in v]
def dot_product(x, y):
assert len(x) == len(y)
return sum(a * b for (a, b) in zip(x, y))
def correlation_coefficient(X, Y):
x = deviations(X)
y = deviations(Y)
x_squares = sum_of_squares(x)
y_squares = sum_of_squares(y)
return dot_product(x, y) / math.sqrt(x_squares * y_squares)
def product(seq):
result = 1
for x in seq:
result = result * x
return result
def factorial(n):
return product(range(1, n + 1))
def binomial_probability(N, x, pi):
return (
(factorial(N) / float(factorial(x) * factorial(N - x))) *
pow(pi, x) *
pow(1.0 - pi, N - x)
)
def binomial_probabilities(N, vx, pi):
return sum(binomial_probability(N, x, pi) for x in vx)
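# Worked example (same numbers as the unit tests below): the probability of
# exactly one head in two fair coin flips is C(2, 1) * 0.5 * 0.5 = 0.5, and the
# probability of eight or more heads in ten flips sums the individual terms.
#
#   >>> binomial_probability(2, 1, 0.5)
#   0.5
#   >>> binomial_probabilities(10, [8, 9, 10], 0.5)  # ~0.0546875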
def poisson_probability(mu, x):
"""
Given a mean number of successes, mu, what is the probability of x
successes?
"""
return math.exp(-mu) * pow(mu, x) / factorial(x)
def multinomial_probability(v_prob, v_count):
"""
Given a set of possible outcomes, where the probability of outcome i
is v_prob[i], what is the probability that outcome i happens
v_count[i] times, for every i?
"""
assert len(v_prob) == len(v_count)
return (
(float(factorial(sum(v_count))) / product(factorial(ni) for ni in v_count)) *
product(pow(pi, ni) for (pi, ni) in zip(v_prob, v_count))
)
def bayes(p_B_A, p_B_notA, p_A):
"""
Return the probability of A given B: P(A|B) p_A_B
Inputs:
Probability of B given A: P(B|A) p_B_A
Probability of B given not A: P(B|A') p_B_notA
Probability of A: P(A) p_A
"""
p_notA = 1.0 - p_A
return (p_B_A * p_A) / (p_B_A * p_A + p_B_notA * p_notA)
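# Worked example using the numbers exercised by the unit test below: a
# screening test with P(positive | disease) = 0.98, P(positive | no disease)
# = 0.06 and a base rate P(disease) = 0.04 yields P(disease | positive) of
# only about 0.40.
#
#   >>> bayes(0.98, 0.06, 0.04)  # ~0.4049587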
def percent_in_range_normal(mean, sd, low, high):
"""
Returns the percentage of the population between low and high,
assuming a normal distribution.
"""
low_sigma = float(low - mean) / sd
high_sigma = float(high - mean) / sd
low_percentile = scipy.stats.norm.cdf(low_sigma)
high_percentile = scipy.stats.norm.cdf(high_sigma)
return high_percentile - low_percentile
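# For reference (values match the unit test below): a normal population with
# mean 38 and standard deviation 6 has roughly 78.7% of values between 30 and 45.
#
#   >>> percent_in_range_normal(38, 6, 30, 45)  # ~0.7871163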
def poisson_confidence_interval(number_of_occurrences, sample_size, confidence=0.95):
"""
Returns a triple (low, rate, high):
low - the rate that is the lower bound of the confidence range
rate - the best estimate of the rate (number_of_occurrences / sample_size)
high - the rate that is the upper bound of the confidence range
https://en.wikipedia.org/wiki/Poisson_distribution (see CDF)
http://newton.cx/~peter/2012/06/poisson-distribution-confidence-intervals/
"""
if sample_size == 0:
raise Exception("sample_size cannot be 0")
rate = float(number_of_occurrences) / float(sample_size)
a = 1.0 - confidence
if number_of_occurrences != 0:
low_occurrences = scipy.special.gammaincinv(number_of_occurrences, a / 2.0)
low_rate = low_occurrences / sample_size
else:
low_rate = 0.0
high_occurrences = scipy.special.gammaincinv(number_of_occurrences + 1, 1.0 - a / 2.0)
high_rate = high_occurrences / sample_size
return (low_rate, rate, high_rate)
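# For reference (values match the unit test below): 14 events observed in a
# sample of 400 gives a point estimate of 0.035 events per unit with a 95%
# confidence interval of roughly (0.019, 0.059).
#
#   >>> poisson_confidence_interval(14, 400)  # ~(0.0191348, 0.035, 0.0587241)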
class TestStats(unittest.TestCase):
def test_standard_deviation(self):
sd = standard_deviation([6, 11, 15, 12, 3, 14, 15, 15])
self.assertAlmostEqual(4.5650066, sd)
def test_interquartile_range(self):
r = interquartile_range([12, 13, 14, 15, 9, 10, 16, 10,
8, 10, 11, 12, 13, 22, 23, 24, 25])
self.assertAlmostEqual(6, r)
def test_correlation(self):
x = [8, 9, 10, 12, 10, 13, 8, 7, 7, 12,
11, 11, 9, 13, 9, 10, 11, 10, 7, 8, 8, 11, 8, 13, 9]
y = [8, 10, 9, 12, 9, 11, 9, 10, 10, 12,
8, 11, 9, 11, 9, 9, 11, 12, 9, 10, 8, 10, 9, 13, 13]
r = correlation_coefficient(x, y)
self.assertAlmostEqual(0.5427855, r)
def test_factorial(self):
self.assertEqual(120, factorial(5))
def test_binomial_probability(self):
# http://onlinestatbook.com/2/probability/binomial.html
self.assertAlmostEqual(0.5, binomial_probability(2, 1, 0.5))
self.assertAlmostEqual(0.25, binomial_probability(2, 0, 0.5))
self.assertAlmostEqual(0.36, binomial_probability(2, 0, 0.4))
self.assertAlmostEqual(0.0546875, binomial_probabilities(10, [8, 9, 10], 0.5))
def test_poisson_probability(self):
self.assertAlmostEqual(0.0116442, poisson_probability(21, 12))
def test_multinomial_probability(self):
# http://onlinestatbook.com/2/probability/multinomial.html
self.assertAlmostEqual(0.1008, multinomial_probability([0.4, 0.1, 0.5], [4, 1, 5]))
def test_bayes(self):
self.assertAlmostEqual(0.4049587, bayes(0.98, 0.06, 0.04))
def test_percent_in_range_normal(self):
# http://onlinestatbook.com/2/normal_distribution/areas_normal.html
self.assertAlmostEqual(0.7871163, percent_in_range_normal(38, 6, 30, 45))
def test_poisson_confidence_interval(self):
# from: http://www.statsdirect.com/help/default.htm#rates/poisson_rate_ci.htm
(low, rate, high) = poisson_confidence_interval(14, 400)
self.assertAlmostEqual(0.0191348, low)
self.assertAlmostEqual(0.0350000, rate)
self.assertAlmostEqual(0.0587241, high)
(low, rate, high) = poisson_confidence_interval(0, 400)
self.assertAlmostEqual(0.0, low)
self.assertAlmostEqual(0.0, rate)
self.assertAlmostEqual(0.0092222, high)
if __name__ == '__main__':
unittest.main()
|
|
#!/usr/bin/python
# Copyright (c) 2009 Las Cumbres Observatory (www.lcogt.net)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
'''protobuf/server.py - RPC server implementation for Google's Protocol Buffers.
This package provides classes that handle the server side of an RPC
transaction. It implements Shardul Deo's RPC protocol, using Protocol Buffers
as the data interchange format. See
http://code.google.com/p/protobuf-socket-rpc/
http://code.google.com/p/protobuf/
for more information.
Authors: Martin Norbury (mnorbury@lcogt.net)
Eric Saunders (esaunders@lcogt.net)
May 2009
'''
# Standard library imports
import SocketServer
import threading
import logging
import socket
# Third-party imports
# Module imports
from protobuf import rpc_pb2 as rpc_pb
from protobuf.controller import SocketRpcController
from protobuf import error
class NullHandler(logging.Handler):
'''A no-op logging handler that prevents "no handlers could be found"
warnings for clients that do not configure the logging package.'''
def emit(self,record):
pass
log = logging.getLogger(__name__)
log.addHandler(NullHandler())
class Callback():
'''Class to allow execution of client-supplied callbacks.'''
def __init__(self):
self.invoked = False
self.response = None
def run(self,response):
self.response = response
self.invoked = True
class SocketHandler(SocketServer.StreamRequestHandler):
'''Handler for service requests.'''
def handle(self):
'''Entry point for handler functionality.'''
log.debug('Got a request')
# Parse the incoming request
recv = self.rfile.read()
# Evaluate and execute the request
rpcResponse = self.validateAndExecuteRequest(recv)
log.debug("Response to return to client \n %s" % rpcResponse)
# Send reply to client
self.wfile.write(rpcResponse.SerializeToString())
self.request.shutdown(socket.SHUT_RDWR)
def validateAndExecuteRequest(self,input):
'''Match a client request to the corresponding service and method on
the server, and then call the service.'''
# Parse and validate the client's request
try:
request = self.parseServiceRequest(input)
except error.BadRequestDataError, e:
return self.handleError(e)
# Retrieve the requested service
try:
service = self.retrieveService(request.service_name)
except error.ServiceNotFoundError, e:
return self.handleError(e)
# Retrieve the requested method
try:
method = self.retrieveMethod(service, request.method_name)
except error.MethodNotFoundError, e:
return self.handleError(e)
# Retrieve the protocol message
try:
proto_request = self.retrieveProtoRequest(service,method,request)
except error.BadRequestProtoError, e:
return self.handleError(e)
# Execute the specified method of the service with the requested params
try:
response = self.callMethod(service, method, proto_request)
except error.RpcError, e:
return self.handleError(e)
return response
def parseServiceRequest(self,bytestream_from_client):
'''Validate the data stream received from the client.'''
# Convert the client request into a PB Request object
request = rpc_pb.Request()
# Catch anything which isn't a valid PB bytestream
try:
request.MergeFromString(bytestream_from_client)
except Exception, e:
raise error.BadRequestDataError("Invalid request from \
client (decodeError): " + str(e))
# Check the request is correctly initialized
if not request.IsInitialized():
raise error.BadRequestDataError("Client request is missing \
mandatory fields")
log.debug('Request = %s' % request)
return request
def retrieveService(self, service_name):
'''Match the service request to a registered service.'''
service = SocketRpcServer.serviceMap.get(service_name)
if service is None:
msg = "Could not find service '%s'" % service_name
raise error.ServiceNotFoundError(msg)
return service
def retrieveMethod(self, service, method_name):
'''Match the method request to a method of a registered service.'''
method = service.DESCRIPTOR.FindMethodByName(method_name)
if method is None:
msg = "Could not find method '%s' in service '%s'"\
% (method_name,service.DESCRIPTOR.name)
raise error.MethodNotFoundError(msg)
return method
def retrieveProtoRequest(self, service, method, request):
'''Retrieve the user's protocol message from the RPC message.'''
proto_request = service.GetRequestClass(method)()
proto_request.ParseFromString(request.request_proto)
# Check the request parsed correctly
if not proto_request.IsInitialized():
raise error.BadRequestProtoError('Invalid protocol request '
'from client')
return proto_request
def callMethod(self, service, method, proto_request):
'''Execute a service method request.'''
log.debug('Calling service %s' % service)
log.debug('Calling method %s' % method)
# Create the controller (initialised to success) and callback
controller = SocketRpcController()
controller.success = True
callback = Callback()
try:
service.CallMethod(method,controller,proto_request,callback)
except Exception, e:
raise error.RpcError(e.message)
# Return an RPC response, with payload defined in the callback
response = rpc_pb.Response()
if callback.response:
response.callback = True
response.response_proto = callback.response.SerializeToString()
else:
response.callback = callback.invoked
# Check whether the service implementation marked the controller as failed.
if not controller.success:
response.error = controller.error
response.error_reason = rpc_pb.RPC_FAILED
return response
def handleError(self, e):
'''Produce an RPC response to convey a server error to the client.'''
msg = "%d : %s" % (e.rpc_error_code, e.message)
log.error(msg)
# Create error reply
response = rpc_pb.Response()
response.error_reason = e.rpc_error_code
response.error = e.message
return response
class ThreadedTCPServer(SocketServer.ThreadingMixIn, SocketServer.TCPServer):
# Set as a class attribute so the listening socket can be rebound
# immediately after a server restart.
allow_reuse_address = True
class SocketRpcServer:
'''Socket server for running rpc services.'''
serviceMap={}
def __init__(self,port,host='localhost'):
'''port - port to listen on; host - interface to bind to (default 'localhost')'''
self.port = port
self.host = host
def registerService(self,service):
'''Register an RPC service.'''
SocketRpcServer.serviceMap[service.GetDescriptor().full_name] = service
def run(self):
'''Activate the server.'''
log.info('Running server on port %d' % self.port)
server = ThreadedTCPServer((self.host,self.port),SocketHandler)
server.serve_forever()
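# Minimal usage sketch (illustrative only; MySearchService is a hypothetical
# service class generated from a .proto definition and subclassed to provide
# the actual method bodies):
#
#   server = SocketRpcServer(8090)             # listen on localhost:8090
#   server.registerService(MySearchService())  # register a concrete service
#   server.run()                               # blocks, serving requests forever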
|
|
# -*- coding: utf-8 -*-
'''
Support for Opkg
.. important::
If you feel that Salt should be using this module to manage packages on a
minion, and it is using a different module (or gives an error similar to
*'pkg.install' is not available*), see :ref:`here
<module-provider-override>`.
.. versionadded:: 2016.3.0
.. note::
For version comparison support, the ``opkg-utils`` package must be
installed.
'''
from __future__ import absolute_import
# Import python libs
import copy
import os
import re
import logging
from salt.ext import six
try:
from shlex import quote as _cmd_quote # pylint: disable=E0611
except ImportError:
from pipes import quote as _cmd_quote
# Import salt libs
import salt.utils
import salt.utils.itertools
from salt.utils.decorators import which as _which
from salt.exceptions import (
CommandExecutionError, MinionError, SaltInvocationError
)
REPO_REGEXP = r'^#?\s*(src|src/gz)\s+[^\s<>]+\s+[^\s<>]+'
OPKG_CONFDIR = '/etc/opkg'
log = logging.getLogger(__name__)
# Define the module's virtual name
__virtualname__ = 'pkg'
def __virtual__():
'''
Confirm this module is on a nilrt based system
'''
if __grains__.get('os_family', False) == 'NILinuxRT':
return __virtualname__
return (False, "Module opkg only works on nilrt based systems")
def latest_version(*names, **kwargs):
'''
Return the latest version of the named package available for upgrade or
installation. If more than one package name is specified, a dict of
name/version pairs is returned.
If the latest version of a given package is already installed, an empty
string will be returned for that package.
CLI Example:
.. code-block:: bash
salt '*' pkg.latest_version <package name>
salt '*' pkg.latest_version <package1> <package2> <package3> ...
'''
refresh = salt.utils.is_true(kwargs.pop('refresh', True))
if len(names) == 0:
return ''
ret = {}
for name in names:
ret[name] = ''
# Refresh before looking for the latest version available
if refresh:
refresh_db()
cmd = ['opkg', 'list-upgradable']
out = __salt__['cmd.run_stdout'](cmd,
output_loglevel='trace',
python_shell=False)
for line in salt.utils.itertools.split(out, '\n'):
try:
name, _oldversion, newversion = line.split(' - ')
if name in names:
ret[name] = newversion
except ValueError:
pass
# Return a string if only one package name passed
if len(names) == 1:
return ret[names[0]]
return ret
# available_version is being deprecated
available_version = latest_version
def version(*names, **kwargs):
'''
Returns a string representing the package version or an empty string if not
installed. If more than one package name is specified, a dict of
name/version pairs is returned.
CLI Example:
.. code-block:: bash
salt '*' pkg.version <package name>
salt '*' pkg.version <package1> <package2> <package3> ...
'''
return __salt__['pkg_resource.version'](*names, **kwargs)
def refresh_db():
'''
Update the opkg database so it reflects the latest packages available in the configured repositories.
Returns a dict, with the keys being package databases and the values being
the result of the update attempt. Values can be one of the following:
- ``True``: Database updated successfully
- ``False``: Problem updating database
CLI Example:
.. code-block:: bash
salt '*' pkg.refresh_db
'''
ret = {}
cmd = ['opkg', 'update']
call = __salt__['cmd.run_all'](cmd,
output_loglevel='trace',
python_shell=False)
if call['retcode'] != 0:
comment = ''
if 'stderr' in call:
comment += call['stderr']
raise CommandExecutionError(
'{0}'.format(comment)
)
else:
out = call['stdout']
for line in salt.utils.itertools.split(out, '\n'):
if 'Inflating' in line:
key = line.strip().split()[1].split('.')[0]
ret[key] = True
elif 'Failed to download' in line:
key = line.strip().split()[5].split(',')[0]
ret[key] = False
return ret
# TODO: opkg doesn't support installation of a specific version of a package
# (opkg issue 176). Once fixed, this function should add support.
def install(name=None,
refresh=False,
pkgs=None,
sources=None,
**kwargs):
'''
Install the passed package. Add ``refresh=True`` to update the opkg database before installing.
name
The name of the package to be installed. Note that this parameter is
ignored if either "pkgs" or "sources" is passed. Additionally, please
note that this option can only be used to install packages from a
software repository. To install a package file manually, use the
"sources" option.
CLI Example:
.. code-block:: bash
salt '*' pkg.install <package name>
refresh
Whether or not to refresh the package database before installing.
Multiple Package Installation Options:
pkgs
A list of packages to install from a software repository. Must be
passed as a python list.
CLI Example:
.. code-block:: bash
salt '*' pkg.install pkgs='["foo", "bar"]'
sources
A list of IPK packages to install. Must be passed as a list of dicts,
with the keys being package names, and the values being the source URI
or local path to the package. Dependencies are automatically resolved
and marked as auto-installed.
CLI Example:
.. code-block:: bash
salt '*' pkg.install sources='[{"foo": "salt://foo.deb"},{"bar": "salt://bar.deb"}]'
install_recommends
Whether to install the packages marked as recommended. Default is True.
Returns a dict containing the new package names and versions::
{'<package>': {'old': '<old-version>',
'new': '<new-version>'}}
'''
refreshdb = salt.utils.is_true(refresh)
try:
pkgs, pkg_type = __salt__['pkg_resource.parse_targets'](
name, pkgs, sources, **kwargs
)
except MinionError as exc:
raise CommandExecutionError(exc)
old = list_pkgs()
cmd = ['opkg', 'install']
if pkgs is None or len(pkgs) == 0:
return {}
elif pkg_type == 'file':
cmd.extend(pkgs)
elif pkg_type == 'repository':
targets = list(pkgs.keys())
if 'install_recommends' in kwargs and not kwargs['install_recommends']:
cmd.append('--no-install-recommends')
cmd.extend(targets)
if refreshdb:
refresh_db()
out = __salt__['cmd.run_all'](
cmd,
output_loglevel='trace',
python_shell=False
)
if out['retcode'] != 0 and out['stderr']:
errors = [out['stderr']]
else:
errors = []
__context__.pop('pkg.list_pkgs', None)
new = list_pkgs()
ret = salt.utils.compare_dicts(old, new)
if errors:
raise CommandExecutionError(
'Problem encountered installing package(s)',
info={'errors': errors, 'changes': ret}
)
return ret
def remove(name=None, pkgs=None, **kwargs): # pylint: disable=unused-argument
'''
Remove packages using ``opkg remove``.
name
The name of the package to be deleted.
Multiple Package Options:
pkgs
A list of packages to delete. Must be passed as a python list. The
``name`` parameter will be ignored if this option is passed.
Returns a dict containing the changes.
CLI Example:
.. code-block:: bash
salt '*' pkg.remove <package name>
salt '*' pkg.remove <package1>,<package2>,<package3>
salt '*' pkg.remove pkgs='["foo", "bar"]'
'''
try:
pkg_params = __salt__['pkg_resource.parse_targets'](name, pkgs)[0]
except MinionError as exc:
raise CommandExecutionError(exc)
old = list_pkgs()
targets = [x for x in pkg_params if x in old]
if not targets:
return {}
cmd = ['opkg', 'remove']
cmd.extend(targets)
out = __salt__['cmd.run_all'](
cmd,
output_loglevel='trace',
python_shell=False
)
if out['retcode'] != 0 and out['stderr']:
errors = [out['stderr']]
else:
errors = []
__context__.pop('pkg.list_pkgs', None)
new = list_pkgs()
ret = salt.utils.compare_dicts(old, new)
if errors:
raise CommandExecutionError(
'Problem encountered removing package(s)',
info={'errors': errors, 'changes': ret}
)
return ret
def purge(name=None, pkgs=None, **kwargs): # pylint: disable=unused-argument
'''
opkg does not support package purges, so this function is identical to
:mod:`pkg.remove <salt.modules.opkg.remove>`.
name
The name of the package to be deleted.
Multiple Package Options:
pkgs
A list of packages to delete. Must be passed as a python list. The
``name`` parameter will be ignored if this option is passed.
Returns a dict containing the changes.
CLI Example:
.. code-block:: bash
salt '*' pkg.purge <package name>
salt '*' pkg.purge <package1>,<package2>,<package3>
salt '*' pkg.purge pkgs='["foo", "bar"]'
'''
return remove(name=name, pkgs=pkgs)
def upgrade(refresh=True):
'''
Upgrades all packages via ``opkg upgrade``
Returns a dict containing the changes.
{'<package>': {'old': '<old-version>',
'new': '<new-version>'}}
CLI Example:
.. code-block:: bash
salt '*' pkg.upgrade
'''
ret = {'changes': {},
'result': True,
'comment': '',
}
if salt.utils.is_true(refresh):
refresh_db()
old = list_pkgs()
cmd = ['opkg', 'upgrade']
call = __salt__['cmd.run_all'](cmd,
output_loglevel='trace',
python_shell=False,
redirect_stderr=True)
if call['retcode'] != 0:
ret['result'] = False
if call['stdout']:
ret['comment'] = call['stdout']
__context__.pop('pkg.list_pkgs', None)
new = list_pkgs()
ret['changes'] = salt.utils.compare_dicts(old, new)
return ret
def hold(name=None, pkgs=None, sources=None, **kwargs): # pylint: disable=W0613
'''
Set package in 'hold' state, meaning it will not be upgraded.
name
The name of the package, e.g., 'tmux'
CLI Example:
.. code-block:: bash
salt '*' pkg.hold <package name>
pkgs
A list of packages to hold. Must be passed as a python list.
CLI Example:
.. code-block:: bash
salt '*' pkg.hold pkgs='["foo", "bar"]'
'''
if not name and not pkgs and not sources:
raise SaltInvocationError(
'One of name, pkgs, or sources must be specified.'
)
if pkgs and sources:
raise SaltInvocationError(
'Only one of pkgs or sources can be specified.'
)
targets = []
if pkgs:
targets.extend(pkgs)
elif sources:
for source in sources:
targets.append(next(iter(source)))
else:
targets.append(name)
ret = {}
for target in targets:
if isinstance(target, dict):
target = next(iter(target))
ret[target] = {'name': target,
'changes': {},
'result': False,
'comment': ''}
state = _get_state(target)
if not state:
ret[target]['comment'] = ('Package {0} not currently held.'
.format(target))
elif state != 'hold':
if 'test' in __opts__ and __opts__['test']:
ret[target].update(result=None)
ret[target]['comment'] = ('Package {0} is set to be held.'
.format(target))
else:
result = _set_state(target, 'hold')
ret[target].update(changes=result[target], result=True)
ret[target]['comment'] = ('Package {0} is now being held.'
.format(target))
else:
ret[target].update(result=True)
ret[target]['comment'] = ('Package {0} is already set to be held.'
.format(target))
return ret
def unhold(name=None, pkgs=None, sources=None, **kwargs): # pylint: disable=W0613
'''
Remove the 'hold' state from a package, returning it to the normal
install state so that it can be upgraded again.
name
The name of the package, e.g., 'tmux'
CLI Example:
.. code-block:: bash
salt '*' pkg.unhold <package name>
pkgs
A list of packages to unhold. Must be passed as a python list.
CLI Example:
.. code-block:: bash
salt '*' pkg.unhold pkgs='["foo", "bar"]'
'''
if not name and not pkgs and not sources:
raise SaltInvocationError(
'One of name, pkgs, or sources must be specified.'
)
if pkgs and sources:
raise SaltInvocationError(
'Only one of pkgs or sources can be specified.'
)
targets = []
if pkgs:
targets.extend(pkgs)
elif sources:
for source in sources:
targets.append(next(iter(source)))
else:
targets.append(name)
ret = {}
for target in targets:
if isinstance(target, dict):
target = next(iter(target))
ret[target] = {'name': target,
'changes': {},
'result': False,
'comment': ''}
state = _get_state(target)
if not state:
ret[target]['comment'] = ('Package {0} does not have a state.'
.format(target))
elif state == 'hold':
if 'test' in __opts__ and __opts__['test']:
ret[target].update(result=None)
                ret[target]['comment'] = ('Package {0} is set not to be '
                                          'held.'.format(target))
else:
result = _set_state(target, 'ok')
ret[target].update(changes=result[target], result=True)
ret[target]['comment'] = ('Package {0} is no longer being '
'held.'.format(target))
else:
ret[target].update(result=True)
ret[target]['comment'] = ('Package {0} is already set not to be '
'held.'.format(target))
return ret
def _get_state(pkg):
'''
View package state from the opkg database
Return the state of pkg
'''
cmd = ['opkg', 'status']
cmd.append(pkg)
out = __salt__['cmd.run'](cmd, python_shell=False)
state_flag = ''
for line in salt.utils.itertools.split(out, '\n'):
if line.startswith('Status'):
_status, _state_want, state_flag, _state_status = line.split()
return state_flag
def _set_state(pkg, state):
'''
Change package state on the opkg database
The state can be any of:
- hold
- noprune
- user
- ok
- installed
- unpacked
This command is commonly used to mark a specific package to be held from
being upgraded, that is, to be kept at a certain version.
Returns a dict containing the package name, and the new and old
versions.
'''
ret = {}
valid_states = ('hold', 'noprune', 'user', 'ok', 'installed', 'unpacked')
if state not in valid_states:
raise SaltInvocationError(
'Invalid state: {0}'.format(state)
)
oldstate = _get_state(pkg)
cmd = ['opkg', 'flag']
cmd.append(state)
cmd.append(pkg)
_out = __salt__['cmd.run'](cmd, python_shell=False)
# Missing return value check due to opkg issue 160
ret[pkg] = {'old': oldstate,
'new': state}
return ret
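# Illustrative note (not executed): given the dict built above, a hypothetical
# call such as _set_state('tmux', 'hold') would return
# {'tmux': {'old': 'ok', 'new': 'hold'}}, assuming the package was previously
# flagged 'ok'.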
def list_pkgs(versions_as_list=False, **kwargs):
'''
List the packages currently installed in a dict::
{'<package_name>': '<version>'}
CLI Example:
.. code-block:: bash
salt '*' pkg.list_pkgs
salt '*' pkg.list_pkgs versions_as_list=True
'''
versions_as_list = salt.utils.is_true(versions_as_list)
# not yet implemented or not applicable
if any([salt.utils.is_true(kwargs.get(x))
for x in ('removed', 'purge_desired')]):
return {}
if 'pkg.list_pkgs' in __context__:
if versions_as_list:
return __context__['pkg.list_pkgs']
else:
ret = copy.deepcopy(__context__['pkg.list_pkgs'])
__salt__['pkg_resource.stringify'](ret)
return ret
cmd = ['opkg', 'list-installed']
ret = {}
out = __salt__['cmd.run'](cmd, output_loglevel='trace', python_shell=False)
for line in salt.utils.itertools.split(out, '\n'):
pkg_name, pkg_version = line.split(' - ')
__salt__['pkg_resource.add_pkg'](ret, pkg_name, pkg_version)
__salt__['pkg_resource.sort_pkglist'](ret)
__context__['pkg.list_pkgs'] = copy.deepcopy(ret)
if not versions_as_list:
__salt__['pkg_resource.stringify'](ret)
return ret
def list_upgrades(refresh=True):
'''
List all available package upgrades.
CLI Example:
.. code-block:: bash
salt '*' pkg.list_upgrades
'''
ret = {}
if salt.utils.is_true(refresh):
refresh_db()
cmd = ['opkg', 'list-upgradable']
call = __salt__['cmd.run_all'](cmd,
output_loglevel='trace',
python_shell=False)
if call['retcode'] != 0:
comment = ''
if 'stderr' in call:
comment += call['stderr']
if 'stdout' in call:
comment += call['stdout']
raise CommandExecutionError(
'{0}'.format(comment)
)
else:
out = call['stdout']
for line in out.splitlines():
name, _oldversion, newversion = line.split(' - ')
ret[name] = newversion
return ret
def upgrade_available(name):
'''
Check whether or not an upgrade is available for a given package
CLI Example:
.. code-block:: bash
salt '*' pkg.upgrade_available <package name>
'''
return latest_version(name) != ''
@_which('opkg-compare-versions')
def version_cmp(pkg1, pkg2):
'''
Do a cmp-style comparison on two packages. Return -1 if pkg1 < pkg2, 0 if
pkg1 == pkg2, and 1 if pkg1 > pkg2. Return None if there was a problem
making the comparison.
CLI Example:
.. code-block:: bash
salt '*' pkg.version_cmp '0.2.4-0' '0.2.4.1-0'
'''
cmd_compare = ['opkg-compare-versions']
for oper, ret in (("<<", -1), ("=", 0), (">>", 1)):
cmd = cmd_compare[:]
cmd.append(_cmd_quote(pkg1))
cmd.append(oper)
cmd.append(_cmd_quote(pkg2))
retcode = __salt__['cmd.retcode'](cmd,
output_loglevel='trace',
ignore_retcode=True,
python_shell=False)
if retcode == 0:
return ret
return None
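# Illustrative note (hedged, not part of the module): for the CLI example
# above, opkg-compare-versions would typically report '0.2.4-0' << '0.2.4.1-0',
# so version_cmp would return -1.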
def list_repos():
'''
    Lists all repos defined in /etc/opkg/*.conf
CLI Example:
.. code-block:: bash
salt '*' pkg.list_repos
'''
repos = {}
regex = re.compile(REPO_REGEXP)
for filename in os.listdir(OPKG_CONFDIR):
if filename.endswith(".conf"):
with open(os.path.join(OPKG_CONFDIR, filename)) as conf_file:
for line in conf_file:
if regex.search(line):
repo = {}
if line.startswith('#'):
repo['enabled'] = False
line = line[1:]
else:
repo['enabled'] = True
cols = line.strip().split()
                        if cols[0] == 'src':
repo['compressed'] = False
else:
repo['compressed'] = True
repo['name'] = cols[1]
repo['uri'] = cols[2]
repo['file'] = os.path.join(OPKG_CONFDIR, filename)
                        # do not store duplicated URIs
if repo['uri'] not in repos:
repos[repo['uri']] = [repo]
return repos
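# Sketch of the feed lines this parser expects in /etc/opkg/*.conf
# (hypothetical repo names and URIs):
#   src/gz base http://downloads.example.org/base      -> compressed, enabled
#   # src custom http://downloads.example.org/custom   -> uncompressed, disabled
# cols[0] determines 'compressed', cols[1] is the name and cols[2] the URI.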
def get_repo(alias):
'''
    Display a repo from /etc/opkg/*.conf
CLI Examples:
.. code-block:: bash
salt '*' pkg.get_repo alias
'''
repos = list_repos()
if repos:
for source in six.itervalues(repos):
for sub in source:
if sub['name'] == alias:
return sub
return {}
def _del_repo_from_file(alias, filepath):
'''
Remove a repo from filepath
'''
with open(filepath) as fhandle:
output = []
regex = re.compile(REPO_REGEXP)
for line in fhandle:
if regex.search(line):
if line.startswith('#'):
line = line[1:]
cols = line.strip().split()
if alias != cols[1]:
output.append(line)
with open(filepath, 'w') as fhandle:
fhandle.writelines(output)
def _add_new_repo(alias, uri, compressed, enabled=True):
'''
Add a new repo entry
'''
repostr = '# ' if not enabled else ''
repostr += 'src/gz ' if compressed else 'src '
repostr += alias + ' ' + uri + '\n'
conffile = os.path.join(OPKG_CONFDIR, alias + '.conf')
with open(conffile, 'a') as fhandle:
fhandle.write(repostr)
def _mod_repo_in_file(alias, repostr, filepath):
'''
Replace a repo entry in filepath with repostr
'''
with open(filepath) as fhandle:
output = []
for line in fhandle:
if alias not in line:
output.append(line)
else:
output.append(repostr + '\n')
with open(filepath, 'w') as fhandle:
fhandle.writelines(output)
def del_repo(alias):
'''
Delete a repo from /etc/opkg/*.conf
If the file does not contain any other repo configuration, the file itself
will be deleted.
CLI Examples:
.. code-block:: bash
salt '*' pkg.del_repo alias
'''
repos = list_repos()
if repos:
deleted_from = dict()
for repo in repos:
source = repos[repo][0]
if source['name'] == alias:
deleted_from[source['file']] = 0
_del_repo_from_file(alias, source['file'])
if deleted_from:
ret = ''
for repo in repos:
source = repos[repo][0]
if source['file'] in deleted_from:
deleted_from[source['file']] += 1
for repo_file, count in six.iteritems(deleted_from):
msg = 'Repo \'{0}\' has been removed from {1}.\n'
if count == 1 and os.path.isfile(repo_file):
msg = ('File {1} containing repo \'{0}\' has been '
'removed.\n')
try:
os.remove(repo_file)
except OSError:
pass
ret += msg.format(alias, repo_file)
# explicit refresh after a repo is deleted
refresh_db()
return ret
return "Repo {0} doesn't exist in the opkg repo lists".format(alias)
def mod_repo(alias, **kwargs):
'''
Modify one or more values for a repo. If the repo does not exist, it will
be created, so long as uri is defined.
The following options are available to modify a repo definition:
alias
alias by which opkg refers to the repo.
uri
the URI to the repo.
compressed
defines (True or False) if the index file is compressed
enabled
        enable or disable (True or False) the repository; a disabled
        repository is commented out rather than removed.
refresh
enable or disable (True or False) auto-refresh of the repositories
CLI Examples:
.. code-block:: bash
salt '*' pkg.mod_repo alias uri=http://new/uri
salt '*' pkg.mod_repo alias enabled=False
'''
repos = list_repos()
found = False
uri = ''
if 'uri' in kwargs:
uri = kwargs['uri']
for repo in repos:
source = repos[repo][0]
if source['name'] == alias:
found = True
repostr = ''
if 'enabled' in kwargs and not kwargs['enabled']:
repostr += '# '
if 'compressed' in kwargs:
                repostr += 'src/gz' if kwargs['compressed'] else 'src'
else:
repostr += 'src/gz' if source['compressed'] else 'src'
repostr += ' {0}'.format(kwargs['alias'] if 'alias' in kwargs else alias)
repostr += ' {0}'.format(kwargs['uri'] if 'uri' in kwargs else source['uri'])
_mod_repo_in_file(alias, repostr, source['file'])
elif uri and source['uri'] == uri:
raise CommandExecutionError(
'Repository \'{0}\' already exists as \'{1}\'.'.format(uri, source['name']))
if not found:
# Need to add a new repo
if 'uri' not in kwargs:
raise CommandExecutionError(
'Repository \'{0}\' not found and no URI passed to create one.'.format(alias))
# If compressed is not defined, assume True
compressed = kwargs['compressed'] if 'compressed' in kwargs else True
# If enabled is not defined, assume True
enabled = kwargs['enabled'] if 'enabled' in kwargs else True
_add_new_repo(alias, kwargs['uri'], compressed, enabled)
if 'refresh' in kwargs:
refresh_db()
def file_list(*packages):
'''
List the files that belong to a package. Not specifying any packages will
return a list of _every_ file on the system's package database (not
generally recommended).
CLI Examples:
.. code-block:: bash
salt '*' pkg.file_list httpd
salt '*' pkg.file_list httpd postfix
salt '*' pkg.file_list
'''
output = file_dict(*packages)
files = []
for package in list(output['packages'].values()):
files.extend(package)
return {'errors': output['errors'], 'files': files}
def file_dict(*packages):
'''
List the files that belong to a package, grouped by package. Not
specifying any packages will return a list of _every_ file on the system's
package database (not generally recommended).
CLI Examples:
.. code-block:: bash
        salt '*' pkg.file_dict httpd
        salt '*' pkg.file_dict httpd postfix
        salt '*' pkg.file_dict
'''
errors = []
ret = {}
cmd_files = ['opkg', 'files']
if not packages:
packages = list(list_pkgs().keys())
for package in packages:
files = []
cmd = cmd_files[:]
cmd.append(package)
out = __salt__['cmd.run_all'](cmd,
output_loglevel='trace',
python_shell=False)
for line in out['stdout'].splitlines():
if line.startswith('/'):
files.append(line)
elif line.startswith(' * '):
errors.append(line[3:])
break
else:
continue
if files:
ret[package] = files
return {'errors': errors, 'packages': ret}
def owner(*paths):
'''
Return the name of the package that owns the file. Multiple file paths can
    be passed. Like :mod:`pkg.version <salt.modules.opkg.version>`, if a single
path is passed, a string will be returned, and if multiple paths are passed,
a dictionary of file/package name pairs will be returned.
If the file is not owned by a package, or is not present on the minion,
then an empty string will be returned for that path.
    CLI Example:
    .. code-block:: bash
        salt '*' pkg.owner /usr/bin/apachectl
        salt '*' pkg.owner /usr/bin/apachectl /usr/bin/basename
'''
if not paths:
return ''
ret = {}
cmd_search = ['opkg', 'search']
for path in paths:
cmd = cmd_search[:]
cmd.append(path)
output = __salt__['cmd.run_stdout'](cmd,
output_loglevel='trace',
python_shell=False)
if output:
ret[path] = output.split(' - ')[0].strip()
else:
ret[path] = ''
if len(ret) == 1:
        return next(six.itervalues(ret))
return ret
|
|
from __future__ import unicode_literals
import hashlib
import json
import time
import warnings
from django import forms
from django.conf import settings
from django.contrib import messages
from django.contrib.auth import (
authenticate,
get_backends,
login as django_login,
logout as django_logout,
)
from django.contrib.auth.models import AbstractUser
from django.contrib.sites.shortcuts import get_current_site
from django.core.cache import cache
from django.core.mail import EmailMessage, EmailMultiAlternatives
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import resolve_url
from django.template import TemplateDoesNotExist
from django.template.loader import render_to_string
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
from . import app_settings
from ..compat import is_authenticated, reverse, validate_password
from ..utils import (
build_absolute_uri,
email_address_exists,
generate_unique_username,
get_user_model,
import_attribute,
)
from .signals import user_logged_out
try:
from django.utils.encoding import force_text
except ImportError:
from django.utils.encoding import force_unicode as force_text
class DefaultAccountAdapter(object):
error_messages = {
'username_blacklisted':
            _('Username can not be used. Please use another username.'),
'username_taken':
AbstractUser._meta.get_field('username').error_messages['unique'],
'too_many_login_attempts':
_('Too many failed login attempts. Try again later.'),
'email_taken':
_("A user is already registered with this e-mail address."),
}
def __init__(self, request=None):
self.request = request
def stash_verified_email(self, request, email):
request.session['account_verified_email'] = email
def unstash_verified_email(self, request):
ret = request.session.get('account_verified_email')
request.session['account_verified_email'] = None
return ret
def stash_user(self, request, user):
request.session['account_user'] = user
def unstash_user(self, request):
return request.session.pop('account_user', None)
def is_email_verified(self, request, email):
"""
Checks whether or not the email address is already verified
beyond allauth scope, for example, by having accepted an
invitation before signing up.
"""
ret = False
verified_email = request.session.get('account_verified_email')
if verified_email:
ret = verified_email.lower() == email.lower()
return ret
def format_email_subject(self, subject):
prefix = app_settings.EMAIL_SUBJECT_PREFIX
if prefix is None:
site = get_current_site(self.request)
prefix = "[{name}] ".format(name=site.name)
return prefix + force_text(subject)
def get_from_email(self):
"""
        This is a hook that can be overridden to programmatically
        set the 'from' email address for sending emails.
"""
return settings.DEFAULT_FROM_EMAIL
def render_mail(self, template_prefix, email, context):
"""
Renders an e-mail to `email`. `template_prefix` identifies the
e-mail that is to be sent, e.g. "account/email/email_confirmation"
"""
subject = render_to_string('{0}_subject.txt'.format(template_prefix),
context)
# remove superfluous line breaks
subject = " ".join(subject.splitlines()).strip()
subject = self.format_email_subject(subject)
from_email = self.get_from_email()
bodies = {}
for ext in ['html', 'txt']:
try:
template_name = '{0}_message.{1}'.format(template_prefix, ext)
bodies[ext] = render_to_string(template_name,
context).strip()
except TemplateDoesNotExist:
if ext == 'txt' and not bodies:
# We need at least one body
raise
if 'txt' in bodies:
msg = EmailMultiAlternatives(subject,
bodies['txt'],
from_email,
[email])
if 'html' in bodies:
msg.attach_alternative(bodies['html'], 'text/html')
else:
msg = EmailMessage(subject,
bodies['html'],
from_email,
[email])
msg.content_subtype = 'html' # Main content is now text/html
return msg
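    # For reference (derived from render_mail above): a template_prefix of
    # 'account/email/email_confirmation' is rendered from
    # 'account/email/email_confirmation_subject.txt' plus the corresponding
    # '_message.txt' and/or '_message.html' templates.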
def send_mail(self, template_prefix, email, context):
msg = self.render_mail(template_prefix, email, context)
msg.send()
def get_login_redirect_url(self, request):
"""
Returns the default URL to redirect to after logging in. Note
that URLs passed explicitly (e.g. by passing along a `next`
GET parameter) take precedence over the value returned here.
"""
assert is_authenticated(request.user)
url = getattr(settings, "LOGIN_REDIRECT_URLNAME", None)
if url:
warnings.warn("LOGIN_REDIRECT_URLNAME is deprecated, simply"
" use LOGIN_REDIRECT_URL with a URL name",
DeprecationWarning)
else:
url = settings.LOGIN_REDIRECT_URL
return resolve_url(url)
def get_logout_redirect_url(self, request):
"""
Returns the URL to redirect to after the user logs out. Note that
        this method is also invoked if you attempt to log out while no user
        is logged in. Therefore, request.user is not guaranteed to be an
authenticated user.
"""
return resolve_url(app_settings.LOGOUT_REDIRECT_URL)
def get_email_confirmation_redirect_url(self, request):
"""
The URL to return to after successful e-mail confirmation.
"""
if is_authenticated(request.user):
if app_settings.EMAIL_CONFIRMATION_AUTHENTICATED_REDIRECT_URL:
return \
app_settings.EMAIL_CONFIRMATION_AUTHENTICATED_REDIRECT_URL
else:
return self.get_login_redirect_url(request)
else:
return app_settings.EMAIL_CONFIRMATION_ANONYMOUS_REDIRECT_URL
def is_open_for_signup(self, request):
"""
Checks whether or not the site is open for signups.
        Next to simply returning True/False you can also intervene in the
        regular flow by raising an ImmediateHttpResponse.
"""
return True
def new_user(self, request):
"""
Instantiates a new User instance.
"""
user = get_user_model()()
return user
def populate_username(self, request, user):
"""
Fills in a valid username, if required and missing. If the
username is already present it is assumed to be valid
(unique).
"""
from .utils import user_username, user_email, user_field
first_name = user_field(user, 'first_name')
last_name = user_field(user, 'last_name')
email = user_email(user)
username = user_username(user)
if app_settings.USER_MODEL_USERNAME_FIELD:
user_username(
user,
username or self.generate_unique_username([
first_name,
last_name,
email,
username,
'user']))
def generate_unique_username(self, txts, regex=None):
return generate_unique_username(txts, regex)
def save_user(self, request, user, form, commit=True):
"""
Saves a new `User` instance using information provided in the
signup form.
"""
from .utils import user_username, user_email, user_field
data = form.cleaned_data
first_name = data.get('first_name')
last_name = data.get('last_name')
email = data.get('email')
username = data.get('username')
user_email(user, email)
user_username(user, username)
if first_name:
user_field(user, 'first_name', first_name)
if last_name:
user_field(user, 'last_name', last_name)
if 'password1' in data:
user.set_password(data["password1"])
else:
user.set_unusable_password()
self.populate_username(request, user)
if commit:
            # Ability not to commit makes it easier to derive from
            # this adapter by adding extra processing before the save.
user.save()
return user
def clean_username(self, username, shallow=False):
"""
Validates the username. You can hook into this if you want to
(dynamically) restrict what usernames can be chosen.
"""
for validator in app_settings.USERNAME_VALIDATORS:
validator(username)
# TODO: Add regexp support to USERNAME_BLACKLIST
username_blacklist_lower = [ub.lower()
for ub in app_settings.USERNAME_BLACKLIST]
if username.lower() in username_blacklist_lower:
raise forms.ValidationError(
self.error_messages['username_blacklisted'])
# Skipping database lookups when shallow is True, needed for unique
# username generation.
if not shallow:
from .utils import filter_users_by_username
if filter_users_by_username(username).exists():
user_model = get_user_model()
username_field = app_settings.USER_MODEL_USERNAME_FIELD
error_message = user_model._meta.get_field(
username_field).error_messages.get('unique')
if not error_message:
error_message = self.error_messages['username_taken']
raise forms.ValidationError(error_message)
return username
def clean_email(self, email):
"""
Validates an email value. You can hook into this if you want to
(dynamically) restrict what email addresses can be chosen.
"""
return email
def clean_password(self, password, user=None):
"""
Validates a password. You can hook into this if you want to
        restrict the allowed password choices.
"""
min_length = app_settings.PASSWORD_MIN_LENGTH
if min_length and len(password) < min_length:
raise forms.ValidationError(_("Password must be a minimum of {0} "
"characters.").format(min_length))
validate_password(password, user)
return password
def validate_unique_email(self, email):
if email_address_exists(email):
raise forms.ValidationError(self.error_messages['email_taken'])
return email
def add_message(self, request, level, message_template,
message_context=None, extra_tags=''):
"""
Wrapper of `django.contrib.messages.add_message`, that reads
the message text from a template.
"""
if 'django.contrib.messages' in settings.INSTALLED_APPS:
try:
if message_context is None:
message_context = {}
message = render_to_string(message_template,
message_context).strip()
if message:
messages.add_message(request, level, message,
extra_tags=extra_tags)
except TemplateDoesNotExist:
pass
def ajax_response(self, request, response, redirect_to=None, form=None,
data=None):
resp = {}
status = response.status_code
if redirect_to:
status = 200
resp['location'] = redirect_to
if form:
if request.method == 'POST':
if form.is_valid():
status = 200
else:
status = 400
else:
status = 200
resp['form'] = self.ajax_response_form(form)
if hasattr(response, 'render'):
response.render()
resp['html'] = response.content.decode('utf8')
if data is not None:
resp['data'] = data
return HttpResponse(json.dumps(resp),
status=status,
content_type='application/json')
def ajax_response_form(self, form):
form_spec = {
'fields': {},
'field_order': [],
'errors': form.non_field_errors()
}
for field in form:
field_spec = {
'label': force_text(field.label),
'value': field.value(),
'help_text': force_text(field.help_text),
'errors': [
force_text(e) for e in field.errors
],
'widget': {
'attrs': {
k: force_text(v)
for k, v in field.field.widget.attrs.items()
}
}
}
form_spec['fields'][field.html_name] = field_spec
form_spec['field_order'].append(field.html_name)
return form_spec
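    # Shape of the spec built above (the field name 'email' is assumed here
    # purely for illustration):
    # {'fields': {'email': {'label': ..., 'value': ..., 'help_text': ...,
    #                       'errors': [...], 'widget': {'attrs': {...}}}},
    #  'field_order': ['email'],
    #  'errors': [...]}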
def login(self, request, user):
# HACK: This is not nice. The proper Django way is to use an
# authentication backend
if not hasattr(user, 'backend'):
from .auth_backends import AuthenticationBackend
backends = get_backends()
backend = None
for b in backends:
if isinstance(b, AuthenticationBackend):
# prefer our own backend
backend = b
break
elif not backend and hasattr(b, 'get_user'):
                    # Pick the first valid one
backend = b
backend_path = '.'.join([backend.__module__,
backend.__class__.__name__])
user.backend = backend_path
django_login(request, user)
def logout(self, request):
user = request.user
django_logout(request)
user_logged_out.send(
sender=user.__class__,
request=request,
user=user)
def confirm_email(self, request, email_address):
"""
Marks the email address as confirmed on the db
"""
email_address.verified = True
email_address.set_as_primary(conditional=True)
email_address.save()
def set_password(self, user, password):
user.set_password(password)
user.save()
def get_user_search_fields(self):
user = get_user_model()()
return filter(lambda a: a and hasattr(user, a),
[app_settings.USER_MODEL_USERNAME_FIELD,
'first_name', 'last_name', 'email'])
def is_safe_url(self, url):
from django.utils.http import is_safe_url
return is_safe_url(url)
def get_email_confirmation_url(self, request, emailconfirmation):
"""Constructs the email confirmation (activation) url.
Note that if you have architected your system such that email
confirmations are sent outside of the request context `request`
can be `None` here.
"""
url = reverse(
"account_confirm_email",
args=[emailconfirmation.key])
ret = build_absolute_uri(
request,
url)
return ret
def send_confirmation_mail(self, request, emailconfirmation, signup):
current_site = get_current_site(request)
activate_url = self.get_email_confirmation_url(
request,
emailconfirmation)
ctx = {
"user": emailconfirmation.email_address.user,
"activate_url": activate_url,
"current_site": current_site,
"key": emailconfirmation.key,
}
if signup:
email_template = 'account/email/email_confirmation_signup'
else:
email_template = 'account/email/email_confirmation'
self.send_mail(email_template,
emailconfirmation.email_address.email,
ctx)
def respond_user_inactive(self, request, user):
return HttpResponseRedirect(
reverse('account_inactive'))
def respond_email_verification_sent(self, request, user):
return HttpResponseRedirect(
reverse('account_email_verification_sent'))
def _get_login_attempts_cache_key(self, request, **credentials):
site = get_current_site(request)
login = credentials.get('email', credentials.get('username', ''))
login_key = hashlib.sha256(login.encode('utf8')).hexdigest()
return 'allauth/login_attempts@{site_id}:{login}'.format(
site_id=site.pk,
login=login_key)
def pre_authenticate(self, request, **credentials):
if app_settings.LOGIN_ATTEMPTS_LIMIT:
cache_key = self._get_login_attempts_cache_key(
request, **credentials)
login_data = cache.get(cache_key, None)
if login_data:
dt = timezone.now()
current_attempt_time = time.mktime(dt.timetuple())
if (len(login_data) >= app_settings.LOGIN_ATTEMPTS_LIMIT and
current_attempt_time < (
login_data[-1] +
app_settings.LOGIN_ATTEMPTS_TIMEOUT)):
raise forms.ValidationError(
self.error_messages['too_many_login_attempts'])
def authenticate(self, request, **credentials):
"""Only authenticates, does not actually login. See `login`"""
self.pre_authenticate(request, **credentials)
user = authenticate(**credentials)
if user:
cache_key = self._get_login_attempts_cache_key(
request, **credentials)
cache.delete(cache_key)
else:
self.authentication_failed(request, **credentials)
return user
def authentication_failed(self, request, **credentials):
cache_key = self._get_login_attempts_cache_key(request, **credentials)
data = cache.get(cache_key, [])
dt = timezone.now()
data.append(time.mktime(dt.timetuple()))
cache.set(cache_key, data, app_settings.LOGIN_ATTEMPTS_TIMEOUT)
def get_adapter(request=None):
return import_attribute(app_settings.ADAPTER)(request)
|
|
# Copyright (c) 2015 Mellanox Technologies, Ltd
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import collections
from oslo_concurrency import lockutils
from oslo_log import log as logging
import six
from neutron._i18n import _LW, _LI
from neutron.agent.l2 import agent_extension
from neutron.api.rpc.callbacks.consumer import registry
from neutron.api.rpc.callbacks import events
from neutron.api.rpc.callbacks import resources
from neutron.api.rpc.handlers import resources_rpc
from neutron.common import exceptions
from neutron import manager
LOG = logging.getLogger(__name__)
@six.add_metaclass(abc.ABCMeta)
class QosAgentDriver(object):
"""Defines stable abstract interface for QoS Agent Driver.
QoS Agent driver defines the interface to be implemented by Agent
for applying QoS Rules on a port.
"""
# Each QoS driver should define the set of rule types that it supports, and
    # corresponding handlers that have the following names:
#
# create_<type>
# update_<type>
# delete_<type>
#
# where <type> is one of VALID_RULE_TYPES
SUPPORTED_RULES = set()
@abc.abstractmethod
def initialize(self):
"""Perform QoS agent driver initialization.
"""
def create(self, port, qos_policy):
"""Apply QoS rules on port for the first time.
:param port: port object.
:param qos_policy: the QoS policy to be applied on port.
"""
self._handle_update_create_rules('create', port, qos_policy)
def update(self, port, qos_policy):
"""Apply QoS rules on port.
:param port: port object.
:param qos_policy: the QoS policy to be applied on port.
"""
self._handle_update_create_rules('update', port, qos_policy)
def delete(self, port, qos_policy=None):
"""Remove QoS rules from port.
:param port: port object.
:param qos_policy: the QoS policy to be removed from port.
"""
if qos_policy is None:
rule_types = self.SUPPORTED_RULES
else:
rule_types = set(
[rule.rule_type
for rule in self._iterate_rules(qos_policy.rules)])
for rule_type in rule_types:
self._handle_rule_delete(port, rule_type)
def _iterate_rules(self, rules):
for rule in rules:
rule_type = rule.rule_type
if rule_type in self.SUPPORTED_RULES:
yield rule
else:
LOG.warning(_LW('Unsupported QoS rule type for %(rule_id)s: '
'%(rule_type)s; skipping'),
{'rule_id': rule.id, 'rule_type': rule_type})
def _handle_rule_delete(self, port, rule_type):
handler_name = "".join(("delete_", rule_type))
handler = getattr(self, handler_name)
handler(port)
def _handle_update_create_rules(self, action, port, qos_policy):
for rule in self._iterate_rules(qos_policy.rules):
if rule.should_apply_to_port(port):
handler_name = "".join((action, "_", rule.rule_type))
handler = getattr(self, handler_name)
handler(port, rule)
else:
LOG.debug("Port %(port)s excluded from QoS rule %(rule)s",
{'port': port, 'rule': rule.id})
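# A minimal, illustrative driver sketch (not part of the upstream API) showing
# the handler naming convention documented on QosAgentDriver. The rule type
# name 'bandwidth_limit' is assumed here purely for demonstration.
class _LoggingQosDriverSketch(QosAgentDriver):
    SUPPORTED_RULES = {'bandwidth_limit'}

    def initialize(self):
        # Nothing to set up for this logging-only sketch.
        pass

    def create_bandwidth_limit(self, port, rule):
        LOG.debug("create bandwidth_limit on port %(port)s: %(rule)s",
                  {'port': port, 'rule': rule.id})

    def update_bandwidth_limit(self, port, rule):
        LOG.debug("update bandwidth_limit on port %(port)s: %(rule)s",
                  {'port': port, 'rule': rule.id})

    def delete_bandwidth_limit(self, port):
        LOG.debug("delete bandwidth_limit on port %s", port)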
class PortPolicyMap(object):
def __init__(self):
# we cannot use a dict of sets here because port dicts are not hashable
self.qos_policy_ports = collections.defaultdict(dict)
self.known_policies = {}
self.port_policies = {}
def get_ports(self, policy):
return self.qos_policy_ports[policy.id].values()
def get_policy(self, policy_id):
return self.known_policies.get(policy_id)
def update_policy(self, policy):
self.known_policies[policy.id] = policy
def has_policy_changed(self, port, policy_id):
return self.port_policies.get(port['port_id']) != policy_id
def get_port_policy(self, port):
policy_id = self.port_policies.get(port['port_id'])
if policy_id:
return self.get_policy(policy_id)
def set_port_policy(self, port, policy):
"""Attach a port to policy and return any previous policy on port."""
port_id = port['port_id']
old_policy = self.get_port_policy(port)
self.known_policies[policy.id] = policy
self.port_policies[port_id] = policy.id
self.qos_policy_ports[policy.id][port_id] = port
if old_policy and old_policy.id != policy.id:
del self.qos_policy_ports[old_policy.id][port_id]
return old_policy
def clean_by_port(self, port):
"""Detach port from policy and cleanup data we don't need anymore."""
port_id = port['port_id']
if port_id in self.port_policies:
del self.port_policies[port_id]
for qos_policy_id, port_dict in self.qos_policy_ports.items():
if port_id in port_dict:
del port_dict[port_id]
if not port_dict:
self._clean_policy_info(qos_policy_id)
return
raise exceptions.PortNotFound(port_id=port['port_id'])
def _clean_policy_info(self, qos_policy_id):
del self.qos_policy_ports[qos_policy_id]
del self.known_policies[qos_policy_id]
class QosAgentExtension(agent_extension.AgentCoreResourceExtension):
SUPPORTED_RESOURCES = [resources.QOS_POLICY]
def initialize(self, connection, driver_type):
"""Perform Agent Extension initialization.
"""
self.resource_rpc = resources_rpc.ResourcesPullRpcApi()
self.qos_driver = manager.NeutronManager.load_class_for_provider(
'neutron.qos.agent_drivers', driver_type)()
self.qos_driver.initialize()
self.policy_map = PortPolicyMap()
registry.subscribe(self._handle_notification, resources.QOS_POLICY)
self._register_rpc_consumers(connection)
def _register_rpc_consumers(self, connection):
endpoints = [resources_rpc.ResourcesPushRpcCallback()]
for resource_type in self.SUPPORTED_RESOURCES:
# we assume that neutron-server always broadcasts the latest
# version known to the agent
topic = resources_rpc.resource_type_versioned_topic(resource_type)
connection.create_consumer(topic, endpoints, fanout=True)
@lockutils.synchronized('qos-port')
def _handle_notification(self, qos_policy, event_type):
        # The server does not allow removing a policy that is attached to any
        # port, so we ignore DELETED events. Also, if we receive a CREATED
# event for a policy, it means that there are no ports so far that are
# attached to it. That's why we are interested in UPDATED events only
if event_type == events.UPDATED:
self._process_update_policy(qos_policy)
@lockutils.synchronized('qos-port')
def handle_port(self, context, port):
"""Handle agent QoS extension for port.
This method applies a new policy to a port using the QoS driver.
Update events are handled in _handle_notification.
"""
port_id = port['port_id']
port_qos_policy_id = port.get('qos_policy_id')
network_qos_policy_id = port.get('network_qos_policy_id')
qos_policy_id = port_qos_policy_id or network_qos_policy_id
if qos_policy_id is None:
self._process_reset_port(port)
return
if not self.policy_map.has_policy_changed(port, qos_policy_id):
return
qos_policy = self.resource_rpc.pull(
context, resources.QOS_POLICY, qos_policy_id)
if qos_policy is None:
LOG.info(_LI("QoS policy %(qos_policy_id)s applied to port "
"%(port_id)s is not available on server, "
"it has been deleted. Skipping."),
{'qos_policy_id': qos_policy_id, 'port_id': port_id})
self._process_reset_port(port)
else:
old_qos_policy = self.policy_map.set_port_policy(port, qos_policy)
if old_qos_policy:
self.qos_driver.delete(port, old_qos_policy)
self.qos_driver.update(port, qos_policy)
else:
self.qos_driver.create(port, qos_policy)
def delete_port(self, context, port):
self._process_reset_port(port)
def _policy_rules_modified(self, old_policy, policy):
return not (len(old_policy.rules) == len(policy.rules) and
all(i in old_policy.rules for i in policy.rules))
def _process_update_policy(self, qos_policy):
old_qos_policy = self.policy_map.get_policy(qos_policy.id)
if old_qos_policy:
if self._policy_rules_modified(old_qos_policy, qos_policy):
for port in self.policy_map.get_ports(qos_policy):
#NOTE(QoS): for now, just reflush the rules on the port.
# Later, we may want to apply the difference
# between the old and new rule lists.
self.qos_driver.delete(port, old_qos_policy)
self.qos_driver.update(port, qos_policy)
self.policy_map.update_policy(qos_policy)
def _process_reset_port(self, port):
try:
self.policy_map.clean_by_port(port)
self.qos_driver.delete(port)
except exceptions.PortNotFound:
LOG.info(_LI("QoS extension did have no information about the "
"port %s that we were trying to reset"),
port['port_id'])
|
|
# Copyright 2017-2018 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from mock import Mock
from c7n.config import Bag
from c7n.exceptions import PolicyValidationError
from c7n.resources import aws
from c7n import output
from .common import BaseTest
class TraceDoc(Bag):
def serialize(self):
return json.dumps(dict(self))
class OutputXrayTracerTest(BaseTest):
def test_emitter(self):
emitter = aws.XrayEmitter()
emitter.client = m = Mock()
doc = TraceDoc({'good': 'morning'})
emitter.send_entity(doc)
emitter.flush()
m.put_trace_segments.assert_called_with(
TraceSegmentDocuments=[doc.serialize()])
class ArnResolverTest(BaseTest):
table = [
('arn:aws:waf::123456789012:webacl/3bffd3ed-fa2e-445e-869f-a6a7cf153fd3', 'waf'),
('arn:aws:waf-regional:us-east-1:123456789012:webacl/3bffd3ed-fa2e-445e-869f-a6a7cf153fd3', 'waf-regional'), # NOQA
('arn:aws:acm:region:account-id:certificate/certificate-id', 'acm-certificate'),
('arn:aws:cloudwatch:region:account-id:alarm:alarm-name', 'alarm'),
('arn:aws:logs:us-east-1:123456789012:log-group:my-log-group', 'log-group'),
('arn:aws:codebuild:us-east-1:123456789012:project/my-demo-project', 'codebuild'),
('arn:aws:cognito-idp:region:account-id:userpool/user-pool-id', 'user-pool'),
('arn:aws:config:region:account-id:config-rule/config-rule-id', 'config-rule'),
('arn:aws:directconnect:us-east-1:123456789012:dxcon/dxcon-fgase048', 'directconnect'),
('arn:aws:dynamodb:region:account-id:table/tablename', 'dynamodb-table'),
('arn:aws:ec2:region:account-id:instance/instance-id', 'ec2'),
('arn:aws:ec2:region:account-id:vpc/vpc-id', 'vpc'),
('arn:aws:ds:region:account-id:directory/directoryId', 'directory'),
('arn:aws:elasticbeanstalk:region:account-id:application/applicationname', 'elasticbeanstalk'), # NOQA
('arn:aws:ecr:region:account-id:repository/repository-name', 'ecr'),
('arn:aws:elasticache:us-east-2:123456789012:cluster:myCluster', 'cache-cluster'),
('arn:aws:es:us-east-1:123456789012:domain/streaming-logs', 'elasticsearch'),
('arn:aws:elasticfilesystem:region:account-id:file-system/file-system-id', 'efs'),
('arn:aws:ecs:us-east-1:123456789012:task/my-cluster/1abf0f6d-a411-4033-b8eb-a4eed3ad252a', 'ecs-task'), # NOQA
('arn:aws:autoscaling:region:account-id:autoScalingGroup:groupid:autoScalingGroupName/groupfriendlyname', 'asg') # NOQA
]
def test_arn_meta(self):
legacy = set()
for k, v in aws.AWS.resources.items():
if getattr(v.resource_type, 'type', None) is not None:
legacy.add(k)
self.assertFalse(legacy)
def test_arn_resolver(self):
for value, expected in self.table:
# load the resource types to enable resolution.
aws.AWS.get_resource_types(("aws.%s" % expected,))
arn = aws.Arn.parse(value)
result = aws.ArnResolver.resolve_type(arn)
self.assertEqual(result, expected)
class ArnTest(BaseTest):
def test_eb_arn(self):
arn = aws.Arn.parse(
'arn:aws:elasticbeanstalk:us-east-1:123456789012:environment/My App/MyEnv')
self.assertEqual(arn.service, 'elasticbeanstalk')
self.assertEqual(arn.account_id, '123456789012')
self.assertEqual(arn.region, 'us-east-1')
self.assertEqual(arn.resource_type, 'environment')
self.assertEqual(arn.resource, 'My App/MyEnv')
def test_iam_arn(self):
arn = aws.Arn.parse(
'arn:aws:iam::123456789012:user/David')
self.assertEqual(arn.service, 'iam')
self.assertEqual(arn.resource, 'David')
self.assertEqual(arn.resource_type, 'user')
def test_rds_arn(self):
arn = aws.Arn.parse(
'arn:aws:rds:eu-west-1:123456789012:db:mysql-db')
self.assertEqual(arn.resource_type, 'db')
self.assertEqual(arn.resource, 'mysql-db')
self.assertEqual(arn.region, 'eu-west-1')
def test_s3_key_arn(self):
arn = aws.Arn.parse(
'arn:aws:s3:::my_corporate_bucket/exampleobject.png')
self.assertEqual(arn.resource, 'my_corporate_bucket/exampleobject.png')
class UtilTest(BaseTest):
def test_default_account_id_assume(self):
config = Bag(assume_role='arn:aws:iam::644160558196:role/custodian-mu', account_id=None)
aws._default_account_id(config)
self.assertEqual(config.account_id, '644160558196')
def test_validate(self):
self.assertRaises(
PolicyValidationError,
aws.shape_validate,
{'X': 1},
'AwsSecurityFindingFilters',
'securityhub')
self.assertEqual(
aws.shape_validate(
{'Id': [{'Value': 'abc', 'Comparison': 'EQUALS'}]},
'AwsSecurityFindingFilters',
'securityhub'),
None)
class TracerTest(BaseTest):
def test_tracer(self):
session_factory = self.replay_flight_data('output-xray-trace')
policy = Bag(name='test', resource_type='ec2')
ctx = Bag(
policy=policy,
session_factory=session_factory,
options=Bag(account_id='644160558196'))
ctx.get_metadata = lambda *args: {}
config = Bag()
tracer = aws.XrayTracer(ctx, config)
with tracer:
try:
with tracer.subsegment('testing') as w:
raise ValueError()
except ValueError:
pass
self.assertNotEqual(w.cause, {})
class OutputMetricsTest(BaseTest):
def test_metrics_destination_dims(self):
tmetrics = []
class Metrics(aws.MetricsOutput):
def _put_metrics(self, ns, metrics):
tmetrics.extend(metrics)
conf = Bag({'region': 'us-east-2', 'scheme': 'aws', 'netloc': 'master'})
ctx = Bag(session_factory=None,
options=Bag(account_id='001100', region='us-east-1'),
policy=Bag(name='test', resource_type='ec2'))
moutput = Metrics(ctx, conf)
moutput.put_metric('Calories', 400, 'Count', Scope='Policy', Food='Pizza')
moutput.flush()
tmetrics[0].pop('Timestamp')
self.assertEqual(tmetrics, [{
'Dimensions': [{'Name': 'Policy', 'Value': 'test'},
{'Name': 'ResType', 'Value': 'ec2'},
{'Name': 'Food', 'Value': 'Pizza'},
{'Name': 'Region', 'Value': 'us-east-1'},
{'Name': 'Account', 'Value': '001100'}],
'MetricName': 'Calories',
'Unit': 'Count',
'Value': 400}])
def test_metrics(self):
session_factory = self.replay_flight_data('output-aws-metrics')
policy = Bag(name='test', resource_type='ec2')
ctx = Bag(session_factory=session_factory, policy=policy)
sink = output.metrics_outputs.select('aws', ctx)
self.assertTrue(isinstance(sink, aws.MetricsOutput))
sink.put_metric('ResourceCount', 101, 'Count')
sink.flush()
class OutputLogsTest(BaseTest):
# cloud watch logging
def test_default_log_group(self):
ctx = Bag(session_factory=None,
options=Bag(account_id='001100', region='us-east-1'),
policy=Bag(name='test', resource_type='ec2'))
log_output = output.log_outputs.select('custodian/xyz', ctx)
self.assertEqual(log_output.log_group, 'custodian/xyz')
self.assertEqual(log_output.construct_stream_name(), 'test')
log_output = output.log_outputs.select('/custodian/xyz/', ctx)
self.assertEqual(log_output.log_group, 'custodian/xyz')
log_output = output.log_outputs.select('aws://somewhere/out/there', ctx)
self.assertEqual(log_output.log_group, 'somewhere/out/there')
log_output = output.log_outputs.select('aws:///somewhere/out', ctx)
self.assertEqual(log_output.log_group, 'somewhere/out')
log_output = output.log_outputs.select('aws://somewhere', ctx)
self.assertEqual(log_output.log_group, 'somewhere')
log_output = output.log_outputs.select(
"aws:///somewhere/out?stream={region}/{policy}", ctx)
self.assertEqual(log_output.log_group, 'somewhere/out')
self.assertEqual(log_output.construct_stream_name(), 'us-east-1/test')
def test_master_log_handler(self):
session_factory = self.replay_flight_data('test_log_handler')
ctx = Bag(session_factory=session_factory,
options=Bag(account_id='001100', region='us-east-1'),
policy=Bag(name='test', resource_type='ec2'))
log_output = output.log_outputs.select(
'aws://master/custodian?region=us-east-2', ctx)
stream = log_output.get_handler()
self.assertTrue(stream.log_group == 'custodian')
self.assertTrue(stream.log_stream == '001100/us-east-1/test')
def test_stream_override(self):
session_factory = self.replay_flight_data(
'test_log_stream_override')
ctx = Bag(session_factory=session_factory,
options=Bag(account_id='001100', region='us-east-1'),
policy=Bag(name='test', resource_type='ec2'))
log_output = output.log_outputs.select(
'aws://master/custodian?region=us-east-2&stream=testing', ctx)
stream = log_output.get_handler()
self.assertTrue(stream.log_stream == 'testing')
|
|
import textwrap
from collections import defaultdict
from .annotation import Measure
try:
keys = dict.viewkeys
except Exception:
# Py3k
keys = dict.keys
MEASURES = {
# Mention evaluation measures
'strong_mention_match': Measure(['span']),
'strong_typed_mention_match': Measure(['span', 'type']),
'strong_linked_mention_match': Measure(['span'], 'is_linked'),
# Linking evaluation measures
'strong_link_match': Measure(['span', 'kbid'], 'is_linked'),
'strong_nil_match': Measure(['span'], 'is_nil'),
'strong_all_match': Measure(['span', 'kbid']),
'strong_typed_link_match': Measure(['span', 'type', 'kbid'],
'is_linked'),
'strong_typed_nil_match': Measure(['span', 'type'], 'is_nil'),
'strong_typed_all_match': Measure(['span', 'type', 'kbid']),
# Document-level tagging evaluation measures
'entity_match': Measure(['docid', 'kbid'], 'is_linked'),
# Clustering evaluation measures
'muc': Measure(['span'], agg='muc'),
'b_cubed': Measure(['span'], agg='b_cubed'),
'b_cubed_plus': Measure(['span', 'kbid'], agg='b_cubed'),
'entity_ceaf': Measure(['span'], agg='entity_ceaf'),
'mention_ceaf': Measure(['span'], agg='mention_ceaf'),
'mention_ceaf_plus': Measure(['span', 'kbid'], agg='mention_ceaf'),
'typed_mention_ceaf': Measure(['span', 'type'], agg='mention_ceaf'),
'typed_mention_ceaf_plus': Measure(['span', 'type', 'kbid',], agg='mention_ceaf'),
'pairwise': Measure(['span'], agg='pairwise'),
# Cai & Strube (2010) evaluation measures
#'cs_b_cubed': Measure(['span'], agg='cs_b_cubed'),
#'entity_cs_ceaf': Measure(['span'], agg='entity_cs_ceaf'),
#'mention_cs_ceaf': Measure(['span'], agg='mention_cs_ceaf'),
}
# Configuration constants
ALL_MEASURES = 'all'
ALL_TAGGING = 'all-tagging'
ALL_COREF = 'all-coref'
TAC09_MEASURES = 'tac09' # used 2009-2010
TAC11_MEASURES = 'tac11' # used 2011-2013
TAC14_MEASURES = 'tac14' # used 2014-
TMP_MEASURES = 'tmp'
CORNOLTI_WWW13_MEASURES = 'cornolti'
HACHEY_ACL14_MEASURES = 'hachey'
LUO_MEASURES = 'luo'
CAI_STRUBE_MEASURES = 'cai'
MEASURE_SETS = {
ALL_MEASURES: [
ALL_TAGGING,
ALL_COREF,
],
ALL_TAGGING: {
'strong_mention_match',
'strong_typed_mention_match',
'strong_linked_mention_match',
'strong_link_match',
'strong_nil_match',
'strong_all_match',
'strong_typed_link_match',
'strong_typed_nil_match',
'strong_typed_all_match',
'entity_match',
},
ALL_COREF: {
'mention_ceaf',
'entity_ceaf',
'b_cubed',
'pairwise',
'muc',
#'mention_cs_ceaf',
#'entity_cs_ceaf',
#'cs_b_cubed',
'b_cubed_plus',
'typed_mention_ceaf',
'mention_ceaf_plus',
'typed_mention_ceaf_plus',
},
CORNOLTI_WWW13_MEASURES: [
'strong_linked_mention_match',
'strong_link_match',
'entity_match',
],
HACHEY_ACL14_MEASURES: [
'strong_mention_match', # full ner
'strong_linked_mention_match',
'strong_link_match',
'entity_match',
],
LUO_MEASURES: [
'muc',
'b_cubed',
'mention_ceaf',
'entity_ceaf',
],
#CAI_STRUBE_MEASURES: [
# 'cs_b_cubed',
# 'entity_cs_ceaf',
# 'mention_cs_ceaf',
#],
TAC09_MEASURES: [
'strong_link_match', # recall equivalent to kb accuracy
'strong_nil_match', # recall equivalent to nil accuracy
'strong_all_match', # equivalent to overall accuracy
],
TAC11_MEASURES: [
TAC09_MEASURES,
'b_cubed', # standard b-cubed
'b_cubed_plus', # also requires correct resolution
],
TAC14_MEASURES: [
TAC11_MEASURES,
# Assess mention recognition in TAC 2014 end-to-end task
'strong_mention_match', # span must match
'strong_typed_mention_match', # span and type must match
# Assess recognition and disambiguation in TAC 2014 end-to-end task
'strong_typed_all_match', # span, type and resolution/nil must match
# Assess recognition and clustering in TAC 2014 end-to-end task
'mention_ceaf', # prf based on cluster alignment
'typed_mention_ceaf', # same requiring type match
],
TMP_MEASURES: [
'mention_ceaf',
'entity_ceaf',
'pairwise',
],
}
DEFAULT_MEASURE_SET = ALL_MEASURES
DEFAULT_MEASURE = 'strong_all_match'
def _expand(measures):
if isinstance(measures, str):
if measures in MEASURE_SETS:
measures = MEASURE_SETS[measures]
else:
return [measures]
if isinstance(measures, Measure):
return [measures]
if len(measures) == 1:
return _expand(measures[0])
return [m for group in measures for m in _expand(group)]
def parse_measures(in_measures, incl_clustering=True, allow_unknown=False):
# flatten nested sequences and expand group names
measures = _expand(in_measures)
# remove duplicates while maintaining order
seen = set()
measures = [seen.add(m) or m
for m in measures if m not in seen]
    # TODO: make sure these resolve to valid measures
not_found = set(measures) - keys(MEASURES)
invalid = []
for m in not_found:
try:
get_measure(m)
except Exception:
invalid.append(m)
if invalid and not allow_unknown:
raise ValueError('Could not resolve measures: '
'{}'.format(sorted(not_found)))
if not incl_clustering:
measures = [m for m in measures
if not get_measure(m).is_clustering]
# TODO: remove clustering metrics given flag
# raise error if empty
if not measures:
msg = 'Could not resolve {!r} to any measures.'.format(in_measures)
if not incl_clustering:
msg += ' Clustering measures have been excluded.'
raise ValueError(msg)
return measures
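# Example of the expansion performed by parse_measures (derived from
# MEASURE_SETS above; ordering follows the group definitions):
#   parse_measures(['tac11']) -> ['strong_link_match', 'strong_nil_match',
#                                 'strong_all_match', 'b_cubed', 'b_cubed_plus']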
def get_measure(name):
if isinstance(name, Measure):
return name
if name.count(':') == 2:
return Measure.from_string(name)
return MEASURES[name]
def get_measure_choices():
return sorted(MEASURE_SETS.keys()) + sorted(MEASURES.keys())
MEASURE_HELP = ('Which measures to use: specify a name (or group name) from '
'the list-measures command. This flag may be repeated.')
def _wrap(text):
return '\n'.join(textwrap.wrap(text))
class ListMeasures(object):
"""List measures schemes available for evaluation"""
def __init__(self, measures=None):
self.measures = measures
def __call__(self):
measures = parse_measures(self.measures or get_measure_choices())
header = ['Name', 'Aggregate', 'Filter', 'Key Fields', 'In groups']
rows = [header]
set_membership = defaultdict(list)
for set_name, measure_set in sorted(MEASURE_SETS.items()):
for name in parse_measures(measure_set):
set_membership[name].append(set_name)
for name in sorted(measures):
measure = get_measure(name)
rows.append((name, measure.agg, str(measure.filter),
'+'.join(measure.key),
', '.join(set_membership[name])))
col_widths = [max(len(row[i]) for row in rows)
for i in range(len(header))]
rows.insert(1, ['=' * w for w in col_widths])
fmt = '\t'.join('{:%ds}' % w for w in col_widths[:-1]) + '\t{}'
        ret = _wrap('The following lists possible values for --measure (-m) '
                    'in evaluate, confidence and significance. The name from '
                    'each row or the name of a group may be used.') + '\n\n'
ret += '\n'.join(fmt.format(*row) for row in rows)
ret += '\n\nDefault evaluation group: {}'.format(DEFAULT_MEASURE_SET)
ret += '\n\n'
ret += _wrap('In all measures, a set of tuples corresponding to Key '
'Fields is produced from annotations matching Filter. '
'Aggregation with sets-micro compares gold and predicted '
'tuple sets directly; coreference aggregates compare '
'tuples clustered by their assigned entity ID.')
ret += '\n\n'
ret += ('A measure may be specified explicitly. Thus:\n'
' {}\nmay be entered as\n {}'
''.format(DEFAULT_MEASURE, get_measure(DEFAULT_MEASURE)))
return ret
@classmethod
def add_arguments(cls, p):
p.add_argument('-m', '--measure', dest='measures', action='append',
metavar='NAME', help=MEASURE_HELP)
p.set_defaults(cls=cls)
return p
|
|
# -*- coding: utf-8 -*-
import os
import time
import socket
import tempfile
import pytz
from requests import codes
from datetime import datetime
from django.test import TestCase
from mailme.exceptions import FeedCriticalError, TimeoutError, FeedNotFoundError
from mailme.services.feed import FeedService
from mailme.models.feed import (
Feed,
FEED_NOT_FOUND_ERROR,
FEED_GENERIC_ERROR,
FEED_TIMEDOUT_ERROR
)
data_path = os.path.join(os.path.dirname(__file__), "data")
FEED_YIELDING_404 = "http://de.yahoo.com/rssmhwqgiuyeqwgeqygqfyf"
def get_data_filename(name):
return os.sep.join([data_path, name])
def get_data_file(name, mode="r"):
with open(get_data_filename(name), mode) as file:
return file.read()
class TestFeedDuplication(TestCase):
def setUp(self):
self.service = FeedService()
self.feeds = list(map(get_data_filename,
["t%d.xml" % i for i in reversed(list(range(1, 6)))]))
def assertImportFeed(self, filename, name):
feed_obj = self.service.handle(filename, local=True, force=True)
self.assertEqual(feed_obj.title, name)
return feed_obj
def test_does_not_duplicate_posts(self):
spool = tempfile.mktemp(suffix="ut", prefix="mailme")
def test_file(filename):
try:
with open(filename) as r:
with open(spool, "w") as w:
w.write(r.read())
return self.assertImportFeed(spool,
"Saturday Morning Breakfast Cereal (updated daily)")
finally:
os.unlink(spool)
for i in range(40):
for filename in self.feeds:
f = test_file(filename)
posts = list(f.get_posts())
self.assertEqual(len(posts), 4)
seen = set()
for post in posts:
self.assertNotIn(post.title, seen)
seen.add(post.title)
self.assertEqual(posts[0].title, "November 23, 2009")
self.assertEqual(posts[1].title, "November 22, 2009")
self.assertEqual(posts[2].title, "November 21, 2009")
self.assertEqual(posts[3].title, "November 20, 2009")
class TestFeedService(TestCase):
def setUp(self):
self.feed = get_data_filename("example_feed.rss")
self.empty_feed = get_data_filename("example_empty_feed.rss")
self.feed_content_encoded = get_data_filename(
"example_feed-content_encoded.rss")
self.service = FeedService()
def test_import_empty_feed(self):
feed = self.empty_feed
service = self.service
feed_obj = service.handle(feed, local=True)
self.assertEqual(feed_obj.title, "(no title)")
self.assertEqual(feed_obj.get_post_count(), 0, "feed has 0 items")
self.assertEqual(feed_obj.feed_url, feed, "feed url is filename")
def test_handle(self):
feed = self.feed
service = self.service
feed_obj = service.handle(feed, local=True)
self.assertEqual(feed_obj.title, "Lifehacker", "feed title is set")
self.assertEqual(feed_obj.get_post_count(), 20, "feed has 20 items")
self.assertEqual(feed_obj.feed_url, feed, "feed url is filename")
self.assertTrue(feed_obj.description, "feed has description")
posts = feed_obj.get_posts()
first_post = posts[0]
self.assertEqual(first_post.guid, "Lifehacker-5147831")
self.assertEqual(first_post.updated,
datetime(2009, 2, 6, 4, 30, 0, 0,
tzinfo=pytz.timezone('US/Pacific')).astimezone(
pytz.utc))
for post in posts:
self.assertTrue(post.guid, "post has GUID")
self.assertTrue(post.title, "post has title")
if hasattr(post, "enclosures"):
self.assertEqual(post.enclosures.count(), 0,
"post has no enclosures")
self.assertTrue(post.link, "post has link")
self.assertTrue(post.content)
feed_obj2 = service.handle(feed)
self.assertTrue(feed_obj2.date_last_refresh,
"Refresh date set")
self.assertEqual(feed_obj2.id, feed_obj.id,
"Importing same feed doesn't create new object")
self.assertEqual(feed_obj2.get_post_count(), 20,
"Re-importing feed doesn't give duplicates")
def test_404_feed_raises_ok(self):
service = self.service
with self.assertRaises(FeedNotFoundError):
service.handle(FEED_YIELDING_404)
def test_missing_date_feed(self):
"""Try to reproduce the constant date update bug."""
feed = get_data_filename("buggy_dates.rss")
service = self.service
feed_obj = service.handle(feed, local=True)
last_post = feed_obj.get_posts()[0]
feed2 = get_data_filename("buggy_dates.rss")
feed_obj2 = service.handle(feed2, local=True)
last_post2 = feed_obj2.get_posts()[0]
# if the post is updated, we should see a different datetime
self.assertEqual(last_post.updated, last_post2.updated)
def test_missing_date_and_guid_feed(self):
"""Try to reproduce the constant date update bug."""
feed = get_data_filename("buggy_dates_and_guid.rss")
service = self.service
feed_obj = service.handle(feed, local=True)
last_post = feed_obj.get_posts()[0]
feed2 = get_data_filename("buggy_dates_and_guid.rss")
feed_obj2 = service.handle(feed2, local=True)
last_post2 = feed_obj2.get_posts()[0]
# if the post is updated, we should see a different datetime
self.assertEqual(last_post.updated, last_post2.updated)
def test_socket_timeout(self):
class _TimeoutFeedService(FeedService):
def parse_feed(self, *args, **kwargs):
raise socket.timeout(.1)
feed2 = "foofoobar.rss"
with self.assertRaises(TimeoutError):
_TimeoutFeedService().handle(feed2, local=True)
self.assertTrue(Feed.objects.get(feed_url=feed2))
def test_update_feed_socket_timeout(self):
class _TimeoutFeedService(FeedService):
def parse_feed(self, *args, **kwargs):
raise socket.timeout(.1)
service = FeedService(update_on_import=False)
feed_obj = service.handle(self.feed, local=True, force=True)
sservice = _TimeoutFeedService()
feed_obj = sservice.update(feed_obj=feed_obj, force=True)
self.assertEqual(feed_obj.last_error, FEED_TIMEDOUT_ERROR)
def test_update_feed_parse_feed_raises(self):
class _RaisingFeedService(FeedService):
def parse_feed(self, *args, **kwargs):
raise KeyError("foo")
service = FeedService(update_on_import=False)
feed_obj = service.handle(self.feed, local=True, force=True)
sservice = _RaisingFeedService()
feed_obj = sservice.update(feed_obj=feed_obj, force=True)
self.assertEqual(feed_obj.last_error, FEED_GENERIC_ERROR)
def test_update_feed_not_modified(self):
class _Verify(FeedService):
def parse_feed(self, *args, **kwargs):
feed = super(_Verify, self).parse_feed(*args, **kwargs)
feed["status"] = codes.NOT_MODIFIED
return feed
service = FeedService(update_on_import=False)
feed_obj = service.handle(self.feed, local=True, force=True)
self.assertTrue(_Verify().update(feed_obj=feed_obj, force=False))
def test_update_feed_error_status(self):
class _Verify(FeedService):
def parse_feed(self, *args, **kwargs):
return {"status": codes.NOT_FOUND}
service = FeedService(update_on_import=False)
feed_obj = service.handle(self.feed, local=True, force=True)
feed_obj = _Verify().update(feed_obj=feed_obj, force=True)
self.assertEqual(feed_obj.last_error, FEED_NOT_FOUND_ERROR)
def test_parse_feed_raises(self):
class _RaisingFeedService(FeedService):
def parse_feed(self, *args, **kwargs):
raise KeyError("foo")
feed2 = "foo1foo2bar3.rss"
with self.assertRaises(FeedCriticalError):
_RaisingFeedService().handle(feed2, local=True)
with self.assertRaises(Feed.DoesNotExist):
Feed.objects.get(feed_url=feed2)
def test_http_modified(self):
now = time.localtime()
now_as_dt = datetime.fromtimestamp(time.mktime(
now)).replace(tzinfo=pytz.utc)
class _Verify(FeedService):
def parse_feed(self, *args, **kwargs):
feed = super(_Verify, self).parse_feed(*args, **kwargs)
feed.modified = now
return feed
i = _Verify()
feed = i.handle(self.feed, local=True, force=True)
self.assertEqual(feed.http_last_modified, now_as_dt)
def test_update_on_import(self):
class _Verify(FeedService):
updated = False
def update(self, *args, **kwargs):
self.updated = True
imp1 = _Verify(update_on_import=False)
imp1.handle(self.feed, local=True, force=True)
self.assertFalse(imp1.updated)
imp2 = _Verify(update_on_import=True)
imp2.handle(self.feed, local=True, force=True)
self.assertTrue(imp2.updated)
def test_entry_limit(self):
feed = self.feed
service = FeedService(post_limit=10)
feed_obj = service.handle(feed, local=True)
self.assertEqual(feed_obj.title, "Lifehacker", "feed title is set")
self.assertEqual(feed_obj.get_post_count(), 10, "feed has 10 items")
def test_double_post_bug(self):
"""With some feeds, the posts seem to be imported several times."""
feed_str = get_data_filename("lefigaro.rss")
imported_feed = self.service.handle(feed_str, local=True,
force=True)
post_count = imported_feed.post_set.count()
imported_feed = self.service.handle(feed_str, local=True,
force=True)
self.assertEqual(imported_feed.post_set.count(), post_count,
"Posts seems to be imported twice.")
|
|
# -*- encoding: utf-8 -*-
# This file is part of Tryton. The COPYRIGHT file at the top level of
# this repository contains the full copyright notices and license terms.
'''
Inherit stock for endicia API
'''
from decimal import Decimal, ROUND_UP
import base64
import math
import logging
from endicia import ShippingLabelAPI, LabelRequest, RefundRequestAPI, \
BuyingPostageAPI, Element, CalculatingPostageAPI
from endicia.tools import objectify_response, get_images
from endicia.exceptions import RequestError
from trytond.model import Workflow, ModelView, fields
from trytond.wizard import Wizard, StateView, Button
from trytond.transaction import Transaction
from trytond.pool import Pool, PoolMeta
from trytond.pyson import Eval, Bool
from trytond.rpc import RPC
from .sale import ENDICIA_PACKAGE_TYPES, MAILPIECE_SHAPES
__metaclass__ = PoolMeta
__all__ = [
'ShipmentOut', 'ShippingEndicia', 'GenerateShippingLabel',
'EndiciaRefundRequestWizardView', 'EndiciaRefundRequestWizard',
'BuyPostageWizardView', 'BuyPostageWizard',
]
STATES = {
'readonly': Eval('state') == 'done',
}
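# Fields using this `states` dict become read-only once the shipment is
# in the `done` state.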
logger = logging.getLogger(__name__)
quantize_2_decimal = lambda v: Decimal(v).quantize(
Decimal('.01'), rounding=ROUND_UP
)
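# Illustrative examples (not from the original source); the helper always
# rounds up to two decimal places:
#   quantize_2_decimal(12.341)  -> Decimal('12.35')
#   quantize_2_decimal('12.34') -> Decimal('12.34')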
class ShipmentOut:
"Shipment Out"
__name__ = 'stock.shipment.out'
endicia_mailclass = fields.Many2One(
'endicia.mailclass', 'MailClass', states=STATES, depends=['state']
)
endicia_mailpiece_shape = fields.Selection(
MAILPIECE_SHAPES, 'Endicia MailPiece Shape', states=STATES,
depends=['state']
)
endicia_shipment_bag = fields.Many2One(
'endicia.shipment.bag', 'Endicia Shipment Bag')
endicia_label_subtype = fields.Selection([
('None', 'None'),
('Integrated', 'Integrated')
], 'Label Subtype', states=STATES, depends=['state'])
endicia_integrated_form_type = fields.Selection([
(None, ''),
('Form2976', 'Form2976(Same as CN22)'),
('Form2976A', 'Form2976(Same as CP72)'),
], 'Integrated Form Type', states=STATES, depends=['state'])
endicia_include_postage = fields.Boolean(
'Include Postage ?', states=STATES, depends=['state']
)
endicia_package_type = fields.Selection(
ENDICIA_PACKAGE_TYPES, 'Package Content Type',
states=STATES, depends=['state']
)
is_endicia_shipping = fields.Function(
fields.Boolean('Is Endicia Shipping?', readonly=True),
'get_is_endicia_shipping'
)
endicia_refunded = fields.Boolean('Refunded ?', readonly=True)
@classmethod
def view_attributes(cls):
return super(ShipmentOut, cls).view_attributes() + [
('//page[@id="endicia"]', 'states', {
'invisible': ~Bool(Eval('is_endicia_shipping'))
})]
def _get_weight_uom(self):
"""
Returns the weight UOM used by Endicia
"""
UOM = Pool().get('product.uom')
if self.is_endicia_shipping:
# Endicia by default uses this uom
return UOM.search([('symbol', '=', 'oz')])[0]
return super(ShipmentOut, self)._get_weight_uom()
@staticmethod
def default_endicia_mailclass():
Config = Pool().get('sale.configuration')
config = Config(1)
return config.endicia_mailclass and config.endicia_mailclass.id or None
@staticmethod
def default_endicia_label_subtype():
Config = Pool().get('sale.configuration')
config = Config(1)
return config.endicia_label_subtype
@staticmethod
def default_endicia_integrated_form_type():
Config = Pool().get('sale.configuration')
config = Config(1)
return config.endicia_integrated_form_type
@staticmethod
def default_endicia_include_postage():
Config = Pool().get('sale.configuration')
config = Config(1)
return config.endicia_include_postage
@staticmethod
def default_endicia_package_type():
Config = Pool().get('sale.configuration')
config = Config(1)
return config.endicia_package_type
@classmethod
def __setup__(cls):
super(ShipmentOut, cls).__setup__()
# There can be cases when people might want to use a different
# shipment carrier at any state except `done`.
cls.carrier.states = STATES
cls._error_messages.update({
'mailclass_missing':
'Select a mailclass to ship using Endicia [USPS].',
'error_label': 'Error in generating label "%s"',
'tracking_number_already_present':
'Tracking Number is already present for this shipment.',
'invalid_state': 'Labels can only be generated when the '
'shipment is in the Packed or Done state.',
'wrong_carrier': 'Carrier for selected shipment is not Endicia',
})
cls.__rpc__.update({
'make_endicia_labels': RPC(readonly=False, instantiate=0),
'get_endicia_shipping_cost': RPC(readonly=False, instantiate=0),
})
@fields.depends('is_endicia_shipping', 'carrier')
def on_change_carrier(self):
super(ShipmentOut, self).on_change_carrier()
self.is_endicia_shipping = self.carrier and \
self.carrier.carrier_cost_method == 'endicia' or None
@classmethod
@ModelView.button
@Workflow.transition('done')
def done(cls, shipments):
"""
Add endicia shipments to an open bag
"""
EndiciaShipmentBag = Pool().get('endicia.shipment.bag')
super(ShipmentOut, cls).done(shipments)
endicia_shipments = filter(
lambda s: s.carrier and s.carrier.carrier_cost_method == 'endicia',
shipments
)
if not endicia_shipments:
return
with Transaction().set_user(0):
bag = EndiciaShipmentBag.get_bag()
cls.write(endicia_shipments, {
'endicia_shipment_bag': bag
})
def _get_carrier_context(self):
"Pass shipment in the context"
context = super(ShipmentOut, self)._get_carrier_context()
if not self.carrier.carrier_cost_method == 'endicia':
return context
context = context.copy()
context['shipment'] = self.id
return context
def _update_endicia_item_details(self, request):
'''
Adding customs items/info and form descriptions to the request
:param request: Shipping Label API request instance
'''
User = Pool().get('res.user')
UOM = Pool().get('product.uom')
user = User(Transaction().user)
uom_oz, = UOM.search([('symbol', '=', 'oz')])
customsitems = []
value = 0
for move in self.outgoing_moves:
if move.quantity <= 0:
continue
weight_oz = quantize_2_decimal(move.get_weight(uom_oz))
new_item = [
Element('Description', move.product.name[0:50]),
Element('Quantity', int(math.ceil(move.quantity))),
Element('Weight', weight_oz),
Element('Value', quantize_2_decimal(
move.product.customs_value_used
)),
]
customsitems.append(Element('CustomsItem', new_item))
value += float(move.product.customs_value_used) * move.quantity
description = ','.join([
move.product.name for move in self.outgoing_moves
])
request.add_data({
'customsinfo': [
Element('ContentsExplanation', description[:25]),
Element('CustomsItems', customsitems),
Element('ContentsType', self.endicia_package_type)
]
})
total_value = sum(map(
lambda move: float(move.product.cost_price) * move.quantity,
self.outgoing_moves
))
request.add_data({
'ContentsType': self.endicia_package_type,
'Value': quantize_2_decimal(total_value),
'Description': description[:50],
'CustomsCertify': 'TRUE', # TODO: Should this be part of config ?
'CustomsSigner': user.name,
})
def make_endicia_labels(self):
"""
Make labels for the given shipment
:return: Tracking number as string
"""
Attachment = Pool().get('ir.attachment')
if self.state not in ('packed', 'done'):
self.raise_user_error('invalid_state')
if not (
self.carrier and
self.carrier.carrier_cost_method == 'endicia'
):
self.raise_user_error('wrong_carrier')
if self.tracking_number:
self.raise_user_error('tracking_number_already_present')
if not self.endicia_mailclass:
self.raise_user_error('mailclass_missing')
mailclass = self.endicia_mailclass.value
label_request = LabelRequest(
Test=self.carrier.endicia_is_test and 'YES' or 'NO',
LabelType=(
'International' in mailclass
) and 'International' or 'Default',
# TODO: Probably the following have to be configurable
ImageFormat="PNG",
LabelSize="6x4",
ImageResolution="203",
ImageRotation="Rotate270",
)
# Endicia only supports 1 decimal place in weight
weight_oz = "%.1f" % self.weight
shipping_label_request = ShippingLabelAPI(
label_request=label_request,
weight_oz=weight_oz,
partner_customer_id=self.delivery_address.id,
partner_transaction_id=self.id,
mail_class=mailclass,
accountid=self.carrier.endicia_account_id,
requesterid=self.carrier.endicia_requester_id,
passphrase=self.carrier.endicia_passphrase,
test=self.carrier.endicia_is_test,
)
shipping_label_request.mailpieceshape = self.endicia_mailpiece_shape
from_address = self._get_ship_from_address()
shipping_label_request.add_data(
from_address.address_to_endicia_from_address().data
)
shipping_label_request.add_data(
self.delivery_address.address_to_endicia_to_address().data
)
shipping_label_request.add_data({
'LabelSubtype': self.endicia_label_subtype,
'IncludePostage':
self.endicia_include_postage and 'TRUE' or 'FALSE',
})
if self.endicia_label_subtype != 'None':
# Integrated form type needs to be sent for international shipments
shipping_label_request.add_data({
'IntegratedFormType': self.endicia_integrated_form_type,
})
if self.delivery_address.country.code != 'US':
self._update_endicia_item_details(shipping_label_request)
# Logging.
logger.debug(
'Making Shipping Label Request for '
'Shipment ID: {0} and Carrier ID: {1}'
.format(self.id, self.carrier.id)
)
logger.debug('--------SHIPPING LABEL REQUEST--------')
logger.debug(str(shipping_label_request.to_xml()))
logger.debug('--------END REQUEST--------')
try:
response = shipping_label_request.send_request()
except RequestError, error:
self.raise_user_error('error_label', error_args=(error,))
else:
result = objectify_response(response)
# Logging.
logger.debug('--------SHIPPING LABEL RESPONSE--------')
logger.debug(str(response))
logger.debug('--------END RESPONSE--------')
tracking_number = result.TrackingNumber.pyval
self.__class__.write([self], {
'tracking_number': unicode(result.TrackingNumber.pyval),
'cost': Decimal(str(result.FinalPostage.pyval)),
})
# Save images as attachments
images = get_images(result)
for (id, label) in images:
Attachment.create([{
'name': "%s_%s_USPS-Endicia.png" % (tracking_number, id),
'data': buffer(base64.decodestring(label)),
'resource': '%s,%s' % (self.__name__, self.id)
}])
return str(tracking_number)
def get_endicia_shipping_cost(self):
"""Returns the calculated shipping cost as sent by endicia
:returns: The shipping cost in USD
"""
if not self.endicia_mailclass:
self.raise_user_error('mailclass_missing')
from_address = self._get_ship_from_address()
to_address = self.delivery_address
to_zip = to_address.zip
if to_address.country and to_address.country.code == 'US':
# Domestic
to_zip = to_zip and to_zip[:5]
else:
# International
to_zip = to_zip and to_zip[:15]
# Endicia only supports 1 decimal place in weight
weight_oz = "%.1f" % self.weight
calculate_postage_request = CalculatingPostageAPI(
mailclass=self.endicia_mailclass.value,
weightoz=weight_oz,
from_postal_code=from_address.zip and from_address.zip[:5],
to_postal_code=to_zip,
to_country_code=to_address.country and to_address.country.code,
accountid=self.carrier.endicia_account_id,
requesterid=self.carrier.endicia_requester_id,
passphrase=self.carrier.endicia_passphrase,
test=self.carrier.endicia_is_test,
)
calculate_postage_request.mailpieceshape = self.endicia_mailpiece_shape
# Logging.
logger.debug(
'Making Postage Request for '
'Shipment ID: {0} and Carrier ID: {1}'
.format(self.id, self.carrier.id)
)
logger.debug('--------POSTAGE REQUEST--------')
logger.debug(str(calculate_postage_request.to_xml()))
logger.debug('--------END REQUEST--------')
try:
response = calculate_postage_request.send_request()
except RequestError, error:
self.raise_user_error('error_label', error_args=(error,))
# Logging.
logger.debug('--------POSTAGE RESPONSE--------')
logger.debug(str(response))
logger.debug('--------END RESPONSE--------')
return Decimal(
objectify_response(response).PostagePrice.get('TotalAmount')
)
def get_is_endicia_shipping(self, name):
"""
Check if the shipment is to be shipped via Endicia (USPS)
"""
return self.carrier and self.carrier.carrier_cost_method == 'endicia'
class EndiciaRefundRequestWizardView(ModelView):
"""Endicia Refund Wizard View
"""
__name__ = 'endicia.refund.wizard.view'
refund_status = fields.Text('Refund Status', readonly=True,)
refund_approved = fields.Boolean('Refund Approved ?', readonly=True,)
class EndiciaRefundRequestWizard(Wizard):
"""A wizard to cancel the current shipment and refund the cost
"""
__name__ = 'endicia.refund.wizard'
start = StateView(
'endicia.refund.wizard.view',
'shipping_endicia.endicia_refund_wizard_view_form', [
Button('Cancel', 'end', 'tryton-cancel'),
Button('Request Refund', 'request_refund', 'tryton-ok',
default=True),
]
)
request_refund = StateView(
'endicia.refund.wizard.view',
'shipping_endicia.endicia_refund_wizard_view_form', [
Button('OK', 'end', 'tryton-ok', default=True),
]
)
@classmethod
def __setup__(self):
super(EndiciaRefundRequestWizard, self).__setup__()
self._error_messages.update({
'wrong_carrier': 'Carrier for selected shipment is not Endicia'
})
def default_request_refund(self, data):
"""Requests the refund for the current shipment record
and returns the response.
"""
Shipment = Pool().get('stock.shipment.out')
shipments = Shipment.browse(Transaction().context['active_ids'])
# PICNumber is the argument name expected by the Endicia API,
# so it's better to use the same name here for clarity
pic_numbers = []
for shipment in shipments:
if not (
shipment.carrier and
shipment.carrier.carrier_cost_method == 'endicia'
):
self.raise_user_error('wrong_carrier')
pic_numbers.append(shipment.tracking_number)
test = shipment.carrier.endicia_is_test and 'Y' or 'N'
refund_request = RefundRequestAPI(
pic_numbers=pic_numbers,
accountid=shipment.carrier.endicia_account_id,
requesterid=shipment.carrier.endicia_requester_id,
passphrase=shipment.carrier.endicia_passphrase,
test=test,
)
try:
response = refund_request.send_request()
except RequestError, error:
self.raise_user_error('error_label', error_args=(error,))
result = objectify_response(response)
if str(result.RefundList.PICNumber.IsApproved) == 'YES':
refund_approved = True
# If refund is approved, then set the state of record
# as cancel/refund
shipment.__class__.write(
[shipment], {'endicia_refunded': True}
)
else:
refund_approved = False
default = {
'refund_status': unicode(result.RefundList.PICNumber.ErrorMsg),
'refund_approved': refund_approved
}
return default
class BuyPostageWizardView(ModelView):
"""Buy Postage Wizard View
"""
__name__ = 'buy.postage.wizard.view'
amount = fields.Numeric('Amount in USD', required=True)
response = fields.Text('Response', readonly=True)
carrier = fields.Many2One(
"carrier", "Carrier", required=True,
domain=[('carrier_cost_method', '=', 'endicia')]
)
class BuyPostageWizard(Wizard):
"""Buy Postage Wizard
"""
__name__ = 'buy.postage.wizard'
start = StateView(
'buy.postage.wizard.view',
'shipping_endicia.endicia_buy_postage_wizard_view_form', [
Button('Cancel', 'end', 'tryton-cancel'),
Button('Buy Postage', 'buy_postage', 'tryton-ok',
default=True),
]
)
buy_postage = StateView(
'buy.postage.wizard.view',
'shipping_endicia.endicia_buy_postage_wizard_view_form', [
Button('OK', 'end', 'tryton-ok', default=True),
]
)
def default_buy_postage(self, data):
"""
Buy postage for the carrier account selected in the wizard
"""
default = {}
buy_postage_api = BuyingPostageAPI(
request_id=Transaction().user,
recredit_amount=self.start.amount,
requesterid=self.start.carrier.endicia_requester_id,
accountid=self.start.carrier.endicia_account_id,
passphrase=self.start.carrier.endicia_passphrase,
test=self.start.carrier.endicia_is_test,
)
try:
response = buy_postage_api.send_request()
except RequestError, error:
self.raise_user_error('error_label', error_args=(error,))
result = objectify_response(response)
default['amount'] = self.start.amount
default['carrier'] = self.start.carrier
default['response'] = str(result.ErrorMessage) \
if hasattr(result, 'ErrorMessage') else 'Success'
return default
class ShippingEndicia(ModelView):
'Endicia Configuration'
__name__ = 'shipping.label.endicia'
endicia_mailclass = fields.Many2One(
'endicia.mailclass', 'MailClass', required=True
)
endicia_mailpiece_shape = fields.Selection(
MAILPIECE_SHAPES, 'Endicia MailPiece Shape'
)
endicia_label_subtype = fields.Selection([
('None', 'None'),
('Integrated', 'Integrated')
], 'Label Subtype')
endicia_integrated_form_type = fields.Selection([
(None, ''),
('Form2976', 'Form2976(Same as CN22)'),
('Form2976A', 'Form2976(Same as CP72)'),
], 'Integrated Form Type')
endicia_include_postage = fields.Boolean('Include Postage ?')
endicia_package_type = fields.Selection(
ENDICIA_PACKAGE_TYPES, 'Package Content Type'
)
endicia_refunded = fields.Boolean('Refunded ?', readonly=True)
class GenerateShippingLabel(Wizard):
'Generate Labels'
__name__ = 'shipping.label'
endicia_config = StateView(
'shipping.label.endicia',
'shipping_endicia.shipping_endicia_configuration_view_form',
[
Button('Back', 'start', 'tryton-go-previous'),
Button('Continue', 'generate', 'tryton-go-next', default=True),
]
)
def default_endicia_config(self, data):
Config = Pool().get('sale.configuration')
config = Config(1)
shipment = self.start.shipment
return {
'endicia_mailclass': (
shipment.endicia_mailclass and shipment.endicia_mailclass.id
) or (
config.endicia_mailclass and config.endicia_mailclass.id
) or None,
'endicia_mailpiece_shape': (
shipment.endicia_mailpiece_shape or
config.endicia_mailpiece_shape
),
'endicia_label_subtype': (
shipment.endicia_label_subtype or config.endicia_label_subtype
),
'endicia_integrated_form_type': (
shipment.endicia_integrated_form_type or
config.endicia_integrated_form_type
),
'endicia_include_postage': (
shipment.endicia_include_postage or
config.endicia_include_postage
),
'endicia_package_type': (
shipment.endicia_package_type or config.endicia_package_type
)
}
def transition_next(self):
state = super(GenerateShippingLabel, self).transition_next()
if self.start.carrier.carrier_cost_method == 'endicia':
return 'endicia_config'
return state
def update_shipment(self):
shipment = self.start.shipment
if self.start.carrier.carrier_cost_method == 'endicia':
shipment.endicia_mailclass = self.endicia_config.endicia_mailclass
shipment.endicia_mailpiece_shape = \
self.endicia_config.endicia_mailpiece_shape
shipment.endicia_label_subtype = \
self.endicia_config.endicia_label_subtype
shipment.endicia_integrated_form_type = \
self.endicia_config.endicia_integrated_form_type
shipment.endicia_package_type = \
self.endicia_config.endicia_package_type
shipment.endicia_include_postage = \
self.endicia_config.endicia_include_postage
return super(GenerateShippingLabel, self).update_shipment()
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Library of dtypes (Tensor element types)."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from mxconsole.protobuf import types_pb2
class DType(object):
"""Represents the type of the elements in a `Tensor`.
The following `DType` objects are defined:
* `tf.float16`: 16-bit half-precision floating-point.
* `tf.float32`: 32-bit single-precision floating-point.
* `tf.float64`: 64-bit double-precision floating-point.
* `tf.bfloat16`: 16-bit truncated floating-point.
* `tf.complex64`: 64-bit single-precision complex.
* `tf.complex128`: 128-bit double-precision complex.
* `tf.int8`: 8-bit signed integer.
* `tf.uint8`: 8-bit unsigned integer.
* `tf.uint16`: 16-bit unsigned integer.
* `tf.int16`: 16-bit signed integer.
* `tf.int32`: 32-bit signed integer.
* `tf.int64`: 64-bit signed integer.
* `tf.bool`: Boolean.
* `tf.string`: String.
* `tf.qint8`: Quantized 8-bit signed integer.
* `tf.quint8`: Quantized 8-bit unsigned integer.
* `tf.qint16`: Quantized 16-bit signed integer.
* `tf.quint16`: Quantized 16-bit unsigned integer.
* `tf.qint32`: Quantized 32-bit signed integer.
* `tf.resource`: Handle to a mutable resource.
In addition, variants of these types with the `_ref` suffix are
defined for reference-typed tensors.
The `tf.as_dtype()` function converts numpy types and string type
names to a `DType` object.
"""
def __init__(self, type_enum):
"""Creates a new `DataType`.
NOTE(mrry): In normal circumstances, you should not need to
construct a `DataType` object directly. Instead, use the
`tf.as_dtype()` function.
Args:
type_enum: A `types_pb2.DataType` enum value.
Raises:
TypeError: If `type_enum` is not a valid `types_pb2.DataType`.
"""
# TODO(mrry): Make the necessary changes (using __new__) to ensure
# that calling this returns one of the interned values.
type_enum = int(type_enum)
if (type_enum not in types_pb2.DataType.values()
or type_enum == types_pb2.DT_INVALID):
raise TypeError(
"type_enum is not a valid types_pb2.DataType: %s" % type_enum)
self._type_enum = type_enum
@property
def _is_ref_dtype(self):
"""Returns `True` if this `DType` represents a reference type."""
return self._type_enum > 100
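# NOTE: in types.proto the reference variant of each dtype is assigned the
# base enum value + 100, which is what the +/- 100 arithmetic here and in
# `_as_ref`/`base_dtype` relies on.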
@property
def _as_ref(self):
"""Returns a reference `DType` based on this `DType`."""
if self._is_ref_dtype:
return self
else:
return _INTERN_TABLE[self._type_enum + 100]
@property
def base_dtype(self):
"""Returns a non-reference `DType` based on this `DType`."""
if self._is_ref_dtype:
return _INTERN_TABLE[self._type_enum - 100]
else:
return self
@property
def real_dtype(self):
"""Returns the dtype correspond to this dtype's real part."""
base = self.base_dtype
if base == complex64:
return float32
elif base == complex128:
return float64
else:
return self
@property
def is_numpy_compatible(self):
return (self._type_enum != types_pb2.DT_RESOURCE and
self._type_enum != types_pb2.DT_RESOURCE_REF)
@property
def as_numpy_dtype(self):
"""Returns a `numpy.dtype` based on this `DType`."""
return _TF_TO_NP[self._type_enum]
@property
def as_datatype_enum(self):
"""Returns a `types_pb2.DataType` enum value based on this `DType`."""
return self._type_enum
@property
def is_bool(self):
"""Returns whether this is a boolean data type"""
return self.base_dtype == bool
@property
def is_integer(self):
"""Returns whether this is a (non-quantized) integer type."""
return (self.is_numpy_compatible and not self.is_quantized and
issubclass(self.as_numpy_dtype, np.integer))
@property
def is_floating(self):
"""Returns whether this is a (non-quantized, real) floating point type."""
return self.is_numpy_compatible and issubclass(self.as_numpy_dtype,
np.floating)
@property
def is_complex(self):
"""Returns whether this is a complex floating point type."""
return self.base_dtype in (complex64, complex128)
@property
def is_quantized(self):
"""Returns whether this is a quantized data type."""
return self.base_dtype in [qint8, quint8, qint16, quint16, qint32, bfloat16]
@property
def is_unsigned(self):
"""Returns whether this type is unsigned.
Non-numeric, unordered, and quantized types are not considered unsigned, and
this function returns `False`.
Returns:
Whether a `DType` is unsigned.
"""
try:
return self.min == 0
except TypeError:
return False
@property
def min(self):
"""Returns the minimum representable value in this data type.
Raises:
TypeError: if this is a non-numeric, unordered, or quantized type.
"""
if (self.is_quantized or self.base_dtype in
(bool, string, complex64, complex128)):
raise TypeError("Cannot find minimum value of %s." % self)
# there is no simple way to get the min value of a dtype, we have to check
# float and int types separately
try:
return np.finfo(self.as_numpy_dtype()).min
except:  # bare except, since the exceptions raised by finfo are not documented
try:
return np.iinfo(self.as_numpy_dtype()).min
except:
raise TypeError("Cannot find minimum value of %s." % self)
@property
def max(self):
"""Returns the maximum representable value in this data type.
Raises:
TypeError: if this is a non-numeric, unordered, or quantized type.
"""
if (self.is_quantized or self.base_dtype in
(bool, string, complex64, complex128)):
raise TypeError("Cannot find maximum value of %s." % self)
# there is no simple way to get the max value of a dtype, we have to check
# float and int types separately
try:
return np.finfo(self.as_numpy_dtype()).max
except:  # bare except, since the exceptions raised by finfo are not documented
try:
return np.iinfo(self.as_numpy_dtype()).max
except:
raise TypeError("Cannot find maximum value of %s." % self)
@property
def limits(self, clip_negative=True):
"""Return intensity limits, i.e. (min, max) tuple, of the dtype.
Args:
clip_negative : bool, optional
If True, clip the negative range (i.e. return 0 for min intensity)
even if the image dtype allows negative values.
Returns:
min, max : tuple
Lower and upper intensity limits.
"""
min, max = dtype_range[self.as_numpy_dtype]
if clip_negative:
min = 0
return min, max
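# NOTE: because `limits` is defined as a property it is always read without
# arguments, so `clip_negative` effectively keeps its default value of True.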
def is_compatible_with(self, other):
"""Returns True if the `other` DType will be converted to this DType.
The conversion rules are as follows:
```python
DType(T) .is_compatible_with(DType(T)) == True
DType(T) .is_compatible_with(DType(T).as_ref) == True
DType(T).as_ref.is_compatible_with(DType(T)) == False
DType(T).as_ref.is_compatible_with(DType(T).as_ref) == True
```
Args:
other: A `DType` (or object that may be converted to a `DType`).
Returns:
True if a Tensor of the `other` `DType` will be implicitly converted to
this `DType`.
"""
other = as_dtype(other)
return self._type_enum in (
other.as_datatype_enum, other.base_dtype.as_datatype_enum)
def __eq__(self, other):
"""Returns True iff this DType refers to the same type as `other`."""
if other is None:
return False
try:
dtype = as_dtype(other).as_datatype_enum
return self._type_enum == dtype # pylint: disable=protected-access
except TypeError:
return False
def __ne__(self, other):
"""Returns True iff self != other."""
return not self.__eq__(other)
@property
def name(self):
"""Returns the string name for this `DType`."""
return _TYPE_TO_STRING[self._type_enum]
def __str__(self):
return "<dtype: %r>" % self.name
def __repr__(self):
return "tf." + self.name
def __hash__(self):
return self._type_enum
@property
def size(self):
if self._type_enum == types_pb2.DT_RESOURCE:
return 1
return np.dtype(self.as_numpy_dtype).itemsize
# Define data type range of numpy dtype
dtype_range = {np.bool_: (False, True),
np.bool8: (False, True),
np.uint8: (0, 255),
np.uint16: (0, 65535),
np.int8: (-128, 127),
np.int16: (-32768, 32767),
np.int64: (-2**63, 2**63 - 1),
np.uint64: (0, 2**64 - 1),
np.int32: (-2**31, 2**31 - 1),
np.uint32: (0, 2**32 - 1),
np.float32: (-1, 1),
np.float64: (-1, 1)}
# Define standard wrappers for the types_pb2.DataType enum.
resource = DType(types_pb2.DT_RESOURCE)
float16 = DType(types_pb2.DT_HALF)
half = float16
float32 = DType(types_pb2.DT_FLOAT)
float64 = DType(types_pb2.DT_DOUBLE)
double = float64
int32 = DType(types_pb2.DT_INT32)
uint8 = DType(types_pb2.DT_UINT8)
uint16 = DType(types_pb2.DT_UINT16)
int16 = DType(types_pb2.DT_INT16)
int8 = DType(types_pb2.DT_INT8)
string = DType(types_pb2.DT_STRING)
complex64 = DType(types_pb2.DT_COMPLEX64)
complex128 = DType(types_pb2.DT_COMPLEX128)
int64 = DType(types_pb2.DT_INT64)
bool = DType(types_pb2.DT_BOOL)
qint8 = DType(types_pb2.DT_QINT8)
quint8 = DType(types_pb2.DT_QUINT8)
qint16 = DType(types_pb2.DT_QINT16)
quint16 = DType(types_pb2.DT_QUINT16)
qint32 = DType(types_pb2.DT_QINT32)
resource_ref = DType(types_pb2.DT_RESOURCE_REF)
bfloat16 = DType(types_pb2.DT_BFLOAT16)
float16_ref = DType(types_pb2.DT_HALF_REF)
half_ref = float16_ref
float32_ref = DType(types_pb2.DT_FLOAT_REF)
float64_ref = DType(types_pb2.DT_DOUBLE_REF)
double_ref = float64_ref
int32_ref = DType(types_pb2.DT_INT32_REF)
uint8_ref = DType(types_pb2.DT_UINT8_REF)
uint16_ref = DType(types_pb2.DT_UINT16_REF)
int16_ref = DType(types_pb2.DT_INT16_REF)
int8_ref = DType(types_pb2.DT_INT8_REF)
string_ref = DType(types_pb2.DT_STRING_REF)
complex64_ref = DType(types_pb2.DT_COMPLEX64_REF)
complex128_ref = DType(types_pb2.DT_COMPLEX128_REF)
int64_ref = DType(types_pb2.DT_INT64_REF)
bool_ref = DType(types_pb2.DT_BOOL_REF)
qint8_ref = DType(types_pb2.DT_QINT8_REF)
quint8_ref = DType(types_pb2.DT_QUINT8_REF)
qint16_ref = DType(types_pb2.DT_QINT16_REF)
quint16_ref = DType(types_pb2.DT_QUINT16_REF)
qint32_ref = DType(types_pb2.DT_QINT32_REF)
bfloat16_ref = DType(types_pb2.DT_BFLOAT16_REF)
# Maintain an intern table so that we don't have to create a large
# number of small objects.
_INTERN_TABLE = {
types_pb2.DT_HALF: float16,
types_pb2.DT_FLOAT: float32,
types_pb2.DT_DOUBLE: float64,
types_pb2.DT_INT32: int32,
types_pb2.DT_UINT8: uint8,
types_pb2.DT_UINT16: uint16,
types_pb2.DT_INT16: int16,
types_pb2.DT_INT8: int8,
types_pb2.DT_STRING: string,
types_pb2.DT_COMPLEX64: complex64,
types_pb2.DT_COMPLEX128: complex128,
types_pb2.DT_INT64: int64,
types_pb2.DT_BOOL: bool,
types_pb2.DT_QINT8: qint8,
types_pb2.DT_QUINT8: quint8,
types_pb2.DT_QINT16: qint16,
types_pb2.DT_QUINT16: quint16,
types_pb2.DT_QINT32: qint32,
types_pb2.DT_BFLOAT16: bfloat16,
types_pb2.DT_RESOURCE: resource,
types_pb2.DT_HALF_REF: float16_ref,
types_pb2.DT_FLOAT_REF: float32_ref,
types_pb2.DT_DOUBLE_REF: float64_ref,
types_pb2.DT_INT32_REF: int32_ref,
types_pb2.DT_UINT8_REF: uint8_ref,
types_pb2.DT_UINT16_REF: uint16_ref,
types_pb2.DT_INT16_REF: int16_ref,
types_pb2.DT_INT8_REF: int8_ref,
types_pb2.DT_STRING_REF: string_ref,
types_pb2.DT_COMPLEX64_REF: complex64_ref,
types_pb2.DT_COMPLEX128_REF: complex128_ref,
types_pb2.DT_INT64_REF: int64_ref,
types_pb2.DT_BOOL_REF: bool_ref,
types_pb2.DT_QINT8_REF: qint8_ref,
types_pb2.DT_QUINT8_REF: quint8_ref,
types_pb2.DT_QINT16_REF: qint16_ref,
types_pb2.DT_QUINT16_REF: quint16_ref,
types_pb2.DT_QINT32_REF: qint32_ref,
types_pb2.DT_BFLOAT16_REF: bfloat16_ref,
types_pb2.DT_RESOURCE_REF: resource_ref,
}
# Standard mappings between types_pb2.DataType values and string names.
_TYPE_TO_STRING = {
types_pb2.DT_HALF: "float16",
types_pb2.DT_FLOAT: "float32",
types_pb2.DT_DOUBLE: "float64",
types_pb2.DT_INT32: "int32",
types_pb2.DT_UINT8: "uint8",
types_pb2.DT_UINT16: "uint16",
types_pb2.DT_INT16: "int16",
types_pb2.DT_INT8: "int8",
types_pb2.DT_STRING: "string",
types_pb2.DT_COMPLEX64: "complex64",
types_pb2.DT_COMPLEX128: "complex128",
types_pb2.DT_INT64: "int64",
types_pb2.DT_BOOL: "bool",
types_pb2.DT_QINT8: "qint8",
types_pb2.DT_QUINT8: "quint8",
types_pb2.DT_QINT16: "qint16",
types_pb2.DT_QUINT16: "quint16",
types_pb2.DT_QINT32: "qint32",
types_pb2.DT_BFLOAT16: "bfloat16",
types_pb2.DT_RESOURCE: "resource",
types_pb2.DT_HALF_REF: "float16_ref",
types_pb2.DT_FLOAT_REF: "float32_ref",
types_pb2.DT_DOUBLE_REF: "float64_ref",
types_pb2.DT_INT32_REF: "int32_ref",
types_pb2.DT_UINT8_REF: "uint8_ref",
types_pb2.DT_UINT16_REF: "uint16_ref",
types_pb2.DT_INT16_REF: "int16_ref",
types_pb2.DT_INT8_REF: "int8_ref",
types_pb2.DT_STRING_REF: "string_ref",
types_pb2.DT_COMPLEX64_REF: "complex64_ref",
types_pb2.DT_COMPLEX128_REF: "complex128_ref",
types_pb2.DT_INT64_REF: "int64_ref",
types_pb2.DT_BOOL_REF: "bool_ref",
types_pb2.DT_QINT8_REF: "qint8_ref",
types_pb2.DT_QUINT8_REF: "quint8_ref",
types_pb2.DT_QINT16_REF: "qint16_ref",
types_pb2.DT_QUINT16_REF: "quint16_ref",
types_pb2.DT_QINT32_REF: "qint32_ref",
types_pb2.DT_BFLOAT16_REF: "bfloat16_ref",
types_pb2.DT_RESOURCE_REF: "resource_ref",
}
_STRING_TO_TF = {value: _INTERN_TABLE[key]
for key, value in _TYPE_TO_STRING.items()}
# Add non-canonical aliases.
_STRING_TO_TF["half"] = float16
_STRING_TO_TF["half_ref"] = float16_ref
_STRING_TO_TF["float"] = float32
_STRING_TO_TF["float_ref"] = float32_ref
_STRING_TO_TF["double"] = float64
_STRING_TO_TF["double_ref"] = float64_ref
# Numpy representation for quantized dtypes.
#
# These are magic strings that are used in the swig wrapper to identify
# quantized types.
# TODO(mrry,keveman): Investigate Numpy type registration to replace this
# hard-coding of names.
_np_qint8 = np.dtype([("qint8", np.int8, 1)])
_np_quint8 = np.dtype([("quint8", np.uint8, 1)])
_np_qint16 = np.dtype([("qint16", np.int16, 1)])
_np_quint16 = np.dtype([("quint16", np.uint16, 1)])
_np_qint32 = np.dtype([("qint32", np.int32, 1)])
# Standard mappings between types_pb2.DataType values and numpy.dtypes.
_NP_TO_TF = frozenset([
(np.float16, float16),
(np.float32, float32),
(np.float64, float64),
(np.int32, int32),
(np.int64, int64),
(np.uint8, uint8),
(np.uint16, uint16),
(np.int16, int16),
(np.int8, int8),
(np.complex64, complex64),
(np.complex128, complex128),
(np.object, string),
(np.bool, bool),
(_np_qint8, qint8),
(_np_quint8, quint8),
(_np_qint16, qint16),
(_np_quint16, quint16),
(_np_qint32, qint32),
# NOTE(touts): Intentionally no way to feed a DT_BFLOAT16.
])
_TF_TO_NP = {
types_pb2.DT_HALF: np.float16,
types_pb2.DT_FLOAT: np.float32,
types_pb2.DT_DOUBLE: np.float64,
types_pb2.DT_INT32: np.int32,
types_pb2.DT_UINT8: np.uint8,
types_pb2.DT_UINT16: np.uint16,
types_pb2.DT_INT16: np.int16,
types_pb2.DT_INT8: np.int8,
# NOTE(touts): For strings we use np.object as it supports variable length
# strings.
types_pb2.DT_STRING: np.object,
types_pb2.DT_COMPLEX64: np.complex64,
types_pb2.DT_COMPLEX128: np.complex128,
types_pb2.DT_INT64: np.int64,
types_pb2.DT_BOOL: np.bool,
types_pb2.DT_QINT8: _np_qint8,
types_pb2.DT_QUINT8: _np_quint8,
types_pb2.DT_QINT16: _np_qint16,
types_pb2.DT_QUINT16: _np_quint16,
types_pb2.DT_QINT32: _np_qint32,
types_pb2.DT_BFLOAT16: np.uint16,
# Ref types
types_pb2.DT_HALF_REF: np.float16,
types_pb2.DT_FLOAT_REF: np.float32,
types_pb2.DT_DOUBLE_REF: np.float64,
types_pb2.DT_INT32_REF: np.int32,
types_pb2.DT_UINT8_REF: np.uint8,
types_pb2.DT_UINT16_REF: np.uint16,
types_pb2.DT_INT16_REF: np.int16,
types_pb2.DT_INT8_REF: np.int8,
types_pb2.DT_STRING_REF: np.object,
types_pb2.DT_COMPLEX64_REF: np.complex64,
types_pb2.DT_COMPLEX128_REF: np.complex128,
types_pb2.DT_INT64_REF: np.int64,
types_pb2.DT_BOOL_REF: np.bool,
types_pb2.DT_QINT8_REF: _np_qint8,
types_pb2.DT_QUINT8_REF: _np_quint8,
types_pb2.DT_QINT16_REF: _np_qint16,
types_pb2.DT_QUINT16_REF: _np_quint16,
types_pb2.DT_QINT32_REF: _np_qint32,
types_pb2.DT_BFLOAT16_REF: np.uint16,
}
QUANTIZED_DTYPES = frozenset(
[qint8, quint8, qint16, quint16, qint32, qint8_ref, quint8_ref, qint16_ref,
quint16_ref, qint32_ref])
def as_dtype(type_value):
"""Converts the given `type_value` to a `DType`.
Args:
type_value: A value that can be converted to a `tf.DType`
object. This may currently be a `tf.DType` object, a
[`DataType` enum](https://www.tensorflow.org/code/tensorflow/core/framework/types.proto),
a string type name, or a `numpy.dtype`.
Returns:
A `DType` corresponding to `type_value`.
Raises:
TypeError: If `type_value` cannot be converted to a `DType`.
"""
if isinstance(type_value, DType):
return type_value
try:
return _INTERN_TABLE[type_value]
except KeyError:
pass
try:
return _STRING_TO_TF[type_value]
except KeyError:
pass
if isinstance(type_value, np.dtype):
# The numpy dtype for strings is variable length. We can not compare
# dtype with a single constant (np.string does not exist) to decide
# whether dtype is a "string" type. We need to compare dtype.type to be
# sure it's a string type.
if type_value.type == np.string_ or type_value.type == np.unicode_:
return string
for key, val in _NP_TO_TF:
try:
if key == type_value:
return val
except TypeError as e:
raise TypeError("Cannot convert {} to a dtype. {}".format(type_value, e))
raise TypeError(
"Cannot convert value %r to a TensorFlow DType." % type_value)
|
|
import csv
from datetime import datetime
class usageBlob(object):
""" A collection of usage data from findr_reduce.
"""
def __init__(self):
self.ids = []
self.commands = []
self.starts = []
self.ends = []
self.exit_statuses = []
self.cpu_times = []
self.walltimes = []
self.cores = []
self.virtual_memory = []
self.swap_memory = []
self.total_processes = []
self.max_concurrent_processes = []
self.bytes_read = []
self.bytes_written = []
self.workers_connected = []
self.workers_busy = []
self.workers_idle = []
self.workers_removed = []
self.tasks_completed = []
self.tasks_running = []
self.tasks_waiting = []
self.total_execute_times = []
def __repr__(self):
return '<usageBlob size=%s>' % (len(self.ids))
@staticmethod
def help():
print("#### usageBlob Properties: Stored as index-matched lists ####")
print("usageBlob.ids ...................... All job IDs in set")
print("usageBlob.commands ................. Commands used by jobs")
print("usageBlob.starts ................... Start times of jobs")
print("usageBlob.ends ..................... End times of jobs")
print("usageBlob.exit_statuses ............ Exit status of jobs")
print("usageBlob.cpu_times ................ CPU time required per job")
print("usageBlob.walltimes ................ Wall time required per job")
print("usageBlob.cores .................... Cores used per job execution")
print("usageBlob.virtual_memory ........... Virtual memory used per job execution")
print("usageBlob.swap_memory .............. Swap memory used per job execution")
print("usageBlob.total_processes .......... Total processes executed per job")
print("usageBlob.max_concurrent_processes . Max concurrent processes per job")
print("usageBlob.bytes_read ............... Bytes read per job")
print("usageBlob.bytes_written ............ Bytes written per job")
print("usageBlob.workers_connected ........ Number of workers connected at job conclusion")
print("usageBlob.workers_busy ............. Number of workers working at job conclusion")
print("usageBlob.workers_idle ............. Number of inactive workers at job conclusion")
print("usageBlob.workers_removed .......... Number of workers lost or removed at job conclusion")
print("usageBlob.tasks_completed .......... Number of tasks completed since previous check")
print("usageBlob.tasks_running ............ Number of tasks running at job conclusion")
print("usageBlob.tasks_waiting ............ Number of tasks waiting at job conclusion")
print("usageBlob.total_execute_times ...... Total execution time, including transfers, per job")
print("\n#### usageBlob Methods ####")
print("usageBlob.apply_from_tsv(filename)\n -- Apply data from a TSV (i.e. <runprefix>_usage.log) to usageBlob")
print("usageBlob.merge_blob(different_usageBlob)\n -- Merge data from another blob into usageBlob")
print("usageBlob.wq_timestamp_convert(timestamp)\n -- Convert a WorkQueue timestamp to a Python timestamp")
print("usageBlob.print_time_report\n -- Print total job count, start & end times and total time elapsed")
print("usageBlob.help()\n -- Print this help message")
return 1
def apply_from_tsv(self, filename):
def get_prop(entry, header_list, headerval):
try:
return entry[header_list.index(headerval)]
except (ValueError, IndexError):
return None
with open(filename, 'r') as ipt:
read = csv.reader(ipt, delimiter='\t')
headers = next(read)
for row in read:
self.ids.append(get_prop(row, headers, "TaskID"))
self.commands.append(get_prop(row, headers, "Command"))
self.starts.append(get_prop(row, headers, "Start"))
self.ends.append(get_prop(row, headers, "End"))
self.exit_statuses.append(get_prop(row, headers, "ExitStatus"))
self.cpu_times.append(get_prop(row, headers, "CPUTime"))
self.walltimes.append(get_prop(row, headers, "WallTime"))
self.cores.append(get_prop(row, headers, "Cores"))
self.virtual_memory.append(get_prop(row, headers, "VirtualMemory"))
self.swap_memory.append(get_prop(row, headers, "SwapMemory"))
self.total_processes.append(get_prop(row, headers, "TotalProcesses"))
self.max_concurrent_processes.append(get_prop(row, headers, "MaxConcurrentProcesses"))
self.bytes_read.append(get_prop(row, headers, "BytesRead"))
self.bytes_written.append(get_prop(row, headers, "BytesWritten"))
self.workers_connected.append(get_prop(row, headers, "WorkersConnected"))
self.workers_busy.append(get_prop(row, headers, "WorkersBusy"))
self.workers_idle.append(get_prop(row, headers, "WorkersIdle"))
self.workers_removed.append(get_prop(row, headers, "WorkersRemoved"))
self.tasks_completed.append(get_prop(row, headers, "TasksComplete"))
self.tasks_running.append(get_prop(row, headers, "TasksRunning"))
self.tasks_waiting.append(get_prop(row, headers, "TasksWaiting"))
self.total_execute_times.append(get_prop(row, headers, "TotalExecuteTime"))
return 1
def merge_blob(self, blob):
self.ids += blob.ids
self.commands += blob.commands
self.starts += blob.starts
self.ends += blob.ends
self.exit_statuses += blob.exit_statuses
self.cpu_times += blob.cpu_times
self.walltimes += blob.walltimes
self.cores += blob.cores
self.virtual_memory += blob.virtual_memory
self.swap_memory += blob.swap_memory
self.total_processes += blob.total_processes
self.max_concurrent_processes += blob.max_concurrent_processes
self.bytes_read += blob.bytes_read
self.bytes_written += blob.bytes_written
self.workers_connected += blob.workers_connected
self.workers_busy += blob.workers_busy
self.workers_idle += blob.workers_idle
self.workers_removed += blob.workers_removed
self.tasks_completed += blob.tasks_completed
self.tasks_running += blob.tasks_running
self.tasks_waiting += blob.tasks_waiting
self.total_execute_times += blob.total_execute_times
@staticmethod
def wq_timestamp_convert(wq_timestamp):
# RETURNS: datetime object from timestamp
ts = str(wq_timestamp)
# new_ts = float(ts[0:10] + "." + ts[10:12])  # replaced with slicing relative to the end of the string
new_ts = float(ts[:-4][:-2] + '.' + ts[:-4][-2:])
return datetime.fromtimestamp(new_ts)
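# Illustrative example: a Work Queue timestamp such as 1400000000123456
# (microsecond precision) is reduced above to the float 1400000000.12,
# i.e. seconds plus two digits of sub-second precision.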
@staticmethod
def wq_timestamp_seconds(wq_timeobj):
# RETURNS: seconds from a wq timeobj (float)
t = str(wq_timeobj)
new_t = float(t[:-6] + '.' + t[-6:])
return new_t
def print_time_report(self):
# PRINTS TO STDOUT: brief summary of times
begin = self.wq_timestamp_convert(min(self.starts))
finish = self.wq_timestamp_convert(max(self.ends))
print("%s Job Records" % len(self.ids))
print("Jobs started : %s" % str(begin))
print("Jobs finished : %s" % str(finish))
print("Time elapsed : %s" % str(finish - begin))
class runBlob(object):
def __init__(self, prefix):
self.prefix = prefix
self.alltasks = []
self.completetasks = []
self.failedtasks = []
self.remainingtasks = []
self.usage = 'unlinked'
try:
with open(prefix + '_alltasks.log', 'U') as a:
for l in a:
l = l.strip().split('\t')
self.alltasks.append(l[0])
with open(prefix + '_completetasks.log') as c:
for l in c:
l = l.strip()
self.completetasks.append(l)
with open(prefix + '_failedtasks.log') as f:
for l in f:
l = l.strip()
self.failedtasks.append(l)
for t in self.alltasks:
if t not in self.completetasks and t not in self.failedtasks:
self.remainingtasks.append(t)
except:
print("Instantiation of runBlob failed. Ensure all logs are available.")
exit()
def __repr__(self):
return '<runBlob prefix=%s usage=%s>' % (self.prefix, self.usage)
@staticmethod
def help():
print("#### runBlob Properties ####")
print("runBlob.prefix ........... Prefix from which runBlob was built")
print("runBlob.alltasks ......... All tasks submitted to run queue")
print("runBlob.completetasks .... Tasks completed ")
print("runBlob.failedtasks ...... Tasks attempted but failed")
print("runBlob.remainingtasks ... Tasks not completed or failed")
print("runBlob.usage ............ Link to a usageBlob for this run")
print("\n#### usageBlob Methods ####")
print("usageBlob.link_usage(usageBlob)\n -- Link this usage blob with this runBlob")
print("usageBlob.help()\n -- Print this help message")
return 1
def link_usage(self, usageBlob):
self.usage = usageBlob
|
|
from .bits import Bits
from ..ast.base import Base, _make_name
class BV(Bits):
# TODO: do these go on Bits or BV?
def chop(self, bits=1):
'''
Chops an AST into ASTs of size 'bits'. Obviously, the length of the AST must be
a multiple of bits.
'''
s = len(self)
if s % bits != 0:
raise ValueError("expression length (%d) should be a multiple of 'bits' (%d)" % (len(self), bits))
elif s == bits:
return [ self ]
else:
return list(reversed([ self[(n+1)*bits - 1:n*bits] for n in range(0, s / bits) ]))
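# Illustrative example: for a 16-bit AST `a`, a.chop(8) returns
# [a[15:8], a[7:0]], i.e. the most-significant chunk first.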
def __getitem__(self, rng):
'''
Extracts bits from the AST. ASTs are indexed weirdly. For a 32-bit AST:
a[31] is the *LEFT* most bit, so it'd be the 0 in
01111111111111111111111111111111
a[0] is the *RIGHT* most bit, so it'd be the 0 in
11111111111111111111111111111110
a[31:30] are the two leftmost bits, so they'd be the 0s in:
00111111111111111111111111111111
a[1:0] are the two rightmost bits, so they'd be the 0s in:
11111111111111111111111111111100
@returns the new AST.
'''
if type(rng) is slice:
return Extract(int(rng.start), int(rng.stop), self)
else:
return Extract(int(rng), int(rng), self)
def zero_extend(self, n):
'''
Zero-extends the AST by n bits. So:
a = BVV(0b1111, 4)
b = a.zero_extend(4)
b is BVV(0b00001111)
'''
return ZeroExt(n, self)
def sign_extend(self, n):
'''
Sign-extends the AST by n bits. So:
a = BVV(0b1111, 4)
b = a.sign_extend(4)
b is BVV(0b11111111)
'''
return SignExt(n, self)
def concat(self, *args):
'''
Concatenates this AST with the ASTs provided.
'''
return Concat(self, *args)
@staticmethod
def _from_int(like, value):
return BVI(bv.BVV(value, like.length), length=like.length)
@staticmethod
def _from_long(like, value):
return BVI(bv.BVV(value, like.length), length=like.length)
@staticmethod
def _from_BVV(like, value): #pylint:disable=unused-argument
return BVI(value, length=value.size())
def signed_to_fp(self, rm, sort):
if rm is None:
rm = fp.fp.RM.default()
return fp.fpToFP(rm, self, sort)
def unsigned_to_fp(self, rm, sort):
if rm is None:
rm = fp.fp.RM.default()
return fp.fpToFPUnsigned(rm, self, sort)
def raw_to_fp(self):
sort = fp.fp.FSort.from_size(self.length)
return fp.fpToFP(self, sort)
def to_bv(self):
return self
def BVI(model, **kwargs):
eager = isinstance(model, bv.BVV)
kwargs['eager'] = eager
return BV('I', (model,), **kwargs)
def BitVec(name, size, explicit_name=False):
n = _make_name(name, size, explicit_name)
return BV('BitVec', (n, size), variables={n}, symbolic=True, simplified=Base.FULL_SIMPLIFY, length=size)
def BitVecVal(value, size, name=None, explicit_name=False, variables=frozenset()):
if name is not None:
n = _make_name(name, size, explicit_name=explicit_name)
variables = variables | frozenset((n,))
return BVI(bv.BVV(value, size), variables=variables, symbolic=False, simplified=Base.FULL_SIMPLIFY, length=size, eager=True)
def StridedInterval(name=None, bits=0, lower_bound=None, upper_bound=None, stride=None, to_conv=None):
si = vsa.CreateStridedInterval(name=name, bits=bits, lower_bound=lower_bound, upper_bound=upper_bound, stride=stride, to_conv=to_conv)
return BVI(si, variables={ si.name }, symbolic=False, length=si._bits, eager=False)
def TopStridedInterval(bits, name=None, uninitialized=False):
si = vsa.StridedInterval.top(bits, name=name, uninitialized=uninitialized)
return BVI(si, variables={ si.name }, symbolic=False, length=bits)
def EmptyStridedInterval(bits, name=None):
si = vsa.StridedInterval.empty(bits)
return BVI(si, variables={ si.name }, symbolic=False, length=bits)
def ValueSet(**kwargs):
vs = vsa.ValueSet(**kwargs)
return BVI(vs, variables={ vs.name }, symbolic=False, length=kwargs['bits'], eager=False)
#
# Unbound operations
#
from .bool import Bool
from .. import operations
# comparisons
ULT = operations.op('ULT', (BV, BV), Bool, extra_check=operations.length_same_check, bound=False)
ULE = operations.op('ULE', (BV, BV), Bool, extra_check=operations.length_same_check, bound=False)
UGT = operations.op('UGT', (BV, BV), Bool, extra_check=operations.length_same_check, bound=False)
UGE = operations.op('UGE', (BV, BV), Bool, extra_check=operations.length_same_check, bound=False)
SLT = operations.op('SLT', (BV, BV), Bool, extra_check=operations.length_same_check, bound=False)
SLE = operations.op('SLE', (BV, BV), Bool, extra_check=operations.length_same_check, bound=False)
SGT = operations.op('SGT', (BV, BV), Bool, extra_check=operations.length_same_check, bound=False)
SGE = operations.op('SGE', (BV, BV), Bool, extra_check=operations.length_same_check, bound=False)
# bit stuff
LShR = operations.op('LShR', (BV, BV), BV, extra_check=operations.length_same_check,
calc_length=operations.basic_length_calc, bound=False)
SignExt = operations.op('SignExt', ((int, long), BV), BV,
calc_length=operations.ext_length_calc, bound=False)
ZeroExt = operations.op('ZeroExt', ((int, long), BV), BV,
calc_length=operations.ext_length_calc, bound=False)
Extract = operations.op('Extract', ((int, long), (int, long), BV),
BV, extra_check=operations.extract_check,
calc_length=operations.extract_length_calc, bound=False)
Concat = operations.op('Concat', BV, BV, calc_length=operations.concat_length_calc, bound=False)
RotateLeft = operations.op('RotateLeft', (BV, BV), BV,
extra_check=operations.length_same_check,
calc_length=operations.basic_length_calc, bound=False)
RotateRight = operations.op('RotateRight', (BV, BV), BV,
extra_check=operations.length_same_check,
calc_length=operations.basic_length_calc, bound=False)
Reverse = operations.op('Reverse', (BV,), BV,
calc_length=operations.basic_length_calc, bound=False)
#
# Bound operations
#
BV.__add__ = operations.op('__add__', (BV, BV), BV, extra_check=operations.length_same_check, calc_length=operations.basic_length_calc)
BV.__radd__ = operations.op('__radd__', (BV, BV), BV, extra_check=operations.length_same_check, calc_length=operations.basic_length_calc)
BV.__div__ = operations.op('__div__', (BV, BV), BV, extra_check=operations.length_same_check, calc_length=operations.basic_length_calc)
BV.__rdiv__ = operations.op('__rdiv__', (BV, BV), BV, extra_check=operations.length_same_check, calc_length=operations.basic_length_calc)
BV.__truediv__ = operations.op('__truediv__', (BV, BV), BV, extra_check=operations.length_same_check, calc_length=operations.basic_length_calc)
BV.__rtruediv__ = operations.op('__rtruediv__', (BV, BV), BV, extra_check=operations.length_same_check, calc_length=operations.basic_length_calc)
BV.__floordiv__ = operations.op('__floordiv__', (BV, BV), BV, extra_check=operations.length_same_check, calc_length=operations.basic_length_calc)
BV.__rfloordiv__ = operations.op('__rfloordiv__', (BV, BV), BV, extra_check=operations.length_same_check, calc_length=operations.basic_length_calc)
BV.__mul__ = operations.op('__mul__', (BV, BV), BV, extra_check=operations.length_same_check, calc_length=operations.basic_length_calc)
BV.__rmul__ = operations.op('__rmul__', (BV, BV), BV, extra_check=operations.length_same_check, calc_length=operations.basic_length_calc)
BV.__sub__ = operations.op('__sub__', (BV, BV), BV, extra_check=operations.length_same_check, calc_length=operations.basic_length_calc)
BV.__rsub__ = operations.op('__rsub__', (BV, BV), BV, extra_check=operations.length_same_check, calc_length=operations.basic_length_calc)
BV.__pow__ = operations.op('__pow__', (BV, BV), BV, extra_check=operations.length_same_check, calc_length=operations.basic_length_calc)
BV.__rpow__ = operations.op('__rpow__', (BV, BV), BV, extra_check=operations.length_same_check, calc_length=operations.basic_length_calc)
BV.__mod__ = operations.op('__mod__', (BV, BV), BV, extra_check=operations.length_same_check, calc_length=operations.basic_length_calc)
BV.__rmod__ = operations.op('__rmod__', (BV, BV), BV, extra_check=operations.length_same_check, calc_length=operations.basic_length_calc)
BV.__divmod__ = operations.op('__divmod__', (BV, BV), BV, extra_check=operations.length_same_check, calc_length=operations.basic_length_calc)
BV.__rdivmod__ = operations.op('__rdivmod__', (BV, BV), BV, extra_check=operations.length_same_check, calc_length=operations.basic_length_calc)
BV.__neg__ = operations.op('__neg__', (BV,), BV, calc_length=operations.basic_length_calc)
BV.__pos__ = operations.op('__pos__', (BV,), BV, calc_length=operations.basic_length_calc)
BV.__abs__ = operations.op('__abs__', (BV,), BV, calc_length=operations.basic_length_calc)
BV.__eq__ = operations.op('__eq__', (BV, BV), Bool, extra_check=operations.length_same_check)
BV.__ne__ = operations.op('__ne__', (BV, BV), Bool, extra_check=operations.length_same_check)
BV.__ge__ = operations.op('__ge__', (BV, BV), Bool, extra_check=operations.length_same_check)
BV.__le__ = operations.op('__le__', (BV, BV), Bool, extra_check=operations.length_same_check)
BV.__gt__ = operations.op('__gt__', (BV, BV), Bool, extra_check=operations.length_same_check)
BV.__lt__ = operations.op('__lt__', (BV, BV), Bool, extra_check=operations.length_same_check)
BV.SLT = operations.op('SLT', (BV, BV), Bool, extra_check=operations.length_same_check)
BV.SGT = operations.op('SGT', (BV, BV), Bool, extra_check=operations.length_same_check)
BV.SLE = operations.op('SLE', (BV, BV), Bool, extra_check=operations.length_same_check)
BV.SGE = operations.op('SGE', (BV, BV), Bool, extra_check=operations.length_same_check)
BV.ULT = operations.op('ULT', (BV, BV), Bool, extra_check=operations.length_same_check)
BV.UGT = operations.op('UGT', (BV, BV), Bool, extra_check=operations.length_same_check)
BV.ULE = operations.op('ULE', (BV, BV), Bool, extra_check=operations.length_same_check)
BV.UGE = operations.op('UGE', (BV, BV), Bool, extra_check=operations.length_same_check)
BV.__invert__ = operations.op('__invert__', (BV,), BV, calc_length=operations.basic_length_calc)
BV.__or__ = operations.op('__or__', (BV, BV), BV, extra_check=operations.length_same_check, calc_length=operations.basic_length_calc)
BV.__ror__ = operations.op('__ror__', (BV, BV), BV, extra_check=operations.length_same_check, calc_length=operations.basic_length_calc)
BV.__and__ = operations.op('__and__', (BV, BV), BV, extra_check=operations.length_same_check, calc_length=operations.basic_length_calc)
BV.__rand__ = operations.op('__rand__', (BV, BV), BV, extra_check=operations.length_same_check, calc_length=operations.basic_length_calc)
BV.__xor__ = operations.op('__xor__', (BV, BV), BV, extra_check=operations.length_same_check, calc_length=operations.basic_length_calc)
BV.__rxor__ = operations.op('__rxor__', (BV, BV), BV, extra_check=operations.length_same_check, calc_length=operations.basic_length_calc)
BV.__lshift__ = operations.op('__lshift__', (BV, BV), BV, extra_check=operations.length_same_check, calc_length=operations.basic_length_calc)
BV.__rlshift__ = operations.op('__rlshift__', (BV, BV), BV, extra_check=operations.length_same_check, calc_length=operations.basic_length_calc)
BV.__rshift__ = operations.op('__rshift__', (BV, BV), BV, extra_check=operations.length_same_check, calc_length=operations.basic_length_calc)
BV.__rrshift__ = operations.op('__rrshift__', (BV, BV), BV, extra_check=operations.length_same_check, calc_length=operations.basic_length_calc)
BV.LShR = operations.op('LShR', (BV, BV), BV, extra_check=operations.length_same_check, calc_length=operations.basic_length_calc)
BV.Extract = staticmethod(operations.op('Extract', ((int, long), (int, long), BV), BV, extra_check=operations.extract_check, calc_length=operations.extract_length_calc, bound=False))
BV.Concat = staticmethod(operations.op('Concat', BV, BV, calc_length=operations.concat_length_calc, bound=False))
BV.reversed = property(operations.op('Reverse', (BV,), BV, calc_length=operations.basic_length_calc))
BV.union = operations.op('union', (BV, BV), BV, extra_check=operations.length_same_check, calc_length=operations.basic_length_calc)
BV.widen = operations.op('widen', (BV, BV), BV, extra_check=operations.length_same_check, calc_length=operations.basic_length_calc)
BV.intersection = operations.op('intersection', (BV, BV), BV, extra_check=operations.length_same_check, calc_length=operations.basic_length_calc)
from .. import bv
from .. import fp
from . import fp
from .. import vsa
|
|
"""
Provide authentication using local files
.. versionadded:: 2018.3.0
The `file` auth module allows simple authentication via local files. Different
filetypes are supported, including:
1. Text files, with passwords in plaintext or hashed
2. Apache-style htpasswd files
3. Apache-style htdigest files
.. note::
    The ``python-passlib`` library is required when using a ``^filetype`` of
    ``htpasswd`` or ``htdigest``.
The simplest example is a plaintext file with usernames and passwords:
.. code-block:: yaml
    external_auth:
      file:
        ^filename: /etc/insecure-user-list.txt
        gene:
          - .*
        dean:
          - test.*
In this example the ``/etc/insecure-user-list.txt`` file would be formatted
as follows:
.. code-block:: text
dean:goneFishing
gene:OceanMan
``^filename`` is the only required parameter. Any parameter that begins with
a ``^`` is passed directly to the underlying file authentication function
via ``kwargs``, with the leading ``^`` being stripped.
The text file option is configurable to work with legacy formats:
.. code-block:: yaml
    external_auth:
      file:
        ^filename: /etc/legacy_users.txt
        ^filetype: text
        ^hashtype: md5
        ^username_field: 2
        ^password_field: 3
        ^field_separator: '|'
        trey:
          - .*
This would authenticate users against a file of the following format:
.. code-block:: text
46|trey|16a0034f90b06bf3c5982ed8ac41aab4
555|mike|b6e02a4d2cb2a6ef0669e79be6fd02e4
2001|page|14fce21db306a43d3b680da1a527847a
8888|jon|c4e94ba906578ccf494d71f45795c6cb
.. note::
    The :py:func:`hashutil.digest <salt.modules.hashutil.digest>` execution
    function is used for comparing hashed passwords, so any algorithm
    supported by that function will work.
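As a sketch (not part of Salt itself), an md5-hashed entry in the legacy format
shown above could be generated like this:
.. code-block:: python
    import hashlib
    print('46|trey|' + hashlib.md5(b'some-password').hexdigest())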
There is also support for Apache-style ``htpasswd`` and ``htdigest`` files:
.. code-block:: yaml
    external_auth:
      file:
        ^filename: /var/www/html/.htusers
        ^filetype: htpasswd
        cory:
          - .*
When using ``htdigest`` the ``^realm`` must be set:
.. code-block:: yaml
    external_auth:
      file:
        ^filename: /var/www/html/.htdigest
        ^filetype: htdigest
        ^realm: MySecureRealm
        cory:
          - .*
"""
import logging
import os
# Import salt utils
import salt.utils.files
import salt.utils.versions
log = logging.getLogger(__name__)
__virtualname__ = "file"
def __virtual__():
return __virtualname__
def _get_file_auth_config():
"""
    Set up defaults and check configuration variables for auth backends
"""
config = {
"filetype": "text",
"hashtype": "plaintext",
"field_separator": ":",
"username_field": 1,
"password_field": 2,
}
for opt in __opts__["external_auth"][__virtualname__]:
if opt.startswith("^"):
config[opt[1:]] = __opts__["external_auth"][__virtualname__][opt]
if "filename" not in config:
log.error(
"salt.auth.file: An authentication file must be specified "
"via external_auth:file:^filename"
)
return False
if not os.path.exists(config["filename"]):
log.error(
"salt.auth.file: The configured external_auth:file:^filename (%s)"
"does not exist on the filesystem",
config["filename"],
)
return False
config["username_field"] = int(config["username_field"])
config["password_field"] = int(config["password_field"])
return config
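# For the first YAML example in the module docstring, the resulting config dict
# would look roughly like this (a sketch, not captured output):
#   {"filetype": "text", "hashtype": "plaintext", "field_separator": ":",
#    "username_field": 1, "password_field": 2,
#    "filename": "/etc/insecure-user-list.txt"}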
def _text(username, password, **kwargs):
"""
    The text file function can authenticate using plaintext passwords or any
    digest method available via the
    :py:func:`hashutil.digest <salt.modules.hashutil.digest>` function.
"""
filename = kwargs["filename"]
hashtype = kwargs["hashtype"]
field_separator = kwargs["field_separator"]
username_field = kwargs["username_field"] - 1
password_field = kwargs["password_field"] - 1
with salt.utils.files.fopen(filename, "r") as pwfile:
for line in pwfile.readlines():
fields = line.strip().split(field_separator)
try:
this_username = fields[username_field]
except IndexError:
log.error(
"salt.auth.file: username field (%s) does not exist in file %s",
username_field,
filename,
)
return False
try:
this_password = fields[password_field]
except IndexError:
log.error(
"salt.auth.file: password field (%s) does not exist in file %s",
password_field,
filename,
)
return False
if this_username == username:
if hashtype == "plaintext":
if this_password == password:
return True
else:
# Exceptions for unknown hash types will be raised by hashutil.digest
if this_password == __salt__["hashutil.digest"](password, hashtype):
return True
# Short circuit if we've already found the user but the password was wrong
return False
return False
def _htpasswd(username, password, **kwargs):
"""
Provide authentication via Apache-style htpasswd files
"""
from passlib.apache import HtpasswdFile
pwfile = HtpasswdFile(kwargs["filename"])
# passlib below version 1.6 uses 'verify' function instead of 'check_password'
if salt.utils.versions.version_cmp(kwargs["passlib_version"], "1.6") < 0:
return pwfile.verify(username, password)
else:
return pwfile.check_password(username, password)
def _htdigest(username, password, **kwargs):
"""
Provide authentication via Apache-style htdigest files
"""
realm = kwargs.get("realm", None)
if not realm:
log.error(
"salt.auth.file: A ^realm must be defined in "
"external_auth:file for htdigest filetype"
)
return False
from passlib.apache import HtdigestFile
pwfile = HtdigestFile(kwargs["filename"])
# passlib below version 1.6 uses 'verify' function instead of 'check_password'
if salt.utils.versions.version_cmp(kwargs["passlib_version"], "1.6") < 0:
return pwfile.verify(username, realm, password)
else:
return pwfile.check_password(username, realm, password)
def _htfile(username, password, **kwargs):
"""
Gate function for _htpasswd and _htdigest authentication backends
"""
filetype = kwargs.get("filetype", "htpasswd").lower()
try:
import passlib
kwargs["passlib_version"] = passlib.__version__
except ImportError:
log.error(
"salt.auth.file: The python-passlib library is required for %s filetype",
filetype,
)
return False
if filetype == "htdigest":
return _htdigest(username, password, **kwargs)
else:
return _htpasswd(username, password, **kwargs)
FILETYPE_FUNCTION_MAP = {"text": _text, "htpasswd": _htfile, "htdigest": _htfile}
def auth(username, password):
"""
File based authentication
    ^filename
        The path to the file to use for authentication.
    ^filetype
        The type of file: ``text``, ``htpasswd``, ``htdigest``.
        Default: ``text``
    ^realm
        The realm required by htdigest authentication.
    .. note::
        The following parameters are only used with the ``text`` filetype.
    ^hashtype
        The digest format of the password. Can be ``plaintext`` or any digest
        available via :py:func:`hashutil.digest <salt.modules.hashutil.digest>`.
        Default: ``plaintext``
    ^field_separator
        The character to use as a delimiter between fields in a text file.
        Default: ``:``
    ^username_field
        The numbered field in the text file that contains the username, with
        numbering beginning at 1 (one).
        Default: ``1``
    ^password_field
        The numbered field in the text file that contains the password, with
        numbering beginning at 1 (one).
        Default: ``2``
"""
config = _get_file_auth_config()
if not config:
return False
    auth_function = FILETYPE_FUNCTION_MAP.get(config["filetype"], _text)
return auth_function(username, password, **config)
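# A minimal sketch of exercising auth() outside of Salt's loader. __opts__ and
# __salt__ are normally injected by Salt; setting __opts__ by hand here is for
# illustration only:
#
#   import salt.auth.file as file_auth
#   file_auth.__opts__ = {"external_auth": {"file": {
#       "^filename": "/etc/insecure-user-list.txt", "gene": [".*"]}}}
#   print(file_auth.auth("gene", "OceanMan"))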
|
|
from bitcoin.main import hash_to_int, fast_multiply, inv, ecdsa_raw_recover
from bitcoin.main import G, N, decode_privkey, get_privkey_format, privtopub
from bitcoin.main import encode_pubkey, ecdsa_raw_verify
from os import urandom
from hashlib import sha256
from binascii import hexlify, unhexlify
from sha3 import keccak_256 as sha3
from copy import copy
# We're going to reuse this to get duplicate R values.
# Never, ever, ever, do this with real signatures.
insecure_k = int(urandom(32).encode('hex'), 16)
# Do ECDSA signing without a random or deterministic K.
def insecure_ecdsa_sign(msghash, priv):
global insecure_k
z = hash_to_int(msghash)
k = insecure_k
r, y = fast_multiply(G, k)
s = inv(k, N) * (z + r*decode_privkey(priv)) % N
v, r, s = 27+((y % 2) ^ (0 if s * 2 < N else 1)), r, s if s * 2 < N else N - s
if 'compressed' in get_privkey_format(priv):
print("COmpressed \a")
v += 4
return v, r, s
# this function is from
# https://github.com/warner/python-ecdsa/blob/master/ecdsa/numbertheory.py
def inverse_mod( a, m ):
"""Inverse of a mod m."""
if a < 0 or m <= a: a = a % m
# From Ferguson and Schneier, roughly:
c, d = a, m
uc, vc, ud, vd = 1, 0, 0, 1
while c != 0:
q, c, d = divmod( d, c ) + ( c, )
uc, vc, ud, vd = ud - q*uc, vd - q*vc, uc, vc
# At this point, d is the GCD, and ud*a+vd*m = d.
    # If d == 1, this means that ud is an inverse.
assert d == 1
if ud > 0: return ud
else: return ud + m
# Convert an int to hex.
def int_to_hex_str(i):
h = "%0x" % i
    # "%x" may yield an odd number of digits, so pad with a leading zero.
if len(h) % 2:
h = "0" + h
return h
# Do attack on sigs with duplicate R values.
def derivate_privkey(p, r, s1, s2, hash1, hash2):
assert(type(p) == long)
assert(type(r) == long)
assert(type(s1) == long)
assert(type(s2) == long)
assert(type(hash1) == long)
assert(type(hash2) == long)
assert(len(int_to_hex_str(p)) == 64)
assert(len(int_to_hex_str(r)) == 64)
assert(len(int_to_hex_str(s1)) == 64)
assert(len(int_to_hex_str(s2)) == 64)
assert(len(int_to_hex_str(hash1)) == 64)
assert(len(int_to_hex_str(hash2)) == 64)
z = hash1 - hash2
s = s1 - s2
r_inv = inverse_mod(r, p)
s_inv = inverse_mod(s, p)
k = (z * s_inv) % p
d = (r_inv * (s1 * k - hash1)) % p
return d, k
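# The recovery above follows from two ECDSA signatures that reuse the same k:
#   s1 = k^-1 * (hash1 + r*d)  (mod n)
#   s2 = k^-1 * (hash2 + r*d)  (mod n)
# Subtracting and solving gives
#   k = (hash1 - hash2) * (s1 - s2)^-1  (mod n)
#   d = (s1*k - hash1) * r^-1           (mod n)
# (the group order n is what is passed in here as the parameter named p).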
# Return a hash value as an int.
def hash_as_int(hash_type, msg):
if hash_type == "sha256":
h = sha256(msg).hexdigest()
if hash_type == "sha3":
h = sha3(msg).hexdigest()
i = int(h, 16)
return i
# Gets a private key within allowed range.
def get_priv_key():
max_priv_key = 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141
while 1:
priv_key = urandom(32)
if int(hexlify(priv_key), 16) < max_priv_key:
return priv_key
# Generate a solution hash to pass to Ethereum.
def gen_solution_hash(hm1, v1, r1, s1, hm2, s2, destination):
# Create a string of hex characters with zero padding where necessary.
buf = b""
buf += int_to_hex_str(hm1)
buf += int_to_hex_str(v1)
buf += int_to_hex_str(r1)
buf += int_to_hex_str(s1)
buf += int_to_hex_str(hm2)
buf += int_to_hex_str(s2)
# Convert ethereum address to aligned hex data.
# It's already in hex so this is easy.
dest = destination[2:]
if len(dest) % 2:
dest = "0" + dest
buf += dest
# Convert hex string to bytes and hash it.
solution_hash = sha3(unhexlify(buf)).hexdigest()
# Return the solution hash as hex.
return solution_hash
# Generate message hashes.
m1 = b"test1"
m2 = b"test2"
hm1 = hash_as_int("sha3", m1)
hm2 = hash_as_int("sha3", m2)
# Generate a key pair whose private key can be recovered by the duplicate-R attack.
generated = False
while generated == False:
# Generate key pairs.
priv_key = get_priv_key()
pub_key = int(privtopub(priv_key).encode('hex'), 16)
# Get sig components.
v1, r1, s1 = insecure_ecdsa_sign(unhexlify(int_to_hex_str(hm1)), priv_key)
v2, r2, s2 = insecure_ecdsa_sign(unhexlify(int_to_hex_str(hm2)), priv_key)
# They should be equal for easy recovery.
if v1 != v2:
continue
# Duplicate R is required for attack.
if r1 != r2:
raise Exception("Could not generate duplicate R values.")
# Sig should be over unique messages.
if s1 == s2:
raise Exception("s1 was == s2.")
# Test attack is possible.
p = 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141
try:
temp_priv_key, k = derivate_privkey(p, r1, s1, s2, hm1, hm2)
except:
continue
# Test recovered priv key matches up.
if temp_priv_key != int(hexlify(priv_key), 16):
continue
# Test recovered public key matches up.
# This should be unnecessary but we'll check anyway.
temp_pub_key = int(hexlify(privtopub(priv_key)), 16)
if temp_pub_key != pub_key:
continue
# Save values.
priv_key = temp_priv_key
pub_key = int_to_hex_str(temp_pub_key)
break
# Choose an Ethereum address to redeem Ether to.
chosen_addr = raw_input("Enter an Ethereum address that can redeem the coins: [enter is default]")
if len(chosen_addr) == 0:
# For fast testing.
destination = "0xcfd31d218dccc9b553458f1b6c4ace40dada01f7"
else:
destination = chosen_addr
# Generate a solution hash (to claim the coins without getting scammed).
solution_hash = gen_solution_hash(hm1, v1, r1, s1, hm2, s2, destination)
# Show results.
print("Priv key = " + int_to_hex_str(priv_key))
print("Pub key = " + pub_key)
print("Address = 0x" + sha3(unhexlify(pub_key[2:])).hexdigest()[24:])
print("r1 = " + int_to_hex_str(r1))
print("s1 = " + int_to_hex_str(s1))
print("s2 = " + int_to_hex_str(s2))
print("hm1 = " + int_to_hex_str(hm1))
print("hm2 = " + int_to_hex_str(hm2))
print("v1 = " + int_to_hex_str(v1))
print("v2 = " + int_to_hex_str(v2))
print("m1 = " + m1)
print("m2 = " + m2)
print("solution hash = " + solution_hash)
print("Eth input = ")
eth_input = """ "0x%s", %d, "0x%s", "0x%s", "0x%s", "0x%s", "%s", "%s", 0 <--- replace the zero with the index returned from CommitSolutionHsh""" % (int_to_hex_str(hm1), v1, int_to_hex_str(r1), int_to_hex_str(s1), int_to_hex_str(hm2), int_to_hex_str(s2), destination, destination)
print(eth_input)
rec_pub_key = ecdsa_raw_recover(unhexlify(int_to_hex_str(hm1)), (v1, r1, s1))
if v1 >= 31:
rec_pub_key = encode_pubkey(rec_pub_key, 'hex_compressed')
else:
rec_pub_key = encode_pubkey(rec_pub_key, 'hex')
print("Recovery 1 = " + rec_pub_key)
print("Ver sig hm1 from rec = " + str(ecdsa_raw_verify(int_to_hex_str(hm1), (v1, r1, s1), rec_pub_key)))
print("Ver sig hm1 from attack = " + str(ecdsa_raw_verify(int_to_hex_str(hm1), (v1, r1, s1), pub_key)))
rec_pub_key = ecdsa_raw_recover(unhexlify(int_to_hex_str(hm2)), (v2, r2, s2))
if v1 >= 31:
rec_pub_key = encode_pubkey(rec_pub_key, 'hex_compressed')
else:
rec_pub_key = encode_pubkey(rec_pub_key, 'hex')
print("Recovery 2 = " + rec_pub_key)
print("Ver sig hm2 from rec = " + str(ecdsa_raw_verify(int_to_hex_str(hm2), (v2, r2, s2), rec_pub_key)))
print("Ver sig hm2 from attack = " + str(ecdsa_raw_verify(int_to_hex_str(hm2), (v2, r2, s2), pub_key)))
|
|
#! /usr/bin/python
# -*- coding: UTF-8 -*-
# Copyright 2011-2017 Olivier Renault, Luiko Czub, TestLink-API-Python-client developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ------------------------------------------------------------------------
"""
TestLinkExample - v0.20
Created on 6 nov. 2011
@author: Olivier Renault (admin@sqaopen.net)
Shows how to use the TestLinkAPI.
=> Counts and lists the Projects
=> Create a new Project with the following structure:
NewProject
   |
   ----NewTestPlan
            |
            ------ Test Suite A
            |           |
            |           ------- Test Suite AA
            |                          |
            |                          --------- Test Case AA
            |                                           |
            ------ Test Suite B                         --- 5 manual test steps
                        |
                        --------- Test Case B
                                         |
                                         --- 5 automated test steps
Update Oct. 2013, L. Czub
Integrates v0.4.5 changes for optional arguments and response error handling
The v0.4.0 method calls are still visible as comments (look for CHANGE v0.4.5)
So this file helps you understand where your own existing code needs adjustment.
Update Dec. 2013, L. Czub - examples for v0.4.6 api extensions added
Update Jan. 2014, L. Czub - examples for v0.4.7 api and service extensions added
"""
from __future__ import print_function
from testlink import TestlinkAPIClient, TestLinkHelper
from testlink.testlinkerrors import TLResponseError
import sys, os.path
from platform import python_version
# precondition a)
# SERVER_URL and KEY are defined in environment
# TESTLINK_API_PYTHON_SERVER_URL=http://YOURSERVER/testlink/lib/api/xmlrpc.php
# TESTLINK_API_PYTHON_DEVKEY=7ec252ab966ce88fd92c25d08635672b
#
# alternative precondition b)
# SERVER_URL and KEY are defined as command line arguments
# python TestLinkExample.py --server_url http://YOURSERVER/testlink/lib/api/xmlrpc.php
# --devKey 7ec252ab966ce88fd92c25d08635672b
#
# ATTENTION: With TestLink 1.9.7, because of the new REST API, the SERVER_URL
# has changed from
# (old) http://YOURSERVER/testlink/lib/api/xmlrpc.php
# to
# (new) http://YOURSERVER/testlink/lib/api/xmlrpc/v1/xmlrpc.php
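# A minimal sketch of precondition a) in a Unix-like shell (values are the
# placeholders from above, not real credentials):
#   export TESTLINK_API_PYTHON_SERVER_URL=http://YOURSERVER/testlink/lib/api/xmlrpc/v1/xmlrpc.php
#   export TESTLINK_API_PYTHON_DEVKEY=7ec252ab966ce88fd92c25d08635672b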
tl_helper = TestLinkHelper()
tl_helper.setParamsFromArgs('''Shows how to use the TestLinkAPI.
=> Counts and lists the Projects
=> Create a new Project with the following structure:''')
myTestLink = tl_helper.connect(TestlinkAPIClient)
myPyVersion = python_version()
myPyVersionShort = myPyVersion.replace('.', '')[:2]
NEWTESTPLAN_A="TestPlan_API A"
NEWTESTPLAN_B="TestPlan_API B"
NEWTESTPLAN_C="TestPlan_API C - DeleteTest"
NEWPLATFORM_A='Big Birds %s' % myPyVersionShort
NEWPLATFORM_B='Small Birds'
NEWPLATFORM_C='Ugly Birds'
NEWTESTSUITE_A="A - First Level"
NEWTESTSUITE_B="B - First Level"
NEWTESTSUITE_AA="AA - Second Level"
NEWTESTCASE_AA="TESTCASE_AA"
NEWTESTCASE_B="TESTCASE_B"
myApiVersion='%s v%s' % (myTestLink.__class__.__name__ , myTestLink.__version__)
NEWBUILD_A='%s' % myApiVersion
NEWBUILD_B='%s' % myApiVersion
NEWBUILD_C='%s - DeleteTest' % myApiVersion
NEWBUILD_D='%s - copyTestersTest' % myApiVersion
this_file_dirname=os.path.dirname(__file__)
NEWATTACHMENT_PY= os.path.join(this_file_dirname, 'TestLinkExample.py')
NEWATTACHMENT_PNG=os.path.join(this_file_dirname, 'PyGreat.png')
# Servers TestLink Version
myTLVersion = myTestLink.testLinkVersion()
myTLVersionShort = myTLVersion.replace('.', '')
NEWPROJECT="NEW_PROJECT_API-%s" % myPyVersionShort
NEWPREFIX="NPROAPI%s" % myPyVersionShort
ITSNAME="myITS"
# used connection settings
print(myTestLink.connectionInfo())
print("")
# CHANGE this name into a valid account, known in your TL application
myTestUserName="pyTLapi"
myTestUserName2="admin"
# get user information
response = myTestLink.getUserByLogin(myTestUserName)
print("getUserByLogin", response)
myTestUserID=response[0]['dbID']
response = myTestLink.getUserByID(myTestUserID)
print("getUserByID ", response)
# example asking the api client about methods arguments
print(myTestLink.whatArgs('assignTestCaseExecutionTask'))
# example handling Response Error Codes
# first check an invalid devKey and then our own one
try:
myTestLink.checkDevKey(devKey='007')
except TLResponseError as tl_err:
if tl_err.code == 2000:
# expected invalid devKey Error
        # now check our own one - just call with default settings
myTestLink.checkDevKey()
else:
# seems to be another response failure - we forward it
raise
print("Number of Projects in TestLink: %s " % (myTestLink.countProjects()))
print("")
myTestLink.listProjects()
print("")
# Delete the project, if it already exists
try:
response = myTestLink.deleteTestProject(NEWPREFIX)
print("deleteTestProject", response)
except TLResponseError:
print("No project with prefix %s exists" % NEWPREFIX)
# # get IssueTrackerSystem
# aITS=myTestLink.getIssueTrackerSystem(aITSNAME)
# print("getIssueTrackerSystem", aITS)
# Creates the project
projInfo = 'Example created with Python %s API class %s in TL %s' % \
( python_version(), myApiVersion, myTLVersion )
newProject = myTestLink.createTestProject(NEWPROJECT, NEWPREFIX,
notes=projInfo, active=1, public=1,
# itsname=ITSNAME, itsenabled=1,
options={'requirementsEnabled' : 0, 'testPriorityEnabled' : 1,
'automationEnabled' : 1, 'inventoryEnabled' : 0})
print("createTestProject", newProject)
newProjectID = newProject[0]['id']
print("New Project '%s' - id: %s" % (NEWPROJECT,newProjectID))
# Creates the test plan
newTestPlan = myTestLink.createTestPlan(NEWTESTPLAN_A, testprojectname=NEWPROJECT,
notes='New TestPlan created with the API',active=1, public=1)
print("createTestPlan", newTestPlan)
newTestPlanID_A = newTestPlan[0]['id']
print("New Test Plan '%s' - id: %s" % (NEWTESTPLAN_A,newTestPlanID_A))
# Create test plan B - uses no platforms
newTestPlan = myTestLink.createTestPlan(NEWTESTPLAN_B, prefix=NEWPREFIX,
notes='New TestPlan created with the Generic API - uses no platforms.',
active=1, public=1)
print("createTestPlan", newTestPlan)
newTestPlanID_B = newTestPlan[0]['id']
print("New Test Plan '%s' - id: %s" % (NEWTESTPLAN_B,newTestPlanID_B))
# Create platform 'Big Birds x'
newPlatForm = myTestLink.createPlatform(NEWPROJECT, NEWPLATFORM_A,
notes='Platform for Big Birds, unique name, only used in this project')
print("createPlatform", newPlatForm)
newPlatFormID_A = newPlatForm['id']
# Add Platform 'Big Birds x' to the test plan
response = myTestLink.addPlatformToTestPlan(newTestPlanID_A, NEWPLATFORM_A)
print("addPlatformToTestPlan", response)
# Create platform 'Small Birds'
newPlatForm = myTestLink.createPlatform(NEWPROJECT, NEWPLATFORM_B,
notes='Platform for Small Birds, name used in all example projects')
print("createPlatform", newPlatForm)
newPlatFormID_B = newPlatForm['id']
# Add Platform 'Small Birds' to the test plan
response = myTestLink.addPlatformToTestPlan(newTestPlanID_A, NEWPLATFORM_B)
print("addPlatformToTestPlan", response)
# Create platform 'Ugly Birds'
newPlatForm = myTestLink.createPlatform(NEWPROJECT, NEWPLATFORM_C,
notes='Platform for Ugly Birds, will be removed from test plan')
print("createPlatform", newPlatForm)
newPlatFormID_C = newPlatForm['id']
# Add Platform 'Ugly Birds' to the test plan
response = myTestLink.addPlatformToTestPlan(newTestPlanID_A, NEWPLATFORM_C)
print("addPlatformToTestPlan", response)
#Creates the test Suite A
newTestSuite = myTestLink.createTestSuite(newProjectID, NEWTESTSUITE_A,
"Details of the Test Suite A")
print("createTestSuite", newTestSuite)
newTestSuiteID_A = newTestSuite[0]['id']
print("New Test Suite '%s' - id: %s" % (NEWTESTSUITE_A, newTestSuiteID_A))
FirstLevelID = newTestSuiteID_A
#Creates the test Suite B
newTestSuite = myTestLink.createTestSuite(newProjectID, NEWTESTSUITE_B,
"Details of the Test Suite B")
print("createTestSuite", newTestSuite)
newTestSuiteID_B = newTestSuite[0]['id']
print("New Test Suite '%s' - id: %s" % (NEWTESTSUITE_B, newTestSuiteID_B))
#Creates the test Suite AA
newTestSuite = myTestLink.createTestSuite(newProjectID, NEWTESTSUITE_AA,
"Details of the Test Suite AA",parentid=FirstLevelID)
print("createTestSuite", newTestSuite)
newTestSuiteID_AA = newTestSuite[0]['id']
print("New Test Suite '%s' - id: %s" % (NEWTESTSUITE_AA, newTestSuiteID_AA))
MANUAL = 1
AUTOMATED = 2
READFORREVIEW=2
REWORK=4
HIGH=3
MEDIUM=2
LOW=1
#Creates the test case TC_AA with state ready for review
myTestLink.initStep("Step action 1", "Step result 1", MANUAL)
myTestLink.appendStep("Step action 2", "Step result 2", MANUAL)
myTestLink.appendStep("Step action 3", "Step result 3", MANUAL)
myTestLink.appendStep("Step action 4", "Step result 4", MANUAL)
myTestLink.appendStep("Step action 5", "Step result 5", MANUAL)
myTestLink.appendStep("Dummy step for delete tests",
"should be delete with deleteTestCaseSteps", MANUAL)
newTestCase = myTestLink.createTestCase(NEWTESTCASE_AA, newTestSuiteID_AA,
newProjectID, myTestUserName, "This is the summary of the Test Case AA",
preconditions='these are the preconditions',
importance=LOW, state=READFORREVIEW, estimatedexecduration=10.1)
print("createTestCase", newTestCase)
newTestCaseID_AA = newTestCase[0]['id']
print("New Test Case '%s' - id: %s" % (NEWTESTCASE_AA, newTestCaseID_AA))
#Creates the test case TC_B with state rework
myTestLink.initStep("Step action 1", "Step result 1", AUTOMATED)
myTestLink.appendStep("Step action 2", "Step result 2", AUTOMATED)
myTestLink.appendStep("Step action 3", "Step result 3", AUTOMATED)
myTestLink.appendStep("Step action 4", "Step result 4", AUTOMATED)
myTestLink.appendStep("Step action 5", "Step result 5", AUTOMATED)
newTestCase = myTestLink.createTestCase(NEWTESTCASE_B, newTestSuiteID_B,
newProjectID, myTestUserName, "This is the summary of the Test Case B",
preconditions='these are the preconditions', executiontype=AUTOMATED,
status=REWORK, estimatedexecduration=0.5)
print("createTestCase", newTestCase)
newTestCaseID_B = newTestCase[0]['id']
print("New Test Case '%s' - id: %s" % (NEWTESTCASE_B, newTestCaseID_B))
# Add test cases to test plan - we need the full external id !
# for every test case version 1 is used
tc_version=1
# TC AA should be tested with platforms 'Big Birds'+'Small Birds'
tc_aa_full_ext_id = myTestLink.getTestCase(newTestCaseID_AA)[0]['full_tc_external_id']
response = myTestLink.addTestCaseToTestPlan(newProjectID, newTestPlanID_A,
tc_aa_full_ext_id, tc_version, platformid=newPlatFormID_A)
print("addTestCaseToTestPlan", response)
tc_aa_full_ext_id = myTestLink.getTestCase(newTestCaseID_AA)[0]['full_tc_external_id']
response = myTestLink.addTestCaseToTestPlan(newProjectID, newTestPlanID_A,
tc_aa_full_ext_id, tc_version, platformid=newPlatFormID_B)
print("addTestCaseToTestPlan", response)
# change test case TC_AA - delete step 6 (step 7 does not exist)
response = myTestLink.deleteTestCaseSteps(tc_aa_full_ext_id, [7,6],
version=tc_version)
print("deleteTestCaseSteps", response)
# TC B should be tested with platform 'Small Birds'
tc_b_full_ext_id = myTestLink.getTestCase(testcaseid=newTestCaseID_B)[0]['full_tc_external_id']
response = myTestLink.addTestCaseToTestPlan(newProjectID, newTestPlanID_A,
tc_b_full_ext_id, tc_version, platformid=newPlatFormID_B)
print("addTestCaseToTestPlan", response)
#Update test case TC_B -> high, change step 5, new step 6
steps_tc_b = myTestLink.getTestCase(testcaseid=newTestCaseID_B)[0]['steps']
steps_tc_b_v1u = steps_tc_b[:4]
steps_tc_b_v1u.append(
{'step_number' : 5, 'actions' : "Step action 5 -b changed by updateTestCase" ,
'expected_results' : "Step result 5 - b changed", 'execution_type' : AUTOMATED})
steps_tc_b_v1u.append(
{'step_number' : 6, 'actions' : "Step action 6 -b added by updateTestCase" ,
'expected_results' : "Step result 6 - b added", 'execution_type' : AUTOMATED})
response = myTestLink.updateTestCase(tc_b_full_ext_id, version=tc_version,
steps=steps_tc_b_v1u, importance=MEDIUM, estimatedexecduration=3)
print("updateTestCase", response)
# create additional steps via createTestCaseSteps - action create
steps_tc_b_c67 = [
{'step_number' : 6, 'actions' : "action 6 createTestCaseSteps.create" ,
'expected_results' : "skip - cause step 6 already exist", 'execution_type' : AUTOMATED},
{'step_number' : 7, 'actions' : "action 7 createTestCaseSteps.create" ,
'expected_results' : "create - cause step 7 not yet exist", 'execution_type' : AUTOMATED}]
response = myTestLink.createTestCaseSteps('create', steps_tc_b_c67,
testcaseexternalid=tc_b_full_ext_id, version=tc_version)
print("createTestCaseSteps.create", response)
# create additional steps via createTestCaseSteps - action update
steps_tc_b_c38 = [
{'step_number' : 3, 'actions' : "action 3 createTestCaseSteps.update" ,
'expected_results' : "update - cause step 3 already exist", 'execution_type' : AUTOMATED},
{'step_number' : 8, 'actions' : "action 8 createTestCaseSteps.update" ,
'expected_results' : "create - cause step 8 not yet exist", 'execution_type' : AUTOMATED}]
response = myTestLink.createTestCaseSteps('update', steps_tc_b_c38,
testcaseid=newTestCaseID_B, version=tc_version)
print("createTestCaseSteps.update", response)
# In test plan B TC B should be tested without platform
response = myTestLink.addTestCaseToTestPlan(newProjectID, newTestPlanID_B,
tc_b_full_ext_id, tc_version)
print("addTestCaseToTestPlan", response)
# # Try to Remove Platform 'Big Birds' from platform
# response = myTestLink.removePlatformFromTestPlan(newTestPlanID_A, NEWPLATFORM_C)
# print "removePlatformFromTestPlan", response
# Remove Platform 'Ugly Birds' from the test plan
response = myTestLink.removePlatformFromTestPlan(newTestPlanID_A, NEWPLATFORM_C)
print("removePlatformFromTestPlan", response)
# -- Create Build for TestPlan A (uses platforms)
newBuild = myTestLink.createBuild(newTestPlanID_A, NEWBUILD_A,
'Notes for the Build', releasedate="2016-12-31")
print("createBuild", newBuild)
newBuildID_A = newBuild[0]['id']
print("New Build '%s' - id: %s" % (NEWBUILD_A, newBuildID_A))
# assign user to test case execution tasks - test plan with platforms
response = myTestLink.assignTestCaseExecutionTask( myTestUserName,
newTestPlanID_A, tc_aa_full_ext_id,
buildid=newBuildID_A, platformname=NEWPLATFORM_A)
print("assignTestCaseExecutionTask", response)
response = myTestLink.assignTestCaseExecutionTask( myTestUserName2,
newTestPlanID_A, tc_aa_full_ext_id,
buildname=NEWBUILD_A, platformid=newPlatFormID_B)
print("assignTestCaseExecutionTask", response)
response = myTestLink.assignTestCaseExecutionTask( myTestUserName,
newTestPlanID_A, tc_b_full_ext_id,
buildname=NEWBUILD_A, platformname=NEWPLATFORM_B)
print("assignTestCaseExecutionTask", response)
# get test case assigned tester
response = myTestLink.getTestCaseAssignedTester(
newTestPlanID_A, tc_aa_full_ext_id,
buildid=newBuildID_A, platformname=NEWPLATFORM_A)
print("getTestCaseAssignedTester TC_AA TP_A Platform A", response)
response = myTestLink.getTestCaseAssignedTester(
newTestPlanID_A, tc_aa_full_ext_id,
buildname=NEWBUILD_A, platformid=newPlatFormID_B)
print("getTestCaseAssignedTester TC_AA TP_A Platform B", response)
response = myTestLink.getTestCaseAssignedTester(
newTestPlanID_A, tc_b_full_ext_id,
buildname=NEWBUILD_A, platformname=NEWPLATFORM_B)
print("getTestCaseAssignedTester TC_B TP_A Platform B", response)
# get bugs for test case TC_AA in test plan A - state TC not executed
response = myTestLink.getTestCaseBugs(newTestPlanID_A,
testcaseexternalid=tc_aa_full_ext_id)
print("getTestCaseBugs TC_AA in TP_A (TC is not executed)", response)
# report Test Case Results for platform 'Big Bird' with step results
# TC_AA failed, build should be guessed, TC identified with external id
newResult = myTestLink.reportTCResult(None, newTestPlanID_A, None, 'f', '', guess=True,
testcaseexternalid=tc_aa_full_ext_id,
platformname=NEWPLATFORM_A,
execduration=3.9, timestamp='2015-09-18 14:33',
steps=[{'step_number' : 3, 'result' : 'p', 'notes' : 'result note for passed step 3'},
{'step_number' : 4, 'result' : 'f', 'notes' : 'result note for failed step 4'}] )
print("reportTCResult", newResult)
newResultID_AA = newResult[0]['id']
# get bugs for test case TC_AA in test plan A - state TC is executed
response = myTestLink.getTestCaseBugs(newTestPlanID_A,
testcaseexternalid=tc_aa_full_ext_id)
print("getTestCaseBugs TC_AA in TP_A (TC is executed, no bug)", response)
# report Test Case Results for platform 'Small Bird'
# TC_AA passed, build should be guessed, TC identified with external id
newResult = myTestLink.reportTCResult(None, newTestPlanID_A, None, 'p', '', guess=True,
testcaseexternalid=tc_aa_full_ext_id,
platformname=NEWPLATFORM_B,
execduration='2.2', timestamp='2015-09-19 14:33:02')
print("reportTCResult", newResult)
newResultID_AA_p = newResult[0]['id']
# TC_B passed, explicit build and some notes , TC identified with internal id
newResult = myTestLink.reportTCResult(newTestCaseID_B, newTestPlanID_A, NEWBUILD_A,
'p', 'first try', platformname=NEWPLATFORM_B)
print("reportTCResult", newResult)
newResultID_B = newResult[0]['id']
# add this (text) file as Attachment to the last execution of TC_B with a
# different filename 'MyPyExampleApiClient.py'
a_file=open(NEWATTACHMENT_PY)
newAttachment = myTestLink.uploadExecutionAttachment(a_file, newResultID_B,
'Textfile Example', 'Text Attachment Example for a TestCase Execution',
filename='MyPyExampleApiClient.py')
print("uploadExecutionAttachment", newAttachment)
# add png file as Attachment to the last execution of TC_AA
# !Attention - on WINDOWS use binary mode for non-text files
# see http://docs.python.org/2/tutorial/inputoutput.html#reading-and-writing-files
a_file=open(NEWATTACHMENT_PNG, mode='rb')
newAttachment = myTestLink.uploadExecutionAttachment(a_file, newResultID_AA,
'PNG Example', 'PNG Attachment Example for a TestCase Execution')
print("uploadExecutionAttachment", newAttachment)
# -- Create Build for TestPlan B (uses no platforms)
newBuild = myTestLink.createBuild(newTestPlanID_B, NEWBUILD_B,
'Build for TestPlan without platforms', releasedate='2016-11-30')
print("createBuild", newBuild)
newBuildID_B = newBuild[0]['id']
print("New Build '%s' - id: %s" % (NEWBUILD_B, newBuildID_B))
# assign user to test case execution tasks - test plans without platforms
response = myTestLink.assignTestCaseExecutionTask( myTestUserName,
newTestPlanID_B, tc_b_full_ext_id, buildname=NEWBUILD_B)
print("assignTestCaseExecutionTask", response)
# get test case assigned tester
response = myTestLink.getTestCaseAssignedTester(
newTestPlanID_B, tc_b_full_ext_id, buildname=NEWBUILD_B)
print("getTestCaseAssignedTester TC_B TP_B no Platform", response)
# try to remove not assigned tester
response = myTestLink.unassignTestCaseExecutionTask(
newTestPlanID_B, tc_b_full_ext_id, buildname=NEWBUILD_B,
user=myTestUserName2)
print("unassignTestCaseExecutionTask not assigned user", response)
response = myTestLink.getTestCaseAssignedTester(
newTestPlanID_B, tc_b_full_ext_id, buildname=NEWBUILD_B)
print("getTestCaseAssignedTester TC_B TP_B no Platform", response)
# try to remove all assigned tester
response = myTestLink.unassignTestCaseExecutionTask(
newTestPlanID_B, tc_b_full_ext_id, buildid=newBuildID_B,
action='unassignAll')
print("unassignTestCaseExecutionTask unassignAll", response)
response = myTestLink.getTestCaseAssignedTester(
newTestPlanID_B, tc_b_full_ext_id, buildname=NEWBUILD_B)
print("getTestCaseAssignedTester TC_B TP_B no Platform", response)
# reassign user to test case execution tasks - test plans without platforms
response = myTestLink.assignTestCaseExecutionTask( myTestUserName,
newTestPlanID_B, tc_b_full_ext_id, buildid=newBuildID_B)
print("assignTestCaseExecutionTask", response)
response = myTestLink.getTestCaseAssignedTester(
newTestPlanID_B, tc_b_full_ext_id, buildname=NEWBUILD_B)
print("getTestCaseAssignedTester TC_B TP_B no Platform", response)
# TC_B blocked (without platform), explicit build and some notes ,
# TC identified with internal id, report by myTestUserName
newResult = myTestLink.reportTCResult(newTestCaseID_B, newTestPlanID_B, NEWBUILD_B,
'f', "no birds are singing", bugid='007',
user=myTestUserName)
print("reportTCResult", newResult)
newResultID_B_f = newResult[0]['id']
newResult = myTestLink.reportTCResult(newTestCaseID_B, newTestPlanID_B, NEWBUILD_B,
'b', "hungry birds blocks the execution",
bugid='008', user=myTestUserName)
print("reportTCResult", newResult)
newResultID_B_b = newResult[0]['id']
# get bugs for test case TC_B in test plan B - state TC is executed with bug
response = myTestLink.getTestCaseBugs(newTestPlanID_B,
testcaseid=newTestCaseID_B)
print("getTestCaseBugs TC_B in TP_B (TC is executed with 2 bugs)", response)
# now we make a mistake and commit the same result a second time
# and try to delete this mistake
newResult = myTestLink.reportTCResult(newTestCaseID_B, newTestPlanID_B, NEWBUILD_B,
'b', "mistake, commit same result a second time")
print("reportTCResult", newResult)
newResultID_B_b2 = int(newResult[0]['id'])
try:
# if TL configuration allows deletion of executions, no error will occur
response = myTestLink.deleteExecution(newResultID_B_b2)
print("deleteExecution", response)
except TLResponseError as tl_err:
if tl_err.code == 232:
# TL configuration does not allow deletion of executions
pass
else:
# sh..: another problem occurs
raise
# now we try to change the execution types of the test cases
# - AA from manual -> auto and B from auto -> manual
newResult = myTestLink.setTestCaseExecutionType(tc_aa_full_ext_id, tc_version,
newProjectID, AUTOMATED)
print("setTestCaseExecutionType", response)
newResult = myTestLink.setTestCaseExecutionType(tc_b_full_ext_id, tc_version,
newProjectID, MANUAL)
print("setTestCaseExecutionType", response)
# create TestPlan C with Platform, Build , TestCase, assigned TestCase
# and delete it
newTestPlan = myTestLink.createTestPlan(NEWTESTPLAN_C, NEWPROJECT,
notes='TestPlan for delete test.',
active=1, public=1)
print("createTestPlan for DeleteTest", newTestPlan)
newTestPlanID_C = newTestPlan[0]['id']
print("Test Plan '%s' - id: %s" % (NEWTESTPLAN_C,newTestPlanID_C))
newBuild = myTestLink.createBuild(newTestPlanID_C, NEWBUILD_C,
'Build for TestPlan delete test')
print("createBuild for DeleteTest", newBuild)
newBuildID_C = newBuild[0]['id']
print("Build '%s' - id: %s" % (NEWBUILD_C, newBuildID_C))
response = myTestLink.addPlatformToTestPlan(newTestPlanID_C, NEWPLATFORM_C)
print("addPlatformToTestPlan", response)
response = myTestLink.addTestCaseToTestPlan(newProjectID, newTestPlanID_C,
tc_aa_full_ext_id, tc_version, platformid=newPlatFormID_C)
print("addTestCaseToTestPlan", response)
response = myTestLink.assignTestCaseExecutionTask( myTestUserName,
newTestPlanID_C, tc_aa_full_ext_id, buildid=newBuildID_C,
platformid=newPlatFormID_C)
print("assignTestCaseExecutionTask", response)
newResult = myTestLink.reportTCResult(newTestCaseID_AA, newTestPlanID_C,
NEWBUILD_C, 'p', "TP delete test",
platformname=NEWPLATFORM_C)
print("reportTCResult", newResult)
newResultID_B = newResult[0]['id']
newAttachment = myTestLink.uploadExecutionAttachment(NEWATTACHMENT_PY, newResultID_B,
'Textfile Example', 'Attachment Example for a TC Execution and TP delete test',
filename='MyPyTPDeleteTest.py')
print("uploadExecutionAttachment", newAttachment)
response = myTestLink.getTotalsForTestPlan(newTestPlanID_C)
print("getTotalsForTestPlan before delete", response)
response = myTestLink.deleteTestPlan(newTestPlanID_C)
print("deleteTestPlan", response)
try:
response = myTestLink.getTotalsForTestPlan(newTestPlanID_C)
print("getTotalsForTestPlan after delete", response)
except TLResponseError as tl_err:
print(tl_err.message)
# -- Create Build D and copy Testers from Build A
newBuild = myTestLink.createBuild(newTestPlanID_A, NEWBUILD_D,
'Build with copied testers from Build ' + NEWBUILD_A,
active=1, open=1, copytestersfrombuild=newBuildID_A)
print("createBuild", newBuild)
newBuildID_D = newBuild[0]['id']
print("New Build '%s' - id: %s" % (NEWBUILD_D, newBuildID_D))
# get information - TestProject
response = myTestLink.getTestProjectByName(NEWPROJECT)
print("getTestProjectByName", response)
response = myTestLink.getProjectTestPlans(newProjectID)
print("getProjectTestPlans", response)
response = myTestLink.getFirstLevelTestSuitesForTestProject(newProjectID)
print("getFirstLevelTestSuitesForTestProject", response)
response = myTestLink.getProjectPlatforms(newProjectID)
print("getProjectPlatforms", response)
response = myTestLink.getProjectKeywords(newProjectID)
print("getProjectKeywords", response)
# get information - testPlan
response = myTestLink.getTestPlanByName(NEWPROJECT, NEWTESTPLAN_A)
print("getTestPlanByName", response)
response = myTestLink.getTotalsForTestPlan(newTestPlanID_A)
print("getTotalsForTestPlan", response)
response = myTestLink.getBuildsForTestPlan(newTestPlanID_A)
print("getBuildsForTestPlan", response)
response = myTestLink.getLatestBuildForTestPlan(newTestPlanID_A)
print("getLatestBuildForTestPlan", response)
response = myTestLink.getTestPlanPlatforms(newTestPlanID_A)
print("getTestPlanPlatforms", response)
response = myTestLink.getTestSuitesForTestPlan(newTestPlanID_A)
print("getTestSuitesForTestPlan", response)
# get failed Testcases
# -- Start CHANGE v0.4.5 --
#response = myTestLink.getTestCasesForTestPlan(newTestPlanID_A, 'executestatus=f')
response = myTestLink.getTestCasesForTestPlan(newTestPlanID_A, executestatus='f')
# -- END CHANGE v0.4.5 --
print("getTestCasesForTestPlan A failed ", response)
# get Testcases for Platform SmallBird
response = myTestLink.getTestCasesForTestPlan(newTestPlanID_A, platformid=newPlatFormID_B)
print("getTestCasesForTestPlan A SmallBirds", response)
# get information - TestSuite
response = myTestLink.getTestSuiteByID(newTestSuiteID_B)
print("getTestSuiteByID", response)
response = myTestLink.getTestSuitesForTestSuite(newTestSuiteID_A)
print("getTestSuitesForTestSuite A", response)
response = myTestLink.getTestCasesForTestSuite(newTestSuiteID_A, True, 'full')
print("getTestCasesForTestSuite A", response)
response = myTestLink.getTestCasesForTestSuite(newTestSuiteID_B, False, 'only_id')
print("getTestCasesForTestSuite B", response)
# Update test suite B details - Using Project ID
updatedTestSuite = myTestLink.updateTestSuite(newTestSuiteID_B,
testprojectid=newProjectID,
details="updated Details of the Test Suite B")
print("updateTestSuite", updatedTestSuite)
# Update test suite A name and order details - Using Project Name
# with TL 1.9.15 this step fails - solution see TL Mantis Ticket 7696
# <http://mantis.testlink.org/view.php?id=7696>
changedNEWTESTSUITE_A = NEWTESTSUITE_A + ' - Changed'
updatedTestSuite = myTestLink.updateTestSuite(newTestSuiteID_A, prefix=NEWPREFIX,
testsuitename = changedNEWTESTSUITE_A, order=1)
print("updateTestSuite", updatedTestSuite)
# get all test suites, using the same name - test Suite B
response = myTestLink.getTestSuite(NEWTESTSUITE_B, NEWPREFIX)
print("getTestSuite", response)
# get information - TestCase
# -- Start CHANGE v0.4.5 --
#response = myTestLink.getTestCaseIDByName(NEWTESTCASE_B, None, NEWPROJECT)
response = myTestLink.getTestCaseIDByName(NEWTESTCASE_B, testprojectname=NEWPROJECT)
# -- END CHANGE v0.4.5 --
print("getTestCaseIDByName", response)
tcpathname = '::'.join([NEWPROJECT, changedNEWTESTSUITE_A, NEWTESTSUITE_AA, NEWTESTCASE_AA])
response = myTestLink.getTestCaseIDByName('unknown', testcasepathname=tcpathname)
print("getTestCaseIDByName", response)
# get execution result
response = myTestLink.getLastExecutionResult(newTestPlanID_A, None,
testcaseexternalid=tc_aa_full_ext_id)
print("getLastExecutionResult", response)
response = myTestLink.getLastExecutionResult(newTestPlanID_A, newTestCaseID_B)
print("getLastExecutionResult", response)
if not myTLVersion == '<= 1.9.8':
# new optional arguments platformid , buildid with TL 1.9.9
response = myTestLink.getLastExecutionResult(
newTestPlanID_A, newTestCaseID_AA,
platformid=newPlatFormID_A)
print("getLastExecutionResult", response)
response = myTestLink.getExecCountersByBuild(newTestPlanID_A)
print("getExecCountersByBuild", response)
response = myTestLink.getExecCountersByBuild(newTestPlanID_B)
print("getExecCountersByBuild", response)
response = myTestLink.getTestCaseKeywords(testcaseexternalid=tc_b_full_ext_id)
print("getTestCaseKeywords noKeyWords", response)
# get information - general
response = myTestLink.getFullPath(int(newTestSuiteID_AA))
print("getFullPath", response)
response = myTestLink.getFullPath([int(newTestCaseID_AA), int(newTestCaseID_B)])
print("getFullPath", response)
# attachments
# add png file as Attachment to test project
a_file=open(NEWATTACHMENT_PNG, mode='rb')
newAttachment = myTestLink.uploadTestProjectAttachment(a_file, newProjectID,
title='PNG Example', description='PNG Attachment Example for a TestProject')
print("uploadTestProjectAttachment", newAttachment)
# add png file as Attachment to test suite A - the upload*Attachment methods also accept a file path
newAttachment = myTestLink.uploadTestSuiteAttachment(NEWATTACHMENT_PNG, newTestSuiteID_A,
title='PNG Example', description='PNG Attachment Example for a TestSuite')
print("uploadTestSuiteAttachment", newAttachment)
# add png file as Attachment to test case B
a_file=open(NEWATTACHMENT_PNG, mode='rb')
newAttachment = myTestLink.uploadTestCaseAttachment(a_file, newTestCaseID_B,
title='PNG Example', description='PNG Attachment Example for a TestCase')
print("uploadTestCaseAttachment", newAttachment)
# get Attachment of test case B
# response = myTestLink.getTestCaseAttachments(testcaseexternalid=tc_aa_full_ext_id)
# print "getTestCaseAttachments", response
response = myTestLink.getTestCaseAttachments(testcaseid=newTestCaseID_B)
print("getTestCaseAttachments", response)
# copy test case - as a new TC version
print("create new version of TC B")
response = myTestLink.copyTCnewVersion(newTestCaseID_B,
summary='new version of TC B', importance='1')
print('copyTCnewVersion', response)
# copy test case - as new TC in a different TestSuite
print("copy TC B as TC BA into Test suite A")
response = myTestLink.copyTCnewTestCase(newTestCaseID_B,
testsuiteid=newTestSuiteID_A, testcasename='%sA' % NEWTESTCASE_B)
print('copyTCnewTestCase', response)
response = myTestLink.getTestCasesForTestSuite(newTestSuiteID_B, False, 'simple')
print('getTestCasesForTestSuite B', response)
response = myTestLink.getTestCasesForTestSuite(newTestSuiteID_A, True, 'simple')
print('getTestCasesForTestSuite A', response)
# sample of how the test plan can be updated to use the new tc version
# side effect of this step: assigned testers and existing execution results are
# no longer accessible via the TL Web GUI.
# That is the reason why it is commented out for the normal sample execution
# response = myTestLink.addTestCaseToTestPlan(newProjectID, newTestPlanID_B,
# tc_b_full_ext_id, tc_version+1,
# overwrite=1)
# print("addTestCaseToTestPlan overwrite", response)
# no test data
# response = myTestLink.getTestCaseCustomFieldDesignValue(
# tc_aa_full_ext_id, 1, newProjectID, 'cfieldname', 'simple')
# print "getTestCaseCustomFieldDesignValue", response
print("getTestCaseCustomFieldDesignValue", "Sorry currently no testdata")
# add png file as Attachment to a requirement specification.
print("uploadRequirementSpecificationAttachment", "Sorry currently no testdata")
# add png file as Attachment to a requirement.
print("uploadRequirementAttachment", "Sorry currently no testdata")
# add requirements to testcase AA
# response = myTestLink.assignRequirements(tc_aa_full_ext_id, newProjectID,
# [{'req_spec' : 6729, 'requirements' : [6731]},
# {'req_spec' : 6733, 'requirements' : [6735, 6737]}])
print("assignRequirements", "Sorry currently no testdata")
print("")
print("Number of Projects in TestLink: %s " % myTestLink.countProjects())
print("Number of Platforms (in TestPlans): %s " % myTestLink.countPlatforms())
print("Number of Builds : %s " % myTestLink.countBuilds())
print("Number of TestPlans : %s " % myTestLink.countTestPlans())
print("Number of TestSuites : %s " % myTestLink.countTestSuites())
print("Number of TestCases (in TestSuites): %s " % myTestLink.countTestCasesTS())
print("Number of TestCases (in TestPlans) : %s " % myTestLink.countTestCasesTP())
print("")
print()
print("")
myTestLink.listProjects()
|
|
# Copyright (C) 2011 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Base class used to start servers used by the layout tests."""
import errno
import logging
import socket
import tempfile
import time
_log = logging.getLogger(__name__)
class ServerError(Exception):
pass
class ServerBase(object):
"""A skeleton class for starting and stopping servers used by the layout tests."""
def __init__(self, port_obj, output_dir):
self._port_obj = port_obj
self._executive = port_obj._executive
self._filesystem = port_obj._filesystem
self._platform = port_obj.host.platform
self._output_dir = output_dir
# We need a non-checkout-dependent place to put lock files, etc. We
# don't use the Python default on the Mac because it defaults to a
# randomly-generated directory under /var/folders and no one would ever
# look there.
tmpdir = tempfile.gettempdir()
if self._platform.is_mac():
tmpdir = '/tmp'
self._runtime_path = self._filesystem.join(tmpdir, "WebKit")
self._filesystem.maybe_make_directory(self._runtime_path)
# Subclasses must override these fields.
self._name = '<virtual>'
self._log_prefixes = tuple()
self._mappings = {}
self._pid_file = None
self._start_cmd = None
# Subclasses may override these fields.
self._env = None
self._cwd = None
self._stdout = self._executive.PIPE
self._stderr = self._executive.PIPE
self._process = None
self._pid = None
self._error_log_path = None
def start(self):
"""Starts the server. It is an error to start an already started server.
This method also stops any stale servers started by a previous instance."""
assert not self._pid, '%s server is already running' % self._name
# Stop any stale servers left over from previous instances.
if self._filesystem.exists(self._pid_file):
try:
self._pid = int(self._filesystem.read_text_file(self._pid_file))
_log.debug('stale %s pid file, pid %d', self._name, self._pid)
self._stop_running_server()
except (ValueError, UnicodeDecodeError):
# These could be raised if the pid file is corrupt.
self._remove_pid_file()
self._pid = None
self._remove_stale_logs()
self._prepare_config()
self._check_that_all_ports_are_available()
self._pid = self._spawn_process()
if self._wait_for_action(self._is_server_running_on_all_ports):
_log.debug("%s successfully started (pid = %d)", self._name, self._pid)
else:
self._log_errors_from_subprocess()
self._stop_running_server()
raise ServerError('Failed to start %s server' % self._name)
def stop(self):
"""Stops the server. Stopping a server that isn't started is harmless."""
actual_pid = None
try:
if self._filesystem.exists(self._pid_file):
try:
actual_pid = int(self._filesystem.read_text_file(self._pid_file))
except (ValueError, UnicodeDecodeError):
# These could be raised if the pid file is corrupt.
pass
if not self._pid:
self._pid = actual_pid
if not self._pid:
return
if not actual_pid:
_log.warning('Failed to stop %s: pid file is missing', self._name)
return
if self._pid != actual_pid:
_log.warning('Failed to stop %s: pid file contains %d, not %d',
self._name, actual_pid, self._pid)
# Try to kill the existing pid, anyway, in case it got orphaned.
self._executive.kill_process(self._pid)
self._pid = None
return
_log.debug("Attempting to shut down %s server at pid %d", self._name, self._pid)
self._stop_running_server()
_log.debug("%s server at pid %d stopped", self._name, self._pid)
self._pid = None
finally:
# Make sure we delete the pid file no matter what happens.
self._remove_pid_file()
def _prepare_config(self):
"""This routine can be overridden by subclasses to do any sort
of initialization required prior to starting the server that may fail."""
def _remove_stale_logs(self):
"""This routine can be overridden by subclasses to try and remove logs
left over from a prior run. This routine should log warnings if the
files cannot be deleted, but should not fail unless failure to
delete the logs will actually cause start() to fail."""
# Sometimes logs are open in other processes but they should clear eventually.
for log_prefix in self._log_prefixes:
try:
self._remove_log_files(self._output_dir, log_prefix)
except OSError:
_log.warning('Failed to remove old %s %s files', self._name, log_prefix)
def _spawn_process(self):
_log.debug('Starting %s server, cmd="%s"', self._name, self._start_cmd)
self._process = self._executive.popen(self._start_cmd,
env=self._env,
cwd=self._cwd,
stdout=self._stdout,
stderr=self._stderr)
pid = self._process.pid
self._filesystem.write_text_file(self._pid_file, str(pid))
return pid
def _stop_running_server(self):
self._wait_for_action(self._check_and_kill)
if self._filesystem.exists(self._pid_file):
self._filesystem.remove(self._pid_file)
def _check_and_kill(self):
if self._executive.check_running_pid(self._pid):
_log.debug('pid %d is running, killing it', self._pid)
self._executive.kill_process(self._pid)
return False
else:
_log.debug('pid %d is not running', self._pid)
return True
def _remove_pid_file(self):
if self._filesystem.exists(self._pid_file):
self._filesystem.remove(self._pid_file)
def _remove_log_files(self, folder, starts_with):
files = self._filesystem.listdir(folder)
for file in files:
if file.startswith(starts_with):
full_path = self._filesystem.join(folder, file)
self._filesystem.remove(full_path)
def _log_errors_from_subprocess(self):
_log.error('logging %s errors, if any', self._name)
if self._process:
_log.error('%s returncode %s', self._name, str(self._process.returncode))
if self._process.stderr:
stderr_text = self._process.stderr.read()
if stderr_text:
_log.error('%s stderr:', self._name)
for line in stderr_text.splitlines():
_log.error(' %s', line)
else:
_log.error('%s no stderr', self._name)
else:
_log.error('%s no stderr handle', self._name)
else:
_log.error('%s no process', self._name)
if self._error_log_path and self._filesystem.exists(self._error_log_path):
error_log_text = self._filesystem.read_text_file(self._error_log_path)
if error_log_text:
_log.error('%s error log (%s) contents:', self._name, self._error_log_path)
for line in error_log_text.splitlines():
_log.error(' %s', line)
else:
_log.error('%s error log empty', self._name)
_log.error('')
else:
_log.error('%s no error log', self._name)
def _wait_for_action(self, action, wait_secs=20.0, sleep_secs=1.0):
"""Repeat the action for wait_sec or until it succeeds, sleeping for sleep_secs
in between each attempt. Returns whether it succeeded."""
start_time = time.time()
while time.time() - start_time < wait_secs:
if action():
return True
_log.debug("Waiting for action: %s", action)
time.sleep(sleep_secs)
return False
def _is_server_running_on_all_ports(self):
"""Returns whether the server is running on all the desired ports."""
# TODO(dpranke): crbug/378444 maybe pid is unreliable on win?
if not self._platform.is_win() and not self._executive.check_running_pid(self._pid):
_log.debug("Server isn't running at all")
self._log_errors_from_subprocess()
raise ServerError("Server exited")
for mapping in self._mappings:
s = socket.socket()
port = mapping['port']
try:
s.connect(('localhost', port))
_log.debug("Server running on %d", port)
except IOError as e:
if e.errno not in (errno.ECONNREFUSED, errno.ECONNRESET):
raise
_log.debug("Server NOT running on %d: %s", port, e)
return False
finally:
s.close()
return True
def _check_that_all_ports_are_available(self):
for mapping in self._mappings:
s = socket.socket()
if not self._platform.is_win():
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
port = mapping['port']
try:
s.bind(('localhost', port))
except IOError as e:
if e.errno in (errno.EALREADY, errno.EADDRINUSE):
raise ServerError('Port %d is already in use.' % port)
elif self._platform.is_win() and e.errno in (errno.WSAEACCES,): # pylint: disable=E1101
raise ServerError('Port %d is already in use.' % port)
else:
raise
finally:
s.close()
_log.debug('all ports are available')
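# A minimal sketch of the fields a concrete server is expected to override
# (ExampleHTTPServer is illustrative, not an existing subclass):
#
#   class ExampleHTTPServer(ServerBase):
#       def __init__(self, port_obj, output_dir):
#           super(ExampleHTTPServer, self).__init__(port_obj, output_dir)
#           self._name = 'example-httpd'
#           self._log_prefixes = ('access_log-', 'error_log-')
#           self._mappings = [{'port': 8000}]
#           self._pid_file = self._filesystem.join(self._runtime_path, 'example-httpd.pid')
#           self._start_cmd = ['example-httpd', '--port', '8000']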
|
|
# -*- coding: UTF-8 -*-
import wx
from directActions import *
from classActions import *
from time import localtime
from caid.field import field
from global_vars import strtoArray
from scripture import scripture
from PythonEditor import Editor as PythonEditor
class Preferences():
def __init__(self, wk):
self._wk = wk
self._intersection = {}
self._coons = {}
# ... default initialization for intersection
self.set_intersection("npts", 50)
# ...
# ... default initialization for coons
self.set_coons("tol", 1.e-2)
# ...
@property
def workgroup(self):
return self._wk
@property
def intersection(self):
return self._intersection
def set_intersection(self, attribut, value):
self._intersection[attribut] = value
@property
def coons(self):
return self._coons
def set_coons(self, attribut, value):
self._coons[attribut] = value
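# A minimal usage sketch for Preferences (assuming `wk` is an existing
# WorkGroup instance; illustrative only):
#   prefs = Preferences(wk)
#   prefs.set_intersection("npts", 100)
#   print(prefs.intersection["npts"])   # -> 100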
class WorkGroup(wx.Frame):
def __init__(self, parent, empty=False):
self.parent = parent
self._preferences = Preferences(self)
self.list_geo = []
self.list_space = []
self.list_field = []
self.stockUndo = []
self.stockRedo = []
self.createInspector()
self.createViewer()
# self.createFields()
self.initialize(empty=empty)
self.directAct = directActions(self)
# needed when saving the workgroup
self.filename = None
# needed for temporary save
t = localtime()
tmp_file = "session-"\
+str(t.tm_year)+"-"+str(t.tm_mon)+"-"+str(t.tm_mday)\
+"_"+str(t.tm_hour)+"h"+str(t.tm_min)+"min"\
+".wkl"
self.tmp_filename = tmp_file
# auto save is activated by default
self.auto_save = True
self._macroRecording = False
self._macro_script = scripture(self)
self._pythonEditor = PythonEditor(self.parent, -1, '')
self._pythonEditor.Show(False)
@property
def pythonEditor(self):
return self._pythonEditor
def set_pythonEditor(self, edt):
self._pythonEditor = edt
@property
def preferences(self):
return self._preferences
def set_macroRecording(self, value):
self._macroRecording = value
@property
def macroRecording(self):
return self._macroRecording
@property
def macro_script(self):
return self._macro_script
def initialize(self, empty=False):
"""
        Create a default geometry object unless empty is True.
"""
if not empty:
from geometry import geometry
geo = geometry()
self.add_geometry(geo)
def createViewer(self):
from viewer import Viewer
self.viewer = Viewer(self, self.parent \
, pos=(500,50) \
, size=wx.Size(700,700))
self.viewer.Show(True)
def createInspector(self):
from inspector import Inspector
self.inspector = Inspector(self, self.parent, -1, 'Inspector' \
, pos=(500,50) \
, size=wx.Size(450,550))
self.inspector.Show(True)
def createSpaces(self):
from spaces import Inspector
self.spaces = Inspector(self, self.parent, -1, 'Spaces' \
, pos=(500,300) \
, size=wx.Size(450,550))
self.spaces.Show(True)
def createFields(self):
from fields import Inspector
self.fields = Inspector(self, self.parent, -1, 'Fields' \
, pos=(500,300) \
, size=wx.Size(450,550))
self.fields.Show(True)
def appendAction(self, undo):
self.stockUndo.append( undo )
if self.stockRedo:
try:
del self.stockRedo[:]
            except Exception:
                print("problem occurred while deleting stockRedo")
def add_geometry(self, geo, activeUndo=True):
self.list_geo.append(geo)
geoItem = self.inspector.add_geometry(geo)
geo.set_treeItem(geoItem)
# undo action
if activeUndo:
undo = UndoAddGeometry(self, geoItem, geo)
self.appendAction(undo)
self.Refresh()
return geoItem
def add_patch(self, geoItem, geo, patch, activeUndo=True):
geo.append(patch)
patchItem = self.inspector.add_patch(geoItem, geo, patch)
# undo action
if activeUndo:
undo = UndoAddPatch(self, patchItem, patch, geo, geoItem)
self.appendAction(undo)
self.Refresh()
return patchItem
def add_space(self, geo, testcase=None):
# create the Frame if list_space is empty
if len(self.list_space) == 0:
self.createSpaces()
from pigasus.fem.basicPDE import basicPDE
if testcase is None:
testcase = {}
testcase['AllDirichlet'] = True
PDE = basicPDE(geometry=geo, testcase=testcase)
V = PDE.space
self.list_space.append(V)
self.spaces.add_space(V)
self.Refresh()
def add_field(self, field):
if len(self.list_field) == 0:
self.createFields()
self.list_field.append(field)
self.fields.Show(True)
self.fields.add_field(field)
self.Refresh()
def remove_geometry(self, geoItem, geo, activeUndo=True):
# undo action
if activeUndo:
undo = UndoRemoveGeometry(self, geoItem, geo)
self.appendAction(undo)
# remove geo from the dictionary
self.list_geo.remove(geo)
# delete the corresponding item from the inspector
self.inspector.remove_geometry(geoItem)
self.inspector.reset_currentAll()
# refresh the viewer
self.Refresh()
def remove_patch(self, patchItem, patch, geo=None, activeUndo=True):
# remove patch from the dictionary
if geo is None:
geo = self.get_geometry_from_patch(patch)
# undo action
if activeUndo:
geoItem = self.inspector.tree.GetItemParent(patchItem)
undo = UndoRemovePatch(self, patchItem, patch, geo, geoItem)
self.appendAction(undo)
geo.remove_patch(patch)
print("%")
# delete the corresponding item from the inspector
self.inspector.remove_patch(patchItem)
print("%%")
self.inspector.reset_currentAll()
print("%%%")
# refresh the viewer
self.Refresh()
def remove_field(self, fieldItem, field):
# remove geo from the dictionary
self.list_field.remove(field)
# delete the corresponding item from the inspector
self.fields.remove_field(fieldItem)
self.fields.reset_currentAll()
# refresh the viewer
self.Refresh()
def get_geometry_from_patch(self, patch):
        # find the geometry that owns the given patch
        print("looking for patch", id(patch))
for geo in self.list_geo:
for nrb in geo:
                print(id(nrb))
if id(nrb) == id(patch):
print("found.")
return geo
print("Not found.")
def Refresh(self, inspector=False):
if inspector:
self.inspector.Refresh()
# save in temp file
if self.auto_save:
self.save(filename=self.tmp_filename)
self.viewer.drawWorld()
self.viewer.Refresh()
def message(self, txt):
self.viewer.statusbar.SetStatusText(txt)
def save(self, filename=None):
if filename is None:
filename = self.filename
# this means that self.filename is also None
if filename is None:
# Create a save file dialog
from global_vars import CAIDWorkGroupwildcard
dialog = wx.FileDialog ( None\
, style = wx.SAVE | wx.OVERWRITE_PROMPT\
, wildcard=CAIDWorkGroupwildcard)
# Show the dialog and get user input
if dialog.ShowModal() == wx.ID_OK:
filename = dialog.GetPath()
self.filename = filename
# The user did not select anything
else:
print('Nothing was selected.')
# Destroy the dialog
dialog.Destroy()
# ... create xml doc
from xml.dom.minidom import Document
# Create the minidom document
doc = Document()
        # Create the <caid> base element
        rootElt = doc.createElement("caid")
        # set camera attributes
eye = self.viewer.lookAt.GetEye()
rootElt.setAttribute("eye", str(eye))
center = self.viewer.lookAt.GetCenter()
rootElt.setAttribute("center", str(center))
up = self.viewer.lookAt.GetUp()
rootElt.setAttribute("up", str(up))
doc.appendChild(rootElt)
# ...
# ...
themeElt, doc = self.viewer.theme.save(doc=doc)
rootElt.appendChild(themeElt)
# ...
# ...
from caid.io import XML
io = XML()
for geo in self.list_geo:
geo.save_attributs()
geoElt = doc.createElement("geometry")
doc = io.geotoxml(geo, doc, geoElt)
rootElt.appendChild(geoElt)
# ...
if filename is not None:
with open( filename, 'w' ) as f:
f.write( doc.toprettyxml() )
else:
print("No file was specified")
def open(self, filename=None):
if filename is not None:
self.filename = filename
else:
from global_vars import CAIDWorkGroupwildcard
# Create an open file dialog
dialog = wx.FileDialog(None\
, style = wx.OPEN\
, wildcard=CAIDWorkGroupwildcard)
# Show the dialog and get user input
if dialog.ShowModal() == wx.ID_OK:
self.filename = dialog.GetPath()
# The user did not select anything
else:
print('Nothing was selected.')
# Destroy the dialog
dialog.Destroy()
from caid.cad_geometry import cad_geometry
from caid.io import XML
from geometry import geometry
io = XML()
from xml.dom.minidom import parse
doc = parse(self.filename)
rootElt = doc.documentElement
        # read attributes
        # get camera attributes
eye = strtoArray(rootElt.getAttribute("eye"))
self.viewer.lookAt.SetEye(eye)
center = strtoArray(rootElt.getAttribute("center"))
self.viewer.lookAt.SetCenter(center)
up = strtoArray(rootElt.getAttribute("up"))
self.viewer.lookAt.SetUp(up)
        # get color attributes
# ...
try:
self.viewer.theme.load(rootElt=rootElt)
        except Exception:
            print("Theme cannot be loaded. Dark theme will be used.")
self.viewer.theme.set_theme("dark")
# ...
for geoElt in rootElt.getElementsByTagName("geometry"):
geo = cad_geometry()
io.xmltogeo(geo, doc, geoElt)
_geo = geometry(geo)
_geo.load_attributs()
self.add_geometry(_geo)
self.Refresh()
# sets the temporary file for auto-save
tmp = self.filename.split('/')[-1]
basedir = self.filename.split(tmp)[0]
self.tmp_filename = basedir+"~"+tmp
class WorkGroupTree(wx.TreeCtrl):
'''Our customized TreeCtrl class
'''
def __init__(self, parent, frame, id, position, size, style):
'''Initialize our tree
'''
wx.TreeCtrl.__init__(self, parent, id, position, size, style)
root = self.AddRoot('caid')
self.root = root
# self.parent = parent
self.parent = frame
self.dict_WorkGroup = {}
self._currentWorkGroup = None
self._lastWorkGroup = None
self.menu_titles = [ "New Geometry"\
, "Import"\
, "Export"\
, "Save"\
, "Duplicate"\
, "New Scalar Field"\
, "Import Scalar Field"\
, "Delete" ]
self.menu_title_by_id = {}
for title in self.menu_titles:
self.menu_title_by_id[ wx.NewId() ] = title
@property
def currentWorkGroup(self):
if self._currentWorkGroup is None:
self._currentWorkGroup = self._lastWorkGroup
return self._currentWorkGroup
def createWorkGroup(self, empty=False):
wk = WorkGroup(self.parent, empty=empty)
self.dict_WorkGroup[id(wk)] = wk
TAG = "-" + str(len(self.dict_WorkGroup))
wkItem = self.AppendItem( self.root, 'WorkGroup'+TAG, -1, -1, wk )
inspectorItem = self.AppendItem( wkItem, 'Inspector', -1, -1, wk )
viewerItem = self.AppendItem( wkItem, 'Viewer' , -1, -1, wk )
spacesItem = self.AppendItem( wkItem, 'Spaces' , -1, -1, wk )
fieldsItem = self.AppendItem( wkItem, 'Fields' , -1, -1, wk )
return wk
def SelectedViewer(self, item):
obj = self.GetItemData( item )
if obj.__class__.__name__ in ["Viewer"]:
return True
else:
return False
def SelectedInspector(self, item):
obj = self.GetItemData( item )
if obj.__class__.__name__ in ["Inspector"]:
return True
else:
return False
def GetCurrentWorkGroup(self, event):
"""
        return the current workgroup independently of
        the selected item (viewer, inspector, workgroup)
"""
item = event.GetItem()
obj = self.GetItemData(item)
txt = self.GetItemText(item)
tag = txt.split("-")[0]
if tag in ["WorkGroup","Inspector","Viewer"]:
return obj
else:
return None
def OnSelChanged(self, event):
'''Method called when selected item is changed
'''
# Get the selected item object
item = event.GetItem()
self._currentWorkGroup = self.GetCurrentWorkGroup(event)
if self._currentWorkGroup is not None:
self._lastWorkGroup = self._currentWorkGroup
def OnRightMouseClick(self, event):
### 2. Launcher creates wxMenu. ###
menu = wx.Menu()
for (id,title) in list(self.menu_title_by_id.items()):
### 3. Launcher packs menu with Append. ###
title_id = menu.Append( id, title )
### 4. Launcher registers menu handlers with EVT_MENU, on the menu. ###
menu.Bind( wx.EVT_MENU, self.MenuSelectionCb, title_id )
### 5. Launcher displays menu with call to PopupMenu, invoked on the source component, passing event's GetPoint. ###
self.parent.PopupMenu( menu, event.GetPoint() )
menu.Destroy() # destroy to avoid mem leak
def MenuSelectionCb( self, event ):
# do something
operation = self.menu_title_by_id[ event.GetId() ]
if operation == "New Geometry":
from geometry import geometry
geo = geometry()
wk = self.currentWorkGroup
wk.add_geometry(geo)
if operation == "Save":
self.currentWorkGroup.save()
if operation == "New Scalar Field":
F = field()
wk = self.currentWorkGroup
wk.add_field(F)
if operation == "Import Scalar Field":
filename = None
from global_vars import CAIDFieldWildcard
# Create an open file dialog
dialog = wx.FileDialog(None\
, style = wx.OPEN\
, wildcard=CAIDFieldWildcard)
# Show the dialog and get user input
if dialog.ShowModal() == wx.ID_OK:
filename = dialog.GetPath()
# The user did not select anything
else:
print('Nothing was selected.')
# Destroy the dialog
dialog.Destroy()
if filename is not None:
U = field()
U.open(filename)
wk = self.currentWorkGroup
wk.add_field(U)
wk.Refresh()
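# A minimal usage sketch (hypothetical: assumes a running wx.App with a parent
# panel/frame, as used elsewhere in CAID). It exercises the flow defined above:
# create a WorkGroup via the tree, add a geometry, then save the session.
#
#     tree = WorkGroupTree(panel, frame, -1, wx.DefaultPosition,
#                          wx.DefaultSize, wx.TR_DEFAULT_STYLE)
#     wk = tree.createWorkGroup()
#     from geometry import geometry
#     wk.add_geometry(geometry())
#     wk.save(filename="session.wkl")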
|
|
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
System-level utilities and helper functions.
"""
import errno
import logging as stdlib_logging
import os
import random
import shlex
import signal
from eventlet.green import subprocess
from eventlet import greenthread
import six
from brick.openstack.common.gettextutils import _
from brick.openstack.common import log as logging
LOG = logging.getLogger(__name__)
class InvalidArgumentError(Exception):
def __init__(self, message=None):
super(InvalidArgumentError, self).__init__(message)
class UnknownArgumentError(Exception):
def __init__(self, message=None):
super(UnknownArgumentError, self).__init__(message)
class ProcessExecutionError(Exception):
def __init__(self, stdout=None, stderr=None, exit_code=None, cmd=None,
description=None):
self.exit_code = exit_code
self.stderr = stderr
self.stdout = stdout
self.cmd = cmd
self.description = description
if description is None:
description = _("Unexpected error while running command.")
if exit_code is None:
exit_code = '-'
message = _('%(description)s\n'
'Command: %(cmd)s\n'
'Exit code: %(exit_code)s\n'
'Stdout: %(stdout)r\n'
'Stderr: %(stderr)r') % {'description': description,
'cmd': cmd,
'exit_code': exit_code,
'stdout': stdout,
'stderr': stderr}
super(ProcessExecutionError, self).__init__(message)
class NoRootWrapSpecified(Exception):
def __init__(self, message=None):
super(NoRootWrapSpecified, self).__init__(message)
def _subprocess_setup():
# Python installs a SIGPIPE handler by default. This is usually not what
# non-Python subprocesses expect.
signal.signal(signal.SIGPIPE, signal.SIG_DFL)
def execute(*cmd, **kwargs):
"""Helper method to shell out and execute a command through subprocess.
Allows optional retry.
:param cmd: Passed to subprocess.Popen.
:type cmd: string
:param process_input: Send to opened process.
:type process_input: string
:param check_exit_code: Single bool, int, or list of allowed exit
codes. Defaults to [0]. Raise
:class:`ProcessExecutionError` unless
                            program exits with one of these codes.
:type check_exit_code: boolean, int, or [int]
:param delay_on_retry: True | False. Defaults to True. If set to True,
wait a short amount of time before retrying.
:type delay_on_retry: boolean
:param attempts: How many times to retry cmd.
:type attempts: int
:param run_as_root: True | False. Defaults to False. If set to True,
the command is prefixed by the command specified
in the root_helper kwarg.
:type run_as_root: boolean
:param root_helper: command to prefix to commands called with
run_as_root=True
:type root_helper: string
:param shell: whether or not there should be a shell used to
execute this command. Defaults to false.
:type shell: boolean
:param loglevel: log level for execute commands.
:type loglevel: int. (Should be stdlib_logging.DEBUG or
stdlib_logging.INFO)
:returns: (stdout, stderr) from process execution
:raises: :class:`UnknownArgumentError` on
receiving unknown arguments
:raises: :class:`ProcessExecutionError`
"""
process_input = kwargs.pop('process_input', None)
check_exit_code = kwargs.pop('check_exit_code', [0])
ignore_exit_code = False
delay_on_retry = kwargs.pop('delay_on_retry', True)
attempts = kwargs.pop('attempts', 1)
run_as_root = kwargs.pop('run_as_root', False)
root_helper = kwargs.pop('root_helper', '')
shell = kwargs.pop('shell', False)
loglevel = kwargs.pop('loglevel', stdlib_logging.DEBUG)
if isinstance(check_exit_code, bool):
ignore_exit_code = not check_exit_code
check_exit_code = [0]
elif isinstance(check_exit_code, int):
check_exit_code = [check_exit_code]
if kwargs:
raise UnknownArgumentError(_('Got unknown keyword args '
'to utils.execute: %r') % kwargs)
if run_as_root and hasattr(os, 'geteuid') and os.geteuid() != 0:
if not root_helper:
raise NoRootWrapSpecified(
message=_('Command requested root, but did not '
'specify a root helper.'))
cmd = shlex.split(root_helper) + list(cmd)
    cmd = [str(c) for c in cmd]  # use a list, not an iterator: cmd is consumed more than once below
while attempts > 0:
attempts -= 1
try:
LOG.log(loglevel, 'Running cmd (subprocess): %s',
' '.join(cmd))
_PIPE = subprocess.PIPE # pylint: disable=E1101
if os.name == 'nt':
preexec_fn = None
close_fds = False
else:
preexec_fn = _subprocess_setup
close_fds = True
obj = subprocess.Popen(cmd,
stdin=_PIPE,
stdout=_PIPE,
stderr=_PIPE,
close_fds=close_fds,
preexec_fn=preexec_fn,
shell=shell)
result = None
for _i in six.moves.range(20):
# NOTE(russellb) 20 is an arbitrary number of retries to
# prevent any chance of looping forever here.
try:
if process_input is not None:
result = obj.communicate(process_input)
else:
result = obj.communicate()
except OSError as e:
if e.errno in (errno.EAGAIN, errno.EINTR):
continue
raise
break
obj.stdin.close() # pylint: disable=E1101
_returncode = obj.returncode # pylint: disable=E1101
LOG.log(loglevel, 'Result was %s' % _returncode)
if not ignore_exit_code and _returncode not in check_exit_code:
(stdout, stderr) = result
raise ProcessExecutionError(exit_code=_returncode,
stdout=stdout,
stderr=stderr,
cmd=' '.join(cmd))
return result
except ProcessExecutionError:
if not attempts:
raise
else:
LOG.log(loglevel, '%r failed. Retrying.', cmd)
if delay_on_retry:
greenthread.sleep(random.randint(20, 200) / 100.0)
finally:
# NOTE(termie): this appears to be necessary to let the subprocess
# call clean something up in between calls, without
# it two execute calls in a row hangs the second one
greenthread.sleep(0)
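# A minimal usage sketch for execute() (hypothetical commands and root helper;
# not part of the original module), showing the keyword arguments documented
# in the docstring above:
#
#     out, err = execute('cat', '/etc/hostname')
#     out, err = execute('parted', '--script', '/dev/sdb', 'print',
#                        run_as_root=True, root_helper='sudo',
#                        check_exit_code=[0, 1], attempts=3)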
def trycmd(*args, **kwargs):
"""A wrapper around execute() to more easily handle warnings and errors.
Returns an (out, err) tuple of strings containing the output of
the command's stdout and stderr. If 'err' is not empty then the
command can be considered to have failed.
    :param discard_warnings: True | False. Defaults to False. If set to True,
                             then for succeeding commands, stderr is cleared.
"""
discard_warnings = kwargs.pop('discard_warnings', False)
try:
out, err = execute(*args, **kwargs)
failed = False
except ProcessExecutionError as exn:
out, err = '', str(exn)
failed = True
if not failed and discard_warnings and err:
# Handle commands that output to stderr but otherwise succeed
err = ''
return out, err
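# Sketch of how trycmd() differs from execute(): it never raises
# ProcessExecutionError, so callers inspect err instead (the command shown is
# hypothetical):
#
#     out, err = trycmd('umount', '/mnt/volume', discard_warnings=True)
#     if err:
#         LOG.warning('unmount failed: %s', err)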
def ssh_execute(ssh, cmd, process_input=None,
addl_env=None, check_exit_code=True):
LOG.debug('Running cmd (SSH): %s', cmd)
if addl_env:
raise InvalidArgumentError(_('Environment not supported over SSH'))
if process_input:
# This is (probably) fixable if we need it...
raise InvalidArgumentError(_('process_input not supported over SSH'))
stdin_stream, stdout_stream, stderr_stream = ssh.exec_command(cmd)
channel = stdout_stream.channel
# NOTE(justinsb): This seems suspicious...
# ...other SSH clients have buffering issues with this approach
stdout = stdout_stream.read()
stderr = stderr_stream.read()
stdin_stream.close()
exit_status = channel.recv_exit_status()
# exit_status == -1 if no exit code was returned
if exit_status != -1:
LOG.debug('Result was %s' % exit_status)
if check_exit_code and exit_status != 0:
raise ProcessExecutionError(exit_code=exit_status,
stdout=stdout,
stderr=stderr,
cmd=cmd)
return (stdout, stderr)
|
|
import os
import pickle
import random
import sys
from random import shuffle
import numpy as np
from Global import get_word_vect
from RAE_adam_herical import rae_trainning_normal
from utility1 import get_words_id, get_parents, get_dep, pdep_2_deporder_dep, dep_2_hid_var, save2pickle, get_weight_matrices, init_weight, remove_dep,get_zero_weight_matrices, zero_weight
import Global
def cleanwd(wds):
rmwd = []
for i in wds:
if wds[i]['pid']==-1:
rmwd.append(i)
for j in rmwd:
wds.pop(j)
return wds
def wd_preprocess(word_data):
twd = cleanwd(word_data)
d = get_dep(twd)
try:
d, req_ = remove_dep(d)
except MemoryError:
print get_dep(twd)
return -1,-1,-1,-1,-1
idlist = [i for i in twd]
if req_:
for id in idlist:
if (twd[id]['wid'],twd[id]['pid']) in req_:
twd.pop(id)
idlist = [i for i in twd if type(i) is not int]
for wd in idlist:
twd[int(wd)] = twd[wd]
twd.pop(wd)
p = get_parents(twd)
Word_ids = get_words_id(twd)
w_size = len(Word_ids)
dep_order, d1 = pdep_2_deporder_dep(p, d)
h_index, h_vect, wp , hh_index = dep_2_hid_var(p, dep_order, d1, Word_ids)
return h_index, h_vect, wp, Word_ids, w_size, dep_order, hh_index, p
def word_to_extra(words_data, w, w_range, h_size):
wd_extra = []
wpresent = []
for iword in range(len(words_data)):
        # skip entries flagged as invalid (-2) or empty
        if words_data[iword] == -2 or not words_data[iword]:
continue
h_index, h_vect, wp, Word_ids, w_size, dep_order, hh_index, p = wd_preprocess(words_data[iword])
if h_index == -1:
continue
cflag = 0
for i in wp:
if len(i) > abs(w_range / 2):
cflag = 1
break
for j in i:
wpresent.append(j)
if type(w[j]) != np.ndarray:
w[j] = init_weight(h_size, h_size)
Global.m[j] = zero_weight(h_size, h_size)
Global.v[j] = zero_weight(h_size, h_size)
Global.lr[j] = neta
if cflag == 1:
continue
wpresent = list(set(wpresent))
Word_vects = []
try:
for i in sorted(words_data[iword]):
Word_vects.append(get_word_vect(words_data[iword][i]['word'].lower(), Global.v_size))
except KeyError:
continue
wd_extra.append({'w_size': w_size, 'h_index': h_index, 'h_vect': h_vect, 'Word_vects': Word_vects, 'wp': wp,
"dep_order": dep_order, "hh_index": hh_index, 'Word_ids': Word_ids, 'p': p})
return wd_extra, wpresent
def log_data(text,fname):
open(fname,'a+').write(text+'\n')
return
from list_data import list_1perdir_files
def train(inflag, src, mode, w, neta, regu, v_size = 50, h_size = 30, w_range = 51, ite=50, ep_size=1000, wfname = 'weight',debug = 1, log=None):
## errors of various type
iter_err = {0:np.inf}
epoc_err = []
epoc_samp_err = []
epoc_samp_branch_err = []
samp_err = []
branch_samp_err = []
## errors of various type end
ecount = 1
    if w is None:
w = get_weight_matrices(w_range, h_size, v_size)
Global.m = get_zero_weight_matrices(w_range, h_size, v_size)
Global.v = get_zero_weight_matrices(w_range, h_size, v_size)
Global.lr = {0:neta}
wprev = {}
for i in w:
        wprev[i] = np.copy(w[i])
iter_count=1
of_flag = []
log_text = ''
if os.path.isfile(log['iter']):
iter_count=pickle.load(open(log['iter'],'rb'))
if os.path.isfile(log['iter_err']):
iter_err= pickle.load(open(log['iter_err'],'rb'))
if inflag == 'd':
if debug == 1:
print "Training Directory :", src,"\n\n"
log_text += "\nTraining Directory : "+ src
elif inflag == 't':
if debug == 1:
print "Training file :", src,"\n\n"
log_text += "\nTraining file : " + src
while iter_count < ite:
words_data = []
if inflag == 'd':
file_list = list_1perdir_files(src)
if debug == 1:
print iter_count, "iteration is running\n\tfiles read :"
log_text += '\n' + str(iter_count) + " iteration is running\n\tfiles read : "
for fname in file_list:
if debug == 1:
print '\t', fname
log_text += '\n\t' + fname
words_data += pickle.load(open(fname, 'rb'))
elif inflag == 't':
if debug == 1:
print iter_count, "iteration is running"
log_text += '\n' + str(iter_count) + " iteration is running"
words_data = pickle.load(open(src, 'rb'))
iter_err[iter_count]=0.0
total_sample = 0
dw1={}
epnum1={}
for wi in w:
dw1[wi] = 0.0
epnum1[wi] = 0.0
wd_extra, wpresent = word_to_extra(words_data, w, w_range,h_size)
del words_data
wd_len = len(wd_extra)
if debug == 1:
print "\n\tStart trainning "
print '\tnumber of training sample :', wd_len
log_text += "\n\n\tStart trainning\n\tnumber of training sample : "+str(wd_len)
        index = 0
        wsum = 0.0
        kerr = 0.0
        wsum1 = 0.0
shuffle(wd_extra)
while index < wd_len:
wtrain = (index + ep_size) if index + ep_size < wd_len else wd_len
terr = 0.0;
tkerr=0.0
# try:
if mode == 'normal':
w, dw1, epnum1, tkerr, terr, tsamp_err, tbranch_samp_err, branchs= rae_trainning_normal(wd_extra[index:wtrain], w=w, neta=neta, regu=regu, wpresent=wpresent)
if wtrain-index == ep_size:
epoc_samp_err.append(terr/(wtrain-index))
epoc_err.append(terr)
epoc_samp_branch_err.append(terr/branchs)
# samp_err += tsamp_err
# branch_samp_err += tbranch_samp_err
# except KeyError:
# tkerr += 1
# index += ep_size
# continue
index += ep_size
iter_err[iter_count] += terr
kerr += tkerr
for wi in w:
wsum += np.sum(abs(w[wi]))
wsum1 += np.sum(w[wi])
save2pickle(w, wfname)
total_sample+=wd_len
if debug == 1:
print "\tWeight sum :", wsum
print "\tWeight sum1 :", wsum1
print "\tKey error :", kerr
print "\tTotal sample trained :", total_sample
print "Prev iteration error :", iter_err[iter_count - 1]
print "This iteration error :", iter_err[iter_count]
print "break lower Threshold :", .005 * total_sample
print "break value :", abs(iter_err[iter_count - 1]) - abs(iter_err[iter_count])
log_text += "\n\tWeight sum : " + str(wsum) + "\n\tError sum : " + str(
kerr) + "\n\tTotal sample trained : " + str(total_sample) \
+ "\nPrev iteration error : " + str(
iter_err[iter_count - 1]) + "\nThis iteration error : " + str(iter_err[iter_count]) + \
"\nbreak lower Threshold : " + str(.005 * total_sample) + "\nbreak value : " + str(
abs(iter_err[iter_count - 1]) - abs(iter_err[iter_count]))
pickle.dump(iter_err, open(log['iter_err'], 'wb'))
pickle.dump(epoc_err, open(log['epoc_err'] + str(ecount), 'wb'))
pickle.dump(epoc_samp_err, open(log['epoc_samp_err'] + str(ecount), 'wb'))
pickle.dump(epoc_samp_branch_err, open(log['epoc_samp_branch_err'] + str(ecount), 'wb'))
if len(epoc_err) > 20000:
epoc_err = []
epoc_samp_err = []
epoc_samp_branch_err = []
ecount += 1
if abs(iter_err[iter_count - 1]) - abs(iter_err[iter_count]) < .005 * total_sample:
of_flag.append(1)
if debug == 1:
print "Number of Boundry cross :", len(of_flag)
log_text += "\nNumber of Boundry cross : " + str(len(of_flag))
if len(of_flag) > 0:
t1 = wfname.split('/');
wf = '/'.join(t1[:-1]) + '/' + str(iter_count) + t1[-1]
save2pickle(w, wf)
pickle.dump(iter_count+1, open(log['iter'], 'wb'))
break
else:
if of_flag:
of_flag.pop()
t1 = wfname.split('/');
wf = '/'.join(t1[:-1]) + '/' + str(iter_count) + t1[-1]
save2pickle(w, wf)
if debug == 1:
print '\n\n'
log_text += '\n\n\n'
log_data(log_text, log['rae'])
log_text = ''
if iter_count%4 == 0:
pickle.dump(samp_err,open(log['samp_err'] + str(iter_count),'wb'))
pickle.dump(branch_samp_err,open(log['branch_samp_err']+ str(iter_count),'wb'))
samp_err=[]
branch_samp_err=[]
iter_count += 1
pickle.dump(iter_count, open(log['iter'], 'wb'))
for i in iter_err:
if debug == 1:
print i, "iteration error :", iter_err[i]
log_text += '\n' + str(i) + " iteration error : " + str(iter_err[i])
if debug == 1:
if epoc_err:
pickle.dump(epoc_err, open(log['epoc_err'] + str(ecount), 'wb'))
pickle.dump(epoc_samp_err, open(log['epoc_samp_err'] + str(ecount), 'wb'))
pickle.dump(epoc_samp_branch_err, open(log['epoc_samp_branch_err'] + str(ecount), 'wb'))
log_data(log_text, log['rae'])
log_text = ''
return
import time
if __name__=='__main__':
Global.init()
flag=None
w = None
    neta = None
    regu = 0.01
    v_size = None
    h_size = None
    w_range = 201
    ite = 5
    epoch = 1000
    wname = 'weight(w)'
    src = None
    wload = None
    spwd = 0
log = {}
if sys.argv[1] == '--help':
print "Usage : python RAE_trainning.py [options] src"
print "options:"
print "\t -dir directory_name: directory has the files contain pickle data for trainning"
print "\t -in infile: infile has location of pickle file for trainning"
print "\t -neta value: learning rate(default=0.001)"
print "\t -hlayer value: Number of nodes in hidden layer"
print "\t -insize value: word vector size(50 or 200)"
print "\t -regu value: reguleraziation (default=0.01)"
print "\t -w weight_file_name: filename contain weights parameter for machine"
print "\t -wload weight_file_name: filename contain weights parameter for machine(when weight of file as initiate weight)"
print '\t -iter iterations'
print '\t -stopwrd include stopword(by default 0)'
print '\t -epoch epoch size\n\n'
exit()
elif '-dir' in sys.argv:
ind=sys.argv.index('-dir')
if os.path.isdir(sys.argv[ind+1]):
flag ='d'
src = sys.argv[ind+1]
elif '-in' in sys.argv:
ind = sys.argv.index('-in')
if os.path.isfile(sys.argv[ind+1]):
flag='t'
src = sys.argv[ind + 1]
else:
exit()
if '-neta' in sys.argv:
ind=sys.argv.index('-neta')
neta = float(sys.argv[ind+1])
if '-regu' in sys.argv:
ind=sys.argv.index('-regu')
regu = float(sys.argv[ind+1])
if '-w' in sys.argv:
ind=sys.argv.index('-w')
wname = sys.argv[ind+1]
if '-iter' in sys.argv:
ind=sys.argv.index('-iter')
ite = int(sys.argv[ind+1])
if '-stopwrd' in sys.argv:
ind=sys.argv.index('-stopwrd')
spwd = int(sys.argv[ind+1])
if '-epoch' in sys.argv:
ind=sys.argv.index('-epoch')
epoch = int(sys.argv[ind+1])
if '-wload' in sys.argv:
ind=sys.argv.index('-wload')
wload = sys.argv[ind+1]
if '-hlayer' in sys.argv:
ind = sys.argv.index('-hlayer')
h_size = int(sys.argv[ind + 1])
if '-insize' in sys.argv:
ind = sys.argv.index('-insize')
v_size = int(sys.argv[ind + 1])
# if v_size != 50 or v_size != 200:
# print "Enter : -insize (50 or 200)"
# exit()
wt = wname.split('/')[-1]
wfile = '_'
ddir = wname + wfile
if not os.path.isdir(ddir):
os.mkdir(ddir)
if not os.path.isdir(ddir+'/error/'):
os.mkdir(ddir+'/error/')
wfname = ddir + '/' + wt + wfile + '_' + str(spwd) + '_.pickle'
log['iter'] = ddir + '/iter_count.pickle'
log['iter_err'] = ddir + '/iter_err_count.pickle'
log['rae'] = ddir + '/log.txt'
log['epoc_err'] = ddir + '/error/epoc_err.pickle'
log['epoc_samp_err'] = ddir + '/error/epoc_samp_err.pickle'
log['epoc_samp_branch_err'] = ddir + '/error/epoc_samp_branch_err.pickle'
log['samp_err'] = ddir + '/error/samp_err.pickle'
log['branch_samp_err'] = ddir + '/error/branch_samp_err.pickle'
open(ddir +'/setting.txt','w').write(\
"\nneta ="+str(neta)\
+"\nregu="+str(regu)\
+"\nword vector size ="+str(v_size)\
+"\npharse vector size ="+str(h_size)\
+"\niteration :"+str(ite)\
+"\nStopword :"+str(spwd)\
+"\nepoch size :"+str(epoch)\
+"\nInput flag :"+str(flag)\
+"\nInput file :"+src\
+"\nweight file :"+wfname\
)
print "\n\nTrainning Start with:"
print " neta =",neta
print " regu=",regu
print " word vector size =",v_size
print " pharse vector size =",h_size
print " iteration :",ite
print " Stopword :",spwd
print " epoch size :",epoch
print " Input flag :",flag
print " Input file :",src
print " weight file :",wfname
if os.path.isfile(wfname):
print " load weight file :", wfname
else:
print " load weight file :",wload
print
# a=raw_input("Want to continue (y\Y):")
# a=str(a)
# if a == 'y' or a == 'Y':
# pass
# else:
# exit()
if wload:
if os.path.isfile(wload):
w = pickle.load(open(wload, 'rb'))
else:
if os.path.isfile(wfname):
w = pickle.load(open(wfname, 'rb'))
Global.init_wv_self(stopword=spwd, vsize=v_size)
Global.adam_parm_init()
train(flag, src=src, mode='normal', w=w, neta=neta, regu=regu, v_size=Global.v_size, h_size=h_size, w_range=w_range, ite=ite, ep_size=epoch, wfname = wfname, log=log, debug=1)
# from main import main_fun
# main_fun([wfname])
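# Example invocation (hypothetical paths; the flags are the ones parsed above):
#
#   python RAE_trainning.py -in data/train.pickle -neta 0.001 -regu 0.01 \
#       -hlayer 30 -insize 50 -iter 5 -epoch 1000 -w weights/rae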
|
|
import os
import struct
import tracer
import random
import logging
from itertools import groupby
import binascii
import claripy
import angr
from angr.state_plugins.trace_additions import ChallRespInfo, ZenPlugin
from angr.state_plugins.preconstrainer import SimStatePreconstrainer
from angr.state_plugins.posix import SimSystemPosix
from angr.storage.file import SimFileStream
from rex.exploit.cgc import CGCExploit
from .harvester import Harvester
from .pov import ColorguardExploit, ColorguardNaiveExploit, ColorguardNaiveHexExploit, ColorguardNaiveAtoiExploit
l = logging.getLogger("colorguard.ColorGuard")
class ColorGuard(object):
"""
Detect leaks of the magic flag page data.
Most logic is offloaded to the tracer.
"""
def __init__(self, binary, payload):
"""
:param binary: path to the binary which is suspect of leaking
:param payload: concrete input string to feed to the binary
"""
self.binary = binary
self.payload = payload
if not os.access(self.binary, os.X_OK):
raise ValueError("\"%s\" binary does not exist or is not executable" % self.binary)
# will be set by causes_leak
self._leak_path = None
self._runner = tracer.QEMURunner(binary=binary, input=payload)
# load the binary
self.project = angr.Project(binary)
self.project.simos.syscall_library.update(angr.SIM_LIBRARIES['cgcabi_tracer'])
# set up the state for analysis
remove_options = {angr.options.SUPPORT_FLOATING_POINT}
add_options = angr.options.unicorn | {
angr.options.CGC_NO_SYMBOLIC_RECEIVE_LENGTH,
angr.options.UNICORN_THRESHOLD_CONCRETIZATION,
angr.options.REPLACEMENT_SOLVER }
state = self.project.factory.full_init_state(remove_options=remove_options, add_options=add_options)
# Make our own special posix
state.register_plugin('posix', SimSystemPosix(
stdin=SimFileStream('stdin', content=payload),
stdout=SimFileStream('stdout'),
stderr=SimFileStream('stderr')))
# Create the preconstrainer plugin
state.register_plugin('preconstrainer', SimStatePreconstrainer())
state.preconstrainer.preconstrain_flag_page(self._runner.magic)
# Set up zen
ZenPlugin.prep_tracer(state)
# Make the simulation manager
self._simgr = self.project.factory.simulation_manager(state, save_unsat=True, hierarchy=False, save_unconstrained=self._runner.crash_mode)
self._t = angr.exploration_techniques.Tracer(trace=self._runner.trace, resiliency=False)
self._simgr.use_technique(self._t)
self._simgr.use_technique(angr.exploration_techniques.Oppologist())
# will be overwritten by _concrete_difference if the input was filtered
        # this attribute is used exclusively for testing at the moment
self._no_concrete_difference = not self._concrete_difference()
self.leak_ast = None
def _concrete_leak_info(self, seed=None):
if seed is None:
seed = random.randint(0, 2**32)
r1 = tracer.QEMURunner(self.binary, input=self.payload, record_magic=True, record_stdout=True, seed=seed)
return r1.stdout, r1.magic
def _concrete_difference(self):
"""
        Does the input, when run concretely, produce two different outputs?
        If it causes a leak it should, but differing outputs do not
        guarantee that there is a leak.
        :return: True if there is a concrete difference
"""
s1, _ = self._concrete_leak_info()
s2, _ = self._concrete_leak_info()
return s1 != s2
def causes_dumb_leak(self):
return not self._no_concrete_difference
def _find_dumb_leaks_raw(self):
s1, m1 = self._concrete_leak_info()
potential_leaks = [ ]
for i in range(len(s1)):
pchunk = s1[i:i+4]
if len(pchunk) == 4 and pchunk in m1:
potential_leaks.append(i)
return potential_leaks
def _find_dumb_leaks_hex(self):
s1, m1 = self._concrete_leak_info()
potential_leaks = [ ]
for i in range(len(s1)):
pchunk = s1[i:i+8]
if len(pchunk) == 8 and pchunk in binascii.hexlify(m1):
potential_leaks.append(i)
return potential_leaks
def _find_dumb_leaks_atoi(self):
s1, m1 = self._concrete_leak_info()
potential_leaks = []
for i in range(len(m1)):
pchunk = m1[i:i+4]
if len(pchunk) != 4:
continue
val = struct.unpack("<I", pchunk)[0]
if str(val).encode() in s1:
potential_leaks.append(s1.find(str(val).encode()))
val2 = -((1 << 32) - val)
if str(val2).encode() in s1:
potential_leaks.append(s1.find(str(val2).encode()))
return potential_leaks
def attempt_dumb_pov_raw(self):
p1 = self._find_dumb_leaks_raw()
p2 = self._find_dumb_leaks_raw()
leaks = list(set(p1).intersection(set(p2)))
if leaks:
leaked_bytes = list(range(leaks[0], leaks[0]+4))
l.info("Found dumb leak which leaks bytes %s", leaked_bytes)
return ColorguardNaiveExploit(self.binary, self.payload, leaked_bytes[-1]+1, leaked_bytes)
else:
l.debug("No dumb leak found")
def attempt_dumb_pov_hex(self):
p1 = self._find_dumb_leaks_hex()
p2 = self._find_dumb_leaks_hex()
leaks = list(set(p1).intersection(set(p2)))
if leaks:
leaked_bytes = list(range(leaks[0], leaks[0]+8))
l.info("Found dumb hex leak which leaks bytes %s", leaked_bytes)
return ColorguardNaiveHexExploit(self.binary, self.payload, leaked_bytes[-1]+1, leaked_bytes)
else:
l.debug("No dumb hex leak found")
def attempt_dumb_pov_atoi(self):
p1 = self._find_dumb_leaks_atoi()
p2 = self._find_dumb_leaks_atoi()
leaks = list(set(p1).intersection(set(p2)))
if leaks:
leak_start = leaks[0]
l.info("Found dumb atoi leak which leaks at byte %s", leak_start)
return ColorguardNaiveAtoiExploit(self.binary, self.payload, leak_start)
else:
l.debug("No dumb leak found")
def attempt_dumb_pov(self):
pov = self.attempt_dumb_pov_raw()
if pov is not None:
return pov
pov = self.attempt_dumb_pov_hex()
if pov is not None:
return pov
pov = self.attempt_dumb_pov_atoi()
if pov is not None:
return pov
def causes_naive_leak(self):
return self.causes_dumb_leak()
def _find_naive_leaks(self, seed=None):
"""
Naive implementation of colorguard which looks for concrete leaks of
the flag page.
"""
stdout, magic = self._concrete_leak_info(seed=seed)
        # byte indices where a leak might have occurred
potential_leaks = dict()
for si, b in enumerate(stdout):
try:
indices = [i for i, x in enumerate(magic) if x == b]
potential_leaks[si] = indices
except ValueError:
pass
return potential_leaks
def attempt_naive_pov(self):
p1 = self._find_naive_leaks()
p2 = self._find_naive_leaks()
leaked = dict()
for si in p1:
if si in p2:
li = list(set(p2[si]).intersection(set(p1[si])))
if len(li) > 0:
for lb in li:
leaked[lb] = si
# find four contiguous
consecutive_groups = [ ]
for _, g in groupby(enumerate(sorted(leaked)), lambda ix: ix[0]-ix[1]):
consecutive_groups.append([x[1] for x in g])
lgroups = [x for x in consecutive_groups if len(x) >= 4]
if len(lgroups):
l.info("Found naive leak which leaks bytes %s", lgroups[0])
leaked_bytes = [ ]
for b in leaked:
leaked_bytes.append(leaked[b])
return ColorguardNaiveExploit(self.binary, self.payload, max(leaked_bytes)+1, leaked_bytes)
else:
l.debug("No naive leak found")
def causes_leak(self):
if not self.causes_naive_leak():
return False
self._simgr.run()
if 'traced' in self._simgr.stashes:
self._leak_path = self._simgr.traced[0]
elif 'crashed' in self._simgr.stashes:
self._leak_path = self._t.predecessors[-1]
else:
l.error("Something went wrong, tracing didn't terminate with traced or crashed.")
return False
stdout = self._leak_path.posix.stdout
output = stdout.load(0, stdout.pos)
for var in output.variables:
if var.startswith("cgc-flag"):
self.leak_ast = output
return True
return False
def attempt_pov(self, enabled_chall_resp=False):
assert self.leak_ast is not None, "must run causes_leak first or input must cause a leak"
st = self._leak_path
# switch to a composite solver
st.preconstrainer.remove_preconstraints(simplify=False)
# get the flag var
flag_bytes = st.cgc.flag_bytes
# remove constraints from the state which involve only the flagpage
# this solves a problem with CROMU_00070, where the floating point
# operations have to be done concretely and constrain the flagpage
# to being a single value
CGCExploit.filter_uncontrolled_constraints(st)
simplified = st.solver.simplify(self.leak_ast)
harvester = Harvester(simplified, st.copy(), flag_bytes)
output_var = claripy.BVS('output_var', harvester.minimized_ast.size(), explicit_name=True) #pylint:disable=no-member
st.add_constraints(harvester.minimized_ast == output_var)
leaked_bytes = harvester.get_largest_consecutive()
if len(leaked_bytes) < 4:
l.warning("input does not leak enough bytes, %d bytes leaked, need 4", len(leaked_bytes))
return None
exploit = ColorguardExploit(self.binary, st,
self.payload, harvester,
simplified, output_var, leaked_bytes)
# only want to try this once
if not enabled_chall_resp:
l.info('testing for challenge response')
if self._challenge_response_exists(exploit):
l.warning('challenge response detected')
exploit = self._prep_challenge_response()
return exploit
def attempt_exploit(self):
"""
Try all techniques
"""
if self.causes_dumb_leak():
pov = self.attempt_dumb_pov()
if pov is not None and any(pov.test_binary(times=10, enable_randomness=True, timeout=5)):
return pov
else:
l.warning("Dumb leak exploitation failed")
if self.causes_naive_leak():
pov = self.attempt_naive_pov()
if pov is not None and any(pov.test_binary(times=10, enable_randomness=True, timeout=5)):
return pov
else:
l.warning("Naive leak exploitation failed")
if self.causes_leak():
pov = self.attempt_pov()
if pov is not None:
return pov
else:
l.warning("Colorguard leak exploitation failed")
### CHALLENGE RESPONSE
@staticmethod
def _challenge_response_exists(exploit):
"""
Since one success may actually occur, let's test for two successes
"""
return not (exploit.test_binary(times=10, enable_randomness=True, timeout=30).count(True) > 1)
def _prep_challenge_response(self, format_infos=None):
"""
Set up the internal tracer for challenge-response analysis
:param format_infos: a list of atoi FormatInfo objects that should be used when analyzing the crash
"""
# need to re-trace the binary with stdin symbolic
remove_options = {angr.options.SUPPORT_FLOATING_POINT}
add_options = angr.options.unicorn | {
angr.options.CGC_NO_SYMBOLIC_RECEIVE_LENGTH,
angr.options.UNICORN_THRESHOLD_CONCRETIZATION,
angr.options.REPLACEMENT_SOLVER }
state = self.project.factory.full_init_state(add_options=add_options, remove_options=remove_options)
# Make our own special posix
state.register_plugin('posix', SimSystemPosix(
stdin=SimFileStream('stdin', ident='aeg_stdin'), # we do tests against the name of the variable...
stdout=SimFileStream('stdout'),
stderr=SimFileStream('stderr')))
# Create the preconstrainer plugin
state.register_plugin('preconstrainer', SimStatePreconstrainer())
state.preconstrainer.preconstrain_flag_page(self._runner.magic)
state.preconstrainer.preconstrain_file(self.payload, state.posix.stdin)
# Set up zen
ZenPlugin.prep_tracer(state)
ChallRespInfo.prep_tracer(state, format_infos)
self._simgr = self.project.factory.simulation_manager(state, save_unsat=True, hierarchy=False, save_unconstrained=self._runner.crash_mode)
self._t = angr.exploration_techniques.Tracer(trace=self._runner.trace, resiliency=False)
self._simgr.use_technique(self._t)
self._simgr.use_technique(angr.exploration_techniques.Oppologist())
assert self.causes_leak(), "challenge did not cause leak when trying to recover challenge-response"
return self.attempt_pov(enabled_chall_resp=True)
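# A minimal usage sketch (hypothetical binary path and payload), following the
# public flow of the class above:
#
#     cg = ColorGuard("/path/to/challenge_binary", b"some concrete input\n")
#     pov = cg.attempt_exploit()  # tries dumb, naive, then tracer-based leaks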
|
|
from django.db import models
from django.db.models.signals import post_save, post_delete
from django.dispatch import receiver
from django.urls import reverse
from corehq.apps.accounting.models import BillingAccount, Subscription
from corehq.apps.sso import certificates
from corehq.apps.sso.exceptions import ServiceProviderCertificateError
from corehq.apps.sso.utils.user_helpers import get_email_domain_from_username
from corehq.util.quickcache import quickcache
class IdentityProviderType:
AZURE_AD = 'azure_ad'
CHOICES = (
(AZURE_AD, "Azure AD"),
)
class ServiceProviderCertificate:
def __init__(self):
"""
To increase the security with SAML transactions, we will provide the IdP
with our public key for an x509 certificate unique to our interactions with
a particular IdP. This certificate will be regenerated automatically by
a periodic task every year.
"""
key_pair = certificates.create_key_pair()
cert = certificates.create_self_signed_cert(key_pair)
self.public_key = certificates.get_public_key(cert)
self.private_key = certificates.get_private_key(key_pair)
self.date_expires = certificates.get_expiration_date(cert)
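# Note (illustrative, not part of the upstream model): instantiating the class
# is all that is needed to mint a fresh key pair, e.g.
#
#     sp_cert = ServiceProviderCertificate()
#     public, private = sp_cert.public_key, sp_cert.private_key
#
# which is exactly how IdentityProvider.create_service_provider_certificate()
# below consumes it.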
class IdentityProvider(models.Model):
"""
This stores the information necessary to make a SAML request to an external
IdP. Right now this process supports Azure AD and the plan is to add
support for other identity provider types in the future.
"""
# these three fields must only ever be editable by Accounting admins
name = models.CharField(max_length=128)
slug = models.CharField(max_length=256, db_index=True, unique=True)
idp_type = models.CharField(
max_length=50,
default=IdentityProviderType.AZURE_AD,
choices=IdentityProviderType.CHOICES,
)
# whether an IdP is editable by its BillingAccount owner
# (it will always be editable by accounting admins)
is_editable = models.BooleanField(default=False)
# whether an IdP is actively in use as an authentication method on HQ
is_active = models.BooleanField(default=False)
# the enterprise admins of this account will be able to edit the SAML
# configuration fields
owner = models.ForeignKey(BillingAccount, on_delete=models.PROTECT)
# these are fields required by the external IdP to form a SAML request
entity_id = models.TextField(blank=True, null=True)
login_url = models.TextField(blank=True, null=True)
logout_url = models.TextField(blank=True, null=True)
idp_cert_public = models.TextField(blank=True, null=True)
# the date the IdP's SAML signing certificate expires.
# this will be filled out by enterprise admins
date_idp_cert_expiration = models.DateTimeField(blank=True, null=True)
# Requires that <saml:Assertion> elements received by the SP are encrypted.
# In Azure AD this requires that Token Encryption is enabled, a premium feature
require_encrypted_assertions = models.BooleanField(default=False)
# as the service provider, this will store our x509 certificates and
# will be renewed automatically by a periodic task
sp_cert_public = models.TextField(blank=True, null=True)
sp_cert_private = models.TextField(blank=True, null=True)
date_sp_cert_expiration = models.DateTimeField(blank=True, null=True)
# as the x509 certificate expires, we need to provide the IdP with our next
# "rollover" cert to prepare the IdP for the transfer
sp_rollover_cert_public = models.TextField(blank=True, null=True)
sp_rollover_cert_private = models.TextField(blank=True, null=True)
date_sp_rollover_cert_expiration = models.DateTimeField(blank=True, null=True)
# for auditing purposes
created_on = models.DateTimeField(auto_now_add=True)
created_by = models.EmailField()
last_modified = models.DateTimeField(auto_now=True)
last_modified_by = models.EmailField()
class Meta:
app_label = 'sso'
def __str__(self):
return f"{self.name} IdP [{self.idp_type}]"
def create_service_provider_certificate(self):
sp_cert = ServiceProviderCertificate()
self.sp_cert_public = sp_cert.public_key
self.sp_cert_private = sp_cert.private_key
self.date_sp_cert_expiration = sp_cert.date_expires
self.save()
def create_rollover_service_provider_certificate(self):
sp_cert = ServiceProviderCertificate()
self.sp_rollover_cert_public = sp_cert.public_key
self.sp_rollover_cert_private = sp_cert.private_key
self.date_sp_rollover_cert_expiration = sp_cert.date_expires
self.save()
def renew_service_provider_certificate(self):
if not self.sp_rollover_cert_public:
raise ServiceProviderCertificateError(
"A rollover certificate for the Service Provider was never "
"generated. You should first create a rollover certificate and "
"leave it active for a few days to give the IdP a heads up."
)
self.sp_cert_public = self.sp_rollover_cert_public
self.sp_cert_private = self.sp_rollover_cert_private
self.date_sp_cert_expiration = self.date_sp_rollover_cert_expiration
self.sp_rollover_cert_public = None
self.sp_rollover_cert_private = None
self.date_sp_rollover_cert_expiration = None
self.save()
def get_email_domains(self):
return AuthenticatedEmailDomain.objects.filter(
identity_provider=self
).values_list('email_domain', flat=True).all()
def get_sso_exempt_users(self):
return UserExemptFromSingleSignOn.objects.filter(
email_domain__identity_provider=self,
).values_list('username', flat=True)
def get_login_url(self, username=None):
"""
Gets the login endpoint for the IdentityProvider based on the protocol
being used. Since we only support SAML2 right now, this redirects to
the SAML2 login endpoint.
:param username: (string) username to pre-populate IdP login with
:return: (String) identity provider login url
"""
return '{}?username={}'.format(
reverse('sso_saml_login', args=(self.slug,)),
username
)
def get_active_projects(self):
"""
Returns a list of active domains/project spaces for this identity
provider.
:return: list of strings (domain names)
"""
return list(Subscription.visible_objects.filter(
account=self.owner,
is_active=True
).values_list('subscriber__domain', flat=True))
@quickcache(['self.slug', 'domain'])
def is_domain_an_active_member(self, domain):
"""
Checks whether the given Domain is an Active Member of the current
Identity Provider.
An "Active Member" is defined by having an active Subscription that
belongs to the BillingAccount owner of this IdentityProvider.
:param domain: String (the Domain name)
:return: Boolean (True if Domain is an Active Member)
"""
return Subscription.visible_objects.filter(
account=self.owner,
is_active=True,
subscriber__domain=domain,
).exists()
@quickcache(['self.slug', 'domain'])
def does_domain_trust_this_idp(self, domain):
"""
Checks whether the given Domain trusts this Identity Provider.
:param domain: String (the Domain name)
:return: Boolean (True if Domain trusts this Identity Provider)
"""
is_active_member = self.is_domain_an_active_member(domain)
if not is_active_member:
# Since this Domain is not an Active Member, check whether an
# administrator of this domain has trusted this Identity Provider
return TrustedIdentityProvider.objects.filter(
domain=domain, identity_provider=self
).exists()
return is_active_member
def clear_domain_caches(self, domain):
"""
Clear all caches associated with a Domain and this IdentityProvider
:param domain: String (the Domain name)
"""
IdentityProvider.does_domain_trust_this_idp.clear(self, domain)
IdentityProvider.is_domain_an_active_member.clear(self, domain)
from corehq.apps.sso.utils.domain_helpers import is_domain_using_sso
is_domain_using_sso.clear(domain)
@staticmethod
def clear_email_domain_caches(email_domain):
"""
Clears all caches associated with a given email_domain
:param email_domain: String (email domain)
"""
IdentityProvider.get_active_identity_provider_by_email_domain.clear(
IdentityProvider,
email_domain
)
def clear_all_email_domain_caches(self):
"""
Clears the email_domain-related caches of all the email domains
associated with this IdentityProvider.
"""
all_email_domains_for_idp = AuthenticatedEmailDomain.objects.filter(
identity_provider=self).values_list('email_domain', flat=True)
for email_domain in all_email_domains_for_idp:
self.clear_email_domain_caches(email_domain)
def clear_all_domain_subscriber_caches(self):
"""
Ensure that we clear all domain caches tied to the Subscriptions
associated with the BillingAccount owner of this IdentityProvider.
"""
for domain in self.get_active_projects():
self.clear_domain_caches(domain)
def save(self, *args, **kwargs):
super().save(*args, **kwargs)
self.clear_all_email_domain_caches()
self.clear_all_domain_subscriber_caches()
def create_trust_with_domain(self, domain, username):
"""
This creates a TrustedIdentityProvider relationship between the Domain
and the current Identity Provider.
:param domain: String (the Domain name)
:param username: String (the username of the user creating this agreement)
:return: Boolean (True if a new trust was created, False if it already exists)
"""
if not TrustedIdentityProvider.objects.filter(
domain=domain, identity_provider=self
).exists():
TrustedIdentityProvider.objects.create(
domain=domain,
identity_provider=self,
acknowledged_by=username,
)
return True
return False
@classmethod
def domain_has_editable_identity_provider(cls, domain):
"""
Check to see that a Domain is associated with an IdentityProvider
that is editable.
:param domain: (String) Domain name
:return: Boolean (True if an editable IdentityProvider exists)
"""
owner = BillingAccount.get_account_by_domain(domain)
return cls.objects.filter(owner=owner, is_editable=True).exists()
@classmethod
@quickcache(['cls.__name__', 'email_domain'])
def get_active_identity_provider_by_email_domain(cls, email_domain):
"""
Returns the active Identity Provider associated with a given email
domain or None.
:param email_domain: (string)
:return: IdentityProvider or None
"""
try:
authenticated_email_domain = AuthenticatedEmailDomain.objects.get(
email_domain=email_domain
)
idp = authenticated_email_domain.identity_provider
except AuthenticatedEmailDomain.DoesNotExist:
return None
return idp if idp.is_active else None
@classmethod
def get_active_identity_provider_by_username(cls, username):
"""
Returns the active Identity Provider associated with a user's email
domain or None.
:param username: (string)
:return: IdentityProvider or None
"""
email_domain = get_email_domain_from_username(username)
if not email_domain:
# malformed username/email
return None
return cls.get_active_identity_provider_by_email_domain(email_domain)
@classmethod
def does_domain_trust_user(cls, domain, username):
"""
Check to see if the given domain trusts the user's IdentityProvider
(if applicable) based on their email domain. If the user has no
IdentityProvider, it will also return True.
:param domain: (String) name of the domain
:param username: (String) username of the user
:return: Boolean (True if an IdP trust exists or is not applicable)
"""
idp = cls.get_active_identity_provider_by_username(username)
if idp is None:
return True
return idp.does_domain_trust_this_idp(domain)
@classmethod
def get_required_identity_provider(cls, username):
"""
Gets the Identity Provider for the given username only if that
user is required to login or sign up with that Identity Provider.
:param username: String
:return: IdentityProvider or None
"""
idp = cls.get_active_identity_provider_by_username(username)
if idp and not UserExemptFromSingleSignOn.objects.filter(
username=username
).exists():
return idp
return None
@receiver(post_save, sender=Subscription)
@receiver(post_delete, sender=Subscription)
def clear_caches_when_subscription_status_changes(sender, instance, **kwargs):
"""
Catches the post-save and post-delete signals of Subscription to ensure
    that if the Subscription status for a domain changes, the
    domain-related caches for IdentityProvider are all cleared.
:param sender: The sender class (in this case Subscription)
:param instance: Subscription - the instance being saved/deleted
:param kwargs:
"""
for identity_provider in IdentityProvider.objects.filter(owner=instance.account):
identity_provider.clear_domain_caches(instance.subscriber.domain)
class AuthenticatedEmailDomain(models.Model):
"""
This specifies the email domains that are tied to an Identity Provider and
a list of users that would be exempt from SSO.
"""
email_domain = models.CharField(max_length=256, db_index=True, unique=True)
identity_provider = models.ForeignKey(IdentityProvider, on_delete=models.PROTECT)
class Meta:
app_label = 'sso'
def __str__(self):
return f"{self.email_domain} authenticated by [{self.identity_provider.name}]"
@receiver(post_save, sender=AuthenticatedEmailDomain)
@receiver(post_delete, sender=AuthenticatedEmailDomain)
def clear_caches_for_email_domain(sender, instance, **kwargs):
"""
Catches the post-save and post-delete signals of AuthenticatedEmailDomain
to ensure that we immediately clear the related email-domain quickcaches
for IdentityProvider.
:param sender: The sender class (in this case AuthenticatedEmailDomain)
:param instance: AuthenticatedEmailDomain - the instance being saved/deleted
:param kwargs:
"""
IdentityProvider.clear_email_domain_caches(instance.email_domain)
class UserExemptFromSingleSignOn(models.Model):
"""
This specifies what users are exempt from SSO for a given
AuthenticatedEmailDomain. Other users will be required to use SSO once
an AuthenticatedEmailDomain is specified for their email domain.
"""
username = models.CharField(max_length=128, db_index=True)
email_domain = models.ForeignKey(AuthenticatedEmailDomain, on_delete=models.CASCADE)
class Meta:
app_label = 'sso'
def __str__(self):
return f"{self.username} is exempt from SSO with {self.email_domain}"
class TrustedIdentityProvider(models.Model):
"""
This specifies the trust between domains (who are not associated with the
IdP's BillingAccount owner) and an IdentityProvider
"""
domain = models.CharField(max_length=256, db_index=True)
identity_provider = models.ForeignKey(IdentityProvider, on_delete=models.PROTECT)
date_acknowledged = models.DateTimeField(auto_now_add=True)
acknowledged_by = models.EmailField()
class Meta:
app_label = 'sso'
def __str__(self):
return f"{self.domain} trusts [{self.identity_provider.name}]"
@receiver(post_save, sender=TrustedIdentityProvider)
@receiver(post_delete, sender=TrustedIdentityProvider)
def clear_caches_when_trust_is_established_or_removed(sender, instance, **kwargs):
"""
Catches the post-save and post-delete signals of TrustedIdentityProvider
to ensure that we immediately clear the related domain quickcaches
for IdentityProvider.
    :param sender: The sender class (in this case TrustedIdentityProvider)
:param instance: TrustedIdentityProvider - the instance being saved/deleted
:param kwargs:
"""
instance.identity_provider.clear_domain_caches(instance.domain)
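# A minimal usage sketch (hypothetical username and project name) of the lookup
# helpers defined above, e.g. from a login flow:
#
#     idp = IdentityProvider.get_required_identity_provider("jdoe@example.com")
#     if idp is not None and idp.does_domain_trust_this_idp("my-project"):
#         login_url = idp.get_login_url(username="jdoe@example.com")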
|
|
"""Module to help with parsing and generating configuration files."""
import asyncio
from collections import OrderedDict
import logging
import os
import re
import shutil
import sys
# pylint: disable=unused-import
from typing import Any, List, Tuple # NOQA
import voluptuous as vol
from voluptuous.humanize import humanize_error
from homeassistant.const import (
CONF_LATITUDE, CONF_LONGITUDE, CONF_NAME, CONF_PACKAGES, CONF_UNIT_SYSTEM,
CONF_TIME_ZONE, CONF_ELEVATION, CONF_UNIT_SYSTEM_METRIC,
CONF_UNIT_SYSTEM_IMPERIAL, CONF_TEMPERATURE_UNIT, TEMP_CELSIUS,
__version__, CONF_CUSTOMIZE, CONF_CUSTOMIZE_DOMAIN, CONF_CUSTOMIZE_GLOB)
from homeassistant.core import callback, DOMAIN as CONF_CORE
from homeassistant.exceptions import HomeAssistantError
from homeassistant.loader import get_component, get_platform
from homeassistant.util.yaml import load_yaml
import homeassistant.helpers.config_validation as cv
from homeassistant.util import dt as date_util, location as loc_util
from homeassistant.util.unit_system import IMPERIAL_SYSTEM, METRIC_SYSTEM
from homeassistant.helpers.entity_values import EntityValues
from homeassistant.helpers import config_per_platform, extract_domain_configs
_LOGGER = logging.getLogger(__name__)
DATA_PERSISTENT_ERRORS = 'bootstrap_persistent_errors'
HA_COMPONENT_URL = '[{}](https://home-assistant.io/components/{}/)'
YAML_CONFIG_FILE = 'configuration.yaml'
VERSION_FILE = '.HA_VERSION'
CONFIG_DIR_NAME = '.homeassistant'
DATA_CUSTOMIZE = 'hass_customize'
DEFAULT_CORE_CONFIG = (
# Tuples (attribute, default, auto detect property, description)
(CONF_NAME, 'Home', None, 'Name of the location where Home Assistant is '
'running'),
(CONF_LATITUDE, 0, 'latitude', 'Location required to calculate the time'
' the sun rises and sets'),
(CONF_LONGITUDE, 0, 'longitude', None),
(CONF_ELEVATION, 0, None, 'Impacts weather/sunrise data'
' (altitude above sea level in meters)'),
(CONF_UNIT_SYSTEM, CONF_UNIT_SYSTEM_METRIC, None,
'{} for Metric, {} for Imperial'.format(CONF_UNIT_SYSTEM_METRIC,
CONF_UNIT_SYSTEM_IMPERIAL)),
(CONF_TIME_ZONE, 'UTC', 'time_zone', 'Pick yours from here: http://en.wiki'
'pedia.org/wiki/List_of_tz_database_time_zones'),
) # type: Tuple[Tuple[str, Any, Any, str], ...]
DEFAULT_CONFIG = """
# Show links to resources in log and frontend
introduction:
# Enables the frontend
frontend:
# Enables configuration UI
config:
http:
# Uncomment this to add a password (recommended!)
# api_password: PASSWORD
# Uncomment this if you are using SSL or running in Docker etc
# base_url: example.duckdns.org:8123
# Checks for available updates
# Note: This component will send some information about your system to
# the developers to assist with development of Home Assistant.
# For more information, please see:
# https://home-assistant.io/blog/2016/10/25/explaining-the-updater/
updater:
# Discover some devices automatically
discovery:
# Allows you to issue voice commands from the frontend in enabled browsers
conversation:
# Enables support for tracking state changes over time.
history:
# View all events in a logbook
logbook:
# Track the sun
sun:
# Weather Prediction
sensor:
platform: yr
# Text to speech
tts:
platform: google
group: !include groups.yaml
"""
PACKAGES_CONFIG_SCHEMA = vol.Schema({
cv.slug: vol.Schema( # Package names are slugs
{cv.slug: vol.Any(dict, list)}) # Only slugs for component names
})
CUSTOMIZE_CONFIG_SCHEMA = vol.Schema({
vol.Optional(CONF_CUSTOMIZE, default={}):
vol.Schema({cv.entity_id: dict}),
vol.Optional(CONF_CUSTOMIZE_DOMAIN, default={}):
vol.Schema({cv.string: dict}),
vol.Optional(CONF_CUSTOMIZE_GLOB, default={}):
vol.Schema({cv.string: OrderedDict}),
})
CORE_CONFIG_SCHEMA = CUSTOMIZE_CONFIG_SCHEMA.extend({
CONF_NAME: vol.Coerce(str),
CONF_LATITUDE: cv.latitude,
CONF_LONGITUDE: cv.longitude,
CONF_ELEVATION: vol.Coerce(int),
vol.Optional(CONF_TEMPERATURE_UNIT): cv.temperature_unit,
CONF_UNIT_SYSTEM: cv.unit_system,
CONF_TIME_ZONE: cv.time_zone,
vol.Optional(CONF_PACKAGES, default={}): PACKAGES_CONFIG_SCHEMA,
})
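# Illustrative sketch (not part of the original module): a minimal mapping that
# CORE_CONFIG_SCHEMA accepts. Keys without a Required marker are optional in
# voluptuous, so partial configs validate too; the values below are
# hypothetical placeholders.
def _example_core_config():
    return CORE_CONFIG_SCHEMA({
        CONF_NAME: 'Home',
        CONF_LATITUDE: 52.37,
        CONF_LONGITUDE: 4.89,
        CONF_ELEVATION: 2,
        CONF_UNIT_SYSTEM: CONF_UNIT_SYSTEM_METRIC,
        CONF_TIME_ZONE: 'Europe/Amsterdam',
    })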
def get_default_config_dir() -> str:
"""Put together the default configuration directory based on OS."""
data_dir = os.getenv('APPDATA') if os.name == "nt" \
else os.path.expanduser('~')
return os.path.join(data_dir, CONFIG_DIR_NAME)
def ensure_config_exists(config_dir: str, detect_location: bool=True) -> str:
"""Ensure a config file exists in given configuration directory.
Creating a default one if needed.
Return path to the config file.
"""
config_path = find_config_file(config_dir)
if config_path is None:
print("Unable to find configuration. Creating default one in",
config_dir)
config_path = create_default_config(config_dir, detect_location)
return config_path
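# Illustrative usage sketch (assumption: called from a synchronous startup
# context, not inside the event loop): resolve the default config directory and
# make sure a configuration file exists there.
def _example_bootstrap_config_dir():
    config_dir = get_default_config_dir()
    return ensure_config_exists(config_dir, detect_location=False)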
def create_default_config(config_dir, detect_location=True):
"""Create a default configuration file in given configuration directory.
Return path to new config file if success, None if failed.
This method needs to run in an executor.
"""
from homeassistant.components.config.group import (
CONFIG_PATH as GROUP_CONFIG_PATH)
config_path = os.path.join(config_dir, YAML_CONFIG_FILE)
version_path = os.path.join(config_dir, VERSION_FILE)
group_yaml_path = os.path.join(config_dir, GROUP_CONFIG_PATH)
info = {attr: default for attr, default, _, _ in DEFAULT_CORE_CONFIG}
location_info = detect_location and loc_util.detect_location_info()
if location_info:
if location_info.use_metric:
info[CONF_UNIT_SYSTEM] = CONF_UNIT_SYSTEM_METRIC
else:
info[CONF_UNIT_SYSTEM] = CONF_UNIT_SYSTEM_IMPERIAL
for attr, default, prop, _ in DEFAULT_CORE_CONFIG:
if prop is None:
continue
info[attr] = getattr(location_info, prop) or default
if location_info.latitude and location_info.longitude:
info[CONF_ELEVATION] = loc_util.elevation(location_info.latitude,
location_info.longitude)
    # Writing files with YAML does not create the most human readable results,
    # so we're hard coding a YAML template.
try:
with open(config_path, 'w') as config_file:
config_file.write("homeassistant:\n")
for attr, _, _, description in DEFAULT_CORE_CONFIG:
if info[attr] is None:
continue
elif description:
config_file.write(" # {}\n".format(description))
config_file.write(" {}: {}\n".format(attr, info[attr]))
config_file.write(DEFAULT_CONFIG)
with open(version_path, 'wt') as version_file:
version_file.write(__version__)
with open(group_yaml_path, 'w'):
pass
return config_path
except IOError:
print('Unable to create default configuration file', config_path)
return None
@asyncio.coroutine
def async_hass_config_yaml(hass):
"""Load YAML from hass config File.
This function allow component inside asyncio loop to reload his config by
self.
This method is a coroutine.
"""
def _load_hass_yaml_config():
path = find_config_file(hass.config.config_dir)
conf = load_yaml_config_file(path)
return conf
conf = yield from hass.loop.run_in_executor(None, _load_hass_yaml_config)
return conf
def find_config_file(config_dir):
"""Look in given directory for supported configuration files.
Async friendly.
"""
config_path = os.path.join(config_dir, YAML_CONFIG_FILE)
return config_path if os.path.isfile(config_path) else None
def load_yaml_config_file(config_path):
"""Parse a YAML configuration file.
This method needs to run in an executor.
"""
try:
conf_dict = load_yaml(config_path)
except FileNotFoundError as err:
raise HomeAssistantError("Config file not found: {}".format(
getattr(err, 'filename', err)))
if not isinstance(conf_dict, dict):
msg = 'The configuration file {} does not contain a dictionary'.format(
os.path.basename(config_path))
_LOGGER.error(msg)
raise HomeAssistantError(msg)
return conf_dict
def process_ha_config_upgrade(hass):
"""Upgrade config if necessary.
This method needs to run in an executor.
"""
version_path = hass.config.path(VERSION_FILE)
try:
with open(version_path, 'rt') as inp:
conf_version = inp.readline().strip()
except FileNotFoundError:
# Last version to not have this file
conf_version = '0.7.7'
if conf_version == __version__:
return
_LOGGER.info('Upgrading config directory from %s to %s', conf_version,
__version__)
lib_path = hass.config.path('deps')
if os.path.isdir(lib_path):
shutil.rmtree(lib_path)
with open(version_path, 'wt') as outp:
outp.write(__version__)
@callback
def async_log_exception(ex, domain, config, hass):
"""Generate log exception for config validation.
This method must be run in the event loop.
"""
message = 'Invalid config for [{}]: '.format(domain)
if hass is not None:
async_notify_setup_error(hass, domain, True)
if 'extra keys not allowed' in ex.error_message:
message += '[{}] is an invalid option for [{}]. Check: {}->{}.'\
.format(ex.path[-1], domain, domain,
'->'.join(str(m) for m in ex.path))
else:
message += '{}.'.format(humanize_error(config, ex))
domain_config = config.get(domain, config)
message += " (See {}, line {}). ".format(
getattr(domain_config, '__config_file__', '?'),
getattr(domain_config, '__line__', '?'))
if domain != 'homeassistant':
message += ('Please check the docs at '
'https://home-assistant.io/components/{}/'.format(domain))
_LOGGER.error(message)
@asyncio.coroutine
def async_process_ha_core_config(hass, config):
"""Process the [homeassistant] section from the config.
This method is a coroutine.
"""
config = CORE_CONFIG_SCHEMA(config)
hac = hass.config
def set_time_zone(time_zone_str):
"""Help to set the time zone."""
if time_zone_str is None:
return
time_zone = date_util.get_time_zone(time_zone_str)
if time_zone:
hac.time_zone = time_zone
date_util.set_default_time_zone(time_zone)
else:
_LOGGER.error('Received invalid time zone %s', time_zone_str)
for key, attr in ((CONF_LATITUDE, 'latitude'),
(CONF_LONGITUDE, 'longitude'),
(CONF_NAME, 'location_name'),
(CONF_ELEVATION, 'elevation')):
if key in config:
setattr(hac, attr, config[key])
if CONF_TIME_ZONE in config:
set_time_zone(config.get(CONF_TIME_ZONE))
# Customize
cust_exact = dict(config[CONF_CUSTOMIZE])
cust_domain = dict(config[CONF_CUSTOMIZE_DOMAIN])
cust_glob = OrderedDict(config[CONF_CUSTOMIZE_GLOB])
for name, pkg in config[CONF_PACKAGES].items():
pkg_cust = pkg.get(CONF_CORE)
if pkg_cust is None:
continue
try:
pkg_cust = CUSTOMIZE_CONFIG_SCHEMA(pkg_cust)
except vol.Invalid:
_LOGGER.warning('Package %s contains invalid customize', name)
continue
cust_exact.update(pkg_cust[CONF_CUSTOMIZE])
cust_domain.update(pkg_cust[CONF_CUSTOMIZE_DOMAIN])
cust_glob.update(pkg_cust[CONF_CUSTOMIZE_GLOB])
hass.data[DATA_CUSTOMIZE] = \
EntityValues(cust_exact, cust_domain, cust_glob)
if CONF_UNIT_SYSTEM in config:
if config[CONF_UNIT_SYSTEM] == CONF_UNIT_SYSTEM_IMPERIAL:
hac.units = IMPERIAL_SYSTEM
else:
hac.units = METRIC_SYSTEM
elif CONF_TEMPERATURE_UNIT in config:
unit = config[CONF_TEMPERATURE_UNIT]
if unit == TEMP_CELSIUS:
hac.units = METRIC_SYSTEM
else:
hac.units = IMPERIAL_SYSTEM
_LOGGER.warning("Found deprecated temperature unit in core config, "
"expected unit system. Replace '%s: %s' with "
"'%s: %s'", CONF_TEMPERATURE_UNIT, unit,
CONF_UNIT_SYSTEM, hac.units.name)
# Shortcut if no auto-detection necessary
if None not in (hac.latitude, hac.longitude, hac.units,
hac.time_zone, hac.elevation):
return
discovered = []
# If we miss some of the needed values, auto detect them
if None in (hac.latitude, hac.longitude, hac.units,
hac.time_zone):
info = yield from hass.loop.run_in_executor(
None, loc_util.detect_location_info)
if info is None:
_LOGGER.error('Could not detect location information')
return
if hac.latitude is None and hac.longitude is None:
hac.latitude, hac.longitude = (info.latitude, info.longitude)
discovered.append(('latitude', hac.latitude))
discovered.append(('longitude', hac.longitude))
if hac.units is None:
hac.units = METRIC_SYSTEM if info.use_metric else IMPERIAL_SYSTEM
discovered.append((CONF_UNIT_SYSTEM, hac.units.name))
if hac.location_name is None:
hac.location_name = info.city
discovered.append(('name', info.city))
if hac.time_zone is None:
set_time_zone(info.time_zone)
discovered.append(('time_zone', info.time_zone))
if hac.elevation is None and hac.latitude is not None and \
hac.longitude is not None:
elevation = yield from hass.loop.run_in_executor(
None, loc_util.elevation, hac.latitude, hac.longitude)
hac.elevation = elevation
discovered.append(('elevation', elevation))
if discovered:
_LOGGER.warning(
'Incomplete core config. Auto detected %s',
', '.join('{}: {}'.format(key, val) for key, val in discovered))
def _log_pkg_error(package, component, config, message):
"""Log an error while merging."""
message = "Package {} setup failed. Component {} {}".format(
package, component, message)
pack_config = config[CONF_CORE][CONF_PACKAGES].get(package, config)
message += " (See {}:{}). ".format(
getattr(pack_config, '__config_file__', '?'),
getattr(pack_config, '__line__', '?'))
_LOGGER.error(message)
def _identify_config_schema(module):
"""Extract the schema and identify list or dict based."""
try:
schema = module.CONFIG_SCHEMA.schema[module.DOMAIN]
except (AttributeError, KeyError):
return (None, None)
t_schema = str(schema)
if t_schema.startswith('{'):
return ('dict', schema)
if t_schema.startswith(('[', 'All(<function ensure_list')):
return ('list', schema)
return '', schema
def merge_packages_config(config, packages):
"""Merge packages into the top-level config. Mutate config."""
# pylint: disable=too-many-nested-blocks
PACKAGES_CONFIG_SCHEMA(packages)
for pack_name, pack_conf in packages.items():
for comp_name, comp_conf in pack_conf.items():
if comp_name == CONF_CORE:
continue
component = get_component(comp_name)
if component is None:
_log_pkg_error(pack_name, comp_name, config, "does not exist")
continue
if hasattr(component, 'PLATFORM_SCHEMA'):
config[comp_name] = cv.ensure_list(config.get(comp_name))
config[comp_name].extend(cv.ensure_list(comp_conf))
continue
if hasattr(component, 'CONFIG_SCHEMA'):
merge_type, _ = _identify_config_schema(component)
if merge_type == 'list':
config[comp_name] = cv.ensure_list(config.get(comp_name))
config[comp_name].extend(cv.ensure_list(comp_conf))
continue
if merge_type == 'dict':
if not isinstance(comp_conf, dict):
_log_pkg_error(
pack_name, comp_name, config,
"cannot be merged. Expected a dict.")
continue
if comp_name not in config:
config[comp_name] = OrderedDict()
if not isinstance(config[comp_name], dict):
_log_pkg_error(
pack_name, comp_name, config,
"cannot be merged. Dict expected in main config.")
continue
for key, val in comp_conf.items():
if key in config[comp_name]:
_log_pkg_error(pack_name, comp_name, config,
"duplicate key '{}'".format(key))
continue
config[comp_name][key] = val
continue
            # The last merge type covers sections that may occur only once
if comp_name in config:
_log_pkg_error(
pack_name, comp_name, config, "may occur only once"
" and it already exist in your main config")
continue
config[comp_name] = comp_conf
return config
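# Illustrative sketch (hypothetical package and platform names): a component
# that exposes a PLATFORM_SCHEMA (here assumed to be 'sensor') is merged by
# appending the package's entries to the platform list of the main config.
def _example_merge_packages():
    config = {'sensor': [{'platform': 'yr'}]}
    packages = {'my_pack': {'sensor': [{'platform': 'random'}]}}
    return merge_packages_config(config, packages)
    # expected result: {'sensor': [{'platform': 'yr'}, {'platform': 'random'}]}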
@callback
def async_process_component_config(hass, config, domain):
"""Check component config and return processed config.
Raise a vol.Invalid exception on error.
This method must be run in the event loop.
"""
component = get_component(domain)
if hasattr(component, 'CONFIG_SCHEMA'):
try:
config = component.CONFIG_SCHEMA(config)
except vol.Invalid as ex:
async_log_exception(ex, domain, config, hass)
return None
elif hasattr(component, 'PLATFORM_SCHEMA'):
platforms = []
for p_name, p_config in config_per_platform(config, domain):
# Validate component specific platform schema
try:
p_validated = component.PLATFORM_SCHEMA(p_config)
except vol.Invalid as ex:
async_log_exception(ex, domain, config, hass)
continue
            # Not all platform components follow the same pattern for platforms.
            # So if p_name is None we are not going to validate platform
            # (the automation component is one of them).
if p_name is None:
platforms.append(p_validated)
continue
platform = get_platform(domain, p_name)
if platform is None:
continue
# Validate platform specific schema
if hasattr(platform, 'PLATFORM_SCHEMA'):
# pylint: disable=no-member
try:
p_validated = platform.PLATFORM_SCHEMA(p_validated)
except vol.Invalid as ex:
async_log_exception(ex, '{}.{}'.format(domain, p_name),
p_validated, hass)
continue
platforms.append(p_validated)
# Create a copy of the configuration with all config for current
# component removed and add validated config back in.
filter_keys = extract_domain_configs(config, domain)
config = {key: value for key, value in config.items()
if key not in filter_keys}
config[domain] = platforms
return config
@asyncio.coroutine
def async_check_ha_config_file(hass):
"""Check if HA config file valid.
This method is a coroutine.
"""
proc = yield from asyncio.create_subprocess_exec(
sys.executable, '-m', 'homeassistant', '--script',
'check_config', '--config', hass.config.config_dir,
stdout=asyncio.subprocess.PIPE, loop=hass.loop)
# Wait for the subprocess exit
stdout_data, dummy = yield from proc.communicate()
result = yield from proc.wait()
if not result:
return None
return re.sub(r'\033\[[^m]*m', '', str(stdout_data, 'utf-8'))
@callback
def async_notify_setup_error(hass, component, link=False):
"""Print a persistent notification.
This method must be run in the event loop.
"""
from homeassistant.components import persistent_notification
errors = hass.data.get(DATA_PERSISTENT_ERRORS)
if errors is None:
errors = hass.data[DATA_PERSISTENT_ERRORS] = {}
errors[component] = errors.get(component) or link
_lst = [HA_COMPONENT_URL.format(name.replace('_', '-'), name)
if link else name for name, link in errors.items()]
message = ('The following components and platforms could not be set up:\n'
'* ' + '\n* '.join(list(_lst)) + '\nPlease check your config')
persistent_notification.async_create(
hass, message, 'Invalid config', 'invalid_config')
|
|
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 OpenStack Foundation
# Copyright 2012 Nebula, Inc.
# Copyright (c) 2012 X.commerce, a business unit of eBay Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
import logging
from django.conf import settings
from django.utils.translation import ugettext_lazy as _
from cinderclient.v1.contrib import list_extensions as cinder_list_extensions
from horizon import exceptions
from horizon.utils.memoized import memoized # noqa
from openstack_dashboard.api import base
from openstack_dashboard.api import nova
LOG = logging.getLogger(__name__)
# API static values
VOLUME_STATE_AVAILABLE = "available"
DEFAULT_QUOTA_NAME = 'default'
VERSIONS = base.APIVersionManager("volume", preferred_version=1)
try:
from cinderclient.v1 import client as cinder_client_v1
VERSIONS.load_supported_version(1, {"client": cinder_client_v1,
"version": 1})
except ImportError:
pass
try:
from cinderclient.v2 import client as cinder_client_v2
VERSIONS.load_supported_version(2, {"client": cinder_client_v2,
"version": 2})
except ImportError:
pass
class BaseCinderAPIResourceWrapper(base.APIResourceWrapper):
@property
def name(self):
# If a volume doesn't have a name, use its id.
return (getattr(self._apiresource, 'name', None) or
getattr(self._apiresource, 'display_name', None) or
getattr(self._apiresource, 'id', None))
@property
def description(self):
return (getattr(self._apiresource, 'description', None) or
getattr(self._apiresource, 'display_description', None))
class Volume(BaseCinderAPIResourceWrapper):
_attrs = ['id', 'name', 'description', 'size', 'status', 'created_at',
'volume_type', 'availability_zone', 'imageRef', 'bootable',
'snapshot_id', 'source_volid', 'attachments', 'tenant_name',
'os-vol-host-attr:host', 'os-vol-tenant-attr:tenant_id',
'metadata', 'volume_image_metadata', 'encrypted']
@property
def is_bootable(self):
return self.bootable == 'true'
class VolumeSnapshot(BaseCinderAPIResourceWrapper):
_attrs = ['id', 'name', 'description', 'size', 'status',
'created_at', 'volume_id',
'os-extended-snapshot-attributes:project_id']
class VolumeBackup(BaseCinderAPIResourceWrapper):
_attrs = ['id', 'name', 'description', 'container', 'size', 'status',
'created_at', 'volume_id', 'availability_zone']
_volume = None
@property
def volume(self):
return self._volume
@volume.setter
def volume(self, value):
self._volume = value
class VolTypeExtraSpec(object):
def __init__(self, type_id, key, val):
self.type_id = type_id
self.id = key
self.key = key
self.value = val
def cinderclient(request):
api_version = VERSIONS.get_active_version()
insecure = getattr(settings, 'OPENSTACK_SSL_NO_VERIFY', False)
cacert = getattr(settings, 'OPENSTACK_SSL_CACERT', None)
cinder_url = ""
try:
# The cinder client assumes that the v2 endpoint type will be
# 'volumev2'. However it also allows 'volume' type as a
# fallback if the requested version is 2 and there is no
# 'volumev2' endpoint.
if api_version['version'] == 2:
try:
cinder_url = base.url_for(request, 'volumev2')
except exceptions.ServiceCatalogException:
LOG.warning("Cinder v2 requested but no 'volumev2' service "
"type available in Keystone catalog. Falling back "
"to 'volume'.")
if cinder_url == "":
cinder_url = base.url_for(request, 'volume')
except exceptions.ServiceCatalogException:
LOG.debug('no volume service configured.')
raise
LOG.debug('cinderclient connection created using token "%s" and url "%s"' %
(request.user.token.id, cinder_url))
c = api_version['client'].Client(request.user.username,
request.user.token.id,
project_id=request.user.tenant_id,
auth_url=cinder_url,
insecure=insecure,
cacert=cacert,
http_log_debug=settings.DEBUG)
c.client.auth_token = request.user.token.id
c.client.management_url = cinder_url
return c
def _replace_v2_parameters(data):
if VERSIONS.active < 2:
data['display_name'] = data['name']
data['display_description'] = data['description']
del data['name']
del data['description']
return data
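# Illustrative sketch: when the active API version is v1, the helper above maps
# the v2-style 'name'/'description' keys onto the legacy v1 field names.
def _example_replace_v2_parameters():
    data = {'name': 'vol-1', 'description': 'demo volume'}
    return _replace_v2_parameters(data)
    # v1 active -> {'display_name': 'vol-1', 'display_description': 'demo volume'}
    # v2 active -> the dict is returned unchanged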
def volume_list(request, search_opts=None):
"""To see all volumes in the cloud as an admin you can pass in a special
search option: {'all_tenants': 1}
"""
c_client = cinderclient(request)
if c_client is None:
return []
return [Volume(v) for v in c_client.volumes.list(search_opts=search_opts)]
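# Illustrative usage sketch (hypothetical request object): an admin can list the
# volumes of every project by passing the special search option documented above.
def _example_list_all_tenant_volumes(request):
    return volume_list(request, search_opts={'all_tenants': 1})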
def volume_get(request, volume_id):
volume_data = cinderclient(request).volumes.get(volume_id)
for attachment in volume_data.attachments:
if "server_id" in attachment:
instance = nova.server_get(request, attachment['server_id'])
attachment['instance_name'] = instance.name
else:
# Nova volume can occasionally send back error'd attachments
            # that lack a server_id property; to work around that we'll
# give the attached instance a generic name.
attachment['instance_name'] = _("Unknown instance")
return Volume(volume_data)
def volume_create(request, size, name, description, volume_type,
snapshot_id=None, metadata=None, image_id=None,
availability_zone=None, source_volid=None):
data = {'name': name,
'description': description,
'volume_type': volume_type,
'snapshot_id': snapshot_id,
'metadata': metadata,
'imageRef': image_id,
'availability_zone': availability_zone,
'source_volid': source_volid}
data = _replace_v2_parameters(data)
volume = cinderclient(request).volumes.create(size, **data)
return Volume(volume)
def volume_extend(request, volume_id, new_size):
return cinderclient(request).volumes.extend(volume_id, new_size)
def volume_delete(request, volume_id):
return cinderclient(request).volumes.delete(volume_id)
def volume_update(request, volume_id, name, description):
vol_data = {'name': name,
'description': description}
vol_data = _replace_v2_parameters(vol_data)
return cinderclient(request).volumes.update(volume_id,
**vol_data)
def volume_snapshot_get(request, snapshot_id):
snapshot = cinderclient(request).volume_snapshots.get(snapshot_id)
return VolumeSnapshot(snapshot)
def volume_snapshot_list(request):
c_client = cinderclient(request)
if c_client is None:
return []
return [VolumeSnapshot(s) for s in c_client.volume_snapshots.list()]
def volume_snapshot_create(request, volume_id, name,
description=None, force=False):
data = {'name': name,
'description': description,
'force': force}
data = _replace_v2_parameters(data)
return VolumeSnapshot(cinderclient(request).volume_snapshots.create(
volume_id, **data))
def volume_snapshot_delete(request, snapshot_id):
return cinderclient(request).volume_snapshots.delete(snapshot_id)
def volume_snapshot_update(request, snapshot_id, name, description):
snapshot_data = {'name': name,
'description': description}
snapshot_data = _replace_v2_parameters(snapshot_data)
return cinderclient(request).volume_snapshots.update(snapshot_id,
**snapshot_data)
@memoized
def volume_backup_supported(request):
"""This method will determine if cinder supports backup.
"""
# TODO(lcheng) Cinder does not expose the information if cinder
# backup is configured yet. This is a workaround until that
# capability is available.
# https://bugs.launchpad.net/cinder/+bug/1334856
cinder_config = getattr(settings, 'OPENSTACK_CINDER_FEATURES', {})
return cinder_config.get('enable_backup', False)
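# Illustrative sketch: the flag read above comes straight from Django settings,
# so enabling the backup panels is a deployment-side change, e.g. in
# local_settings.py:
#
#     OPENSTACK_CINDER_FEATURES = {
#         'enable_backup': True,
#     }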
def volume_backup_get(request, backup_id):
backup = cinderclient(request).backups.get(backup_id)
return VolumeBackup(backup)
def volume_backup_list(request):
c_client = cinderclient(request)
if c_client is None:
return []
return [VolumeBackup(b) for b in c_client.backups.list()]
def volume_backup_create(request,
volume_id,
container_name,
name,
description):
backup = cinderclient(request).backups.create(
volume_id,
container=container_name,
name=name,
description=description)
return VolumeBackup(backup)
def volume_backup_delete(request, backup_id):
return cinderclient(request).backups.delete(backup_id)
def volume_backup_restore(request, backup_id, volume_id):
return cinderclient(request).restores.restore(backup_id=backup_id,
volume_id=volume_id)
def tenant_quota_get(request, tenant_id):
c_client = cinderclient(request)
if c_client is None:
return base.QuotaSet()
return base.QuotaSet(c_client.quotas.get(tenant_id))
def tenant_quota_update(request, tenant_id, **kwargs):
return cinderclient(request).quotas.update(tenant_id, **kwargs)
def default_quota_get(request, tenant_id):
return base.QuotaSet(cinderclient(request).quotas.defaults(tenant_id))
def volume_type_list(request):
return cinderclient(request).volume_types.list()
def volume_type_create(request, name):
return cinderclient(request).volume_types.create(name)
def volume_type_delete(request, volume_type_id):
return cinderclient(request).volume_types.delete(volume_type_id)
def volume_type_get(request, volume_type_id):
return cinderclient(request).volume_types.get(volume_type_id)
def volume_type_extra_get(request, type_id, raw=False):
vol_type = volume_type_get(request, type_id)
extras = vol_type.get_keys()
if raw:
return extras
return [VolTypeExtraSpec(type_id, key, value) for
key, value in extras.items()]
def volume_type_extra_set(request, type_id, metadata):
vol_type = volume_type_get(request, type_id)
if not metadata:
return None
return vol_type.set_keys(metadata)
def volume_type_extra_delete(request, type_id, keys):
vol_type = volume_type_get(request, type_id)
return vol_type.unset_keys([keys])
def tenant_absolute_limits(request):
limits = cinderclient(request).limits.get().absolute
limits_dict = {}
for limit in limits:
# -1 is used to represent unlimited quotas
if limit.value == -1:
limits_dict[limit.name] = float("inf")
else:
limits_dict[limit.name] = limit.value
return limits_dict
def service_list(request):
return cinderclient(request).services.list()
def availability_zone_list(request, detailed=False):
return cinderclient(request).availability_zones.list(detailed=detailed)
@memoized
def list_extensions(request):
return cinder_list_extensions.ListExtManager(cinderclient(request))\
.show_all()
@memoized
def extension_supported(request, extension_name):
"""This method will determine if Cinder supports a given extension name.
"""
extensions = list_extensions(request)
for extension in extensions:
if extension.name == extension_name:
return True
return False
|
|
"""
sentry.interfaces.http
~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
__all__ = ('Http',)
from django.conf import settings
from django.utils.translation import ugettext as _
from urllib import urlencode
from urlparse import parse_qsl, urlsplit, urlunsplit
from sentry.constants import HTTP_METHODS
from sentry.interfaces.base import Interface, InterfaceValidationError
from sentry.utils import json
from sentry.utils.safe import trim, trim_dict, trim_pairs
from sentry.web.helpers import render_to_string
def format_headers(value):
if not value:
return ()
if isinstance(value, dict):
value = value.items()
result = []
cookie_header = None
for k, v in value:
if k.lower() == 'cookie':
cookie_header = v
else:
result.append((k.title(), v))
return result, cookie_header
def format_cookies(value):
if not value:
return ()
if isinstance(value, basestring):
value = parse_qsl(value, keep_blank_values=True)
if isinstance(value, dict):
value = value.items()
return [
(k.encode('utf8').strip(), v)
for k, v in value
]
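# Illustrative sketch: format_headers() title-cases header names and splits the
# Cookie header out so format_cookies() can turn it into key/value pairs.
def _example_split_headers_and_cookies():
    headers, cookie_header = format_headers({'content-type': 'text/html',
                                             'cookie': 'foo=bar'})
    return headers, format_cookies(cookie_header)
    # -> ([('Content-Type', 'text/html')], [('foo', 'bar')])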
class Http(Interface):
"""
The Request information is stored in the Http interface. Two arguments
are required: ``url`` and ``method``.
The ``env`` variable is a compounded dictionary of HTTP headers as well
as environment information passed from the webserver. Sentry will explicitly
look for ``REMOTE_ADDR`` in ``env`` for things which require an IP address.
The ``data`` variable should only contain the request body (not the query
string). It can either be a dictionary (for standard HTTP requests) or a
raw request body.
>>> {
>>> "url": "http://absolute.uri/foo",
>>> "method": "POST",
>>> "data": "foo=bar",
>>> "query_string": "hello=world",
>>> "cookies": "foo=bar",
>>> "headers": [
>>> ["Content-Type", "text/html"]
>>> ],
>>> "env": {
>>> "REMOTE_ADDR": "192.168.0.1"
>>> }
>>> }
.. note:: This interface can be passed as the 'request' key in addition
to the full interface path.
"""
display_score = 1000
score = 800
FORM_TYPE = 'application/x-www-form-urlencoded'
@classmethod
def to_python(cls, data):
if not data.get('url'):
raise InterfaceValidationError("No value for 'url'")
kwargs = {}
if data.get('method'):
method = data['method'].upper()
if method not in HTTP_METHODS:
raise InterfaceValidationError("Invalid value for 'method'")
kwargs['method'] = method
else:
kwargs['method'] = None
scheme, netloc, path, query_bit, fragment_bit = urlsplit(data['url'])
query_string = data.get('query_string') or query_bit
if query_string:
# if querystring was a dict, convert it to a string
if isinstance(query_string, dict):
query_string = urlencode(query_string.items())
if query_string[0] == '?':
# remove '?' prefix
query_string = query_string[1:]
kwargs['query_string'] = trim(query_string, 4096)
else:
kwargs['query_string'] = ''
fragment = data.get('fragment') or fragment_bit
cookies = data.get('cookies')
# if cookies were [also] included in headers we
# strip them out
headers = data.get('headers')
if headers:
headers, cookie_header = format_headers(headers)
if not cookies and cookie_header:
cookies = cookie_header
else:
headers = ()
body = data.get('data')
if isinstance(body, dict):
body = json.dumps(body)
if body:
body = trim(body, settings.SENTRY_MAX_HTTP_BODY_SIZE)
kwargs['cookies'] = trim_pairs(format_cookies(cookies))
kwargs['env'] = trim_dict(data.get('env') or {})
kwargs['headers'] = trim_pairs(headers)
kwargs['data'] = body
kwargs['url'] = urlunsplit((scheme, netloc, path, '', ''))
kwargs['fragment'] = trim(fragment, 1024)
return cls(**kwargs)
def get_path(self):
return 'sentry.interfaces.Http'
@property
def full_url(self):
url = self.url
if self.query_string:
url = url + '?' + self.query_string
if self.fragment:
url = url + '#' + self.fragment
return url
def to_email_html(self, event, **kwargs):
return render_to_string('sentry/partial/interfaces/http_email.html', {
'event': event,
'url': self.full_url,
'short_url': self.url,
'method': self.method,
'query_string': self.query_string,
})
def get_alias(self):
return 'request'
def get_title(self):
return _('Request')
def get_api_context(self, is_public=False):
data = self.data
if isinstance(data, dict):
data = json.dumps(data)
cookies = self.cookies or ()
if isinstance(cookies, dict):
cookies = sorted(self.cookies.items())
headers = self.headers or ()
if isinstance(headers, dict):
headers = sorted(self.headers.items())
data = {
'method': self.method,
'url': self.url,
'query': self.query_string,
'fragment': self.fragment,
'data': data,
'headers': headers,
}
if not is_public:
data.update({
'cookies': cookies,
'env': self.env or None,
})
return data
|
|
# Software License Agreement (BSD License)
#
# Copyright (c) 2012, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
Recorder subscribes to ROS messages and writes them to a bag file.
"""
from __future__ import print_function
try:
from queue import Queue
except ImportError:
from Queue import Queue
import re
import threading
import time
import rosbag
import rosgraph
import roslib
import rospy
import sys
class Recorder(object):
def __init__(self, filename, bag_lock=None, all=True, topics=[], regex=False, limit=0, master_check_interval=1.0):
"""
Subscribe to ROS messages and record them to a bag file.
@param filename: filename of bag to write to
@type filename: str
@param all: all topics are to be recorded [default: True]
@type all: bool
@param topics: topics (or regexes if regex is True) to record [default: empty list]
@type topics: list of str
@param regex: topics should be considered as regular expressions [default: False]
@type regex: bool
@param limit: record only this number of messages on each topic (if non-positive, then unlimited) [default: 0]
@type limit: int
@param master_check_interval: period (in seconds) to check master for new topic publications [default: 1]
@type master_check_interval: float
"""
self._all = all
self._topics = topics
self._regex = regex
self._limit = limit
self._master_check_interval = master_check_interval
self._bag = rosbag.Bag(filename, 'w')
self._bag_lock = bag_lock if bag_lock else threading.Lock()
self._listeners = []
self._subscriber_helpers = {}
self._limited_topics = set()
self._failed_topics = set()
self._last_update = time.time()
self._write_queue = Queue()
self._paused = False
self._stop_condition = threading.Condition()
self._stop_flag = False
# Compile regular expressions
if self._regex:
self._regexes = [re.compile(t) for t in self._topics]
else:
self._regexes = None
self._message_count = {} # topic -> int (track number of messages recorded on each topic)
self._master_check_thread = threading.Thread(target=self._run_master_check)
self._write_thread = threading.Thread(target=self._run_write)
@property
def bag(self):
return self._bag
def add_listener(self, listener):
"""
Add a listener which gets called whenever a message is recorded.
@param listener: function to call
@type listener: function taking (topic, message, time)
"""
self._listeners.append(listener)
def start(self):
"""
Start subscribing and recording messages to bag.
"""
self._master_check_thread.start()
self._write_thread.start()
@property
def paused(self):
return self._paused
def pause(self):
self._paused = True
def unpause(self):
self._paused = False
def toggle_paused(self):
self._paused = not self._paused
def stop(self):
"""
Stop recording.
"""
with self._stop_condition:
self._stop_flag = True
self._stop_condition.notify_all()
self._write_queue.put(self)
# Implementation
def _run_master_check(self):
master = rosgraph.Master('rqt_bag_recorder')
try:
while not self._stop_flag:
# Check for new topics
for topic, datatype in master.getPublishedTopics(''):
# Check if:
# the topic is already subscribed to, or
# we've failed to subscribe to it already, or
# we've already reached the message limit, or
# we don't want to subscribe
if topic in self._subscriber_helpers or topic in self._failed_topics or topic in self._limited_topics or not self._should_subscribe_to(
topic):
continue
try:
pytype = roslib.message.get_message_class(datatype)
self._message_count[topic] = 0
self._subscriber_helpers[topic] = _SubscriberHelper(self, topic, pytype)
except Exception as ex:
print('Error subscribing to %s (ignoring): %s' % (topic, str(ex)), file=sys.stderr)
self._failed_topics.add(topic)
# Wait a while
self._stop_condition.acquire()
self._stop_condition.wait(self._master_check_interval)
except Exception as ex:
print('Error recording to bag: %s' % str(ex), file=sys.stderr)
# Unsubscribe from all topics
for topic in list(self._subscriber_helpers.keys()):
self._unsubscribe(topic)
# Close the bag file so that the index gets written
try:
self._bag.close()
except Exception as ex:
print('Error closing bag [%s]: %s' % (self._bag.filename, str(ex)))
def _should_subscribe_to(self, topic):
if self._all:
return True
if not self._regex:
return topic in self._topics
for regex in self._regexes:
if regex.match(topic):
return True
return False
def _unsubscribe(self, topic):
try:
self._subscriber_helpers[topic].subscriber.unregister()
except Exception:
return
del self._subscriber_helpers[topic]
def _record(self, topic, m):
if self._paused:
return
if self._limit and self._message_count[topic] >= self._limit:
self._limited_topics.add(topic)
self._unsubscribe(topic)
return
self._write_queue.put((topic, m, rospy.get_rostime()))
self._message_count[topic] += 1
def _run_write(self):
try:
while not self._stop_flag:
# Wait for a message
item = self._write_queue.get()
if item == self:
continue
topic, m, t = item
# Write to the bag
with self._bag_lock:
self._bag.write(topic, m, t)
# Notify listeners that a message has been recorded
for listener in self._listeners:
listener(topic, m, t)
except Exception as ex:
            print('Error writing to bag: %s' % str(ex), file=sys.stderr)
class _SubscriberHelper(object):
def __init__(self, recorder, topic, pytype):
print('init _SubscriberHelper ' + topic)
self.recorder = recorder
self.topic = topic
self.subscriber = rospy.Subscriber(self.topic, pytype, self.callback)
def callback(self, m):
self.recorder._record(self.topic, m)
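# Illustrative usage sketch (assumes a reachable ROS master): record every
# published topic into 'output.bag' for ten seconds, then close the bag.
def _example_record_all_topics():
    rospy.init_node('example_recorder', anonymous=True)
    recorder = Recorder('output.bag', all=True)
    recorder.add_listener(lambda topic, msg, t: print('recorded message on', topic))
    recorder.start()
    rospy.sleep(10)
    recorder.stop()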
|
|
#
# Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
# Use of this file is governed by the BSD 3-clause license that
# can be found in the LICENSE.txt file in the project root.
#
from io import StringIO
from antlr4.Token import Token
from antlr4.CommonTokenStream import CommonTokenStream
class TokenStreamRewriter(object):
DEFAULT_PROGRAM_NAME = "default"
PROGRAM_INIT_SIZE = 100
MIN_TOKEN_INDEX = 0
def __init__(self, tokens):
"""
:type tokens: antlr4.BufferedTokenStream.BufferedTokenStream
:param tokens:
:return:
"""
super(TokenStreamRewriter, self).__init__()
self.tokens = tokens
self.programs = {self.DEFAULT_PROGRAM_NAME: []}
self.lastRewriteTokenIndexes = {}
def getTokenStream(self):
return self.tokens
def rollback(self, instruction_index, program_name):
ins = self.programs.get(program_name, None)
if ins:
self.programs[program_name] = ins[self.MIN_TOKEN_INDEX: instruction_index]
def deleteProgram(self, program_name=DEFAULT_PROGRAM_NAME):
self.rollback(self.MIN_TOKEN_INDEX, program_name)
def insertAfterToken(self, token, text, program_name=DEFAULT_PROGRAM_NAME):
self.insertAfter(token.tokenIndex, text, program_name)
def insertAfter(self, index, text, program_name=DEFAULT_PROGRAM_NAME):
self.insertBefore(program_name, index + 1, text)
def insertBeforeIndex(self, index, text):
self.insertBefore(self.DEFAULT_PROGRAM_NAME, index, text)
def insertBeforeToken(self, token, text, program_name=DEFAULT_PROGRAM_NAME):
self.insertBefore(program_name, token.tokenIndex, text)
def insertBefore(self, program_name, index, text):
op = self.InsertBeforeOp(self.tokens, index, text)
rewrites = self.getProgram(program_name)
op.instructionIndex = len(rewrites)
rewrites.append(op)
def replaceIndex(self, index, text):
self.replace(self.DEFAULT_PROGRAM_NAME, index, index, text)
def replaceRange(self, from_idx, to_idx, text):
self.replace(self.DEFAULT_PROGRAM_NAME, from_idx, to_idx, text)
def replaceSingleToken(self, token, text):
self.replace(self.DEFAULT_PROGRAM_NAME, token.tokenIndex, token.tokenIndex, text)
def replaceRangeTokens(self, from_token, to_token, text, program_name=DEFAULT_PROGRAM_NAME):
self.replace(program_name, from_token.tokenIndex, to_token.tokenIndex, text)
def replace(self, program_name, from_idx, to_idx, text):
if any((from_idx > to_idx, from_idx < 0, to_idx < 0, to_idx >= len(self.tokens.tokens))):
raise ValueError(
'replace: range invalid: {}..{}(size={})'.format(from_idx, to_idx, len(self.tokens.tokens)))
op = self.ReplaceOp(from_idx, to_idx, self.tokens, text)
rewrites = self.getProgram(program_name)
op.instructionIndex = len(rewrites)
rewrites.append(op)
def deleteToken(self, token):
self.delete(self.DEFAULT_PROGRAM_NAME, token, token)
def deleteIndex(self, index):
self.delete(self.DEFAULT_PROGRAM_NAME, index, index)
def delete(self, program_name, from_idx, to_idx):
if isinstance(from_idx, Token):
            self.replace(program_name, from_idx.tokenIndex, to_idx.tokenIndex, None)
            return
self.replace(program_name, from_idx, to_idx, None)
def lastRewriteTokenIndex(self, program_name=DEFAULT_PROGRAM_NAME):
return self.lastRewriteTokenIndexes.get(program_name, -1)
def setLastRewriteTokenIndex(self, program_name, i):
self.lastRewriteTokenIndexes[program_name] = i
def getProgram(self, program_name):
return self.programs.setdefault(program_name, [])
def getText(self, program_name, interval):
"""
:type interval: Interval.Interval
:param program_name:
:param interval:
:return:
"""
rewrites = self.programs.get(program_name)
start = interval.start
stop = interval.stop
# ensure start/end are in range
if stop > len(self.tokens.tokens) - 1: stop = len(self.tokens.tokens) - 1
if start < 0: start = 0
# if no instructions to execute
if not rewrites: return self.tokens.getText(interval)
buf = StringIO()
indexToOp = self._reduceToSingleOperationPerIndex(rewrites)
i = start
while all((i <= stop, i < len(self.tokens.tokens))):
op = indexToOp.get(i)
token = self.tokens.get(i)
if op is None:
if token.type != Token.EOF: buf.write(token.text)
i += 1
else:
i = op.execute(buf)
if stop == len(self.tokens.tokens)-1:
for op in indexToOp.values():
if op.index >= len(self.tokens.tokens)-1: buf.write(op.text)
return buf.getvalue()
def _reduceToSingleOperationPerIndex(self, rewrites):
# Walk replaces
for i, rop in enumerate(rewrites):
if any((rop is None, not isinstance(rop, TokenStreamRewriter.ReplaceOp))):
continue
# Wipe prior inserts within range
            inserts = [op for op in rewrites[:i] if isinstance(op, TokenStreamRewriter.InsertBeforeOp)]
for iop in inserts:
if iop.index == rop.index:
rewrites[iop.instructionIndex] = None
rop.text = '{}{}'.format(iop.text, rop.text)
elif all((iop.index > rop.index, iop.index <= rop.last_index)):
rewrites[iop.instructionIndex] = None
# Drop any prior replaces contained within
prevReplaces = [op for op in rewrites[:i] if isinstance(op, TokenStreamRewriter.ReplaceOp)]
for prevRop in prevReplaces:
if all((prevRop.index >= rop.index, prevRop.last_index <= rop.last_index)):
                    rewrites[prevRop.instructionIndex] = None
continue
                isDisjoint = any((prevRop.last_index < rop.index, prevRop.index > rop.last_index))
isSame = all((prevRop.index == rop.index, prevRop.last_index == rop.last_index))
if all((prevRop.text is None, rop.text is None, not isDisjoint)):
                    rewrites[prevRop.instructionIndex] = None
rop.index = min(prevRop.index, rop.index)
                    rop.last_index = max(prevRop.last_index, rop.last_index)
print('New rop {}'.format(rop))
                elif not any((isDisjoint, isSame)):
raise ValueError("replace op boundaries of {} overlap with previous {}".format(rop, prevRop))
# Walk inserts
for i, iop in enumerate(rewrites):
if any((iop is None, not isinstance(iop, TokenStreamRewriter.InsertBeforeOp))):
continue
            prevInserts = [op for op in rewrites[:i] if isinstance(op, TokenStreamRewriter.InsertBeforeOp)]
for prevIop in prevInserts:
if prevIop.index == iop.index:
iop.text += prevIop.text
                    rewrites[prevIop.instructionIndex] = None
# look for replaces where iop.index is in range; error
prevReplaces = [op for op in rewrites[:i] if isinstance(op, TokenStreamRewriter.ReplaceOp)]
for rop in prevReplaces:
if iop.index == rop.index:
rop.text = iop.text + rop.text
rewrites[i] = None
continue
                if all((iop.index >= rop.index, iop.index <= rop.last_index)):
raise ValueError("insert op {} within boundaries of previous {}".format(iop, rop))
reduced = {}
for i, op in enumerate(rewrites):
if op is None: continue
if reduced.get(op.index): raise ValueError('should be only one op per index')
reduced[op.index] = op
return reduced
class RewriteOperation(object):
def __init__(self, tokens, index, text=""):
"""
:type tokens: CommonTokenStream
:param tokens:
:param index:
:param text:
:return:
"""
self.tokens = tokens
self.index = index
self.text = text
self.instructionIndex = 0
def execute(self, buf):
"""
:type buf: StringIO.StringIO
:param buf:
:return:
"""
return self.index
def __str__(self):
pass
class InsertBeforeOp(RewriteOperation):
def __init__(self, tokens, index, text=""):
super(TokenStreamRewriter.InsertBeforeOp, self).__init__(tokens, index, text)
def execute(self, buf):
buf.write(self.text)
if self.tokens.get(self.index).type != Token.EOF:
buf.write(self.tokens.get(self.index).text)
return self.index + 1
class ReplaceOp(RewriteOperation):
def __init__(self, from_idx, to_idx, tokens, text):
super(TokenStreamRewriter.ReplaceOp, self).__init__(tokens, from_idx, text)
self.last_index = to_idx
def execute(self, buf):
if self.text:
buf.write(self.text)
return self.last_index + 1
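# Illustrative usage sketch (hypothetical lexer; 'interval' stands for any
# object exposing .start/.stop token indices): edits are buffered as a program
# of operations and only applied when getText() walks the token stream.
def _example_rewrite(lexer, interval):
    stream = CommonTokenStream(lexer)
    stream.fill()
    rewriter = TokenStreamRewriter(stream)
    rewriter.insertBeforeIndex(0, '/* header */ ')
    rewriter.replaceIndex(2, 'renamed')
    return rewriter.getText(TokenStreamRewriter.DEFAULT_PROGRAM_NAME, interval)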
|
|
# mako/runtime.py
# Copyright (C) 2006-2015 the Mako authors and contributors <see AUTHORS file>
#
# This module is part of Mako and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""provides runtime services for templates, including Context,
Namespace, and various helper functions."""
from docassemble.base.mako import exceptions, util, compat
from docassemble.base.mako.compat import compat_builtins
import sys
class Context(object):
"""Provides runtime namespace, output buffer, and various
callstacks for templates.
See :ref:`runtime_toplevel` for detail on the usage of
:class:`.Context`.
"""
def __init__(self, buffer, **data):
self._buffer_stack = [buffer]
self._data = data
self._kwargs = data.copy()
self._with_template = None
self._outputting_as_unicode = None
self.namespaces = {}
# "capture" function which proxies to the
# generic "capture" function
self._data['capture'] = compat.partial(capture, self)
# "caller" stack used by def calls with content
self.caller_stack = self._data['caller'] = CallerStack()
def _set_with_template(self, t):
self._with_template = t
illegal_names = t.reserved_names.intersection(self._data)
if illegal_names:
raise exceptions.NameConflictError(
"Reserved words passed to render(): %s" %
", ".join(illegal_names))
@property
def lookup(self):
"""Return the :class:`.TemplateLookup` associated
with this :class:`.Context`.
"""
return self._with_template.lookup
@property
def kwargs(self):
"""Return the dictionary of top level keyword arguments associated
with this :class:`.Context`.
This dictionary only includes the top-level arguments passed to
:meth:`.Template.render`. It does not include names produced within
the template execution such as local variable names or special names
such as ``self``, ``next``, etc.
The purpose of this dictionary is primarily for the case that
a :class:`.Template` accepts arguments via its ``<%page>`` tag,
which are normally expected to be passed via :meth:`.Template.render`,
except the template is being called in an inheritance context,
using the ``body()`` method. :attr:`.Context.kwargs` can then be
used to propagate these arguments to the inheriting template::
${next.body(**context.kwargs)}
"""
return self._kwargs.copy()
def push_caller(self, caller):
"""Push a ``caller`` callable onto the callstack for
this :class:`.Context`."""
self.caller_stack.append(caller)
def pop_caller(self):
"""Pop a ``caller`` callable onto the callstack for this
:class:`.Context`."""
del self.caller_stack[-1]
def keys(self):
"""Return a list of all names established in this :class:`.Context`."""
return list(self._data.keys())
def __getitem__(self, key):
if key in self._data:
return self._data[key]
else:
return compat_builtins.__dict__[key]
def _push_writer(self):
"""push a capturing buffer onto this Context and return
the new writer function."""
buf = util.FastEncodingBuffer()
self._buffer_stack.append(buf)
return buf.write
def _pop_buffer_and_writer(self):
"""pop the most recent capturing buffer from this Context
and return the current writer after the pop.
"""
buf = self._buffer_stack.pop()
return buf, self._buffer_stack[-1].write
def _push_buffer(self):
"""push a capturing buffer onto this Context."""
self._push_writer()
def _pop_buffer(self):
"""pop the most recent capturing buffer from this Context."""
return self._buffer_stack.pop()
def get(self, key, default=None):
"""Return a value from this :class:`.Context`."""
return self._data.get(key, compat_builtins.__dict__.get(key, default))
def write(self, string):
"""Write a string to this :class:`.Context` object's
underlying output buffer."""
self._buffer_stack[-1].write(string)
def writer(self):
"""Return the current writer function."""
return self._buffer_stack[-1].write
def _copy(self):
c = Context.__new__(Context)
c._buffer_stack = self._buffer_stack
c._data = self._data.copy()
c._kwargs = self._kwargs
c._with_template = self._with_template
c._outputting_as_unicode = self._outputting_as_unicode
c.namespaces = self.namespaces
c.caller_stack = self.caller_stack
return c
def _locals(self, d):
"""Create a new :class:`.Context` with a copy of this
:class:`.Context`'s current state,
updated with the given dictionary.
The :attr:`.Context.kwargs` collection remains
unaffected.
"""
if not d:
return self
c = self._copy()
c._data.update(d)
return c
def _clean_inheritance_tokens(self):
"""create a new copy of this :class:`.Context`. with
tokens related to inheritance state removed."""
c = self._copy()
x = c._data
x.pop('self', None)
x.pop('parent', None)
x.pop('next', None)
return c
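# Illustrative sketch (not how templates normally obtain a Context, which is
# created for them by Template.render()): writes are routed to whatever buffer
# sits on top of the context's buffer stack.
def _example_context_write():
    buf = util.FastEncodingBuffer()
    ctx = Context(buf, greeting='hello')
    ctx.write(ctx.get('greeting'))
    return buf.getvalue()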
class CallerStack(list):
def __init__(self):
self.nextcaller = None
def __nonzero__(self):
return self.__bool__()
def __bool__(self):
return len(self) and self._get_caller() and True or False
def _get_caller(self):
# this method can be removed once
# codegen MAGIC_NUMBER moves past 7
return self[-1]
def __getattr__(self, key):
return getattr(self._get_caller(), key)
def _push_frame(self):
frame = self.nextcaller or None
self.append(frame)
self.nextcaller = None
return frame
def _pop_frame(self):
self.nextcaller = self.pop()
class Undefined(object):
"""Represents an undefined value in a template.
All template modules have a constant value
``UNDEFINED`` present which is an instance of this
object.
"""
def __str__(self):
raise NameError("Undefined")
def __nonzero__(self):
return self.__bool__()
def __bool__(self):
return False
UNDEFINED = Undefined()
STOP_RENDERING = ""
class LoopStack(object):
"""a stack for LoopContexts that implements the context manager protocol
to automatically pop off the top of the stack on context exit
"""
def __init__(self):
self.stack = []
def _enter(self, iterable):
self._push(iterable)
return self._top
def _exit(self):
self._pop()
return self._top
@property
def _top(self):
if self.stack:
return self.stack[-1]
else:
return self
def _pop(self):
return self.stack.pop()
def _push(self, iterable):
new = LoopContext(iterable)
if self.stack:
new.parent = self.stack[-1]
return self.stack.append(new)
def __getattr__(self, key):
raise exceptions.RuntimeException("No loop context is established")
def __iter__(self):
return iter(self._top)
class LoopContext(object):
"""A magic loop variable.
Automatically accessible in any ``% for`` block.
See the section :ref:`loop_context` for usage
notes.
:attr:`parent` -> :class:`.LoopContext` or ``None``
The parent loop, if one exists.
:attr:`index` -> `int`
The 0-based iteration count.
:attr:`reverse_index` -> `int`
The number of iterations remaining.
:attr:`first` -> `bool`
``True`` on the first iteration, ``False`` otherwise.
:attr:`last` -> `bool`
``True`` on the last iteration, ``False`` otherwise.
:attr:`even` -> `bool`
``True`` when ``index`` is even.
:attr:`odd` -> `bool`
``True`` when ``index`` is odd.
"""
def __init__(self, iterable):
self._iterable = iterable
self.index = 0
self.parent = None
def __iter__(self):
for i in self._iterable:
yield i
self.index += 1
@util.memoized_instancemethod
def __len__(self):
return len(self._iterable)
@property
def reverse_index(self):
return len(self) - self.index - 1
@property
def first(self):
return self.index == 0
@property
def last(self):
return self.index == len(self) - 1
@property
def even(self):
return not self.odd
@property
def odd(self):
return bool(self.index % 2)
def cycle(self, *values):
"""Cycle through values as the loop progresses.
"""
if not values:
raise ValueError("You must provide values to cycle through")
return values[self.index % len(values)]
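    # Illustrative sketch: inside a template the attributes documented above are
    # reached through the implicit ``loop`` variable, e.g.
    #
    #     % for item in items:
    #         <li class="${loop.cycle('odd', 'even')}">${loop.index}: ${item}</li>
    #     % endfor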
class _NSAttr(object):
def __init__(self, parent):
self.__parent = parent
def __getattr__(self, key):
ns = self.__parent
while ns:
if hasattr(ns.module, key):
return getattr(ns.module, key)
else:
ns = ns.inherits
raise AttributeError(key)
class Namespace(object):
"""Provides access to collections of rendering methods, which
can be local, from other templates, or from imported modules.
To access a particular rendering method referenced by a
:class:`.Namespace`, use plain attribute access:
.. sourcecode:: mako
${some_namespace.foo(x, y, z)}
:class:`.Namespace` also contains several built-in attributes
described here.
"""
def __init__(self, name, context,
callables=None, inherits=None,
populate_self=True, calling_uri=None):
self.name = name
self.context = context
self.inherits = inherits
if callables is not None:
self.callables = dict([(c.__name__, c) for c in callables])
callables = ()
module = None
"""The Python module referenced by this :class:`.Namespace`.
If the namespace references a :class:`.Template`, then
this module is the equivalent of ``template.module``,
i.e. the generated module for the template.
"""
template = None
"""The :class:`.Template` object referenced by this
:class:`.Namespace`, if any.
"""
context = None
"""The :class:`.Context` object for this :class:`.Namespace`.
Namespaces are often created with copies of contexts that
contain slightly different data, particularly in inheritance
scenarios. Using the :class:`.Context` off of a :class:`.Namespace` one
can traverse an entire chain of templates that inherit from
one-another.
"""
filename = None
"""The path of the filesystem file used for this
:class:`.Namespace`'s module or template.
If this is a pure module-based
:class:`.Namespace`, this evaluates to ``module.__file__``. If a
template-based namespace, it evaluates to the original
template file location.
"""
uri = None
"""The URI for this :class:`.Namespace`'s template.
I.e. whatever was sent to :meth:`.TemplateLookup.get_template()`.
This is the equivalent of :attr:`.Template.uri`.
"""
_templateuri = None
@util.memoized_property
def attr(self):
"""Access module level attributes by name.
This accessor allows templates to supply "scalar"
attributes which are particularly handy in inheritance
relationships.
.. seealso::
:ref:`inheritance_attr`
:ref:`namespace_attr_for_includes`
"""
return _NSAttr(self)
def get_namespace(self, uri):
"""Return a :class:`.Namespace` corresponding to the given ``uri``.
If the given ``uri`` is a relative URI (i.e. it does not
contain a leading slash ``/``), the ``uri`` is adjusted to
be relative to the ``uri`` of the namespace itself. This
method is therefore mostly useful off of the built-in
``local`` namespace, described in :ref:`namespace_local`.
In
most cases, a template wouldn't need this function, and
should instead use the ``<%namespace>`` tag to load
namespaces. However, since all ``<%namespace>`` tags are
evaluated before the body of a template ever runs,
this method can be used to locate namespaces using
expressions that were generated within the body code of
the template, or to conditionally use a particular
namespace.
"""
key = (self, uri)
if key in self.context.namespaces:
return self.context.namespaces[key]
else:
ns = TemplateNamespace(uri, self.context._copy(),
templateuri=uri,
calling_uri=self._templateuri)
self.context.namespaces[key] = ns
return ns
def get_template(self, uri):
"""Return a :class:`.Template` from the given ``uri``.
The ``uri`` resolution is relative to the ``uri`` of this
:class:`.Namespace` object's :class:`.Template`.
"""
return _lookup_template(self.context, uri, self._templateuri)
def get_cached(self, key, **kwargs):
"""Return a value from the :class:`.Cache` referenced by this
:class:`.Namespace` object's :class:`.Template`.
The advantage to this method versus direct access to the
:class:`.Cache` is that the configuration parameters
declared in ``<%page>`` take effect here, thereby calling
up the same configured backend as that configured
by ``<%page>``.
"""
return self.cache.get(key, **kwargs)
@property
def cache(self):
"""Return the :class:`.Cache` object referenced
by this :class:`.Namespace` object's
:class:`.Template`.
"""
return self.template.cache
def include_file(self, uri, **kwargs):
"""Include a file at the given ``uri``."""
_include_file(self.context, uri, self._templateuri, **kwargs)
def _populate(self, d, l):
for ident in l:
if ident == '*':
for (k, v) in self._get_star():
d[k] = v
else:
d[ident] = getattr(self, ident)
def _get_star(self):
if self.callables:
for key in self.callables:
yield (key, self.callables[key])
def __getattr__(self, key):
if key in self.callables:
val = self.callables[key]
elif self.inherits:
val = getattr(self.inherits, key)
else:
raise AttributeError(
"Namespace '%s' has no member '%s'" %
(self.name, key))
setattr(self, key, val)
return val
class TemplateNamespace(Namespace):
"""A :class:`.Namespace` specific to a :class:`.Template` instance."""
def __init__(self, name, context, template=None, templateuri=None,
callables=None, inherits=None,
populate_self=True, calling_uri=None):
self.name = name
self.context = context
self.inherits = inherits
if callables is not None:
self.callables = dict([(c.__name__, c) for c in callables])
if templateuri is not None:
self.template = _lookup_template(context, templateuri,
calling_uri)
self._templateuri = self.template.module._template_uri
elif template is not None:
self.template = template
self._templateuri = template.module._template_uri
else:
raise TypeError("'template' argument is required.")
if populate_self:
lclcallable, lclcontext = \
_populate_self_namespace(context, self.template,
self_ns=self)
@property
def module(self):
"""The Python module referenced by this :class:`.Namespace`.
If the namespace references a :class:`.Template`, then
this module is the equivalent of ``template.module``,
i.e. the generated module for the template.
"""
return self.template.module
@property
def filename(self):
"""The path of the filesystem file used for this
:class:`.Namespace`'s module or template.
"""
return self.template.filename
@property
def uri(self):
"""The URI for this :class:`.Namespace`'s template.
I.e. whatever was sent to :meth:`.TemplateLookup.get_template()`.
This is the equivalent of :attr:`.Template.uri`.
"""
return self.template.uri
def _get_star(self):
if self.callables:
for key in self.callables:
yield (key, self.callables[key])
def get(key):
callable_ = self.template._get_def_callable(key)
return compat.partial(callable_, self.context)
for k in self.template.module._exports:
yield (k, get(k))
def __getattr__(self, key):
if key in self.callables:
val = self.callables[key]
elif self.template.has_def(key):
callable_ = self.template._get_def_callable(key)
val = compat.partial(callable_, self.context)
elif self.inherits:
val = getattr(self.inherits, key)
else:
raise AttributeError(
"Namespace '%s' has no member '%s'" %
(self.name, key))
setattr(self, key, val)
return val
class ModuleNamespace(Namespace):
"""A :class:`.Namespace` specific to a Python module instance."""
def __init__(self, name, context, module,
callables=None, inherits=None,
populate_self=True, calling_uri=None):
self.name = name
self.context = context
self.inherits = inherits
if callables is not None:
self.callables = dict([(c.__name__, c) for c in callables])
mod = __import__(module)
for token in module.split('.')[1:]:
mod = getattr(mod, token)
self.module = mod
@property
def filename(self):
"""The path of the filesystem file used for this
:class:`.Namespace`'s module or template.
"""
return self.module.__file__
def _get_star(self):
if self.callables:
for key in self.callables:
yield (key, self.callables[key])
for key in dir(self.module):
if key[0] != '_':
callable_ = getattr(self.module, key)
if compat.callable(callable_):
yield key, compat.partial(callable_, self.context)
def __getattr__(self, key):
if key in self.callables:
val = self.callables[key]
elif hasattr(self.module, key):
callable_ = getattr(self.module, key)
val = compat.partial(callable_, self.context)
elif self.inherits:
val = getattr(self.inherits, key)
else:
raise AttributeError(
"Namespace '%s' has no member '%s'" %
(self.name, key))
setattr(self, key, val)
return val
def supports_caller(func):
"""Apply a caller_stack compatibility decorator to a plain
Python function.
See the example in :ref:`namespaces_python_modules`.
"""
def wrap_stackframe(context, *args, **kwargs):
context.caller_stack._push_frame()
try:
return func(context, *args, **kwargs)
finally:
context.caller_stack._pop_frame()
return wrap_stackframe
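# Hedged usage sketch (illustrative only; `_example_tag` is not part of the
# original module). A plain Python function exposed to templates through
# <%namespace module="..."/> can be wrapped with supports_caller so that it may
# render the calling template's body via context['caller']:
@supports_caller
def _example_tag(context):
    """Write a marker, render the caller's body, then write a closing marker."""
    context.write('BEFORE\n')
    context['caller'].body()
    context.write('AFTER\n')
    return ''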
def capture(context, callable_, *args, **kwargs):
"""Execute the given template def, capturing the output into
a buffer.
See the example in :ref:`namespaces_python_modules`.
"""
if not compat.callable(callable_):
raise exceptions.RuntimeException(
"capture() function expects a callable as "
"its argument (i.e. capture(func, *args, **kwargs))"
)
context._push_buffer()
try:
callable_(*args, **kwargs)
finally:
buf = context._pop_buffer()
return buf.getvalue()
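# Hedged sketch (illustrative; `somedef` stands in for any rendering callable,
# e.g. an attribute looked up on a Namespace). From Python-side code holding a
# live Context, capture() buffers the def's writes and returns them as a string:
def _example_capture_usage(context, somedef):
    rendered = capture(context, somedef, 17, 'hi')
    return rendered.strip()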
def _decorate_toplevel(fn):
def decorate_render(render_fn):
def go(context, *args, **kw):
def y(*args, **kw):
return render_fn(context, *args, **kw)
try:
y.__name__ = render_fn.__name__[7:]
except TypeError:
# < Python 2.4
pass
return fn(y)(context, *args, **kw)
return go
return decorate_render
def _decorate_inline(context, fn):
def decorate_render(render_fn):
dec = fn(render_fn)
def go(*args, **kw):
return dec(context, *args, **kw)
return go
return decorate_render
def _include_file(context, uri, calling_uri, **kwargs):
"""locate the template from the given uri and include it in
the current output."""
template = _lookup_template(context, uri, calling_uri)
(callable_, ctx) = _populate_self_namespace(
context._clean_inheritance_tokens(),
template)
callable_(ctx, **_kwargs_for_include(callable_, context._data, **kwargs))
def _inherit_from(context, uri, calling_uri):
"""called by the _inherit method in template modules to set
up the inheritance chain at the start of a template's
execution."""
if uri is None:
return None
template = _lookup_template(context, uri, calling_uri)
self_ns = context['self']
ih = self_ns
while ih.inherits is not None:
ih = ih.inherits
lclcontext = context._locals({'next': ih})
ih.inherits = TemplateNamespace("self:%s" % template.uri,
lclcontext,
template=template,
populate_self=False)
context._data['parent'] = lclcontext._data['local'] = ih.inherits
callable_ = getattr(template.module, '_mako_inherit', None)
if callable_ is not None:
ret = callable_(template, lclcontext)
if ret:
return ret
gen_ns = getattr(template.module, '_mako_generate_namespaces', None)
if gen_ns is not None:
gen_ns(context)
return (template.callable_, lclcontext)
def _lookup_template(context, uri, relativeto):
lookup = context._with_template.lookup
if lookup is None:
raise exceptions.TemplateLookupException(
"Template '%s' has no TemplateLookup associated" %
context._with_template.uri)
uri = lookup.adjust_uri(uri, relativeto)
try:
return lookup.get_template(uri)
except exceptions.TopLevelLookupException:
raise exceptions.TemplateLookupException(str(compat.exception_as()))
def _populate_self_namespace(context, template, self_ns=None):
if self_ns is None:
self_ns = TemplateNamespace('self:%s' % template.uri,
context, template=template,
populate_self=False)
context._data['self'] = context._data['local'] = self_ns
if hasattr(template.module, '_mako_inherit'):
ret = template.module._mako_inherit(template, context)
if ret:
return ret
return (template.callable_, context)
def _render(template, callable_, args, data, as_unicode=False):
"""create a Context and return the string
output of the given template and template callable."""
if as_unicode:
buf = util.FastEncodingBuffer(as_unicode=True)
elif template.bytestring_passthrough:
buf = compat.StringIO()
else:
buf = util.FastEncodingBuffer(
as_unicode=as_unicode,
encoding=template.output_encoding,
errors=template.encoding_errors)
context = Context(buf, **data)
context._outputting_as_unicode = as_unicode
context._set_with_template(template)
_render_context(template, callable_, context, *args,
**_kwargs_for_callable(callable_, data))
return context._pop_buffer().getvalue()
def _kwargs_for_callable(callable_, data):
argspec = compat.inspect_func_args(callable_)
# for normal pages, **pageargs is usually present
if argspec[2]:
return data
# for rendering defs from the top level, figure out the args
namedargs = argspec[0] + [v for v in argspec[1:3] if v is not None]
kwargs = {}
for arg in namedargs:
if arg != 'context' and arg in data and arg not in kwargs:
kwargs[arg] = data[arg]
return kwargs
def _kwargs_for_include(callable_, data, **kwargs):
argspec = compat.inspect_func_args(callable_)
namedargs = argspec[0] + [v for v in argspec[1:3] if v is not None]
for arg in namedargs:
if arg != 'context' and arg in data and arg not in kwargs:
kwargs[arg] = data[arg]
return kwargs
def _render_context(tmpl, callable_, context, *args, **kwargs):
import docassemble.base.mako.template as template
# create polymorphic 'self' namespace for this
# template with possibly updated context
if not isinstance(tmpl, template.DefTemplate):
# if main render method, call from the base of the inheritance stack
(inherit, lclcontext) = _populate_self_namespace(context, tmpl)
_exec_template(inherit, lclcontext, args=args, kwargs=kwargs)
else:
# otherwise, call the actual rendering method specified
(inherit, lclcontext) = _populate_self_namespace(context, tmpl.parent)
_exec_template(callable_, context, args=args, kwargs=kwargs)
def _exec_template(callable_, context, args=None, kwargs=None):
"""execute a rendering callable given the callable, a
Context, and optional explicit arguments
the contextual Template will be located if it exists, and
the error handling options specified on that Template will
be interpreted here.
"""
template = context._with_template
if template is not None and \
(template.format_exceptions or template.error_handler):
try:
callable_(context, *args, **kwargs)
except Exception:
_render_error(template, context, compat.exception_as())
except:
e = sys.exc_info()[0]
_render_error(template, context, e)
else:
callable_(context, *args, **kwargs)
def _render_error(template, context, error):
if template.error_handler:
result = template.error_handler(context, error)
if not result:
compat.reraise(*sys.exc_info())
else:
error_template = exceptions.html_error_template()
if context._outputting_as_unicode:
context._buffer_stack[:] = [
util.FastEncodingBuffer(as_unicode=True)]
else:
context._buffer_stack[:] = [util.FastEncodingBuffer(
error_template.output_encoding,
error_template.encoding_errors)]
context._set_with_template(error_template)
error_template.render_context(context, error=error)
|
|
import datetime
import webbrowser
from operator import itemgetter
from PySide.QtCore import QRect, QSize, Qt, Signal, Slot
from PySide.QtGui import *
from .Searching import SortingEnum
from ._paths import *
DEFAULT_DATE = '19800101'
MAX_DESCRIPTION_LEN = 400
THUMB_SIZE = 200
class VideoItem(QFrame):
def __init__(self, videoData, parent, thumbPix):
super(VideoItem, self).__init__(parent=parent)
self.videoData = videoData
self.setFrameStyle(QFrame.Panel | QFrame.Sunken)
lm = QHBoxLayout()
ld = QVBoxLayout()
lt = QVBoxLayout()
self.widgetData = QWidget()
self.widgetData.setLayout(ld)
lm.addLayout(lt)
lm.addWidget(self.widgetData)
imageThumb = Thumbnail(videoData, thumbPix)
self.thumb = imageThumb
lt.addWidget(imageThumb)
self.viewMode = QListView.ListMode
'''
view_count
subtitles
chapters
title
series
average_rating
episode_number
license
categories
is_live
age_limit
duration
automatic_captions
tags
description
formats
start_time
uploader_url
thumbnail
like_count
annotations
creator
end_time
upload_date
season_number
uploader_id
uploader
dislike_count
id
webpage_url
alt_title
'''
labelTitle = QLabel(videoData['title'])
labelTitle.setStyleSheet("QLabel {font-size: 16px;}")
ld.addWidget(labelTitle)
        if videoData['start_time']:
            rawDate = videoData['start_time']
            formattedStart = getDateObject(rawDate)
            rawDate = videoData['end_time'] or DEFAULT_DATE
            formattedEnd = getDateObject(rawDate)
            ld.addWidget(QLabel('Starts at: {} | Ends at: {}'.format(formattedStart, formattedEnd)))
        else:
            rawDate = videoData['upload_date'] or DEFAULT_DATE
            formattedDate = getDateObject(rawDate)
            ld.addWidget(QLabel('{} | {:,} views'.format(formattedDate, videoData['view_count'])))
labelUploader = QLabel()
url = videoData['uploader_url']
if url:
labelUploader.setTextInteractionFlags(Qt.LinksAccessibleByMouse)
labelUploader.setOpenExternalLinks(True)
uploaderStr = "<a href=\"{}\">{}</a>".format(url, videoData['uploader'])
else:
uploaderStr = videoData['uploader']
labelUploader.setText(uploaderStr)
ld.addWidget(labelUploader)
desc = videoData['description']
if len(desc) > MAX_DESCRIPTION_LEN:
desc = desc[:MAX_DESCRIPTION_LEN] + '...'
labelDesc = QLabel(desc)
labelDesc.setWordWrap(True)
labelDesc.setStyleSheet("QLabel {color : gray; }")
labelDesc.setAlignment(Qt.AlignLeft | Qt.AlignTop)
ld.addWidget(labelDesc)
ld.addStretch()
self.setLayout(lm)
@Slot(str, object)
    def thumbArrived(self, videoID, retrieverFunction):
        if videoID == self.videoData['id']:
            newQpixmap = retrieverFunction(videoID)
self.thumb.setThumbPixmap(newQpixmap)
def setViewMode(self, mode):
if mode == QListView.IconMode:
self.widgetData.hide()
self.thumb.switchData(0)
else:
self.widgetData.show()
self.thumb.switchData(1)
self.viewMode = mode
def likesReformat(count):
    """Shorten a raw count into a human-readable value with a K/M/B suffix."""
    units = ['', 'K', 'M', 'B']
    i = 0
    # Stop before running past the last known unit so huge counts cannot
    # index outside the list.
    while count >= 1000 and i < len(units) - 1:
        count /= 1000.0
        i += 1
    finalCount = round(count, 1)
    if finalCount == int(finalCount):
        finalCount = int(finalCount)
    return '{} {}'.format(finalCount, units[i])
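# Worked example (illustrative): likesReformat(1532000) divides twice
# (1532000 -> 1532.0 -> 1.532), so units[2] == 'M' and the result is '1.5 M';
# counts below 1000 keep an empty suffix, e.g. likesReformat(980) == '980 '.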
class Thumbnail(QWidget):
def __init__(self, videoData, thumbPix):
super(Thumbnail, self).__init__()
self.setFixedSize(THUMB_SIZE, THUMB_SIZE)
self.resize(self.sizeHint())
like_count = likesReformat(videoData['like_count'])
labelLike = QLabel(str(like_count), self)
labelLike.setFixedWidth(THUMB_SIZE / 2)
labelLike.setAlignment(Qt.AlignCenter)
labelLike.setStyleSheet("QLabel { background-color : rgb(10, 155, 10); color : white; }")
labelLike.move(0, THUMB_SIZE - labelLike.height())
labelLike.show()
dislike_count = likesReformat(videoData['dislike_count'])
labelDislike = QLabel(str(dislike_count), self)
labelDislike.setFixedWidth(THUMB_SIZE / 2)
labelDislike.setAlignment(Qt.AlignCenter)
labelDislike.setStyleSheet("QLabel { background-color : rgb(155, 10, 10); color : white; }")
labelDislike.move(THUMB_SIZE / 2, THUMB_SIZE - labelDislike.height())
labelDislike.show()
if videoData['like_count'] > videoData['dislike_count']:
font = labelLike.font()
font.setBold(True)
labelLike.setFont(font)
else:
font = labelDislike.font()
font.setBold(True)
labelDislike.setFont(font)
labelDuration = QLabelS(self)
labelDuration.setFixedWidth(THUMB_SIZE)
labelDuration.setAlignment(Qt.AlignRight | Qt.AlignVCenter)
labelDuration.move(0, THUMB_SIZE - labelDislike.height() - labelDuration.height())
labelDuration.setStyleSheet("QLabel {color : white; font-size: 14px;}")
labelDuration.show()
is_live = videoData['is_live']
if not is_live:
labelDuration.setStyleSheet("QLabel {color : white; }")
labelDuration.setText(str(datetime.timedelta(seconds=videoData['duration'])))
else:
labelDuration.setStyleSheet("QLabel {color : rgb(255, 30, 30); font-size: 14px;}")
labelDuration.setText('LIVE')
self.totalThumbHeight = THUMB_SIZE - (labelLike.height() * 2)
self.visibleThumbHeight = THUMB_SIZE - (labelLike.height())
if thumbPix is None:
thumbPix = QPixmap(THUMB_SIZE, THUMB_SIZE)
thumbPix.fill(QColor(0, 70, 100, 125))
self.pixmap = None
self.setThumbPixmap(thumbPix)
title = videoData['title']
if len(title) > 50:
title = title[0:50] + '...'
labelTitle = QLabel(title, self)
labelTitle.setFixedWidth(THUMB_SIZE)
labelTitle.setWordWrap(True)
labelTitle.setAlignment(Qt.AlignHCenter | Qt.AlignTop)
labelTitle.move(0, THUMB_SIZE - labelDislike.height() * 3)
labelTitle.hide()
self.labelTitle = labelTitle
self.labelLike = labelLike
self.labelDislike = labelDislike
self.labelDuration = labelDuration
desc = videoData['description']
if len(desc) > MAX_DESCRIPTION_LEN * 2:
desc = desc[:MAX_DESCRIPTION_LEN * 2] + '...'
self.setToolTip(desc)
def setThumbPixmap(self, newQpixmap):
self.pixmap = newQpixmap.scaled(QSize(THUMB_SIZE, THUMB_SIZE), Qt.KeepAspectRatio, Qt.SmoothTransformation)
def switchData(self, state):
if state == 0:
self.labelDislike.hide()
self.labelLike.hide()
self.labelDuration.hide()
self.labelTitle.show()
else:
self.labelDislike.show()
self.labelLike.show()
self.labelDuration.show()
self.labelTitle.hide()
def paintEvent(self, *args, **kwargs):
painter = QPainter()
painter.begin(self)
devRect = QRect(painter.device().rect())
devRect.setHeight(self.visibleThumbHeight)
painter.fillRect(devRect, QColor(0, 0, 0))
devRect.setHeight(self.totalThumbHeight)
rect = QRect(self.pixmap.rect())
rect.moveCenter(devRect.center())
painter.drawPixmap(rect.topLeft(), self.pixmap)
painter.end()
class QLabelS(QLabel):
def __init__(self, parent):
super(QLabelS, self).__init__(parent)
def paintEvent(self, event):
qp = QPainter()
qp.begin(self)
self.drawText(event, qp)
qp.end()
def drawText(self, event, qp):
rct = event.rect()
rct2 = QRect(event.rect())
x = rct2.x()
y = rct2.y()
rct2.moveTo(x - 1, y - 1)
qp.setPen(QColor(0, 0, 0))
qp.drawText(rct, self.alignment(), self.text())
qp.setPen(QColor(255, 255, 255))
qp.drawText(rct2, self.alignment(), self.text())
class PreviewWidget(QWidget):
newSearchRequested = Signal(str)
removeSearchRequested = Signal(str)
tabChanged = Signal(str)
searchPropertiesCheckChanged = Signal(bool)
searchIndexChanged = Signal(str, int)
sortingChanged = Signal()
def __init__(self, iconAdd, iconTools, iconSearch):
super(PreviewWidget, self).__init__(parent=None)
self.iconDetail = QIcon(path.join(nuovolaPath, 'detailed.png'))
self.iconIcon = QIcon(path.join(nuovolaPath, 'icon.png'))
toolbarSearches = QToolBar()
toolbarSearches.setObjectName('toolbarSearches')
self.toolbarSearches = toolbarSearches
ta = toolbarSearches.addAction(iconAdd, 'New search', self.queryNewSearch)
ta.setShortcut('Ctrl+S')
ta = toolbarSearches.addAction(iconTools, 'Configuration',
lambda: QMessageBox.information(self, '', 'Not implemented'))
ta.setShortcut('Ctrl+C')
ta = toolbarSearches.addAction(iconSearch, 'Search properties')
ta.setShortcut('Ctrl+P')
ta.toggled.connect(self._searchPropertiesCheckChanged)
ta.setCheckable(True)
self.actionSearchProperties = ta
menuViewModes = QMenu(self)
g = QActionGroup(self, exclusive=True)
g.triggered.connect(self.changeViewMode)
actionListView = g.addAction(self.iconDetail, "List")
actionListView.setCheckable(True)
actionListView.setChecked(True)
self.actionListView = actionListView
actionIconView = g.addAction(self.iconIcon, "Icon")
actionIconView.setCheckable(True)
self.actionIconView = actionIconView
menuViewModes.addAction(actionListView)
menuViewModes.addAction(actionIconView)
menuViewModes.setDefaultAction(actionListView)
actionViewModesMenu = menuViewModes.menuAction()
actionViewModesMenu.setIconVisibleInMenu(True)
actionViewModesMenu.triggered.connect(self.switchViewMode)
actionViewModesMenu.setToolTip('View mode')
actionViewModesMenu.setIcon(self.iconDetail)
self.actionViewModesMenu = actionViewModesMenu
toolbarSearches.addAction(actionViewModesMenu)
comboSorting = QComboBox(toolbarSearches)
comboSorting.addItems([getattr(SortingEnum, i) for i in dir(SortingEnum) if not i.startswith('_')])
actionSorting = QWidgetAction(toolbarSearches)
actionSorting.setDefaultWidget(comboSorting)
toolbarSearches.addAction(actionSorting)
self.actionSorting = actionSorting
index = comboSorting.findText(SortingEnum.newest)
comboSorting.setCurrentIndex(index)
comboSorting.setEditable(False)
comboSorting.currentIndexChanged.connect(self.sortItems)
self.comboSorting = comboSorting
tabWidget = QTabWidget()
tabWidget.setObjectName('tabWidget')
myTabBar = MyTabBar(tabWidget)
tabWidget.setTabBar(myTabBar)
self.tabWidget = tabWidget
self.addEmptyTab()
tabWidget.setMovable(True)
tabWidget.setCornerWidget(toolbarSearches)
tabWidget.currentChanged.connect(self._currentTabChanged)
tabWidget.tabCloseRequested.connect(self._tabClosing)
tabWidget.tabBar().tabMoved.connect(self.tabsMoved)
layoutMain = QVBoxLayout()
layoutMain.addWidget(self.tabWidget)
layoutBottom = QHBoxLayout()
buttonClear = QPushButton(QIcon(path.join(discoveryPath, 'clear.png')), 'Clear results')
buttonClear.clicked.connect(self.clearList)
buttonClear.setEnabled(False)
self.buttonClear = buttonClear
buttonForceSearchNow = QPushButton(QIcon(path.join(brightPath, 'search_find.png')), 'Search now')
buttonForceSearchNow.clicked.connect(self.forceSearchNow)
buttonForceSearchNow.setEnabled(False)
self.buttonForceSearchNow = buttonForceSearchNow
buttonMarkRead = QPushButton(QIcon(path.join(faiPath, 'Apply_modified.png')), 'Mark as read')
buttonMarkRead.clicked.connect(self.markAsRead)
buttonMarkRead.setEnabled(False)
self.buttonMarkRead = buttonMarkRead
layoutBottom.addWidget(buttonMarkRead)
layoutBottom.addStretch()
layoutBottom.addWidget(buttonForceSearchNow)
layoutBottom.addStretch()
layoutBottom.addWidget(buttonClear)
layoutMain.addLayout(layoutBottom)
self.setLayout(layoutMain)
self._isEmpty = True
self._isEmmitingCheckChanged = False
self._onInitialPlacement = False
self._isChangingVieModeFromButton = False
self.isSetupSorting = False
@Slot()
def forceSearchNow(self):
tabWidget = self.tabWidget
listPreviews = tabWidget.widget(tabWidget.currentIndex())
search = listPreviews.search
search.forceSearchNow()
@Slot()
def sortItems(self):
if self.isSetupSorting:
return
sortString = self.comboSorting.currentText()
tabWidget = self.tabWidget
currentIndex = tabWidget.currentIndex()
listPreviews = tabWidget.widget(currentIndex)
search = listPreviews.search
search.sorting = sortString
self._doActualSorting(listPreviews, search)
def sortItemsFromSearch(self, search):
if self.isSetupSorting:
return
tabWidget = self.tabWidget
index = self.findTabIndexByWord(search.word)
listPreviews = tabWidget.widget(index)
self._doActualSorting(listPreviews, search)
def setSortingModeFromSearch(self, search):
sortString = search.sorting
combo = self.comboSorting
index = combo.findText(sortString)
self.isSetupSorting = True
combo.setCurrentIndex(index)
self.isSetupSorting = False
def _doActualSorting(self, listPreviews, search):
results = search.currentResults
sortString = search.sorting
if len(results) == 1:
return
sortedResults = self._getSortedItemPlaces(results, sortString)
form = self.parentWidget()
listPreviews.clear()
listPreviews.setUpdatesEnabled(False)
for sortKey, videoId in sortedResults:
videoInfo = results[videoId]
self.appendVideoItem(search, videoInfo, form.retrieveThumbnail(videoInfo['id']))
qApp.processEvents()
listPreviews.setUpdatesEnabled(True)
listPreviews.repaint()
def _getSortedItemPlaces(self, results, sortString):
sortedResults = []
for key, val in results.items():
if sortString in (SortingEnum.newest, SortingEnum.oldest):
sortedResults.append((getDateObject(val['upload_date']), key))
elif sortString == SortingEnum.views:
sortedResults.append((val['view_count'], key))
elif sortString == SortingEnum.likes:
sortedResults.append((val['like_count'], key))
elif sortString == SortingEnum.lenght:
sortedResults.append((val['duration'], key))
if sortString == SortingEnum.oldest:
sortedResults.sort(key=itemgetter(0))
else:
sortedResults.sort(key=itemgetter(0), reverse=True)
return sortedResults
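    # Illustrative note: for SortingEnum.views over results such as
    # {'a': {'view_count': 10}, 'b': {'view_count': 25}} this returns
    # [(25, 'b'), (10, 'a')], since every mode except 'oldest' sorts descending.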
def markAsRead(self, listPreviews=None):
if listPreviews is None:
tabWidget = self.tabWidget
listPreviews = tabWidget.widget(tabWidget.currentIndex())
search = listPreviews.search
search.isRead = True
self.buttonMarkRead.setEnabled(False)
def clearList(self):
tabWidget = self.tabWidget
listPreviews = tabWidget.widget(tabWidget.currentIndex())
listPreviews.clear()
search = listPreviews.search
search.currentResults.clear()
self.markAsRead(listPreviews)
self.buttonClear.setEnabled(False)
def clear(self):
tabWidget = self.tabWidget
while tabWidget.count() > 0:
try:
tabWidget.widget(0).clear()
except Exception:
pass
tabWidget.removeTab(0)
def tabsMoved(self):
if not self._onInitialPlacement:
self.updateSearchesIndexes()
def _searchPropertiesCheckChanged(self, event):
if self._isEmmitingCheckChanged:
return
self._isEmmitingCheckChanged = True
self.searchPropertiesCheckChanged.emit(event)
self._isEmmitingCheckChanged = False
def addEmptyTab(self):
self._isEmpty = True
tabWidget = self.tabWidget
tabWidget.setTabsClosable(False)
tabWidget.addTab(QWidget(), '[no searches]')
self.setSearchRelatedToolsEnabled(False)
def removeEmptyTab(self):
self._isEmpty = False
tabWidget = self.tabWidget
tabWidget.removeTab(0)
tabWidget.setTabsClosable(True)
self.setSearchRelatedToolsEnabled(True)
def setSearchRelatedToolsEnabled(self, state):
self.actionViewModesMenu.setEnabled(state)
self.actionSorting.setEnabled(state)
try:
self.buttonClear.setEnabled(state)
self.buttonMarkRead.setEnabled(state)
self.buttonForceSearchNow.setEnabled(state)
except AttributeError:
pass
def queryNewSearch(self):
suggested = 'cat' if self._isEmpty else ''
word, ret = QInputDialog.getText(self, 'New search', 'Enter word to search for:', QLineEdit.Normal, suggested)
if ret and word != '':
self.newSearchRequested.emit(word)
@Slot(int)
def _currentTabChanged(self, index):
word = self.tabWidget.tabText(index)
if word != '':
listPreviews = self.tabWidget.widget(index)
try:
search = listPreviews.search
except AttributeError:
return
self.buttonMarkRead.setEnabled(not search.isRead)
self.buttonClear.setEnabled(listPreviews.count())
self.tabChanged.emit(word)
self.setSortingModeFromSearch(search)
self.setViewModeFromSearch(search)
def updateSearchesIndexes(self):
for index in range(self.tabWidget.count()):
word = self.tabWidget.tabText(index)
self.searchIndexChanged.emit(word, index)
def updateItemsViewMode(self, listWidget, viewMode):
for i in range(listWidget.count()):
item = listWidget.item(i)
listWidget.itemWidget(item).setViewMode(viewMode)
@Slot()
def switchViewMode(self):
listPreviews = self.tabWidget.widget(self.tabWidget.currentIndex())
search = listPreviews.search
self._isChangingVieModeFromButton = True
viewMode = search.viewMode
if viewMode == QListView.IconMode:
self.actionListView.setChecked(True)
viewMode = QListView.ListMode
self.actionViewModesMenu.setIcon(self.iconDetail)
else:
self.actionIconView.setChecked(True)
viewMode = QListView.IconMode
self.actionViewModesMenu.setIcon(self.iconIcon)
search.viewMode = viewMode
self._isChangingVieModeFromButton = False
listPreviews.setViewMode(viewMode)
self.updateItemsViewMode(listPreviews, viewMode)
def setViewModeFromSearch(self, search):
listPreviews = self.tabWidget.widget(self.findTabIndexByWord(search.word))
self._isChangingVieModeFromButton = True
viewMode = search.viewMode
if viewMode == QListView.ListMode:
self.actionListView.setChecked(True)
self.actionViewModesMenu.setIcon(self.iconDetail)
else:
self.actionIconView.setChecked(True)
self.actionViewModesMenu.setIcon(self.iconIcon)
self._isChangingVieModeFromButton = False
listPreviews.setViewMode(viewMode)
self.updateItemsViewMode(listPreviews, viewMode)
@Slot()
def changeViewMode(self):
if self._isChangingVieModeFromButton:
return
listPreviews = self.tabWidget.widget(self.tabWidget.currentIndex())
search = listPreviews.search
if self.actionListView.isChecked():
self.actionViewModesMenu.setIcon(self.iconDetail)
search.viewMode = QListView.ListMode
else:
self.actionViewModesMenu.setIcon(self.iconIcon)
search.viewMode = QListView.IconMode
mode = search.viewMode
listPreviews.setViewMode(mode)
self.updateItemsViewMode(listPreviews, mode)
@Slot(int)
def _tabClosing(self, index):
self.removeSearchRequested.emit(self.tabWidget.tabText(index))
def addSearchTab(self, search, icon):
if self._isEmpty:
self.removeEmptyTab()
listPreviews = QListWidget()
listPreviews.search = search
listPreviews.setResizeMode(QListView.Adjust)
listPreviews.setMovement(QListView.Static)
listPreviews.setObjectName(search.word)
listPreviews.itemDoubleClicked.connect(self.openVideoInBrowser)
tabWidget = self.tabWidget
tabWidget.addTab(listPreviews, icon, search.word)
search.index = tabWidget.count() - 1
tabWidget.setCurrentIndex(search.index)
self.setViewModeFromSearch(search)
def removeSearchTab(self, search):
index = search.index
tabWidget = self.tabWidget
tabWidget.removeTab(index)
if tabWidget.count() == 0:
self.addEmptyTab()
else:
self.updateSearchesIndexes()
def appendVideoItem(self, search, data, thumbPix):
sortString = search.sorting
results = search.currentResults
if len(results) == 1:
place = -1
else:
place = 0
videoID = data['id']
sortedResults = self._getSortedItemPlaces(results, sortString)
for i in range(len(sortedResults)):
key, sortedVideoID = sortedResults[i]
if videoID == sortedVideoID:
place = i
break
return self.insertVideoItem(data, thumbPix, search.word, place)
def insertVideoItem(self, data, thumbPix, word, place):
index = self.findTabIndexByWord(word)
if index is None:
return
if index == self.tabWidget.currentIndex():
self.buttonMarkRead.setEnabled(True)
self.buttonClear.setEnabled(True)
listPreviews = self.tabWidget.widget(index)
item = QListWidgetItem('')
item.setSizeHint(QSize(200, 200)) # todo: make thumb size modifiable from mainUI
newVideoItem = VideoItem(data, self, thumbPix)
newVideoItem.setViewMode(listPreviews.search.viewMode)
if place == -1:
listPreviews.addItem(item)
else:
listPreviews.insertItem(place, item)
listPreviews.setItemWidget(item, newVideoItem)
return newVideoItem, item
def findTabIndexByWord(self, word):
for index in range(self.tabWidget.count()):
text = self.tabWidget.tabText(index)
if text == word:
return index
def switchToTab(self, word):
index = self.findTabIndexByWord(word)
self.tabWidget.setCurrentIndex(index)
def openVideoInBrowser(self, item):
view = item.listWidget().itemWidget(item)
webbrowser.open(view.videoData['webpage_url'])
class MyTabBar(QTabBar):
def __init__(self, *args, **kwargs):
self.pixmap = QPixmap(path.join(iconPath, 'WAIS', 'Warning.png')).scaled(QSize(16, 16), Qt.KeepAspectRatio,
Qt.SmoothTransformation)
super(MyTabBar, self).__init__(*args, **kwargs)
def paintEvent(self, *args, **kwargs):
super(MyTabBar, self).paintEvent(*args, **kwargs)
painter = QPainter()
painter.begin(self)
selfRect = self.rect()
tabWidget = self.parentWidget()
for index in range(self.count()):
listPreviews = tabWidget.widget(index)
try:
search = listPreviews.search
except AttributeError:
continue
if search.isRead:
continue
oldrect = self.tabRect(index)
point = selfRect.topLeft() + oldrect.center()
point.setY(-1)
painter.drawPixmap(point, self.pixmap)
painter.end()
def getDateObject(rawDate):
year, month, day = (int(i) for i in (rawDate[:4], rawDate[4:6], rawDate[6:]))
return datetime.date(year, month, day)
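# Worked example (illustrative): getDateObject('19800101') slices the string into
# ('1980', '01', '01') and returns datetime.date(1980, 1, 1); DEFAULT_DATE above
# is parsed the same way when a video carries no upload_date.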
def setApp(app):
global qApp
qApp = app
|
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import glob
import hashlib
import logging
import os
import re
import shutil
import tempfile
import requests
from platforms.common import ReleaseException, run
from releases import get_version_and_timestamp_from_release
def brew(homebrew_dir, command, *run_args, **run_kwargs):
"""
Run brew that is installed in the specified prefix.
Args:
homebrew_dir: The path containing bin/brew. e.g. /usr/local
command: The list of args to pass to the brew command
run_args: Extra args to send to platforms.common.run
run_kwargs: Extra kwargs to send to platforms.common.run
Returns:
Result from subprocess.run
"""
brew_path = os.path.join(homebrew_dir, "bin", "brew")
return run([brew_path] + command, *run_args, **run_kwargs)
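# Hedged usage sketch (the prefix path and tap name below are placeholders,
# mirroring how validate_tap() later in this module invokes brew):
#
#     result = brew("/tmp/release-homebrew",
#                   ["info", "some-org/some-tap/buck"],
#                   capture_output=True)
#     first_line = result.stdout.decode("utf-8").splitlines()[0]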
def install_homebrew(homebrew_dir):
logging.info("Installing homebrew to {}".format(homebrew_dir))
if not os.path.exists(homebrew_dir):
os.makedirs(homebrew_dir)
logging.info("Downloading homebrew...")
response = requests.get(
"https://github.com/Homebrew/brew/tarball/master", stream=True
)
response.raise_for_status()
with tempfile.NamedTemporaryFile() as fout:
for chunk in response.iter_content(1024 * 1024):
fout.write(chunk)
fout.flush()
logging.info("Extracting homebrew...")
run(["tar", "xzf", fout.name, "--strip", "1", "-C", homebrew_dir])
logging.info("Extracted homebrew")
def fetch_tarball_sha256(url):
""" Get the sha256 of a tarball """
logging.info("Fetching tarball from {}...".format(url))
response = requests.get(url, stream=True)
sha256 = hashlib.sha256()
for chunk in response.iter_content(chunk_size=1024 * 1024):
sha256.update(chunk)
hex_hash = sha256.hexdigest()
logging.info("Downloaded {} with hash {}".format(url, hex_hash))
return hex_hash
def get_formula_path(homebrew_dir, tap_repository):
""" Get the path for the buck forumula in the given repository """
result = brew(homebrew_dir, ["formula", tap_repository + "/buck"], None, True)
return result.stdout.decode("utf-8").strip()
def setup_tap(homebrew_dir, tap_repository):
""" Make sure that `tap_repository` is tapped """
logging.info("Tapping {}".format(tap_repository))
brew(homebrew_dir, ["tap", tap_repository])
logging.info("Tapped {}".format(tap_repository))
def update_formula_before_bottle(
repository, release_version, release_timestamp, formula_path, tarball_sha256
):
"""
Updates `formula_path` with correct urls, version and sha for building a bottle
Args:
        repository: The github repository (owner/repo) whose archive url is written into the formula
release_version: The version of the release (no "v" prefix)
release_timestamp: The timestamp to use while building
formula_path: The local path to the buck formula
tarball_sha256: The sha256 of the source tarball for the specified release
"""
logging.info("Updating formula at {}".format(formula_path))
with open(formula_path, "r") as fin:
all_data = fin.read()
all_data = re.sub(
r"BUCK_VERSION = .*$",
'BUCK_VERSION = "{}".freeze'.format(release_version),
all_data,
flags=re.MULTILINE,
)
all_data = re.sub(
r"BUCK_RELEASE_TIMESTAMP = .*$",
'BUCK_RELEASE_TIMESTAMP = "{}".freeze'.format(release_timestamp),
all_data,
flags=re.MULTILINE,
)
all_data = re.sub(
r'sha256 "[a-z0-9]{64}"$',
'sha256 "{}"'.format(tarball_sha256),
all_data,
flags=re.MULTILINE,
)
# This is a wholly undocumented endpoint, but is not subject to ratelimiting
# See https://github.com/facebook/homebrew-fb/pull/33
all_data = re.sub(
r' url "https://.+"$',
r' url "https://github.com/{repository}/archive/v#{{BUCK_VERSION}}.tar.gz"'.format(
repository=repository
),
all_data,
flags=re.MULTILINE,
)
all_data = re.sub(
r' root_url "https://github.com/.*/releases/download/v#{BUCK_VERSION}"',
r' root_url "https://github.com/{repository}/releases/download/v#{{BUCK_VERSION}}"'.format(
repository=repository
),
all_data,
flags=re.MULTILINE,
)
with open(formula_path, "w") as fout:
fout.write(all_data)
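# Illustrative (hedged) example of the Ruby formula lines the substitutions above
# target, inferred from the regexes rather than copied from a checked-in buck.rb:
#
#   BUCK_VERSION = "2019.10.17.01".freeze
#   BUCK_RELEASE_TIMESTAMP = "<release timestamp>".freeze
#   sha256 "<64 hex characters of the source tarball>"
#   url "https://github.com/<repository>/archive/v#{BUCK_VERSION}.tar.gz"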
def build_bottle_file(
homebrew_dir,
tap_repository,
tap_path,
release_version,
target_macos_version,
output_dir,
):
"""
Builds the actual bottle file via brew
Args:
        homebrew_dir: The path containing bin/brew. e.g. /usr/local
        tap_repository: The name of the tap repository
        tap_path: The local path to the given tap repository
        release_version: The version that should be built (no "v" prefix)
        target_macos_version: The target macos short name to use in the resulting path
output_dir: The directory to move the build artifact to after building
Returns:
The path to the bottle.tar.gz
"""
brew_target = tap_repository + "/buck"
logging.info("Building bottle")
# Cool, so install --force will still not rebuild. Uninstall, and just don't
# care if the uninstall fails
brew(homebrew_dir, ["uninstall", "--force", brew_target], tap_path, check=False)
brew(homebrew_dir, ["install", "--force", "--build-bottle", brew_target], tap_path)
logging.info("Creating bottle file")
brew(
homebrew_dir,
["bottle", "--no-rebuild", "--skip-relocation", brew_target],
tap_path,
)
logging.info("Created bottle file")
bottle_filename = "buck-{ver}.{macos_ver}.bottle.tar.gz".format(
ver=release_version, macos_ver=target_macos_version
)
bottle_path = os.path.join(output_dir, bottle_filename)
bottles = glob.glob(
os.path.join(tap_path, "buck--{}*.bottle.tar.gz".format(release_version))
)
if len(bottles) != 1:
raise ReleaseException(
"Got an invalid number of bottle files ({} files: {})".format(
len(bottles), " ".join(bottles)
)
)
shutil.move(bottles[0], bottle_path)
return bottle_path
def get_sha256(path, chunk_size=1024 * 1024):
""" Get the sha256 of a file """
sha = hashlib.sha256()
with open(path, "rb") as fin:
data = fin.read(chunk_size)
while data:
sha.update(data)
data = fin.read(chunk_size)
return sha.hexdigest()
def update_formula_after_bottle(formula_path, sha, target_macos_version_spec):
"""
Update the buck formula with the sha for the newly created bottle
Args:
formula_path: The path to the buck formula
sha: The new sha to use
target_macos_version_spec: The version spec to use for this sha
"""
logging.info("Updating formula with new bottle sha")
with open(formula_path, "r") as fin:
all_data = fin.read()
all_data = re.sub(
r'sha256 "[a-z0-9]+" => :.*$',
'sha256 "{}" => :{}'.format(sha, target_macos_version_spec),
all_data,
flags=re.MULTILINE,
)
with open(formula_path, "w") as fout:
fout.write(all_data)
logging.info("Updated formula with new bottle sha")
def push_tap(git_repository, tap_path, version):
"""
Grab any working directory changes for the tap, clone a new tap repository,
and push those changes upstream. The original tap path is in a clean state
after this push. The clone is done with ssh, so ssh keys must be available
Args:
git_repository: The repo on github that needs to be cloned/pushed to
tap_path: The directory that the tap (with changes) exists in
version: The version to use in commit messages
"""
logging.info("Gathering git diff from {}".format(tap_path))
git_diff = run(["git", "diff"], tap_path, True).stdout
git_url = "git@github.com:{}.git".format(git_repository)
with tempfile.TemporaryDirectory() as temp_dir:
logging.info("Cloning {} into {}".format(git_url, temp_dir))
run(["git", "clone", git_url, temp_dir])
logging.info("Cloned into {}. Applying patch".format(temp_dir))
run(["git", "apply", "-"], temp_dir, input=git_diff)
logging.info("Committing...")
with tempfile.NamedTemporaryFile() as fout:
commit_message = (
"Bump buck to version {}\n\nThis commit was generated by "
"release automation\n"
).format(version)
fout.write(commit_message.encode("utf-8"))
fout.flush()
run(["git", "commit", "-F", fout.name, "buck.rb"], temp_dir)
logging.info("Pushing commit upstream")
run(["git", "push", "origin"], temp_dir)
logging.info("Pushed commit upstream!")
logging.info("Resetting state of {}, and updating it after push".format(tap_path))
run(["git", "checkout", "buck.rb"], tap_path)
run(["git", "checkout", "master"], tap_path)
run(["git", "pull"], tap_path)
logging.info("Reset state of {}, and updating it after push".format(tap_path))
def validate_tap(homebrew_dir, tap_repository, version):
logging.info("Validating that brew installs with new tap information")
brew_target = tap_repository + "/buck"
brew(homebrew_dir, ["uninstall", "--force", brew_target])
brew(homebrew_dir, ["install", brew_target])
output = (
brew(homebrew_dir, ["info", brew_target], capture_output=True)
.stdout.decode("utf-8")
.splitlines()[0]
)
if "{}/buck: stable {}".format(tap_repository, version) not in output:
raise ReleaseException(
"Expected version {} to be installed, but got this from `brew info {}`: {}".format(
version, tap_repository, output
)
)
def audit_tap(homebrew_dir, tap_repository):
logging.info("Running brew audit")
brew_target = tap_repository + "/buck"
brew(homebrew_dir, ["audit", brew_target])
def publish_tap_changes(homebrew_dir, tap_repository, version):
git_user, git_repo = tap_repository.split("/")
full_git_repo = "{}/homebrew-{}".format(git_user, git_repo)
formula_path = get_formula_path(homebrew_dir, tap_repository)
tap_path = os.path.dirname(formula_path)
push_tap(full_git_repo, tap_path, version)
def log_about_manual_tap_push(homebrew_dir, tap_repository):
formula_path = get_formula_path(homebrew_dir, tap_repository)
tap_path = os.path.dirname(formula_path)
logging.info(
"The homebrew tap is ready for a pull request. It can be found at {}".format(
tap_path
)
)
def build_bottle(
homebrew_dir,
release,
repository,
tap_repository,
target_macos_version,
target_macos_version_spec,
output_dir,
):
release_version, release_timestamp = get_version_and_timestamp_from_release(release)
if not os.path.exists(os.path.join(homebrew_dir, "bin", "brew")):
install_homebrew(homebrew_dir)
setup_tap(homebrew_dir, tap_repository)
formula_path = get_formula_path(homebrew_dir, tap_repository)
tap_path = os.path.dirname(formula_path)
# This is a wholly undocumented endpoint, but is not subject to ratelimiting
# See https://github.com/facebook/homebrew-fb/pull/33
undocumented_tarball_url = "https://github.com/{repository}/archive/{tag_name}.tar.gz".format(
repository=repository, tag_name=release["tag_name"]
)
tarball_sha256 = fetch_tarball_sha256(undocumented_tarball_url)
# First, update the bottle to have the new version and tarball sha.
update_formula_before_bottle(
repository, release_version, release_timestamp, formula_path, tarball_sha256
)
# Build the actual bottle file
bottle_path = build_bottle_file(
homebrew_dir,
tap_repository,
tap_path,
release_version,
target_macos_version,
output_dir,
)
# Get the bottle file sha, and update the bottle formula
bottle_sha = get_sha256(bottle_path)
update_formula_after_bottle(formula_path, bottle_sha, target_macos_version_spec)
# Make sure that we still pass `brew audit`
audit_tap(homebrew_dir, tap_repository)
return bottle_path
|
|
import collections
import itertools
from typing import Iterable, List, NamedTuple, Optional, Tuple
from data import max_heap
from data.alphabets import morse
from puzzle.problems import problem
from puzzle.steps import generate_solutions
_IGNORE_DELIMITER = 'ignored'
_CHARACTER_DELIMITER = 'character delimiter'
_WORD_DELIMITER = 'word delimiter'
_DOT = '.'
_DASH = '-'
_MORSE_DENSITY = 3.3  # Typical number of morse symbols (dots/dashes) per plaintext character.
_TARGET_WORD_LENGTH = 3 # Require 3+ letters.
_TARGET_LENGTH = _MORSE_DENSITY * _TARGET_WORD_LENGTH
class _Interpretation(NamedTuple):
dot: str
dash: str
character_delimiter: Optional[str]
word_delimiter: Optional[str]
ignored: set
# "Most common" interpretation.
_ALPHABET = _Interpretation(
'.',
'-',
' ',
'/',
set(),
)
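# Illustrative example: under _ALPHABET the input '... --- ... / -.-' is read
# literally ('.' and '-' as dots/dashes), letters split on spaces and words on
# '/', so it decodes to 'SOS K' assuming the usual mapping in data.alphabets.morse.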
class MorseProblem(problem.Problem):
def __init__(self, name: str, lines: List[str], **kwargs) -> None:
super(MorseProblem, self).__init__(name, lines, **kwargs)
self._normalized = '\n'.join(lines)
self._frequencies = collections.Counter(self._normalized)
for c in list(self._frequencies.keys()):
if c.isspace(): # Ignore whitespace in input.
del self._frequencies[c]
@staticmethod
def score(lines: List[str]) -> float:
return _score(lines)
def _solve(self) -> dict:
raise NotImplementedError() # Explicitly unsupported.
def _solve_iter(self) -> generate_solutions.Solutions:
fringe: max_heap.MaxHeap[Tuple[generate_solutions.Solutions, str, bool]] = (
max_heap.MaxHeap())
solution_buffer: max_heap.MaxHeap[generate_solutions.Solutions] = (
max_heap.MaxHeap())
# Initialize options with weights.
for weight, interpretation in _generate_interpretations(self._normalized):
fringe.push(weight, self._iter_interpretation(interpretation))
while fringe:
group_generator, group_weight = fringe.pop_with_weight()
if fringe:
next_best_weight = fringe.best_weight()
else:
next_best_weight = float('-inf')
while next_best_weight < self._solution_constraints.weight_threshold:
yield StopIteration() # Good solutions are impossible.
yield from solution_buffer.pop_with_weight_until(next_best_weight)
for result in group_generator:
(solution, weight), notes, has_ignored = result
if has_ignored and solution in self._notes:
continue # Skip duplicates which can occur with ignored characters.
self._notes[solution] = notes # Save (and throw away) notes.
solution_weight = group_weight * weight
if solution_weight > next_best_weight:
yield solution, solution_weight
else:
solution_buffer.push(solution_weight, solution)
# Abandon this generator for now.
fringe.push(group_weight, group_generator)
break
yield from solution_buffer.pop_with_weight_until(float('-inf'))
def _iter_interpretation(
self, interpretation: _Interpretation
) -> Tuple[generate_solutions.Solutions, str, bool]:
acc = []
result = []
for c in self._normalized:
if c == interpretation.dot:
acc.append('.')
elif c == interpretation.dash:
acc.append('-')
elif c in (
interpretation.character_delimiter, interpretation.word_delimiter):
buffer = ''.join(acc)
acc.clear()
if not buffer:
pass
elif buffer in morse.LOOKUP:
result.append(morse.LOOKUP[buffer])
else:
# Turns out this was an invalid interpretation?
return
if c == interpretation.word_delimiter:
result.append(' ')
elif c in interpretation.ignored:
continue
else:
raise ValueError(
'Unexpected "%s" for interpretation %s' % (c, interpretation))
if acc:
buffer = ''.join(acc)
if buffer in morse.LOOKUP:
result.append(morse.LOOKUP[buffer])
else:
return # TODO: Attempt inserting delimiters.
solution = (''.join(result), 1.0)
yield (
solution, _interpretation_notes(interpretation),
bool(interpretation.ignored))
def _score(lines: List[str]) -> float:
counts = collections.Counter()
size = 0
for line in lines:
counts.update(line)
size += len(line)
if len(counts) > len(_ALPHABET):
return 0
if len(counts) < 2:
return 0 # Minimal input.
# There are [1, _ALPHABET] symbols.
if all(c in _ALPHABET for c in counts):
return 1 # Looks like ordinary morse.
elif '.' in counts and '-' in counts:
margin_of_error = 0.1
elif len(counts) < len(_ALPHABET) - 1: # No ignored symbols
margin_of_error = 0.5
else:
margin_of_error = 1.0
# Increase confidence asymptotically with the size of input. Long inputs
# with only 3 symbols are very morse-like.
return max((
0, (1 - margin_of_error) + (margin_of_error * (1 - _TARGET_LENGTH / size))
))
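# Worked example (illustrative): a 30-symbol input drawn from {'.', '-', ' '} plus
# a single stray character contains both '.' and '-', so margin_of_error is 0.1
# and the score is 0.9 + 0.1 * (1 - 9.9 / 30) ~= 0.97; the same symbols over a
# much shorter input score proportionally lower.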
def _generate_interpretations(
given: str) -> Iterable[Tuple[float, _Interpretation]]:
results = []
frequencies = collections.Counter(given)
(_, first_n), (_, second_n) = frequencies.most_common(2)
max_score = first_n * first_n + second_n + 1 # +1 to reserve true 1.0.
def _scored_interpretation(
dot: str, dash: str, character_delimiter: Optional[str],
word_delimiter: Optional[str], ignored: set, weight_penalty: float,
) -> Tuple[float, _Interpretation]:
interpretation = _Interpretation(
dot, dash, character_delimiter, word_delimiter, ignored)
if dot == '.' and dash == '-':
# Very probable assignment.
base_score = max_score
else:
base_score = (first_n * frequencies[dot] + frequencies[dash])
if dot == '-' and dash == '.':
# Improbable but interesting assignment.
base_score *= 0.8
if ignored and not word_delimiter:
penalty = len(ignored) # Discourage wasted characters.
else:
penalty = 0
# Prefer assigning (dot, dash) to 1st and 2nd most common, respectively.
return (
weight_penalty * base_score / (max_score + penalty),
interpretation,
)
n_chars = len(frequencies)
for x, y in itertools.combinations(frequencies, 2):
if n_chars == 2:
character_delimiter = None
word_delimiter = None
ignored = set()
results.append(_scored_interpretation(
x, y, character_delimiter, word_delimiter, ignored, 1.0))
results.append(_scored_interpretation(
y, x, character_delimiter, word_delimiter, ignored, 1.0))
elif n_chars == 3:
character_delimiter = None
for c in frequencies:
if c in (x, y):
continue
character_delimiter = c
break
# Ensure character_delimiter is never consecutive.
if character_delimiter * 2 in given:
continue # Repeated delimiter appears in input.
word_delimiter = None
ignored = set()
results.append(_scored_interpretation(
x, y, character_delimiter, word_delimiter, ignored, 1.0))
results.append(_scored_interpretation(
y, x, character_delimiter, word_delimiter, ignored, 1.0))
else:
# Need to choose both a character and word delimiter.
options = []
for c in frequencies:
if c in (x, y):
continue
options.append(c)
# 2+ items in options. Validate delimiters never repeat.
last = None
for character_delimiter, word_delimiter in itertools.permutations(
options, 2):
if given[-1] == word_delimiter:
continue # This interpretation would require a space at the end.
need_char = False # Allow arbitrary delimiters to start.
saw_word_delimiter = False # Require character between word_delimiter.
max_word_length = 0
acc_word_length = 0
for c in given:
if c != word_delimiter:
pass
elif saw_word_delimiter:
break # Two word delimiters occurred between a character.
else:
saw_word_delimiter = True
max_word_length = max(acc_word_length, max_word_length)
acc_word_length = 0
if c in (x, y):
need_char = False # Found a character. Reset expectations.
saw_word_delimiter = False
acc_word_length += 1
elif need_char and c == last:
break # Needed a character and delimiter repeated. Break.
elif c in (character_delimiter, word_delimiter):
need_char = True # Expect a character.
last = c
else:
# 'break' never happened
ignored = set()
for c in options:
if c in (character_delimiter, word_delimiter):
continue
ignored.add(c)
if given.startswith(word_delimiter) or given.endswith(word_delimiter):
weight_penalty = 0.25
else:
weight_penalty = 1.0
if max_word_length < _TARGET_WORD_LENGTH:
weight_penalty *= 0.1
results.append(_scored_interpretation(
x, y, character_delimiter, word_delimiter, ignored,
weight_penalty))
results.append(_scored_interpretation(
y, x, character_delimiter, word_delimiter, ignored,
weight_penalty))
# Perhaps word delimiters are ignored.
ignored = ignored.copy()
ignored.add(word_delimiter)
results.append(_scored_interpretation(
x, y, character_delimiter, None, ignored, weight_penalty * 0.9))
results.append(_scored_interpretation(
y, x, character_delimiter, None, ignored, weight_penalty * 0.9))
# Perhaps all delimiters are ignored.
ignored = ignored.copy()
ignored.add(character_delimiter)
results.append(_scored_interpretation(
x, y, None, None, ignored, weight_penalty * 0.8))
results.append(_scored_interpretation(
y, x, None, None, ignored, weight_penalty * 0.8))
return sorted(results, key=lambda key: key[0], reverse=True)
def _interpretation_notes(interpretation) -> List[str]:
parts = [
'dot: %s' % repr(interpretation.dot),
'dash: %s' % repr(interpretation.dash),
]
if interpretation.character_delimiter:
parts.append(
'character delimiter: %s' % repr(interpretation.character_delimiter))
if interpretation.word_delimiter:
parts.append('space: %s' % repr(interpretation.word_delimiter))
if interpretation.ignored:
parts.append('ignored: %s' % repr(''.join(sorted(interpretation.ignored))))
return parts
|
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# The MIT License (MIT)
#
# Copyright (c) 2015 Tintri, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import sys
import json
import requests
#disable security warnings
requests.packages.urllib3.disable_warnings()
"""
Python functions to assist with Tintri API calls for the explicit purpose
of supporting Tintri's python examples.
1.1 - added exceptions
This library was NOT designed to be a general purpose Python library.
"""
API = "/api"
# Exception class for requests errors
class TintriRequestsException(Exception):
def __init__(self, *args):
self._message = args[0]
def __str__(self):
return self._message
# Exception class for API errors
class TintriApiException(Exception):
def __init__(self, *args):
self._message = args[0]
self.status_code = args[1]
self.url = args[2]
self.payload = args[3]
self.response = args[4]
def __str__(self):
return "%s status code=%d url:%s payload:%s response:%s" % \
(self._message, self.status_code, self.url, self.payload, self.response)
# API GET without query string. The session ID can be 'None'. This is for
# the info API.
def api_get(server_name, api, session_id=None):
return api_get_query(server_name, api, None, session_id)
# API GET with query string. The query and session ID can be 'None'.
# requests.get() accepts params=None, so the query needs no special handling.
def api_get_query(server_name, api, query, session_id):
headers = {'content-type': 'application/json'}
if session_id is not None:
headers['cookie'] = 'JSESSIONID=' + session_id
url = 'https://' + server_name + API + api
try:
# Invoke the API.
r = requests.get(url, headers=headers, params=query, verify=False)
except requests.ConnectionError:
raise TintriRequestsException("API Connection error occurred.")
except requests.HTTPError:
raise TintriRequestsException("HTTP error occurred.")
except requests.Timeout:
raise TintriRequestsException("Request timed out.")
except:
raise TintriRequestsException("An unexpected error " + sys.exc_info()[0] + " occurred.")
# if HTTP Response is not 200 then raise an exception
if r.status_code != 200:
message = "The HTTP response for get call to the server is not 200."
raise TintriApiException(message, r.status_code, url, "No Payload", r.text)
return r
# API DELETE.
def api_delete(server_name, api, session_id):
#Header and URL for delete call
headers = {'content-type': 'application/json',
'cookie': 'JSESSIONID='+session_id }
url = 'https://' + server_name + API + api
try:
# Invoke the API.
r = requests.delete(url, headers=headers, verify=False)
except requests.ConnectionError:
raise TintrRequestsiApiException("API Connection error occurred.")
except requests.HTTPError:
raise TintriRequestsException("HTTP error occurred.")
except requests.Timeout:
raise TintriRequestsException("Request timed out.")
except:
raise TintriRequestsException("An unexpected error " + sys.exc_info()[0] + " occurred.")
return r
# PUT
def api_put(server_name, api, payload, session_id):
headers = {'content-type': 'application/json',
'cookie': 'JSESSIONID='+session_id }
url = 'https://' + server_name + API + api
try:
# Invoke the API.
r = requests.put(url, data=json.dumps(payload),
headers=headers, verify=False)
except requests.ConnectionError:
raise TintriRequestsException("API Connection error occurred.")
except requests.HTTPError:
raise TintriRequestsException("HTTP error occurred.")
except requests.Timeout:
raise TintriRequestsException("Request timed out.")
except:
raise TintriRequestsException("An unexpected error " + sys.exc_info()[0] + " occurred.")
return r
# POST
def api_post(server_name, api, payload, session_id):
headers = {'content-type': 'application/json',
'cookie': 'JSESSIONID='+session_id }
url = 'https://' + server_name + API + api
try:
# Invoke the API.
r = requests.post(url, data=json.dumps(payload),
headers=headers, verify=False)
except requests.ConnectionError:
raise TintriRequestsException("API Connection error occurred.")
except requests.HTTPError:
raise TintriRequestsException("HTTP error occurred.")
except requests.Timeout:
raise TintriRequestsException("Request timed out.")
except:
raise TintriRequestsException("An unexpected error " + sys.exc_info()[0] + " occurred.")
return r
# Login.
def api_login(server_name, user_name, password):
# Payload, header and URL for login call
headers = {'content-type': 'application/json'}
payload = {'username': user_name,
'password': password,
'typeId': 'com.tintri.api.rest.vcommon.dto.rbac.RestApiCredentials'}
url_login = 'https://'+ server_name + API + '/v310/session/login'
try:
# Invoke the login API.
r = requests.post(url_login, data=json.dumps(payload),
headers=headers, verify=False)
except requests.ConnectionError:
raise TintriRequestsException("Login: API Connection error occurred.")
except requests.HTTPError:
raise TintriRequestsException("Login: HTTP error occurred.")
except requests.Timeout:
raise TintriRequestsException("Login: Request timed out.")
except:
raise TintriRequestsException("Login: An unexpected error " + sys.exc_info()[0] +
" occurred.")
# if HTTP Response is not 200 then raise an exception
if r.status_code != 200:
message = "The HTTP response for login call to the server is not 200."
raise TintriApiException(message, r.status_code, url_login, str(payload), r.text)
session_id = r.cookies['JSESSIONID']
return session_id
# Logout
def api_logout(server_name, session_id):
    # Header and URL for logout call
headers = {'content-type': 'application/json',
'cookie': 'JSESSIONID='+session_id }
url_logout = 'https://' + server_name + API + '/v310/session/logout'
try:
# Send the logout request.
r = requests.get(url_logout, headers=headers, verify=False)
except requests.ConnectionError:
raise TintriRequestsException("Logout: API Connection error occurred.")
except requests.HTTPError:
raise TintrRequestsiApiException("Logout: HTTP error occurred.")
except requests.Timeout:
raise TintriRequestsException("Logout: Request timed out.")
except:
raise TintriRequestsException("Logout: An unexpected error " + sys.exc_info()[0] +
" occurred.")
# if HTTP Response is not 204 then raise an exception
if r.status_code != 204:
message = "The HTTP response for logout call to the server is not 204."
raise TintriApiException(message, r.status_code, url_logout, "No Payload", r.text)
return
# Return API version information
def api_version(server_name):
r = api_get(server_name, '/info')
return r
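# Usage sketch (not part of the original module): the server name, credentials,
# and the datastore stats endpoint below are placeholders for illustration only.
if __name__ == '__main__':
    server = 'tintri.example.com'
    # Unauthenticated call: API version/info.
    print(api_version(server).json())
    # Authenticated round trip: login, GET, logout.
    session_id = api_login(server, 'admin', 'password')
    try:
        r = api_get(server, '/v310/datastore/default/statsRealtime', session_id)
        print(r.json())
    finally:
        api_logout(server, session_id)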
|
|
# Copyright (C) 2002-2007 Python Software Foundation
# Contact: email-sig@python.org
"""Email address parsing code.
Lifted directly from rfc822.py. This should eventually be rewritten.
"""
__all__ = [
'mktime_tz',
'parsedate',
'parsedate_tz',
'quote',
]
import time
SPACE = ' '
EMPTYSTRING = ''
COMMASPACE = ', '
# Parse a date field
_monthnames = ['jan', 'feb', 'mar', 'apr', 'may', 'jun', 'jul',
'aug', 'sep', 'oct', 'nov', 'dec',
'january', 'february', 'march', 'april', 'may', 'june', 'july',
'august', 'september', 'october', 'november', 'december']
_daynames = ['mon', 'tue', 'wed', 'thu', 'fri', 'sat', 'sun']
# The timezone table does not include the military time zones defined
# in RFC822, other than Z. According to RFC1123, the description in
# RFC822 gets the signs wrong, so we can't rely on any such time
# zones. RFC1123 recommends that numeric timezone indicators be used
# instead of timezone names.
_timezones = {'UT':0, 'UTC':0, 'GMT':0, 'Z':0,
'AST': -400, 'ADT': -300, # Atlantic (used in Canada)
'EST': -500, 'EDT': -400, # Eastern
'CST': -600, 'CDT': -500, # Central
'MST': -700, 'MDT': -600, # Mountain
'PST': -800, 'PDT': -700 # Pacific
}
def parsedate_tz(data):
"""Convert a date string to a time tuple.
Accounts for military timezones.
"""
data = data.split()
# The FWS after the comma after the day-of-week is optional, so search and
# adjust for this.
if data[0].endswith(',') or data[0].lower() in _daynames:
# There's a dayname here. Skip it
del data[0]
else:
i = data[0].rfind(',')
if i >= 0:
data[0] = data[0][i+1:]
if len(data) == 3: # RFC 850 date, deprecated
stuff = data[0].split('-')
if len(stuff) == 3:
data = stuff + data[1:]
if len(data) == 4:
s = data[3]
i = s.find('+')
if i > 0:
data[3:] = [s[:i], s[i+1:]]
else:
data.append('') # Dummy tz
if len(data) < 5:
return None
data = data[:5]
[dd, mm, yy, tm, tz] = data
mm = mm.lower()
if mm not in _monthnames:
dd, mm = mm, dd.lower()
if mm not in _monthnames:
return None
mm = _monthnames.index(mm) + 1
if mm > 12:
mm -= 12
if dd[-1] == ',':
dd = dd[:-1]
i = yy.find(':')
if i > 0:
yy, tm = tm, yy
if yy[-1] == ',':
yy = yy[:-1]
if not yy[0].isdigit():
yy, tz = tz, yy
if tm[-1] == ',':
tm = tm[:-1]
tm = tm.split(':')
if len(tm) == 2:
[thh, tmm] = tm
tss = '0'
elif len(tm) == 3:
[thh, tmm, tss] = tm
else:
return None
try:
yy = int(yy)
dd = int(dd)
thh = int(thh)
tmm = int(tmm)
tss = int(tss)
except ValueError:
return None
tzoffset = None
tz = tz.upper()
    if tz in _timezones:
tzoffset = _timezones[tz]
else:
try:
tzoffset = int(tz)
except ValueError:
pass
# Convert a timezone offset into seconds ; -0500 -> -18000
if tzoffset:
if tzoffset < 0:
tzsign = -1
tzoffset = -tzoffset
else:
tzsign = 1
tzoffset = tzsign * ( (tzoffset//100)*3600 + (tzoffset % 100)*60)
# Daylight Saving Time flag is set to -1, since DST is unknown.
return yy, mm, dd, thh, tmm, tss, 0, 1, -1, tzoffset
def parsedate(data):
"""Convert a time string to a time tuple."""
t = parsedate_tz(data)
if isinstance(t, tuple):
return t[:9]
else:
return t
def mktime_tz(data):
"""Turn a 10-tuple as returned by parsedate_tz() into a UTC timestamp."""
if data[9] is None:
# No zone info, so localtime is better assumption than GMT
return time.mktime(data[:8] + (-1,))
else:
t = time.mktime(data[:8] + (0,))
return t - data[9] - time.timezone
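# Usage sketch (not part of the original module): parsedate_tz() returns a
# 10-tuple whose last item is the timezone offset in seconds, and mktime_tz()
# folds that offset back in to produce a single UTC timestamp.
#
#   >>> parsedate_tz('Fri, 09 Nov 2001 01:08:47 -0000')
#   (2001, 11, 9, 1, 8, 47, 0, 1, -1, 0)
#   >>> mktime_tz(parsedate_tz('Fri, 09 Nov 2001 01:08:47 -0000'))
#   1005268127.0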
def quote(str):
"""Add quotes around a string."""
return str.replace('\\', '\\\\').replace('"', '\\"')
class AddrlistClass:
"""Address parser class by Ben Escoto.
To understand what this class does, it helps to have a copy of RFC 2822 in
front of you.
Note: this class interface is deprecated and may be removed in the future.
Use rfc822.AddressList instead.
"""
def __init__(self, field):
"""Initialize a new instance.
`field' is an unparsed address header field, containing
one or more addresses.
"""
self.specials = '()<>@,:;.\"[]'
self.pos = 0
self.LWS = ' \t'
self.CR = '\r\n'
self.FWS = self.LWS + self.CR
self.atomends = self.specials + self.LWS + self.CR
# Note that RFC 2822 now specifies `.' as obs-phrase, meaning that it
# is obsolete syntax. RFC 2822 requires that we recognize obsolete
# syntax, so allow dots in phrases.
self.phraseends = self.atomends.replace('.', '')
self.field = field
self.commentlist = []
def gotonext(self):
"""Parse up to the start of the next address."""
while self.pos < len(self.field):
if self.field[self.pos] in self.LWS + '\n\r':
self.pos += 1
elif self.field[self.pos] == '(':
self.commentlist.append(self.getcomment())
else:
break
def getaddrlist(self):
"""Parse all addresses.
Returns a list containing all of the addresses.
"""
result = []
while self.pos < len(self.field):
ad = self.getaddress()
if ad:
result += ad
else:
result.append(('', ''))
return result
def getaddress(self):
"""Parse the next address."""
self.commentlist = []
self.gotonext()
oldpos = self.pos
oldcl = self.commentlist
plist = self.getphraselist()
self.gotonext()
returnlist = []
if self.pos >= len(self.field):
# Bad email address technically, no domain.
if plist:
returnlist = [(SPACE.join(self.commentlist), plist[0])]
elif self.field[self.pos] in '.@':
# email address is just an addrspec
# this isn't very efficient since we start over
self.pos = oldpos
self.commentlist = oldcl
addrspec = self.getaddrspec()
returnlist = [(SPACE.join(self.commentlist), addrspec)]
elif self.field[self.pos] == ':':
# address is a group
returnlist = []
fieldlen = len(self.field)
self.pos += 1
while self.pos < len(self.field):
self.gotonext()
if self.pos < fieldlen and self.field[self.pos] == ';':
self.pos += 1
break
returnlist = returnlist + self.getaddress()
elif self.field[self.pos] == '<':
# Address is a phrase then a route addr
routeaddr = self.getrouteaddr()
if self.commentlist:
returnlist = [(SPACE.join(plist) + ' (' +
' '.join(self.commentlist) + ')', routeaddr)]
else:
returnlist = [(SPACE.join(plist), routeaddr)]
else:
if plist:
returnlist = [(SPACE.join(self.commentlist), plist[0])]
elif self.field[self.pos] in self.specials:
self.pos += 1
self.gotonext()
if self.pos < len(self.field) and self.field[self.pos] == ',':
self.pos += 1
return returnlist
def getrouteaddr(self):
"""Parse a route address (Return-path value).
This method just skips all the route stuff and returns the addrspec.
"""
if self.field[self.pos] != '<':
return
expectroute = False
self.pos += 1
self.gotonext()
adlist = ''
while self.pos < len(self.field):
if expectroute:
self.getdomain()
expectroute = False
elif self.field[self.pos] == '>':
self.pos += 1
break
elif self.field[self.pos] == '@':
self.pos += 1
expectroute = True
elif self.field[self.pos] == ':':
self.pos += 1
else:
adlist = self.getaddrspec()
self.pos += 1
break
self.gotonext()
return adlist
def getaddrspec(self):
"""Parse an RFC 2822 addr-spec."""
aslist = []
self.gotonext()
while self.pos < len(self.field):
if self.field[self.pos] == '.':
aslist.append('.')
self.pos += 1
elif self.field[self.pos] == '"':
aslist.append('"%s"' % self.getquote())
elif self.field[self.pos] in self.atomends:
break
else:
aslist.append(self.getatom())
self.gotonext()
if self.pos >= len(self.field) or self.field[self.pos] != '@':
return EMPTYSTRING.join(aslist)
aslist.append('@')
self.pos += 1
self.gotonext()
return EMPTYSTRING.join(aslist) + self.getdomain()
def getdomain(self):
"""Get the complete domain name from an address."""
sdlist = []
while self.pos < len(self.field):
if self.field[self.pos] in self.LWS:
self.pos += 1
elif self.field[self.pos] == '(':
self.commentlist.append(self.getcomment())
elif self.field[self.pos] == '[':
sdlist.append(self.getdomainliteral())
elif self.field[self.pos] == '.':
self.pos += 1
sdlist.append('.')
elif self.field[self.pos] in self.atomends:
break
else:
sdlist.append(self.getatom())
return EMPTYSTRING.join(sdlist)
def getdelimited(self, beginchar, endchars, allowcomments=True):
"""Parse a header fragment delimited by special characters.
`beginchar' is the start character for the fragment.
If self is not looking at an instance of `beginchar' then
getdelimited returns the empty string.
`endchars' is a sequence of allowable end-delimiting characters.
Parsing stops when one of these is encountered.
If `allowcomments' is non-zero, embedded RFC 2822 comments are allowed
within the parsed fragment.
"""
if self.field[self.pos] != beginchar:
return ''
slist = ['']
quote = False
self.pos += 1
while self.pos < len(self.field):
if quote:
slist.append(self.field[self.pos])
quote = False
elif self.field[self.pos] in endchars:
self.pos += 1
break
elif allowcomments and self.field[self.pos] == '(':
slist.append(self.getcomment())
continue # have already advanced pos from getcomment
elif self.field[self.pos] == '\\':
quote = True
else:
slist.append(self.field[self.pos])
self.pos += 1
return EMPTYSTRING.join(slist)
def getquote(self):
"""Get a quote-delimited fragment from self's field."""
return self.getdelimited('"', '"\r', False)
def getcomment(self):
"""Get a parenthesis-delimited fragment from self's field."""
return self.getdelimited('(', ')\r', True)
def getdomainliteral(self):
"""Parse an RFC 2822 domain-literal."""
return '[%s]' % self.getdelimited('[', ']\r', False)
def getatom(self, atomends=None):
"""Parse an RFC 2822 atom.
Optional atomends specifies a different set of end token delimiters
(the default is to use self.atomends). This is used e.g. in
getphraselist() since phrase endings must not include the `.' (which
is legal in phrases)."""
atomlist = ['']
if atomends is None:
atomends = self.atomends
while self.pos < len(self.field):
if self.field[self.pos] in atomends:
break
else:
atomlist.append(self.field[self.pos])
self.pos += 1
return EMPTYSTRING.join(atomlist)
def getphraselist(self):
"""Parse a sequence of RFC 2822 phrases.
A phrase is a sequence of words, which are in turn either RFC 2822
atoms or quoted-strings. Phrases are canonicalized by squeezing all
runs of continuous whitespace into one space.
"""
plist = []
while self.pos < len(self.field):
if self.field[self.pos] in self.FWS:
self.pos += 1
elif self.field[self.pos] == '"':
plist.append(self.getquote())
elif self.field[self.pos] == '(':
self.commentlist.append(self.getcomment())
elif self.field[self.pos] in self.phraseends:
break
else:
plist.append(self.getatom(self.phraseends))
return plist
class AddressList(AddrlistClass):
"""An AddressList encapsulates a list of parsed RFC 2822 addresses."""
def __init__(self, field):
AddrlistClass.__init__(self, field)
if field:
self.addresslist = self.getaddrlist()
else:
self.addresslist = []
def __len__(self):
return len(self.addresslist)
def __add__(self, other):
# Set union
newaddr = AddressList(None)
newaddr.addresslist = self.addresslist[:]
for x in other.addresslist:
if not x in self.addresslist:
newaddr.addresslist.append(x)
return newaddr
def __iadd__(self, other):
# Set union, in-place
for x in other.addresslist:
if not x in self.addresslist:
self.addresslist.append(x)
return self
def __sub__(self, other):
# Set difference
newaddr = AddressList(None)
for x in self.addresslist:
if not x in other.addresslist:
newaddr.addresslist.append(x)
return newaddr
def __isub__(self, other):
# Set difference, in-place
for x in other.addresslist:
if x in self.addresslist:
self.addresslist.remove(x)
return self
def __getitem__(self, index):
# Make indexing, slices, and 'in' work
return self.addresslist[index]
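# Usage sketch (not part of the original module; the header value is a made-up
# example): AddressList splits a To:/Cc:-style header into (realname, addrspec)
# pairs and supports set-like arithmetic on the result.
#
#   >>> al = AddressList('Jane Doe <jane@example.com>, bob@example.com')
#   >>> al.addresslist
#   [('Jane Doe', 'jane@example.com'), ('', 'bob@example.com')]
#   >>> len(al)
#   2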
|
|
# -*- coding: utf-8 -*-
"""Test GUI component."""
#------------------------------------------------------------------------------
# Imports
#------------------------------------------------------------------------------
#from contextlib import contextmanager
from pytest import yield_fixture, fixture
import numpy as np
from numpy.testing import assert_array_equal as ae
from .. import supervisor as _supervisor
from ..supervisor import (
Supervisor, TaskLogger, ClusterView, SimilarityView, ActionCreator)
from phy.gui import GUI
from phy.gui.widgets import Barrier
from phy.gui.qt import qInstallMessageHandler
from phy.gui.tests.test_widgets import _assert, _wait_until_table_ready
from phy.utils.context import Context
from phylib.utils import connect, Bunch, emit
def handler(msg_type, msg_log_context, msg_string):
pass
qInstallMessageHandler(handler)
#------------------------------------------------------------------------------
# Fixtures
#------------------------------------------------------------------------------
@yield_fixture
def gui(tempdir, qtbot):
    # NOTE: monkey-patch the confirmation dialog helper so it does not block the tests.
_supervisor._show_box = lambda _: _
gui = GUI(position=(200, 100), size=(500, 500), config_dir=tempdir)
gui.set_default_actions()
gui.show()
qtbot.waitForWindowShown(gui)
yield gui
qtbot.wait(5)
gui.close()
del gui
qtbot.wait(5)
@fixture
def supervisor(qtbot, gui, cluster_ids, cluster_groups, cluster_labels,
similarity, tempdir):
spike_clusters = np.repeat(cluster_ids, 2)
s = Supervisor(
spike_clusters,
cluster_groups=cluster_groups,
cluster_labels=cluster_labels,
similarity=similarity,
context=Context(tempdir),
sort=('id', 'desc'),
)
s.attach(gui)
b = Barrier()
connect(b('cluster_view'), event='ready', sender=s.cluster_view)
connect(b('similarity_view'), event='ready', sender=s.similarity_view)
b.wait()
return s
#------------------------------------------------------------------------------
# Test tasks
#------------------------------------------------------------------------------
@fixture
def tl():
class MockClusterView(object):
_selected = [0]
def select(self, cl, callback=None, **kwargs):
self._selected = cl
callback({'selected': cl, 'next': cl[-1] + 1})
def next(self, callback=None):
callback({'selected': [self._selected[-1] + 1], 'next': self._selected[-1] + 2})
def previous(self, callback=None): # pragma: no cover
callback({'selected': [self._selected[-1] - 1], 'next': self._selected[-1]})
class MockSimilarityView(MockClusterView):
pass
class MockSupervisor(object):
def merge(self, cluster_ids, to, callback=None):
callback(Bunch(deleted=cluster_ids, added=[to]))
def split(self, old_cluster_ids, new_cluster_ids, callback=None):
callback(Bunch(deleted=old_cluster_ids, added=new_cluster_ids))
def move(self, which, group, callback=None):
callback(Bunch(metadata_changed=which, metadata_value=group))
def undo(self, callback=None):
callback(Bunch())
def redo(self, callback=None):
callback(Bunch())
out = TaskLogger(MockClusterView(), MockSimilarityView(), MockSupervisor())
return out
def test_task_1(tl):
assert tl.last_state(None) is None
def test_task_2(tl):
tl.enqueue(tl.cluster_view, 'select', [0])
tl.process()
assert tl.last_state() == ([0], 1, None, None)
def test_task_3(tl):
tl.enqueue(tl.cluster_view, 'select', [0])
tl.enqueue(tl.similarity_view, 'select', [100])
tl.process()
assert tl.last_state() == ([0], 1, [100], 101)
def test_task_merge(tl):
tl.enqueue(tl.cluster_view, 'select', [0])
tl.enqueue(tl.similarity_view, 'select', [100])
tl.enqueue(tl.supervisor, 'merge', [0, 100], 1000)
tl.process()
assert tl.last_state() == ([1000], 1001, None, None)
tl.enqueue(tl.supervisor, 'undo')
tl.process()
assert tl.last_state() == ([0], 1, [100], 101)
tl.enqueue(tl.supervisor, 'redo')
tl.process()
assert tl.last_state() == ([1000], 1001, None, None)
def test_task_split(tl):
tl.enqueue(tl.cluster_view, 'select', [0])
tl.enqueue(tl.similarity_view, 'select', [100])
tl.enqueue(tl.supervisor, 'split', [0, 100], [1000, 1001])
tl.process()
assert tl.last_state() == ([1000, 1001], 1002, None, None)
def test_task_move_1(tl):
tl.enqueue(tl.cluster_view, 'select', [0])
tl.enqueue(tl.supervisor, 'move', [0], 'good')
tl.process()
assert tl.last_state() == ([1], 2, None, None)
def test_task_move_best(tl):
tl.enqueue(tl.cluster_view, 'select', [0])
tl.enqueue(tl.similarity_view, 'select', [100])
tl.enqueue(tl.supervisor, 'move', 'best', 'good')
tl.process()
assert tl.last_state() == ([1], 2, None, None)
def test_task_move_similar(tl):
tl.enqueue(tl.cluster_view, 'select', [0])
tl.enqueue(tl.similarity_view, 'select', [100])
tl.enqueue(tl.supervisor, 'move', 'similar', 'good')
tl.process()
assert tl.last_state() == ([0], 1, [101], 102)
def test_task_move_all(tl):
tl.enqueue(tl.cluster_view, 'select', [0])
tl.enqueue(tl.similarity_view, 'select', [100])
tl.enqueue(tl.supervisor, 'move', 'all', 'good')
tl.process()
assert tl.last_state() == ([1], 2, [101], 102)
#------------------------------------------------------------------------------
# Test cluster and similarity views
#------------------------------------------------------------------------------
@fixture
def data():
_data = [{"id": i,
"n_spikes": 100 - 10 * i,
"group": {2: 'noise', 3: 'noise', 5: 'mua', 8: 'good'}.get(i, None),
"is_masked": i in (2, 3, 5),
} for i in range(10)]
return _data
def test_cluster_view_1(qtbot, gui, data):
cv = ClusterView(gui, data=data)
_wait_until_table_ready(qtbot, cv)
cv.sort_by('n_spikes', 'asc')
cv.select([1])
qtbot.wait(10)
assert cv.state == {'current_sort': ('n_spikes', 'asc'), 'selected': [1]}
cv.set_state({'current_sort': ('id', 'desc'), 'selected': [2]})
assert cv.state == {'current_sort': ('id', 'desc'), 'selected': [2]}
def test_similarity_view_1(qtbot, gui, data):
sv = SimilarityView(gui, data=data)
_wait_until_table_ready(qtbot, sv)
@connect(sender=sv)
def on_request_similar_clusters(sender, cluster_id):
return [{'id': id} for id in (100 + cluster_id, 110 + cluster_id, 102 + cluster_id)]
sv.reset([5])
_assert(sv.get_ids, [105, 115, 107])
def test_cluster_view_extra_columns(qtbot, gui, data):
for cl in data:
cl['my_metrics'] = cl['id'] * 1000
cv = ClusterView(gui, data=data, columns=['id', 'n_spikes', 'my_metrics'])
_wait_until_table_ready(qtbot, cv)
#------------------------------------------------------------------------------
# Test ActionCreator
#------------------------------------------------------------------------------
def test_action_creator_1(qtbot, gui):
ac = ActionCreator()
ac.attach(gui)
gui.show()
#------------------------------------------------------------------------------
# Test GUI component
#------------------------------------------------------------------------------
def _select(supervisor, cluster_ids, similar=None):
supervisor.task_logger.enqueue(supervisor.cluster_view, 'select', cluster_ids)
if similar is not None:
supervisor.task_logger.enqueue(supervisor.similarity_view, 'select', similar)
supervisor.task_logger.process()
supervisor.block()
supervisor.task_logger.show_history()
assert supervisor.task_logger.last_state()[0] == cluster_ids
assert supervisor.task_logger.last_state()[2] == similar
def _assert_selected(supervisor, sel):
assert supervisor.selected == sel
def test_select(qtbot, supervisor):
_select(supervisor, [30], [20])
_assert_selected(supervisor, [30, 20])
def test_supervisor_busy(qtbot, supervisor):
_select(supervisor, [30], [20])
o = object()
emit('is_busy', o, True)
assert supervisor._is_busy
# The action fails while the supervisor is busy.
emit('action', supervisor.action_creator, 'merge')
emit('is_busy', o, False)
assert not supervisor._is_busy
# The action succeeds because the supervisor is no longer busy.
emit('action', supervisor.action_creator, 'merge')
supervisor.block()
assert not supervisor._is_busy
def test_supervisor_cluster_metrics(
qtbot, gui, cluster_ids, cluster_groups, similarity, tempdir):
spike_clusters = np.repeat(cluster_ids, 2)
def my_metrics(cluster_id):
return cluster_id ** 2
cluster_metrics = {'my_metrics': my_metrics}
mc = Supervisor(
spike_clusters,
cluster_groups=cluster_groups,
cluster_metrics=cluster_metrics,
similarity=similarity,
context=Context(tempdir),
)
mc.attach(gui)
b = Barrier()
connect(b('cluster_view'), event='ready', sender=mc.cluster_view)
connect(b('similarity_view'), event='ready', sender=mc.similarity_view)
b.wait()
assert 'my_metrics' in mc.columns
def test_supervisor_select_1(qtbot, supervisor):
    # WARNING: always use actions in tests, because this doesn't call the
    # supervisor method directly; instead it raises an event, enqueues the task,
    # and calls TaskLogger.process(), which handles the cascade of callbacks.
supervisor.select_actions.select([0])
supervisor.block()
_assert_selected(supervisor, [0])
supervisor.task_logger.show_history()
def test_supervisor_select_2(qtbot, supervisor):
supervisor.select_actions.next_best()
supervisor.block()
_assert_selected(supervisor, [30])
def test_supervisor_select_order(qtbot, supervisor):
_select(supervisor, [1, 0])
_assert_selected(supervisor, [1, 0])
_select(supervisor, [0, 1])
_assert_selected(supervisor, [0, 1])
def test_supervisor_edge_cases(supervisor):
# Empty selection at first.
ae(supervisor.clustering.cluster_ids, [0, 1, 2, 10, 11, 20, 30])
_select(supervisor, [0])
supervisor.undo()
supervisor.block()
supervisor.redo()
supervisor.block()
# Merge.
supervisor.merge()
supervisor.block()
_assert_selected(supervisor, [0])
supervisor.merge([])
supervisor.block()
_assert_selected(supervisor, [0])
supervisor.merge([10])
supervisor.block()
_assert_selected(supervisor, [0])
# Split.
supervisor.split([])
supervisor.block()
_assert_selected(supervisor, [0])
# Move.
supervisor.move('ignored', [])
supervisor.block()
supervisor.save()
def test_supervisor_save(qtbot, gui, supervisor):
emit('request_save', gui)
def test_supervisor_skip(qtbot, gui, supervisor):
# yield [0, 1, 2, 10, 11, 20, 30]
# # i, g, N, i, g, N, N
expected = [30, 20, 11, 2, 1]
for clu in expected:
supervisor.select_actions.next_best()
supervisor.block()
_assert_selected(supervisor, [clu])
def test_supervisor_sort(qtbot, supervisor):
supervisor.sort('id', 'desc')
qtbot.wait(50)
assert supervisor.state.cluster_view.current_sort == ('id', 'desc')
supervisor.select_actions.sort_by_n_spikes()
qtbot.wait(50)
assert supervisor.state.cluster_view.current_sort == ('n_spikes', 'desc')
def test_supervisor_filter(qtbot, supervisor):
supervisor.filter('5 <= id && id <= 20')
qtbot.wait(50)
_cl = []
supervisor.cluster_view.get_ids(lambda cluster_ids: _cl.extend(cluster_ids))
qtbot.wait(50)
assert _cl == [20, 11, 10]
supervisor.clear_filter()
qtbot.wait(50)
def test_supervisor_merge_1(qtbot, supervisor):
_select(supervisor, [30], [20])
_assert_selected(supervisor, [30, 20])
supervisor.actions.merge()
supervisor.block()
_assert_selected(supervisor, [31])
supervisor.actions.undo()
supervisor.block()
_assert_selected(supervisor, [30, 20])
supervisor.actions.redo()
supervisor.block()
supervisor.task_logger.show_history()
_assert_selected(supervisor, [31])
assert supervisor.is_dirty()
def test_supervisor_merge_event(qtbot, supervisor):
_select(supervisor, [30], [20])
_l = []
@connect(sender=supervisor)
def on_select(sender, cluster_ids):
_l.append(cluster_ids)
supervisor.actions.merge()
supervisor.block()
# After a merge, there should be only one select event.
assert len(_l) == 1
def test_supervisor_merge_move(qtbot, supervisor):
"""Check that merge then move selects the next cluster in the original
cluster view, not the updated cluster view."""
_select(supervisor, [20, 11], [])
_assert_selected(supervisor, [20, 11])
supervisor.actions.merge()
supervisor.block()
_assert_selected(supervisor, [31])
supervisor.actions.move('good', 'all')
supervisor.block()
_assert_selected(supervisor, [30])
supervisor.actions.move('good', 'all')
supervisor.block()
_assert_selected(supervisor, [2])
def test_supervisor_split_0(qtbot, supervisor):
_select(supervisor, [1, 2])
_assert_selected(supervisor, [1, 2])
supervisor.actions.split([1, 2])
supervisor.block()
_assert_selected(supervisor, [31, 32, 33])
supervisor.actions.undo()
supervisor.block()
_assert_selected(supervisor, [1, 2])
supervisor.actions.redo()
supervisor.block()
_assert_selected(supervisor, [31, 32, 33])
def test_supervisor_split_1(supervisor):
supervisor.select_actions.select([1, 2])
supervisor.block()
@connect(sender=supervisor)
def on_request_split(sender):
return [1, 2]
supervisor.actions.split()
supervisor.block()
_assert_selected(supervisor, [31, 32, 33])
def test_supervisor_split_2(gui, similarity):
spike_clusters = np.array([0, 0, 1])
supervisor = Supervisor(spike_clusters, similarity=similarity)
supervisor.attach(gui)
b = Barrier()
connect(b('cluster_view'), event='ready', sender=supervisor.cluster_view)
connect(b('similarity_view'), event='ready', sender=supervisor.similarity_view)
b.wait()
supervisor.actions.split([0])
supervisor.block()
_assert_selected(supervisor, [2, 3])
def test_supervisor_state(tempdir, qtbot, gui, supervisor):
supervisor.select(1)
cv = supervisor.cluster_view
assert supervisor.state.cluster_view.current_sort == ('id', 'desc')
assert supervisor.state.cluster_view.selected == [1]
cv.sort_by('id')
assert supervisor.state.cluster_view.current_sort == ('id', 'asc')
cv.set_state({'current_sort': ('n_spikes', 'desc')})
assert supervisor.state.cluster_view.current_sort == ('n_spikes', 'desc')
cv.sort_by('id', 'desc')
assert supervisor.shown_cluster_ids == [30, 20, 11, 10, 2, 1, 0]
def test_supervisor_label(supervisor):
_select(supervisor, [20])
supervisor.label("my_field", 3.14)
supervisor.block()
supervisor.label("my_field", 1.23, cluster_ids=30)
supervisor.block()
assert 'my_field' in supervisor.fields
assert supervisor.get_labels('my_field')[20] == 3.14
assert supervisor.get_labels('my_field')[30] == 1.23
def test_supervisor_label_cluster_1(supervisor):
_select(supervisor, [20, 30])
supervisor.label("my_field", 3.14)
supervisor.block()
# Same value for the old clusters.
l = supervisor.get_labels('my_field')
assert l[20] == l[30] == 3.14
up = supervisor.merge()
supervisor.block()
assert supervisor.get_labels('my_field')[up.added[0]] == 3.14
def test_supervisor_label_cluster_2(supervisor):
_select(supervisor, [20])
supervisor.label("my_field", 3.14)
supervisor.block()
# One of the parents.
l = supervisor.get_labels('my_field')
assert l[20] == 3.14
assert l[30] is None
up = supervisor.merge([20, 30])
supervisor.block()
assert supervisor.get_labels('my_field')[up.added[0]] == 3.14
def test_supervisor_label_cluster_3(supervisor):
# Conflict: largest cluster wins.
_select(supervisor, [20, 30])
supervisor.label("my_field", 3.14)
supervisor.block()
# Create merged cluster from 20 and 30.
up = supervisor.merge()
new = up.added[0]
supervisor.block()
    # It got the label of its parents.
assert supervisor.get_labels('my_field')[new] == 3.14
# Now, we label a smaller cluster.
supervisor.label("my_field", 2.718, cluster_ids=[10])
# We merge the large and small cluster together.
up = supervisor.merge(up.added + [10])
supervisor.block()
# The new cluster should have the value of the first, merged big cluster, i.e. 3.14.
assert supervisor.get_labels('my_field')[up.added[0]] == 3.14
def test_supervisor_move_1(supervisor):
_select(supervisor, [20])
_assert_selected(supervisor, [20])
assert not supervisor.move('', '')
supervisor.actions.move('noise', 'all')
supervisor.block()
_assert_selected(supervisor, [11])
supervisor.actions.undo()
supervisor.block()
_assert_selected(supervisor, [20])
supervisor.actions.redo()
supervisor.block()
_assert_selected(supervisor, [11])
def test_supervisor_move_2(supervisor):
_select(supervisor, [20], [10])
_assert_selected(supervisor, [20, 10])
supervisor.actions.move('noise', 10)
supervisor.block()
_assert_selected(supervisor, [20, 2])
supervisor.actions.undo()
supervisor.block()
_assert_selected(supervisor, [20, 10])
supervisor.actions.redo()
supervisor.block()
_assert_selected(supervisor, [20, 2])
def test_supervisor_move_3(qtbot, supervisor):
supervisor.select_actions.next()
supervisor.block()
_assert_selected(supervisor, [30])
supervisor.actions.move_best_to_noise()
supervisor.block()
_assert_selected(supervisor, [20])
supervisor.actions.move_best_to_mua()
supervisor.block()
_assert_selected(supervisor, [11])
supervisor.actions.move_best_to_good()
supervisor.block()
_assert_selected(supervisor, [2])
    assert supervisor.cluster_meta.get('group', 30) == 'noise'
    assert supervisor.cluster_meta.get('group', 20) == 'mua'
    assert supervisor.cluster_meta.get('group', 11) == 'good'
def test_supervisor_move_4(supervisor):
_select(supervisor, [30], [20])
_assert_selected(supervisor, [30, 20])
supervisor.actions.move_similar_to_noise()
supervisor.block()
_assert_selected(supervisor, [30, 11])
supervisor.actions.move_similar_to_mua()
supervisor.block()
_assert_selected(supervisor, [30, 2])
supervisor.actions.move_similar_to_good()
supervisor.block()
_assert_selected(supervisor, [30, 1])
    assert supervisor.cluster_meta.get('group', 20) == 'noise'
    assert supervisor.cluster_meta.get('group', 11) == 'mua'
    assert supervisor.cluster_meta.get('group', 2) == 'good'
def test_supervisor_move_5(supervisor):
_select(supervisor, [30], [20])
_assert_selected(supervisor, [30, 20])
supervisor.actions.move_all_to_noise()
supervisor.block()
_assert_selected(supervisor, [11, 2])
supervisor.select_actions.next()
supervisor.block()
_assert_selected(supervisor, [11, 1])
supervisor.actions.move_all_to_mua()
supervisor.block()
_assert_selected(supervisor, [2])
supervisor.actions.move_all_to_good()
supervisor.block()
_assert_selected(supervisor, [])
    assert supervisor.cluster_meta.get('group', 30) == 'noise'
    assert supervisor.cluster_meta.get('group', 20) == 'noise'
    assert supervisor.cluster_meta.get('group', 11) == 'mua'
    assert supervisor.cluster_meta.get('group', 10) == 'mua'
    assert supervisor.cluster_meta.get('group', 2) == 'good'
    assert supervisor.cluster_meta.get('group', 1) == 'good'
def test_supervisor_reset(qtbot, supervisor):
supervisor.select_actions.select([10, 11])
supervisor.select_actions.reset_wizard()
supervisor.block()
_assert_selected(supervisor, [30])
supervisor.select_actions.next()
supervisor.block()
_assert_selected(supervisor, [30, 20])
supervisor.select_actions.next()
supervisor.block()
_assert_selected(supervisor, [30, 11])
supervisor.select_actions.previous()
supervisor.block()
_assert_selected(supervisor, [30, 20])
supervisor.select_actions.unselect_similar()
supervisor.block()
_assert_selected(supervisor, [30])
def test_supervisor_nav(qtbot, supervisor):
supervisor.select_actions.reset_wizard()
supervisor.block()
_assert_selected(supervisor, [30])
supervisor.select_actions.next_best()
supervisor.block()
_assert_selected(supervisor, [20])
supervisor.select_actions.previous_best()
supervisor.block()
_assert_selected(supervisor, [30])
supervisor.select_actions.first()
qtbot.wait(100)
_assert_selected(supervisor, [30])
supervisor.select_actions.last()
qtbot.wait(100)
_assert_selected(supervisor, [1])
|
|
""" report test results in JUnit-XML format, for use with Hudson and build integration servers.
Based on initial code from Ross Lawley.
"""
import py
import os
import re
import sys
import time
# Python 2.X and 3.X compatibility
if sys.version_info[0] < 3:
from codecs import open
else:
unichr = chr
unicode = str
long = int
class Junit(py.xml.Namespace):
pass
# We need to get the subset of the invalid unicode ranges according to
# XML 1.0 which are valid in this python build. Hence we calculate
# this dynamically instead of hardcoding it. The spec range of valid
# chars is: Char ::= #x9 | #xA | #xD | [#x20-#xD7FF] | [#xE000-#xFFFD]
# | [#x10000-#x10FFFF]
_legal_chars = (0x09, 0x0A, 0x0d)
_legal_ranges = (
(0x20, 0x7E),
(0x80, 0xD7FF),
(0xE000, 0xFFFD),
(0x10000, 0x10FFFF),
)
_legal_xml_re = [unicode("%s-%s") % (unichr(low), unichr(high))
for (low, high) in _legal_ranges
if low < sys.maxunicode]
_legal_xml_re = [unichr(x) for x in _legal_chars] + _legal_xml_re
illegal_xml_re = re.compile(unicode('[^%s]') %
unicode('').join(_legal_xml_re))
del _legal_chars
del _legal_ranges
del _legal_xml_re
def bin_xml_escape(arg):
def repl(matchobj):
i = ord(matchobj.group())
if i <= 0xFF:
return unicode('#x%02X') % i
else:
return unicode('#x%04X') % i
return py.xml.raw(illegal_xml_re.sub(repl, py.xml.escape(arg)))
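# Usage sketch (not part of the original plugin): characters that are illegal in
# XML 1.0, such as an ESC byte captured from terminal output, are rewritten as
# visible '#xNN' markers before being embedded in the report, e.g.
#
#   bin_xml_escape(u'step1\x1bstep2')   # escaped text reads 'step1#x1Bstep2'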
def pytest_addoption(parser):
group = parser.getgroup("terminal reporting")
group.addoption('--junitxml', '--junit-xml', action="store",
dest="xmlpath", metavar="path", default=None,
help="create junit-xml style report file at given path.")
group.addoption('--junitprefix', '--junit-prefix', action="store",
metavar="str", default=None,
help="prepend prefix to classnames in junit-xml output")
def pytest_configure(config):
xmlpath = config.option.xmlpath
# prevent opening xmllog on slave nodes (xdist)
if xmlpath and not hasattr(config, 'slaveinput'):
config._xml = LogXML(xmlpath, config.option.junitprefix)
config.pluginmanager.register(config._xml)
def pytest_unconfigure(config):
xml = getattr(config, '_xml', None)
if xml:
del config._xml
config.pluginmanager.unregister(xml)
def mangle_testnames(names):
names = [x.replace(".py", "") for x in names if x != '()']
names[0] = names[0].replace("/", '.')
return names
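# Usage sketch (not part of the original plugin): mangle_testnames() turns a
# split nodeid into dotted classname parts plus the bare test name, e.g.
#
#   mangle_testnames('tests/test_foo.py::TestBar::()::test_baz'.split('::'))
#   -> ['tests.test_foo', 'TestBar', 'test_baz']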
class LogXML(object):
def __init__(self, logfile, prefix):
logfile = os.path.expanduser(os.path.expandvars(logfile))
self.logfile = os.path.normpath(os.path.abspath(logfile))
self.prefix = prefix
self.tests = []
self.passed = self.skipped = 0
self.failed = self.errors = 0
def _opentestcase(self, report):
names = mangle_testnames(report.nodeid.split("::"))
classnames = names[:-1]
if self.prefix:
classnames.insert(0, self.prefix)
self.tests.append(Junit.testcase(
classname=".".join(classnames),
name=bin_xml_escape(names[-1]),
time=getattr(report, 'duration', 0)
))
def _write_captured_output(self, report):
for capname in ('out', 'err'):
allcontent = ""
for name, content in report.get_sections("Captured std%s" %
capname):
allcontent += content
if allcontent:
tag = getattr(Junit, 'system-'+capname)
self.append(tag(bin_xml_escape(allcontent)))
def append(self, obj):
self.tests[-1].append(obj)
def append_pass(self, report):
self.passed += 1
self._write_captured_output(report)
def append_failure(self, report):
#msg = str(report.longrepr.reprtraceback.extraline)
if hasattr(report, "wasxfail"):
self.append(
Junit.skipped(message="xfail-marked test passes unexpectedly"))
self.skipped += 1
else:
if isinstance(report.longrepr, (unicode, str)):
message = report.longrepr
else:
message = report.longrepr.reprcrash.message
message = bin_xml_escape(message)
fail = Junit.failure(message=message)
fail.append(bin_xml_escape(report.longrepr))
self.append(fail)
self.failed += 1
self._write_captured_output(report)
def append_collect_error(self, report):
#msg = str(report.longrepr.reprtraceback.extraline)
self.append(Junit.error(bin_xml_escape(report.longrepr),
message="collection failure"))
self.errors += 1
def append_collect_skipped(self, report):
#msg = str(report.longrepr.reprtraceback.extraline)
self.append(Junit.skipped(bin_xml_escape(report.longrepr),
message="collection skipped"))
self.skipped += 1
def append_error(self, report):
self.append(Junit.error(bin_xml_escape(report.longrepr),
message="test setup failure"))
self.errors += 1
def append_skipped(self, report):
if hasattr(report, "wasxfail"):
self.append(Junit.skipped(bin_xml_escape(report.wasxfail),
message="expected test failure"))
else:
filename, lineno, skipreason = report.longrepr
if skipreason.startswith("Skipped: "):
skipreason = bin_xml_escape(skipreason[9:])
self.append(
Junit.skipped("%s:%s: %s" % (filename, lineno, skipreason),
type="pytest.skip",
message=skipreason
))
self.skipped += 1
self._write_captured_output(report)
def pytest_runtest_logreport(self, report):
if report.passed:
if report.when == "call": # ignore setup/teardown
self._opentestcase(report)
self.append_pass(report)
elif report.failed:
self._opentestcase(report)
if report.when != "call":
self.append_error(report)
else:
self.append_failure(report)
elif report.skipped:
self._opentestcase(report)
self.append_skipped(report)
def pytest_collectreport(self, report):
if not report.passed:
self._opentestcase(report)
if report.failed:
self.append_collect_error(report)
else:
self.append_collect_skipped(report)
def pytest_internalerror(self, excrepr):
self.errors += 1
data = bin_xml_escape(excrepr)
self.tests.append(
Junit.testcase(
Junit.error(data, message="internal error"),
classname="pytest",
name="internal"))
def pytest_sessionstart(self):
self.suite_start_time = time.time()
def pytest_sessionfinish(self):
logfile = open(self.logfile, 'w', encoding='utf-8')
suite_stop_time = time.time()
suite_time_delta = suite_stop_time - self.suite_start_time
numtests = self.passed + self.failed
logfile.write('<?xml version="1.0" encoding="utf-8"?>')
logfile.write(Junit.testsuite(
self.tests,
name="pytest",
errors=self.errors,
failures=self.failed,
skips=self.skipped,
tests=numtests,
time="%.3f" % suite_time_delta,
).unicode(indent=0))
logfile.close()
def pytest_terminal_summary(self, terminalreporter):
terminalreporter.write_sep("-", "generated xml file: %s" % (self.logfile))
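# Usage sketch (not part of the original plugin): the report location and an
# optional classname prefix are chosen on the command line, e.g.
#
#   py.test --junitxml=build/report.xml --junit-prefix=myproject
#
# pytest_sessionfinish() above then writes a single <testsuite name="pytest">
# element containing one <testcase> per reported test.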
|