import copy
from flask import current_app as app, abort
from eve.utils import config, debug_error_message
from werkzeug.exceptions import BadRequestKeyError
def versioned_id_field():
""" Shorthand to add two commonly added versioning parameters.
.. versionadded: 0.4
"""
return app.config['ID_FIELD'] + app.config['VERSION_ID_SUFFIX']
def resolve_document_version(document, resource, method, latest_doc=None):
""" Version number logic for all methods.
:param document: the document in question.
:param resource: the resource of the request/document.
    :param method: method corresponding to the request.
:param latest_doc: the most recent version of the document.
.. versionadded:: 0.4
"""
resource_def = app.config['DOMAIN'][resource]
version = app.config['VERSION']
latest_version = app.config['LATEST_VERSION']
if resource_def['versioning'] is True:
        # especially on collection endpoints, we don't want to incur an extra
        # lookup if we are already pulling the latest version
if method == 'GET' and latest_doc is None:
if version not in document:
# well it should be... the api designer must have turned on
# versioning after data was already in the collection or the
# collection has been modified without respecting versioning
document[version] = 1 # the first saved version will be 2
document[latest_version] = document[version]
# include latest_doc if the request is for an older version so that we
# can set the latest_version field in the response
if method == 'GET' and latest_doc is not None:
if version not in latest_doc:
# well it should be... the api designer must have turned on
# versioning after data was already in the collection or the
# collection has been modified without respecting versioning
document[version] = 1 # the first saved version will be 2
document[latest_version] = document[version]
else:
document[latest_version] = latest_doc[version]
if version not in document:
# this version was put in the database before versioning
# was turned on or outside of Eve
document[version] = 1
if method == 'POST':
# this one is easy! it is a new document
document[version] = 1
if method == 'PUT' or method == 'PATCH':
if not latest_doc:
abort(500, description=debug_error_message(
'I need the latest document here!'
))
if version in latest_doc:
# all is right in the world :)
document[version] = latest_doc[version] + 1
else:
# if versioning was just turned on, then we will start
# versioning now. if the db was modified outside of Eve or
                # versioning was turned off for a while, version numbers will
# not be consistent! you have been warned
document[version] = 1
def late_versioning_catch(document, resource):
""" Insert versioning copy of document for the previous version of a
document if it is missing. Intended for PUT and PATCH.
:param resource: the resource of the request/document.
:param ids: a list of id number coorsponding to the documents parameter.
:param document: the documents be written by POST, PUT, or PATCH.
.. versionadded:: 0.4
"""
resource_def = app.config['DOMAIN'][resource]
version = app.config['VERSION']
if resource_def['versioning'] is True:
# TODO: Could directly check that there are no shadow copies for this
# document. If there are shadow copies but the version field is in the
# stored document, then something is wrong. (Modified outside of Eve?)
if version not in document:
            # The API maintainer must have turned on versioning after the
# document was added to the database, so let's add this old version
# to the shadow collection now as if it was a new document.
resolve_document_version(document, resource, 'POST')
insert_versioning_documents(resource, document)
def insert_versioning_documents(resource, documents):
""" Insert versioning copy of document. Intended for POST, PUT, and PATCH.
:param resource: the resource of the request/document.
    :param documents: the documents to be written by POST, PUT, or PATCH.
.. versionadded:: 0.4
"""
resource_def = app.config['DOMAIN'][resource]
_id = app.config['ID_FIELD']
# push back versioned items if applicable
# note: MongoDB doesn't have transactions! if the server dies, no
# history will be saved.
if resource_def['versioning'] is True:
# force input as lists
if not isinstance(documents, list):
documents = [documents]
        # build versioning documents
version = app.config['VERSION']
versioned_documents = []
for index, document in enumerate(documents):
ver_doc = {}
# push normal fields
fields = versioned_fields(resource_def)
for field in document:
if field in fields:
ver_doc[field] = document[field]
# push special fields
ver_doc[versioned_id_field()] = document[_id]
ver_doc[version] = document[version]
# add document to the stack
versioned_documents.append(ver_doc)
# bulk insert
versionable_resource_collection_name = resource_def['datasource']['source'] + app.config['VERSIONS']
app.data.insert(versionable_resource_collection_name, versioned_documents)
def versioned_fields(resource_def):
""" Returns a list of versioned fields for a resource.
:param resource_def: a resource definition.
.. versionchanged:: 0.5
ETAG is now a versioned field (#369).
.. versionadded:: 0.4
"""
schema = resource_def['schema']
fields = []
if resource_def['versioning'] is True:
fields.append(app.config['LAST_UPDATED'])
fields.append(app.config['ETAG'])
for field in schema:
            if schema[field].get('versioned', True) is True:
fields.append(field)
return fields
def diff_document(resource_def, old_doc, new_doc):
""" Returns a list of added or modified fields.
:param resource_def: a resource definition.
:param old_doc: the document to compare against.
:param new_doc: the document in question.
.. versionadded:: 0.4
"""
diff = {}
fields = list(resource_def['schema'].keys()) + [
app.config['VERSION'],
app.config['LATEST_VERSION'],
app.config['ID_FIELD'],
app.config['LAST_UPDATED'],
app.config['DATE_CREATED'],
app.config['ETAG'],
app.config['LINKS']]
for field in fields:
if field in new_doc and \
(field not in old_doc or new_doc[field] != old_doc[field]):
diff[field] = new_doc[field]
# This method does not show when fields are deleted.
for field in app.config['VERSION_DIFF_INCLUDE']:
if field in new_doc:
diff[field] = new_doc[field]
return diff
def synthesize_versioned_document(document, delta, resource_def):
""" Synthesizes an old document from the latest document and the values of
all versioned fields from the old version. This is accomplished by removing
all versioned fields from the latest document before updating fields to
ensure that fields with required=False can be removed.
:param document: the current version of a document.
:param delta: the versioned fields from a specific document version.
:param resource_def: a resource definition.
.. versionadded:: 0.4
"""
old_doc = copy.deepcopy(document)
if versioned_id_field() not in delta:
abort(400, description=debug_error_message(
'You must include %s in any projection with a version query.'
% versioned_id_field()
))
delta[app.config['ID_FIELD']] = delta[versioned_id_field()]
del delta[versioned_id_field()]
# remove all versioned fields from document
fields = versioned_fields(resource_def)
for field in document:
if field in fields:
del old_doc[field]
# add versioned fields
old_doc.update(delta)
return old_doc
def get_old_document(resource, req, lookup, document, version):
""" Returns an old document if appropriate, otherwise passes the given
document through.
:param resource: the name of the resource.
:param req: the parsed request object.
:param lookup: a dictionary of lookup parameters.
:param document: the current version of the document.
:param version: the value of the version request parameter.
.. versionadded:: 0.4
"""
if version != 'all' and version != 'diffs' and version is not None:
try:
version = int(version)
assert version > 0
except (ValueError, BadRequestKeyError, AssertionError):
abort(400, description=debug_error_message(
'Document version number should be an int greater than 0'
))
# parameters to find specific document version
if versioned_id_field() not in lookup:
lookup[versioned_id_field()] = lookup[app.config['ID_FIELD']]
del lookup[app.config['ID_FIELD']]
lookup[config.VERSION] = version
# synthesize old document from latest and delta
delta = app.data.find_one(resource + config.VERSIONS, req, **lookup)
if not delta:
abort(404)
document = synthesize_versioned_document(
document,
delta,
config.DOMAIN[resource])
return document
def get_data_version_relation_document(data_relation, reference, latest=False):
""" Returns an old document if appropriate, otherwise passes the given
document through.
:param data_relation: the schema definition describing the data_relation.
:param reference: a dictionary with a value_field and a version_field.
:param latest: if we should obey the version param in reference or not.
.. versionadded:: 0.4
"""
value_field = data_relation['field']
version_field = app.config['VERSION']
collection = data_relation['resource']
resource_def = app.config['DOMAIN'][data_relation['resource']]
query = {}
# tweak the query if the foreign field is versioned
if value_field in versioned_fields(resource_def) and latest is False:
# the field is versioned, search the shadow collection
collection += app.config['VERSIONS']
# special consideration for _id overloading
if value_field == app.config['ID_FIELD']:
query[value_field + app.config['VERSION_ID_SUFFIX']] = \
reference[value_field]
else:
query[value_field] = reference[value_field]
# add the version to the query
query[version_field] = reference[version_field]
else:
# the field is not versioned, search the primary doc
query[value_field] = reference[value_field]
if latest is False:
query[version_field] = {'$gte': reference[version_field]}
return app.data.find_one(collection, None, **query)
def missing_version_field(data_relation, reference):
""" Returns a document if it matches the value_field but doesn't have a
_version field. This is the scenario when there is data in the database
before document versioning is turned on.
:param data_relation: the schema definition describing the data_relation.
:param reference: a dictionary with a value_field and a version_field.
.. versionadded:: 0.4
"""
value_field = data_relation['field']
version_field = app.config['VERSION']
collection = data_relation['resource']
query = {}
query[value_field] = reference[value_field]
query[version_field] = {'$exists': False}
return app.data.find_one(collection, None, **query)
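# A minimal usage sketch (not part of Eve): inside the request/application context
# of an Eve app whose resource has 'versioning': True, a PUT or PATCH is versioned
# by backfilling any pre-versioning document, bumping the version number, and then
# writing a shadow copy. The argument names below are illustrative.
def _example_versioning_flow(resource, document, latest_doc):
    late_versioning_catch(latest_doc, resource)              # backfill docs that predate versioning
    resolve_document_version(document, resource, 'PUT', latest_doc)
    insert_versioning_documents(resource, document)          # persist the shadow copy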
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import re
def CreateLowOverheadFilter():
"""Returns a filter with the least overhead possible.
This contains no sub-traces of thread tasks, so it's only useful for
capturing the cpu-time spent on threads (as well as needed benchmark
traces).
FIXME: Remove webkit.console when blink.console lands in chromium and
the ref builds are updated. crbug.com/386847
"""
categories = [
"toplevel",
"benchmark",
"webkit.console",
"blink.console",
"trace_event_overhead"
]
return ChromeTraceCategoryFilter(filter_string=','.join(categories))
def CreateDefaultOverheadFilter():
"""Returns a filter with the best-effort amount of overhead.
This matches Chrome tracing's default category filter setting, i.e., enable
all categories except the disabled-by-default-* ones.
We should use '*' instead of '' (empty string) here. On the Chrome side, both
'*' and '' mean default category filter setting. However, if someone adds
additional category filters, the behavior becomes different.
For example:
'*': enable all categories except the disabled-by-default-* ones.
'': enable all categories except the disabled-by-default-* ones.
Now add an additional category filter 'abc' to '*' and '':
'*,abc': enable all categories (including 'abc') except the
disabled-by-default-* ones.
'abc': enable only 'abc', and disable all other ones.
"""
return ChromeTraceCategoryFilter(filter_string='*')
def CreateDebugOverheadFilter():
"""Returns a filter with as many traces enabled as is useful."""
return ChromeTraceCategoryFilter(
filter_string='*,disabled-by-default-cc.debug')
_delay_re = re.compile(r'DELAY[(][A-Za-z0-9._;]+[)]')
class ChromeTraceCategoryFilter(object):
"""A set of included and excluded categories that should be traced.
The ChromeTraceCategoryFilter allows fine tuning of what data is traced for
Chrome. Basic choice of which tracers to use is done by TracingConfig.
Providing filter_string=None gives the default category filter, which leaves
what to trace up to the individual trace systems.
"""
def __init__(self, filter_string=None):
self._included_categories = set()
self._excluded_categories = set()
self._disabled_by_default_categories = set()
self._synthetic_delays = set()
self.contains_wildcards = False
self.AddFilterString(filter_string)
def AddFilterString(self, filter_string):
if filter_string is None:
return
filter_set = set([cf.strip() for cf in filter_string.split(',')])
for category in filter_set:
self.AddFilter(category)
def AddFilter(self, category):
if category == '':
return
if ',' in category:
raise ValueError("Invalid category filter name: '%s'" % category)
if '*' in category or '?' in category:
self.contains_wildcards = True
if _delay_re.match(category):
self._synthetic_delays.add(category)
return
if category[0] == '-':
assert not category[1:] in self._included_categories
self._excluded_categories.add(category[1:])
return
if category.startswith('disabled-by-default-'):
self._disabled_by_default_categories.add(category)
return
assert not category in self._excluded_categories
self._included_categories.add(category)
@property
def included_categories(self):
return self._included_categories
@property
def excluded_categories(self):
return self._excluded_categories
@property
def disabled_by_default_categories(self):
return self._disabled_by_default_categories
@property
def synthetic_delays(self):
return self._synthetic_delays
@property
def filter_string(self):
return self._GetFilterString(stable_output=False)
@property
def stable_filter_string(self):
return self._GetFilterString(stable_output=True)
def _GetFilterString(self, stable_output):
# Note: This outputs fields in an order that intentionally matches
# trace_event_impl's CategoryFilter string order.
lists = []
lists.append(self._included_categories)
lists.append(self._disabled_by_default_categories)
lists.append(['-%s' % x for x in self._excluded_categories])
lists.append(self._synthetic_delays)
categories = []
for l in lists:
if stable_output:
l = list(l)
l.sort()
categories.extend(l)
return ','.join(categories)
def GetDictForChromeTracing(self):
INCLUDED_CATEGORIES_PARAM = 'included_categories'
EXCLUDED_CATEGORIES_PARAM = 'excluded_categories'
SYNTHETIC_DELAYS_PARAM = 'synthetic_delays'
result = {}
if self._included_categories or self._disabled_by_default_categories:
result[INCLUDED_CATEGORIES_PARAM] = list(
self._included_categories | self._disabled_by_default_categories)
if self._excluded_categories:
result[EXCLUDED_CATEGORIES_PARAM] = list(self._excluded_categories)
if self._synthetic_delays:
result[SYNTHETIC_DELAYS_PARAM] = list(self._synthetic_delays)
return result
def AddDisabledByDefault(self, category):
assert category.startswith('disabled-by-default-')
self._disabled_by_default_categories.add(category)
def AddIncludedCategory(self, category_glob):
"""Explicitly enables anything matching category_glob."""
assert not category_glob.startswith('disabled-by-default-')
assert not category_glob in self._excluded_categories
self._included_categories.add(category_glob)
def AddExcludedCategory(self, category_glob):
"""Explicitly disables anything matching category_glob."""
assert not category_glob.startswith('disabled-by-default-')
assert not category_glob in self._included_categories
self._excluded_categories.add(category_glob)
def AddSyntheticDelay(self, delay):
assert _delay_re.match(delay)
self._synthetic_delays.add(delay)
def IsSubset(self, other):
""" Determine if filter A (self) is a subset of filter B (other).
Returns True if A is a subset of B, False if A is not a subset of B,
and None if we can't tell for sure.
"""
# We don't handle filters with wildcards in this test.
if self.contains_wildcards or other.contains_wildcards:
return None
# Disabled categories get into a trace if and only if they are contained in
# the 'disabled' set. Return False if A's disabled set is not a subset of
# B's disabled set.
if not self.disabled_by_default_categories <= \
other.disabled_by_default_categories:
return False
# If A defines more or different synthetic delays than B, then A is not a
# subset.
if not self.synthetic_delays <= other.synthetic_delays:
return False
if self.included_categories and other.included_categories:
# A and B have explicit include lists. If A includes something that B
# doesn't, return False.
if not self.included_categories <= other.included_categories:
return False
elif self.included_categories:
# Only A has an explicit include list. If A includes something that B
# excludes, return False.
if self.included_categories.intersection(other.excluded_categories):
return False
elif other.included_categories:
# Only B has an explicit include list. We don't know which categories are
# contained in the default list, so return None.
return None
else:
      # Neither filter has an explicit include list. If B excludes categories
      # that A doesn't exclude, return False.
if not other.excluded_categories <= self.excluded_categories:
return False
return True
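# A small usage sketch: build a filter from a comma-separated string and inspect the
# derived category sets and the dict handed to Chrome tracing (all names below are
# defined in this file; the category strings are illustrative).
def _example_category_filter():
  f = ChromeTraceCategoryFilter(
      filter_string='benchmark,-cc,disabled-by-default-gpu.debug')
  assert f.included_categories == {'benchmark'}
  assert f.excluded_categories == {'cc'}
  assert f.disabled_by_default_categories == {'disabled-by-default-gpu.debug'}
  # Stable output sorts each group; the tracing dict merges included and
  # disabled-by-default categories into 'included_categories'.
  print(f.stable_filter_string)
  print(f.GetDictForChromeTracing())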
# Copyright 2013-2014 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A class to serve as proxy for the target engine for testing.
Receives documents from the oplog worker threads and indexes them
into the backend.
Please look at the Solr and ElasticSearch doc manager classes for a sample
implementation with real systems.
"""
from threading import RLock
from mongo_connector import constants
from mongo_connector.errors import OperationFailed
from mongo_connector.doc_managers.doc_manager_base import DocManagerBase
from mongo_connector.compat import u
__version__ = constants.__version__
"""DocManager Simulator version information
This is packaged with mongo-connector so it shares the same version.
Downstream DocManager implementations should add their package __version__
string here, for example:
__version__ = '0.1.0'
"""
class DocumentStore(dict):
def __init__(self):
self._lock = RLock()
def __getitem__(self, key):
with self._lock:
return super(DocumentStore, self).__getitem__(key)
def __setitem__(self, key, value):
with self._lock:
return super(DocumentStore, self).__setitem__(key, value)
def __iter__(self):
def __myiter__():
with self._lock:
for item in super(DocumentStore, self).__iter__():
yield item
return __myiter__()
class Entry(object):
def __init__(self, doc, ns, ts):
self.doc, self.ns, self.ts = doc, ns, ts
self._id = self.doc['_id']
@property
def meta_dict(self):
return {'_id': self._id, 'ns': self.ns, '_ts': self.ts}
@property
def merged_dict(self):
d = self.doc.copy()
d.update(**self.meta_dict)
return d
def update(self, ns, ts):
self.ns, self.ts = ns, ts
class DocManager(DocManagerBase):
"""BackendSimulator emulates both a target DocManager and a server.
The DocManager class creates a connection to the backend engine and
adds/removes documents, and in the case of rollback, searches for them.
    The reason for storing id/doc pairs as opposed to docs alone is so that
    multiple updates to the same doc reflect the most up-to-date version,
    rather than multiple, slightly different versions of a doc.
"""
def __init__(self, url=None, unique_key='_id',
auto_commit_interval=None,
chunk_size=constants.DEFAULT_MAX_BULK, **kwargs):
"""Creates a dictionary to hold document id keys mapped to the
documents as values.
"""
self.unique_key = unique_key
self.auto_commit_interval = auto_commit_interval
self.doc_dict = DocumentStore()
self.url = url
self.chunk_size = chunk_size
self.kwargs = kwargs
def stop(self):
"""Stops any running threads in the DocManager.
"""
pass
def update(self, document_id, update_spec, namespace, timestamp):
"""Apply updates given in update_spec to the document whose id
matches that of doc.
"""
document = self.doc_dict[document_id].doc
updated = self.apply_update(document, update_spec)
if "_id" in updated:
updated.pop("_id")
updated[self.unique_key] = document_id
self.upsert(updated, namespace, timestamp)
return updated
def upsert(self, doc, namespace, timestamp):
"""Adds a document to the doc dict.
"""
# Allow exceptions to be triggered (for testing purposes)
if doc.get('_upsert_exception'):
raise Exception("upsert exception")
doc_id = doc["_id"]
self.doc_dict[doc_id] = Entry(doc=doc, ns=namespace, ts=timestamp)
def insert_file(self, f, namespace, timestamp):
"""Inserts a file to the doc dict.
"""
doc = f.get_metadata()
doc['content'] = f.read()
self.doc_dict[f._id] = Entry(doc=doc, ns=namespace, ts=timestamp)
def remove(self, document_id, namespace, timestamp):
"""Removes the document from the doc dict.
"""
try:
entry = self.doc_dict[document_id]
entry.doc = None
entry.update(namespace, timestamp)
except KeyError:
raise OperationFailed("Document does not exist: %s"
% u(document_id))
def search(self, start_ts, end_ts):
"""Searches through all documents and finds all documents that were
modified or deleted within the range.
Since we have very few documents in the doc dict when this is called,
linear search is fine. This method is only used by rollbacks to query
all the documents in the target engine within a certain timestamp
window. The input will be two longs (converted from Bson timestamp)
which specify the time range. The start_ts refers to the timestamp
of the last oplog entry after a rollback. The end_ts is the timestamp
of the last document committed to the backend.
"""
for _id in self.doc_dict:
entry = self.doc_dict[_id]
            if start_ts <= entry.ts <= end_ts:
yield entry.meta_dict
def commit(self):
"""Simply passes since we're not using an engine that needs commiting.
"""
pass
def get_last_doc(self):
"""Searches through the doc dict to find the document that was
modified or deleted most recently."""
return max(self.doc_dict.values(), key=lambda x: x.ts).meta_dict
def handle_command(self, command_doc, namespace, timestamp):
pass
def _search(self):
"""Returns all documents in the doc dict.
This function is not a part of the DocManager API, and is only used
to simulate searching all documents from a backend.
"""
results = []
for _id in self.doc_dict:
entry = self.doc_dict[_id]
if entry.doc is not None:
results.append(entry.merged_dict)
return results
def _delete(self):
"""Deletes all documents.
This function is not a part of the DocManager API, and is only used
to simulate deleting all documents from a backend.
"""
self.doc_dict = {}
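# A brief usage sketch of the simulator (timestamps and namespace below are
# illustrative; everything else is defined in this module):
def _example_doc_manager():
    dm = DocManager()
    dm.upsert({'_id': 1, 'name': 'alice'}, 'test.people', 1)
    dm.update(1, {'$set': {'name': 'bob'}}, 'test.people', 2)
    print(dm._search())       # merged doc + metadata for every live document
    dm.remove(1, 'test.people', 3)
    print(dm.get_last_doc())  # metadata of the most recently touched document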
"""Single slice vgg with normalised scale.
"""
import functools
import lasagne as nn
import numpy as np
import theano
import theano.tensor as T
import data_loader
import deep_learning_layers
import image_transform
import layers
import preprocess
import postprocess
import objectives
import theano_printer
import updates
import utils
# Random params
rng = np.random
take_a_dump = False # dump a lot of data in a pkl-dump file. (for debugging)
dump_network_loaded_data = False # dump the outputs from the dataloader (for debugging)
# Memory usage scheme
caching = None
# Save and validation frequency
validate_every = 50
validate_train_set = True
save_every = 5
restart_from_save = False
dump_network_loaded_data = False
# Training (schedule) parameters
# - batch sizes
batch_size = 1
sunny_batch_size = 4
batches_per_chunk = 32 *4
num_epochs_train = 62
# - learning rate and method
base_lr = 0.00003
learning_rate_schedule = {
0: base_lr,
45: base_lr/10,
}
momentum = 0.9
build_updates = updates.build_adam_updates
# Preprocessing stuff
cleaning_processes = [
preprocess.set_upside_up,]
cleaning_processes_post = [
functools.partial(preprocess.normalize_contrast_zmuv, z=2)]
augmentation_params = {
"rotation": (-180, 180),
"shear": (0, 0),
"translation": (-8, 8),
"flip_vert": (0, 1),
"roll_time": (0, 0),
"flip_time": (0, 0),
"zoom_x": (.75, 1.25),
"zoom_y": (.75, 1.25),
"change_brightness": (-0.3, 0.3),
}
augmentation_params_test = {
"rotation": (-180, 180),
"shear": (0, 0),
"translation": (-8, 8),
"flip_vert": (0, 1),
"roll_time": (0, 0),
"flip_time": (0, 0),
"zoom_x": (.80, 1.20),
"zoom_y": (.80, 1.20),
"change_brightness": (-0.2, 0.2),
}
use_hough_roi = True
preprocess_train = functools.partial( # normscale_resize_and_augment has a bug
preprocess.preprocess_normscale,
normscale_resize_and_augment_function=functools.partial(
image_transform.normscale_resize_and_augment_2,
normalised_patch_size=(80,80)))
preprocess_validation = functools.partial(preprocess_train, augment=False)
preprocess_test = preprocess_train
sunny_preprocess_train = preprocess.sunny_preprocess_with_augmentation
sunny_preprocess_validation = preprocess.sunny_preprocess_validation
sunny_preprocess_test = preprocess.sunny_preprocess_validation
# Data generators
create_train_gen = data_loader.generate_train_batch
create_eval_valid_gen = functools.partial(data_loader.generate_validation_batch, set="validation")
create_eval_train_gen = functools.partial(data_loader.generate_validation_batch, set="train")
create_test_gen = functools.partial(data_loader.generate_test_batch, set=["validation", "test"])
def filter_samples(folders):
    # don't use patients who don't have more than 6 slices
return [
folder for folder in folders
if data_loader.compute_nr_slices(folder) > 6]
# Input sizes
image_size = 64
nr_slices = 22
data_sizes = {
"sliced:data:sax": (batch_size, nr_slices, 30, image_size, image_size),
"sliced:data:sax:locations": (batch_size, nr_slices),
"sliced:data:sax:is_not_padded": (batch_size, nr_slices),
"sliced:data:randomslices": (batch_size, nr_slices, 30, image_size, image_size),
"sliced:data:singleslice:difference:middle": (batch_size, 29, image_size, image_size),
"sliced:data:singleslice:difference": (batch_size, 29, image_size, image_size),
"sliced:data:singleslice": (batch_size, 30, image_size, image_size),
"sliced:data:ax": (batch_size, 30, 15, image_size, image_size),
"sliced:data:shape": (batch_size, 2,),
"sunny": (sunny_batch_size, 1, image_size, image_size)
# TBC with the metadata
}
# Objective
l2_weight = 0.000
l2_weight_out = 0.000
def build_objective(interface_layers):
# l2 regu on certain layers
l2_penalty = nn.regularization.regularize_layer_params_weighted(
interface_layers["regularizable"], nn.regularization.l2)
# build objective
return objectives.KaggleObjective(interface_layers["outputs"], penalty=l2_penalty)
# Testing
postprocess = postprocess.postprocess
test_time_augmentations = 100 # More augmentations since we only use single slices
tta_average_method = lambda x: np.cumsum(utils.norm_geometric_average(utils.cdf_to_pdf(x)))
# nonlinearity putting a lower bound on its output
def lb_softplus(lb):
return lambda x: nn.nonlinearities.softplus(x) + lb
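# For example, a sigma-predicting dense layer could use lb_softplus(1e-3) as its
# nonlinearity to keep the predicted spread strictly positive (a sketch only; the
# actual mu/sigma layers are built in the imported submodel):
#   l_sigma = nn.layers.DenseLayer(l_in, num_units=1, nonlinearity=lb_softplus(1e-3))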
init = nn.init.Orthogonal()
rnn_layer = functools.partial(nn.layers.RecurrentLayer,
W_in_to_hid=init,
W_hid_to_hid=init,
b=nn.init.Constant(0.1),
nonlinearity=nn.nonlinearities.rectify,
hid_init=nn.init.Constant(0.),
backwards=False,
learn_init=True,
gradient_steps=-1,
grad_clipping=False,
unroll_scan=False,
precompute_input=False)
# Architecture
def build_model():
#################
# Regular model #
#################
input_size = data_sizes["sliced:data:sax"]
input_size_mask = data_sizes["sliced:data:sax:is_not_padded"]
input_size_locations = data_sizes["sliced:data:sax:locations"]
l0 = nn.layers.InputLayer(input_size)
lin_slice_mask = nn.layers.InputLayer(input_size_mask)
lin_slice_locations = nn.layers.InputLayer(input_size_locations)
    # PREPROCESS SLICES SEPARATELY
# Convolutional layers and some dense layers are defined in a submodel
l0_slices = nn.layers.ReshapeLayer(l0, (-1, [2], [3], [4]))
import je_ss_jonisc80_leaky_convroll_augzoombright
submodel = je_ss_jonisc80_leaky_convroll_augzoombright.build_model(l0_slices)
# Systole Dense layers
l_sys_mu = submodel["meta_outputs"]["systole:mu"]
l_sys_sigma = submodel["meta_outputs"]["systole:sigma"]
# Diastole Dense layers
l_dia_mu = submodel["meta_outputs"]["diastole:mu"]
l_dia_sigma = submodel["meta_outputs"]["diastole:sigma"]
# AGGREGATE SLICES PER PATIENT
l_scaled_slice_locations = layers.TrainableScaleLayer(lin_slice_locations, scale=nn.init.Constant(0.1), trainable=False)
# Systole
l_pat_sys_ss_mu = nn.layers.ReshapeLayer(l_sys_mu, (-1, nr_slices))
l_pat_sys_ss_sigma = nn.layers.ReshapeLayer(l_sys_sigma, (-1, nr_slices))
l_pat_sys_aggr_mu_sigma = layers.JeroenLayer([l_pat_sys_ss_mu, l_pat_sys_ss_sigma, lin_slice_mask, l_scaled_slice_locations], rescale_input=100.)
l_systole = layers.MuSigmaErfLayer(l_pat_sys_aggr_mu_sigma)
# Diastole
l_pat_dia_ss_mu = nn.layers.ReshapeLayer(l_dia_mu, (-1, nr_slices))
l_pat_dia_ss_sigma = nn.layers.ReshapeLayer(l_dia_sigma, (-1, nr_slices))
l_pat_dia_aggr_mu_sigma = layers.JeroenLayer([l_pat_dia_ss_mu, l_pat_dia_ss_sigma, lin_slice_mask, l_scaled_slice_locations], rescale_input=100.)
l_diastole = layers.MuSigmaErfLayer(l_pat_dia_aggr_mu_sigma)
submodels = [submodel]
return {
"inputs":{
"sliced:data:sax": l0,
"sliced:data:sax:is_not_padded": lin_slice_mask,
"sliced:data:sax:locations": lin_slice_locations,
},
"outputs": {
"systole": l_systole,
"diastole": l_diastole,
},
"regularizable": dict(
{},
**{
k: v
for d in [model["regularizable"] for model in submodels if "regularizable" in model]
for k, v in d.items() }
),
"pretrained":{
je_ss_jonisc80_leaky_convroll_augzoombright.__name__: submodel["outputs"],
}
}
import logging
import pickle
import nose
import gc
import tempfile
import os
import ana
import claripy
import angr
from angr import SimState
binaries_base = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', '..', 'binaries')
def test_state():
s = SimState(arch='AMD64')
s.registers.store('sp', 0x7ffffffffff0000)
nose.tools.assert_equals(s.se.eval(s.registers.load('sp')), 0x7ffffffffff0000)
s.stack_push(s.se.BVV("ABCDEFGH"))
nose.tools.assert_equals(s.se.eval(s.registers.load('sp')), 0x7fffffffffefff8)
s.stack_push(s.se.BVV("IJKLMNOP"))
nose.tools.assert_equals(s.se.eval(s.registers.load('sp')), 0x7fffffffffefff0)
a = s.stack_pop()
nose.tools.assert_equals(s.se.eval(s.registers.load('sp')), 0x7fffffffffefff8)
nose.tools.assert_equals(s.se.eval(a, cast_to=str), "IJKLMNOP")
b = s.stack_pop()
nose.tools.assert_equals(s.se.eval(s.registers.load('sp')), 0x7ffffffffff0000)
nose.tools.assert_equals(s.se.eval(b, cast_to=str), "ABCDEFGH")
#@nose.tools.timed(10)
def test_state_merge():
a = SimState(arch='AMD64', mode='symbolic')
a.memory.store(1, a.se.BVV(42, 8))
b = a.copy()
c = b.copy()
a.memory.store(2, a.memory.load(1, 1)+1)
b.memory.store(2, b.memory.load(1, 1)*2)
c.memory.store(2, c.memory.load(1, 1)/2)
# make sure the byte at 1 is right
nose.tools.assert_equal(a.se.eval(a.memory.load(1, 1)), 42)
nose.tools.assert_equal(b.se.eval(b.memory.load(1, 1)), 42)
nose.tools.assert_equal(c.se.eval(c.memory.load(1, 1)), 42)
# make sure the byte at 2 is right
nose.tools.assert_equal(a.se.eval(a.memory.load(2, 1)), 43)
nose.tools.assert_equal(b.se.eval(b.memory.load(2, 1)), 84)
nose.tools.assert_equal(c.se.eval(c.memory.load(2, 1)), 21)
# the byte at 2 should be unique for all before the merge
nose.tools.assert_true(a.se.unique(a.memory.load(2, 1)))
nose.tools.assert_true(b.se.unique(b.memory.load(2, 1)))
nose.tools.assert_true(c.se.unique(c.memory.load(2, 1)))
#logging.getLogger('angr.state_plugins.symbolic_memory').setLevel(logging.DEBUG)
m, merge_conditions, merging_occurred = a.merge(b, c)
#logging.getLogger('angr.state_plugins.symbolic_memory').setLevel(logging.WARNING)
nose.tools.assert_true(merging_occurred)
#nose.tools.assert_equals(sorted(m.se.eval_upto(merge_flag, 10)), [ 0,1,2 ])
assert len(merge_conditions) == 3
# the byte at 2 should now *not* be unique for a
nose.tools.assert_false(m.se.unique(m.memory.load(2, 1)))
nose.tools.assert_true(a.se.unique(a.memory.load(2, 1)))
nose.tools.assert_true(b.se.unique(b.memory.load(2, 1)))
nose.tools.assert_true(c.se.unique(c.memory.load(2, 1)))
# the byte at 2 should have the three values
nose.tools.assert_items_equal(m.se.eval_upto(m.memory.load(2, 1), 10), (43, 84, 21))
# we should be able to select them by adding constraints
a_a = m.copy()
a_a.add_constraints(merge_conditions[0])
nose.tools.assert_true(a_a.se.unique(a_a.memory.load(2, 1)))
nose.tools.assert_equal(a_a.se.eval(a_a.memory.load(2, 1)), 43)
a_b = m.copy()
a_b.add_constraints(merge_conditions[1])
nose.tools.assert_true(a_b.se.unique(a_b.memory.load(2, 1)))
nose.tools.assert_equal(a_b.se.eval(a_b.memory.load(2, 1)), 84)
a_c = m.copy()
a_c.add_constraints(merge_conditions[2])
nose.tools.assert_true(a_c.se.unique(a_c.memory.load(2, 1)))
nose.tools.assert_equal(a_c.se.eval(a_c.memory.load(2, 1)), 21)
# test different sets of plugins
a = SimState(arch='AMD64', mode='symbolic')
nose.tools.assert_true(a.has_plugin('memory'))
nose.tools.assert_true(a.has_plugin('registers'))
nose.tools.assert_false(a.has_plugin('libc'))
b = a.copy()
a.get_plugin('libc')
nose.tools.assert_true(a.has_plugin('libc'))
nose.tools.assert_false(b.has_plugin('libc'))
c = a.copy().merge(b.copy())[0]
d = b.copy().merge(a.copy())[0]
nose.tools.assert_true(c.has_plugin('libc'))
nose.tools.assert_true(d.has_plugin('libc'))
# test merging posix with different open files (illegal!)
a = SimState(arch='AMD64', mode='symbolic')
b = a.copy()
a.posix.open('/tmp/idk', 1)
nose.tools.assert_raises(angr.errors.SimMergeError, lambda: a.copy().merge(b.copy()))
def test_state_merge_static():
# With abstract memory
# Aligned memory merging
a = SimState(arch='AMD64', mode='static')
addr = a.se.ValueSet(32, 'global', 0, 8)
a.memory.store(addr, a.se.BVV(42, 32))
# Clear a_locs, so further writes will not try to merge with value 42
a.memory.regions['global']._alocs = { }
b = a.copy()
c = a.copy()
a.memory.store(addr, a.se.BVV(50, 32), endness='Iend_LE')
b.memory.store(addr, a.se.BVV(60, 32), endness='Iend_LE')
c.memory.store(addr, a.se.BVV(70, 32), endness='Iend_LE')
merged, _, _ = a.merge(b, c)
actual = claripy.backends.vsa.convert(merged.memory.load(addr, 4))
expected = claripy.backends.vsa.convert(a.se.SI(bits=32, stride=10, lower_bound=50, upper_bound=70))
nose.tools.assert_true(actual.identical(expected))
def test_state_merge_3way():
a = SimState(arch='AMD64', mode='symbolic')
b = a.copy()
c = a.copy()
conds = [ a.se.BoolS('cond_0'), a.se.BoolS('cond_1') ]
a.add_constraints(conds[0])
b.add_constraints(a.se.Not(conds[0]), conds[1])
c.add_constraints(a.se.Not(conds[0]), a.se.Not(conds[1]))
a.memory.store(0x400000, a.se.BVV(8, 32))
b.memory.store(0x400000, b.se.BVV(9, 32))
c.memory.store(0x400000, c.se.BVV(10, 32))
m, _, _ = a.merge(b)
m, _, _ = m.merge(c)
assert m.satisfiable(extra_constraints=(m.memory.load(0x400000, 4) == 8,))
assert m.satisfiable(extra_constraints=(m.memory.load(0x400000, 4) == 9,))
assert m.satisfiable(extra_constraints=(m.memory.load(0x400000, 4) == 10,))
def test_state_merge_optimal_nostrongrefstate():
# We do not specify the state option EFFICIENT_STATE_MERGING, and as a result, state histories do not store strong
# references to states. This will result in less efficient state merging since SimStateHistory will be the only
# state plugin that knows the common ancestor of all instances to merge. But it should still succeed.
binary_path = os.path.join(binaries_base, "tests", "x86_64", "state_merge_0")
p = angr.Project(binary_path, auto_load_libs=False)
sm = p.factory.simulation_manager()
sm.explore(find=0x400616, num_find=3)
var_addr = 0x601044
sm.merge(stash='found')
s = sm.one_found
culprit = s.mem[var_addr].dword.resolved
for i in xrange(8, 11):
        assert s.solver.satisfiable(extra_constraints=(culprit == i,))
assert not s.solver.satisfiable(extra_constraints=(culprit == 12, ))
def test_state_merge_optimal():
# Unlike the above test case, EFFICIENT_STATE_MERGING is enabled here
binary_path = os.path.join(binaries_base, "tests", "x86_64", "state_merge_0")
p = angr.Project(binary_path, auto_load_libs=False)
state = p.factory.blank_state(add_options={angr.sim_options.EFFICIENT_STATE_MERGING})
sm = p.factory.simulation_manager(state)
sm.explore(find=0x400616, num_find=3)
var_addr = 0x601044
sm.merge(stash='found')
s = sm.one_found
culprit = s.mem[var_addr].dword.resolved
for i in xrange(8, 11):
        assert s.solver.satisfiable(extra_constraints=(culprit == i,))
assert not s.solver.satisfiable(extra_constraints=(culprit == 12, ))
def setup():
tmp_dir = tempfile.mkdtemp(prefix='test_state_picklez')
ana.set_dl(ana.DirDataLayer(tmp_dir))
def teardown():
ana.set_dl(ana.SimpleDataLayer())
@nose.with_setup(setup, teardown)
def test_state_pickle():
s = SimState(arch="AMD64")
s.memory.store(100, s.se.BVV(0x4141414241414241424300, 88), endness='Iend_BE')
s.regs.rax = 100
sp = pickle.dumps(s)
del s
gc.collect()
s = pickle.loads(sp)
nose.tools.assert_equals(s.se.eval(s.memory.load(100, 10), cast_to=str), "AAABAABABC")
def test_global_condition():
s = SimState(arch="AMD64")
s.regs.rax = 10
old_rax = s.regs.rax
with s.with_condition(False):
nose.tools.assert_false(s.se.satisfiable())
s.regs.rax = 20
nose.tools.assert_is(s._global_condition, None)
nose.tools.assert_is(old_rax, s.regs.rax)
with s.with_condition(True):
s.regs.rax = 20
nose.tools.assert_is(s._global_condition, None)
nose.tools.assert_is_not(old_rax, s.regs.rax)
nose.tools.assert_is(s.se.BVV(20, s.arch.bits), s.regs.rax)
with s.with_condition(s.regs.rbx != 0):
s.regs.rax = 25
nose.tools.assert_is(s._global_condition, None)
nose.tools.assert_is_not(s.se.BVV(25, s.arch.bits), s.regs.rax)
with s.with_condition(s.regs.rbx != 1):
s.regs.rax = 30
nose.tools.assert_is(s._global_condition, None)
nose.tools.assert_is_not(s.se.BVV(30, s.arch.bits), s.regs.rax)
with s.with_condition(s.regs.rbx == 0):
nose.tools.assert_equals(s.se.eval_upto(s.regs.rbx, 10), [ 0 ])
nose.tools.assert_items_equal(s.se.eval_upto(s.regs.rax, 10), [ 30 ])
with s.with_condition(s.regs.rbx == 1):
nose.tools.assert_equals(s.se.eval_upto(s.regs.rbx, 10), [ 1 ])
nose.tools.assert_items_equal(s.se.eval_upto(s.regs.rax, 10), [ 25 ])
if __name__ == '__main__':
test_state()
test_state_merge()
test_state_merge_3way()
test_state_merge_optimal()
test_state_merge_optimal_nostrongrefstate()
test_state_merge_static()
test_state_pickle()
test_global_condition()
# -*- coding: utf-8 -*-
"""
gspread.utils
~~~~~~~~~~~~~
This module contains utility functions.
"""
import sys
import re
from functools import wraps
from collections import defaultdict
from itertools import chain
from .exceptions import IncorrectCellLabel, NoValidUrlKeyFound
if sys.version_info.major == 2:
import urllib
elif sys.version_info.major == 3:
import urllib.parse as urllib
MAGIC_NUMBER = 64
CELL_ADDR_RE = re.compile(r'([A-Za-z]+)([1-9]\d*)')
URL_KEY_V1_RE = re.compile(r'key=([^&#]+)')
URL_KEY_V2_RE = re.compile(r'/spreadsheets/d/([a-zA-Z0-9-_]+)')
def finditem(func, seq):
"""Finds and returns first item in iterable for which func(item) is True.
"""
return next((item for item in seq if func(item)))
def numericise(value, empty2zero=False, default_blank="", allow_underscores_in_numeric_literals=False):
"""Returns a value that depends on the input string:
- Float if input can be converted to Float
- Integer if input can be converted to integer
- Zero if the input string is empty and empty2zero flag is set
- The same input string, empty or not, otherwise.
Executable examples:
>>> numericise("faa")
'faa'
>>> numericise("3")
3
>>> numericise("3_2", allow_underscores_in_numeric_literals=False)
'3_2'
>>> numericise("3_2", allow_underscores_in_numeric_literals=True)
    32
>>> numericise("3.1")
3.1
>>> numericise("", empty2zero=True)
0
>>> numericise("", empty2zero=False)
''
>>> numericise("", default_blank=None)
>>>
>>> numericise("", default_blank="foo")
'foo'
>>> numericise("")
''
>>> numericise(None)
>>>
"""
if value is not None:
if "_" in value and not allow_underscores_in_numeric_literals:
return value
try:
value = int(value)
except ValueError:
try:
value = float(value)
except ValueError:
if value == "":
if empty2zero:
value = 0
else:
value = default_blank
return value
def numericise_all(input, empty2zero=False, default_blank="", allow_underscores_in_numeric_literals=False):
"""Returns a list of numericised values from strings"""
return [numericise(s, empty2zero, default_blank, allow_underscores_in_numeric_literals) for s in input]
def rowcol_to_a1(row, col):
"""Translates a row and column cell address to A1 notation.
:param row: The row of the cell to be converted.
Rows start at index 1.
:type row: int, str
:param col: The column of the cell to be converted.
Columns start at index 1.
    :type col: int, str
:returns: a string containing the cell's coordinates in A1 notation.
Example:
>>> rowcol_to_a1(1, 1)
    'A1'
"""
row = int(row)
col = int(col)
if row < 1 or col < 1:
raise IncorrectCellLabel('(%s, %s)' % (row, col))
div = col
column_label = ''
while div:
(div, mod) = divmod(div, 26)
if mod == 0:
mod = 26
div -= 1
column_label = chr(mod + MAGIC_NUMBER) + column_label
label = '%s%s' % (column_label, row)
return label
def a1_to_rowcol(label):
"""Translates a cell's address in A1 notation to a tuple of integers.
:param label: A cell label in A1 notation, e.g. 'B1'.
Letter case is ignored.
:type label: str
:returns: a tuple containing `row` and `column` numbers. Both indexed
from 1 (one).
Example:
>>> a1_to_rowcol('A1')
(1, 1)
"""
m = CELL_ADDR_RE.match(label)
if m:
column_label = m.group(1).upper()
row = int(m.group(2))
col = 0
for i, c in enumerate(reversed(column_label)):
col += (ord(c) - MAGIC_NUMBER) * (26 ** i)
else:
raise IncorrectCellLabel(label)
return (row, col)
def cast_to_a1_notation(method):
"""
Decorator function casts wrapped arguments to A1 notation
in range method calls.
"""
@wraps(method)
def wrapper(self, *args, **kwargs):
try:
if len(args):
int(args[0])
# Convert to A1 notation
range_start = rowcol_to_a1(*args[:2])
range_end = rowcol_to_a1(*args[-2:])
range_name = ':'.join((range_start, range_end))
args = (range_name,) + args[4:]
except ValueError:
pass
return method(self, *args, **kwargs)
return wrapper
def extract_id_from_url(url):
m2 = URL_KEY_V2_RE.search(url)
if m2:
return m2.group(1)
m1 = URL_KEY_V1_RE.search(url)
if m1:
return m1.group(1)
raise NoValidUrlKeyFound
def wid_to_gid(wid):
"""Calculate gid of a worksheet from its wid."""
widval = wid[1:] if len(wid) > 3 else wid
xorval = 474 if len(wid) > 3 else 31578
return str(int(widval, 36) ^ xorval)
def rightpad(row, max_len):
pad_len = max_len - len(row)
return row + ([''] * pad_len) if pad_len != 0 else row
def fill_gaps(L, rows=None, cols=None):
max_cols = max(len(row) for row in L) if cols is None else cols
max_rows = len(L) if rows is None else rows
pad_rows = max_rows - len(L)
if pad_rows:
L = L + ([[]] * pad_rows)
return [rightpad(row, max_cols) for row in L]
def cell_list_to_rect(cell_list):
if not cell_list:
return []
rows = defaultdict(lambda: {})
row_offset = cell_list[0].row
col_offset = cell_list[0].col
for cell in cell_list:
row = rows.setdefault(int(cell.row) - row_offset, {})
row[cell.col - col_offset] = cell.value
if not rows:
return []
all_row_keys = chain.from_iterable(row.keys() for row in rows.values())
rect_cols = range(max(all_row_keys) + 1)
rect_rows = range(max(rows.keys()) + 1)
# Return the values of the cells as a list of lists where each sublist
# contains all of the values for one row. The Google API requires a rectangle
# of updates, so if a cell isn't present in the input cell_list, then the
# value will be None and will not be updated.
return [[rows[i].get(j) for j in rect_cols] for i in rect_rows]
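# A small sketch of cell_list_to_rect using a hypothetical Cell-like namedtuple
# (gspread's real Cell objects expose the same row/col/value attributes):
def _example_cell_list_to_rect():
    from collections import namedtuple
    Cell = namedtuple('Cell', ['row', 'col', 'value'])
    cells = [Cell(2, 1, 'a'), Cell(2, 2, 'b'), Cell(3, 2, 'c')]
    # Offsets come from the first cell, so the rectangle starts at (row 2, col 1)
    # and missing cells are padded with None: [['a', 'b'], [None, 'c']]
    return cell_list_to_rect(cells)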
def quote(value, safe='', encoding='utf-8'):
return urllib.quote(value.encode(encoding), safe)
if __name__ == '__main__':
import doctest
doctest.testmod()
#! /usr/bin/env python
#Todo
#Better outlier removal
#Check Nuth and Kaab bin median
#Implement check for empty diff
import sys
import os
import argparse
import subprocess
from osgeo import gdal
import numpy as np
import matplotlib.pyplot as plt
from pygeotools.lib import iolib, malib, geolib, warplib, filtlib
from demcoreg import coreglib, dem_mask
from imview.lib import pltlib
#Turn off numpy multithreading
#os.environ['OPENBLAS_NUM_THREADS'] = '1'
def get_mask(ds, mask_list, dem_fn=None):
#This returns True (1) for areas to mask, False (0) for valid static surfaces
static_mask = dem_mask.get_mask(ds, mask_list, dem_fn, writeout=False)
#return ~(static_mask)
return static_mask
def outlier_filter(diff, f=3, perc=None, max_dz=100):
print("Removing outliers")
print("Initial pixel count:")
print(diff.count())
print("Absolute dz filter: %0.2f" % max_dz)
#Absolute dz filter
diff = np.ma.masked_greater(diff, max_dz)
print(diff.count())
if perc is not None:
diff = filtlib.perc_fltr(diff, perc)
else:
#diff = filtlib.sigma_fltr(diff, f)
diff = filtlib.mad_fltr(diff, f)
print(diff.count())
return diff
def get_filtered_slope(ds, slope_lim=(0.1, 40)):
#Generate slope map
print("Computing slope")
slope = geolib.gdaldem_mem_ds(ds, processing='slope', returnma=True, computeEdges=False)
#slope_stats = malib.print_stats(slope)
print("Slope filter: %0.2f - %0.2f" % slope_lim)
print("Initial count: %i" % slope.count())
slope = filtlib.range_fltr(slope, slope_lim)
print(slope.count())
return slope
def compute_offset(ref_dem_ds, src_dem_ds, src_dem_fn, mode='nuth', remove_outliers=True, max_offset=100, \
max_dz=100, slope_lim=(0.1, 40), mask_list=['glaciers',], plot=True):
#Make sure the input datasets have the same resolution/extent
#Use projection of source DEM
ref_dem_clip_ds, src_dem_clip_ds = warplib.memwarp_multi([ref_dem_ds, src_dem_ds], \
res='max', extent='intersection', t_srs=src_dem_ds, r='cubic')
#Compute size of NCC and SAD search window in pixels
res = float(geolib.get_res(ref_dem_clip_ds, square=True)[0])
max_offset_px = (max_offset/res) + 1
#print(max_offset_px)
pad = (int(max_offset_px), int(max_offset_px))
#This will be updated geotransform for src_dem
src_dem_gt = np.array(src_dem_clip_ds.GetGeoTransform())
#Load the arrays
ref_dem = iolib.ds_getma(ref_dem_clip_ds, 1)
src_dem = iolib.ds_getma(src_dem_clip_ds, 1)
print("Elevation difference stats for uncorrected input DEMs (src - ref)")
diff = src_dem - ref_dem
static_mask = get_mask(src_dem_clip_ds, mask_list, src_dem_fn)
diff = np.ma.array(diff, mask=static_mask)
if diff.count() == 0:
sys.exit("No overlapping, unmasked pixels shared between input DEMs")
if remove_outliers:
diff = outlier_filter(diff, f=3, max_dz=max_dz)
#Want to use higher quality DEM, should determine automatically from original res/count
#slope = get_filtered_slope(ref_dem_clip_ds, slope_lim=slope_lim)
slope = get_filtered_slope(src_dem_clip_ds, slope_lim=slope_lim)
print("Computing aspect")
#aspect = geolib.gdaldem_mem_ds(ref_dem_clip_ds, processing='aspect', returnma=True, computeEdges=False)
aspect = geolib.gdaldem_mem_ds(src_dem_clip_ds, processing='aspect', returnma=True, computeEdges=False)
ref_dem_clip_ds = None
src_dem_clip_ds = None
#Apply slope filter to diff
#Note that we combine masks from diff and slope in coreglib
diff = np.ma.array(diff, mask=np.ma.getmaskarray(slope))
#Get final mask after filtering
static_mask = np.ma.getmaskarray(diff)
#Compute stats for new masked difference map
print("Filtered difference map")
diff_stats = malib.print_stats(diff)
dz = diff_stats[5]
print("Computing sub-pixel offset between DEMs using mode: %s" % mode)
#By default, don't create output figure
fig = None
    #Default horizontal shift is (0,0)
dx = 0
dy = 0
#Sum of absolute differences
if mode == "sad":
ref_dem = np.ma.array(ref_dem, mask=static_mask)
src_dem = np.ma.array(src_dem, mask=static_mask)
m, int_offset, sp_offset = coreglib.compute_offset_sad(ref_dem, src_dem, pad=pad)
#Geotransform has negative y resolution, so don't need negative sign
#np array is positive down
#GDAL coordinates are positive up
dx = sp_offset[1]*src_dem_gt[1]
dy = sp_offset[0]*src_dem_gt[5]
#Normalized cross-correlation of clipped, overlapping areas
elif mode == "ncc":
ref_dem = np.ma.array(ref_dem, mask=static_mask)
src_dem = np.ma.array(src_dem, mask=static_mask)
m, int_offset, sp_offset, fig = coreglib.compute_offset_ncc(ref_dem, src_dem, \
pad=pad, prefilter=False, plot=plot)
dx = sp_offset[1]*src_dem_gt[1]
dy = sp_offset[0]*src_dem_gt[5]
#Nuth and Kaab (2011)
elif mode == "nuth":
#Compute relationship between elevation difference, slope and aspect
fit_param, fig = coreglib.compute_offset_nuth(diff, slope, aspect, plot=plot)
if fit_param is None:
print("Failed to calculate horizontal shift")
else:
#fit_param[0] is magnitude of shift vector
#fit_param[1] is direction of shift vector
#fit_param[2] is mean bias divided by tangent of mean slope
#print(fit_param)
dx = fit_param[0]*np.sin(np.deg2rad(fit_param[1]))
dy = fit_param[0]*np.cos(np.deg2rad(fit_param[1]))
med_slope = malib.fast_median(slope)
nuth_dz = fit_param[2]*np.tan(np.deg2rad(med_slope))
print('Median dz: %0.2f\nNuth dz: %0.2f' % (dz, nuth_dz))
#dz = nuth_dz
elif mode == "all":
print("Not yet implemented")
#Want to compare all methods, average offsets
#m, int_offset, sp_offset = coreglib.compute_offset_sad(ref_dem, src_dem)
#m, int_offset, sp_offset = coreglib.compute_offset_ncc(ref_dem, src_dem)
elif mode == "none":
print("Skipping alignment, writing out DEM with median bias over static surfaces removed")
dst_fn = outprefix+'_med%0.1f.tif' % dz
iolib.writeGTiff(src_dem_orig + dz, dst_fn, src_dem_ds)
sys.exit()
#Note: minus signs here since we are computing dz=(src-ref), but adjusting src
return -dx, -dy, -dz, static_mask, fig
def getparser():
parser = argparse.ArgumentParser(description="Perform DEM co-registration using multiple algorithms", \
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('ref_fn', type=str, help='Reference DEM filename')
parser.add_argument('src_fn', type=str, help='Source DEM filename to be shifted')
parser.add_argument('-mode', type=str, default='nuth', choices=['ncc', 'sad', 'nuth', 'none'], \
help='Type of co-registration to use')
parser.add_argument('-mask_list', nargs='+', type=str, default=[], choices=dem_mask.mask_choices, \
help='Define masks to use to limit reference surfaces for co-registration')
parser.add_argument('-tiltcorr', action='store_true', \
help='After preliminary translation, fit polynomial to residual elevation offsets and remove')
parser.add_argument('-polyorder', type=int, default=1, \
help='Specify order of polynomial fit')
parser.add_argument('-tol', type=float, default=0.02, \
help='When iterative translation magnitude is below this tolerance (meters), break and write out corrected DEM')
parser.add_argument('-max_offset', type=float, default=100, \
help='Maximum expected horizontal offset in meters, used to set search range for ncc and sad modes')
parser.add_argument('-max_dz', type=float, default=100, \
help='Maximum expected vertical offset in meters, used to filter outliers')
res_choices = ['min', 'max', 'mean', 'common_scale_factor']
parser.add_argument('-res', type=str, default='mean', choices=res_choices, \
        help='Warp inputs to this resolution')
parser.add_argument('-slope_lim', type=float, nargs=2, default=(0.1, 40), \
help='Minimum and maximum surface slope limits to consider')
parser.add_argument('-max_iter', type=int, default=30, \
help='Maximum number of iterations, if tol is not reached')
parser.add_argument('-outdir', default=None, help='Output directory')
return parser
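#Example invocation (a sketch; assumes this script is installed as dem_align.py, as
#in the demcoreg package):
#   dem_align.py -mode nuth -mask_list glaciers -max_offset 100 ref_dem.tif src_dem.tif
#This iteratively solves for the (dx, dy, dz) shift of src_dem.tif relative to
#ref_dem.tif using the selected co-registration mode, masking glacier surfaces.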
def main(argv=None):
parser = getparser()
args = parser.parse_args()
#Should check that files exist
ref_dem_fn = args.ref_fn
src_dem_fn = args.src_fn
mode = args.mode
mask_list = args.mask_list
max_offset = args.max_offset
max_dz = args.max_dz
slope_lim = tuple(args.slope_lim)
tiltcorr = args.tiltcorr
polyorder = args.polyorder
res = args.res
#Maximum number of iterations
max_iter = args.max_iter
#These are tolerances (in meters) to stop iteration
tol = args.tol
min_dx = tol
min_dy = tol
min_dz = tol
outdir = args.outdir
if outdir is None:
outdir = os.path.splitext(src_dem_fn)[0] + '_dem_align'
if tiltcorr:
outdir += '_tiltcorr'
tiltcorr_done = False
#Relax tolerance for initial round of co-registration
#tiltcorr_tol = 0.1
#if tol < tiltcorr_tol:
# tol = tiltcorr_tol
if not os.path.exists(outdir):
os.makedirs(outdir)
outprefix = '%s_%s' % (os.path.splitext(os.path.split(src_dem_fn)[-1])[0], \
os.path.splitext(os.path.split(ref_dem_fn)[-1])[0])
outprefix = os.path.join(outdir, outprefix)
print("\nReference: %s" % ref_dem_fn)
print("Source: %s" % src_dem_fn)
print("Mode: %s" % mode)
print("Output: %s\n" % outprefix)
src_dem_ds = gdal.Open(src_dem_fn)
ref_dem_ds = gdal.Open(ref_dem_fn)
#Get local cartesian coordinate system
#local_srs = geolib.localtmerc_ds(src_dem_ds)
#Use original source dataset coordinate system
#Potentially issues with distortion and xyz/tiltcorr offsets for DEM with large extent
local_srs = geolib.get_ds_srs(src_dem_ds)
#local_srs = geolib.get_ds_srs(ref_dem_ds)
#Resample to common grid
ref_dem_res = float(geolib.get_res(ref_dem_ds, t_srs=local_srs, square=True)[0])
#Create a copy to be updated in place
src_dem_ds_align = iolib.mem_drv.CreateCopy('', src_dem_ds, 0)
src_dem_res = float(geolib.get_res(src_dem_ds, t_srs=local_srs, square=True)[0])
src_dem_ds = None
#Resample to user-specified resolution
ref_dem_ds, src_dem_ds_align = warplib.memwarp_multi([ref_dem_ds, src_dem_ds_align], \
extent='intersection', res=args.res, t_srs=local_srs, r='cubic')
res = float(geolib.get_res(src_dem_ds_align, square=True)[0])
print("\nReference DEM res: %0.2f" % ref_dem_res)
print("Source DEM res: %0.2f" % src_dem_res)
print("Resolution for coreg: %s (%0.2f m)\n" % (args.res, res))
#Iteration number
n = 1
#Cumulative offsets
dx_total = 0
dy_total = 0
dz_total = 0
#Now iteratively update geotransform and vertical shift
while True:
print("*** Iteration %i ***" % n)
        dx, dy, dz, static_mask, fig = compute_offset(ref_dem_ds, src_dem_ds_align, src_dem_fn, mode=mode, \
                max_offset=max_offset, mask_list=mask_list, max_dz=max_dz, slope_lim=slope_lim, plot=True)
xyz_shift_str_iter = "dx=%+0.2fm, dy=%+0.2fm, dz=%+0.2fm" % (dx, dy, dz)
print("Incremental offset: %s" % xyz_shift_str_iter)
dx_total += dx
dy_total += dy
dz_total += dz
xyz_shift_str_cum = "dx=%+0.2fm, dy=%+0.2fm, dz=%+0.2fm" % (dx_total, dy_total, dz_total)
print("Cumulative offset: %s" % xyz_shift_str_cum)
#String to append to output filenames
xyz_shift_str_cum_fn = '_%s_x%+0.2f_y%+0.2f_z%+0.2f' % (mode, dx_total, dy_total, dz_total)
#Should make an animation of this converging
if n == 1:
#static_mask_orig = static_mask
if fig is not None:
dst_fn = outprefix + '_%s_iter%02i_plot.png' % (mode, n)
print("Writing offset plot: %s" % dst_fn)
fig.gca().set_title("Incremental: %s\nCumulative: %s" % (xyz_shift_str_iter, xyz_shift_str_cum))
fig.savefig(dst_fn, dpi=300)
#Apply the horizontal shift to the original dataset
src_dem_ds_align = coreglib.apply_xy_shift(src_dem_ds_align, dx, dy, createcopy=False)
#Should
src_dem_ds_align = coreglib.apply_z_shift(src_dem_ds_align, dz, createcopy=False)
n += 1
print("\n")
#If magnitude of shift in all directions is less than tol
#if n > max_iter or (abs(dx) <= min_dx and abs(dy) <= min_dy and abs(dz) <= min_dz):
#If magnitude of shift is less than tol
dm = np.sqrt(dx**2 + dy**2 + dz**2)
dm_total = np.sqrt(dx_total**2 + dy_total**2 + dz_total**2)
if dm_total > max_offset:
sys.exit("Total offset exceeded specified max_offset (%0.2f m). Consider increasing -max_offset argument" % max_offset)
#Stop iteration
if n > max_iter or dm < tol:
if fig is not None:
dst_fn = outprefix + '_%s_iter%02i_plot.png' % (mode, n)
print("Writing offset plot: %s" % dst_fn)
fig.gca().set_title("Incremental:%s\nCumulative:%s" % (xyz_shift_str_iter, xyz_shift_str_cum))
fig.savefig(dst_fn, dpi=300)
#Compute final elevation difference
if True:
ref_dem_clip_ds_align, src_dem_clip_ds_align = warplib.memwarp_multi([ref_dem_ds, src_dem_ds_align], \
res=res, extent='intersection', t_srs=local_srs, r='cubic')
ref_dem_align = iolib.ds_getma(ref_dem_clip_ds_align, 1)
src_dem_align = iolib.ds_getma(src_dem_clip_ds_align, 1)
ref_dem_clip_ds_align = None
diff_align = src_dem_align - ref_dem_align
src_dem_align = None
ref_dem_align = None
#Get updated, final mask
static_mask_final = get_mask(src_dem_clip_ds_align, mask_list, src_dem_fn)
static_mask_final = np.logical_or(np.ma.getmaskarray(diff_align), static_mask_final)
#Final stats, before outlier removal
diff_align_compressed = diff_align[~static_mask_final]
diff_align_stats = malib.get_stats_dict(diff_align_compressed, full=True)
#Prepare filtered version for tiltcorr fit
diff_align_filt = np.ma.array(diff_align, mask=static_mask_final)
diff_align_filt = outlier_filter(diff_align_filt, f=3, max_dz=max_dz)
#diff_align_filt = outlier_filter(diff_align_filt, perc=(12.5, 87.5), max_dz=max_dz)
slope = get_filtered_slope(src_dem_clip_ds_align)
diff_align_filt = np.ma.array(diff_align_filt, mask=np.ma.getmaskarray(slope))
diff_align_filt_stats = malib.get_stats_dict(diff_align_filt, full=True)
#Fit 2D polynomial to residuals and remove
#To do: add support for along-track and cross-track artifacts
if tiltcorr and not tiltcorr_done:
print("\n************")
print("Calculating 'tiltcorr' 2D polynomial fit to residuals with order %i" % polyorder)
print("************\n")
gt = src_dem_clip_ds_align.GetGeoTransform()
#Need to apply the mask here, so we're only fitting over static surfaces
#Note that the origmask=False will compute vals for all x and y indices, which is what we want
vals, resid, coeff = geolib.ma_fitpoly(diff_align_filt, order=polyorder, gt=gt, perc=(0,100), origmask=False)
#vals, resid, coeff = geolib.ma_fitplane(diff_align_filt, gt, perc=(12.5, 87.5), origmask=False)
#Should write out coeff or grid with correction
vals_stats = malib.get_stats_dict(vals)
#Want to have max_tilt check here
#max_tilt = 4.0 #m
#Should do percentage
#vals.ptp() > max_tilt
#Note: dimensions of ds and vals will be different as vals are computed for clipped intersection
#Need to recompute planar offset for full src_dem_ds_align extent and apply
xgrid, ygrid = geolib.get_xy_grids(src_dem_ds_align)
valgrid = geolib.polyval2d(xgrid, ygrid, coeff)
#For results of ma_fitplane
#valgrid = coeff[0]*xgrid + coeff[1]*ygrid + coeff[2]
src_dem_ds_align = coreglib.apply_z_shift(src_dem_ds_align, -valgrid, createcopy=False)
if True:
print("Creating plot of polynomial fit to residuals")
fig, axa = plt.subplots(1,2, figsize=(8, 4))
dz_clim = malib.calcperc_sym(vals, (2, 98))
ax = pltlib.iv(diff_align_filt, ax=axa[0], cmap='RdBu', clim=dz_clim, \
label='Residual dz (m)', scalebar=False)
ax = pltlib.iv(valgrid, ax=axa[1], cmap='RdBu', clim=dz_clim, \
label='Polyfit dz (m)', ds=src_dem_ds_align)
#if tiltcorr:
#xyz_shift_str_cum_fn += "_tiltcorr"
tiltcorr_fig_fn = outprefix + '%s_polyfit.png' % xyz_shift_str_cum_fn
print("Writing out figure: %s\n" % tiltcorr_fig_fn)
fig.savefig(tiltcorr_fig_fn, dpi=300)
print("Applying tilt correction to difference map")
diff_align -= vals
#Should iterate until tilts are below some threshold
#For now, only do one tiltcorr
tiltcorr_done=True
#Now use original tolerance, and number of iterations
tol = args.tol
max_iter = n + args.max_iter
else:
break
if True:
        #Write out aligned difference map for clipped extent with vertical offset removed
align_diff_fn = outprefix + '%s_align_diff.tif' % xyz_shift_str_cum_fn
print("Writing out aligned difference map with median vertical offset removed")
iolib.writeGTiff(diff_align, align_diff_fn, src_dem_clip_ds_align)
if True:
        #Write out filtered aligned difference map
align_diff_filt_fn = outprefix + '%s_align_diff_filt.tif' % xyz_shift_str_cum_fn
print("Writing out filtered aligned difference map with median vertical offset removed")
iolib.writeGTiff(diff_align_filt, align_diff_filt_fn, src_dem_clip_ds_align)
#Extract final center coordinates for intersection
center_coord_ll = geolib.get_center(src_dem_clip_ds_align, t_srs=geolib.wgs_srs)
center_coord_xy = geolib.get_center(src_dem_clip_ds_align)
src_dem_clip_ds_align = None
#Write out final aligned src_dem
align_fn = outprefix + '%s_align.tif' % xyz_shift_str_cum_fn
print("Writing out shifted src_dem with median vertical offset removed: %s" % align_fn)
#Open original uncorrected dataset at native resolution
src_dem_ds = gdal.Open(src_dem_fn)
src_dem_ds_align = iolib.mem_drv.CreateCopy('', src_dem_ds, 0)
    #Apply final horizontal and vertical shift to the original dataset
#Note: potentially issues if we used a different projection during coregistration!
src_dem_ds_align = coreglib.apply_xy_shift(src_dem_ds_align, dx_total, dy_total, createcopy=False)
src_dem_ds_align = coreglib.apply_z_shift(src_dem_ds_align, dz_total, createcopy=False)
if tiltcorr:
xgrid, ygrid = geolib.get_xy_grids(src_dem_ds_align)
valgrid = geolib.polyval2d(xgrid, ygrid, coeff)
#For results of ma_fitplane
#valgrid = coeff[0]*xgrid + coeff[1]*ygrid + coeff[2]
src_dem_ds_align = coreglib.apply_z_shift(src_dem_ds_align, -valgrid, createcopy=False)
#Might be cleaner way to write out MEM ds directly to disk
src_dem_full_align = iolib.ds_getma(src_dem_ds_align)
iolib.writeGTiff(src_dem_full_align, align_fn, src_dem_ds_align)
if True:
#Output final aligned src_dem, masked so only best pixels are preserved
#Useful if creating a new reference product
#Can also use apply_mask.py
print("Applying filter to shiftec src_dem")
align_diff_filt_full_ds = warplib.memwarp_multi_fn([align_diff_filt_fn,], res=src_dem_ds_align, extent=src_dem_ds_align, \
t_srs=src_dem_ds_align)[0]
align_diff_filt_full = iolib.ds_getma(align_diff_filt_full_ds)
align_diff_filt_full_ds = None
align_fn_masked = outprefix + '%s_align_filt.tif' % xyz_shift_str_cum_fn
iolib.writeGTiff(np.ma.array(src_dem_full_align, mask=np.ma.getmaskarray(align_diff_filt_full)), \
align_fn_masked, src_dem_ds_align)
src_dem_full_align = None
src_dem_ds_align = None
#Compute original elevation difference
if True:
ref_dem_clip_ds, src_dem_clip_ds = warplib.memwarp_multi([ref_dem_ds, src_dem_ds], \
res=res, extent='intersection', t_srs=local_srs, r='cubic')
src_dem_ds = None
ref_dem_ds = None
ref_dem_orig = iolib.ds_getma(ref_dem_clip_ds)
src_dem_orig = iolib.ds_getma(src_dem_clip_ds)
#Needed for plotting
ref_dem_hs = geolib.gdaldem_mem_ds(ref_dem_clip_ds, processing='hillshade', returnma=True, computeEdges=True)
src_dem_hs = geolib.gdaldem_mem_ds(src_dem_clip_ds, processing='hillshade', returnma=True, computeEdges=True)
diff_orig = src_dem_orig - ref_dem_orig
#Only compute stats over valid surfaces
static_mask_orig = get_mask(src_dem_clip_ds, mask_list, src_dem_fn)
#Note: this doesn't include outlier removal or slope mask!
static_mask_orig = np.logical_or(np.ma.getmaskarray(diff_orig), static_mask_orig)
        #For some reason, ASTER DEM diffs have a spike near the 0 bin; could be an issue with masking?
diff_orig_compressed = diff_orig[~static_mask_orig]
diff_orig_stats = malib.get_stats_dict(diff_orig_compressed, full=True)
#Prepare filtered version for comparison
diff_orig_filt = np.ma.array(diff_orig, mask=static_mask_orig)
diff_orig_filt = outlier_filter(diff_orig_filt, f=3, max_dz=max_dz)
#diff_orig_filt = outlier_filter(diff_orig_filt, perc=(12.5, 87.5), max_dz=max_dz)
slope = get_filtered_slope(src_dem_clip_ds)
diff_orig_filt = np.ma.array(diff_orig_filt, mask=np.ma.getmaskarray(slope))
diff_orig_filt_stats = malib.get_stats_dict(diff_orig_filt, full=True)
#Write out original difference map
print("Writing out original difference map for common intersection before alignment")
orig_diff_fn = outprefix + '_orig_diff.tif'
iolib.writeGTiff(diff_orig, orig_diff_fn, ref_dem_clip_ds)
src_dem_clip_ds = None
ref_dem_clip_ds = None
if True:
align_stats_fn = outprefix + '%s_align_stats.json' % xyz_shift_str_cum_fn
align_stats = {}
align_stats['src_fn'] = src_dem_fn
align_stats['ref_fn'] = ref_dem_fn
align_stats['align_fn'] = align_fn
align_stats['res'] = {}
align_stats['res']['src'] = src_dem_res
align_stats['res']['ref'] = ref_dem_res
align_stats['res']['coreg'] = res
align_stats['center_coord'] = {'lon':center_coord_ll[0], 'lat':center_coord_ll[1], \
'x':center_coord_xy[0], 'y':center_coord_xy[1]}
align_stats['shift'] = {'dx':dx_total, 'dy':dy_total, 'dz':dz_total, 'dm':dm_total}
#This tiltcorr flag gets set to false, need better flag
if tiltcorr:
align_stats['tiltcorr'] = {}
align_stats['tiltcorr']['coeff'] = coeff.tolist()
align_stats['tiltcorr']['val_stats'] = vals_stats
align_stats['before'] = diff_orig_stats
align_stats['before_filt'] = diff_orig_filt_stats
align_stats['after'] = diff_align_stats
align_stats['after_filt'] = diff_align_filt_stats
import json
with open(align_stats_fn, 'w') as f:
json.dump(align_stats, f)
#Create output plot
if True:
print("Creating final plot")
kwargs = {'interpolation':'none'}
#f, axa = plt.subplots(2, 4, figsize=(11, 8.5))
f, axa = plt.subplots(2, 4, figsize=(16, 8))
for ax in axa.ravel()[:-1]:
ax.set_facecolor('k')
pltlib.hide_ticks(ax)
dem_clim = malib.calcperc(ref_dem_orig, (2,98))
axa[0,0].imshow(ref_dem_hs, cmap='gray', **kwargs)
im = axa[0,0].imshow(ref_dem_orig, cmap='cpt_rainbow', clim=dem_clim, alpha=0.6, **kwargs)
pltlib.add_cbar(axa[0,0], im, arr=ref_dem_orig, clim=dem_clim, label=None)
pltlib.add_scalebar(axa[0,0], res=res)
axa[0,0].set_title('Reference DEM')
axa[0,1].imshow(src_dem_hs, cmap='gray', **kwargs)
im = axa[0,1].imshow(src_dem_orig, cmap='cpt_rainbow', clim=dem_clim, alpha=0.6, **kwargs)
pltlib.add_cbar(axa[0,1], im, arr=src_dem_orig, clim=dem_clim, label=None)
axa[0,1].set_title('Source DEM')
#axa[0,2].imshow(~static_mask_orig, clim=(0,1), cmap='gray')
axa[0,2].imshow(~static_mask, clim=(0,1), cmap='gray', **kwargs)
axa[0,2].set_title('Surfaces for co-registration')
dz_clim = malib.calcperc_sym(diff_orig_compressed, (5, 95))
im = axa[1,0].imshow(diff_orig, cmap='RdBu', clim=dz_clim, **kwargs)
pltlib.add_cbar(axa[1,0], im, arr=diff_orig, clim=dz_clim, label=None)
axa[1,0].set_title('Elev. Diff. Before (m)')
im = axa[1,1].imshow(diff_align, cmap='RdBu', clim=dz_clim, **kwargs)
pltlib.add_cbar(axa[1,1], im, arr=diff_align, clim=dz_clim, label=None)
axa[1,1].set_title('Elev. Diff. After (m)')
#tight_dz_clim = (-1.0, 1.0)
tight_dz_clim = (-2.0, 2.0)
#tight_dz_clim = (-10.0, 10.0)
#tight_dz_clim = malib.calcperc_sym(diff_align_filt, (5, 95))
im = axa[1,2].imshow(diff_align_filt, cmap='RdBu', clim=tight_dz_clim, **kwargs)
pltlib.add_cbar(axa[1,2], im, arr=diff_align_filt, clim=tight_dz_clim, label=None)
axa[1,2].set_title('Elev. Diff. After (m)')
#Tried to insert Nuth fig here
#ax_nuth.change_geometry(1,2,1)
#f.axes.append(ax_nuth)
bins = np.linspace(dz_clim[0], dz_clim[1], 128)
axa[1,3].hist(diff_orig_compressed, bins, color='g', label='Before', alpha=0.5)
axa[1,3].hist(diff_align_compressed, bins, color='b', label='After', alpha=0.5)
axa[1,3].set_xlim(*dz_clim)
axa[1,3].axvline(0, color='k', linewidth=0.5, linestyle=':')
axa[1,3].set_xlabel('Elev. Diff. (m)')
axa[1,3].set_ylabel('Count (px)')
axa[1,3].set_title("Source - Reference")
before_str = 'Before\nmed: %0.2f\nnmad: %0.2f' % (diff_orig_stats['med'], diff_orig_stats['nmad'])
axa[1,3].text(0.05, 0.95, before_str, va='top', color='g', transform=axa[1,3].transAxes, fontsize=8)
after_str = 'After\nmed: %0.2f\nnmad: %0.2f' % (diff_align_stats['med'], diff_align_stats['nmad'])
axa[1,3].text(0.65, 0.95, after_str, va='top', color='b', transform=axa[1,3].transAxes, fontsize=8)
#This is empty
axa[0,3].axis('off')
suptitle = '%s\nx: %+0.2fm, y: %+0.2fm, z: %+0.2fm' % (os.path.split(outprefix)[-1], dx_total, dy_total, dz_total)
f.suptitle(suptitle)
f.tight_layout()
plt.subplots_adjust(top=0.90)
fig_fn = outprefix + '%s_align.png' % xyz_shift_str_cum_fn
print("Writing out figure: %s" % fig_fn)
f.savefig(fig_fn, dpi=300)
if __name__ == "__main__":
main()
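# A minimal sketch (not part of the original script) of the convergence test
# used in the iteration loop above: stop once the 3D magnitude of the
# incremental shift falls below the -tol threshold, or the iteration limit is
# reached; the run aborts separately if the cumulative magnitude exceeds
# -max_offset.
def _converged_example(dx, dy, dz, n, tol, max_iter):
    """Return True when dm = sqrt(dx**2 + dy**2 + dz**2) < tol or n > max_iter."""
    dm = np.sqrt(dx**2 + dy**2 + dz**2)
    return n > max_iter or dm < tol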
|
|
# -*- coding: utf-8 -*-
import argparse
import contextlib
import gzip
import os
import sys
import math
import random
from os.path import join
from functools import reduce
import logging
from gensim.models import Word2Vec
import numpy as np
sys.path.append('.')
from discoutils.misc import mkdirs_if_not_exists, is_gzipped
from discoutils.tokens import DocumentFeature
from discoutils.thesaurus_loader import Vectors
from joblib import Parallel, delayed
from eval.pipeline.tokenizers import pos_coarsification_map
from builder.composers.vectorstore import (AdditiveComposer, MultiplicativeComposer,
LeftmostWordComposer, RightmostWordComposer,
VerbComposer, compose_and_write_vectors, default_row_filter_nopos,
default_row_filter)
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
# unigram extraction parameters
MIN_COUNT = 50
WORKERS = 10
# composition parameters
composer_algos = [AdditiveComposer, MultiplicativeComposer, LeftmostWordComposer,
RightmostWordComposer, VerbComposer]
class MySentences(object):
def __init__(self, dirname, file_percentage, repeat_num=0, remove_pos=False):
self.dirname = dirname
self.limit = file_percentage / 100
self.remove_pos = remove_pos
files = [x for x in sorted(os.listdir(self.dirname)) if not x.startswith('.')]
count = math.ceil(self.limit * len(files))
if repeat_num == 0:
# always take the same files for the first repetition so we can plot a learning curve that shows the
# effect of adding a bit of extra data, e.g. going from 50% to 60% of corpus.
self.files = files[:count]
else:
# the other repetitions are over random samples, to quantify the effect of the sample, not its size
random.seed(repeat_num)
self.files = random.sample(files, count)
logging.info('Will use the following %d files for training\n %s', len(self.files), self.files)
def __iter__(self):
for fname in self.files:
filename = join(self.dirname, fname)
infile = gzip.open(filename) if is_gzipped(filename) else open(filename)
with contextlib.closing(infile):
for line in infile:
# yield gensim.utils.tokenize(line, lower=True)
if isinstance(line, bytes):
line = line.decode()
res = [DocumentFeature.smart_lower(w) for w in line.split() if
DocumentFeature.from_string(w).type != 'EMPTY']
if len(res) > 8:
# ignore short sentences, they are probably noise
if self.remove_pos:
yield [x.split('/')[0] for x in res]
else:
yield res
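# Minimal standalone sketch (mirroring MySentences.__init__ above) of the
# file-subsetting policy: repetition 0 always takes the same leading slice of
# the sorted file list, while later repetitions draw a seeded random sample of
# the same size, so corpus-size effects can be separated from sample effects.
def _select_files_example(files, file_percentage, repeat_num):
    count = math.ceil(file_percentage / 100 * len(files))
    if repeat_num == 0:
        return files[:count]
    random.seed(repeat_num)
    return random.sample(files, count)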
def _train_model(percent, data_dir, repeat_num, remove_pos):
# train a word2vec model
logging.info('Training word2vec on %d percent of %s', percent, data_dir)
sentences = MySentences(data_dir, percent, repeat_num=repeat_num, remove_pos=remove_pos)
model = Word2Vec(sentences, workers=WORKERS, min_count=MIN_COUNT, seed=repeat_num)
return model
def write_gensim_vectors_to_tsv(model, output_path, vocab=None):
# get word2vec vectors for each word, write to TSV
if not vocab:
vocab = model.vocab.keys()
vectors = dict()
dims = len(model[next(iter(vocab))]) # vector dimensionality
dimension_names = ['f%02d' % i for i in range(dims)]
for word in vocab:
# watch for non-DocumentFeatures, these break to_tsv
# also ignore words with non-ascii characters
# if DocumentFeature.from_string(word).type == 'EMPTY': # todo assumes there is a PoS tag
# logging.info('Ignoring vector for %s', word)
# continue
vectors[word] = zip(dimension_names, model[word])
vectors = Vectors(vectors)
vectors.to_tsv(output_path, gzipped=True,
enforce_word_entry_pos_format=True,
entry_filter=lambda _: True,
dense_hd5=True)
del model
return vectors
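# Hypothetical illustration (made-up words and values) of the per-word layout
# assembled above before it is wrapped in a Vectors object and written to TSV:
# each entry pairs a word with (dimension_name, value) tuples named f00, f01, ...
#   {'cat/N': [('f00', 0.12), ('f01', -0.37)],
#    'dog/N': [('f00', 0.05), ('f01', 0.44)]}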
def reformat_data(conll_data_dir, text_only_data_dir, remove_pos):
"""
Data formatting
=========
    `word2vec` produces vectors for words, such as `computer`, whereas the rest of my experiments assume these are
augmented with a PoS tag, e.g. `computer/N`. To get around that, start with a directory of conll-formatted
files such as
```
1 Anarchism Anarchism NNP MISC 5 nsubj
2 is be VBZ O 5 cop
3 a a DT O 5 det
4 political political JJ O 5 amod
5 philosophy philosophy NN O 0 root
```
and convert them to pos-augmented format (using coarse tags like Petrov's):
```
Anarchism/N is/V a/DET ....
```
:param conll_data_dir: input directory in CONLL format
:param text_only_data_dir: output directory
"""
mkdirs_if_not_exists(text_only_data_dir)
Parallel(n_jobs=5)(delayed(_reformat_single_file)(conll_data_dir, filename, text_only_data_dir, remove_pos)
for filename in os.listdir(conll_data_dir))
def _reformat_single_file(conll_data_dir, filename, text_only_data_dir, remove_pos):
outfile_name = join(text_only_data_dir, filename + '.txt')
logging.info('Reformatting %s to %s', filename, outfile_name)
with open(join(conll_data_dir, filename)) as infile, open(outfile_name, 'w') as outfile:
for i, line in enumerate(infile):
if not line.strip(): # conll empty line = sentence boundary
outfile.write('\n')
continue
try:
idx, word, lemma, pos, *rest = line.strip().split('\t')
except ValueError:
# some words in david's data are missing a PoS tag
                logging.warning('Ignoring malformed token in %s', filename)
                continue
if remove_pos:
outfile.write(lemma.lower() + ' ')
else:
outfile.write('%s/%s ' % (lemma.lower(), pos_coarsification_map[pos]))
def compute_and_write_vectors(corpus_name, stages, percent, repeat, remove_pos):
prefix = os.path.abspath(os.path.join(__file__, '..', '..'))
output_dir = join(prefix, 'outputs', 'word2vec')
mkdirs_if_not_exists(output_dir)
# inputs
conll_data_dir = join(prefix, 'data/%s-conll' % corpus_name)
# outputs
if remove_pos:
text_only_data_dir = join(prefix, 'data/%s-nopos' % corpus_name)
unigram_events_file = join(output_dir, '%s-nopos-%dperc.unigr.strings'%(corpus_name, percent))
else:
text_only_data_dir = join(prefix, 'data/%s' % corpus_name)
unigram_events_file = join(output_dir, '%s-%dperc.unigr.strings'%(corpus_name, percent))
if percent > 90 and repeat > 1:
raise ValueError('Repeating with a different sample of corpus only makes sense when '
'the samples are sufficiently distinct. This requires that the sample'
' size is fairly small to minimise overlap between samples')
if 'reformat' in stages:
reformat_data(conll_data_dir, text_only_data_dir, remove_pos)
if 'vectors' in stages:
models = [_train_model(percent, text_only_data_dir, i, remove_pos) for i in range(repeat)]
vectors = []
# write the output of each run separately
for i in range(repeat):
output_path = unigram_events_file + '.rep%d' % i
vectors.append(write_gensim_vectors_to_tsv(models[i], output_path))
if 'average' in stages and repeat > 1:
# average vectors and append to list to be written
shared_vocab = set.intersection(*[set(model.vocab.keys()) for model in models])
output_path = unigram_events_file + '.avg%d' % repeat
model = {}
for k in shared_vocab:
model[k] = reduce(np.add, [m[k] for m in models])
vectors.append(write_gensim_vectors_to_tsv(model, output_path, vocab=shared_vocab))
else:
# let's just pretend something was written above. just need this so the loop below will run
vectors = [None] * repeat + ([None] if 'average' in stages and repeat > 1 else [])
if 'compose' in stages:
for i, v in enumerate(vectors):
# if we'll also be composing we don't have to write the unigram vectors to disk
# just to read them back later.
if 'average' in stages and i == (len(vectors) - 1) and len(vectors) > 1:
# last set of vectors in the list, these are the averages ones
out_path = 'word2vec-%s_%dpercent-avg%d' % (corpus_name, percent, repeat)
input_thing = v if 'vectors' in stages else unigram_events_file + '.avg%d' % repeat
else:
out_path = 'word2vec-%s_%dpercent-rep%d' % (corpus_name, percent, i)
input_thing = v if 'vectors' in stages else unigram_events_file + '.rep%d' % i
row_filter = default_row_filter_nopos if remove_pos else default_row_filter
compose_and_write_vectors(input_thing,
out_path,
composer_algos,
output_dir=output_dir,
row_filter=row_filter,
remove_pos=remove_pos,
dense_hd5=True)
def get_args_from_cmd_line():
parser = argparse.ArgumentParser()
parser.add_argument('--stages', choices=('reformat', 'vectors', 'average', 'compose'),
required=True, nargs='+')
parser.add_argument('--corpus', choices=('gigaw', 'wiki'), required=True)
# percent of files to use. SGE makes it easy for this to be 1, 2, ...
parser.add_argument('--percent', default=100, type=int)
    # number of times to repeat training, each time over a different random sample of the corpus
parser.add_argument('--repeat', default=1, type=int)
parser.add_argument('--remove-pos', action='store_true')
return parser.parse_args()
if __name__ == '__main__':
args = get_args_from_cmd_line()
logging.info('Params are: %r', args)
compute_and_write_vectors(args.corpus, args.stages, args.percent, args.repeat, args.remove_pos)
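# Example invocation (hypothetical script name and corpus, assuming the data
# layout expected by compute_and_write_vectors above):
#   python <this_script>.py --stages reformat vectors average compose \
#       --corpus wiki --percent 50 --repeat 3
# This reformats data/wiki-conll, trains three word2vec models on 50% samples
# of the corpus, writes per-repetition and averaged unigram vectors, and then
# composes phrase vectors with the composers listed in composer_algos.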
|
|
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Training script for causal model for Adult dataset, using PSCF."""
import functools
import time
from typing import Any, List, Mapping, NamedTuple, Sequence
from absl import app
from absl import flags
from absl import logging
import haiku as hk
import jax
import jax.numpy as jnp
from ml_collections.config_flags import config_flags
import numpy as np
import optax
import pandas as pd
from sklearn import metrics
import tensorflow as tf
import tensorflow_datasets as tfds
from tensorflow_probability.substrates import jax as tfp
from counterfactual_fairness import adult
from counterfactual_fairness import causal_network
from counterfactual_fairness import utils
from counterfactual_fairness import variational
FLAGS = flags.FLAGS
config_flags.DEFINE_config_file(
'config', 'adult_pscf_config.py', 'Training configuration.')
LOG_EVERY = 100
# These are all aliases to callables which will return instances of
# particular distribution modules, or a Node itself. This is used to make
# subsequent code more legible.
Node = causal_network.Node
Gaussian = causal_network.Gaussian
MLPMultinomial = causal_network.MLPMultinomial
def build_input(train_data: pd.DataFrame, batch_size: int,
training_steps: int, shuffle_size: int = 10000):
"""See base class."""
num_epochs = (training_steps // batch_size) + 1
ds = utils.get_dataset(train_data, batch_size, shuffle_size,
num_epochs=num_epochs)
ds = ds.prefetch(tf.data.AUTOTUNE)
return iter(tfds.as_numpy(ds))
class CausalNetOutput(NamedTuple):
q_hidden_obs: Sequence[tfp.distributions.Distribution]
p_hidden: Sequence[tfp.distributions.Distribution]
hidden_samples: Sequence[jnp.ndarray]
log_p_obs_hidden: jnp.ndarray
is_male: jnp.ndarray # indicates which elements of the batch correspond to
# male individuals
def build_causal_graph(train_data: pd.DataFrame, column_names: List[str],
inputs: jnp.ndarray):
"""Build the causal graph of the model."""
make_multinomial = functools.partial(
causal_network.MLPMultinomial.from_frame, hidden_shape=(100,))
make_gaussian = functools.partial(
causal_network.Gaussian, hidden_shape=(100,))
# Construct the graphical model. Each random variable is represented by an
# instance of the `Node` class, as discussed in that class's docstring.
# The following nodes have no parents, and thus the distribution modules
# will not be conditional on anything -- they simply represent priors.
node_a = Node(MLPMultinomial.from_frame(train_data, 'sex'))
node_c1 = Node(MLPMultinomial.from_frame(train_data, 'native-country'))
node_c2 = Node(Gaussian('age', column_names.index('age')))
# These are all hidden nodes, that do not correspond to any actual data in
# pandas dataframe loaded previously. We therefore are permitted to control
# the dimensionality of these nodes as we wish (with the `dim` argument).
# The distribution module here should be interpreted as saying that we are
# imposing a multi-modal prior (a mixture of Gaussians) on each latent
# variable.
node_hm = Node(causal_network.GaussianMixture('hm', 10, dim=2), hidden=True)
node_hl = Node(causal_network.GaussianMixture('hl', 10, dim=2), hidden=True)
node_hr1 = Node(
causal_network.GaussianMixture('hr1', 10, dim=2), hidden=True)
node_hr2 = Node(
causal_network.GaussianMixture('hr2', 10, dim=2), hidden=True)
node_hr3 = Node(
causal_network.GaussianMixture('hr3', 10, dim=2), hidden=True)
# The rest of the graph is now constructed; the order of construction is
# important, so we can inform each node of its parents.
# Note that in the paper we simply have one node called "R", but here it is
# separated into three separate `Node` instances. This is necessary since
# each node can only represent a single quantity in the dataframe.
node_m = Node(
make_multinomial(train_data, 'marital-status'),
[node_a, node_hm, node_c1, node_c2])
node_l = Node(
make_gaussian('education-num', column_names.index('education-num')),
[node_a, node_hl, node_c1, node_c2, node_m])
node_r1 = Node(
make_multinomial(train_data, 'occupation'),
[node_a, node_c1, node_c2, node_m, node_l])
node_r2 = Node(
make_gaussian('hours-per-week', column_names.index('hours-per-week')),
[node_a, node_c1, node_c2, node_m, node_l])
node_r3 = Node(
make_multinomial(train_data, 'workclass'),
[node_a, node_c1, node_c2, node_m, node_l])
node_y = Node(
MLPMultinomial.from_frame(train_data, 'income'),
[node_a, node_c1, node_c2, node_m, node_l, node_r1, node_r2, node_r3])
# We now construct several (self-explanatory) collections of nodes. These
# will be used at various points later in the code, and serve to provide
# greater semantic interpretability.
observable_nodes = (node_a, node_c1, node_c2, node_l, node_m, node_r1,
node_r2, node_r3, node_y)
# The nodes on which each latent variable is conditionally dependent.
# Note that Y is not in this list, since all of its dependencies are
# included below, and further it does not depend directly on Hm.
nodes_on_which_hm_depends = (node_a, node_c1, node_c2, node_m)
nodes_on_which_hl_depends = (node_a, node_c1, node_c2, node_m, node_l)
nodes_on_which_hr1_depends = (node_a, node_c1, node_c2, node_m, node_l,
node_r1)
nodes_on_which_hr2_depends = (node_a, node_c1, node_c2, node_m, node_l,
node_r2)
nodes_on_which_hr3_depends = (node_a, node_c1, node_c2, node_m, node_l,
node_r3)
hidden_nodes = (node_hm, node_hl, node_hr1, node_hr2, node_hr3)
# Function to create the distribution needed for variational inference. This
# is the same for each latent variable.
def make_q_x_obs_module(node):
"""Make a Variational module for the given hidden variable."""
assert node.hidden
return variational.Variational(
common_layer_sizes=(20, 20), output_dim=node.dim)
# For each latent variable, we first construct a Haiku module (using the
# function above), and then connect it to the graph using the node's
# value. As described in more detail in the documentation for `Node`,
# these values represent actual observed data. Therefore we will later
# be connecting these same modules to the graph in different ways in order
# to perform fair inference.
q_hm_obs_module = make_q_x_obs_module(node_hm)
q_hl_obs_module = make_q_x_obs_module(node_hl)
q_hr1_obs_module = make_q_x_obs_module(node_hr1)
q_hr2_obs_module = make_q_x_obs_module(node_hr2)
q_hr3_obs_module = make_q_x_obs_module(node_hr3)
causal_network.populate(observable_nodes, inputs)
q_hm_obs = q_hm_obs_module(
*(node.observed_value for node in nodes_on_which_hm_depends))
q_hl_obs = q_hl_obs_module(
*(node.observed_value for node in nodes_on_which_hl_depends))
q_hr1_obs = q_hr1_obs_module(
*(node.observed_value for node in nodes_on_which_hr1_depends))
q_hr2_obs = q_hr2_obs_module(
*(node.observed_value for node in nodes_on_which_hr2_depends))
q_hr3_obs = q_hr3_obs_module(
*(node.observed_value for node in nodes_on_which_hr3_depends))
q_hidden_obs = (q_hm_obs, q_hl_obs, q_hr1_obs, q_hr2_obs, q_hr3_obs)
return observable_nodes, hidden_nodes, q_hidden_obs
def build_forward_fn(train_data: pd.DataFrame, column_names: List[str],
likelihood_multiplier: float):
"""Create the model's forward pass."""
def forward_fn(inputs: jnp.ndarray) -> CausalNetOutput:
"""Forward pass."""
observable_nodes, hidden_nodes, q_hidden = build_causal_graph(
train_data, column_names, inputs)
(node_hm, node_hl, node_hr1, node_hr2, node_hr3) = hidden_nodes
(node_a, _, _, _, _, _, _, _, node_y) = observable_nodes
# Log-likelihood function.
def log_p_obs_h(hm_value, hl_value, hr1_value, hr2_value, hr3_value):
"""Compute log P(A, C, M, L, R, Y | H)."""
# In order to create distributions like P(M | H_m, A, C), we need
# the value of H_m that we've been provided as an argument, rather than
# the value stored on H_m (which, in fact, will never be populated
# since H_m is unobserved).
# For compactness, we first construct the complete list of replacements.
node_to_replacement = {
node_hm: hm_value,
node_hl: hl_value,
node_hr1: hr1_value,
node_hr2: hr2_value,
node_hr3: hr3_value,
}
def log_prob_for_node(node):
"""Given a node, compute it's log probability for the given latents."""
log_prob = jnp.squeeze(
node.make_distribution(node_to_replacement).log_prob(
node.observed_value))
return log_prob
# We apply the likelihood multiplier to all likelihood terms except that
# for Y, the target. This is then added on separately in the line below.
sum_no_y = likelihood_multiplier * sum(
log_prob_for_node(node)
for node in observable_nodes
if node is not node_y)
return sum_no_y + log_prob_for_node(node_y)
q_hidden_obs = tuple(q_hidden)
p_hidden = tuple(node.distribution for node in hidden_nodes)
rnd_key = hk.next_rng_key()
hidden_samples = tuple(
q_hidden.sample(seed=rnd_key) for q_hidden in q_hidden_obs)
log_p_obs_hidden = log_p_obs_h(*hidden_samples)
# We need to split our batch of data into male and female parts.
is_male = jnp.equal(node_a.observed_value[:, 1], 1)
return CausalNetOutput(
q_hidden_obs=q_hidden_obs,
p_hidden=p_hidden,
hidden_samples=hidden_samples,
log_p_obs_hidden=log_p_obs_hidden,
is_male=is_male)
def fair_inference_fn(inputs: jnp.ndarray, batch_size: int,
num_prediction_samples: int):
"""Get the fair and unfair predictions for the given input."""
observable_nodes, hidden_nodes, q_hidden_obs = build_causal_graph(
train_data, column_names, inputs)
(node_hm, node_hl, node_hr1, node_hr2, node_hr3) = hidden_nodes
(node_a, node_c1, node_c2, node_l, node_m, node_r1, node_r2, node_r3,
node_y) = observable_nodes
(q_hm_obs, q_hl_obs, q_hr1_obs, q_hr2_obs, q_hr3_obs) = q_hidden_obs
rnd_key = hk.next_rng_key()
# *** FAIR INFERENCE ***
# To predict Y in a fair sense:
# * Infer Hm given observations.
# * Infer M using inferred Hm, baseline A, real C
# * Infer L using inferred Hl, M, real A, C
# * Infer Y using inferred M, baseline A, real C
# This is done by numerical integration, i.e. draw samples from
# p_fair(Y | A, C, M, L).
a_all_male = jnp.concatenate(
(jnp.zeros((batch_size, 1)), jnp.ones((batch_size, 1))),
axis=1)
    # Here we draw num_prediction_samples per observation. This results in an
    # array of shape (num_prediction_samples, batch_size, hm_dim). However, the
    # forward pass is easier after reshaping to
    # (num_prediction_samples * batch_size, hm_dim).
hm_dim = 2
def expanded_sample(distribution):
return distribution.sample(
num_prediction_samples, seed=rnd_key).reshape(
(batch_size * num_prediction_samples, hm_dim))
hm_pred_sample = expanded_sample(q_hm_obs)
hl_pred_sample = expanded_sample(q_hl_obs)
hr1_pred_sample = expanded_sample(q_hr1_obs)
hr2_pred_sample = expanded_sample(q_hr2_obs)
hr3_pred_sample = expanded_sample(q_hr3_obs)
# The values of the observed nodes need to be tiled to match the dims
# of the above hidden samples. The `expand` function achieves this.
def expand(observed_value):
return jnp.tile(observed_value, (num_prediction_samples, 1))
expanded_a = expand(node_a.observed_value)
expanded_a_baseline = expand(a_all_male)
expanded_c1 = expand(node_c1.observed_value)
expanded_c2 = expand(node_c2.observed_value)
# For M, and all subsequent variables, we only generate one sample. This
# is because we already have *many* samples from the latent variables, and
# all we require is an independent sample from the distribution.
m_pred_sample = node_m.make_distribution({
node_a: expanded_a_baseline,
node_hm: hm_pred_sample,
node_c1: expanded_c1,
node_c2: expanded_c2}).sample(seed=rnd_key)
l_pred_sample = node_l.make_distribution({
node_a: expanded_a,
node_hl: hl_pred_sample,
node_c1: expanded_c1,
node_c2: expanded_c2,
node_m: m_pred_sample}).sample(seed=rnd_key)
r1_pred_sample = node_r1.make_distribution({
node_a: expanded_a,
node_hr1: hr1_pred_sample,
node_c1: expanded_c1,
node_c2: expanded_c2,
node_m: m_pred_sample,
node_l: l_pred_sample}).sample(seed=rnd_key)
r2_pred_sample = node_r2.make_distribution({
node_a: expanded_a,
node_hr2: hr2_pred_sample,
node_c1: expanded_c1,
node_c2: expanded_c2,
node_m: m_pred_sample,
node_l: l_pred_sample}).sample(seed=rnd_key)
r3_pred_sample = node_r3.make_distribution({
node_a: expanded_a,
node_hr3: hr3_pred_sample,
node_c1: expanded_c1,
node_c2: expanded_c2,
node_m: m_pred_sample,
node_l: l_pred_sample}).sample(seed=rnd_key)
# Finally, we sample from the distribution for Y. Like above, we only
# draw one sample per element in the array.
y_pred_sample = node_y.make_distribution({
node_a: expanded_a_baseline,
# node_a: expanded_a,
node_c1: expanded_c1,
node_c2: expanded_c2,
node_m: m_pred_sample,
node_l: l_pred_sample,
node_r1: r1_pred_sample,
node_r2: r2_pred_sample,
node_r3: r3_pred_sample}).sample(seed=rnd_key)
# Reshape back to (num_samples, batch_size, y_dim), undoing the expanding
# operation used for sampling.
y_pred_sample = y_pred_sample.reshape(
(num_prediction_samples, batch_size, -1))
# Now form an array of shape (batch_size, y_dim) by taking an expectation
# over the sample dimension. This represents the probability that the
# result is in each class.
y_pred_expectation = jnp.mean(y_pred_sample, axis=0)
# Find out the predicted y, for later use in a confusion matrix.
predicted_class_y_fair = utils.multinomial_class(y_pred_expectation)
# *** NAIVE INFERENCE ***
predicted_class_y_unfair = utils.multinomial_class(node_y.distribution)
return predicted_class_y_fair, predicted_class_y_unfair
return forward_fn, fair_inference_fn
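# A minimal, self-contained sketch (shapes assumed, not part of the original
# model) of the sampling bookkeeping used in fair_inference_fn above: latent
# samples are flattened to (num_samples * batch_size, dim) so one pass through
# the graph covers every sample, and the resulting Y samples are reshaped back
# and averaged over the sample axis to approximate the fair prediction.
def _average_over_samples_example(y_pred_flat, num_samples, batch_size):
  """Reshape (S*B, y_dim) samples to (S, B, y_dim) and average over S."""
  y = y_pred_flat.reshape((num_samples, batch_size, -1))
  return jnp.mean(y, axis=0)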
def _loss_fn(
forward_fn,
beta: float,
mmd_sample_size: int,
constraint_multiplier: float,
constraint_ratio: float,
params: hk.Params,
rng: jnp.ndarray,
inputs: jnp.ndarray,
) -> jnp.ndarray:
"""Loss function definition."""
outputs = forward_fn(params, rng, inputs)
loss = _loss_klqp(outputs, beta)
# if (constraint_ratio * constraint_multiplier) > 0:
constraint_loss = 0.
# Create constraint penalty and add to overall loss term.
for distribution in outputs.q_hidden_obs:
constraint_loss += (constraint_ratio * constraint_multiplier *
utils.mmd_loss(distribution,
outputs.is_male,
mmd_sample_size,
rng))
# Optimisation - don't do the computation if the multiplier is set to zero.
loss += constraint_loss
return loss
def _evaluate(
fair_inference_fn,
params: hk.Params,
rng: jnp.ndarray,
inputs: jnp.ndarray,
batch_size: int,
num_prediction_samples: int,
):
"""Perform evaluation of fair inference."""
output = fair_inference_fn(params, rng, inputs,
batch_size, num_prediction_samples)
return output
def _loss_klqp(outputs: CausalNetOutput, beta: float) -> jnp.ndarray:
"""Compute the loss on data wrt params."""
expected_log_q_hidden_obs = sum(
jnp.sum(q_hidden_obs.log_prob(hidden_sample), axis=1) for q_hidden_obs,
hidden_sample in zip(outputs.q_hidden_obs, outputs.hidden_samples))
assert expected_log_q_hidden_obs.ndim == 1
# For log probabilities computed from distributions, we need to sum along
# the last axis, which takes the product of distributions for
# multi-dimensional hidden variables.
log_p_hidden = sum(
jnp.sum(p_hidden.log_prob(hidden_sample), axis=1) for p_hidden,
hidden_sample in zip(outputs.p_hidden, outputs.hidden_samples))
assert outputs.log_p_obs_hidden.ndim == 1
kl_divergence = (
beta * (expected_log_q_hidden_obs - log_p_hidden) -
outputs.log_p_obs_hidden)
return jnp.mean(kl_divergence)
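# Reading directly from _loss_klqp above: the objective is a single-sample,
# beta-weighted negative ELBO averaged over the batch,
#   loss = mean_batch[ beta * (log q(h | x) - log p(h)) - log p(x | h) ]
# where h is one sample drawn from each variational posterior q(h | x); the
# (optionally delayed) MMD constraint penalty is added on top in _loss_fn.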
class Updater:
"""A stateless abstraction around an init_fn/update_fn pair.
This extracts some common boilerplate from the training loop.
"""
def __init__(self, net_init, loss_fn, eval_fn,
optimizer: optax.GradientTransformation,
constraint_turn_on_step):
self._net_init = net_init
self._loss_fn = loss_fn
self._eval_fn = eval_fn
self._opt = optimizer
self._constraint_turn_on_step = constraint_turn_on_step
@functools.partial(jax.jit, static_argnums=0)
def init(self, init_rng, data):
"""Initializes state of the updater."""
params = self._net_init(init_rng, data)
opt_state = self._opt.init(params)
out = dict(
step=np.array(0),
rng=init_rng,
opt_state=opt_state,
params=params,
)
return out
@functools.partial(jax.jit, static_argnums=0)
def update(self, state: Mapping[str, Any], data: jnp.ndarray):
"""Updates the state using some data and returns metrics."""
rng = state['rng']
params = state['params']
constraint_ratio = (state['step'] > self._constraint_turn_on_step).astype(
float)
loss, g = jax.value_and_grad(self._loss_fn, argnums=1)(
constraint_ratio, params, rng, data)
updates, opt_state = self._opt.update(g, state['opt_state'])
params = optax.apply_updates(params, updates)
new_state = {
'step': state['step'] + 1,
'rng': rng,
'opt_state': opt_state,
'params': params,
}
new_metrics = {
'step': state['step'],
'loss': loss,
}
return new_state, new_metrics
@functools.partial(jax.jit, static_argnums=(0, 3, 4))
def evaluate(self, state: Mapping[str, Any], inputs: jnp.ndarray,
batch_size: int, num_prediction_samples: int):
"""Evaluate fair inference."""
rng = state['rng']
params = state['params']
fair_pred, unfair_pred = self._eval_fn(params, rng, inputs, batch_size,
num_prediction_samples)
return fair_pred, unfair_pred
def main(_):
flags_config = FLAGS.config
# Create the dataset.
train_data, test_data = adult.read_all_data(FLAGS.dataset_dir)
column_names = list(train_data.columns)
train_input = build_input(train_data, flags_config.batch_size,
flags_config.num_steps)
# Set up the model, loss, and updater.
forward_fn, fair_inference_fn = build_forward_fn(
train_data, column_names, flags_config.likelihood_multiplier)
forward_fn = hk.transform(forward_fn)
fair_inference_fn = hk.transform(fair_inference_fn)
loss_fn = functools.partial(_loss_fn, forward_fn.apply,
flags_config.beta,
flags_config.mmd_sample_size,
flags_config.constraint_multiplier)
eval_fn = functools.partial(_evaluate, fair_inference_fn.apply)
optimizer = optax.adam(flags_config.learning_rate)
updater = Updater(forward_fn.init, loss_fn, eval_fn,
optimizer, flags_config.constraint_turn_on_step)
# Initialize parameters.
logging.info('Initializing parameters...')
rng = jax.random.PRNGKey(42)
train_data = next(train_input)
state = updater.init(rng, train_data)
# Training loop.
logging.info('Starting train loop...')
prev_time = time.time()
for step in range(flags_config.num_steps):
train_data = next(train_input)
state, stats = updater.update(state, train_data)
if step % LOG_EVERY == 0:
steps_per_sec = LOG_EVERY / (time.time() - prev_time)
prev_time = time.time()
stats.update({'steps_per_sec': steps_per_sec})
logging.info({k: float(v) for k, v in stats.items()})
# Evaluate.
logging.info('Starting evaluation...')
test_input = build_input(test_data, flags_config.batch_size,
training_steps=0,
shuffle_size=0)
predicted_test_y = []
corrected_test_y = []
while True:
try:
eval_data = next(test_input)
# Now run the fair prediction; this projects the input to the latent space
# and then performs sampling.
predicted_class_y_fair, predicted_class_y_unfair = updater.evaluate(
state, eval_data, flags_config.batch_size,
flags_config.num_prediction_samples)
predicted_test_y.append(predicted_class_y_unfair)
corrected_test_y.append(predicted_class_y_fair)
# logging.info('Completed evaluation step %d', step)
except StopIteration:
logging.info('Finished evaluation')
break
# Join together the predictions from each batch.
test_y = np.concatenate(predicted_test_y, axis=0)
tweaked_test_y = np.concatenate(corrected_test_y, axis=0)
# Note the true values for computing accuracy and confusion matrices.
y_true = test_data['income'].cat.codes
  # Make sure y_true is the same size as the predictions
y_true = y_true[:len(test_y)]
test_accuracy = metrics.accuracy_score(y_true, test_y)
tweaked_test_accuracy = metrics.accuracy_score(
y_true, tweaked_test_y)
# Print out accuracy and confusion matrices.
logging.info('Accuracy (full model): %f', test_accuracy)
logging.info('Confusion matrix:')
logging.info(metrics.confusion_matrix(y_true, test_y))
logging.info('')
logging.info('Accuracy (tweaked with baseline: Male): %f',
tweaked_test_accuracy)
logging.info('Confusion matrix:')
logging.info(metrics.confusion_matrix(y_true, tweaked_test_y))
if __name__ == '__main__':
app.run(main)
|
|
"""
FlexGet build and development utilities - unfortunately this file is somewhat messy
"""
from __future__ import print_function
import os
import sys
from paver.easy import *
import paver.virtual
import paver.setuputils
from paver.setuputils import setup, find_package_data, find_packages
sphinxcontrib = False
try:
from sphinxcontrib import paverutils
sphinxcontrib = True
except ImportError:
pass
sys.path.insert(0, '')
options = environment.options
# There is a bug in sqlalchemy 0.9.0, see gh#127
# There is a bug in beautifulsoup 4.2.0 that breaks imdb parsing, see http://flexget.com/ticket/2091
# There is a bug in requests 2.4.0 where it leaks urllib3 exceptions
install_requires = [
'FeedParser>=5.1.3', 'SQLAlchemy >=0.7.5, !=0.9.0, <1.999', 'PyYAML',
'beautifulsoup4>=4.1, !=4.2.0, <4.4', 'html5lib>=0.11', 'PyRSS2Gen', 'pynzb', 'progressbar', 'rpyc',
'jinja2', 'requests>=1.0, !=2.4.0, <2.99', 'python-dateutil!=2.0, !=2.2', 'jsonschema>=2.0',
'python-tvrage', 'tmdb3', 'path.py', 'guessit>=0.9.3, <0.10.4', 'apscheduler',
'flask>=0.7', 'flask-restful>=0.3.3', 'ordereddict>=1.1', 'flask-restplus==0.7.2', 'cherrypy>=3.7.0',
'flask-assets>=0.11', 'cssmin>=0.2.0', 'flask-compress>=1.2.1', 'flask-login>=0.3.2'
]
if sys.version_info < (2, 7):
# argparse is part of the standard library in python 2.7+
install_requires.append('argparse')
entry_points = {'console_scripts': ['flexget = flexget:main']}
# Provide an alternate exe on windows which does not cause a pop-up when scheduled
if sys.platform.startswith('win'):
entry_points.setdefault('gui_scripts', []).append('flexget-headless = flexget:main')
with open("README.rst") as readme:
long_description = readme.read()
# Populates __version__ without importing the package
__version__ = None
execfile('flexget/_version.py')
if not __version__:
print('Could not find __version__ from flexget/_version.py')
sys.exit(1)
setup(
name='FlexGet',
version=__version__, # release task may edit this
description='FlexGet is a program aimed to automate downloading or processing content (torrents, podcasts, etc.) '
'from different sources like RSS-feeds, html-pages, various sites and more.',
long_description=long_description,
author='Marko Koivusalo',
author_email='marko.koivusalo@gmail.com',
license='MIT',
url='http://flexget.com',
download_url='http://download.flexget.com',
install_requires=install_requires,
packages=find_packages(exclude=['tests']),
package_data=find_package_data('flexget', package='flexget',
exclude=['FlexGet.egg-info', '*.pyc'],
exclude_directories=['node_modules', 'bower_components'],
only_in_packages=False), # NOTE: the exclude does not seem to work
zip_safe=False,
test_suite='nose.collector',
extras_require={
'memusage': ['guppy'],
'NZB': ['pynzb'],
'TaskTray': ['pywin32'],
},
entry_points=entry_points,
classifiers=[
"Development Status :: 5 - Production/Stable",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: Implementation :: CPython",
"Programming Language :: Python :: Implementation :: PyPy",
]
)
options(
minilib=Bunch(
# 'version' is included as workaround to https://github.com/paver/paver/issues/112, TODO: remove
extra_files=['virtual', 'svn', 'version']
),
virtualenv=Bunch(
paver_command_line='develop'
),
# sphinxcontrib.paverutils
sphinx=Bunch(
docroot='docs',
builddir='build',
builder='html',
confdir='docs'
),
)
def set_init_version(ver):
"""Replaces the version with ``ver`` in _version.py"""
import fileinput
for line in fileinput.FileInput('flexget/_version.py', inplace=1):
if line.startswith('__version__ = '):
line = "__version__ = '%s'\n" % ver
print(line, end='')
@task
def version():
"""Prints the version number of the source"""
print(__version__)
@task
@cmdopts([('dev', None, 'Bumps to new development version instead of release version.')])
def increment_version(options):
"""Increments either release or dev version by 1"""
print('current version: %s' % __version__)
ver_split = __version__.split('.')
dev = options.increment_version.get('dev')
if 'dev' in ver_split[-1]:
if dev:
# If this is already a development version, increment the dev count by 1
ver_split[-1] = 'dev%d' % (int(ver_split[-1].strip('dev') or 0) + 1)
else:
# Just strip off dev tag for next release version
ver_split = ver_split[:-1]
else:
# Increment the revision number by one
if len(ver_split) == 2:
# We don't have a revision number, assume 0
ver_split.append('1')
else:
ver_split[-1] = str(int(ver_split[-1]) + 1)
if dev:
ver_split.append('dev')
new_version = '.'.join(ver_split)
print('new version: %s' % new_version)
set_init_version(new_version)
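# Worked examples (hypothetical version strings) of the bump rules above:
#   '1.2'                  -> '1.2.1'      (no revision number yet, assume 0)
#   '1.2.3'                -> '1.2.4'      (release bump)
#   '1.2.3'     with --dev -> '1.2.4.dev'  (next development version)
#   '1.2.4.dev' with --dev -> '1.2.4.dev1' (bump the dev counter)
#   '1.2.4.dev1'           -> '1.2.4'      (strip dev tag for the release)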
@task
@cmdopts([
('online', None, 'Run online tests')
])
def test(options):
"""Run FlexGet unit tests"""
options.setdefault('test', Bunch())
import nose
from nose.plugins.manager import DefaultPluginManager
cfg = nose.config.Config(plugins=DefaultPluginManager(), verbosity=2)
args = []
# Adding the -v flag makes the tests fail in python 2.7
#args.append('-v')
args.append('--processes=4')
args.append('-x')
if not options.test.get('online'):
args.append('--attr=!online')
args.append('--where=tests')
# Store current path since --where changes it, restore when leaving
cwd = os.getcwd()
try:
return nose.run(argv=args, config=cfg)
finally:
os.chdir(cwd)
@task
def clean():
"""Cleans up the virtualenv"""
import os
import glob
for p in ('bin', 'Scripts', 'build', 'dist', 'include', 'lib', 'man',
'share', 'FlexGet.egg-info', 'paver-minilib.zip', 'setup.py'):
pth = path(p)
if pth.isdir():
pth.rmtree()
elif pth.isfile():
pth.remove()
for pkg in set(options.setup.packages) | set(('tests',)):
for filename in glob.glob(pkg.replace('.', os.sep) + "/*.py[oc~]"):
path(filename).remove()
@task
@cmdopts([
('dist-dir=', 'd', 'directory to put final built distributions in'),
('revision=', 'r', 'minor revision number of this build')
])
def sdist(options):
"""Build tar.gz distribution package"""
print('sdist version: %s' % __version__)
# clean previous build
print('Cleaning build...')
for p in ['build']:
pth = path(p)
if pth.isdir():
pth.rmtree()
elif pth.isfile():
pth.remove()
else:
print('Unable to remove %s' % pth)
# remove pre-compiled pycs from tests, I don't know why paver even tries to include them ...
# seems to happen only with sdist though
for pyc in path('tests/').files('*.pyc'):
pyc.remove()
for t in ['minilib', 'generate_setup', 'setuptools.command.sdist']:
call_task(t)
@task
def coverage():
"""Make coverage.flexget.com"""
# --with-coverage --cover-package=flexget --cover-html --cover-html-dir /var/www/flexget_coverage/
import nose
from nose.plugins.manager import DefaultPluginManager
cfg = nose.config.Config(plugins=DefaultPluginManager(), verbosity=2)
argv = ['bin/paver']
argv.extend(['--attr=!online'])
argv.append('--with-coverage')
argv.append('--cover-html')
argv.extend(['--cover-package', 'flexget'])
argv.extend(['--cover-html-dir', '/var/www/flexget_coverage/'])
nose.run(argv=argv, config=cfg)
print('Coverage generated')
@task
@cmdopts([
    ('docs-dir=', 'd', 'directory to put the documentation in')
])
def docs():
if not sphinxcontrib:
print('ERROR: requires sphinxcontrib-paverutils')
sys.exit(1)
from paver import tasks
if not os.path.exists('build'):
os.mkdir('build')
if not os.path.exists(os.path.join('build', 'sphinx')):
os.mkdir(os.path.join('build', 'sphinx'))
setup_section = tasks.environment.options.setdefault("sphinx", Bunch())
setup_section.update(outdir=options.docs.get('docs_dir', 'build/sphinx'))
call_task('sphinxcontrib.paverutils.html')
@task
@might_call('test', 'sdist')
@cmdopts([('no-tests', None, 'skips unit tests')])
def release(options):
"""Run tests then make an sdist if successful."""
if not options.release.get('no_tests'):
if not test():
print('Unit tests did not pass')
sys.exit(1)
print('Making src release')
sdist()
@task
def install_tools():
"""Install development / jenkins tools and dependencies"""
try:
import pip
except ImportError:
print('FATAL: Unable to import pip, please install it and run this again!')
sys.exit(1)
try:
import sphinxcontrib
print('sphinxcontrib INSTALLED')
except ImportError:
pip.main(['install', 'sphinxcontrib-paverutils'])
pip.main(['install', '-r', 'jenkins-requirements.txt'])
@task
def clean_compiled():
for root, dirs, files in os.walk('flexget'):
for name in files:
fqn = os.path.join(root, name)
if fqn[-3:] == 'pyc' or fqn[-3:] == 'pyo' or fqn[-5:] == 'cover':
print('Deleting %s' % fqn)
os.remove(fqn)
@task
@consume_args
def pep8(args):
try:
import pep8
except:
print('Run bin/paver install_tools')
sys.exit(1)
# Ignoring certain errors
ignore = [
'E711', 'E712', # These are comparisons to singletons i.e. == False, and == None. We need these for sqlalchemy.
'W291', 'W293', 'E261',
'E128' # E128 continuation line under-indented for visual indent
]
styleguide = pep8.StyleGuide(show_source=True, ignore=ignore, repeat=1, max_line_length=120,
parse_argv=args)
styleguide.input_dir('flexget')
@task
@cmdopts([
('file=', 'f', 'name of the requirements file to create')
])
def requirements(options):
filename = options.requirements.get('file', 'requirements.txt')
with open(filename, mode='w') as req_file:
req_file.write('\n'.join(options.install_requires))
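# Example usage (hypothetical checkout): `paver requirements -f requirements.txt`
# writes one line per entry of install_requires to the given file; without -f
# the filename defaults to requirements.txt.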
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
module for web framework
'''
import cgi
import datetime
import functools
import json
import logging
import mimetypes
import os
import re
import sys
import threading
import traceback
import types
import urllib
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
from tool import SimpleDict, UTC
# response status and headers
_RE_RESPONSE_STATUS = re.compile(r'^\d\d\d(\ [\w\ ]+)?$')
_RESPONSE_STATUSES = {
# Informational
100: 'Continue',
101: 'Switching Protocols',
102: 'Processing',
# Successful
200: 'OK',
201: 'Created',
202: 'Accepted',
203: 'Non-Authoritative Information',
204: 'No Content',
205: 'Reset Content',
206: 'Partial Content',
207: 'Multi Status',
226: 'IM Used',
# Redirection
300: 'Multiple Choices',
301: 'Moved Permanently',
302: 'Found',
303: 'See Other',
304: 'Not Modified',
305: 'Use Proxy',
307: 'Temporary Redirect',
# Client Error
400: 'Bad Request',
401: 'Unauthorized',
402: 'Payment Required',
403: 'Forbidden',
404: 'Not Found',
405: 'Method Not Allowed',
406: 'Not Acceptable',
407: 'Proxy Authentication Required',
408: 'Request Timeout',
409: 'Conflict',
410: 'Gone',
411: 'Length Required',
412: 'Precondition Failed',
413: 'Request Entity Too Large',
414: 'Request URI Too Long',
415: 'Unsupported Media Type',
416: 'Requested Range Not Satisfiable',
417: 'Expectation Failed',
418: "I'm a teapot",
422: 'Unprocessable Entity',
423: 'Locked',
424: 'Failed Dependency',
426: 'Upgrade Required',
# Server Error
500: 'Internal Server Error',
501: 'Not Implemented',
502: 'Bad Gateway',
503: 'Service Unavailable',
504: 'Gateway Timeout',
505: 'HTTP Version Not Supported',
507: 'Insufficient Storage',
510: 'Not Extended',
}
_RESPONSE_HEADERS = (
'Accept-Ranges',
'Age',
'Allow',
'Cache-Control',
'Connection',
'Content-Encoding',
'Content-Language',
'Content-Length',
'Content-Location',
'Content-MD5',
'Content-Disposition',
'Content-Range',
'Content-Type',
'Date',
'ETag',
'Expires',
'Last-Modified',
'Link',
'Location',
'P3P',
'Pragma',
'Proxy-Authenticate',
'Refresh',
'Retry-After',
'Server',
'Set-Cookie',
'Strict-Transport-Security',
'Trailer',
'Transfer-Encoding',
'Vary',
'Via',
'Warning',
'WWW-Authenticate',
'X-Frame-Options',
'X-XSS-Protection',
'X-Content-Type-Options',
'X-Forwarded-Proto',
'X-Powered-By',
'X-UA-Compatible',
)
_RESPONSE_HEADER_DICT = dict(zip(map(lambda x: x.upper(), _RESPONSE_HEADERS), _RESPONSE_HEADERS))
_HEADER_X_POWERED_BY = ('X-Powered-By', 'transwarp/1.0')
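# Worked example of the case-normalisation table above: looking up an
# upper-cased header name yields its canonical spelling, e.g.
#   _RESPONSE_HEADER_DICT['CONTENT-TYPE'] == 'Content-Type'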
# request error and response info
class HttpError(Exception):
'''
http base error
>>> e = HttpError(404)
>>> e.status
'404 Not Found'
'''
def __init__(self, code):
super(HttpError, self).__init__()
self.status = '%d %s' % (code, _RESPONSE_STATUSES[code])
def header(self, name, value):
if not hasattr(self, '_headers'):
self._headers = [_HEADER_X_POWERED_BY]
self._headers.append((name, value))
@property
def headers(self):
if hasattr(self, '_headers'):
return self._headers
return []
def __str__(self):
return self.status
__repr__ = __str__
def _default_error_handler(e, start_response, is_debug):
if isinstance(e, HttpError):
logging.info('[WEB] [HttpError: %s]' % e.status)
headers = e.headers
headers.append(('Content-Type', 'text/html'))
start_response(e.status, headers)
return ('<html><body><h1>%s</h1></body></html>' % e.status)
logging.exception('[WEB] [Exception:]')
start_response('500 Internal Server Error', [('Content-Type', 'text/html'), _HEADER_X_POWERED_BY])
if is_debug:
        # _debug() is not defined in this module; placeholder for a debug error page
return _debug()
return ('<html><body><h1>500 Internal Server Error</h1><h3>%s</h3></body></html>' % str(e))
def badrequesterror():
'''
bad request error
>>> raise badrequesterror()
Traceback (most recent call last):
...
HttpError: 400 Bad Request
'''
return HttpError(400)
def unauthorizederror():
'''
unauthorized error
>>> raise unauthorizederror()
Traceback (most recent call last):
...
HttpError: 401 Unauthorized
'''
return HttpError(401)
def forbiddenerror():
'''
forbidden error
>>> raise forbiddenerror()
Traceback (most recent call last):
...
HttpError: 403 Forbidden
'''
return HttpError(403)
def notfounderror():
'''
not found error
>>> raise notfounderror()
Traceback (most recent call last):
...
HttpError: 404 Not Found
'''
return HttpError(404)
def conflicterror():
'''
conflict error
>>> raise conflicterror()
Traceback (most recent call last):
...
HttpError: 409 Conflict
'''
return HttpError(409)
def internalerror():
'''
internal error
>>> raise internalerror()
Traceback (most recent call last):
...
HttpError: 500 Internal Server Error
'''
return HttpError(500)
class RedirectError(HttpError):
'''
http redirect error
>>> e = RedirectError(302, 'http://www.apple.com/')
>>> e.status
'302 Found'
>>> e.location
'http://www.apple.com/'
'''
def __init__(self, code, location):
super(RedirectError, self).__init__(code)
self.location = location
def __str__(self):
return '%s, %s' % (self.status, self.location)
__repr__ = __str__
def redirecterror(location):
'''
permanent redirect error
>>> raise redirecterror('http://www.itranswarp.com/')
Traceback (most recent call last):
...
RedirectError: 301 Moved Permanently, http://www.itranswarp.com/
'''
return RedirectError(301, location)
def founderror(location):
'''
temporary redirect error
>>> raise founderror('http://www.itranswarp.com/')
Traceback (most recent call last):
...
RedirectError: 302 Found, http://www.itranswarp.com/
'''
return RedirectError(302, location)
def seeothererror(location):
'''
temporary redirect error
>>> raise seeothererror('http://www.itranswarp.com/')
Traceback (most recent call last):
...
RedirectError: 303 See Other, http://www.itranswarp.com/
>>> e = seeothererror('http://www.itranswarp.com/seeother?r=123')
>>> e.location
'http://www.itranswarp.com/seeother?r=123'
'''
return RedirectError(303, location)
# define request container
def favicon_handler():
return static_file_handler('/favicon.ico')
def _to_unicode(s, encoding='utf-8'):
'''
convert to unicode
>>> _to_unicode('\xe4\xb8\xad\xe6\x96\x87') == u'\u4e2d\u6587'
True
'''
    return s.decode(encoding)
def _unquote(s, encoding='utf-8'):
'''
url unquote as unicode
>>> _unquote('http%3A//example/test%3Fa%3D1+')
u'http://example/test?a=1+'
'''
return urllib.unquote(s).decode(encoding)
class MultipartFile(object):
'''
multipart file storage, get from request input
f = ctx.request['file']
f.filename # 'test.png'
f.file # file-like object
'''
def __init__(self, storage):
self.filename = _to_unicode(storage.filename)
self.file = storage.file
class Request(object):
'''
Request object, obtain http request information
'''
def __init__(self, environ):
self._environ = environ
@property
def environ(self):
'''
get request environ as dict
>>> r = Request({'REQUEST_METHOD': 'GET', 'wsgi.url_scheme':'http'})
>>> r.environ.get('REQUEST_METHOD')
'GET'
>>> r.environ.get('wsgi.url_scheme')
'http'
>>> r.environ.get('SERVER_NAME')
>>> r.environ.get('SERVER_NAME', 'unamed')
'unamed'
'''
return self._environ
def get_body(self):
'''
get raw data from HTTP POST and return as str
>>> from StringIO import StringIO
>>> r = Request({'REQUEST_METHOD':'POST', 'wsgi.input':StringIO('<xml><raw/>')})
>>> r.get_body()
'<xml><raw/>'
'''
fp = self._environ['wsgi.input']
return fp.read()
@property
def remote_addr(self):
'''
get remote addr from environ
>>> r = Request({'REMOTE_ADDR': '192.168.0.100'})
>>> r.remote_addr
'192.168.0.100'
'''
return self._environ.get('REMOTE_ADDR', '0.0.0.0')
@property
def document_root(self):
'''
get document_root from environ
>>> r = Request({'DOCUMENT_ROOT': '/srv/path/to/doc'})
>>> r.document_root
'/srv/path/to/doc'
'''
return self._environ.get('DOCUMENT_ROOT', '')
@property
def request_method(self):
'''
get method from environ
>>> r = Request({'REQUEST_METHOD': 'GET'})
>>> r.request_method
'GET'
>>> r = Request({'REQUEST_METHOD': 'POST'})
>>> r.request_method
'POST'
'''
return self._environ.get('REQUEST_METHOD')
@property
def host(self):
'''
get host from environ
>>> r = Request({'HTTP_HOST': 'localhost:8080'})
>>> r.host
'localhost:8080'
'''
return self._environ.get('HTTP_HOST', '')
@property
def path_info(self):
'''
get path from environ
>>> r = Request({'PATH_INFO': '/test/a%20b.html'})
>>> r.path_info
'/test/a b.html'
'''
return urllib.unquote(self._environ.get('PATH_INFO', ''))
@property
def query_string(self):
'''
get query string from environ
>>> r = Request({'QUERY_STRING': 'a=1&c=2'})
>>> r.query_string
'a=1&c=2'
>>> r = Request({})
>>> r.query_string
''
'''
return self._environ.get('QUERY_STRING', '')
def _get_headers(self):
if not hasattr(self, '_headers'):
hdrs = {}
for k, v in self._environ.iteritems():
if k.startswith('HTTP_'):
# convert 'HTTP_ACCEPT_ENCODING' to 'ACCEPT-ENCODING'
hdrs[k[5:].replace('_', '-').upper()] = v.decode('utf-8')
self._headers = hdrs
return self._headers
def header(self, header, default=None):
'''
get header by key from request
>>> r = Request({'HTTP_USER_AGENT': 'Mozilla/5.0', 'HTTP_ACCEPT': 'text/html'})
>>> r.header('User-Agent')
u'Mozilla/5.0'
>>> r.header('USER-AGENT')
u'Mozilla/5.0'
>>> r.header('Accept')
u'text/html'
>>> r.header('Test')
>>> r.header('Test', u'DEFAULT')
u'DEFAULT'
'''
return self._get_headers().get(header.upper(), default)
@property
def headers(self):
'''
get all HTTP headers from request
>>> r = Request({'HTTP_USER_AGENT': 'Mozilla/5.0', 'HTTP_ACCEPT': 'text/html'})
>>> H = r.headers
>>> H['ACCEPT']
u'text/html'
>>> H['USER-AGENT']
u'Mozilla/5.0'
>>> L = H.items()
>>> L.sort()
>>> L
[('ACCEPT', u'text/html'), ('USER-AGENT', u'Mozilla/5.0')]
'''
return dict(**self._get_headers())
def _get_cookies(self):
if not hasattr(self, '_cookies'):
cookies = {}
cookie_str = self._environ.get('HTTP_COOKIE')
if cookie_str:
for c in cookie_str.split(';'):
pos = c.find('=')
if pos > 0:
cookies[c[:pos].strip()] = _unquote(c[pos + 1:])
self._cookies = cookies
return self._cookies
def cookie(self, name, default=None):
'''
get cookie by name
>>> r = Request({'HTTP_COOKIE':'A=123; url=http%3A%2F%2Fwww.example.com%2F'})
>>> r.cookie('A')
u'123'
>>> r.cookie('url')
u'http://www.example.com/'
>>> r.cookie('test')
>>> r.cookie('test', u'DEFAULT')
u'DEFAULT'
'''
return self._get_cookies().get(name, default)
@property
def cookies(self):
'''
get all cookies
>>> r = Request({'HTTP_COOKIE':'A=123; url=http%3A%2F%2Fwww.example.com%2F'})
>>> r.cookies['A']
u'123'
>>> r.cookies['url']
u'http://www.example.com/'
'''
return SimpleDict(**self._get_cookies())
def _parse_input(self):
def _convert(item):
if isinstance(item, list):
return [ _to_unicode(i.value) for i in item ]
if item.filename:
return MultipartFile(item)
return _to_unicode(item.value)
fs = cgi.FieldStorage(fp=self._environ['wsgi.input'], environ=self._environ, keep_blank_values=True)
inputs = dict()
for key in fs:
inputs[key] = _convert(fs[key])
return inputs
def _get_raw_input(self):
if not hasattr(self, '_raw_input'):
self._raw_input = self._parse_input()
return self._raw_input
def get(self, key, default=None):
'''
get value by key
>>> from StringIO import StringIO
>>> r = Request({'REQUEST_METHOD':'POST', 'wsgi.input':StringIO('a=1&b=M%20M&c=ABC&c=XYZ&e=')})
>>> r.get('a')
u'1'
>>> r.get('c')
u'ABC'
>>> r.get('empty')
>>> r.get('empty', 'DEFAULT')
'DEFAULT'
'''
r = self._get_raw_input().get(key, default)
if isinstance(r, list):
return r[0]
return r
def gets(self, key):
'''
get multiple values by key
>>> from StringIO import StringIO
>>> r = Request({'REQUEST_METHOD':'POST', 'wsgi.input':StringIO('a=1&b=M%20M&c=ABC&c=XYZ&e=')})
>>> r.gets('a')
[u'1']
>>> r.gets('c')
[u'ABC', u'XYZ']
>>> r.gets('empty')
Traceback (most recent call last):
...
KeyError: 'empty'
'''
r = self._get_raw_input()[key]
if isinstance(r, list):
return r[:]
return [r]
def __getitem__(self, key):
'''
get input parameter value
>>> from StringIO import StringIO
>>> r = Request({'REQUEST_METHOD':'POST', 'wsgi.input':StringIO('a=1&b=M%20M&c=ABC&c=XYZ&e=')})
>>> r['a']
u'1'
>>> r['c']
u'ABC'
>>> r['empty']
Traceback (most recent call last):
...
KeyError: 'empty'
>>> b = '----WebKitFormBoundaryQQ3J8kPsjFpTmqNz'
>>> pl = ['--%s' % b, 'Content-Disposition: form-data; name=\\"name\\"\\n', 'Scofield', '--%s' % b, 'Content-Disposition: form-data; name=\\"name\\"\\n', 'Lincoln', '--%s' % b, 'Content-Disposition: form-data; name=\\"file\\"; filename=\\"test.txt\\"', 'Content-Type: text/plain\\n', 'just a test', '--%s' % b, 'Content-Disposition: form-data; name=\\"id\\"\\n', '4008009001', '--%s--' % b, '']
>>> payload = '\\n'.join(pl)
>>> r = Request({'REQUEST_METHOD':'POST', 'CONTENT_LENGTH':str(len(payload)), 'CONTENT_TYPE':'multipart/form-data; boundary=%s' % b, 'wsgi.input':StringIO(payload)})
>>> r.get('name')
u'Scofield'
>>> r.gets('name')
[u'Scofield', u'Lincoln']
>>> r['name']
u'Scofield'
>>> f = r.get('file')
>>> f.filename
u'test.txt'
>>> f.file.read()
'just a test'
'''
r = self._get_raw_input()[key]
if isinstance(r, list):
return r[0]
return r
def input(self, **kw):
'''
Get input as dict from request, filling the dict with the provided default values for keys that do not exist.
i = ctx.request.input(role='guest')
i.role ==> 'guest'
>>> from StringIO import StringIO
>>> r = Request({'REQUEST_METHOD':'POST', 'wsgi.input':StringIO('a=1&b=M%20M&c=ABC&c=XYZ&e=')})
>>> i = r.input(x=2008)
>>> i.a
u'1'
>>> i.b
u'M M'
>>> i.c
u'ABC'
>>> i.x
2008
>>> i.get('d', u'100')
u'100'
>>> i.x
2008
'''
copy = SimpleDict(**kw)
raw = self._get_raw_input()
for k, v in raw.iteritems():
copy[k] = v[0] if isinstance(v, list) else v
return copy
# define response container
UTC_0 = UTC('+00:00')
def _to_str(s):
'''
convert to str
>>> _to_str('s123') == 's123'
True
>>> _to_str(u'\u4e2d\u6587') == '\xe4\xb8\xad\xe6\x96\x87'
True
>>> _to_str(-123) == '-123'
True
'''
if isinstance(s, str):
return s
if isinstance(s, unicode):
return s.encode('utf-8')
return str(s)
def _quote(s, encoding='utf-8'):
'''
url quote as str
>>> _quote('http://example/test?a=1+')
'http%3A//example/test%3Fa%3D1%2B'
>>> _quote(u'hello world!')
'hello%20world%21'
'''
if isinstance(s, unicode):
s = s.encode(encoding)
return urllib.quote(s)
class Response(object):
def __init__(self):
self._status = '200 OK'
self._headers = {'CONTENT-TYPE': 'text/html; charset=utf-8'}
@property
def status(self):
'''
get status
>>> r = Response()
>>> r.status
'200 OK'
'''
return self._status
@status.setter
def status(self, value):
'''
set status as int or str
>>> r = Response()
>>> r.status = 404
>>> r.status
'404 Not Found'
>>> r.status = '500 ERR'
>>> r.status
'500 ERR'
>>> r.status = u'403 Denied'
>>> r.status
'403 Denied'
>>> r.status = 99
Traceback (most recent call last):
...
ValueError: Bad response code: 99
>>> r.status = 'ok'
Traceback (most recent call last):
...
ValueError: Bad response code: ok
>>> r.status = [1, 2, 3]
Traceback (most recent call last):
...
TypeError: Bad type of response code.
'''
if isinstance(value, (int, long)):
if value >= 100 and value <= 999:
st = _RESPONSE_STATUSES.get(value, '')
if st:
self._status = '%d %s' % (value, st)
else:
self._status = str(value)
else:
raise ValueError('Bad response code: %d' % value)
elif isinstance(value, basestring):
if isinstance(value, unicode):
value = value.encode('utf-8')
if _RE_RESPONSE_STATUS.match(value):
self._status = value
else:
raise ValueError('Bad response code: %s' % value)
else:
raise TypeError('Bad type of response code.')
@property
def status_code(self):
'''
get status code
>>> r = Response()
>>> r.status_code
200
>>> r.status = 404
>>> r.status_code
404
>>> r.status = '500 Internal Error'
>>> r.status_code
500
'''
return int(self._status[:3])
def set_cookie(self, name, value, max_age=None, expires=None, path='/', domain=None, secure=False, http_only=True):
'''
set cookie by name and value
>>> r = Response()
>>> r.set_cookie('company', 'Abc, Inc.', max_age=3600)
>>> r._cookies
{'company': 'company=Abc%2C%20Inc.; Max-Age=3600; Path=/; HttpOnly'}
>>> r.set_cookie('company', r'Example="Limited"', expires=1342274794.123, path='/sub/')
>>> r._cookies
{'company': 'company=Example%3D%22Limited%22; Expires=Sat, 14-Jul-2012 14:06:34 GMT; Path=/sub/; HttpOnly'}
>>> dt = datetime.datetime(2012, 7, 14, 22, 6, 34, tzinfo=UTC('+8:00'))
>>> r.set_cookie('company', 'Expires', expires=dt)
>>> r._cookies
{'company': 'company=Expires; Expires=Sat, 14-Jul-2012 14:06:34 GMT; Path=/; HttpOnly'}
'''
if not hasattr(self, '_cookies'):
self._cookies = {}
L = [ '%s=%s' % (_quote(name), _quote(value)) ]
if expires is not None:
if isinstance(expires, (float, int, long)):
L.append('Expires=%s' % datetime.datetime.fromtimestamp(expires, UTC_0).strftime('%a, %d-%b-%Y %H:%M:%S GMT'))
if isinstance(expires, (datetime.date, datetime.datetime)):
L.append('Expires=%s' % expires.astimezone(UTC_0).strftime('%a, %d-%b-%Y %H:%M:%S GMT'))
elif isinstance(max_age, (int, long)):
L.append('Max-Age=%d' % max_age)
L.append('Path=%s' % path)
if domain:
L.append('Domain=%s' % domain)
if secure:
L.append('Secure')
if http_only:
L.append('HttpOnly')
self._cookies[name] = '; '.join(L)
def delete_cookie(self, name):
'''
set cookie expires as 0 by name
'''
self.set_cookie(name, '__delete__', expires=0)
def unset_cookie(self, name):
'''
unset cookie by name
>>> r = Response()
>>> r.set_cookie('company', 'Abc, Inc.', max_age=3600)
>>> r._cookies
{'company': 'company=Abc%2C%20Inc.; Max-Age=3600; Path=/; HttpOnly'}
>>> r.unset_cookie('company')
>>> r._cookies
{}
'''
if hasattr(self, '_cookies'):
if name in self._cookies:
del self._cookies[name]
def header(self, name):
'''
get header by name
>>> r = Response()
>>> r.header('content-type')
'text/html; charset=utf-8'
>>> r.header('CONTENT-type')
'text/html; charset=utf-8'
>>> r.header('X-Powered-By')
'''
key = name.upper()
if key not in _RESPONSE_HEADER_DICT:
key = name
return self._headers.get(key)
def set_header(self, name, value):
'''
set header by name and value
>>> r = Response()
>>> r.header('content-type')
'text/html; charset=utf-8'
>>> r.set_header('CONTENT-type', 'image/png')
>>> r.header('content-TYPE')
'image/png'
'''
key = name.upper()
if key not in _RESPONSE_HEADER_DICT:
key = name
self._headers[key] = _to_str(value)
def unset_header(self, name):
'''
unset header by name
>>> r = Response()
>>> r.header('content-type')
'text/html; charset=utf-8'
>>> r.unset_header('CONTENT-type')
>>> r.header('content-type')
'''
key = name.upper()
if key not in _RESPONSE_HEADER_DICT:
key = name
if key in self._headers:
del self._headers[key]
@property
def headers(self):
'''
get all response headers and cookies
>>> r = Response()
>>> r.headers
[('Content-Type', 'text/html; charset=utf-8'), ('X-Powered-By', 'transwarp/1.0')]
>>> r.set_cookie('s1', 'ok', 3600)
>>> r.headers
[('Content-Type', 'text/html; charset=utf-8'), ('Set-Cookie', 's1=ok; Max-Age=3600; Path=/; HttpOnly'), ('X-Powered-By', 'transwarp/1.0')]
'''
L = [ (_RESPONSE_HEADER_DICT.get(k, k), v) for k, v in self._headers.iteritems() ]
if hasattr(self, '_cookies'):
for v in self._cookies.itervalues():
L.append(('Set-Cookie', v))
L.append(_HEADER_X_POWERED_BY)
return L
@property
def content_type(self):
'''
get content type from response header
>>> r = Response()
>>> r.content_type
'text/html; charset=utf-8'
>>> r.content_type = 'application/json'
>>> r.content_type
'application/json'
'''
return self.header('CONTENT-TYPE')
@content_type.setter
def content_type(self, value):
'''
set content type for response header
'''
if value:
self.set_header('CONTENT-TYPE', value)
else:
self.unset_header('CONTENT-TYPE')
@property
def content_length(self):
'''
get content length from response header
>>> r = Response()
>>> r.content_length
>>> r.content_length = 100
>>> r.content_length
'100'
'''
return self.header('CONTENT-LENGTH')
@content_length.setter
def content_length(self, value):
'''
set content length for response header
>>> r = Response()
>>> r.content_length = '1024'
>>> r.content_length
'1024'
>>> r.content_length = 1024 * 8
>>> r.content_length
'8192'
'''
self.set_header('CONTENT-LENGTH', str(value))
# define template
class TemplateEngine(object):
'''
base template engine
'''
def __call__(self, path, model):
return '<!-- override this method to render template -->'
class Jinja2TemplateEngine(TemplateEngine):
'''
render using Jinja2 template engine
>>> templ_path = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), 'templates')
>>> engine = Jinja2TemplateEngine(templ_path)
>>> engine.add_filter('datetime', lambda dt: dt.strftime('%Y-%m-%d %H:%M:%S'))
>>> engine('test_jinja2.html', dict(name='Michael', posted_at=datetime.datetime(2014, 6, 1, 10, 11, 12)))
'<p>Hello, Michael.</p><span>2014-06-01 10:11:12</span>'
'''
def __init__(self, templ_dir, **kw):
from jinja2 import Environment, FileSystemLoader
if 'autoescape' not in kw:
kw['autoescape'] = True
self._env = Environment(loader=FileSystemLoader(templ_dir), **kw)
def add_filter(self, name, fn_filter):
self._env.filters[name] = fn_filter
def __call__(self, path, model):
return self._env.get_template(path).render(**model).encode('utf-8')
class Template(object):
'''
Template object
>>> t = Template('hello.html', title='Hello', copyright='@2012')
>>> t.model['title']
'Hello'
>>> t.model['copyright']
'@2012'
>>> t = Template('test.html', abc=u'ABC', xyz=u'XYZ')
>>> t.model['abc']
u'ABC'
'''
def __init__(self, template_name, **kw):
self.template_name = template_name
self.model = dict(**kw)
def view(path):
'''
decorator for rendering a Template
>>> @view('test/view.html')
... def hello():
... return dict(name='Bob')
>>> t = hello()
>>> isinstance(t, Template)
True
>>> t.template_name
'test/view.html'
>>> @view('test/view.html')
... def hello2():
... return ['a list']
>>> t = hello2()
Traceback (most recent call last):
...
ValueError: Expect return a dict when using @view() decorator.
'''
def _decorator(func):
@functools.wraps(func)
def _wrapper(*args, **kw):
r = func(*args, **kw)
if isinstance(r, dict):
logging.info('[WEB] [return Template]')
return Template(path, **r)
raise ValueError('Expect return a dict when using @view() decorator.')
return _wrapper
return _decorator
# define route
_RE_ROUTE = re.compile(r'(\:[a-zA-Z_]\w*)')
def _build_regex(path):
r'''
convert route path to regex
>>> _build_regex('/path/to')
'^\\/path\\/to$'
>>> _build_regex('/path/to/:file')
'^\\/path\\/to\\/(?P<file>[^\\/]+)$'
>>> _build_regex('/:user/:comments/list')
'^\\/(?P<user>[^\\/]+)\\/(?P<comments>[^\\/]+)\\/list$'
>>> _build_regex(':id-:pid/:w')
'^(?P<id>[^\\/]+)\\-(?P<pid>[^\\/]+)\\/(?P<w>[^\\/]+)$'
'''
re_list = ['^']
var_list = []
is_var = False
for v in _RE_ROUTE.split(path):
if is_var:
var_name = v[1:]
var_list.append(var_name)
re_list.append(r'(?P<%s>[^\/]+)' % var_name)
else:
s = ''
for ch in v:
if ch>='0' and ch<='9':
s = s + ch
elif ch>='A' and ch<='Z':
s = s + ch
elif ch>='a' and ch<='z':
s = s + ch
else:
s = s + '\\' + ch
re_list.append(s)
is_var = not is_var
re_list.append('$')
return ''.join(re_list)
class Route(object):
'''
Route object, callable
'''
def __init__(self, func):
self.path = func.__web_route__
self.method = func.__web_method__
self.is_static = _RE_ROUTE.search(self.path) is None
if not self.is_static:
self.route = re.compile(_build_regex(self.path))
self.func = func
def match(self, url):
m = self.route.match(url)
if m:
return m.groups()
return None
def __call__(self, *args):
return self.func(*args)
def __str__(self):
if self.is_static:
return 'Route(static,%s,path=%s)' % (self.method, self.path)
return 'Route(dynamic,%s,path=%s)' % (self.method, self.path)
__repr__ = __str__
def get(path):
'''
decorator for GET method
>>> @get('/test/:id')
... def test():
... return 'ok'
...
>>> test.__web_route__
'/test/:id'
>>> test.__web_method__
'GET'
>>> test()
'ok'
'''
def _decorator(func):
func.__web_route__ = path
func.__web_method__ = 'GET'
return func
return _decorator
def post(path):
'''
decorator for POST method
>>> @post('/test/:id')
... def test():
... return 'ok'
...
>>> test.__web_route__
'/test/:id'
>>> test.__web_method__
'POST'
>>> test()
'ok'
'''
def _decorator(func):
func.__web_route__ = path
func.__web_method__ = 'POST'
return func
return _decorator
def _generate_static_file(fpath):
BLOCK_SIZE = 8192
with open(fpath, 'rb') as f:
block = f.read(BLOCK_SIZE)
while block:
yield block
block = f.read(BLOCK_SIZE)
class StaticFileRoute(object):
def __init__(self):
self.method = 'GET'
self.route = re.compile('^/static/(.+)$')
self.is_static = True
def match(self, url):
if url.startswith('/static/'):
return (url[1:],)
return None
def __call__(self, *args):
fpath = os.path.join(ctx.application.document_root, args[0])
if not os.path.isfile(fpath):
raise notfounderror()
fext = os.path.splitext(fpath)[1]
ctx.response.content_type = mimetypes.types_map.get(fext.lower(), 'application/octet-stream')
return _generate_static_file(fpath)
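# Illustrative note: with debug=True, a GET for '/static/css/site.css' is matched
# by StaticFileRoute, which streams <document_root>/static/css/site.css in 8 KB
# blocks and sets Content-Type from the file extension (the path is made up).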
# define RESTful API
class APIError(StandardError):
'''
base APIError
'''
def __init__(self, error, data='', message=''):
super(APIError, self).__init__(message)
self.error = error
self.data = data
self.message = message
class APIValueError(APIError):
def __init__(self, field, message=''):
super(APIValueError, self).__init__('value:invalid', field, message)
class APIResourceNotFoundError(APIError):
def __init__(self, field, message=''):
super(APIResourceNotFoundError, self).__init__('value:notfound', field, message)
class APIPermissionError(APIError):
def __init__(self, message=''):
super(APIPermissionError, self).__init__('permission:forbidden', 'permission', message)
class Page(object):
'''
Page object for display pages
'''
def __init__(self, item_count, page_index=1, page_size=10):
'''
init pagination by item_count, page_index and page_size
>>> p1 = Page(100, 1)
>>> p1.page_count
10
>>> p1.offset
0
>>> p1.limit
10
>>> p2 = Page(90, 9, 10)
>>> p2.page_count
9
>>> p2.offset
80
>>> p2.limit
10
>>> p3 = Page(91, 10, 10)
>>> p3.page_count
10
>>> p3.offset
90
>>> p3.limit
10
'''
self.item_count = item_count
self.page_size = page_size
self.page_count = (item_count / page_size) + (1 if item_count % page_size > 0 else 0)
if (item_count == 0) or (page_index < 1) or (page_index > self.page_count):
self.offset = 0
self.limit = 0
self.page_index = 1
else:
self.page_index = page_index
self.offset = self.page_size * (self.page_index - 1)
self.limit = self.page_size
self.has_next = self.page_index < self.page_count
self.has_previous = self.page_index > 1
def __str__(self):
return 'item_count: %s, page_count: %s, page_index: %s, page_size: %s, offset: %s, limit: %s' % (self.item_count, self.page_count, self.page_index, self.page_size, self.offset, self.limit)
__repr__ = __str__
def _page_dump(obj):
if isinstance(obj, Page):
return {
'page_index': obj.page_index,
'page_count': obj.page_count,
'item_count': obj.item_count,
'has_next': obj.has_next,
'has_previous': obj.has_previous
}
raise TypeError('%s is not JSON serializable' % obj)
def restful_api_dumps(obj):
return json.dumps(obj, default=_page_dump)
def api(func):
'''
decorator for RESTful API
@api
@get('/api/test')
def test():
return dict(result='123', item=[])
'''
@functools.wraps(func)
def _wrapper(*args, **kw):
try:
r = restful_api_dumps(func(*args, **kw))
except APIError, e:
r = json.dumps(dict(error=e.error, data=e.data, message=e.message))
except Exception, e:
logging.exception(e)
r = json.dumps(dict(error='internalerror', data=e.__class__.__name__, message=e.message))
ctx.response.content_type = 'application/json'
return r
return _wrapper
# define interceptor
_RE_INTERCEPTOR_STARTS_WITH = re.compile(r'^([^\*\?]+)\*?$')
_RE_INTERCEPTOR_ENDS_WITH = re.compile(r'^\*([^\*\?]+)$')
def _build_pattern_fn(pattern):
m = _RE_INTERCEPTOR_STARTS_WITH.match(pattern)
if m:
return lambda p: p.startswith(m.group(1))
m = _RE_INTERCEPTOR_ENDS_WITH.match(pattern)
if m:
return lambda p: p.endswith(m.group(1))
raise ValueError('Invalid pattern definition in interceptor.')
def interceptor(pattern='/'):
'''
decorator for interceptor
'''
def _decorator(func):
func.__interceptor__ = _build_pattern_fn(pattern)
return func
return _decorator
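# A hedged example of an interceptor (illustrative; 'api_logger' is a made-up
# name): per _build_pattern_fn, it only runs for paths starting with '/api/'.
#
#   @interceptor('/api/')
#   def api_logger(next):
#       logging.info('[WEB] [api request: %s]' % ctx.request.path_info)
#       return next()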
def _build_interceptor_fn(func, next):
def _wrapper():
if func.__interceptor__(ctx.request.path_info):
return func(next)
else:
return next()
return _wrapper
def _build_interceptor_chain(last_fn, *interceptors):
'''
build interceptors chain
>>> def target():
... print 'target'
... return 123
>>> @interceptor('/')
... def f1(next):
... print 'before f1()'
... return next()
>>> @interceptor('/test')
... def f2(next):
... print 'before f2()'
... try:
... return next()
... finally:
... print 'after f2()'
>>> @interceptor('/test/abc')
... def f3(next):
... print 'before f3()'
... try:
... return next()
... finally:
... print 'after f3()'
>>> chain = _build_interceptor_chain(target, f1, f2, f3)
>>> ctx.request = SimpleDict(path_info='/test/abc')
>>> chain()
before f1()
before f2()
before f3()
target
after f3()
after f2()
123
>>> ctx.request = SimpleDict(path_info='/api/test/abc')
>>> chain()
before f1()
target
123
'''
L = list(interceptors)
L.reverse()
fn = last_fn
for f in L:
fn = _build_interceptor_fn(f, fn)
return fn
# global threading.local object, store request and response
ctx = threading.local()
# define wsgi application
def _load_module(module_name):
'''
load module from name as str
>>> m = _load_module('xml')
>>> m.__name__
'xml'
>>> m = _load_module('xml.sax')
>>> m.__name__
'xml.sax'
>>> m = _load_module('xml.sax.handler')
>>> m.__name__
'xml.sax.handler'
'''
last_dot = module_name.rfind('.')
if last_dot == (-1):
return __import__(module_name, globals(), locals())
from_module = module_name[:last_dot]
import_module = module_name[last_dot + 1:]
m = __import__(from_module, globals(), locals(), [import_module])
return getattr(m, import_module)
class WSGIApplication(object):
def __init__(self, document_root=None, **kw):
self._running = False
self._document_root = document_root
self._template_engine = None
self._interceptors = []
self._get_static = {}
self._get_dynamic = []
self._post_static = {}
self._post_dynamic = []
def _check_not_running(self):
if self._running:
raise RuntimeError('Cannot modify WSGIApplication when running.')
@property
def template_engine(self):
return self._template_engine
@template_engine.setter
def template_engine(self, engine):
self._check_not_running()
self._template_engine = engine
def add_interceptor(self, func):
self._check_not_running()
self._interceptors.append(func)
logging.info('[WEB] [add interceptor: %s]' % str(func))
def add_url(self, func):
self._check_not_running()
route = Route(func)
if route.is_static:
if route.method == 'GET':
self._get_static[route.path] = route
if route.method == 'POST':
self._post_static[route.path] = route
else:
if route.method == 'GET':
self._get_dynamic.append(route)
if route.method == 'POST':
self._post_dynamic.append(route)
logging.info('[WEB] [add route: %s]' % str(route))
def add_module(self, mod):
self._check_not_running()
m = mod if type(mod) == types.ModuleType else _load_module(mod)
logging.info('[WEB] [add module: %s]' % m.__name__)
for name in dir(m):
fn = getattr(m, name)
if callable(fn) and hasattr(fn, '__web_route__') and hasattr(fn, '__web_method__'):
self.add_url(fn)
def get_wsgi_application(self, debug=False):
self._check_not_running()
if debug:
self._get_dynamic.append(StaticFileRoute())
self._running = True
_application = SimpleDict(document_root=self._document_root)
def fn_route():
request_method = ctx.request.request_method
path_info = ctx.request.path_info
if request_method == 'GET':
fn = self._get_static.get(path_info, None)
if fn:
return fn()
for fn in self._get_dynamic:
args = fn.match(path_info)
if args:
return fn(*args)
raise notfounderror()
if request_method == 'POST':
fn = self._post_static.get(path_info, None)
if fn:
return fn()
for fn in self._post_dynamic:
args = fn.match(path_info)
if args:
return fn(*args)
raise notfounderror()
raise badrequesterror()
fn_exec = _build_interceptor_chain(fn_route, *self._interceptors)
def wsgi(env, start_response):
ctx.application = _application
ctx.request = Request(env)
_response = ctx.response = Response()
try:
r = fn_exec()
if isinstance(r, Template):
r = self._template_engine(r.template_name, r.model)
if isinstance(r, unicode):
r = r.encode('utf-8')
if r is None:
r = []
start_response(_response.status, _response.headers)
return r
except RedirectError, e:
_response.set_header('Location', e.location)
start_response(e.status, _response.headers)
return []
except HttpError, e:
start_response(e.status, _response.headers)
return ['<html><body><h1>', e.status, '</h1></body></html>']
except Exception, e:
logging.exception(e)
if not debug:
start_response('500 Internal Server Error', [])
return ['<html><body><h1>500 Internal Server Error</h1></body></html>']
exc_type, exc_value, exc_traceback = sys.exc_info()
fp = StringIO()
traceback.print_exception(exc_type, exc_value, exc_traceback, file=fp)
stacks = fp.getvalue()
fp.close()
start_response('500 Internal Server Error', [])
return [r'''<html><body><h1>500 Internal Server Error</h1>
<div style="font-family:Monaco, Menlo, Consolas, 'Courier New', monospace;"><pre>''',
stacks.replace('<', '&lt;').replace('>', '&gt;'),
'</pre></div></body></html>']
finally:
del ctx.application
del ctx.request
del ctx.response
return wsgi
def run(self, port=9000, host='127.0.0.1'):
from wsgiref.simple_server import make_server
logging.info('[WEB] [application (%s) will start at %s:%s...]' % (self._document_root, host, port))
server = make_server(host, port, self.get_wsgi_application(debug=True))
server.serve_forever()
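# Minimal wiring sketch (illustrative; the 'urls' module and the 'templates'
# directory are assumptions, not defined in this file):
#
#   document_root = os.path.dirname(os.path.abspath(__file__))
#   wsgi = WSGIApplication(document_root)
#   wsgi.template_engine = Jinja2TemplateEngine(os.path.join(document_root, 'templates'))
#   wsgi.add_module('urls')   # module containing @get/@post handler functions
#   wsgi.run(port=9000)       # serves via wsgiref with debug=True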
if __name__ == '__main__':
import doctest
doctest.testmod()
|
|
# Copyright 2015 Google Inc. All Rights Reserved.
"""Starts the vtcombo process."""
import json
import logging
import os
import socket
import subprocess
import time
import urllib
from google.protobuf import text_format
from vttest import environment
class VtProcess(object):
"""Base class for a vt process, vtcombo only now."""
START_RETRIES = 5
def __init__(self, name, directory, binary, port_name):
self.name = name
self.directory = directory
self.binary = binary
self.extraparams = []
self.port_name = port_name
self.process = None
def wait_start(self):
"""Start the process and wait for it to respond on HTTP."""
for _ in xrange(0, self.START_RETRIES):
self.port = environment.get_port(self.port_name)
if environment.get_protocol() == 'grpc':
self.grpc_port = environment.get_port(self.port_name, protocol='grpc')
else:
self.grpc_port = None
logs_subdirectory = environment.get_logs_directory(self.directory)
cmd = [
self.binary,
'-port', '%u' % self.port,
'-log_dir', logs_subdirectory,
]
if environment.get_protocol() == 'grpc':
cmd.extend(['-grpc_port', '%u' % self.grpc_port])
cmd.extend(self.extraparams)
logging.info('Starting process: %s', cmd)
stdout = os.path.join(logs_subdirectory, '%s.%d.log' %
(self.name, self.port))
self.stdout = open(stdout, 'w')
self.process = subprocess.Popen(cmd,
stdout=self.stdout,
stderr=subprocess.STDOUT)
timeout = time.time() + 60.0
while time.time() < timeout:
if environment.process_is_healthy(
self.name, self.addr()) and self.get_vars():
logging.info('%s started.', self.name)
return
elif self.process.poll() is not None:
logging.error('%s process exited prematurely.', self.name)
break
time.sleep(0.3)
logging.error('cannot start %s process in time: %s',
self.name, socket.getfqdn())
self.kill()
raise Exception('Failed %d times to run %s' % (
self.START_RETRIES,
self.name))
def addr(self):
"""Return the host:port of the process."""
return '%s:%u' % (socket.getfqdn(), self.port)
def grpc_addr(self):
"""Get the grpc address of the process.
Returns:
the grpc host:port of the process.
Only call this if environment.get_protocol() == 'grpc'.
"""
return '%s:%u' % (socket.getfqdn(), self.grpc_port)
def get_vars(self):
"""Return the debug vars."""
data = None
try:
url = 'http://%s/debug/vars' % self.addr()
f = urllib.urlopen(url)
data = f.read()
f.close()
except IOError:
return None
try:
return json.loads(data)
except ValueError:
logging.error('%s', data)
raise
def kill(self):
"""Kill the process."""
# These will proceed without error even if the process is already gone.
self.process.terminate()
def wait(self):
"""Wait for the process to end."""
self.process.wait()
class VtcomboProcess(VtProcess):
"""Represents a vtcombo subprocess."""
QUERYSERVER_PARAMETERS = [
'-queryserver-config-pool-size', '4',
'-queryserver-config-query-timeout', '300',
'-queryserver-config-schema-reload-time', '60',
'-queryserver-config-stream-pool-size', '4',
'-queryserver-config-transaction-cap', '4',
'-queryserver-config-transaction-timeout', '300',
'-queryserver-config-txpool-timeout', '300',
]
def __init__(self, directory, topology, mysql_db, schema_dir, charset,
web_dir=None, web_dir2=None):
VtProcess.__init__(self, 'vtcombo-%s' % os.environ['USER'], directory,
environment.vtcombo_binary, port_name='vtcombo')
self.extraparams = [
'-db-config-app-charset', charset,
'-db-config-app-uname', mysql_db.username(),
'-db-config-app-pass', mysql_db.password(),
'-db-config-dba-charset', charset,
'-db-config-dba-uname', mysql_db.username(),
'-db-config-dba-pass', mysql_db.password(),
'-proto_topo', text_format.MessageToString(topology, as_one_line=True),
'-mycnf_server_id', '1',
'-mycnf_socket_file', mysql_db.unix_socket(),
] + self.QUERYSERVER_PARAMETERS + environment.extra_vtcombo_parameters()
if schema_dir:
self.extraparams.extend(['-schema_dir', schema_dir])
if web_dir:
self.extraparams.extend(['-web_dir', web_dir])
if web_dir2:
self.extraparams.extend(['-web_dir2', web_dir2])
if mysql_db.unix_socket():
self.extraparams.extend(
['-db-config-app-unixsocket', mysql_db.unix_socket(),
'-db-config-dba-unixsocket', mysql_db.unix_socket()])
else:
self.extraparams.extend(
['-db-config-app-host', mysql_db.hostname(),
'-db-config-app-port', str(mysql_db.port()),
'-db-config-dba-host', mysql_db.hostname(),
'-db-config-dba-port', str(mysql_db.port())])
vtcombo_process = None
def start_vt_processes(directory, topology, mysql_db, schema_dir,
charset='utf8', web_dir=None, web_dir2=None):
"""Start the vt processes.
Args:
directory: the toplevel directory for the processes (logs, ...)
topology: a vttest.VTTestTopology object.
mysql_db: an instance of the mysql_db.MySqlDB class.
schema_dir: the directory that contains the schema / vschema.
charset: the character set for the database connections.
web_dir: contains the web app for vtctld side of vtcombo.
web_dir2: contains the second web app directory for the vtctld side of vtcombo.
"""
global vtcombo_process
logging.info('start_vt_processes(directory=%s,vtcombo_binary=%s)',
directory, environment.vtcombo_binary)
vtcombo_process = VtcomboProcess(directory, topology, mysql_db, schema_dir,
charset, web_dir=web_dir, web_dir2=web_dir2)
vtcombo_process.wait_start()
def kill_vt_processes():
"""Call kill() on all processes."""
logging.info('kill_vt_processes()')
if vtcombo_process:
vtcombo_process.kill()
def wait_vt_processes():
"""Call wait() on all processes."""
logging.info('wait_vt_processes()')
if vtcombo_process:
vtcombo_process.wait()
def kill_and_wait_vt_processes():
"""Call kill() and then wait() on all processes."""
kill_vt_processes()
wait_vt_processes()
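# Typical lifecycle sketch (illustrative; 'topology' and 'db' stand in for a
# vttest.VTTestTopology protobuf and a mysql_db.MySqlDB instance created
# elsewhere):
#
#   start_vt_processes('/tmp/vttest', topology, db, schema_dir=None)
#   try:
#       pass  # run queries against vtcombo_process.addr()
#   finally:
#       kill_and_wait_vt_processes()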
# wait_step is a helper for looping until a condition is true.
# use as follows:
# timeout = 10
# while True:
# if done:
# break
# timeout = utils.wait_step('condition', timeout)
def wait_step(msg, timeout, sleep_time=1.0):
timeout -= sleep_time
if timeout <= 0:
raise Exception("timeout waiting for condition '%s'" % msg)
logging.debug("Sleeping for %f seconds waiting for condition '%s'",
sleep_time, msg)
time.sleep(sleep_time)
return timeout
|
|
# swift_build_support/products/product.py -----------------------*- python -*-
#
# This source file is part of the Swift.org open source project
#
# Copyright (c) 2014 - 2017 Apple Inc. and the Swift project authors
# Licensed under Apache License v2.0 with Runtime Library Exception
#
# See https://swift.org/LICENSE.txt for license information
# See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
#
# ----------------------------------------------------------------------------
import abc
import os
from build_swift.build_swift.wrappers import xcrun
from .. import cmake
from .. import shell
from .. import targets
def is_release_variant(build_variant):
return build_variant in ['Release', 'RelWithDebInfo']
class Product(object):
@classmethod
def product_name(cls):
"""product_name() -> str
The identifier-style name to use for this product.
"""
return cls.__name__.lower()
@classmethod
def product_source_name(cls):
"""product_source_name() -> str
The name of the source code directory of this product.
It provides a customization point for Product subclasses. It is set to
the value of product_name() by default for this reason.
"""
llvm_projects = ['clang',
'clang-tools-extra',
'compiler-rt',
'libcxx',
'lldb',
'llvm']
if cls.product_name() in llvm_projects:
return "llvm-project/{}".format(cls.product_name())
return cls.product_name()
@classmethod
def is_build_script_impl_product(cls):
"""is_build_script_impl_product -> bool
Whether this product is produced by build-script-impl.
"""
raise NotImplementedError
@classmethod
def is_before_build_script_impl_product(cls):
"""is_before_build_script_impl_product -> bool
Whether this product is built before any build-script-impl products.
Such products must be non-build_script_impl products.
Because such products are built ahead of the compiler, they are
built using the host toolchain.
"""
raise NotImplementedError
@classmethod
def is_ignore_install_all_product(cls):
"""is_ignore_install_all_product -> bool
Whether this product is to ignore the install-all directive
and instead always respect its own should_install.
This is useful when we run -install-all but have products
which should never be installed into the toolchain
(e.g. earlyswiftdriver)
"""
return False
@classmethod
def is_swiftpm_unified_build_product(cls):
"""is_swiftpm_unified_build_product -> bool
Whether this product should be built in the unified build of SwiftPM
products.
"""
return False
@classmethod
def is_nondarwin_only_build_product(cls):
"""Returns true if this target should be skipped in darwin builds when
inferring dependencies.
"""
return False
@classmethod
def get_dependencies(cls):
"""Return a list of products that this product depends upon"""
raise NotImplementedError
def should_clean(self, host_target):
"""should_clean() -> Bool
Whether or not this product should be cleaned before being built
"""
return False
def clean(self, host_target):
"""clean() -> void
Perform the clean, for a non-build-script-impl product.
"""
raise NotImplementedError
def should_build(self, host_target):
"""should_build() -> Bool
Whether or not this product should be built with the given arguments.
"""
raise NotImplementedError
def build(self, host_target):
"""build() -> void
Perform the build, for a non-build-script-impl product.
"""
raise NotImplementedError
def should_test(self, host_target):
"""should_test() -> Bool
Whether or not this product should be tested with the given arguments.
"""
raise NotImplementedError
def test(self, host_target):
"""test() -> void
Run the tests, for a non-build-script-impl product.
"""
raise NotImplementedError
def should_install(self, host_target):
"""should_install() -> Bool
Whether or not this product should be installed with the given
arguments.
"""
raise NotImplementedError
def install(self, host_target):
"""install() -> void
Install to the toolchain, for a non-build-script-impl product.
"""
raise NotImplementedError
def __init__(self, args, toolchain, source_dir, build_dir):
"""
Parameters
----------
args : `argparse.Namespace`
The arguments passed by the user to the invocation of the script.
toolchain : `swift_build_support.toolchain.Toolchain`
The toolchain being used to build the product. The toolchain will
point to the tools that the builder should use to build (like the
compiler or the linker).
source_dir : string
The directory containing the product's source code.
build_dir : string
The directory in which the product should put all of its build
products.
"""
self.args = args
self.toolchain = toolchain
self.source_dir = source_dir
self.build_dir = build_dir
self.cmake_options = cmake.CMakeOptions()
self.common_c_flags = ['-Wno-unknown-warning-option',
'-Werror=unguarded-availability-new']
def is_release(self):
"""is_release() -> Bool
Whether or not this target is built as a release variant
"""
return is_release_variant(self.args.build_variant)
def install_toolchain_path(self, host_target):
"""toolchain_path() -> string
Returns the path to the toolchain that is being created as part of this
build, or to a native prebuilt toolchain that was passed in.
"""
if self.args.native_swift_tools_path is not None:
return os.path.split(self.args.native_swift_tools_path)[0]
install_destdir = self.args.install_destdir
if self.args.cross_compile_hosts:
build_root = os.path.dirname(self.build_dir)
install_destdir = '%s/intermediate-install/%s' % (build_root, host_target)
return targets.toolchain_path(install_destdir,
self.args.install_prefix)
def is_darwin_host(self, host_target):
return host_target.startswith("macosx") or \
host_target.startswith("iphone") or \
host_target.startswith("appletv") or \
host_target.startswith("watch")
def should_include_host_in_lipo(self, host_target):
return self.args.cross_compile_hosts and \
self.is_darwin_host(host_target)
def host_install_destdir(self, host_target):
if self.args.cross_compile_hosts:
# If cross compiling tools, install into a host-specific subdirectory.
if self.should_include_host_in_lipo(host_target):
# If this is one of the hosts we should lipo,
# install in to a temporary subdirectory.
return '%s/intermediate-install/%s' % \
(os.path.dirname(self.build_dir), host_target)
elif host_target == "merged-hosts":
# This assumes that all hosts are merged to the lipo.
return self.args.install_destdir
else:
return '%s/%s' % (self.args.install_destdir, host_target)
else:
return self.args.install_destdir
def is_cross_compile_target(self, host_target):
return self.args.cross_compile_hosts and \
host_target in self.args.cross_compile_hosts
def generate_darwin_toolchain_file(self, platform, arch):
shell.makedirs(self.build_dir)
toolchain_file = os.path.join(self.build_dir, 'BuildScriptToolchain.cmake')
cmake_osx_sysroot = xcrun.sdk_path(platform)
target = None
if platform == 'macosx':
target = '{}-apple-macosx{}'.format(
arch, self.args.darwin_deployment_version_osx)
elif platform == 'iphonesimulator':
target = '{}-apple-ios{}'.format(
arch, self.args.darwin_deployment_version_ios)
elif platform == 'iphoneos':
target = '{}-apple-ios{}'.format(
arch, self.args.darwin_deployment_version_ios)
elif platform == 'appletvsimulator':
target = '{}-apple-tvos{}'.format(
arch, self.args.darwin_deployment_version_tvos)
elif platform == 'appletvos':
target = '{}-apple-tvos{}'.format(
arch, self.args.darwin_deployment_version_tvos)
elif platform == 'watchsimulator':
target = '{}-apple-watchos{}'.format(
arch, self.args.darwin_deployment_version_watchos)
elif platform == 'watchos':
target = '{}-apple-watchos{}'.format(
arch, self.args.darwin_deployment_version_watchos)
else:
raise RuntimeError("Unhandled platform?!")
toolchain_args = {}
toolchain_args['CMAKE_SYSTEM_NAME'] = 'Darwin'
toolchain_args['CMAKE_OSX_SYSROOT'] = cmake_osx_sysroot
toolchain_args['CMAKE_OSX_ARCHITECTURES'] = arch
if self.toolchain.cc.endswith('clang'):
toolchain_args['CMAKE_C_COMPILER_TARGET'] = target
if self.toolchain.cxx.endswith('clang++'):
toolchain_args['CMAKE_CXX_COMPILER_TARGET'] = target
# Swift always supports cross compiling.
toolchain_args['CMAKE_Swift_COMPILER_TARGET'] = target
# Sort by the key so that we always produce the same toolchain file
data = sorted(toolchain_args.items(), key=lambda x: x[0])
if not self.args.dry_run:
with open(toolchain_file, 'w') as f:
f.writelines("set({} {})\n".format(k, v) for k, v in data)
else:
print("DRY_RUN! Writing Toolchain file to path: {}".format(toolchain_file))
return toolchain_file
def get_linux_abi(self, arch):
# Map tuples of (platform, arch) to ABI
#
# E.x.: Hard ABI or Soft ABI for Linux map to gnueabihf
arch_platform_to_abi = {
# For now always map to hard float ABI.
'armv7': ('arm', 'gnueabihf')
}
# Default is just arch, gnu
sysroot_arch, abi = arch_platform_to_abi.get(arch, (arch, 'gnu'))
return sysroot_arch, abi
def get_linux_sysroot(self, platform, arch):
if not self.is_cross_compile_target('{}-{}'.format(platform, arch)):
return None
sysroot_arch, abi = self.get_linux_abi(arch)
# $ARCH-$PLATFORM-$ABI
# E.x.: aarch64-linux-gnu
sysroot_dirname = '{}-{}-{}'.format(sysroot_arch, platform, abi)
return os.path.join(os.sep, 'usr', sysroot_dirname)
def get_linux_target(self, platform, arch):
sysroot_arch, abi = self.get_linux_abi(arch)
return '{}-unknown-linux-{}'.format(sysroot_arch, abi)
def generate_linux_toolchain_file(self, platform, arch):
shell.makedirs(self.build_dir)
toolchain_file = os.path.join(self.build_dir, 'BuildScriptToolchain.cmake')
toolchain_args = {}
toolchain_args['CMAKE_SYSTEM_NAME'] = 'Linux'
toolchain_args['CMAKE_SYSTEM_PROCESSOR'] = arch
# We only set the actual sysroot if we are actually cross
# compiling. This is important since otherwise cmake seems to change the
# RUNPATH to be a relative rather than an absolute path, breaking
# certain cmark tests (and maybe others).
maybe_sysroot = self.get_linux_sysroot(platform, arch)
if maybe_sysroot is not None:
toolchain_args['CMAKE_SYSROOT'] = maybe_sysroot
target = self.get_linux_target(platform, arch)
if self.toolchain.cc.endswith('clang'):
toolchain_args['CMAKE_C_COMPILER_TARGET'] = target
if self.toolchain.cxx.endswith('clang++'):
toolchain_args['CMAKE_CXX_COMPILER_TARGET'] = target
# Swift always supports cross compiling.
toolchain_args['CMAKE_Swift_COMPILER_TARGET'] = target
toolchain_args['CMAKE_FIND_ROOT_PATH_MODE_PROGRAM'] = 'NEVER'
toolchain_args['CMAKE_FIND_ROOT_PATH_MODE_LIBRARY'] = 'ONLY'
toolchain_args['CMAKE_FIND_ROOT_PATH_MODE_INCLUDE'] = 'ONLY'
toolchain_args['CMAKE_FIND_ROOT_PATH_MODE_PACKAGE'] = 'ONLY'
# Sort by the key so that we always produce the same toolchain file
data = sorted(toolchain_args.items(), key=lambda x: x[0])
if not self.args.dry_run:
with open(toolchain_file, 'w') as f:
f.writelines("set({} {})\n".format(k, v) for k, v in data)
else:
print("DRY_RUN! Writing Toolchain file to path: {}".format(toolchain_file))
return toolchain_file
def common_cross_c_flags(self, platform, arch):
cross_flags = []
if self.is_release():
cross_flags.append('-fno-stack-protector')
return self.common_c_flags + cross_flags
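# Minimal Product subclass sketch (illustrative only; 'Example' is a made-up
# product and the method bodies are placeholders, not a real build recipe):
#
#   class Example(Product):
#       @classmethod
#       def is_build_script_impl_product(cls):
#           return False
#       @classmethod
#       def is_before_build_script_impl_product(cls):
#           return False
#       @classmethod
#       def get_dependencies(cls):
#           return []
#       def should_build(self, host_target):
#           return True
#       def build(self, host_target):
#           shell.makedirs(self.build_dir)  # then configure/build with self.toolchain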
class ProductBuilder(object):
"""
Abstract base class for all ProductBuilders.
A specific ProductBuilder implements the interface methods depending on
how the product is to be built. Multiple products can use the same
product builder if it is parametrized appropriately (for example, all the
products built using CMake).
Ideally a ProductBuilder will be initialized with references to the
invocation arguments, the calculated toolchain, the calculated workspace,
and the target host, but the base class doesn't impose those requirements
in order to be flexible.
NOTE: Python doesn't need an explicit abstract base class, but it helps
document the interface.
"""
@abc.abstractmethod
def __init__(self, product_class, args, toolchain, workspace):
"""
Create a product builder for the given product class.
Parameters
----------
product_class : class
A subtype of `Product` which describes the product being built by
this builder.
args : `argparse.Namespace`
The arguments passed by the user to the invocation of the script. A
builder should consider this argument read-only.
toolchain : `swift_build_support.toolchain.Toolchain`
The toolchain being used to build the product. The toolchain will
point to the tools that the builder should use to build (like the
compiler or the linker).
workspace : `swift_build_support.workspace.Workspace`
The workspace where the source code and the build directories have
to be located. A builder should use the workspace to access its own
source/build directory, as well as other products source/build
directories.
"""
pass
@abc.abstractmethod
def build(self):
"""
Perform the build phase for the product.
This phase might also imply a configuration phase, but each product
builder is free to determine how to do it.
"""
pass
@abc.abstractmethod
def test(self):
"""
Perform the test phase for the product.
This phase might build and execute the product tests.
"""
pass
@abc.abstractmethod
def install(self):
"""
Perform the install phase for the product.
This phase might copy the artifacts from the previous phases into a
destination directory.
"""
pass
|
|
# -*- coding: utf-8 -*-
"""
wakatime.main
~~~~~~~~~~~~~
wakatime module entry point.
:copyright: (c) 2013 Alan Hamlett.
:license: BSD, see LICENSE for more details.
"""
from __future__ import print_function
import base64
import logging
import os
import platform
import re
import sys
import time
import traceback
import socket
try:
import ConfigParser as configparser
except ImportError: # pragma: nocover
import configparser
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
sys.path.insert(0, os.path.join(os.path.dirname(os.path.abspath(__file__)), 'packages'))
from .__about__ import __version__
from .compat import u, open, is_py3
from .logger import setup_logging
from .offlinequeue import Queue
from .packages import argparse
from .packages.requests.exceptions import RequestException
from .project import get_project_info
from .session_cache import SessionCache
from .stats import get_file_stats
try:
from .packages import simplejson as json # pragma: nocover
except (ImportError, SyntaxError): # pragma: nocover
import json
try:
from .packages import tzlocal
except: # pragma: nocover
from .packages import tzlocal3 as tzlocal
log = logging.getLogger('WakaTime')
class FileAction(argparse.Action):
def __call__(self, parser, namespace, values, option_string=None):
try:
if os.path.isfile(values):
values = os.path.realpath(values)
except: # pragma: nocover
pass
setattr(namespace, self.dest, values)
def parseConfigFile(configFile=None):
"""Returns a configparser.SafeConfigParser instance with configs
read from the config file. Default location of the config file is
at ~/.wakatime.cfg.
"""
if not configFile:
configFile = os.path.join(os.path.expanduser('~'), '.wakatime.cfg')
configs = configparser.SafeConfigParser()
try:
with open(configFile, 'r', encoding='utf-8') as fh:
try:
configs.readfp(fh)
except configparser.Error:
print(traceback.format_exc())
return None
except IOError:
print(u('Error: Could not read from config file {0}').format(u(configFile)))
return configs
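# Example ~/.wakatime.cfg contents consumed by parseConfigFile() and
# parseArguments() below (illustrative values; only keys this module reads
# are shown):
#
#   [settings]
#   api_key = your-api-key-here
#   hidefilenames = false
#   offline = true
#   verbose = false
#   exclude =
#       ^/tmp/.*$
#   include =
#       .*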
def parseArguments():
"""Parse command line arguments and configs from ~/.wakatime.cfg.
Command line arguments take precedence over config file settings.
Returns the parsed arguments (argparse.Namespace) and the SafeConfigParser instance.
"""
# define supported command line arguments
parser = argparse.ArgumentParser(
description='Common interface for the WakaTime api.')
parser.add_argument('--entity', dest='entity', metavar='FILE',
action=FileAction,
help='absolute path to file for the heartbeat; can also be a '+
'url, domain, or app when --entitytype is not file')
parser.add_argument('--file', dest='file', action=FileAction,
help=argparse.SUPPRESS)
parser.add_argument('--key', dest='key',
help='your wakatime api key; uses api_key from '+
'~/.wakatime.conf by default')
parser.add_argument('--write', dest='isWrite',
action='store_true',
help='when set, tells api this heartbeat was triggered from '+
'writing to a file')
parser.add_argument('--plugin', dest='plugin',
help='optional text editor plugin name and version '+
'for User-Agent header')
parser.add_argument('--time', dest='timestamp', metavar='time',
type=float,
help='optional floating-point unix epoch timestamp; '+
'uses current time by default')
parser.add_argument('--lineno', dest='lineno',
help='optional line number; current line being edited')
parser.add_argument('--cursorpos', dest='cursorpos',
help='optional cursor position in the current file')
parser.add_argument('--entitytype', dest='entity_type',
help='entity type for this heartbeat. can be one of "file", '+
'"url", "domain", or "app"; defaults to file.')
parser.add_argument('--proxy', dest='proxy',
help='optional https proxy url; for example: '+
'https://user:pass@localhost:8080')
parser.add_argument('--project', dest='project',
help='optional project name')
parser.add_argument('--alternate-project', dest='alternate_project',
help='optional alternate project name; auto-discovered project takes priority')
parser.add_argument('--hostname', dest='hostname', help='hostname of current machine.')
parser.add_argument('--disableoffline', dest='offline',
action='store_false',
help='disables offline time logging instead of queuing logged time')
parser.add_argument('--hidefilenames', dest='hidefilenames',
action='store_true',
help='obfuscate file names; will not send file names to api')
parser.add_argument('--exclude', dest='exclude', action='append',
help='filename patterns to exclude from logging; POSIX regex '+
'syntax; can be used more than once')
parser.add_argument('--include', dest='include', action='append',
help='filename patterns to log; when used in combination with '+
'--exclude, files matching include will still be logged; '+
'POSIX regex syntax; can be used more than once')
parser.add_argument('--ignore', dest='ignore', action='append',
help=argparse.SUPPRESS)
parser.add_argument('--logfile', dest='logfile',
help='defaults to ~/.wakatime.log')
parser.add_argument('--apiurl', dest='api_url',
help='heartbeats api url; for debugging with a local server')
parser.add_argument('--config', dest='config',
help='defaults to ~/.wakatime.conf')
parser.add_argument('--verbose', dest='verbose', action='store_true',
help='turns on debug messages in log file')
parser.add_argument('--version', action='version', version=__version__)
# parse command line arguments
args = parser.parse_args()
# use current unix epoch timestamp by default
if not args.timestamp:
args.timestamp = time.time()
# parse ~/.wakatime.cfg file
configs = parseConfigFile(args.config)
if configs is None:
return args, configs
# update args from configs
if not args.key:
default_key = None
if configs.has_option('settings', 'api_key'):
default_key = configs.get('settings', 'api_key')
elif configs.has_option('settings', 'apikey'):
default_key = configs.get('settings', 'apikey')
if default_key:
args.key = default_key
else:
parser.error('Missing api key')
if not args.entity_type:
args.entity_type = 'file'
if not args.entity:
if args.file:
args.entity = args.file
else:
parser.error('argument --entity is required')
if not args.exclude:
args.exclude = []
if configs.has_option('settings', 'ignore'):
try:
for pattern in configs.get('settings', 'ignore').split("\n"):
if pattern.strip() != '':
args.exclude.append(pattern)
except TypeError:
pass
if configs.has_option('settings', 'exclude'):
try:
for pattern in configs.get('settings', 'exclude').split("\n"):
if pattern.strip() != '':
args.exclude.append(pattern)
except TypeError:
pass
if not args.include:
args.include = []
if configs.has_option('settings', 'include'):
try:
for pattern in configs.get('settings', 'include').split("\n"):
if pattern.strip() != '':
args.include.append(pattern)
except TypeError:
pass
if args.offline and configs.has_option('settings', 'offline'):
args.offline = configs.getboolean('settings', 'offline')
if not args.hidefilenames and configs.has_option('settings', 'hidefilenames'):
args.hidefilenames = configs.getboolean('settings', 'hidefilenames')
if not args.proxy and configs.has_option('settings', 'proxy'):
args.proxy = configs.get('settings', 'proxy')
if not args.verbose and configs.has_option('settings', 'verbose'):
args.verbose = configs.getboolean('settings', 'verbose')
if not args.verbose and configs.has_option('settings', 'debug'):
args.verbose = configs.getboolean('settings', 'debug')
if not args.logfile and configs.has_option('settings', 'logfile'):
args.logfile = configs.get('settings', 'logfile')
if not args.api_url and configs.has_option('settings', 'api_url'):
args.api_url = configs.get('settings', 'api_url')
return args, configs
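# Example command line handled by the parser above (illustrative; the plugin
# string is a made-up editor plugin identifier):
#
#   wakatime --entity /path/to/file.py --write --plugin "vim-wakatime/4.0.0"
#
# --key may be omitted when api_key is present in ~/.wakatime.cfg.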
def should_exclude(entity, include, exclude):
if entity is not None and entity.strip() != '':
try:
for pattern in include:
try:
compiled = re.compile(pattern, re.IGNORECASE)
if compiled.search(entity):
return False
except re.error as ex:
log.warning(u('Regex error ({msg}) for include pattern: {pattern}').format(
msg=u(ex),
pattern=u(pattern),
))
except TypeError: # pragma: nocover
pass
try:
for pattern in exclude:
try:
compiled = re.compile(pattern, re.IGNORECASE)
if compiled.search(entity):
return pattern
except re.error as ex:
log.warning(u('Regex error ({msg}) for exclude pattern: {pattern}').format(
msg=u(ex),
pattern=u(pattern),
))
except TypeError: # pragma: nocover
pass
return False
def get_user_agent(plugin):
ver = sys.version_info
python_version = '%d.%d.%d.%s.%d' % (ver[0], ver[1], ver[2], ver[3], ver[4])
user_agent = u('wakatime/{ver} ({platform}) Python{py_ver}').format(
ver=u(__version__),
platform=u(platform.platform()),
py_ver=python_version,
)
if plugin:
user_agent = u('{user_agent} {plugin}').format(
user_agent=user_agent,
plugin=u(plugin),
)
else:
user_agent = u('{user_agent} Unknown/0').format(
user_agent=user_agent,
)
return user_agent
def send_heartbeat(project=None, branch=None, hostname=None, stats={}, key=None, entity=None,
timestamp=None, isWrite=None, plugin=None, offline=None, entity_type='file',
hidefilenames=None, proxy=None, api_url=None, **kwargs):
"""Sends heartbeat as POST request to WakaTime api server.
"""
if not api_url:
api_url = 'https://wakatime.com/api/v1/heartbeats'
log.debug('Sending heartbeat to api at %s' % api_url)
data = {
'time': timestamp,
'entity': entity,
'type': entity_type,
}
if hidefilenames and entity is not None and entity_type == 'file':
extension = u(os.path.splitext(data['entity'])[1])
data['entity'] = u('HIDDEN{0}').format(extension)
if stats.get('lines'):
data['lines'] = stats['lines']
if stats.get('language'):
data['language'] = stats['language']
if stats.get('dependencies'):
data['dependencies'] = stats['dependencies']
if stats.get('lineno'):
data['lineno'] = stats['lineno']
if stats.get('cursorpos'):
data['cursorpos'] = stats['cursorpos']
if isWrite:
data['is_write'] = isWrite
if project:
data['project'] = project
if branch:
data['branch'] = branch
log.debug(data)
# setup api request
request_body = json.dumps(data)
api_key = u(base64.b64encode(str.encode(key) if is_py3 else key))
auth = u('Basic {api_key}').format(api_key=api_key)
headers = {
'User-Agent': get_user_agent(plugin),
'Content-Type': 'application/json',
'Accept': 'application/json',
'Authorization': auth,
}
if hostname:
headers['X-Machine-Name'] = hostname
proxies = {}
if proxy:
proxies['https'] = proxy
# add Olson timezone to request
try:
tz = tzlocal.get_localzone()
except:
tz = None
if tz:
headers['TimeZone'] = u(tz.zone)
session_cache = SessionCache()
session = session_cache.get()
# log time to api
response = None
try:
response = session.post(api_url, data=request_body, headers=headers,
proxies=proxies)
except RequestException:
exception_data = {
sys.exc_info()[0].__name__: u(sys.exc_info()[1]),
}
if log.isEnabledFor(logging.DEBUG):
exception_data['traceback'] = traceback.format_exc()
if offline:
queue = Queue()
queue.push(data, json.dumps(stats), plugin)
if log.isEnabledFor(logging.DEBUG):
log.warn(exception_data)
else:
log.error(exception_data)
else:
response_code = response.status_code if response is not None else None
response_content = response.text if response is not None else None
if response_code == 201:
log.debug({
'response_code': response_code,
})
session_cache.save(session)
return True
if offline:
if response_code != 400:
queue = Queue()
queue.push(data, json.dumps(stats), plugin)
if response_code == 401:
log.error({
'response_code': response_code,
'response_content': response_content,
})
elif log.isEnabledFor(logging.DEBUG):
log.warn({
'response_code': response_code,
'response_content': response_content,
})
else:
log.error({
'response_code': response_code,
'response_content': response_content,
})
else:
log.error({
'response_code': response_code,
'response_content': response_content,
})
session_cache.delete()
return False
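# Minimal usage sketch (illustrative only; all values below are hypothetical).
# execute() builds the same call from parsed CLI arguments, then uses the
# boolean result to decide whether to flush the offline queue:
#
#     sent = send_heartbeat(
#         project='my-project',
#         entity='/home/user/project/main.py',
#         timestamp=1526123456.0,
#         key='my-api-key',
#         plugin='vim-wakatime/4.0.8',
#         stats={'language': 'Python', 'lines': 120},
#         offline=True,
#     )
#     # sent is True on HTTP 201; on failure it is False and, when offline
#     # support is enabled, most failed heartbeats are queued locally for a
#     # later retry.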
def execute(argv=None):
if argv:
sys.argv = ['wakatime'] + argv
args, configs = parseArguments()
if configs is None:
return 103 # config file parsing error
setup_logging(args, __version__)
exclude = should_exclude(args.entity, args.include, args.exclude)
if exclude is not False:
log.debug(u('Skipping because matches exclude pattern: {pattern}').format(
pattern=u(exclude),
))
return 0
if args.entity_type != 'file' or os.path.isfile(args.entity):
stats = get_file_stats(args.entity, entity_type=args.entity_type,
lineno=args.lineno, cursorpos=args.cursorpos)
project = args.project or args.alternate_project
branch = None
if args.entity_type == 'file':
project, branch = get_project_info(configs, args)
kwargs = vars(args)
kwargs['project'] = project
kwargs['branch'] = branch
kwargs['stats'] = stats
kwargs['hostname'] = args.hostname or socket.gethostname()
if send_heartbeat(**kwargs):
queue = Queue()
while True:
heartbeat = queue.pop()
if heartbeat is None:
break
sent = send_heartbeat(
project=heartbeat['project'],
entity=heartbeat['entity'],
timestamp=heartbeat['time'],
branch=heartbeat['branch'],
hostname=kwargs['hostname'],
stats=json.loads(heartbeat['stats']),
key=args.key,
isWrite=heartbeat['is_write'],
plugin=heartbeat['plugin'],
offline=args.offline,
hidefilenames=args.hidefilenames,
entity_type=heartbeat['type'],
proxy=args.proxy,
api_url=args.api_url,
)
if not sent:
break
return 0 # success
return 102 # api error
else:
log.debug('File does not exist; ignoring this heartbeat.')
return 0
|
|
# -*- coding: utf-8 -*-
"""The :program:`celery` umbrella command.
.. program:: celery
.. _preload-options:
Preload Options
---------------
These options are supported by all commands,
and usually parsed before command-specific arguments.
.. cmdoption:: -A, --app
app instance to use (e.g., ``module.attr_name``)
.. cmdoption:: -b, --broker
URL to broker. default is ``amqp://guest@localhost//``
.. cmdoption:: --loader
name of custom loader class to use.
.. cmdoption:: --config
Name of the configuration module
.. cmdoption:: -C, --no-color
Disable colors in output.
.. cmdoption:: -q, --quiet
Give less verbose output (behavior depends on the sub command).
.. cmdoption:: --help
Show help and exit.
.. _daemon-options:
Daemon Options
--------------
These options are supported by commands that can detach
into the background (daemon). They will be present
in any command that also has a `--detach` option.
.. cmdoption:: -f, --logfile
Path to log file. If no logfile is specified, `stderr` is used.
.. cmdoption:: --pidfile
Optional file used to store the process pid.
The program won't start if this file already exists
and the pid is still alive.
.. cmdoption:: --uid
User id, or user name of the user to run as after detaching.
.. cmdoption:: --gid
Group id, or group name of the main group to change to after
detaching.
.. cmdoption:: --umask
Effective umask (in octal) of the process after detaching. Inherits
the umask of the parent process by default.
.. cmdoption:: --workdir
Optional directory to change to after detaching.
.. cmdoption:: --executable
Executable to use for the detached process.
``celery inspect``
------------------
.. program:: celery inspect
.. cmdoption:: -t, --timeout
Timeout in seconds (float) waiting for reply
.. cmdoption:: -d, --destination
Comma separated list of destination node names.
.. cmdoption:: -j, --json
Use json as output format.
``celery control``
------------------
.. program:: celery control
.. cmdoption:: -t, --timeout
Timeout in seconds (float) waiting for reply
.. cmdoption:: -d, --destination
Comma separated list of destination node names.
.. cmdoption:: -j, --json
Use json as output format.
``celery migrate``
------------------
.. program:: celery migrate
.. cmdoption:: -n, --limit
Number of tasks to consume (int).
.. cmdoption:: -t, --timeout
Timeout in seconds (float) waiting for tasks.
.. cmdoption:: -a, --ack-messages
Ack messages from source broker.
.. cmdoption:: -T, --tasks
List of task names to filter on.
.. cmdoption:: -Q, --queues
List of queues to migrate.
.. cmdoption:: -F, --forever
Continually migrate tasks until killed.
``celery upgrade``
------------------
.. program:: celery upgrade
.. cmdoption:: --django
Upgrade a Django project.
.. cmdoption:: --compat
Maintain backwards compatibility.
.. cmdoption:: --no-backup
Don't backup original files.
``celery shell``
----------------
.. program:: celery shell
.. cmdoption:: -I, --ipython
Force :pypi:`iPython` implementation.
.. cmdoption:: -B, --bpython
Force :pypi:`bpython` implementation.
.. cmdoption:: -P, --python
Force default Python shell.
.. cmdoption:: -T, --without-tasks
Don't add tasks to locals.
.. cmdoption:: --eventlet
Use :pypi:`eventlet` monkey patches.
.. cmdoption:: --gevent
Use :pypi:`gevent` monkey patches.
``celery result``
-----------------
.. program:: celery result
.. cmdoption:: -t, --task
Name of task (if custom backend).
.. cmdoption:: --traceback
Show traceback if any.
``celery purge``
----------------
.. program:: celery purge
.. cmdoption:: -f, --force
Don't prompt for verification before deleting messages (DANGEROUS)
``celery call``
---------------
.. program:: celery call
.. cmdoption:: -a, --args
Positional arguments (json format).
.. cmdoption:: -k, --kwargs
Keyword arguments (json format).
.. cmdoption:: --eta
Scheduled time in ISO-8601 format.
.. cmdoption:: --countdown
ETA in seconds from now (float/int).
.. cmdoption:: --expires
Expiry time in float/int seconds, or an ISO-8601 date.
.. cmdoption:: --serializer
Specify serializer to use (default is json).
.. cmdoption:: --queue
Destination queue.
.. cmdoption:: --exchange
Destination exchange (defaults to the queue exchange).
.. cmdoption:: --routing-key
Destination routing key (defaults to the queue routing key).
"""
from __future__ import absolute_import, print_function, unicode_literals
import numbers
import sys
from functools import partial
# Import commands from other modules
from celery.bin.amqp import amqp
# Cannot use relative imports here due to a Windows issue (#1111).
from celery.bin.base import Command, Extensions
from celery.bin.beat import beat
from celery.bin.call import call
from celery.bin.control import _RemoteControl # noqa
from celery.bin.control import control, inspect, status
from celery.bin.events import events
from celery.bin.graph import graph
from celery.bin.list import list_
from celery.bin.logtool import logtool
from celery.bin.migrate import migrate
from celery.bin.purge import purge
from celery.bin.result import result
from celery.bin.shell import shell
from celery.bin.upgrade import upgrade
from celery.bin.worker import worker
from celery.platforms import EX_FAILURE, EX_OK, EX_USAGE
from celery.utils import term, text
__all__ = ('CeleryCommand', 'main')
HELP = """
---- -- - - ---- Commands- -------------- --- ------------
{commands}
---- -- - - --------- -- - -------------- --- ------------
Type '{prog_name} <command> --help' for help using a specific command.
"""
command_classes = [
('Main', ['worker', 'events', 'beat', 'shell', 'multi', 'amqp'], 'green'),
('Remote Control', ['status', 'inspect', 'control'], 'blue'),
('Utils',
['purge', 'list', 'call', 'result', 'migrate', 'graph', 'upgrade'],
None),
('Debugging', ['report', 'logtool'], 'red'),
]
def determine_exit_status(ret):
if isinstance(ret, numbers.Integral):
return ret
return EX_OK if ret else EX_FAILURE
def main(argv=None):
"""Start celery umbrella command."""
# Fix for setuptools generated scripts, so that it will
# work with multiprocessing fork emulation.
# (see multiprocessing.forking.get_preparation_data())
try:
if __name__ != '__main__': # pragma: no cover
sys.modules['__main__'] = sys.modules[__name__]
cmd = CeleryCommand()
cmd.maybe_patch_concurrency()
from billiard import freeze_support
freeze_support()
cmd.execute_from_commandline(argv)
except KeyboardInterrupt:
pass
class multi(Command):
"""Start multiple worker instances."""
respects_app_option = False
def run_from_argv(self, prog_name, argv, command=None):
from celery.bin.multi import MultiTool
cmd = MultiTool(quiet=self.quiet, no_color=self.no_color)
return cmd.execute_from_commandline([command] + argv)
class help(Command):
"""Show help screen and exit."""
def usage(self, command):
return '%(prog)s <command> [options] {0.args}'.format(self)
def run(self, *args, **kwargs):
self.parser.print_help()
self.out(HELP.format(
prog_name=self.prog_name,
commands=CeleryCommand.list_commands(
colored=self.colored, app=self.app),
))
return EX_USAGE
class report(Command):
"""Shows information useful to include in bug-reports."""
def __init__(self, *args, **kwargs):
"""Custom initialization for report command.
We need this custom initialization to make sure that
everything is loaded when running a report.
There have been some issues when printing Django's
settings because Django is not properly set up when
running the report.
"""
super(report, self).__init__(*args, **kwargs)
self.app.loader.import_default_modules()
def run(self, *args, **kwargs):
self.out(self.app.bugreport())
return EX_OK
class CeleryCommand(Command):
"""Base class for commands."""
commands = {
'amqp': amqp,
'beat': beat,
'call': call,
'control': control,
'events': events,
'graph': graph,
'help': help,
'inspect': inspect,
'list': list_,
'logtool': logtool,
'migrate': migrate,
'multi': multi,
'purge': purge,
'report': report,
'result': result,
'shell': shell,
'status': status,
'upgrade': upgrade,
'worker': worker,
}
ext_fmt = '{self.namespace}.commands'
enable_config_from_cmdline = True
prog_name = 'celery'
namespace = 'celery'
@classmethod
def register_command(cls, fun, name=None):
cls.commands[name or fun.__name__] = fun
return fun
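# Illustrative sketch (the 'hello' command is hypothetical, not part of
# celery): extra sub-commands can be registered either through the
# 'celery.commands' setuptools entry-point (picked up by
# load_extension_commands() below) or directly with this classmethod:
#
#     @CeleryCommand.register_command
#     class hello(Command):
#         def run(self, *args, **kwargs):
#             self.out('hello world')
#
#     # 'celery hello' would then dispatch to the class above.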
def execute(self, command, argv=None):
try:
cls = self.commands[command]
except KeyError:
cls, argv = self.commands['help'], ['help']
try:
return cls(
app=self.app, on_error=self.on_error,
no_color=self.no_color, quiet=self.quiet,
on_usage_error=partial(self.on_usage_error, command=command),
).run_from_argv(self.prog_name, argv[1:], command=argv[0])
except self.UsageError as exc:
self.on_usage_error(exc)
return exc.status
except self.Error as exc:
self.on_error(exc)
return exc.status
def on_usage_error(self, exc, command=None):
if command:
helps = '{self.prog_name} {command} --help'
else:
helps = '{self.prog_name} --help'
self.error(self.colored.magenta('Error: {0}'.format(exc)))
self.error("""Please try '{0}'""".format(helps.format(
self=self, command=command,
)))
def _relocate_args_from_start(self, argv, index=0):
if argv:
rest = []
while index < len(argv):
value = argv[index]
if value.startswith('--'):
rest.append(value)
elif value.startswith('-'):
# we eat the next argument even though we don't know
# if this option takes an argument or not.
# instead we'll assume what's the command name in the
# return statements below.
try:
nxt = argv[index + 1]
if nxt.startswith('-'):
# is another option
rest.append(value)
else:
# is (maybe) a value for this option
rest.extend([value, nxt])
index += 1
except IndexError: # pragma: no cover
rest.append(value)
break
else:
break
index += 1
if argv[index:]: # pragma: no cover
# if there are more arguments left then divide and swap
# we assume the first argument in argv[i:] is the command
# name.
return argv[index:] + rest
# if there are no more arguments then the last arg in rest
# must be the command.
return [rest.pop()] + rest
return []
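# Illustrative sketch (hypothetical argv): options given before the
# sub-command are relocated behind it, so that the sub-command name can be
# resolved first:
#
#     self._relocate_args_from_start(['-A', 'proj', 'worker', '-l', 'info'])
#     # -> ['worker', '-l', 'info', '-A', 'proj']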
def prepare_prog_name(self, name):
if name == '__main__.py':
return sys.modules['__main__'].__file__
return name
def handle_argv(self, prog_name, argv, **kwargs):
self.prog_name = self.prepare_prog_name(prog_name)
argv = self._relocate_args_from_start(argv)
_, argv = self.prepare_args(None, argv)
try:
command = argv[0]
except IndexError:
command, argv = 'help', ['help']
return self.execute(command, argv)
def execute_from_commandline(self, argv=None):
argv = sys.argv if argv is None else argv
if 'multi' in argv[1:3]: # Issue 1008
self.respects_app_option = False
try:
sys.exit(determine_exit_status(
super(CeleryCommand, self).execute_from_commandline(argv)))
except KeyboardInterrupt:
sys.exit(EX_FAILURE)
@classmethod
def get_command_info(cls, command, indent=0,
color=None, colored=None, app=None):
colored = term.colored() if colored is None else colored
colored = colored.names[color] if color else lambda x: x
obj = cls.commands[command]
cmd = 'celery {0}'.format(colored(command))
if obj.leaf:
return '|' + text.indent(cmd, indent)
return text.join([
' ',
'|' + text.indent('{0} --help'.format(cmd), indent),
obj.list_commands(indent, 'celery {0}'.format(command), colored,
app=app),
])
@classmethod
def list_commands(cls, indent=0, colored=None, app=None):
colored = term.colored() if colored is None else colored
white = colored.white
ret = []
for command_cls, commands, color in command_classes:
ret.extend([
text.indent('+ {0}: '.format(white(command_cls)), indent),
'\n'.join(
cls.get_command_info(
command, indent + 4, color, colored, app=app)
for command in commands),
''
])
return '\n'.join(ret).strip()
def with_pool_option(self, argv):
if len(argv) > 1 and 'worker' in argv[0:3]:
# this command supports custom pools
# that may have to be loaded as early as possible.
return (['-P'], ['--pool'])
def on_concurrency_setup(self):
self.load_extension_commands()
def load_extension_commands(self):
names = Extensions(self.ext_fmt.format(self=self),
self.register_command).load()
if names:
command_classes.append(('Extensions', names, 'magenta'))
if __name__ == '__main__': # pragma: no cover
main()
|
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from distutils.version import LooseVersion
from kafkatest.utils import kafkatest_version
class KafkaVersion(LooseVersion):
"""Container for kafka versions which makes versions simple to compare.
distutils.version.LooseVersion (and StrictVersion) has robust comparison and ordering logic.
Example:
v10 = KafkaVersion("0.10.0")
v9 = KafkaVersion("0.9.0.1")
assert v10 > v9 # assertion passes!
"""
def __init__(self, version_string):
self.is_dev = (version_string.lower() == "dev")
if self.is_dev:
version_string = kafkatest_version()
# Drop dev suffix if present
dev_suffix_index = version_string.find(".dev")
if dev_suffix_index >= 0:
version_string = version_string[:dev_suffix_index]
# Don't use the form super.(...).__init__(...) because
# LooseVersion is an "old style" python class
LooseVersion.__init__(self, version_string)
def __str__(self):
if self.is_dev:
return "dev"
else:
return LooseVersion.__str__(self)
def _cmp(self, other):
if isinstance(other, str):
other = KafkaVersion(other)
if other.is_dev:
if self.is_dev:
return 0
return -1
elif self.is_dev:
return 1
return LooseVersion._cmp(self, other)
def consumer_supports_bootstrap_server(self):
"""
Kafka supported a new consumer beginning with v0.9.0 where
we can specify --bootstrap-server instead of --zookeeper.
This version also allowed a --consumer-config file where we could specify
a security protocol other than PLAINTEXT.
:return: true if the version of Kafka supports a new consumer with --bootstrap-server
"""
return self >= V_0_9_0_0
def supports_named_listeners(self):
return self >= V_0_10_2_0
def acl_command_supports_bootstrap_server(self):
return self >= V_2_1_0
def topic_command_supports_bootstrap_server(self):
return self >= V_2_3_0
def topic_command_supports_if_not_exists_with_bootstrap_server(self):
return self >= V_2_6_0
def supports_tls_to_zookeeper(self):
# indicate if KIP-515 is available
return self >= V_2_5_0
def reassign_partitions_command_supports_bootstrap_server(self):
return self >= V_2_5_0
def kafka_configs_command_uses_bootstrap_server(self):
# everything except User SCRAM Credentials (KIP-554)
return self >= V_2_6_0
def kafka_configs_command_uses_bootstrap_server_scram(self):
# User SCRAM Credentials (KIP-554)
return self >= V_2_7_0
def supports_topic_ids_when_using_zk(self):
# Supports topic IDs as described by KIP-516.
# Self-managed clusters always support topic ID, so this method only applies to ZK clusters.
return self >= V_2_8_0
def get_version(node=None):
"""Return the version attached to the given node.
Default to DEV_BRANCH if node or node.version is undefined (aka None)
"""
if node is not None and hasattr(node, "version") and node.version is not None:
return node.version
else:
return DEV_BRANCH
DEV_BRANCH = KafkaVersion("dev")
DEV_VERSION = KafkaVersion("3.0.0-SNAPSHOT")
# 0.8.2.x versions
V_0_8_2_1 = KafkaVersion("0.8.2.1")
V_0_8_2_2 = KafkaVersion("0.8.2.2")
LATEST_0_8_2 = V_0_8_2_2
# 0.9.0.x versions
V_0_9_0_0 = KafkaVersion("0.9.0.0")
V_0_9_0_1 = KafkaVersion("0.9.0.1")
LATEST_0_9 = V_0_9_0_1
# 0.10.0.x versions
V_0_10_0_0 = KafkaVersion("0.10.0.0")
V_0_10_0_1 = KafkaVersion("0.10.0.1")
LATEST_0_10_0 = V_0_10_0_1
# 0.10.1.x versions
V_0_10_1_0 = KafkaVersion("0.10.1.0")
V_0_10_1_1 = KafkaVersion("0.10.1.1")
LATEST_0_10_1 = V_0_10_1_1
# 0.10.2.x versions
V_0_10_2_0 = KafkaVersion("0.10.2.0")
V_0_10_2_1 = KafkaVersion("0.10.2.1")
V_0_10_2_2 = KafkaVersion("0.10.2.2")
LATEST_0_10_2 = V_0_10_2_2
LATEST_0_10 = LATEST_0_10_2
# 0.11.0.x versions
V_0_11_0_0 = KafkaVersion("0.11.0.0")
V_0_11_0_1 = KafkaVersion("0.11.0.1")
V_0_11_0_2 = KafkaVersion("0.11.0.2")
V_0_11_0_3 = KafkaVersion("0.11.0.3")
LATEST_0_11_0 = V_0_11_0_3
LATEST_0_11 = LATEST_0_11_0
# 1.0.x versions
V_1_0_0 = KafkaVersion("1.0.0")
V_1_0_1 = KafkaVersion("1.0.1")
V_1_0_2 = KafkaVersion("1.0.2")
LATEST_1_0 = V_1_0_2
# 1.1.x versions
V_1_1_0 = KafkaVersion("1.1.0")
V_1_1_1 = KafkaVersion("1.1.1")
LATEST_1_1 = V_1_1_1
# 2.0.x versions
V_2_0_0 = KafkaVersion("2.0.0")
V_2_0_1 = KafkaVersion("2.0.1")
LATEST_2_0 = V_2_0_1
# 2.1.x versions
V_2_1_0 = KafkaVersion("2.1.0")
V_2_1_1 = KafkaVersion("2.1.1")
LATEST_2_1 = V_2_1_1
# 2.2.x versions
V_2_2_0 = KafkaVersion("2.2.0")
V_2_2_1 = KafkaVersion("2.2.1")
V_2_2_2 = KafkaVersion("2.2.2")
LATEST_2_2 = V_2_2_2
# 2.3.x versions
V_2_3_0 = KafkaVersion("2.3.0")
V_2_3_1 = KafkaVersion("2.3.1")
LATEST_2_3 = V_2_3_1
# 2.4.x versions
V_2_4_0 = KafkaVersion("2.4.0")
V_2_4_1 = KafkaVersion("2.4.1")
LATEST_2_4 = V_2_4_1
# 2.5.x versions
V_2_5_0 = KafkaVersion("2.5.0")
V_2_5_1 = KafkaVersion("2.5.1")
LATEST_2_5 = V_2_5_1
# 2.6.x versions
V_2_6_0 = KafkaVersion("2.6.0")
V_2_6_1 = KafkaVersion("2.6.1")
LATEST_2_6 = V_2_6_1
# 2.7.x versions
V_2_7_0 = KafkaVersion("2.7.0")
LATEST_2_7 = V_2_7_0
# 2.8.x versions
V_2_8_0 = KafkaVersion("2.8.0")
LATEST_2_8 = V_2_8_0
# 3.0.x versions
V_3_0_0 = KafkaVersion("3.0.0")
LATEST_3_0 = V_3_0_0
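# Illustrative sketch (not part of the original module): KafkaVersion compares
# like a loose version string, "dev" always sorts highest, and the feature
# gates above reduce to simple comparisons against these constants:
#
#     KafkaVersion("2.3.0") >= V_2_1_0                               # -> True
#     KafkaVersion("0.8.2.2").consumer_supports_bootstrap_server()   # -> False
#     DEV_BRANCH > LATEST_3_0                                        # -> True ("dev" wins)
#     str(get_version(None))                                         # -> "dev"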
|
|
from django.shortcuts import render, redirect, get_object_or_404, render_to_response
from django.http import HttpResponse, HttpResponseRedirect
from django.views.generic import ListView
from django.utils.crypto import get_random_string
from django.core.serializers.json import DjangoJSONEncoder
from proteil.models import Protein, Structure, Helix, Sequence, Sheet
from proteil.forms import UploadFile, UploadListIds
from Parsers import utils, PdbParser, UnitprotParser
from proteil import settings
import os
from datetime import datetime
import json
import re
# Create your views here.
def index(request):
return render(request, 'proteil/index.html')
def protein_add(request):
uploadPiscesForm = UploadFile()
uploadListIds = UploadListIds()
return render(request, 'proteil/proteins/new.html',
{'pisces_form': uploadPiscesForm, 'list_form': uploadListIds})
def upload_pisces_file(request):
if request.method == 'POST':
form = UploadFile(request.POST, request.FILES)
if form.is_valid():
result = handle_uploaded_file(request.FILES['file'])
if result['status'] == "success":
con = {}
con['existing_ids'] = []
con['non_existing_ids'] = []
con['total'] = len(result['payload'])
for id in result['payload']:
try:
structure = Structure.objects.get(pdb_id=id)
con['existing_ids'].append(id)
except Structure.DoesNotExist:
con['non_existing_ids'].append(id)
return render(request, 'proteil/proteins/ids_list.html', con)
return HttpResponse(result['error_msg'])
else:
return redirect('protein_add')
return render(request, 'proteil/proteins/new.html', {'pisces_form': form})
def upload_ids_list(request):
if request.method == 'POST':
form = UploadListIds(request.POST)
if form.is_valid():
result = handle_uploaded_list_ids(form.cleaned_data)
if result['status'] == "success":
con = {}
con['existing_ids'] = []
con['non_existing_ids'] = []
con['total'] = len(result['payload'])
for id in result['payload']:
try:
if form.cleaned_data['ids_type'] == "pdb":
structure = Structure.objects.get(pdb_id=id)
else:
pass
con['existing_ids'].append(id)
except Structure.DoesNotExist:
con['non_existing_ids'].append(id)
return render(request, 'proteil/proteins/ids_list.html', con)
return HttpResponse(result['error_msg'])
else:
return redirect('protein_add')
return render(request, 'proteil/proteins/new.html', {'ids_list_form': form})
def add_protein(request, id):
response = {
"status": "error",
"error_msg": "",
}
if request.is_ajax():
if re.search(r'^[A-Za-z0-9]{4}$', id):
add_by_pdb(response, id)
elif re.search(r'^[A-Za-z0-9]{6}$', id):
add_by_uniprotkb(response, id)
else:
response["error_msg"] = "API can only be used with AJAX requests";
return HttpResponse(json.dumps(response), content_type="application/json")
class ProteinList(ListView):
model = Protein
template_name = "proteil/proteins/protein_list.html"
context_object_name = "proteins"
queryset = Protein.objects.order_by('uniprotkb_id')
def add_by_pdb(response, pdb_id):
content = utils.fetch_pdb_file(pdb_id)
if content:
try:
parser = PdbParser(pdb_id, content)
uniprotkbIds = utils.idmapping(pdb_id, True)
structure = Structure(
pdb_id=pdb_id,
classification=parser.classification,
title=parser.title
)
structure.save()
if uniprotkbIds:
protein = Protein(uniprotkb_id=uniprotkbIds[0])
protein.save()
structure.protein = protein
for h in parser.helices:
helix = Helix(
comment=h['comment'],
helix_class=h['helixClass'],
end_i_code=h['endICode'],
helix_id=h['helixID'],
end_seq_num=h['endSeqNum'],
init_seq_num=h['initSeqNum'],
init_res_name=h['initResName'],
ser_num=h['serNum'],
init_chain_id=h['initChainID'],
init_i_code=h['initICode'],
length=h['length'],
end_chain_id=h['endChainID'],
end_res_name=h['endResName'],
classification=h['helixClass']
)
helix.structure = structure
helix.save()
for s in parser.sequence:
sequence = Sequence(
chain_id=s['chainID'],
num_res=s['numRes'],
residues=s['residues']
)
sequence.structure = structure
sequence.save()
for s in parser.sheets:
sheet = Sheet(
strand=s['strand'],
sheet_id=s['sheetID'],
numStrands=s['numStrands'],
init_res_name=s['initResName'],
init_chain_id=s['initChainID'],
init_seq_num=s['initSeqNum'],
init_i_code=s['initICode'],
end_res_name=s['endResName'],
end_chain_id=s['endChainID'],
end_seq_num=s['endSeqNum'],
end_i_code=s['endICode'],
sense=s['sense']
)
sheet.structure = structure
if sheet.sense != 0:
sheet.cur_atom = s['curAtom']
sheet.cur_res_name = s['curResName']
sheet.cur_chain_id = s['curChainID']
sheet.cur_res_seq = s['curResSeq']
sheet.cur_i_code = s['curICode']
sheet.prev_atom = s['prevAtom']
sheet.prev_res_name = s['prevResName']
sheet.prev_chain_id = s['prevChainID']
sheet.prev_res_seq = s['prevResSeq']
sheet.prev_i_code = s['prevICode']
sheet.save()
response['status'] = "success"
response['uniprotkbId'] = uniprotkbIds[0]
except Exception as e:
response['status'] = "error"
response['error_msg'] = str(e) # debug mode only
else:
response['error_msg'] = "PDB file not found"
def add_by_uniprotkb(response, uniprotkb_id):
pass
def handle_uploaded_file(f):
result = {}
# get possible list of protein structures
ids = utils.parse_pisces_file(f)
if not ids:
result['status'] = "error"
result['error_msg'] = "Not a valid file. No ids recognized."
return result
f.seek(0)
# create dir for pdb files
if not os.path.isdir(settings.PATHS['UPLOADS']):
os.mkdir(settings.PATHS['UPLOADS'])
filename = get_random_string(16, "abcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*(-_=+)")
with open(os.path.join(settings.PATHS['UPLOADS'], filename), "w") as fout:
fout.write(f.read())
result['status'] = "success"
result['payload'] = ids
return result
def handle_uploaded_list_ids(data):
result = {}
ids = utils.parse_list_ids(data['ids'], data['ids_type'])
if not ids:
result['status'] = "error"
result['error_msg'] = "Invalid input. No ids recognized."
else:
result['status'] = "success"
result['payload'] = ids
return result
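# Illustrative sketch (hypothetical input, not part of the original views):
# both upload handlers return a dict with a 'status' key plus either a
# 'payload' list of ids or an 'error_msg' string, which the views above
# branch on. Assuming utils.parse_list_ids recognizes both ids:
#
#     handle_uploaded_list_ids({'ids': '1ABC 2XYZ', 'ids_type': 'pdb'})
#     # -> {'status': 'success', 'payload': ['1ABC', '2XYZ']}
#     handle_uploaded_list_ids({'ids': '', 'ids_type': 'pdb'})
#     # -> {'status': 'error', 'error_msg': 'Invalid input. No ids recognized.'}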
|
|
"""
ANSI - Gives colour to text.
Use the codes defined in ANSIPARSER in your text
to apply colour to text according to the ANSI standard.
Examples:
This is %crRed text%cn and this is normal again.
This is {rRed text{n and this is normal again.
Mostly you should not need to call parse_ansi() explicitly;
it is run by Evennia just before returning data to/from the
user.
"""
from builtins import object, range
import re
from evennia.utils import utils
from evennia.utils.utils import to_str, to_unicode
from future.utils import with_metaclass
# ANSI definitions
ANSI_BEEP = "\07"
ANSI_ESCAPE = "\033"
ANSI_NORMAL = "\033[0m"
ANSI_UNDERLINE = "\033[4m"
ANSI_HILITE = "\033[1m"
ANSI_UNHILITE = "\033[22m"
ANSI_BLINK = "\033[5m"
ANSI_INVERSE = "\033[7m"
ANSI_INV_HILITE = "\033[1;7m"
ANSI_INV_BLINK = "\033[7;5m"
ANSI_BLINK_HILITE = "\033[1;5m"
ANSI_INV_BLINK_HILITE = "\033[1;5;7m"
# Foreground colors
ANSI_BLACK = "\033[30m"
ANSI_RED = "\033[31m"
ANSI_GREEN = "\033[32m"
ANSI_YELLOW = "\033[33m"
ANSI_BLUE = "\033[34m"
ANSI_MAGENTA = "\033[35m"
ANSI_CYAN = "\033[36m"
ANSI_WHITE = "\033[37m"
# Background colors
ANSI_BACK_BLACK = "\033[40m"
ANSI_BACK_RED = "\033[41m"
ANSI_BACK_GREEN = "\033[42m"
ANSI_BACK_YELLOW = "\033[43m"
ANSI_BACK_BLUE = "\033[44m"
ANSI_BACK_MAGENTA = "\033[45m"
ANSI_BACK_CYAN = "\033[46m"
ANSI_BACK_WHITE = "\033[47m"
# Formatting Characters
ANSI_RETURN = "\r\n"
ANSI_TAB = "\t"
ANSI_SPACE = " "
# Escapes
ANSI_ESCAPES = ("{{", "\\\\", "\|\|")
from collections import OrderedDict
_PARSE_CACHE = OrderedDict()
_PARSE_CACHE_SIZE = 10000
class ANSIParser(object):
"""
A class that parses ANSI markup into ANSI command sequences.
Colour codes can also be escaped, by prepending a \ for
xterm256 codes or an extra | for Merc-style codes.
"""
def sub_ansi(self, ansimatch):
"""
Replacer used by `re.sub` to replace ANSI
markers with correct ANSI sequences
Args:
ansimatch (re.matchobject): The match.
Returns:
processed (str): The processed match string.
"""
return self.ansi_map.get(ansimatch.group(), "")
def sub_brightbg(self, ansimatch):
"""
Replacer used by `re.sub` to replace ANSI
bright background markers with Xterm256 replacement
Args:
ansimatch (re.matchobject): The match.
Returns:
processed (str): The processed match string.
"""
return self.ansi_bright_bgs_map.get(ansimatch.group(), "")
def sub_xterm256(self, rgbmatch, use_xterm256=False):
"""
This is a replacer method called by `re.sub` with the matched
tag. It must return the correct ansi sequence.
It checks `self.do_xterm256` to determine if conversion
to standard ANSI should be done or not.
Args:
rgbmatch (re.matchobject): The match.
use_xterm256 (bool, optional): Don't convert 256-colors to 16.
Returns:
processed (str): The processed match string.
"""
if not rgbmatch:
return ""
# get tag, stripping the initial marker
rgbtag = rgbmatch.group()[1:]
background = rgbtag[0] == '['
if background:
red, green, blue = int(rgbtag[1]), int(rgbtag[2]), int(rgbtag[3])
else:
red, green, blue = int(rgbtag[0]), int(rgbtag[1]), int(rgbtag[2])
if use_xterm256:
colval = 16 + (red * 36) + (green * 6) + blue
return "\033[%s8;5;%sm" % (3 + int(background), colval)
# replaced since some clients (like Potato) do not accept codes with leading zeroes, see issue #1024.
#return "\033[%s8;5;%s%s%sm" % (3 + int(background), colval // 100, (colval % 100) // 10, colval%10)
else:
# xterm256 not supported, convert the rgb value to ansi instead
if red == green == blue and red < 3:
if background:
return ANSI_BACK_BLACK
elif red >= 1:
return ANSI_HILITE + ANSI_BLACK
else:
return ANSI_NORMAL + ANSI_BLACK
elif red == green == blue:
if background:
return ANSI_BACK_WHITE
elif red >= 4:
return ANSI_HILITE + ANSI_WHITE
else:
return ANSI_NORMAL + ANSI_WHITE
elif red > green and red > blue:
if background:
return ANSI_BACK_RED
elif red >= 3:
return ANSI_HILITE + ANSI_RED
else:
return ANSI_NORMAL + ANSI_RED
elif red == green and red > blue:
if background:
return ANSI_BACK_YELLOW
elif red >= 3:
return ANSI_HILITE + ANSI_YELLOW
else:
return ANSI_NORMAL + ANSI_YELLOW
elif red == blue and red > green:
if background:
return ANSI_BACK_MAGENTA
elif red >= 3:
return ANSI_HILITE + ANSI_MAGENTA
else:
return ANSI_NORMAL + ANSI_MAGENTA
elif green > blue:
if background:
return ANSI_BACK_GREEN
elif green >= 3:
return ANSI_HILITE + ANSI_GREEN
else:
return ANSI_NORMAL + ANSI_GREEN
elif green == blue:
if background:
return ANSI_BACK_CYAN
elif green >= 3:
return ANSI_HILITE + ANSI_CYAN
else:
return ANSI_NORMAL + ANSI_CYAN
else: # mostly blue
if background:
return ANSI_BACK_BLUE
elif blue >= 3:
return ANSI_HILITE + ANSI_BLUE
else:
return ANSI_NORMAL + ANSI_BLUE
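# Worked example (illustrative, not part of the original class): with
# xterm256 enabled, the tag |500 (bright red foreground) gives
# colval = 16 + 5*36 + 0*6 + 0 = 196 and is emitted as "\033[38;5;196m";
# the background form |[500 is emitted as "\033[48;5;196m". Without
# xterm256 support the same tag falls back to the nearest 16-colour ANSI
# sequence, ANSI_HILITE + ANSI_RED in this case.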
def strip_raw_codes(self, string):
"""
Strips raw ANSI codes from a string.
Args:
string (str): The string to strip.
Returns:
string (str): The processed string.
"""
return self.ansi_regex.sub("", string)
def strip_mxp(self, string):
"""
Strips all MXP codes from a string.
Args:
string (str): The string to strip.
Returns:
string (str): The processed string.
"""
return self.mxp_sub.sub(r'\2', string)
def parse_ansi(self, string, strip_ansi=False, xterm256=False, mxp=False):
"""
Parses a string, subbing color codes according to the stored
mapping.
Args:
string (str): The string to parse.
strip_ansi (boolean, optional): Strip all found ansi markup.
xterm256 (boolean, optional): If actually using xterm256 or if
these values should be converted to 16-color ANSI.
mxp (boolean, optional): Parse MXP commands in string.
Returns:
string (str): The parsed string.
"""
if hasattr(string, '_raw_string'):
if strip_ansi:
return string.clean()
else:
return string.raw()
if not string:
return ''
# check cached parsings
global _PARSE_CACHE
cachekey = "%s-%s-%s-%s" % (string, strip_ansi, xterm256, mxp)
if cachekey in _PARSE_CACHE:
return _PARSE_CACHE[cachekey]
# pre-convert bright colors to xterm256 color tags
string = self.brightbg_sub.sub(self.sub_brightbg, string)
def do_xterm256(part):
return self.sub_xterm256(part, xterm256)
in_string = utils.to_str(string)
# do string replacement
parsed_string = ""
parts = self.ansi_escapes.split(in_string) + [" "]
for part, sep in zip(parts[::2], parts[1::2]):
pstring = self.xterm256_sub.sub(do_xterm256, part)
pstring = self.ansi_sub.sub(self.sub_ansi, pstring)
parsed_string += "%s%s" % (pstring, sep[0].strip())
if not mxp:
parsed_string = self.strip_mxp(parsed_string)
if strip_ansi:
# remove all ansi codes (including those manually
# inserted in string)
return self.strip_raw_codes(parsed_string)
# cache and crop old cache
_PARSE_CACHE[cachekey] = parsed_string
if len(_PARSE_CACHE) > _PARSE_CACHE_SIZE:
_PARSE_CACHE.popitem(last=False)
return parsed_string
# Mapping using {r {n etc
hilite = ANSI_HILITE
unhilite = ANSI_UNHILITE
ext_ansi_map = [
(r'{n', ANSI_NORMAL), # reset
(r'{/', ANSI_RETURN), # line break
(r'{-', ANSI_TAB), # tab
(r'{_', ANSI_SPACE), # space
(r'{*', ANSI_INVERSE), # invert
(r'{^', ANSI_BLINK), # blinking text (very annoying and not supported by all clients)
(r'{u', ANSI_UNDERLINE), # underline
(r'{r', hilite + ANSI_RED),
(r'{g', hilite + ANSI_GREEN),
(r'{y', hilite + ANSI_YELLOW),
(r'{b', hilite + ANSI_BLUE),
(r'{m', hilite + ANSI_MAGENTA),
(r'{c', hilite + ANSI_CYAN),
(r'{w', hilite + ANSI_WHITE), # pure white
(r'{x', hilite + ANSI_BLACK), # dark grey
(r'{R', unhilite + ANSI_RED),
(r'{G', unhilite + ANSI_GREEN),
(r'{Y', unhilite + ANSI_YELLOW),
(r'{B', unhilite + ANSI_BLUE),
(r'{M', unhilite + ANSI_MAGENTA),
(r'{C', unhilite + ANSI_CYAN),
(r'{W', unhilite + ANSI_WHITE), # light grey
(r'{X', unhilite + ANSI_BLACK), # pure black
# hilight-able colors
(r'{h', hilite),
(r'{H', unhilite),
(r'{!R', ANSI_RED),
(r'{!G', ANSI_GREEN),
(r'{!Y', ANSI_YELLOW),
(r'{!B', ANSI_BLUE),
(r'{!M', ANSI_MAGENTA),
(r'{!C', ANSI_CYAN),
(r'{!W', ANSI_WHITE), # light grey
(r'{!X', ANSI_BLACK), # pure black
# normal ANSI backgrounds
(r'{[R', ANSI_BACK_RED),
(r'{[G', ANSI_BACK_GREEN),
(r'{[Y', ANSI_BACK_YELLOW),
(r'{[B', ANSI_BACK_BLUE),
(r'{[M', ANSI_BACK_MAGENTA),
(r'{[C', ANSI_BACK_CYAN),
(r'{[W', ANSI_BACK_WHITE), # light grey background
(r'{[X', ANSI_BACK_BLACK), # pure black background
## alternative |-format
(r'|n', ANSI_NORMAL), # reset
(r'|/', ANSI_RETURN), # line break
(r'|-', ANSI_TAB), # tab
(r'|_', ANSI_SPACE), # space
(r'|*', ANSI_INVERSE), # invert
(r'|^', ANSI_BLINK), # blinking text (very annoying and not supported by all clients)
(r'|u', ANSI_UNDERLINE), # underline
(r'|r', hilite + ANSI_RED),
(r'|g', hilite + ANSI_GREEN),
(r'|y', hilite + ANSI_YELLOW),
(r'|b', hilite + ANSI_BLUE),
(r'|m', hilite + ANSI_MAGENTA),
(r'|c', hilite + ANSI_CYAN),
(r'|w', hilite + ANSI_WHITE), # pure white
(r'|x', hilite + ANSI_BLACK), # dark grey
(r'|R', unhilite + ANSI_RED),
(r'|G', unhilite + ANSI_GREEN),
(r'|Y', unhilite + ANSI_YELLOW),
(r'|B', unhilite + ANSI_BLUE),
(r'|M', unhilite + ANSI_MAGENTA),
(r'|C', unhilite + ANSI_CYAN),
(r'|W', unhilite + ANSI_WHITE), # light grey
(r'|X', unhilite + ANSI_BLACK), # pure black
# hilight-able colors
(r'|h', hilite),
(r'|H', unhilite),
(r'|!R', ANSI_RED),
(r'|!G', ANSI_GREEN),
(r'|!Y', ANSI_YELLOW),
(r'|!B', ANSI_BLUE),
(r'|!M', ANSI_MAGENTA),
(r'|!C', ANSI_CYAN),
(r'|!W', ANSI_WHITE), # light grey
(r'|!X', ANSI_BLACK), # pure black
# normal ANSI backgrounds
(r'|[R', ANSI_BACK_RED),
(r'|[G', ANSI_BACK_GREEN),
(r'|[Y', ANSI_BACK_YELLOW),
(r'|[B', ANSI_BACK_BLUE),
(r'|[M', ANSI_BACK_MAGENTA),
(r'|[C', ANSI_BACK_CYAN),
(r'|[W', ANSI_BACK_WHITE), # light grey background
(r'|[X', ANSI_BACK_BLACK) # pure black background
]
ansi_bright_bgs = [
# "bright" ANSI backgrounds using xterm256 since ANSI
# standard does not support it (will
# fallback to dark ANSI background colors if xterm256
# is not supported by client)
(r'{[r', r'{[500'),
(r'{[g', r'{[050'),
(r'{[y', r'{[550'),
(r'{[b', r'{[005'),
(r'{[m', r'{[505'),
(r'{[c', r'{[055'),
(r'{[w', r'{[555'), # white background
(r'{[x', r'{[222'), # dark grey background
## |-style variations
(r'|[r', r'|[500'),
(r'|[g', r'|[050'),
(r'|[y', r'|[550'),
(r'|[b', r'|[005'),
(r'|[m', r'|[505'),
(r'|[c', r'|[055'),
(r'|[w', r'|[555'), # white background
(r'|[x', r'|[222')] # dark grey background
# xterm256 colour tags like {123 or |123. These are replaced directly by
# the sub_xterm256 method
xterm256_map = [
(r'\{[0-5]{3}', ""), # {123 - foreground colour
(r'\{\[[0-5]{3}', ""), # {[123 - background colour
## {-style
(r'\|[0-5]{3}', ""), # |123 - foreground colour
(r'\|\[[0-5]{3}', ""), # |[123 - background colour
(r'\{[0-5]{3}', ""), # {123 - foreground colour
(r'\{\[[0-5]{3}', ""), # {[123 - background colour
## |-style
(r'\|[0-5]{3}', ""), # |123 - foreground colour
(r'\|\[[0-5]{3}', ""), # |[123 - background colour
]
mxp_re = r'\|lc(.*?)\|lt(.*?)\|le'
# prepare regex matching
brightbg_sub = re.compile(r"|".join([r"(?<!\|)%s" % re.escape(tup[0]) for tup in ansi_bright_bgs]), re.DOTALL)
xterm256_sub = re.compile(r"|".join([tup[0] for tup in xterm256_map]), re.DOTALL)
ansi_sub = re.compile(r"|".join([re.escape(tup[0]) for tup in ext_ansi_map]), re.DOTALL)
mxp_sub = re.compile(mxp_re, re.DOTALL)
# used by regex replacer to correctly map ansi sequences
ansi_map = dict(ext_ansi_map)
ansi_bright_bgs_map = dict(ansi_bright_bgs)
# prepare matching ansi codes overall
ansi_re = r"\033\[[0-9;]+m"
ansi_regex = re.compile(ansi_re)
# escapes - these double-chars will be replaced with a single
# instance of each
ansi_escapes = re.compile(r"(%s)" % "|".join(ANSI_ESCAPES), re.DOTALL)
ANSI_PARSER = ANSIParser()
#
# Access function
#
def parse_ansi(string, strip_ansi=False, parser=ANSI_PARSER, xterm256=False, mxp=False):
"""
Parses a string, subbing color codes as needed.
Args:
string (str): The string to parse.
strip_ansi (bool, optional): Strip all ANSI sequences.
parser (ansi.AnsiParser, optional): A parser instance to use.
xterm256 (bool, optional): Support xterm256 or not.
mxp (bool, optional): Support MXP markup or not.
Returns:
string (str): The parsed string.
"""
return parser.parse_ansi(string, strip_ansi=strip_ansi, xterm256=xterm256, mxp=mxp)
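# Illustrative usage sketch (the markup string is hypothetical):
#
#     parse_ansi("|rHello|n world")
#     # -> '\x1b[1m\x1b[31mHello\x1b[0m world'   (16-colour fallback)
#     parse_ansi("|rHello|n world", strip_ansi=True)
#     # -> 'Hello world'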
def strip_ansi(string, parser=ANSI_PARSER):
"""
Strip all ansi from the string. This handles the Evennia-specific
markup.
Args:
parser (ansi.AnsiParser, optional): The parser to use.
Returns:
string (str): The stripped string.
"""
return parser.parse_ansi(string, strip_ansi=True)
def strip_raw_ansi(string, parser=ANSI_PARSER):
"""
Remove raw ansi codes from string. This assumes pure
ANSI-bytecodes in the string.
Args:
string (str): The string to parse.
parser (bool, optional): The parser to use.
Returns:
string (str): the stripped string.
"""
return parser.strip_raw_codes(string)
def raw(string):
"""
Escapes a string into a form which won't be colorized by the ansi
parser.
Returns:
string (str): The raw, escaped string.
"""
return string.replace('{', '{{')
def group(lst, n):
for i in range(0, len(lst), n):
val = lst[i:i+n]
if len(val) == n:
yield tuple(val)
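# Illustrative sketch: group() only yields full-size tuples, silently dropping
# an incomplete trailing chunk.
#
#     list(group([1, 2, 3, 4, 5], 2))
#     # -> [(1, 2), (3, 4)]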
def _spacing_preflight(func):
"""
This wrapper function is used to do some preflight checks on
functions used for padding ANSIStrings.
"""
def wrapped(self, width, fillchar=None):
if fillchar is None:
fillchar = " "
if (len(fillchar) != 1) or (not isinstance(fillchar, basestring)):
raise TypeError("must be char, not %s" % type(fillchar))
if not isinstance(width, int):
raise TypeError("integer argument expected, got %s" % type(width))
difference = width - len(self)
if difference <= 0:
return self
return func(self, width, fillchar, difference)
return wrapped
def _query_super(func_name):
"""
Have the string class handle this with the cleaned string instead
of ANSIString.
"""
def wrapped(self, *args, **kwargs):
return getattr(self.clean(), func_name)(*args, **kwargs)
return wrapped
def _on_raw(func_name):
"""
Like query_super, but makes the operation run on the raw string.
"""
def wrapped(self, *args, **kwargs):
args = list(args)
try:
string = args.pop(0)
if hasattr(string, '_raw_string'):
args.insert(0, string.raw())
else:
args.insert(0, string)
except IndexError:
pass
result = getattr(self._raw_string, func_name)(*args, **kwargs)
if isinstance(result, basestring):
return ANSIString(result, decoded=True)
return result
return wrapped
def _transform(func_name):
"""
Some string functions, like those manipulating capital letters,
return a string the same length as the original. This function
allows us to do the same, replacing all the non-coded characters
with the resulting string.
"""
def wrapped(self, *args, **kwargs):
replacement_string = _query_super(func_name)(self, *args, **kwargs)
to_string = []
char_counter = 0
for index in range(0, len(self._raw_string)):
if index in self._code_indexes:
to_string.append(self._raw_string[index])
elif index in self._char_indexes:
to_string.append(replacement_string[char_counter])
char_counter += 1
return ANSIString(
''.join(to_string), decoded=True,
code_indexes=self._code_indexes, char_indexes=self._char_indexes,
clean_string=replacement_string)
return wrapped
class ANSIMeta(type):
"""
Many functions on ANSIString are just light wrappers around the unicode
base class. We apply them here, as part of the classes construction.
"""
def __init__(cls, *args, **kwargs):
for func_name in [
'count', 'startswith', 'endswith', 'find', 'index', 'isalnum',
'isalpha', 'isdigit', 'islower', 'isspace', 'istitle', 'isupper',
'rfind', 'rindex', '__len__']:
setattr(cls, func_name, _query_super(func_name))
for func_name in [
'__mod__', 'expandtabs', 'decode', 'replace', 'format',
'encode']:
setattr(cls, func_name, _on_raw(func_name))
for func_name in [
'capitalize', 'translate', 'lower', 'upper', 'swapcase']:
setattr(cls, func_name, _transform(func_name))
super(ANSIMeta, cls).__init__(*args, **kwargs)
class ANSIString(with_metaclass(ANSIMeta, unicode)):
"""
String-like object that is aware of ANSI codes.
This isn't especially efficient, as it doesn't really have an
understanding of what the codes mean in order to eliminate
redundant characters. This could be made as an enhancement to ANSI_PARSER.
If one is going to use ANSIString, one should generally avoid converting
away from it until one is about to send information on the wire. This is
because escape sequences in the string may otherwise already be decoded,
and taken literally the second time around.
Please refer to the Metaclass, ANSIMeta, which is used to apply wrappers
for several of the methods that need not be defined directly here.
"""
def __new__(cls, *args, **kwargs):
"""
When creating a new ANSIString, you may use a custom parser that has
the same attributes as the standard one, and you may declare the
string to be handled as already decoded. It is important not to double
decode strings, as escapes can only be respected once.
Internally, ANSIString can also pass itself precached code/character
indexes and clean strings to avoid doing extra work when combining
ANSIStrings.
"""
string = args[0]
if not isinstance(string, basestring):
string = to_str(string, force_string=True)
parser = kwargs.get('parser', ANSI_PARSER)
decoded = kwargs.get('decoded', False) or hasattr(string, '_raw_string')
code_indexes = kwargs.pop('code_indexes', None)
char_indexes = kwargs.pop('char_indexes', None)
clean_string = kwargs.pop('clean_string', None)
# All True, or All False, not just one.
checks = [x is None for x in [code_indexes, char_indexes, clean_string]]
if not len(set(checks)) == 1:
raise ValueError("You must specify code_indexes, char_indexes, "
"and clean_string together, or not at all.")
if not all(checks):
decoded = True
if not decoded:
# Completely new ANSI String
clean_string = to_unicode(parser.parse_ansi(string, strip_ansi=True, mxp=True))
string = parser.parse_ansi(string, xterm256=True, mxp=True)
elif clean_string is not None:
# We have an explicit clean string.
pass
elif hasattr(string, '_clean_string'):
# It's already an ANSIString
clean_string = string._clean_string
code_indexes = string._code_indexes
char_indexes = string._char_indexes
string = string._raw_string
else:
# It's a string that has been pre-ansi decoded.
clean_string = parser.strip_raw_codes(string)
if not isinstance(string, unicode):
string = string.decode('utf-8')
ansi_string = super(ANSIString, cls).__new__(ANSIString, to_str(clean_string), "utf-8")
ansi_string._raw_string = string
ansi_string._clean_string = clean_string
ansi_string._code_indexes = code_indexes
ansi_string._char_indexes = char_indexes
return ansi_string
def __str__(self):
return self._raw_string.encode('utf-8')
def __unicode__(self):
"""
Unfortunately, this is not called during print() statements
due to a bug in the Python interpreter. You can always do
unicode() or str() around the resulting ANSIString and print
that.
"""
return self._raw_string
def __repr__(self):
"""
Let's make the repr the command that would actually be used to
construct this object, for convenience and reference.
"""
return "ANSIString(%s, decoded=True)" % repr(self._raw_string)
def __init__(self, *_, **kwargs):
"""
When the ANSIString is first initialized, a few internal variables
have to be set.
The first is the parser. It is possible to replace Evennia's standard
ANSI parser with one of your own syntax if you wish, so long as it
implements the same interface.
The second is the _raw_string. It should be noted that the ANSIStrings
are unicode based. This seemed more reasonable than basing it off of
the string class, because if someone were to use a unicode character,
the benefits of knowing the indexes of the ANSI characters would be
negated by the fact that a character within the string might require
more than one byte to be represented. The raw string is, then, a
unicode object rather than a true encoded string. If you need the
encoded string for sending over the wire, try using the .encode()
method.
The third thing to set is the _clean_string. This is a unicode object
that is devoid of all ANSI Escapes.
Finally, _code_indexes and _char_indexes are defined. These are lookup
tables for which characters in the raw string are related to ANSI
escapes, and which are for the readable text.
"""
self.parser = kwargs.pop('parser', ANSI_PARSER)
super(ANSIString, self).__init__()
if self._code_indexes is None:
self._code_indexes, self._char_indexes = self._get_indexes()
@staticmethod
def _shifter(iterable, offset):
"""
Takes a list of integers, and produces a new one incrementing all
by a number.
"""
return [i + offset for i in iterable]
@classmethod
def _adder(cls, first, second):
"""
Joins two ANSIStrings, preserving calculated info.
"""
raw_string = first._raw_string + second._raw_string
clean_string = first._clean_string + second._clean_string
code_indexes = first._code_indexes[:]
char_indexes = first._char_indexes[:]
code_indexes.extend(
cls._shifter(second._code_indexes, len(first._raw_string)))
char_indexes.extend(
cls._shifter(second._char_indexes, len(first._raw_string)))
return ANSIString(raw_string, code_indexes=code_indexes,
char_indexes=char_indexes,
clean_string=clean_string)
def __add__(self, other):
"""
We have to be careful when adding two strings not to reprocess things
that don't need to be reprocessed, lest we end up with escapes being
interpreted literally.
"""
if not isinstance(other, basestring):
return NotImplemented
if not isinstance(other, ANSIString):
other = ANSIString(other)
return self._adder(self, other)
def __radd__(self, other):
"""
Likewise, if we're on the other end.
"""
if not isinstance(other, basestring):
return NotImplemented
if not isinstance(other, ANSIString):
other = ANSIString(other)
return self._adder(other, self)
def __getslice__(self, i, j):
"""
This function is deprecated, so we just make it call the proper
function.
"""
return self.__getitem__(slice(i, j))
def _slice(self, slc):
"""
This function takes a slice() object.
Slices have to be handled specially. Not only are they able to specify
a start and end with [x:y], but many forget that they can also specify
an interval with [x:y:z]. As a result, not only do we have to track
the ANSI Escapes that have played before the start of the slice, we
must also replay any in these intervals, should they exist.
Thankfully, slicing the _char_indexes table gives us the actual
indexes that need slicing in the raw string. We can check between
those indexes to figure out what escape characters need to be
replayed.
"""
slice_indexes = self._char_indexes[slc]
# If it's the end of the string, we need to append final color codes.
if not slice_indexes:
return ANSIString('')
try:
string = self[slc.start or 0]._raw_string
except IndexError:
return ANSIString('')
last_mark = slice_indexes[0]
# Check between the slice intervals for escape sequences.
i = None
for i in slice_indexes[1:]:
for index in range(last_mark, i):
if index in self._code_indexes:
string += self._raw_string[index]
last_mark = i
try:
string += self._raw_string[i]
except IndexError:
pass
if i is not None:
append_tail = self._get_interleving(self._char_indexes.index(i) + 1)
else:
append_tail = ''
return ANSIString(string + append_tail, decoded=True)
def __getitem__(self, item):
"""
Gateway for slices and getting specific indexes in the ANSIString. If
this is a regexable ANSIString, it will get the data from the raw
string instead, bypassing ANSIString's intelligent escape skipping,
for reasons explained in the __new__ method's docstring.
"""
if isinstance(item, slice):
# Slices must be handled specially.
return self._slice(item)
try:
self._char_indexes[item]
except IndexError:
raise IndexError("ANSIString Index out of range")
# Get character codes after the index as well.
if self._char_indexes[-1] == self._char_indexes[item]:
append_tail = self._get_interleving(item + 1)
else:
append_tail = ''
item = self._char_indexes[item]
clean = self._raw_string[item]
result = ''
# Get the character they're after, and replay all escape sequences
# previous to it.
for index in range(0, item + 1):
if index in self._code_indexes:
result += self._raw_string[index]
return ANSIString(result + clean + append_tail, decoded=True)
def clean(self):
"""
Return a unicode object without the ANSI escapes.
"""
return self._clean_string
def raw(self):
"""
Return a unicode object with the ANSI escapes.
"""
return self._raw_string
def partition(self, sep, reverse=False):
"""
Similar to split, but always creates a tuple with three items:
1. The part before the separator
2. The separator itself.
3. The part after.
We use the same techniques we used in split() to make sure each are
colored.
"""
if hasattr(sep, '_clean_string'):
sep = sep.clean()
if reverse:
parent_result = self._clean_string.rpartition(sep)
else:
parent_result = self._clean_string.partition(sep)
current_index = 0
result = tuple()
for section in parent_result:
result += (self[current_index:current_index + len(section)],)
current_index += len(section)
return result
def _get_indexes(self):
"""
Two tables need to be made, one which contains the indexes of all
readable characters, and one which contains the indexes of all ANSI
escapes. It's important to remember that ANSI escapes require more
than one character at a time, though no readable character needs more
than one character, since the unicode base class abstracts that away
from us. However, several readable characters can be placed in a row.
We must use regexes here to figure out where all the escape sequences
are hiding in the string. Then we use the ranges of their starts and
ends to create a final, comprehensive list of all indexes which are
dedicated to code, and all dedicated to text.
It's possible that only one of these tables is actually needed, the
other assumed to be what isn't in the first.
"""
code_indexes = []
for match in self.parser.ansi_regex.finditer(self._raw_string):
code_indexes.extend(range(match.start(), match.end()))
if not code_indexes:
# Plain string, no ANSI codes.
return code_indexes, list(range(0, len(self._raw_string)))
# all indexes not occupied by ansi codes are normal characters
char_indexes = [i for i in range(len(self._raw_string)) if i not in code_indexes]
return code_indexes, char_indexes
def _get_interleving(self, index):
"""
Get the code characters from the given slice end to the next
character.
"""
try:
index = self._char_indexes[index - 1]
except IndexError:
return ''
s = ''
while True:
index += 1
if index in self._char_indexes:
break
elif index in self._code_indexes:
s += self._raw_string[index]
else:
break
return s
def __mul__(self, other):
"""
Multiplication method. Implemented for performance reasons.
"""
if not isinstance(other, int):
return NotImplemented
raw_string = self._raw_string * other
clean_string = self._clean_string * other
code_indexes = self._code_indexes[:]
char_indexes = self._char_indexes[:]
for i in range(1, other):
code_indexes.extend(
self._shifter(self._code_indexes, i * len(self._raw_string)))
char_indexes.extend(
self._shifter(self._char_indexes, i * len(self._raw_string)))
return ANSIString(
raw_string, code_indexes=code_indexes, char_indexes=char_indexes,
clean_string=clean_string)
def __rmul__(self, other):
return self.__mul__(other)
def split(self, by=None, maxsplit=-1):
"""
Stolen from PyPy's pure Python string implementation, tweaked for
ANSIString.
PyPy is distributed under the MIT licence.
http://opensource.org/licenses/MIT
"""
drop_spaces = by is None
if drop_spaces:
by = " "
bylen = len(by)
if bylen == 0:
raise ValueError("empty separator")
res = []
start = 0
while maxsplit != 0:
next = self._clean_string.find(by, start)
if next < 0:
break
# Get character codes after the index as well.
res.append(self[start:next])
start = next + bylen
maxsplit -= 1 # NB. if it's already < 0, it stays < 0
res.append(self[start:len(self)])
if drop_spaces:
return [part for part in res if part != ""]
return res
def rsplit(self, by=None, maxsplit=-1):
"""
Stolen from PyPy's pure Python string implementation, tweaked for
ANSIString.
PyPy is distributed under the MIT licence.
http://opensource.org/licenses/MIT
"""
res = []
end = len(self)
drop_spaces = by is None
if drop_spaces:
by = " "
bylen = len(by)
if bylen == 0:
raise ValueError("empty separator")
while maxsplit != 0:
next = self._clean_string.rfind(by, 0, end)
if next < 0:
break
# Get character codes after the index as well.
res.append(self[next+bylen:end])
end = next
maxsplit -= 1 # NB. if it's already < 0, it stays < 0
res.append(self[:end])
res.reverse()
if drop_spaces:
return [part for part in res if part != ""]
return res
def strip(self, chars=None):
"""
Strip from both ends, taking ANSI markers into account.
"""
clean = self._clean_string
raw = self._raw_string
# count continuous sequence of chars from left and right
nlen = len(clean)
nlstripped = nlen - len(clean.lstrip(chars))
nrstripped = nlen - len(clean.rstrip(chars))
# within the stripped regions, only retain parts of the raw
# string *not* matching the clean string (these are ansi/mxp tags)
lstripped = ""
ic, ir1 = 0, 0
while nlstripped:
if ic >= nlstripped:
break
elif raw[ir1] != clean[ic]:
lstripped += raw[ir1]
else:
ic += 1
ir1 += 1
rstripped = ""
ic, ir2 = nlen-1, len(raw)-1
while nrstripped:
if nlen - ic > nrstripped:
break
elif raw[ir2] != clean[ic]:
rstripped += raw[ir2]
else:
ic -= 1
ir2 -= 1
rstripped = rstripped[::-1]
return ANSIString(lstripped + raw[ir1:ir2+1] + rstripped)
def lstrip(self, chars=None):
"""
Strip from the left, taking ANSI markers into account.
"""
clean = self._clean_string
raw = self._raw_string
# count continuous sequence of chars from left and right
nlen = len(clean)
nlstripped = nlen - len(clean.lstrip(chars))
# within the stripped regions, only retain parts of the raw
# string *not* matching the clean string (these are ansi/mxp tags)
lstripped = ""
ic, ir1 = 0, 0
while nlstripped:
if ic >= nlstripped:
break
elif raw[ir1] != clean[ic]:
lstripped += raw[ir1]
else:
ic += 1
ir1 += 1
return ANSIString(lstripped + raw[ir1:])
def rstrip(self, chars=None):
"""
Strip from the right, taking ANSI markers into account.
"""
clean = self._clean_string
raw = self._raw_string
nlen = len(clean)
nrstripped = nlen - len(clean.rstrip(chars))
rstripped = ""
ic, ir2 = nlen-1, len(raw)-1
while nrstripped:
if nlen - ic > nrstripped:
break
elif raw[ir2] != clean[ic]:
rstripped += raw[ir2]
else:
ic -= 1
ir2 -= 1
rstripped = rstripped[::-1]
return ANSIString(raw[:ir2+1] + rstripped)
def join(self, iterable):
"""
Joins together strings in an iterable.
"""
result = ANSIString('')
last_item = None
for item in iterable:
if last_item is not None:
result += self._raw_string
if not isinstance(item, ANSIString):
item = ANSIString(item)
result += item
last_item = item
return result
def _filler(self, char, amount):
"""
Generate a line of characters in a more efficient way than just adding
ANSIStrings.
"""
if not isinstance(char, ANSIString):
line = char * amount
return ANSIString(
char * amount, code_indexes=[], char_indexes=list(range(0, len(line))),
clean_string=char)
try:
start = char._code_indexes[0]
except IndexError:
start = None
end = char._char_indexes[0]
prefix = char._raw_string[start:end]
postfix = char._raw_string[end + 1:]
line = char._clean_string * amount
code_indexes = [i for i in range(0, len(prefix))]
length = len(prefix) + len(line)
code_indexes.extend([i for i in range(length, length + len(postfix))])
char_indexes = self._shifter(range(0, len(line)), len(prefix))
raw_string = prefix + line + postfix
return ANSIString(
raw_string, clean_string=line, char_indexes=char_indexes,
code_indexes=code_indexes)
@_spacing_preflight
def center(self, width, fillchar, difference):
"""
Center some text with some spaces padding both sides.
"""
remainder = difference % 2
difference //= 2
spacing = self._filler(fillchar, difference)
result = spacing + self + spacing + self._filler(fillchar, remainder)
return result
@_spacing_preflight
def ljust(self, width, fillchar, difference):
"""
Left justify some text.
"""
return self + self._filler(fillchar, difference)
@_spacing_preflight
def rjust(self, width, fillchar, difference):
"""
Right justify some text.
"""
return self._filler(fillchar, difference) + self
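# Hedged usage sketch (not part of the original module): how the padding and
# stripping helpers above behave. The "|r"/"|n" colour markers below are an
# assumption for illustration (Evennia-style markup); the key point is that
# widths are computed on the clean text while the ANSI/MXP codes are preserved.
#
#     s = ANSIString("|rHello|n")
#     len(s)                            # 5 -- length of the clean string only
#     s.center(11)                      # "   Hello   " with colour codes intact
#     ANSIString(", ").join([s, s])     # joins while keeping each item's codes
#     ANSIString("  |rHi|n  ").strip()  # strips whitespace, keeps the markers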
|
|
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import os
import subprocess
import re
import time
from airflow.hooks.base_hook import BaseHook
from airflow.exceptions import AirflowException
from airflow.utils.log.logging_mixin import LoggingMixin
from airflow.contrib.kubernetes import kube_client
class SparkSubmitHook(BaseHook, LoggingMixin):
"""
This hook is a wrapper around the spark-submit binary to kick off a spark-submit job.
    It requires that the "spark-submit" binary is in the PATH or that the
    spark_home is supplied via the connection's extra field.
:param conf: Arbitrary Spark configuration properties
:type conf: dict
:param conn_id: The connection id as configured in Airflow administration. When an
invalid connection_id is supplied, it will default to yarn.
:type conn_id: str
:param files: Upload additional files to the executor running the job, separated by a
comma. Files will be placed in the working directory of each executor.
For example, serialized objects.
:type files: str
:param py_files: Additional python files used by the job, can be .zip, .egg or .py.
:type py_files: str
:param driver_classpath: Additional, driver-specific, classpath settings.
:type driver_classpath: str
:param jars: Submit additional jars to upload and place them in executor classpath.
:type jars: str
:param java_class: the main class of the Java application
:type java_class: str
:param packages: Comma-separated list of maven coordinates of jars to include on the
driver and executor classpaths
:type packages: str
:param exclude_packages: Comma-separated list of maven coordinates of jars to exclude
while resolving the dependencies provided in 'packages'
:type exclude_packages: str
:param repositories: Comma-separated list of additional remote repositories to search
for the maven coordinates given with 'packages'
:type repositories: str
:param total_executor_cores: (Standalone & Mesos only) Total cores for all executors
(Default: all the available cores on the worker)
:type total_executor_cores: int
:param executor_cores: (Standalone, YARN and Kubernetes only) Number of cores per
executor (Default: 2)
:type executor_cores: int
:param executor_memory: Memory per executor (e.g. 1000M, 2G) (Default: 1G)
:type executor_memory: str
:param driver_memory: Memory allocated to the driver (e.g. 1000M, 2G) (Default: 1G)
:type driver_memory: str
:param keytab: Full path to the file that contains the keytab
:type keytab: str
:param principal: The name of the kerberos principal used for keytab
:type principal: str
:param name: Name of the job (default airflow-spark)
:type name: str
:param num_executors: Number of executors to launch
:type num_executors: int
:param application_args: Arguments for the application being submitted
:type application_args: list
:param env_vars: Environment variables for spark-submit. It
supports yarn and k8s mode too.
:type env_vars: dict
:param verbose: Whether to pass the verbose flag to spark-submit process for debugging
:type verbose: bool
"""
def __init__(self,
conf=None,
conn_id='spark_default',
files=None,
py_files=None,
driver_classpath=None,
jars=None,
java_class=None,
packages=None,
exclude_packages=None,
repositories=None,
total_executor_cores=None,
executor_cores=None,
executor_memory=None,
driver_memory=None,
keytab=None,
principal=None,
name='default-name',
num_executors=None,
application_args=None,
env_vars=None,
verbose=False):
self._conf = conf
self._conn_id = conn_id
self._files = files
self._py_files = py_files
self._driver_classpath = driver_classpath
self._jars = jars
self._java_class = java_class
self._packages = packages
self._exclude_packages = exclude_packages
self._repositories = repositories
self._total_executor_cores = total_executor_cores
self._executor_cores = executor_cores
self._executor_memory = executor_memory
self._driver_memory = driver_memory
self._keytab = keytab
self._principal = principal
self._name = name
self._num_executors = num_executors
self._application_args = application_args
self._env_vars = env_vars
self._verbose = verbose
self._submit_sp = None
self._yarn_application_id = None
self._kubernetes_driver_pod = None
self._connection = self._resolve_connection()
self._is_yarn = 'yarn' in self._connection['master']
self._is_kubernetes = 'k8s' in self._connection['master']
if self._is_kubernetes and kube_client is None:
            raise RuntimeError(
                "{master} specified, but Kubernetes dependencies are not installed!".format(
                    master=self._connection['master']))
self._should_track_driver_status = self._resolve_should_track_driver_status()
self._driver_id = None
self._driver_status = None
self._spark_exit_code = None
def _resolve_should_track_driver_status(self):
"""
Determines whether or not this hook should poll the spark driver status through
subsequent spark-submit status requests after the initial spark-submit request
:return: if the driver status should be tracked
"""
return ('spark://' in self._connection['master'] and
self._connection['deploy_mode'] == 'cluster')
def _resolve_connection(self):
# Build from connection master or default to yarn if not available
conn_data = {'master': 'yarn',
'queue': None,
'deploy_mode': None,
'spark_home': None,
'spark_binary': 'spark-submit',
'namespace': 'default'}
try:
# Master can be local, yarn, spark://HOST:PORT, mesos://HOST:PORT and
# k8s://https://<HOST>:<PORT>
conn = self.get_connection(self._conn_id)
if conn.port:
conn_data['master'] = "{}:{}".format(conn.host, conn.port)
else:
conn_data['master'] = conn.host
# Determine optional yarn queue from the extra field
extra = conn.extra_dejson
conn_data['queue'] = extra.get('queue', None)
conn_data['deploy_mode'] = extra.get('deploy-mode', None)
conn_data['spark_home'] = extra.get('spark-home', None)
conn_data['spark_binary'] = extra.get('spark-binary', 'spark-submit')
conn_data['namespace'] = extra.get('namespace', 'default')
except AirflowException:
self.log.debug(
"Could not load connection string %s, defaulting to %s",
self._conn_id, conn_data['master']
)
return conn_data
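    # Hedged example (not part of the original hook) of the Airflow connection
    # this method resolves; the host and extra values are illustrative only,
    # but the extra keys match the ones read above.
    #
    #   Conn Id: spark_default
    #   Host:    yarn  (or spark://HOST, mesos://HOST, k8s://https://HOST)
    #   Port:    7077  (optional)
    #   Extra:   {"queue": "root.default", "deploy-mode": "cluster",
    #             "spark-home": "/opt/spark", "spark-binary": "spark-submit",
    #             "namespace": "spark-jobs"}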
def get_conn(self):
pass
def _get_spark_binary_path(self):
# If the spark_home is passed then build the spark-submit executable path using
# the spark_home; otherwise assume that spark-submit is present in the path to
# the executing user
if self._connection['spark_home']:
connection_cmd = [os.path.join(self._connection['spark_home'], 'bin',
self._connection['spark_binary'])]
else:
connection_cmd = [self._connection['spark_binary']]
return connection_cmd
def _build_spark_submit_command(self, application):
"""
Construct the spark-submit command to execute.
:param application: command to append to the spark-submit command
:type application: str
:return: full command to be executed
"""
connection_cmd = self._get_spark_binary_path()
        # The URL of the Spark master
connection_cmd += ["--master", self._connection['master']]
if self._conf:
for key in self._conf:
connection_cmd += ["--conf", "{}={}".format(key, str(self._conf[key]))]
if self._env_vars and (self._is_kubernetes or self._is_yarn):
if self._is_yarn:
tmpl = "spark.yarn.appMasterEnv.{}={}"
else:
tmpl = "spark.kubernetes.driverEnv.{}={}"
for key in self._env_vars:
connection_cmd += [
"--conf",
tmpl.format(key, str(self._env_vars[key]))]
elif self._env_vars and self._connection['deploy_mode'] != "cluster":
self._env = self._env_vars # Do it on Popen of the process
elif self._env_vars and self._connection['deploy_mode'] == "cluster":
raise AirflowException(
"SparkSubmitHook env_vars is not supported in standalone-cluster mode.")
if self._is_kubernetes:
connection_cmd += ["--conf", "spark.kubernetes.namespace={}".format(
self._connection['namespace'])]
if self._files:
connection_cmd += ["--files", self._files]
if self._py_files:
connection_cmd += ["--py-files", self._py_files]
if self._driver_classpath:
connection_cmd += ["--driver-classpath", self._driver_classpath]
if self._jars:
connection_cmd += ["--jars", self._jars]
if self._packages:
connection_cmd += ["--packages", self._packages]
if self._exclude_packages:
connection_cmd += ["--exclude-packages", self._exclude_packages]
if self._repositories:
connection_cmd += ["--repositories", self._repositories]
if self._num_executors:
connection_cmd += ["--num-executors", str(self._num_executors)]
if self._total_executor_cores:
connection_cmd += ["--total-executor-cores", str(self._total_executor_cores)]
if self._executor_cores:
connection_cmd += ["--executor-cores", str(self._executor_cores)]
if self._executor_memory:
connection_cmd += ["--executor-memory", self._executor_memory]
if self._driver_memory:
connection_cmd += ["--driver-memory", self._driver_memory]
if self._keytab:
connection_cmd += ["--keytab", self._keytab]
if self._principal:
connection_cmd += ["--principal", self._principal]
if self._name:
connection_cmd += ["--name", self._name]
if self._java_class:
connection_cmd += ["--class", self._java_class]
if self._verbose:
connection_cmd += ["--verbose"]
if self._connection['queue']:
connection_cmd += ["--queue", self._connection['queue']]
if self._connection['deploy_mode']:
connection_cmd += ["--deploy-mode", self._connection['deploy_mode']]
# The actual script to execute
connection_cmd += [application]
# Append any application arguments
if self._application_args:
connection_cmd += self._application_args
self.log.info("Spark-Submit cmd: %s", connection_cmd)
return connection_cmd
def _build_track_driver_status_command(self):
"""
Construct the command to poll the driver status.
:return: full command to be executed
"""
connection_cmd = self._get_spark_binary_path()
        # The URL of the Spark master
connection_cmd += ["--master", self._connection['master']]
# The driver id so we can poll for its status
if self._driver_id:
connection_cmd += ["--status", self._driver_id]
else:
raise AirflowException(
"Invalid status: attempted to poll driver " +
"status but no driver id is known. Giving up.")
self.log.debug("Poll driver status cmd: %s", connection_cmd)
return connection_cmd
def submit(self, application="", **kwargs):
"""
Remote Popen to execute the spark-submit job
:param application: Submitted application, jar or py file
:type application: str
:param kwargs: extra arguments to Popen (see subprocess.Popen)
"""
spark_submit_cmd = self._build_spark_submit_command(application)
if hasattr(self, '_env'):
env = os.environ.copy()
env.update(self._env)
kwargs["env"] = env
self._submit_sp = subprocess.Popen(spark_submit_cmd,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
bufsize=-1,
universal_newlines=True,
**kwargs)
self._process_spark_submit_log(iter(self._submit_sp.stdout.readline, ''))
returncode = self._submit_sp.wait()
# Check spark-submit return code. In Kubernetes mode, also check the value
# of exit code in the log, as it may differ.
if returncode or (self._is_kubernetes and self._spark_exit_code != 0):
raise AirflowException(
"Cannot execute: {}. Error code is: {}.".format(
spark_submit_cmd, returncode
)
)
self.log.debug("Should track driver: {}".format(self._should_track_driver_status))
# We want the Airflow job to wait until the Spark driver is finished
if self._should_track_driver_status:
if self._driver_id is None:
raise AirflowException(
"No driver id is known: something went wrong when executing " +
"the spark submit command"
)
# We start with the SUBMITTED status as initial status
self._driver_status = "SUBMITTED"
# Start tracking the driver status (blocking function)
self._start_driver_status_tracking()
if self._driver_status != "FINISHED":
raise AirflowException(
"ERROR : Driver {} badly exited with status {}"
.format(self._driver_id, self._driver_status)
)
def _process_spark_submit_log(self, itr):
"""
Processes the log files and extracts useful information out of it.
If the deploy-mode is 'client', log the output of the submit command as those
are the output logs of the Spark worker directly.
Remark: If the driver needs to be tracked for its status, the log-level of the
spark deploy needs to be at least INFO (log4j.logger.org.apache.spark.deploy=INFO)
:param itr: An iterator which iterates over the input of the subprocess
"""
# Consume the iterator
for line in itr:
line = line.strip()
# If we run yarn cluster mode, we want to extract the application id from
# the logs so we can kill the application when we stop it unexpectedly
if self._is_yarn and self._connection['deploy_mode'] == 'cluster':
match = re.search('(application[0-9_]+)', line)
if match:
self._yarn_application_id = match.groups()[0]
self.log.info("Identified spark driver id: %s",
self._yarn_application_id)
# If we run Kubernetes cluster mode, we want to extract the driver pod id
# from the logs so we can kill the application when we stop it unexpectedly
elif self._is_kubernetes:
                match = re.search(r'\s*pod name: ((.+?)-([a-z0-9]+)-driver)', line)
if match:
self._kubernetes_driver_pod = match.groups()[0]
self.log.info("Identified spark driver pod: %s",
self._kubernetes_driver_pod)
# Store the Spark Exit code
                match_exit_code = re.search(r'\s*Exit code: (\d+)', line)
if match_exit_code:
self._spark_exit_code = int(match_exit_code.groups()[0])
# if we run in standalone cluster mode and we want to track the driver status
# we need to extract the driver id from the logs. This allows us to poll for
# the status using the driver id. Also, we can kill the driver when needed.
elif self._should_track_driver_status and not self._driver_id:
                match_driver_id = re.search(r'(driver-[0-9\-]+)', line)
if match_driver_id:
self._driver_id = match_driver_id.groups()[0]
self.log.info("identified spark driver id: {}"
.format(self._driver_id))
else:
self.log.info(line)
self.log.debug("spark submit log: {}".format(line))
def _process_spark_status_log(self, itr):
"""
parses the logs of the spark driver status query process
:param itr: An iterator which iterates over the input of the subprocess
"""
# Consume the iterator
for line in itr:
line = line.strip()
# Check if the log line is about the driver status and extract the status.
if "driverState" in line:
self._driver_status = line.split(' : ')[1] \
.replace(',', '').replace('\"', '').strip()
self.log.debug("spark driver status log: {}".format(line))
def _start_driver_status_tracking(self):
"""
Polls the driver based on self._driver_id to get the status.
Finish successfully when the status is FINISHED.
Finish failed when the status is ERROR/UNKNOWN/KILLED/FAILED.
Possible status:
SUBMITTED: Submitted but not yet scheduled on a worker
RUNNING: Has been allocated to a worker to run
FINISHED: Previously ran and exited cleanly
RELAUNCHING: Exited non-zero or due to worker failure, but has not yet
started running again
UNKNOWN: The status of the driver is temporarily not known due to
master failure recovery
KILLED: A user manually killed this driver
FAILED: The driver exited non-zero and was not supervised
ERROR: Unable to run or restart due to an unrecoverable error
(e.g. missing jar file)
"""
# When your Spark Standalone cluster is not performing well
# due to misconfiguration or heavy loads.
# it is possible that the polling request will timeout.
# Therefore we use a simple retry mechanism.
missed_job_status_reports = 0
max_missed_job_status_reports = 10
# Keep polling as long as the driver is processing
while self._driver_status not in ["FINISHED", "UNKNOWN",
"KILLED", "FAILED", "ERROR"]:
# Sleep for 1 second as we do not want to spam the cluster
time.sleep(1)
self.log.debug("polling status of spark driver with id {}"
.format(self._driver_id))
poll_drive_status_cmd = self._build_track_driver_status_command()
status_process = subprocess.Popen(poll_drive_status_cmd,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
bufsize=-1,
universal_newlines=True)
self._process_spark_status_log(iter(status_process.stdout.readline, ''))
returncode = status_process.wait()
if returncode:
if missed_job_status_reports < max_missed_job_status_reports:
missed_job_status_reports = missed_job_status_reports + 1
else:
raise AirflowException(
"Failed to poll for the driver status {} times: returncode = {}"
.format(max_missed_job_status_reports, returncode)
)
def _build_spark_driver_kill_command(self):
"""
Construct the spark-submit command to kill a driver.
:return: full command to kill a driver
"""
# If the spark_home is passed then build the spark-submit executable path using
# the spark_home; otherwise assume that spark-submit is present in the path to
# the executing user
if self._connection['spark_home']:
connection_cmd = [os.path.join(self._connection['spark_home'],
'bin',
self._connection['spark_binary'])]
else:
connection_cmd = [self._connection['spark_binary']]
        # The URL of the Spark master
connection_cmd += ["--master", self._connection['master']]
# The actual kill command
connection_cmd += ["--kill", self._driver_id]
self.log.debug("Spark-Kill cmd: %s", connection_cmd)
return connection_cmd
def on_kill(self):
self.log.debug("Kill Command is being called")
if self._should_track_driver_status:
if self._driver_id:
self.log.info('Killing driver {} on cluster'
.format(self._driver_id))
kill_cmd = self._build_spark_driver_kill_command()
driver_kill = subprocess.Popen(kill_cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.log.info("Spark driver {} killed with return code: {}"
.format(self._driver_id, driver_kill.wait()))
if self._submit_sp and self._submit_sp.poll() is None:
self.log.info('Sending kill signal to %s', self._connection['spark_binary'])
self._submit_sp.kill()
if self._yarn_application_id:
self.log.info('Killing application {} on YARN'
.format(self._yarn_application_id))
kill_cmd = "yarn application -kill {}" \
.format(self._yarn_application_id).split()
yarn_kill = subprocess.Popen(kill_cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.log.info("YARN killed with return code: %s", yarn_kill.wait())
if self._kubernetes_driver_pod:
self.log.info('Killing pod %s on Kubernetes', self._kubernetes_driver_pod)
# Currently only instantiate Kubernetes client for killing a spark pod.
try:
client = kube_client.get_kube_client()
api_response = client.delete_namespaced_pod(
self._kubernetes_driver_pod,
self._connection['namespace'],
body=client.V1DeleteOptions(),
pretty=True)
self.log.info("Spark on K8s killed with response: %s", api_response)
except kube_client.ApiException as e:
self.log.info("Exception when attempting to kill Spark on K8s:")
self.log.exception(e)
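# Hedged usage sketch (not part of the original module): driving the hook
# directly, e.g. from an ad-hoc script; in Airflow this is normally wrapped by
# SparkSubmitOperator. The connection id and application path are assumptions
# for illustration.
#
#     hook = SparkSubmitHook(
#         conn_id='spark_default',
#         conf={'spark.executor.instances': '2'},
#         executor_memory='2G',
#         name='example-job',
#         verbose=True,
#     )
#     hook.submit(application='/path/to/app.py',
#                 cwd='/tmp')  # extra kwargs are passed straight to Popen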
|
|
"""HDXObject abstract class containing helper functions for creating, checking, and updating HDX objects.
New HDX objects should extend this in similar fashion to Resource for example.
"""
import copy
import logging
from abc import ABC, abstractmethod
from collections import UserDict
from typing import Any, Dict, List, Optional, Tuple, Union
from ckanapi.errors import NotFound
from hdx.utilities.dictandlist import merge_two_dictionaries
from hdx.utilities.loader import (
load_json_into_existing_dict,
load_yaml_into_existing_dict,
)
from hdx.api.configuration import Configuration
logger = logging.getLogger(__name__)
class HDXError(Exception):
pass
class HDXObject(UserDict, ABC):
"""HDXObject abstract class containing helper functions for creating, checking, and updating HDX objects.
New HDX objects should extend this in similar fashion to Resource for example.
Args:
initial_data (Dict): Initial metadata dictionary
configuration (Optional[Configuration]): HDX configuration. Defaults to global configuration.
"""
@staticmethod
@abstractmethod
def actions() -> Dict[str, str]:
"""Dictionary of actions that can be performed on object
Returns:
Dict[str, str]: Dictionary of actions that can be performed on object
"""
def __init__(
self, initial_data: Dict, configuration: Optional[Configuration] = None
) -> None:
self.old_data = None
if configuration is None:
self.configuration: Configuration = Configuration.read()
else:
self.configuration: Configuration = configuration
super().__init__(initial_data)
    def get_old_data_dict(self) -> Dict:
"""Get previous internal dictionary
Returns:
dict: Previous internal dictionary
"""
return self.old_data
def update_from_yaml(self, path: str) -> None:
"""Update metadata with static metadata from YAML file
Args:
path (str): Path to YAML dataset metadata
Returns:
None
"""
self.data = load_yaml_into_existing_dict(self.data, path)
def update_from_json(self, path: str) -> None:
"""Update metadata with static metadata from JSON file
Args:
path (str): Path to JSON dataset metadata
Returns:
None
"""
self.data = load_json_into_existing_dict(self.data, path)
def _read_from_hdx(
self,
object_type: str,
value: str,
fieldname: str = "id",
action: Optional[str] = None,
**kwargs: Any,
) -> Tuple[bool, Union[Dict, str]]:
"""Makes a read call to HDX passing in given parameter.
Args:
object_type (str): Description of HDX object type (for messages)
value (str): Value of HDX field
fieldname (str): HDX field name. Defaults to id.
action (Optional[str]): Replacement CKAN action url to use. Defaults to None.
**kwargs: Other fields to pass to CKAN.
Returns:
Tuple[bool, Union[Dict, str]]: (True/False, HDX object metadata/Error)
"""
if not fieldname:
raise HDXError(f"Empty {object_type} field name!")
if action is None:
action = self.actions()["show"]
data = {fieldname: value}
data.update(kwargs)
try:
result = self.configuration.call_remoteckan(action, data)
return True, result
except NotFound:
return False, f"{fieldname}={value}: not found!"
except Exception as e:
raise HDXError(
f"Failed when trying to read: {fieldname}={value}! (POST)"
) from e
def _load_from_hdx(self, object_type: str, id_field: str) -> bool:
"""Helper method to load the HDX object given by identifier from HDX
Args:
object_type (str): Description of HDX object type (for messages)
id_field (str): HDX object identifier
Returns:
bool: True if loaded, False if not
"""
success, result = self._read_from_hdx(object_type, id_field)
if success:
self.old_data = self.data
self.data = result
return True
logger.debug(result)
return False
@staticmethod
@abstractmethod
def read_from_hdx(
id_field: str, configuration: Optional[Configuration] = None
) -> Optional["HDXObject"]:
"""Abstract method to read the HDX object given by identifier from HDX and return it
Args:
id_field (str): HDX object identifier
configuration (Optional[Configuration]): HDX configuration. Defaults to global configuration.
Returns:
Optional[HDXObject]: HDX object if successful read, None if not
"""
@classmethod
def _read_from_hdx_class(
cls,
object_type: str,
identifier: str,
configuration: Optional[Configuration] = None,
) -> Optional["HDXObject"]:
"""Reads the HDX object given by identifier from HDX and returns it
Args:
object_type (str): Description of HDX object type (for messages)
identifier (str): Identifier
configuration (Optional[Configuration]): HDX configuration. Defaults to global configuration.
Returns:
Optional[HDXObject]: HDX object if successful read, None if not
"""
hdxobject = cls(configuration=configuration)
result = hdxobject._load_from_hdx(object_type, identifier)
if result:
return hdxobject
return None
def _check_existing_object(
self, object_type: str, id_field_name: str
) -> None:
if not self.data:
raise HDXError(f"No data in {object_type}!")
if id_field_name not in self.data:
raise HDXError(
f"No {id_field_name} field (mandatory) in {object_type}!"
)
def _check_load_existing_object(
self, object_type: str, id_field_name: str, operation: str = "update"
) -> None:
"""Check metadata exists and contains HDX object identifier, and if so load HDX object
Args:
object_type (str): Description of HDX object type (for messages)
id_field_name (str): Name of field containing HDX object identifier
operation (str): Operation to report if error. Defaults to update.
Returns:
None
"""
self._check_existing_object(object_type, id_field_name)
if not self._load_from_hdx(object_type, self.data[id_field_name]):
raise HDXError(f"No existing {object_type} to {operation}!")
@abstractmethod
def check_required_fields(self, ignore_fields: List[str] = list()) -> None:
"""Abstract method to check that metadata for HDX object is complete. The parameter ignore_fields should
be set if required to any fields that should be ignored for the particular operation.
Args:
ignore_fields (List[str]): Fields to ignore. Default is [].
Returns:
None
"""
def _check_required_fields(
self, object_type: str, ignore_fields: List[str]
) -> None:
"""Helper method to check that metadata for HDX object is complete
Args:
ignore_fields (List[str]): Any fields to ignore in the check
Returns:
None
"""
for field in self.configuration[object_type]["required_fields"]:
if field not in ignore_fields:
if field not in self.data:
raise HDXError(
f"Field {field} is missing in {object_type}!"
)
if not self.data[field] and not isinstance(
self.data[field], bool
):
raise HDXError(f"Field {field} is empty in {object_type}!")
def _check_kwargs_fields(self, object_type: str, **kwargs: Any) -> None:
"""Helper method to check kwargs and set fields appropriately and to check metadata fields unless it is
specified not to do so.
Args:
object_type (str): Description of HDX object type (for messages)
**kwargs: See below
ignore_field (str): Any field to ignore when checking dataset metadata. Defaults to None.
Returns:
None
"""
if (
"batch_mode" in kwargs
): # Whether or not CKAN should change groupings of datasets on /datasets page
self.data["batch_mode"] = kwargs["batch_mode"]
if (
"skip_validation" in kwargs
): # Whether or not CKAN should perform validation steps (checking fields present)
self.data["skip_validation"] = kwargs["skip_validation"]
if "ignore_check" not in kwargs or not kwargs.get(
"ignore_check"
): # allow ignoring of field checks
ignore_fields = kwargs.get("ignore_fields", list())
ignore_field = self.configuration[object_type].get(
"ignore_on_update"
)
if ignore_field and ignore_field not in ignore_fields:
ignore_fields.append(ignore_field)
ignore_field = kwargs.get("ignore_field")
if ignore_field and ignore_field not in ignore_fields:
ignore_fields.append(ignore_field)
self.check_required_fields(ignore_fields=ignore_fields)
def _hdx_update(
self,
object_type: str,
id_field_name: str,
files_to_upload: Dict = dict(),
force_active: bool = False,
**kwargs: Any,
) -> None:
"""Helper method to update HDX object
Args:
object_type (str): Description of HDX object type (for messages)
id_field_name (str): Name of field containing HDX object identifier
files_to_upload (Dict): Files to upload to HDX
force_active (bool): Make object state active. Defaults to False.
**kwargs: See below
operation (str): Operation to perform eg. patch. Defaults to update.
ignore_field (str): Any field to ignore when checking dataset metadata. Defaults to None.
Returns:
None
"""
self._check_kwargs_fields(object_type, **kwargs)
operation = kwargs.get("operation", "update")
self._save_to_hdx(
operation, id_field_name, files_to_upload, force_active
)
def _merge_hdx_update(
self,
object_type: str,
id_field_name: str,
files_to_upload: Dict = dict(),
force_active: bool = False,
**kwargs: Any,
) -> None:
"""Helper method to check if HDX object exists and update it
Args:
object_type (str): Description of HDX object type (for messages)
id_field_name (str): Name of field containing HDX object identifier
files_to_upload (Dict): Files to upload to HDX
force_active (bool): Make object state active. Defaults to False.
**kwargs: See below
operation (str): Operation to perform eg. patch. Defaults to update.
ignore_field (str): Any field to ignore when checking dataset metadata. Defaults to None.
Returns:
None
"""
merge_two_dictionaries(self.data, self.old_data)
self._hdx_update(
object_type,
id_field_name,
files_to_upload=files_to_upload,
force_active=force_active,
**kwargs,
)
@abstractmethod
def update_in_hdx(self) -> None:
"""Abstract method to check if HDX object exists in HDX and if so, update it
Returns:
None
"""
def _update_in_hdx(
self,
object_type: str,
id_field_name: str,
files_to_upload: Dict = dict(),
force_active: bool = True,
**kwargs: Any,
) -> None:
"""Helper method to check if HDX object exists in HDX and if so, update it
Args:
object_type (str): Description of HDX object type (for messages)
id_field_name (str): Name of field containing HDX object identifier
files_to_upload (Dict): Files to upload to HDX
force_active (bool): Make object state active. Defaults to True.
**kwargs: See below
operation (str): Operation to perform eg. patch. Defaults to update.
ignore_field (str): Any field to ignore when checking dataset metadata. Defaults to None.
Returns:
None
"""
self._check_load_existing_object(object_type, id_field_name)
# We load an existing object even though it may well have been loaded already
# to prevent an admittedly unlikely race condition where someone has updated
# the object in the intervening time
self._merge_hdx_update(
object_type,
id_field_name,
files_to_upload,
force_active=force_active,
**kwargs,
)
def _write_to_hdx(
self,
action: str,
data: Dict,
        id_field_name: Optional[str] = None,
files_to_upload: Dict = dict(),
) -> Union[Dict, List]:
"""Creates or updates an HDX object in HDX and return HDX object metadata dict
Args:
action (str): Action to perform eg. 'create', 'update'
data (Dict): Data to write to HDX
id_field_name (Optional[str]): Name of field containing HDX object identifier. Defaults to None.
files_to_upload (Dict): Files to upload to HDX
Returns:
Union[Dict,List]: HDX object metadata
"""
try:
for key, value in files_to_upload.items():
files_to_upload[key] = open(value, "rb")
return self.configuration.call_remoteckan(
self.actions()[action], data, files=files_to_upload
)
except Exception as e:
if id_field_name:
idstr = f" {data[id_field_name]}"
else:
idstr = ""
raise HDXError(
f"Failed when trying to {action}{idstr}! (POST)"
) from e
finally:
for file in files_to_upload.values():
if isinstance(file, str):
continue
file.close()
def _save_to_hdx(
self,
action: str,
id_field_name: str,
files_to_upload: Dict = dict(),
force_active: bool = False,
) -> None:
"""Creates or updates an HDX object in HDX, saving current data and replacing with returned HDX object data
from HDX
Args:
action (str): Action to perform: 'create' or 'update'
id_field_name (str): Name of field containing HDX object identifier
files_to_upload (Dict): Files to upload to HDX
force_active (bool): Make object state active. Defaults to False.
Returns:
None
"""
if force_active:
self.data["state"] = "active"
result = self._write_to_hdx(
action, self.data, id_field_name, files_to_upload
)
self.old_data = self.data
self.data = result
@abstractmethod
def create_in_hdx(self) -> None:
"""Abstract method to check if resource exists in HDX and if so, update it, otherwise create it
Returns:
None
"""
def _create_in_hdx(
self,
object_type: str,
id_field_name: str,
name_field_name: str,
files_to_upload: Dict = dict(),
force_active: bool = True,
**kwargs: Any,
) -> None:
"""Helper method to check if resource exists in HDX and if so, update it, otherwise create it
Args:
object_type (str): Description of HDX object type (for messages)
id_field_name (str): Name of field containing HDX object identifier
name_field_name (str): Name of field containing HDX object name
files_to_upload (Dict): Files to upload to HDX
force_active (bool): Make object state active. Defaults to True.
Returns:
None
"""
if "ignore_check" not in kwargs: # allow ignoring of field checks
self.check_required_fields()
if id_field_name in self.data and self._load_from_hdx(
object_type, self.data[id_field_name]
):
logger.warning(
f"{object_type} exists. Updating {self.data[id_field_name]}"
)
self._merge_hdx_update(
object_type,
id_field_name,
files_to_upload,
force_active,
**kwargs,
)
else:
self._save_to_hdx(
"create", name_field_name, files_to_upload, force_active
)
@abstractmethod
    def delete_from_hdx(self) -> None:
        """Abstract method to delete a resource from HDX
Returns:
None
"""
    def _delete_from_hdx(self, object_type: str, id_field_name: str) -> None:
        """Helper method to delete a resource from HDX
Args:
object_type (str): Description of HDX object type (for messages)
id_field_name (str): Name of field containing HDX object identifier
Returns:
None
"""
if id_field_name not in self.data:
raise HDXError(
f"No {id_field_name} field (mandatory) in {object_type}!"
)
self._save_to_hdx("delete", id_field_name)
@classmethod
def _autocomplete(
cls,
name: str,
limit: int = 20,
configuration: Optional[Configuration] = None,
**kwargs: Any,
) -> List:
"""Helper method to autocomplete a name and return matches
Args:
name (str): Name to autocomplete
limit (int): Maximum number of matches to return
configuration (Optional[Configuration]): HDX configuration. Defaults to global configuration.
**kwargs:
offset (int): The offset to start returning tags from.
Returns:
List: Autocomplete matches
"""
hdxobject = cls(configuration=configuration)
data = {"q": name, "limit": limit}
data.update(kwargs)
return hdxobject._write_to_hdx("autocomplete", data)
def _addupdate_hdxobject(
self,
hdxobjects: List["HDXObject"],
id_field: str,
new_hdxobject: "HDXObject",
) -> "HDXObject":
"""Helper function to add a new HDX object to a supplied list of HDX objects or update existing metadata if the object
already exists in the list
Args:
hdxobjects (List[HDXObject]): list of HDX objects to which to add new objects or update existing ones
id_field (str): Field on which to match to determine if object already exists in list
new_hdxobject (HDXObject): The HDX object to be added/updated
Returns:
HDXObject: The HDX object which was added or updated
"""
for hdxobject in hdxobjects:
if hdxobject[id_field] == new_hdxobject[id_field]:
merge_two_dictionaries(hdxobject, new_hdxobject)
return hdxobject
hdxobjects.append(new_hdxobject)
return new_hdxobject
def _remove_hdxobject(
self,
objlist: List[Union["HDXObject", Dict]],
obj: Union["HDXObject", Dict, str],
matchon: str = "id",
delete: bool = False,
) -> bool:
"""Remove an HDX object from a list within the parent HDX object
Args:
objlist (List[Union[HDXObject,Dict]]): list of HDX objects
obj (Union[HDXObject,Dict,str]): Either an id or hdx object metadata either from an HDX object or a dictionary
matchon (str): Field to match on. Defaults to id.
delete (bool): Whether to delete HDX object. Defaults to False.
Returns:
bool: True if object removed, False if not
"""
if objlist is None:
return False
if isinstance(obj, str):
obj_id = obj
elif isinstance(obj, dict) or isinstance(obj, HDXObject):
obj_id = obj.get(matchon)
else:
raise HDXError("Type of object not a string, dict or T<=HDXObject")
if not obj_id:
return False
for i, objdata in enumerate(objlist):
objid = objdata.get(matchon)
if objid and objid == obj_id:
if delete:
objlist[i].delete_from_hdx()
del objlist[i]
return True
return False
def _convert_hdxobjects(self, hdxobjects: List["HDXObject"]) -> List[Dict]:
"""Helper function to convert supplied list of HDX objects to a list of dict
Args:
hdxobjects (List[HDXObject]): List of HDX objects to convert
Returns:
List[Dict]: List of HDX objects converted to simple dictionaries
"""
newhdxobjects = list()
for hdxobject in hdxobjects:
newhdxobjects.append(hdxobject.data)
return newhdxobjects
def _copy_hdxobjects(
self,
hdxobjects: List["HDXObject"],
hdxobjectclass: type,
attribute_to_copy: Optional[str] = None,
) -> List["HDXObject"]:
"""Helper function to make a deep copy of a supplied list of HDX objects
Args:
hdxobjects (List[HDXObject]): list of HDX objects to copy
hdxobjectclass (type): Type of the HDX Objects to be copied
attribute_to_copy (Optional[str]): An attribute to copy over from the HDX object. Defaults to None.
Returns:
List[HDXObject]: Deep copy of list of HDX objects
"""
newhdxobjects = list()
for hdxobject in hdxobjects:
newhdxobjectdata = copy.deepcopy(hdxobject.data)
newhdxobject = hdxobjectclass(
newhdxobjectdata, configuration=self.configuration
)
if attribute_to_copy:
value = getattr(hdxobject, attribute_to_copy)
setattr(newhdxobject, attribute_to_copy, value)
newhdxobjects.append(newhdxobject)
return newhdxobjects
def _separate_hdxobjects(
self,
hdxobjects: List["HDXObject"],
hdxobjects_name: str,
id_field: str,
hdxobjectclass: type,
) -> None:
"""Helper function to take a list of HDX objects contained in the internal dictionary and add them to a
supplied list of HDX objects or update existing metadata if any objects already exist in the list. The list in
the internal dictionary is then deleted.
Args:
hdxobjects (List[HDXObject]): list of HDX objects to which to add new objects or update existing ones
hdxobjects_name (str): Name of key in internal dictionary from which to obtain list of HDX objects
id_field (str): Field on which to match to determine if object already exists in list
hdxobjectclass (type): Type of the HDX Object to be added/updated
Returns:
None
"""
new_hdxobjects = self.data.get(hdxobjects_name, list())
""":type : List["HDXObject"]"""
if new_hdxobjects:
hdxobject_names = set()
for hdxobject in hdxobjects:
hdxobject_name = hdxobject[id_field]
hdxobject_names.add(hdxobject_name)
for new_hdxobject in new_hdxobjects:
if hdxobject_name == new_hdxobject[id_field]:
merge_two_dictionaries(hdxobject, new_hdxobject)
break
for new_hdxobject in new_hdxobjects:
if new_hdxobject[id_field] not in hdxobject_names:
hdxobjects.append(
hdxobjectclass(
new_hdxobject, configuration=self.configuration
)
)
del self.data[hdxobjects_name]
def _get_tags(self) -> List[str]:
"""Return the dataset's list of tags
Returns:
List[str]: list of tags or [] if there are none
"""
tags = self.data.get("tags", None)
if not tags:
return list()
return [x["name"] for x in tags]
def _add_tag(self, tag: str, vocabulary_id: Optional[str] = None) -> bool:
"""Add a tag
Args:
tag (str): Tag to add
vocabulary_id (Optional[str]): Vocabulary tag is in. Defaults to None.
Returns:
bool: True if tag added or False if tag already present
"""
tag = tag.lower()
tags = self.data.get("tags", None)
if tags:
if tag in [x["name"] for x in tags]:
return False
else:
tags = list()
tagdict = {"name": tag}
if vocabulary_id is not None:
tagdict["vocabulary_id"] = vocabulary_id
tags.append(tagdict)
self.data["tags"] = tags
return True
def _add_tags(
self, tags: List[str], vocabulary_id: Optional[str] = None
    ) -> List[str]:
        """Add a list of tags
Args:
tags (List[str]): list of tags to add
vocabulary_id (Optional[str]): Vocabulary tag is in. Defaults to None.
Returns:
List[str]: Tags that were successfully added
"""
added_tags = list()
for tag in tags:
if self._add_tag(tag, vocabulary_id=vocabulary_id):
added_tags.append(tag)
return added_tags
def _get_stringlist_from_commastring(self, field: str) -> List[str]:
"""Return list of strings from comma separated list
Args:
field (str): Field containing comma separated list
Returns:
List[str]: List of strings
"""
strings = self.data.get(field)
if strings:
return strings.split(",")
else:
return list()
def _add_string_to_commastring(self, field: str, string: str) -> bool:
"""Add a string to a comma separated list of strings
Args:
field (str): Field containing comma separated list
string (str): String to add
Returns:
bool: True if string added or False if string already present
"""
if string in self._get_stringlist_from_commastring(field):
return False
strings = f"{self.data.get(field, '')},{string}"
if strings[0] == ",":
strings = strings[1:]
self.data[field] = strings
return True
def _add_strings_to_commastring(
self, field: str, strings: List[str]
) -> bool:
"""Add a list of strings to a comma separated list of strings
Args:
field (str): Field containing comma separated list
strings (List[str]): list of strings to add
Returns:
bool: True if all strings added or False if any already present.
"""
allstringsadded = True
for string in strings:
if not self._add_string_to_commastring(field, string):
allstringsadded = False
return allstringsadded
def _remove_string_from_commastring(self, field: str, string: str) -> bool:
"""Remove a string from a comma separated list of strings
Args:
field (str): Field containing comma separated list
string (str): String to remove
Returns:
bool: True if string removed or False if not
"""
commastring = self.data.get(field, "")
if string in commastring:
self.data[field] = commastring.replace(string, "")
return True
return False
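# Hedged sketch (not part of the original module): the minimal shape of a
# concrete subclass, in the spirit of Resource mentioned above. The object type
# "thing" and the CKAN action names are assumptions for illustration only.
#
#     class Thing(HDXObject):
#         def __init__(self, initial_data=None, configuration=None):
#             super().__init__(initial_data or dict(), configuration=configuration)
#
#         @staticmethod
#         def actions():
#             return {"show": "thing_show", "create": "thing_create",
#                     "update": "thing_update", "delete": "thing_delete"}
#
#         @staticmethod
#         def read_from_hdx(id_field, configuration=None):
#             return Thing._read_from_hdx_class("thing", id_field, configuration)
#
#         def check_required_fields(self, ignore_fields=list()):
#             self._check_required_fields("thing", ignore_fields)
#
#         def update_in_hdx(self, **kwargs):
#             self._update_in_hdx("thing", "id", **kwargs)
#
#         def create_in_hdx(self, **kwargs):
#             self._create_in_hdx("thing", "id", "name", **kwargs)
#
#         def delete_from_hdx(self):
#             self._delete_from_hdx("thing", "id")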
|
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Operators corresponding to Python builtin functions.
List of built-in functions: https://docs.python.org/3/library/functions.html
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
from tensorflow.python.autograph.utils import py_func
from tensorflow.python.autograph.utils import tensors
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_parsing_ops
from tensorflow.python.ops import gen_string_ops
from tensorflow.python.ops import list_ops
from tensorflow.python.ops import math_ops
UNDEFINED = object()
def overload_of(f):
if f in SUPPORTED_BUILTINS:
    return BUILTIN_FUNCTIONS_MAP[f.__name__]
return f
def abs_(x):
if tensor_util.is_tensor(x):
return _tf_abs(x)
return _py_abs(x)
def _tf_abs(x):
return math_ops.abs(x)
def _py_abs(x):
return abs(x)
def float_(x=0):
if tensor_util.is_tensor(x):
return _tf_float(x)
return _py_float(x)
def _tf_float(x):
# TODO(mdan): We shouldn't assume float32.
if x.dtype == dtypes.string:
return gen_parsing_ops.string_to_number(x, out_type=dtypes.float32)
return math_ops.cast(x, dtype=dtypes.float32)
def _py_float(x):
return float(x)
def int_(x=0, base=UNDEFINED):
if tensor_util.is_tensor(x):
return _tf_int(x, base)
return _py_int(x, base)
def _tf_int(x, base):
if base not in (10, UNDEFINED):
raise NotImplementedError('base {} not supported for int'.format(base))
# TODO(mdan): We shouldn't assume int32.
if x.dtype == dtypes.string:
return gen_parsing_ops.string_to_number(x, out_type=dtypes.int32)
return math_ops.cast(x, dtype=dtypes.int32)
def _py_int(x, base):
if base is UNDEFINED:
return int(x)
return int(x, base)
def len_(s):
if tensors.is_tensor_array(s):
return _tf_tensor_array_len(s)
elif tensors.is_tensor_list(s):
return _tf_tensor_list_len(s)
elif tensor_util.is_tensor(s):
return _tf_tensor_len(s)
return _py_len(s)
def _tf_tensor_array_len(s):
return s.size()
def _tf_tensor_list_len(s):
return list_ops.tensor_list_length(s)
def _tf_tensor_len(s):
"""Overload of len_ for Tensor arguments."""
# Statically shaped tensors: length is known ahead of time.
if s.shape.ndims and s.shape.dims[0].value is not None:
return s.shape.dims[0].value
# Static shape of unknown dimensions: use dynamic shape but statically
  # check that it's a scalar.
shape = array_ops.shape(s)
assert shape.shape, 'shape tensor of zero size? {}'.format(shape)
if shape.shape[0] == 0:
raise ValueError(
'len requires a non-scalar tensor, got one of shape {}'.format(shape))
if shape.shape.dims[0].value is not None:
return array_ops.shape(s)[0]
# Fully dynamic shape: use ops.
rank = array_ops.rank(s)
def raise_zero_rank_error():
msg = gen_string_ops.string_join(
['len requires non-zero rank, got ',
gen_string_ops.as_string(rank)])
with ops.control_dependencies([control_flow_ops.Assert(False, [msg])]):
return constant_op.constant(0, dtype=dtypes.int32)
return control_flow_ops.cond(rank > 0, lambda: array_ops.shape(s)[0],
raise_zero_rank_error)
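# Hedged illustration (not part of the original module) of the three paths taken
# by _tf_tensor_len above; the example tensor is an assumption for illustration.
#
#     len_(constant_op.constant([1, 2, 3]))  # fully static shape -> Python int 3
#     # first dimension unknown  -> returns the dynamic op array_ops.shape(s)[0]
#     # unknown rank             -> wrapped in a cond that asserts non-zero rank
#     #                             at runtime before taking shape(s)[0]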
def _py_len(s):
return len(s)
def print_(*objects, **kwargs):
# Note: Python 2.6 doesn't support explicit keywords after starargs.
unknown_kwargs = tuple(
set(kwargs.keys()) - set(('sep', 'end', 'file', 'flush')))
if unknown_kwargs:
raise ValueError('invalid keyword arguments: {}'.format(unknown_kwargs))
# TODO(mdan): use logging_ops.Print when py_func is not supported.
return _tf_py_func_print(objects, kwargs)
def _tf_py_func_print(objects, kwargs):
"""Overload of print_ as a py_func implementation."""
override_kwargs = {k: v for k, v in kwargs.items() if v is not UNDEFINED}
if 'flush' not in override_kwargs:
# Defaulting to flushing the console in graph mode, which helps reduce
# garbled output in IPython.
override_kwargs['flush'] = True
def print_wrapper(*vals):
if six.PY3:
# TensorFlow doesn't seem to generate Unicode when passing strings to
# py_func. This causes the print to add a "b'" wrapper to the output,
# which is probably never what you want.
vals = tuple(
v.decode('utf-8') if isinstance(v, bytes) else v for v in vals)
six.print_(*vals, **override_kwargs)
return py_func.wrap_py_func(
print_wrapper, None, objects, use_dummy_return=True)
def range_(start_or_stop, stop=UNDEFINED, step=UNDEFINED):
if any(tensor_util.is_tensor(s) for s in (start_or_stop, stop, step)):
return _tf_range(start_or_stop, stop, step)
return _py_range(start_or_stop, stop, step)
def _tf_range(start_or_stop, stop, step):
# Note: for static inputs (e.g. constants), tf.range errors out at graph
# construction time, instead of returning an empty tensor. Preventing the
# graph construction error aligns the semantics with Python.
# TODO(mdan): We should optimize this when a full tensor is not required.
if step is not UNDEFINED:
# TODO(mdan): Add argument coercion similar to other cases.
return math_ops.range(start_or_stop, stop, step)
if stop is not UNDEFINED:
stop = math_ops.maximum(start_or_stop, stop)
return math_ops.range(start_or_stop, stop)
start_or_stop = math_ops.maximum(start_or_stop, 0)
return math_ops.range(start_or_stop)
def _py_range(start_or_stop, stop, step):
if step is not UNDEFINED:
return range(start_or_stop, stop, step)
if stop is not UNDEFINED:
return range(start_or_stop, stop)
return range(start_or_stop)
SUPPORTED_BUILTINS = set((abs, float, int, len, print, range))
if six.PY2:
SUPPORTED_BUILTINS.add(xrange)
BUILTIN_FUNCTIONS_MAP = {
'abs': abs_,
'float': float_,
'int': int_,
'len': len_,
'print': print_,
'range': range_,
# TODO(mdan): This might make more sense as tf.data.range.
'xrange': range_,
}
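# Hedged usage sketch (not part of the original module): overload_of is the
# dispatch point AutoGraph uses when it rewrites calls to supported builtins;
# the tensor value below is an assumption for illustration.
#
#     fn = overload_of(len)                 # -> len_
#     fn([1, 2, 3])                         # plain Python path: 3
#     fn(constant_op.constant([1, 2, 3]))   # tensor path: static length 3
#     overload_of(sorted)                   # unsupported builtin: returned as-is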
|
|
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 14 08:53:04 2017
@author: e84605
"""
def bar_chart_line(value, maximum, rounding='middle', num_chars=30, value_char='#',
empty_char='_', name=None, name_width=None, name_align='right',
order=['bar', 'proportion', 'absolute'], proportion_decimals=3,
absolute_width=None, absolute_align='left', spaces=1):
"""
Function to return a string simply showing the relationship between a portion and
its whole.
Outputs something like:
- 'under_18 #######.......................'
or - ' under_18 #######.......................'
or - ' under_18 #######....................... .221 15432/69809'
The idea is to determine the maximum widths of the names and, if desired, absolute
values beforehand, and then call this function iterably so everything aligns.
Arguments:
:value: {int|float} The value that is part of a whole
:maximum: {int|float} The whole, which would result in a complete bar
:rounding: {'ceiling|floor|nearest|middle'} middle rounds always towards half the
maximum. This is useful to only ever show a totally blank or totally
complete line if the value == 0 or value == maximum
:num_chars: {int} number of characters in a complete bar
:value_char: {str} single string to use for bar
:empty_char: {str} single string to use to show length of maximum bar value
:name: {str} a name for the bar being shown
:name_width: {int} name will be padded out to this length, in order to
align bars
:name_align: {'right', 'left', 'center'} I like 'right'
:order: {list containing one to three of ['bar', 'proportion', 'absolute']}
The order in which to add items after the name. Any item not listed is
not included in the returned string. These items are:
- bar, e.g. '####......'
- proportion, from 0. to 1.
- absolute, <value>/<maximum> recapitulated arguments
:proportion_decimals: {int}, e.g. 3 gives '.667', '.000' or '1 ' (special case)
:absolute_width: {int}, width to align 'absolute' component
:absolute_align: {str} ['left', 'right', 'center'], self-explanatory
:spaces: {int} number of spaces to put between items
Exception:
AssertionError if unexpected argument
AssertionError if name_width < len(name)
Note: no AssertionError if absolute_width < len(absolute). For this reason,
it might be a good idea to put absolute last in order, if used.
Returns:
{str} without \n, containing concatenated [name|bar|proportion|absolute]
"""
def align_char(alignment):
"""
Returns the pyformat.info character to align text left, right or center.
"""
if alignment == 'left':
return ''
elif alignment == 'right':
return '>'
elif alignment == 'center':
return '^'
else:
raise ValueError("Must be 'left', 'center', or 'right', not '{}'".format(alignment))
from math import floor, ceil
assert value <= maximum
assert not (name_width is not None and name is None), "If name_width is specified, \
name must be specified too."
assert rounding in ['nearest', 'ceiling', 'floor', 'middle']
assert name_align in ['left', 'right', 'center']
assert set(order) in [{'absolute', 'bar', 'proportion'},
{'bar', 'proportion'},
{'absolute', 'bar'},
{'absolute', 'proportion'},
{'bar'},
{'proportion'},
{'absolute'}]
proportion = value / maximum
results = {}
if name is not None:
order = ['name'] + order
if name_width is None:
name_width = len(name)
assert name_width >= len(name), 'name_width must not be smaller than the name length'
align_string = '{{:{}{}}}'.format(align_char(name_align), name_width)
name = align_string.format(name)
results['name'] = name
if 'bar' in order:
num_chars_float = proportion * num_chars
if rounding == 'nearest':
num_chars_int = round(num_chars_float)
elif rounding == 'ceiling':
num_chars_int = ceil(num_chars_float)
elif rounding == 'floor':
num_chars_int = floor(num_chars_float)
else:
if value < (maximum / 2):
num_chars_int = ceil(num_chars_float)
else:
num_chars_int = floor(num_chars_float)
num_on = int(num_chars_int)
num_off = num_chars - num_on
results['bar'] = '{}{}'.format(value_char * num_on, empty_char * num_off)
if 'proportion' in order:
if round(proportion, proportion_decimals) == 1.:
results['proportion'] = '1{}'.format(' '*proportion_decimals)
else:
align_string='{{:.{}f}}'.format(proportion_decimals)
results['proportion'] = align_string.format(proportion)[1:]
if 'absolute' in order:
absolute = '{}/{}'.format(value, maximum)
if absolute_width is None or absolute_width < len(absolute): # will not raise
absolute_width = len(absolute) # an Exception
align_string = '{{:{}{}}}'.format(align_char(absolute_align), absolute_width)
results['absolute'] = align_string.format(absolute)
final = [results[x] for x in order]
spacing = ' '*spaces
return spacing.join(final)
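# Hedged usage example (not part of the original script), reproducing the kind
# of aligned output described in the docstring; the category names and counts
# are illustrative only.
#
#     rows = [('under_18', 15432), ('18_to_65', 48210), ('over_65', 6167)]
#     total = sum(n for _, n in rows)
#     width = max(len(name) for name, _ in rows)
#     for name, n in rows:
#         print(bar_chart_line(n, total, name=name, name_width=width,
#                              order=['bar', 'proportion', 'absolute']))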
def pdprofiler(df, subset=None, check_nan=True, cols=[], rec_types=True,
nan_count=True, unique_count=True, top_x=0, vc_nonan=False, sorter=False,
file_save=False, hist=0, explanatory_dict=None):
'''
This function profiles each column in a dataframe, and prints formatted results
to the screen.
Returns: nothing. Prints a profile of each column in a pandas df.
Parameters:
- df: pandas df to profile
- subset: random subset that will be used, with replacement
- check_nan: if false, will not look for nans in data type profiling. Setting
false will speed up the program
- cols: empty list = profile all. Feed subset of columns as a list of strings
to limit the function to some columns.
- rec_types : if true, will give the variable types of each record, and print
bar graphs to represent distribution.
- nan_count : if true and nans present in col, returns nan proportion
as a bar graph
- unique_count: if true, returns proportion unique vals as bar graph
- top_x : if >0, prints top x values of a feature.
- vc_nonan : if true, prints the proportion of column (excluding nans)
that have this value next to the bar graph. Has no effect if top_x=0
- sorter: if true and top_x>0, returns not the top x highest values but the
first x alphabetically/numerically sorted values
- file_save: if true, saves "dtype_output.txt" which pipes output
- hist: if >0, gives a histogram with 90th/10th percentile, and x middle categories
'''
#THIS ASSUMES YOU COPY THE PDTOOLS DIRECTORY INTO THE FOLDER WHERE YOU ARE RUNNING THE PROGRAM
import pandas as pd
import numpy as np
import math
if file_save:
import sys
orig_stdout=sys.stdout
f=open("./dtype_output.txt", "w")
sys.stdout=f
def dtype_column_profiler(df, col, subset_c=None, check_nan=True):
"""
This function profiles columns in a pandas dataframe to determine a more specific
datatype than the numpy profiler.
Returns: tuple (pandas datatype, specific datatype, hasnan, unique)
- pandas datatype can be int64, object, float64 or bool
- specific datatype can be 'true int', 'int-like', 'true float', 'float-like',
'true bool', 'bool-like', 'possibly bool-like', 'true string'
- hasnan is 0 or 1
- unique is an int = to number of unique values
Parameters:
- subset: random subset of data to use. Subsets with replacement. Subset
recommended since function has to loop through data to find nans
- df: pandas dataframe
- check_nan : False to cancel looping through to find nans hasnan will be 0
Datatype keys:
true-int -> pandas detects an int, has more than 2 unique values
int-like -> factor or float, but all values are integers
true bool -> all values are bool
bool-like -> factor, float or int, values are 1 and 0 or 1.0 and 0.0
possibly bool-like -> factor, float or int. 2 unique values
true string -> factor, strings. Catches mixed types as well
true float
float-like -> factor with float values
All of these are evaluated after NaNs are stripped
"""
pd_dt=str(df[col].dtype)
#subset=max(subset_c, len(df))
if(subset_c is None):
subset=len(df)
else:
subset=subset_c
        # sample without replacement when possible; fall back to sampling with
        # replacement if the requested subset exceeds the frame length
try:
rand_smp = df[[col]].sample(subset, replace=False).iloc[:,0].tolist()
except ValueError:
rand_smp = df[[col]].sample(subset, replace=True).iloc[:,0].tolist()
#check for nans
nan_check=0
if check_nan:
for e in rand_smp:
import math
try:
if math.isnan(float(e)):
nan_check=1
break
except ValueError:
pass
#int profiler - can be bool, possible bool (2 unique values) or int. No nans
if(pd_dt=='int64'):
#bool
myset=set(rand_smp)
if((myset == set([1, 0]))):
return (pd_dt, 'bool-like', nan_check, len(myset), 0)
#possible bool
elif((len(myset)==2)):
return (pd_dt, 'possibly bool-like', nan_check, len(myset), 0)
return (pd_dt, 'true int', nan_check, len(myset), 0)
elif(pd_dt=='bool'):
return (pd_dt, 'true bool', nan_check, 2, 0)
#String profiler - can be int, float, bool or string, all with/without nan
elif(pd_dt == 'object'):
#int, float and bool with nan
if nan_check==1 :
import math
rand_smp_noNaN=[]
#build list of non nan elements
for e in rand_smp:
try:
if(not math.isnan(e)):
rand_smp_noNaN+=[e]
except TypeError:
rand_smp_noNaN+=[e]
num_nan=len(rand_smp)-len(rand_smp_noNaN)
#bool with nan check
myset=set(rand_smp_noNaN)
if((myset == set(['1', '0'])) | (myset == set(['1.0', '0.0']))):
return (pd_dt, 'bool-like', nan_check, len(myset), num_nan)
#int with nan check
try:
rand_smp_int_noNaN=[int(e) for e in rand_smp_noNaN]
if(rand_smp_int_noNaN == rand_smp_noNaN):
return (pd_dt, 'int-like', nan_check, len(myset), num_nan)
except ValueError:
pass
#float with nan
try:
rand_smp_float_noNaN=[float(e) for e in rand_smp_noNaN]
if(rand_smp_float_noNaN == rand_smp_noNaN):
return (pd_dt, 'float-like', nan_check, len(myset), num_nan)
except ValueError:
pass
#possible bool with nan check. If 2 or 1 elements
if(len(set(rand_smp_noNaN)) in [1,2] ):
return (pd_dt, 'possibly bool-like', nan_check, len(myset), num_nan)
#true string with nan
return (pd_dt, 'true string', nan_check, len(myset), num_nan)
#int, float, bool nonan
#bool detection
myset=set(rand_smp)
if((myset == set(['1', '0'])) | (myset == set(['1.0', '0.0']))):
return (pd_dt, 'bool-like', nan_check, len(myset), 0)
elif (len(myset)==2):
return (pd_dt, 'possibly bool-like',nan_check, len(myset), 0)
#int detection
try:
rand_smp_int = [str(int(e)) for e in rand_smp]
if(rand_smp_int == rand_smp):
return (pd_dt,'int-like',nan_check, len(myset), 0)
except ValueError:
pass
#float detection
try:
rand_smp_float = [str(float(e)) for e in rand_smp]
if(rand_smp_float == rand_smp):
return (pd_dt,'float-like',nan_check, len(myset), 0)
except ValueError as e:
pass
#true string nonan
return (pd_dt, 'true string', nan_check, len(myset), 0)
#float profiler - can be int, bool, bool-like or float, all with/without nan
else:
#int and bool with nan
if nan_check==1 :
import math
rand_smp_noNaN=[e for e in rand_smp if not math.isnan(e)]
num_nan=len(rand_smp)-len(rand_smp_noNaN)
#bool with nan check
myset=set(rand_smp_noNaN)
if(myset == set([1.0, 0.0])):
return (pd_dt, 'bool-like', nan_check, len(myset), num_nan)
#possible bool with nan check. If 2 or 1 elements
if(len(set(rand_smp_noNaN)) in [1,2]):
return (pd_dt, 'possibly bool-like', nan_check, len(myset), num_nan)
#int with nan check
rand_smp_int_noNaN=[float(int(e)) for e in rand_smp_noNaN]
if(rand_smp_int_noNaN == rand_smp_noNaN):
return (pd_dt, 'int-like', nan_check, len(myset), num_nan)
#float with nan
return (pd_dt, 'true float', nan_check, len(myset), num_nan)
#bool no nan
myset=set(rand_smp)
if((myset == set([1.0, 0.0]))):
return (pd_dt, 'bool-like', nan_check, len(myset), 0)
#possible bool no nan
elif((len(myset)==2)):
return (pd_dt, 'possibly bool-like', nan_check, len(myset), 0)
#int no nan
try:
rand_smp_int = [float(int(e)) for e in rand_smp]
if(rand_smp_int == rand_smp):
return (pd_dt, 'int-like', nan_check, len(myset), 0)
except ValueError:
pass
#float without nan
return (pd_dt, 'true float', nan_check, len(myset), 0)
def record_profiler(col):
"""
Takes as input a pandas series, returns a dict of dtype:value.
Possible dict keys are:
- bool-like string (T, True, F, False etc...)
- bool
- int
- int-like string
- int-like float
- float
- float-like string
- nan
- string
"""
types={}
for t in ['bool-like string', 'bool', 'int', 'int-like float', 'int-like string',
'float', 'float-like string', 'string', 'nan']:
types[t]=0
for e in col:
#bool. Note numbers are not counted as booleans
if (isinstance(e, bool)):
types['bool']+=1
#int
elif (isinstance(e, int)):
types['int']+=1
#float and float-like int
elif(isinstance(e, float)):
if(math.isnan(e)):
types['nan']+=1
elif(float(int(e))==e):
types['int-like float']+=1
else:
types['float']+=1
#string, float-like string, int-like string, bool-like string
elif(isinstance(e, str)):
next=0 #next variable to avoid double assigning as int-like and string
try:
if(e.lower() in ['t', 'true', 'f', 'false', 'yes', 'y', 'no', 'n']):
types['bool-like string']+=1
next=1
elif(str(int(e))==e):
types['int-like string']+=1
next=1
elif(str(float(e))==e):
types['float-like string']+=1
next=1
except ValueError:
types['string']+=1
next=1
if (next==0):
types['string']+=1
else:
try:
if(math.isnan(e)):
types['nan']+=1
except:
pass
return types
def distribute_weights(inputs, total=30):
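"""
Spread a fixed budget of bar characters (default 30) across the input counts:
any nonzero count below the proportional threshold (sum/total) gets a single
character, and the remaining characters are split proportionally among the
larger counts. Returns a dict mapping each input count to its character weight.
"""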
inp_sum=sum(inputs)
weight_remaining=total
normal_w_sum=0
sol={}
for e in inputs:
#if under th, set weight of 1
if ((e < (inp_sum/total)) & (e>0)):
sol[e]=1
weight_remaining-=1
#if over th, set weight of 0 for now, increment remainder sum
else:
sol[e]=0
normal_w_sum+=e
#remainder sums
for e in inputs:
if sol[e]==0:
sol[e]=round(e*(weight_remaining/normal_w_sum))
return sol
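#Worked example (illustrative): distribute_weights([50, 3, 1, 0], total=30)
#threshold is 54/30 = 1.8, so 1 gets a single character and 0 gets none;
#50 and 3 split the remaining 29 characters proportionally -> {50: 27, 3: 2, 1: 1, 0: 0}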
def value_counts_hist(df, col, n_bins, pareto=True):
"""Prints quick and dirty horizontal histograms of
value counts. If Pareto == True, returns in descending
numerical order, otherwise alphabetical. """
#get bins
bins=[df[col].min()-0.00001, df[col].quantile(0.1)]
for i in range(1,n_bins+1):
bins+=[df[col].quantile(0.1)+i*(df[col].quantile(0.9)-df[col].quantile(0.1))/(n_bins+1)]
bins+=[df[col].quantile(0.9), df[col].max()+0.00001]
#histogram index
try:
idx=["< "+str(round(bins[1],2))]
idx+=[str(round(bins[i],3))+" - "+str(round(bins[i+1],3)) for i in range(1, len(bins)-2)]
idx+=["> "+str(round(bins[len(bins)-2],3))]
x = pd.DataFrame(df[col].value_counts(bins=bins, sort=False))
x.index=idx
except ValueError:
print("histogram unnecessary - see value counts")
return
#pareto
if not pareto:
x.sort_index(inplace=True)
#rendering
x.columns = ['cnt']
x['pct'] = 0.0
x['bar'] = ''
for idx, row in x.iterrows():
pct = row.cnt*100.0/x.cnt.sum()
reps = 30*row.cnt//x.cnt.sum()
x.loc[idx, 'bar'] = '#' * reps + '_' * (30-reps)
x.loc[idx, 'pct'] = round(pct,1)
print(x)
print('')
return df
'''
Main function call. For each column, profiles the default datatype. Performs
additional operations depending on options, as specified in description above.
'''
i=1
if cols:
columns=cols
else:
columns=df.columns
#table print
for col in columns:
profile=dtype_column_profiler(df, col, subset, check_nan)
nanflag='none'
if(profile[2]==1):
nanflag="HAS NAN"
#header
print("COL ", i, "OF ", len(df.columns), ": ",col)
#nested data dict
if (explanatory_dict != None):
if(col in explanatory_dict):
for k, v in explanatory_dict[col].items():
print("{}:{}".format(k, v))
#basic types
print("BASICS: numpy dtype: ", profile[0], (8-len(profile[0]))*" ",
"| dtype: ", profile[1], (12-len(profile[1]))*' ',
"| flag: ", nanflag, ' '*(8-len(nanflag)),
"| non null: ", len(df)-sum(df[col].isnull()) ,
"\n")
#unique proportion
if(unique_count):
print("UNIQUE:"," "*10, barchartline.bar_chart_line(profile[3], len(df)))
#nan proportion
if((nan_count) & (nanflag=='HAS NAN')):
print("NAN COUNT: "," "*6, barchartline.bar_chart_line(sum(df[col].isnull()), len(df)))
#variable types breakdown
if((profile[0] in ['object', 'float64']) and (rec_types)):
print('DTYPES:')
dic=record_profiler(df[col])
weights=distribute_weights(dic.values())
spacer=0
#dict for dtypes
char_dt={'bool-like string':'b', 'bool':'B',
'int':'I','int-like float':'i', 'int-like string':'i',
'float':'F', 'float-like string':'f',
'string':'S',
'nan':'n'}
for key, value in dic.items():
if value > 0:
print(" ", key,(16-len(key)+spacer)*" " +
char_dt[key]*weights[value] +
"."*(30-weights[value]-spacer) +
" "*2)
spacer+=weights[value]
print()
#print top x values
if(top_x>0):
print('VALUE COUNTS :')
if sorter==False:
v_c=df[col].value_counts()
else:
v_c=df[col].value_counts().sort_index()
if(vc_nonan):
nonan_count=len(df)-sum(df[col].isnull())
for k in range(0,top_x):
try:
if(vc_nonan):
propor_nonan=round(v_c.iloc[k]/nonan_count,4)
else:
propor_nonan=''
print(v_c.index[k],":", (30-len(str(v_c.index[k])))*" ",
barchartline.bar_chart_line(value=v_c.loc[v_c.index[k]],
maximum=len(df))
, " |", propor_nonan)
#if top_x is overshooting - top_x=3 but 2 unique values
except IndexError:
break
print()
#histogram
if(hist>0 and profile[0] in ['float64', 'int64']):
print("DESCRIPTION:")
print(df[col].describe())
print()
print("HIST:")
value_counts_hist(df, col, hist)
#footer
print("-"*120)
i+=1
if file_save:
sys.stdout=orig_stdout
f.close()
if __name__=='__main__':
#code testing
import warnings
warnings.filterwarnings('ignore')
import barchartline
import pandas as pd
import numpy as np
import math
data=np.array([np.arange(10)]*3).T
index=range(10)
column=['a', 'b', 'c']
df=pd.DataFrame(data, index=index, columns=column)
df['a']=range(10)
df['a'].iloc[4]=6
df['b']='0'
df['b'].iloc[6:8]='1'
df['b'].iloc[1]=np.nan
df['c']=range(10)
df['c'].iloc[1]=1.6
df['c'].iloc[5]=np.nan
print(df)
dtype_profiler(df, hist=2, top_x=3, vc_nonan=True)
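#A second invocation sketch (not executed here): profile only selected columns,
#attach explanatory notes per column, and write everything to ./dtype_output.txt.
#The notes passed in explanatory_dict below are illustrative assumptions.
#dtype_profiler(df, cols=['a', 'b'], top_x=5, sorter=True, file_save=True,
#               explanatory_dict={'a': {'source': 'synthetic range'},
#                                 'b': {'source': 'mostly zeros with NaN'}})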
|
|
from ..Qt import QtGui, QtCore
from ..Point import Point
from .GraphicsObject import GraphicsObject
from .. import functions as fn
import numpy as np
import weakref
__all__ = ['InfiniteLine']
class InfiniteLine(GraphicsObject):
"""
**Bases:** :class:`GraphicsObject <pyqtgraph.GraphicsObject>`
Displays a line of infinite length.
This line may be dragged to indicate a position in data coordinates.
=============================== ===================================================
**Signals:**
sigDragged(self)
sigPositionChangeFinished(self)
sigPositionChanged(self)
=============================== ===================================================
"""
sigDragged = QtCore.Signal(object)
sigPositionChangeFinished = QtCore.Signal(object)
sigPositionChanged = QtCore.Signal(object)
def __init__(self, pos=None, angle=90, pen=None, movable=False, bounds=None):
"""
=============== ==================================================================
**Arguments:**
pos Position of the line. This can be a QPointF or a single value for
vertical/horizontal lines.
angle Angle of line in degrees. 0 is horizontal, 90 is vertical.
pen Pen to use when drawing line. Can be any arguments that are valid
for :func:`mkPen <pyqtgraph.mkPen>`. Default pen is transparent
yellow.
movable If True, the line can be dragged to a new position by the user.
bounds Optional [min, max] bounding values. Bounds are only valid if the
line is vertical or horizontal.
=============== ==================================================================
"""
GraphicsObject.__init__(self)
if bounds is None: ## allowed value boundaries for orthogonal lines
self.maxRange = [None, None]
else:
self.maxRange = bounds
self.moving = False
self.setMovable(movable)
self.mouseHovering = False
self.p = [0, 0]
self.setAngle(angle)
if pos is None:
pos = Point(0,0)
self.setPos(pos)
if pen is None:
pen = (200, 200, 100)
self.setPen(pen)
self.setHoverPen(color=(255,0,0), width=self.pen.width())
self.currentPen = self.pen
#self.setFlag(self.ItemSendsScenePositionChanges)
def setMovable(self, m):
"""Set whether the line is movable by the user."""
self.movable = m
self.setAcceptHoverEvents(m)
def setBounds(self, bounds):
"""Set the (minimum, maximum) allowable values when dragging."""
self.maxRange = bounds
self.setValue(self.value())
def setPen(self, *args, **kwargs):
"""Set the pen for drawing the line. Allowable arguments are any that are valid
for :func:`mkPen <pyqtgraph.mkPen>`."""
self.pen = fn.mkPen(*args, **kwargs)
if not self.mouseHovering:
self.currentPen = self.pen
self.update()
def setHoverPen(self, *args, **kwargs):
"""Set the pen for drawing the line while the mouse hovers over it.
Allowable arguments are any that are valid
for :func:`mkPen <pyqtgraph.mkPen>`.
If the line is not movable, then hovering is also disabled.
Added in version 0.9.9."""
self.hoverPen = fn.mkPen(*args, **kwargs)
if self.mouseHovering:
self.currentPen = self.hoverPen
self.update()
def setAngle(self, angle):
"""
Takes angle argument in degrees.
0 is horizontal; 90 is vertical.
Note that the use of value() and setValue() changes if the line is
not vertical or horizontal.
"""
self.angle = ((angle+45) % 180) - 45 ## -45 <= angle < 135
self.resetTransform()
self.setRotation(self.angle)
self.update()
def setPos(self, pos):
if type(pos) in [list, tuple]:
newPos = pos
elif isinstance(pos, QtCore.QPointF):
newPos = [pos.x(), pos.y()]
else:
if self.angle == 90:
newPos = [pos, 0]
elif self.angle == 0:
newPos = [0, pos]
else:
raise Exception("Must specify 2D coordinate for non-orthogonal lines.")
## check bounds (only works for orthogonal lines)
if self.angle == 90:
if self.maxRange[0] is not None:
newPos[0] = max(newPos[0], self.maxRange[0])
if self.maxRange[1] is not None:
newPos[0] = min(newPos[0], self.maxRange[1])
elif self.angle == 0:
if self.maxRange[0] is not None:
newPos[1] = max(newPos[1], self.maxRange[0])
if self.maxRange[1] is not None:
newPos[1] = min(newPos[1], self.maxRange[1])
if self.p != newPos:
self.p = newPos
GraphicsObject.setPos(self, Point(self.p))
self.update()
self.sigPositionChanged.emit(self)
def getXPos(self):
return self.p[0]
def getYPos(self):
return self.p[1]
def getPos(self):
return self.p
def value(self):
"""Return the value of the line. Will be a single number for horizontal and
vertical lines, and a list of [x,y] values for diagonal lines."""
if self.angle%180 == 0:
return self.getYPos()
elif self.angle%180 == 90:
return self.getXPos()
else:
return self.getPos()
def setValue(self, v):
"""Set the position of the line. If line is horizontal or vertical, v can be
a single value. Otherwise, a 2D coordinate must be specified (list, tuple and
QPointF are all acceptable)."""
self.setPos(v)
## broken in 4.7
#def itemChange(self, change, val):
#if change in [self.ItemScenePositionHasChanged, self.ItemSceneHasChanged]:
#self.updateLine()
#print "update", change
#print self.getBoundingParents()
#else:
#print "ignore", change
#return GraphicsObject.itemChange(self, change, val)
def boundingRect(self):
#br = UIGraphicsItem.boundingRect(self)
br = self.viewRect()
## add a 4-pixel radius around the line for mouse interaction.
px = self.pixelLength(direction=Point(1,0), ortho=True) ## get pixel length orthogonal to the line
if px is None:
px = 0
w = (max(4, self.pen.width()/2, self.hoverPen.width()/2)+1) * px
br.setBottom(-w)
br.setTop(w)
return br.normalized()
def paint(self, p, *args):
br = self.boundingRect()
p.setPen(self.currentPen)
p.drawLine(Point(br.right(), 0), Point(br.left(), 0))
#p.drawRect(self.boundingRect())
def dataBounds(self, axis, frac=1.0, orthoRange=None):
if axis == 0:
return None ## x axis should never be auto-scaled
else:
return (0,0)
def mouseDragEvent(self, ev):
if self.movable and ev.button() == QtCore.Qt.LeftButton:
if ev.isStart():
self.moving = True
self.cursorOffset = self.pos() - self.mapToParent(ev.buttonDownPos())
self.startPosition = self.pos()
ev.accept()
if not self.moving:
return
#pressDelta = self.mapToParent(ev.buttonDownPos()) - Point(self.p)
self.setPos(self.cursorOffset + self.mapToParent(ev.pos()))
self.sigDragged.emit(self)
if ev.isFinish():
self.moving = False
self.sigPositionChangeFinished.emit(self)
#else:
#print ev
def mouseClickEvent(self, ev):
if self.moving and ev.button() == QtCore.Qt.RightButton:
ev.accept()
self.setPos(self.startPosition)
self.moving = False
self.sigDragged.emit(self)
self.sigPositionChangeFinished.emit(self)
def hoverEvent(self, ev):
if (not ev.isExit()) and self.movable and ev.acceptDrags(QtCore.Qt.LeftButton):
self.setMouseHover(True)
else:
self.setMouseHover(False)
def setMouseHover(self, hover):
## Inform the item that the mouse is (not) hovering over it
if self.mouseHovering == hover:
return
self.mouseHovering = hover
if hover:
self.currentPen = self.hoverPen
else:
self.currentPen = self.pen
self.update()
#def hoverEnterEvent(self, ev):
#print "line hover enter"
#ev.ignore()
#self.updateHoverPen()
#def hoverMoveEvent(self, ev):
#print "line hover move"
#ev.ignore()
#self.updateHoverPen()
#def hoverLeaveEvent(self, ev):
#print "line hover leave"
#ev.ignore()
#self.updateHoverPen(False)
#def updateHoverPen(self, hover=None):
#if hover is None:
#scene = self.scene()
#hover = scene.claimEvent(self, QtCore.Qt.LeftButton, scene.Drag)
#if hover:
#self.currentPen = fn.mkPen(255, 0,0)
#else:
#self.currentPen = self.pen
#self.update()
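# --- Usage sketch (not part of the class above) ---
# A minimal example, assuming pyqtgraph is installed and importable as `pg`;
# it adds a movable vertical InfiniteLine to a plot and reacts to drag events.
def _infinite_line_usage_example():
    import pyqtgraph as pg
    app = pg.mkQApp()
    plot = pg.plot([1, 4, 2, 3, 5])            # quick PlotWindow with sample data
    line = pg.InfiniteLine(pos=2.0, angle=90,  # vertical line at x=2
                           movable=True, bounds=[0, 4])
    line.sigPositionChangeFinished.connect(
        lambda l: print("line released at x =", l.value()))
    plot.addItem(line)
    app.exec_()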
|
|
"""
A simple VTK widget for PyQt v5, the Qt v5 bindings for Python.
See http://www.trolltech.com for Qt documentation, and
http://www.riverbankcomputing.co.uk for PyQt.
This class is based on the vtkGenericRenderWindowInteractor and is
therefore fairly powerful. It should also play nicely with the
vtk3DWidget code.
Created by Prabhu Ramachandran, May 2002
Based on David Gobbi's QVTKRenderWidget.py
Changes by Gerard Vermeulen Feb. 2003
Win32 support.
Changes by Gerard Vermeulen, May 2003
Bug fixes and better integration with the Qt framework.
Changes by Phil Thompson, Nov. 2006
Ported to PyQt v4.
Added support for wheel events.
Changes by Phil Thompson, Oct. 2007
Bug fixes.
Changes by Phil Thompson, Mar. 2008
Added cursor support.
Changes by Rodrigo Mologni, Sep. 2013 (Credit to Daniele Esposti)
Bug fix to PySide: Converts PyCObject to void pointer.
Changes by Alex Tsui, Apr. 2015
Port from PyQt4 widget.
"""
try:
from PyQt5.QtWidgets import QWidget
from PyQt5.QtWidgets import QSizePolicy
from PyQt5.QtWidgets import QApplication
from PyQt5.QtCore import Qt
from PyQt5.QtCore import pyqtSignal
from PyQt5.QtCore import QTimer
from PyQt5.QtCore import QObject
from PyQt5.QtCore import QSize
from PyQt5.QtCore import QEvent
except ImportError:
raise ImportError("Cannot load PyQt5")
import vtk
class QVTKRenderWindowInteractor(QWidget):
""" A QVTKRenderWindowInteractor for Python and Qt. Uses a
vtkGenericRenderWindowInteractor to handle the interactions. Use
GetRenderWindow() to get the vtkRenderWindow. Create with the
keyword stereo=1 in order to generate a stereo-capable window.
The user interface is summarized in vtkInteractorStyle.h:
- Keypress j / Keypress t: toggle between joystick (position
sensitive) and trackball (motion sensitive) styles. In joystick
style, motion occurs continuously as long as a mouse button is
pressed. In trackball style, motion occurs when the mouse button
is pressed and the mouse pointer moves.
- Keypress c / Keypress o: toggle between camera and object
(actor) modes. In camera mode, mouse events affect the camera
position and focal point. In object mode, mouse events affect
the actor that is under the mouse pointer.
- Button 1: rotate the camera around its focal point (if camera
mode) or rotate the actor around its origin (if actor mode). The
rotation is in the direction defined from the center of the
renderer's viewport towards the mouse position. In joystick mode,
the magnitude of the rotation is determined by the distance the
mouse is from the center of the render window.
- Button 2: pan the camera (if camera mode) or translate the actor
(if object mode). In joystick mode, the direction of pan or
translation is from the center of the viewport towards the mouse
position. In trackball mode, the direction of motion is the
direction the mouse moves. (Note: with 2-button mice, pan is
defined as <Shift>-Button 1.)
- Button 3: zoom the camera (if camera mode) or scale the actor
(if object mode). Zoom in/increase scale if the mouse position is
in the top half of the viewport; zoom out/decrease scale if the
mouse position is in the bottom half. In joystick mode, the amount
of zoom is controlled by the distance of the mouse pointer from
the horizontal centerline of the window.
- Keypress 3: toggle the render window into and out of stereo
mode. By default, red-blue stereo pairs are created. Some systems
support Crystal Eyes LCD stereo glasses; you have to invoke
SetStereoTypeToCrystalEyes() on the rendering window. Note: to
use stereo you also need to pass a stereo=1 keyword argument to
the constructor.
- Keypress e: exit the application.
- Keypress f: fly to the picked point
- Keypress p: perform a pick operation. The render window interactor
has an internal instance of vtkCellPicker that it uses to pick.
- Keypress r: reset the camera view along the current view
direction. Centers the actors and moves the camera so that all actors
are visible.
- Keypress s: modify the representation of all actors so that they
are surfaces.
- Keypress u: invoke the user-defined function. Typically, this
keypress will bring up an interactor that you can type commands in.
- Keypress w: modify the representation of all actors so that they
are wireframe.
"""
# Map between VTK and Qt cursors.
_CURSOR_MAP = {
0: Qt.ArrowCursor, # VTK_CURSOR_DEFAULT
1: Qt.ArrowCursor, # VTK_CURSOR_ARROW
2: Qt.SizeBDiagCursor, # VTK_CURSOR_SIZENE
3: Qt.SizeFDiagCursor, # VTK_CURSOR_SIZENWSE
4: Qt.SizeBDiagCursor, # VTK_CURSOR_SIZESW
5: Qt.SizeFDiagCursor, # VTK_CURSOR_SIZESE
6: Qt.SizeVerCursor, # VTK_CURSOR_SIZENS
7: Qt.SizeHorCursor, # VTK_CURSOR_SIZEWE
8: Qt.SizeAllCursor, # VTK_CURSOR_SIZEALL
9: Qt.PointingHandCursor, # VTK_CURSOR_HAND
10: Qt.CrossCursor, # VTK_CURSOR_CROSSHAIR
}
def __init__(self, parent=None, wflags=Qt.WindowFlags(), **kw):
# the current button
self._ActiveButton = Qt.NoButton
# private attributes
self.__saveX = 0
self.__saveY = 0
self.__saveModifiers = Qt.NoModifier
self.__saveButtons = Qt.NoButton
# do special handling of some keywords:
# stereo, rw
stereo = 0
if 'stereo' in kw:
if kw['stereo']:
stereo = 1
rw = None
if 'rw' in kw:
rw = kw['rw']
# create qt-level widget
QWidget.__init__(self, parent, wflags|Qt.MSWindowsOwnDC)
if rw: # user-supplied render window
self._RenderWindow = rw
else:
self._RenderWindow = vtk.vtkRenderWindow()
WId = self.winId()
if type(WId).__name__ == 'PyCObject':
from ctypes import pythonapi, c_void_p, py_object
pythonapi.PyCObject_AsVoidPtr.restype = c_void_p
pythonapi.PyCObject_AsVoidPtr.argtypes = [py_object]
WId = pythonapi.PyCObject_AsVoidPtr(WId)
self._RenderWindow.SetWindowInfo(str(int(WId)))
if stereo: # stereo mode
self._RenderWindow.StereoCapableWindowOn()
self._RenderWindow.SetStereoTypeToCrystalEyes()
if 'iren' in kw:
self._Iren = kw['iren']
else:
self._Iren = vtk.vtkGenericRenderWindowInteractor()
self._Iren.SetRenderWindow(self._RenderWindow)
# do all the necessary qt setup
self.setAttribute(Qt.WA_OpaquePaintEvent)
self.setAttribute(Qt.WA_PaintOnScreen)
self.setMouseTracking(True) # get all mouse events
self.setFocusPolicy(Qt.WheelFocus)
self.setSizePolicy(QSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding))
self._Timer = QTimer(self)
self._Timer.timeout.connect(self.TimerEvent)
self._Iren.AddObserver('CreateTimerEvent', self.CreateTimer)
self._Iren.AddObserver('DestroyTimerEvent', self.DestroyTimer)
self._Iren.GetRenderWindow().AddObserver('CursorChangedEvent',
self.CursorChangedEvent)
#Create a hidden child widget and connect its destroyed signal to its
#parent's ``Finalize`` slot. The hidden child will be destroyed before
#its parent, thus allowing cleanup of VTK elements.
#self._hidden = QtGui.QWidget(self)
self._hidden = QWidget(self)
self._hidden.hide()
self._hidden.destroyed.connect(self.Finalize)
def __getattr__(self, attr):
"""Makes the object behave like a vtkGenericRenderWindowInteractor"""
if attr == '__vtk__':
return lambda t=self._Iren: t
elif hasattr(self._Iren, attr):
return getattr(self._Iren, attr)
else:
raise AttributeError(self.__class__.__name__ + \
" has no attribute named " + attr)
def Finalize(self):
'''
Call internal cleanup method on VTK objects
'''
self._RenderWindow.Finalize()
def CreateTimer(self, obj, evt):
self._Timer.start(10)
def DestroyTimer(self, obj, evt):
self._Timer.stop()
return 1
def TimerEvent(self):
self._Iren.TimerEvent()
def CursorChangedEvent(self, obj, evt):
"""Called when the CursorChangedEvent fires on the render window."""
# This indirection is needed since when the event fires, the current
# cursor is not yet set so we defer this by which time the current
# cursor should have been set.
QTimer.singleShot(0, self.ShowCursor)
def HideCursor(self):
"""Hides the cursor."""
self.setCursor(Qt.BlankCursor)
def ShowCursor(self):
"""Shows the cursor."""
vtk_cursor = self._Iren.GetRenderWindow().GetCurrentCursor()
qt_cursor = self._CURSOR_MAP.get(vtk_cursor, Qt.ArrowCursor)
self.setCursor(qt_cursor)
def closeEvent(self, evt):
self.Finalize()
def sizeHint(self):
return QSize(400, 400)
def paintEngine(self):
return None
def paintEvent(self, ev):
self._Iren.Render()
def resizeEvent(self, ev):
w = self.width()
h = self.height()
vtk.vtkRenderWindow.SetSize(self._RenderWindow, w, h)
self._Iren.SetSize(w, h)
self._Iren.ConfigureEvent()
self.update()
def _GetCtrlShift(self, ev):
ctrl = shift = False
if hasattr(ev, 'modifiers'):
if ev.modifiers() & Qt.ShiftModifier:
shift = True
if ev.modifiers() & Qt.ControlModifier:
ctrl = True
else:
if self.__saveModifiers & Qt.ShiftModifier:
shift = True
if self.__saveModifiers & Qt.ControlModifier:
ctrl = True
return ctrl, shift
def enterEvent(self, ev):
ctrl, shift = self._GetCtrlShift(ev)
self._Iren.SetEventInformationFlipY(self.__saveX, self.__saveY,
ctrl, shift, chr(0), 0, None)
self._Iren.EnterEvent()
def leaveEvent(self, ev):
ctrl, shift = self._GetCtrlShift(ev)
self._Iren.SetEventInformationFlipY(self.__saveX, self.__saveY,
ctrl, shift, chr(0), 0, None)
self._Iren.LeaveEvent()
def mousePressEvent(self, ev):
ctrl, shift = self._GetCtrlShift(ev)
repeat = 0
if ev.type() == QEvent.MouseButtonDblClick:
repeat = 1
self._Iren.SetEventInformationFlipY(ev.x(), ev.y(),
ctrl, shift, chr(0), repeat, None)
self._ActiveButton = ev.button()
if self._ActiveButton == Qt.LeftButton:
self._Iren.LeftButtonPressEvent()
elif self._ActiveButton == Qt.RightButton:
self._Iren.RightButtonPressEvent()
elif self._ActiveButton == Qt.MidButton:
self._Iren.MiddleButtonPressEvent()
def mouseReleaseEvent(self, ev):
ctrl, shift = self._GetCtrlShift(ev)
self._Iren.SetEventInformationFlipY(ev.x(), ev.y(),
ctrl, shift, chr(0), 0, None)
if self._ActiveButton == Qt.LeftButton:
self._Iren.LeftButtonReleaseEvent()
elif self._ActiveButton == Qt.RightButton:
self._Iren.RightButtonReleaseEvent()
elif self._ActiveButton == Qt.MidButton:
self._Iren.MiddleButtonReleaseEvent()
def mouseMoveEvent(self, ev):
self.__saveModifiers = ev.modifiers()
self.__saveButtons = ev.buttons()
self.__saveX = ev.x()
self.__saveY = ev.y()
ctrl, shift = self._GetCtrlShift(ev)
self._Iren.SetEventInformationFlipY(ev.x(), ev.y(),
ctrl, shift, chr(0), 0, None)
self._Iren.MouseMoveEvent()
def keyPressEvent(self, ev):
ctrl, shift = self._GetCtrlShift(ev)
if ev.key() < 256:
key = str(ev.text())
else:
key = chr(0)
self._Iren.SetEventInformationFlipY(self.__saveX, self.__saveY,
ctrl, shift, key, 0, None)
self._Iren.KeyPressEvent()
self._Iren.CharEvent()
def keyReleaseEvent(self, ev):
ctrl, shift = self._GetCtrlShift(ev)
if ev.key() < 256:
key = chr(ev.key())
else:
key = chr(0)
self._Iren.SetEventInformationFlipY(self.__saveX, self.__saveY,
ctrl, shift, key, 0, None)
self._Iren.KeyReleaseEvent()
def wheelEvent(self, ev):
if ev.angleDelta().y() >= 0:
self._Iren.MouseWheelForwardEvent()
else:
self._Iren.MouseWheelBackwardEvent()
def GetRenderWindow(self):
return self._RenderWindow
def Render(self):
self.update()
def QVTKRenderWidgetConeExample():
"""A simple example that uses the QVTKRenderWindowInteractor class."""
# every QT app needs an app
app = QApplication(['QVTKRenderWindowInteractor'])
# create the widget
widget = QVTKRenderWindowInteractor()
widget.Initialize()
widget.Start()
# if you don't want the 'q' key to exit, comment this out.
widget.AddObserver("ExitEvent", lambda o, e, a=app: a.quit())
ren = vtk.vtkRenderer()
widget.GetRenderWindow().AddRenderer(ren)
cone = vtk.vtkConeSource()
cone.SetResolution(8)
coneMapper = vtk.vtkPolyDataMapper()
coneMapper.SetInputConnection(cone.GetOutputPort())
coneActor = vtk.vtkActor()
coneActor.SetMapper(coneMapper)
ren.AddActor(coneActor)
# show the widget
widget.show()
# start event processing
app.exec_()
if __name__ == "__main__":
QVTKRenderWidgetConeExample()
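# --- Embedding sketch (illustrative, mirrors the cone example above) ---
# Shows the interactor inside an existing Qt layout instead of as a bare
# top-level window; the names below are assumptions, not an upstream API.
def QVTKEmbeddedExample():
    from PyQt5.QtWidgets import QMainWindow, QVBoxLayout
    app = QApplication(['QVTKRenderWindowInteractor embedded'])
    window = QMainWindow()
    frame = QWidget()
    layout = QVBoxLayout(frame)
    interactor = QVTKRenderWindowInteractor(frame)
    layout.addWidget(interactor)
    window.setCentralWidget(frame)
    ren = vtk.vtkRenderer()
    interactor.GetRenderWindow().AddRenderer(ren)
    window.show()
    interactor.Initialize()
    interactor.Start()
    app.exec_()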
|
|
import datetime
from django.db import models
from kanboard import signals
class Card(models.Model):
"""
A card is a specific piece of work which must be done on a project, which
can be hung on a "board" (under a specific "phase").
"""
title = models.CharField(max_length=80)
board = models.ForeignKey("Board", related_name="cards")
phase = models.ForeignKey("Phase", related_name="cards")
# Order is within a phase.
order = models.SmallIntegerField()
created_by = models.ForeignKey('auth.User')
backlogged_at = models.DateTimeField(default=datetime.datetime.now)
#Optional fields
started_at = models.DateTimeField(blank=True, null=True)
done_at = models.DateTimeField(blank=True, null=True)
description = models.TextField(blank=True)
size = models.CharField(max_length=80, blank=True)
# Color represents a "#003399" style css color.
color = models.CharField(max_length=7, blank=True)
ready = models.BooleanField()
blocked = models.BooleanField()
blocked_because = models.TextField(blank=True)
class Meta:
ordering = ['order', ]
def __unicode__(self):
return "%s - %s (%s) -- %s" % (self.id, self.title, self.order, self.phase.title)
def change_phase(self, new_phase, change_at=None):
"""
Changes a card's phase to the one passed in.
If the card changes from backlogged to started
or started to done it updates the appropriate
timestamps.
"""
if not change_at:
change_at = datetime.datetime.now()
if self.phase.status == Phase.UPCOMING:
if new_phase.status in (Phase.PROGRESS, Phase.FINISHED):
self.started_at = change_at
elif new_phase.status == Phase.UPCOMING and self.started_at:
self.started_at = None
elif new_phase.status == Phase.FINISHED:
if not self.done_at:
self.done_at = change_at
elif new_phase.status == Phase.PROGRESS:
if self.done_at:
self.done_at = None
from_phase = self.phase
self.phase = new_phase
self.save()
signals.phase_change.send(sender=self, from_phase=from_phase,
to_phase=new_phase, changed_at=change_at)
signals.phase_change.connect(signals.update_phase_log)
models.signals.pre_save.connect(signals.card_order, sender=Card)
class Board(models.Model):
title = models.CharField(max_length=80)
slug = models.SlugField()
#Optional fields
description = models.TextField(blank=True)
def __unicode__(self):
return self.title
@models.permalink
def get_absolute_url(self):
return 'kanboard', [self.slug]
models.signals.post_save.connect(signals.create_default_phases, sender=Board)
class Phase(models.Model):
UPCOMING = 'upcoming'
PROGRESS = 'progress'
FINISHED = 'finished'
STATUSES = (
(UPCOMING, 'Upcoming'),
(PROGRESS, 'In progress'),
(FINISHED, 'Finished'),
)
title = models.CharField(max_length=80)
board = models.ForeignKey("Board", related_name="phases")
# Order of the phase within the board:
order = models.SmallIntegerField()
# The status is used to determine whether the phase is WIP or not (for
# stats calculation):
status = models.CharField(max_length=25, choices=STATUSES,
default=PROGRESS)
#Optional fields
description = models.TextField(blank=True)
limit = models.SmallIntegerField(blank=True, null=True)
class Meta:
ordering = ['order']
def __unicode__(self):
return u"%s - %s (%s)" % (self.board.title, self.title, self.order)
def update_log(self, count, changed_at):
log, created = PhaseLog.objects.get_or_create(phase=self,
date=changed_at)
log.count = count
log.save()
models.signals.post_save.connect(signals.update_phase_order, sender=Phase)
models.signals.post_save.connect(signals.create_phase_log, sender=Phase)
class PhaseLog(models.Model):
"""
Tracks the count for a phase for the period of
one day.
"""
phase = models.ForeignKey(Phase, related_name='logs')
count = models.SmallIntegerField(default=0)
date = models.DateField()
class Meta:
unique_together = ('phase', 'date')
def __unicode__(self):
return u"%s log on %s - %s" % (self.phase.title, self.date, self.count)
#TODO: Implement goal object
class KanboardStats(object):
"""
Queries a board and other related models
to calculate various performance stats.
"""
def __init__(self, board):
self.board = board
def delta_from_done(self, attr_name, start=None, finish=None):
now = datetime.datetime.now()
if not finish: finish = now
cards = Card.objects.filter(board=self.board, done_at__lte=finish)
if start:
cards = cards.filter(done_at__gte=start)
if not cards:
return datetime.timedelta()
deltas = [ card.done_at - getattr(card, attr_name) for card in cards ]
the_sum = sum(deltas, datetime.timedelta())
return the_sum / cards.count()
def cycle_time(self, start=None, finish=None):
"""
cycle_time returns a timedelta representing the
average cycle time of all completed objects on a board.
Note: Cycle time clock starts when work begins on the request and ends when the item is ready for delivery.
"""
return self.delta_from_done('started_at', start, finish)
def lead_time(self, start=None, finish=None):
"""
lead_time returns a timedelta object representing the
average lead time of all completed objects on a board.
It optionally accepts a start and end datetime object,
which will limit the average to cards completed during that
time phase.
Note: Lead time clock starts when the request is made and ends at delivery.
"""
return self.delta_from_done('backlogged_at', start, finish)
def cumulative_flow(self, date=None):
"""
cumulative_flow returns a dictionary-like object,
each key is a Phase name and the value is the number of
objects that were in that phase on that day.
Note: The done count equals Done + Archive
"""
if date is None: date = datetime.date.today()
result = {}
for phase in self.board.phases.all():
try:
log = PhaseLog.objects.filter(phase=phase, date__lte=date).order_by('-date')[0]
result[phase.title] = log.count
except IndexError:
#We assume the count is 0 to start because
#the phase may not have existed on or before the date requested
result[phase.title] = 0
backlog, archive = self.board.get_backlog(), self.board.get_archive()
archive_count = result[archive.title]
result[backlog.title] += archive_count
del result[archive.title]
return result
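# --- Usage sketch (illustrative, not part of the models above) ---
# Assumes an existing Board with some completed cards; shows how the stats
# helpers are typically consumed.
def example_board_stats(board):
    stats = KanboardStats(board)
    print("average cycle time:", stats.cycle_time())      # work start -> done
    print("average lead time:", stats.lead_time())        # backlogged -> done
    print("cumulative flow today:", stats.cumulative_flow())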
|
|
#!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
# Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fortios_webfilter_search_engine
short_description: Configure web filter search engines in Fortinet's FortiOS and FortiGate.
description:
- This module is able to configure a FortiGate or FortiOS (FOS) device by allowing the
user to set and modify webfilter feature and search_engine category.
Examples include all parameters; values need to be adjusted to your data sources before usage.
Tested with FOS v6.0.5
version_added: "2.8"
author:
- Miguel Angel Munoz (@mamunozgonzalez)
- Nicolas Thomas (@thomnico)
notes:
- Requires fortiosapi library developed by Fortinet
- Run as a local_action in your playbook
requirements:
- fortiosapi>=0.9.8
options:
host:
description:
- FortiOS or FortiGate IP address.
type: str
required: false
username:
description:
- FortiOS or FortiGate username.
type: str
required: false
password:
description:
- FortiOS or FortiGate password.
type: str
default: ""
vdom:
description:
- Virtual domain, among those defined previously. A vdom is a
virtual instance of the FortiGate that can be configured and
used as a different unit.
type: str
default: root
https:
description:
- Indicates if the requests towards FortiGate must use HTTPS protocol.
type: bool
default: true
ssl_verify:
description:
- Ensures FortiGate certificate must be verified by a proper CA.
type: bool
default: true
version_added: 2.9
state:
description:
- Indicates whether to create or remove the object.
This attribute was already present in previous versions at a deeper level.
It has been moved up to this outer level.
type: str
required: false
choices:
- present
- absent
version_added: 2.9
webfilter_search_engine:
description:
- Configure web filter search engines.
default: null
type: dict
suboptions:
state:
description:
- B(Deprecated)
- Starting with Ansible 2.9 we recommend using the top-level 'state' parameter.
- HORIZONTALLINE
- Indicates whether to create or remove the object.
type: str
required: false
choices:
- present
- absent
charset:
description:
- Search engine charset.
type: str
choices:
- utf-8
- gb2312
hostname:
description:
- Hostname (regular expression).
type: str
name:
description:
- Search engine name.
required: true
type: str
query:
description:
- Code used to prefix a query (must end with an equals character).
type: str
safesearch:
description:
- Safe search method. You can disable safe search, add the safe search string to URLs, or insert a safe search header.
type: str
choices:
- disable
- url
- header
safesearch_str:
description:
- Safe search parameter used in the URL.
type: str
url:
description:
- URL (regular expression).
type: str
'''
EXAMPLES = '''
- hosts: localhost
vars:
host: "192.168.122.40"
username: "admin"
password: ""
vdom: "root"
ssl_verify: "False"
tasks:
- name: Configure web filter search engines.
fortios_webfilter_search_engine:
host: "{{ host }}"
username: "{{ username }}"
password: "{{ password }}"
vdom: "{{ vdom }}"
https: "False"
state: "present"
webfilter_search_engine:
charset: "utf-8"
hostname: "myhostname"
name: "default_name_5"
query: "<your_own_value>"
safesearch: "disable"
safesearch_str: "<your_own_value>"
url: "myurl.com"
'''
RETURN = '''
build:
description: Build number of the fortigate image
returned: always
type: str
sample: '1547'
http_method:
description: Last method used to provision the content into FortiGate
returned: always
type: str
sample: 'PUT'
http_status:
description: Last result given by FortiGate on last operation applied
returned: always
type: str
sample: "200"
mkey:
description: Master key (id) used in the last call to FortiGate
returned: success
type: str
sample: "id"
name:
description: Name of the table used to fulfill the request
returned: always
type: str
sample: "urlfilter"
path:
description: Path of the table used to fulfill the request
returned: always
type: str
sample: "webfilter"
revision:
description: Internal revision number
returned: always
type: str
sample: "17.0.2.10658"
serial:
description: Serial number of the unit
returned: always
type: str
sample: "FGVMEVYYQT3AB5352"
status:
description: Indication of the operation's result
returned: always
type: str
sample: "success"
vdom:
description: Virtual domain used
returned: always
type: str
sample: "root"
version:
description: Version of the FortiGate
returned: always
type: str
sample: "v5.6.3"
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible.module_utils.network.fortios.fortios import FortiOSHandler
from ansible.module_utils.network.fortimanager.common import FAIL_SOCKET_MSG
def login(data, fos):
host = data['host']
username = data['username']
password = data['password']
ssl_verify = data['ssl_verify']
fos.debug('on')
if 'https' in data and not data['https']:
fos.https('off')
else:
fos.https('on')
fos.login(host, username, password, verify=ssl_verify)
def filter_webfilter_search_engine_data(json):
option_list = ['charset', 'hostname', 'name',
'query', 'safesearch', 'safesearch_str',
'url']
dictionary = {}
for attribute in option_list:
if attribute in json and json[attribute] is not None:
dictionary[attribute] = json[attribute]
return dictionary
def underscore_to_hyphen(data):
if isinstance(data, list):
data = [underscore_to_hyphen(elem) for elem in data]
elif isinstance(data, dict):
new_data = {}
for k, v in data.items():
new_data[k.replace('_', '-')] = underscore_to_hyphen(v)
data = new_data
return data
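# For example (illustrative): underscore_to_hyphen({'safesearch_str': 'url', 'nested': {'a_b': 1}})
# returns {'safesearch-str': 'url', 'nested': {'a-b': 1}} -- keys are renamed recursively,
# producing the hyphenated attribute names the FortiOS API expects.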
def webfilter_search_engine(data, fos):
vdom = data['vdom']
if 'state' in data and data['state']:
state = data['state']
elif 'state' in data['webfilter_search_engine'] and data['webfilter_search_engine']['state']:
state = data['webfilter_search_engine']['state']
else:
state = True
webfilter_search_engine_data = data['webfilter_search_engine']
filtered_data = underscore_to_hyphen(filter_webfilter_search_engine_data(webfilter_search_engine_data))
if state == "present":
return fos.set('webfilter',
'search-engine',
data=filtered_data,
vdom=vdom)
elif state == "absent":
return fos.delete('webfilter',
'search-engine',
mkey=filtered_data['name'],
vdom=vdom)
def is_successful_status(status):
return status['status'] == "success" or \
status['http_method'] == "DELETE" and status['http_status'] == 404
def fortios_webfilter(data, fos):
if data['webfilter_search_engine']:
resp = webfilter_search_engine(data, fos)
return not is_successful_status(resp), \
resp['status'] == "success", \
resp
def main():
fields = {
"host": {"required": False, "type": "str"},
"username": {"required": False, "type": "str"},
"password": {"required": False, "type": "str", "default": "", "no_log": True},
"vdom": {"required": False, "type": "str", "default": "root"},
"https": {"required": False, "type": "bool", "default": True},
"ssl_verify": {"required": False, "type": "bool", "default": True},
"state": {"required": False, "type": "str",
"choices": ["present", "absent"]},
"webfilter_search_engine": {
"required": False, "type": "dict", "default": None,
"options": {
"state": {"required": False, "type": "str",
"choices": ["present", "absent"]},
"charset": {"required": False, "type": "str",
"choices": ["utf-8", "gb2312"]},
"hostname": {"required": False, "type": "str"},
"name": {"required": True, "type": "str"},
"query": {"required": False, "type": "str"},
"safesearch": {"required": False, "type": "str",
"choices": ["disable", "url", "header"]},
"safesearch_str": {"required": False, "type": "str"},
"url": {"required": False, "type": "str"}
}
}
}
module = AnsibleModule(argument_spec=fields,
supports_check_mode=False)
# legacy_mode refers to using fortiosapi instead of HTTPAPI
legacy_mode = 'host' in module.params and module.params['host'] is not None and \
'username' in module.params and module.params['username'] is not None and \
'password' in module.params and module.params['password'] is not None
if not legacy_mode:
if module._socket_path:
connection = Connection(module._socket_path)
fos = FortiOSHandler(connection)
is_error, has_changed, result = fortios_webfilter(module.params, fos)
else:
module.fail_json(**FAIL_SOCKET_MSG)
else:
try:
from fortiosapi import FortiOSAPI
except ImportError:
module.fail_json(msg="fortiosapi module is required")
fos = FortiOSAPI()
login(module.params, fos)
is_error, has_changed, result = fortios_webfilter(module.params, fos)
fos.logout()
if not is_error:
module.exit_json(changed=has_changed, meta=result)
else:
module.fail_json(msg="Error in repo", meta=result)
if __name__ == '__main__':
main()
|
|
# issue/views_admin.py
# Brought to you by We Vote. Be good.
# -*- coding: UTF-8 -*-
from .controllers import *
from .models import ALPHABETICAL_ASCENDING, Issue, OrganizationLinkToIssue
from follow.models import FollowIssue
from admin_tools.views import redirect_to_sign_in_page
from config.base import get_environment_variable
from django.db.models import Q
from django.http import HttpResponseRedirect
from django.core.urlresolvers import reverse
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.contrib.messages import get_messages
from django.shortcuts import render
from election.models import ElectionManager
from exception.models import handle_record_found_more_than_one_exception
from image.controllers import cache_issue_image_master, cache_resized_image_locally, delete_cached_images_for_issue
from image.models import WeVoteImageManager
from organization.models import OrganizationManager, OrganizationListManager
from voter.models import voter_has_authority
from voter_guide.models import VoterGuideListManager
import wevote_functions.admin
from wevote_functions.functions import convert_to_int, positive_value_exists, get_voter_device_id, STATE_CODE_MAP
from django.http import HttpResponse
import json
ORGANIZATION_LINK_TO_ISSUE_SYNC_URL = \
get_environment_variable("ORGANIZATION_LINK_TO_ISSUE_SYNC_URL") # organizationLinkToIssueSyncOut
ISSUES_SYNC_URL = get_environment_variable("ISSUES_SYNC_URL") # issuesSyncOut
WE_VOTE_SERVER_ROOT_URL = get_environment_variable("WE_VOTE_SERVER_ROOT_URL")
logger = wevote_functions.admin.get_logger(__name__)
# This page does not need to be protected.
def issues_sync_out_view(request): # issuesSyncOut
issue_search = request.GET.get('issue_search', '')
try:
issue_list = Issue.objects.all()
filters = []
if positive_value_exists(issue_search):
new_filter = Q(issue_name__icontains=issue_search)
filters.append(new_filter)
new_filter = Q(issue_description__icontains=issue_search)
filters.append(new_filter)
new_filter = Q(we_vote_id__icontains=issue_search)
filters.append(new_filter)
# Add the first query
if len(filters):
final_filters = filters.pop()
# ...and "OR" the remaining items in the list
for item in filters:
final_filters |= item
issue_list = issue_list.filter(final_filters)
issue_list_dict = issue_list.values('we_vote_id', 'hide_issue',
'issue_name', 'issue_description', 'issue_icon_local_path',
'issue_followers_count', 'linked_organization_count',
'we_vote_hosted_image_url_large', 'we_vote_hosted_image_url_medium',
'we_vote_hosted_image_url_tiny')
if issue_list_dict:
issue_list_json = list(issue_list_dict)
return HttpResponse(json.dumps(issue_list_json), content_type='application/json')
except Exception as e:
pass
json_data = {
'success': False,
'status': 'ISSUES_LIST_MISSING'
}
return HttpResponse(json.dumps(json_data), content_type='application/json')
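# The OR-combining idiom above (pop one Q, then |= the rest) is repeated in
# issue_list_view below; a small helper like this sketch could factor it out.
# (Hypothetical helper, not currently part of this codebase.)
def combine_filters_with_or(filters):
    """Reduce a list of Q objects into a single Q joined with OR."""
    if not filters:
        return Q()
    combined = filters.pop()
    for item in filters:
        combined |= item
    return combined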
def issues_retrieve_view(request): # issuesRetrieve
voter_device_id = get_voter_device_id(request) # We standardize how we take in the voter_device_id
sort_formula = request.GET.get('sort_formula', ALPHABETICAL_ASCENDING) # Alternate: MOST_LINKED_ORGANIZATIONS
ballot_location_shortcut = request.GET.get('ballot_location_shortcut', False)
ballot_returned_we_vote_id = request.GET.get('ballot_returned_we_vote_id', False)
google_civic_election_id = request.GET.get('google_civic_election_id', False)
voter_issues_only = request.GET.get('voter_issues_only', False)
include_voter_follow_status = request.GET.get('include_voter_follow_status', False)
http_response = issues_retrieve_for_api(voter_device_id, sort_formula, google_civic_election_id,
voter_issues_only, include_voter_follow_status,
ballot_location_shortcut, ballot_returned_we_vote_id)
return http_response
def retrieve_issues_to_follow_view(request): # retrieveIssuesToFollow
voter_device_id = get_voter_device_id(request) # We standardize how we take in the voter_device_id
sort_formula = request.GET.get('sort_formula', ALPHABETICAL_ASCENDING) # Alternate: MOST_LINKED_ORGANIZATIONS
http_response = retrieve_issues_to_follow_for_api(voter_device_id, sort_formula)
return http_response
@login_required
def issues_import_from_master_server_view(request):
# admin, partner_organization, political_data_manager, political_data_viewer, verified_volunteer
authority_required = {'admin'}
if not voter_has_authority(request, authority_required):
return redirect_to_sign_in_page(request, authority_required)
if WE_VOTE_SERVER_ROOT_URL in ISSUES_SYNC_URL:
messages.add_message(request, messages.ERROR, "Cannot sync with Master We Vote Server -- "
"this is the Master We Vote Server.")
return HttpResponseRedirect(reverse('admin_tools:admin_home', args=()))
google_civic_election_id = convert_to_int(request.GET.get('google_civic_election_id', 0))
state_code = request.GET.get('state_code', '')
results = issues_import_from_master_server(request)
if not results['success']:
messages.add_message(request, messages.ERROR, results['status'])
else:
messages.add_message(request, messages.INFO, 'Issues import completed. '
'Saved: {saved}, Updated: {updated}, '
'Not processed: {not_processed}'
''.format(saved=results['issues_saved'],
updated=results['issues_updated'],
not_processed=results['issues_not_processed']))
return HttpResponseRedirect(reverse('admin_tools:sync_dashboard', args=()) + "?google_civic_election_id=" +
str(google_civic_election_id) + "&state_code=" + str(state_code))
@login_required
def issue_list_view(request):
# admin, partner_organization, political_data_manager, political_data_viewer, verified_volunteer
authority_required = {'partner_organization', 'political_data_manager', 'political_data_viewer',
'verified_volunteer'}
if not voter_has_authority(request, authority_required):
return redirect_to_sign_in_page(request, authority_required)
google_civic_election_id = convert_to_int(request.GET.get('google_civic_election_id', 0))
state_code = request.GET.get('state_code', '')
state_list = STATE_CODE_MAP
sorted_state_list = sorted(state_list.items())
issue_search = request.GET.get('issue_search', '')
show_hidden_issues = request.GET.get('show_hidden_issues', False)
show_all_elections = request.GET.get('show_all_elections', False)
issue_list_count = 0
issue_we_vote_id_list = []
organization_we_vote_id_in_this_election_list = []
organization_retrieved_list = {}
organization_link_to_issue_list = []
organizations_attached_to_this_issue = {}
if positive_value_exists(google_civic_election_id):
# If we are just looking at one election, then we want to retrieve a list of the voter guides associated
# with this election. This way we can order the issues based on the number of organizations with positions
# in this election linked to issues.
voter_guide_list_manager = VoterGuideListManager()
organization_manager = OrganizationManager()
results = voter_guide_list_manager.retrieve_voter_guides_for_election(google_civic_election_id)
if results['voter_guide_list_found']:
voter_guide_list = results['voter_guide_list']
for one_voter_guide in voter_guide_list:
organization_we_vote_id_in_this_election_list.append(one_voter_guide.organization_we_vote_id)
# try:
if positive_value_exists(len(organization_we_vote_id_in_this_election_list)):
organization_link_to_issue_list_query = OrganizationLinkToIssue.objects.all()
organization_link_to_issue_list_query = organization_link_to_issue_list_query.filter(
organization_we_vote_id__in=organization_we_vote_id_in_this_election_list)
organization_link_to_issue_list = list(organization_link_to_issue_list_query)
for one_organization_link_to_issue in organization_link_to_issue_list:
if one_organization_link_to_issue.organization_we_vote_id not in organization_retrieved_list:
# If here, we need to retrieve the organization
organization_results = organization_manager.retrieve_organization_from_we_vote_id(
one_organization_link_to_issue.organization_we_vote_id)
if organization_results['organization_found']:
organization_object = organization_results['organization']
organization_retrieved_list[one_organization_link_to_issue.organization_we_vote_id] = \
organization_object
if one_organization_link_to_issue.issue_we_vote_id not in organizations_attached_to_this_issue:
organizations_attached_to_this_issue[one_organization_link_to_issue.issue_we_vote_id] = []
organizations_attached_to_this_issue[one_organization_link_to_issue.issue_we_vote_id].\
append(
organization_retrieved_list[one_organization_link_to_issue.organization_we_vote_id])
# if one_organization_link_to_issue.issue_we_vote_id not in issue_we_vote_id_list:
# issue_we_vote_id_list.append(one_organization_link_to_issue.issue_we_vote_id)
# except Exception as e:
# pass
try:
issue_list_query = Issue.objects.all()
if positive_value_exists(show_hidden_issues) or positive_value_exists(issue_search):
# If trying to show hidden issues, no change to the query needed
pass
else:
# By default, we only show the issues marked "hide_issue=False"
issue_list_query = issue_list_query.filter(hide_issue=False)
# if positive_value_exists(len(issue_we_vote_id_list)):
# issue_list_query = issue_list_query.filter(we_vote_id__in=issue_we_vote_id_list)
if positive_value_exists(issue_search):
search_words = issue_search.split()
for one_word in search_words:
filters = []
new_filter = Q(issue_name__icontains=one_word)
filters.append(new_filter)
new_filter = Q(issue_description__icontains=one_word)
filters.append(new_filter)
new_filter = Q(we_vote_id__icontains=one_word)
filters.append(new_filter)
# Add the first query
if len(filters):
final_filters = filters.pop()
# ...and "OR" the remaining items in the list
for item in filters:
final_filters |= item
issue_list_query = issue_list_query.filter(final_filters)
issue_list_query = issue_list_query.order_by('issue_name')
issue_list_count = issue_list_query.count()
issue_list = list(issue_list_query)
if issue_list_count:
altered_issue_list = []
organization_link_to_issue_list_manager = OrganizationLinkToIssueList()
# Update the linked_organization_count
for one_issue in issue_list:
one_issue.linked_organization_count = \
organization_link_to_issue_list_manager.fetch_linked_organization_count(one_issue.we_vote_id)
try:
one_issue.save()
except Exception as e:
pass
if one_issue.we_vote_id in organizations_attached_to_this_issue:
one_issue.linked_organization_list = organizations_attached_to_this_issue[one_issue.we_vote_id]
one_issue.linked_organization_list_count = len(one_issue.linked_organization_list)
else:
one_issue.linked_organization_list = []
one_issue.linked_organization_list_count = 0
altered_issue_list.append(one_issue)
else:
altered_issue_list = issue_list
except Issue.DoesNotExist:
# This is fine
altered_issue_list = []
pass
# Order based on number of organizations per issue
altered_issue_list.sort(key=lambda x: x.linked_organization_list_count, reverse=True)
# include issue_followers in the issue list
add_issue_followers(altered_issue_list)
status_print_list = ""
status_print_list += "issue_list_count: " + \
str(issue_list_count) + " "
messages.add_message(request, messages.INFO, status_print_list)
messages_on_stage = get_messages(request)
election_manager = ElectionManager()
if positive_value_exists(show_all_elections):
results = election_manager.retrieve_elections()
election_list = results['election_list']
else:
results = election_manager.retrieve_upcoming_elections()
election_list = results['election_list']
template_values = {
'election_list': election_list,
'google_civic_election_id': google_civic_election_id,
'issue_list': altered_issue_list,
'issue_search': issue_search,
'messages_on_stage': messages_on_stage,
'show_all_elections': show_all_elections,
'show_hidden_issues': positive_value_exists(show_hidden_issues),
'state_code': state_code,
'state_list': sorted_state_list,
}
return render(request, 'issue/issue_list.html', template_values)
@login_required
def issue_new_view(request):
# admin, partner_organization, political_data_manager, political_data_viewer, verified_volunteer
authority_required = {'political_data_manager'}
if not voter_has_authority(request, authority_required):
return redirect_to_sign_in_page(request, authority_required)
google_civic_election_id = convert_to_int(request.GET.get('google_civic_election_id', 0))
state_code = request.GET.get('state_code', '')
# These variables are here because there was an error on the edit_process_view and the voter needs to try again
issue_name = request.GET.get('issue_name', "")
issue_description = request.GET.get('issue_description', "")
issue_icon_local_path = request.GET.get('issue_icon_local_path', "")
hide_issue = request.GET.get('hide_issue', True) # Default to true
# It's helpful to see existing issues when entering a new issue
issue_list = []
try:
issue_list = Issue.objects.all()
issue_list = issue_list.order_by('issue_name')[:500]
except Issue.DoesNotExist:
# This is fine
pass
messages_on_stage = get_messages(request)
template_values = {
'google_civic_election_id': google_civic_election_id,
'hide_issue': hide_issue,
'issue_list': issue_list,
'issue_name': issue_name,
'issue_description': issue_description,
'issue_icon_local_path': issue_icon_local_path,
'messages_on_stage': messages_on_stage,
'state_code': state_code,
}
return render(request, 'issue/issue_edit.html', template_values)
@login_required
def issue_edit_view(request, issue_we_vote_id):
# admin, partner_organization, political_data_manager, political_data_viewer, verified_volunteer
authority_required = {'political_data_manager', 'verified_volunteer'}
if not voter_has_authority(request, authority_required):
return redirect_to_sign_in_page(request, authority_required)
google_civic_election_id = convert_to_int(request.GET.get('google_civic_election_id', 0))
state_code = request.GET.get('state_code', '')
# These variables are here in case there was an error on the edit_process_view and the voter needs to try again
hide_issue = request.GET.get('hide_issue', True)
issue_name = request.GET.get('issue_name', '')
issue_description = request.GET.get('issue_description', '')
issue_icon_local_path = request.GET.get('issue_icon_local_path', '')
issue_image_file = request.GET.get('issue_image_file', '')
messages_on_stage = get_messages(request)
issue_on_stage_found = False
issue_on_stage = Issue()
organization_list = []
try:
issue_on_stage = Issue.objects.get(we_vote_id__iexact=issue_we_vote_id)
issue_on_stage_found = True
except Issue.MultipleObjectsReturned as e:
handle_record_found_more_than_one_exception(e, logger=logger)
except Issue.DoesNotExist:
# This is fine, create new below
pass
# It's helpful to see existing issues when entering a new issue
issue_list = []
try:
issue_list = Issue.objects.all()
issue_list = issue_list.order_by('issue_name')[:500]
except Issue.DoesNotExist:
# This is fine
pass
if issue_on_stage_found:
issue_on_stage_list = []
issue_on_stage_list.append(issue_we_vote_id)
organization_link_to_issue_list_manager = OrganizationLinkToIssueList()
organization_results = \
organization_link_to_issue_list_manager.retrieve_organization_we_vote_id_list_from_issue_we_vote_id_list(
issue_on_stage_list)
if organization_results['organization_we_vote_id_list_found']:
organization_list_manager = OrganizationListManager()
organization_we_vote_id_list = organization_results['organization_we_vote_id_list']
organization_list_results = \
organization_list_manager.retrieve_organizations_by_organization_we_vote_id_list(
organization_we_vote_id_list)
if organization_list_results['organization_list_found']:
organization_list = organization_list_results['organization_list']
template_values = {
'messages_on_stage': messages_on_stage,
'hide_issue': hide_issue,
'issue_list': issue_list,
'issue': issue_on_stage,
'issue_name': issue_name,
'issue_description': issue_description,
'issue_icon_local_path': issue_icon_local_path,
'issue_image_file': issue_image_file,
'google_civic_election_id': google_civic_election_id,
'state_code': state_code,
'organization_list': organization_list,
}
return render(request, 'issue/issue_edit.html', template_values)
@login_required
def issue_edit_process_view(request):
"""
Process the new or edit issue forms
:param request:
:return:
"""
# admin, partner_organization, political_data_manager, political_data_viewer, verified_volunteer
authority_required = {'political_data_manager', 'verified_volunteer'}
if not voter_has_authority(request, authority_required):
return redirect_to_sign_in_page(request, authority_required)
google_civic_election_id = convert_to_int(request.GET.get('google_civic_election_id', 0))
state_code = request.GET.get('state_code', '')
master_we_vote_hosted_image_url = None
we_vote_hosted_image_url_large = None
we_vote_hosted_image_url_medium = None
we_vote_hosted_image_url_tiny = None
considered_left = request.POST.get('considered_left', False)
considered_right = request.POST.get('considered_right', False)
hide_issue = request.POST.get('hide_issue', False)
issue_we_vote_id = request.POST.get('issue_we_vote_id', False)
issue_name = request.POST.get('issue_name', False)
issue_description = request.POST.get('issue_description', False)
issue_icon_local_path = request.POST.get('issue_icon_local_path', False)
try:
if request.method == 'POST' and request.FILES['issue_image_file']:
issue_image_file = request.FILES.get('issue_image_file')
cache_issue_image_results = cache_issue_image_master(
google_civic_election_id, issue_image_file, issue_we_vote_id=issue_we_vote_id,
kind_of_image_issue=True, kind_of_image_original=True)
we_vote_image_manager = WeVoteImageManager()
if cache_issue_image_results['success']:
cached_master_we_vote_image = cache_issue_image_results['we_vote_image']
google_civic_election_id = cached_master_we_vote_image.google_civic_election_id
we_vote_parent_image_id = cached_master_we_vote_image.id
image_format = cached_master_we_vote_image.we_vote_image_file_location.split(".")[-1]
master_we_vote_hosted_image_url = cached_master_we_vote_image.we_vote_image_url
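# Cache large, medium and tiny resized copies of the master image, then look each
# copy back up by its URL so the hosted locations can be stored on the issue below.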
cache_large_resized_image_results = cache_resized_image_locally(
google_civic_election_id, master_we_vote_hosted_image_url, we_vote_parent_image_id,
issue_we_vote_id=issue_we_vote_id, image_format=image_format,
kind_of_image_issue=True, kind_of_image_large=True)
if cache_large_resized_image_results['success']:
cached_resized_image_results = we_vote_image_manager.retrieve_we_vote_image_from_url(
issue_we_vote_id=issue_we_vote_id, issue_image_url_https=master_we_vote_hosted_image_url,
kind_of_image_large=True)
if cached_resized_image_results['success']:
we_vote_hosted_image_url_large = \
cached_resized_image_results['we_vote_image'].we_vote_image_url
cache_medium_resized_image_results = cache_resized_image_locally(
google_civic_election_id, master_we_vote_hosted_image_url, we_vote_parent_image_id,
issue_we_vote_id=issue_we_vote_id, image_format=image_format,
kind_of_image_issue=True, kind_of_image_medium=True)
if cache_medium_resized_image_results['success']:
cached_resized_image_results = we_vote_image_manager.retrieve_we_vote_image_from_url(
issue_we_vote_id=issue_we_vote_id, issue_image_url_https=master_we_vote_hosted_image_url,
kind_of_image_medium=True)
if cached_resized_image_results['success']:
we_vote_hosted_image_url_medium = \
cached_resized_image_results['we_vote_image'].we_vote_image_url
cache_tiny_resized_image_results = cache_resized_image_locally(
google_civic_election_id, master_we_vote_hosted_image_url, we_vote_parent_image_id,
issue_we_vote_id=issue_we_vote_id, image_format=image_format,
kind_of_image_issue=True, kind_of_image_tiny=True)
if cache_tiny_resized_image_results['success']:
cached_resized_image_results = we_vote_image_manager.retrieve_we_vote_image_from_url(
issue_we_vote_id=issue_we_vote_id, issue_image_url_https=master_we_vote_hosted_image_url,
kind_of_image_tiny=True)
if cached_resized_image_results['success']:
we_vote_hosted_image_url_tiny = \
cached_resized_image_results['we_vote_image'].we_vote_image_url
except KeyError as e:
pass
# Check to see if this issue is already being used anywhere
issue_on_stage_found = False
issue_on_stage = Issue()
if positive_value_exists(issue_we_vote_id):
try:
issue_on_stage = Issue.objects.get(we_vote_id__iexact=issue_we_vote_id)
issue_on_stage_found = True
except Issue.MultipleObjectsReturned as e:
handle_record_found_more_than_one_exception(e, logger=logger)
except Issue.DoesNotExist:
# This is fine, create new below
pass
if issue_on_stage_found:
# Update
if issue_name is not False:
issue_on_stage.issue_name = issue_name
if issue_description is not False:
issue_on_stage.issue_description = issue_description
if issue_icon_local_path is not False:
issue_on_stage.issue_icon_local_path = issue_icon_local_path
if we_vote_hosted_image_url_large is not None:
issue_on_stage.we_vote_hosted_image_url_large = we_vote_hosted_image_url_large
if we_vote_hosted_image_url_medium is not None:
issue_on_stage.we_vote_hosted_image_url_medium = we_vote_hosted_image_url_medium
if we_vote_hosted_image_url_tiny is not None:
issue_on_stage.we_vote_hosted_image_url_tiny = we_vote_hosted_image_url_tiny
issue_on_stage.considered_left = considered_left
issue_on_stage.considered_right = considered_right
issue_on_stage.hide_issue = hide_issue
issue_on_stage.save()
issue_we_vote_id = issue_on_stage.we_vote_id
messages.add_message(request, messages.INFO, 'Issue updated.')
else:
# Create new
required_issue_variables = positive_value_exists(issue_name)
if required_issue_variables:
issue_on_stage = Issue(
issue_name=issue_name,
)
if issue_description is not False:
issue_on_stage.issue_description = issue_description
if issue_icon_local_path is not False:
issue_on_stage.issue_icon_local_path = issue_icon_local_path
if we_vote_hosted_image_url_large is not None:
issue_on_stage.we_vote_hosted_image_url_large = we_vote_hosted_image_url_large
if we_vote_hosted_image_url_medium is not None:
issue_on_stage.we_vote_hosted_image_url_medium = we_vote_hosted_image_url_medium
if we_vote_hosted_image_url_tiny is not None:
issue_on_stage.we_vote_hosted_image_url_tiny = we_vote_hosted_image_url_tiny
issue_on_stage.considered_left = considered_left
issue_on_stage.considered_right = considered_right
issue_on_stage.hide_issue = hide_issue
issue_on_stage.save()
issue_we_vote_id = issue_on_stage.we_vote_id
messages.add_message(request, messages.INFO, 'New issue saved.')
else:
messages.add_message(request, messages.INFO, 'Missing required variables.')
url_variables = "?google_civic_election_id=" + str(google_civic_election_id) + \
"&state_code=" + str(state_code)
if positive_value_exists(issue_we_vote_id):
return HttpResponseRedirect(reverse('issue:issue_edit', args=(issue_we_vote_id,)) +
url_variables)
else:
return HttpResponseRedirect(reverse('issue:issue_new', args=()) +
url_variables)
@login_required
def issue_summary_view(request, issue_id):
# admin, partner_organization, political_data_manager, political_data_viewer, verified_volunteer
authority_required = {'partner_organization', 'political_data_manager', 'political_data_viewer',
'verified_volunteer'}
if not voter_has_authority(request, authority_required):
return redirect_to_sign_in_page(request, authority_required)
messages_on_stage = get_messages(request)
issue_id = convert_to_int(issue_id)
issue_on_stage_found = False
issue_on_stage = Issue()
try:
issue_on_stage = Issue.objects.get(id=issue_id)
issue_on_stage_found = True
except Issue.MultipleObjectsReturned as e:
handle_record_found_more_than_one_exception(e, logger=logger)
except Issue.DoesNotExist:
# This is fine, create new
pass
if issue_on_stage_found:
template_values = {
'messages_on_stage': messages_on_stage,
'issue': issue_on_stage,
}
else:
template_values = {
'messages_on_stage': messages_on_stage,
}
return render(request, 'issue/issue_summary.html', template_values)
@login_required
def issue_delete_images_view(request):
# admin, partner_organization, political_data_manager, political_data_viewer, verified_volunteer
authority_required = {'political_data_manager'}
if not voter_has_authority(request, authority_required):
return redirect_to_sign_in_page(request, authority_required)
google_civic_election_id = request.GET.get('google_civic_election_id', 0)
state_code = request.GET.get('state_code', '')
issue_we_vote_id = request.GET.get('issue_we_vote_id', '')
url_variables = "?google_civic_election_id=" + str(google_civic_election_id) + \
"&state_code=" + str(state_code)
if not positive_value_exists(issue_we_vote_id):
return HttpResponseRedirect(reverse('issue:issue_new', args=()) +
url_variables)
else:
issue_manager = IssueManager()
results = issue_manager.retrieve_issue_from_we_vote_id(issue_we_vote_id)
if not results['issue_found']:
messages.add_message(request, messages.INFO, results['status'])
return HttpResponseRedirect(reverse('issue:issue_edit', args=(issue_we_vote_id,)) +
url_variables)
issue = results['issue']
delete_image_results = delete_cached_images_for_issue(issue)
delete_image_count = delete_image_results['delete_image_count']
not_deleted_image_count = delete_image_results['not_deleted_image_count']
messages.add_message(request, messages.INFO,
"Images Deleted: {delete_image_count}, "
"Images Not Deleted: {not_deleted_image_count}"
.format(delete_image_count=delete_image_count,
not_deleted_image_count=not_deleted_image_count))
return HttpResponseRedirect(reverse('issue:issue_edit', args=(issue_we_vote_id,)) +
url_variables)
@login_required
def issue_delete_process_view(request):
"""
Delete this issue
:param request:
:return:
"""
# admin, partner_organization, political_data_manager, political_data_viewer, verified_volunteer
authority_required = {'admin'}
if not voter_has_authority(request, authority_required):
return redirect_to_sign_in_page(request, authority_required)
issue_we_vote_id = request.POST.get('issue_we_vote_id', False)
google_civic_election_id = request.POST.get('google_civic_election_id', 0)
confirm_delete = convert_to_int(request.POST.get('confirm_delete', 0))
state_code = request.POST.get('state_code', '')
if not positive_value_exists(confirm_delete):
messages.add_message(request, messages.ERROR,
'Unable to delete this issue. '
'Please check the checkbox to confirm you want to delete this issue.')
return HttpResponseRedirect(reverse('issue:issue_edit', args=(issue_we_vote_id,)) +
"?google_civic_election_id=" + str(google_civic_election_id) + "&state_code=" +
str(state_code))
# Retrieve this issue
issue_on_stage_found = False
issue_on_stage = Issue()
if positive_value_exists(issue_we_vote_id):
try:
issue_query = Issue.objects.filter(we_vote_id__iexact=issue_we_vote_id)
if len(issue_query):
issue_on_stage = issue_query[0]
issue_on_stage_found = True
except Exception as e:
messages.add_message(request, messages.ERROR, 'Could not find issue -- exception.')
if not issue_on_stage_found:
messages.add_message(request, messages.ERROR, 'Could not find issue.')
return HttpResponseRedirect(reverse('issue:issue_list', args=()) +
"?google_civic_election_id=" + str(google_civic_election_id))
# Are there any positions attached to this issue that should be moved to another
# instance of this issue?
organization_link_to_issue_list = OrganizationLinkToIssueList()
link_count = organization_link_to_issue_list.fetch_organization_count_for_issue(issue_we_vote_id)
organizations_found_for_this_issue = positive_value_exists(link_count)
if not organizations_found_for_this_issue:
# Delete the issue
issue_on_stage.delete()
messages.add_message(request, messages.INFO, 'Issue deleted.')
else:
messages.add_message(request, messages.ERROR, 'Could not delete -- '
'organizations still attached to this issue.')
return HttpResponseRedirect(reverse('issue:issue_edit', args=(issue_we_vote_id,)))
return HttpResponseRedirect(reverse('issue:issue_list', args=()) +
"?google_civic_election_id=" + str(google_civic_election_id))
@login_required
def organization_link_to_issue_import_from_master_server_view(request):
# admin, partner_organization, political_data_manager, political_data_viewer, verified_volunteer
authority_required = {'admin'}
if not voter_has_authority(request, authority_required):
return redirect_to_sign_in_page(request, authority_required)
if WE_VOTE_SERVER_ROOT_URL in ORGANIZATION_LINK_TO_ISSUE_SYNC_URL:
messages.add_message(request, messages.ERROR, "Cannot sync with Master We Vote Server -- "
"this is the Master We Vote Server.")
return HttpResponseRedirect(reverse('admin_tools:admin_home', args=()))
google_civic_election_id = convert_to_int(request.GET.get('google_civic_election_id', 0))
state_code = request.GET.get('state_code', '')
results = organization_link_to_issue_import_from_master_server(request)
if not results['success']:
messages.add_message(request, messages.ERROR, results['status'])
else:
messages.add_message(request, messages.INFO,
'Organization Links import completed. '
'Saved: {saved}, Updated: {updated}, '
'Not processed: {not_processed}'
''.format(saved=results['organization_link_to_issue_saved'],
updated=results['organization_link_to_issue_updated'],
not_processed=results['organization_link_to_issue_not_processed']))
return HttpResponseRedirect(reverse('admin_tools:sync_dashboard', args=()) + "?google_civic_election_id=" +
str(google_civic_election_id) + "&state_code=" + str(state_code))
# This page does not need to be protected.
def organization_link_to_issue_sync_out_view(request): # organizationLinkToIssueSyncOut
issue_search = request.GET.get('issue_search', '')
try:
issue_list = OrganizationLinkToIssue.objects.using('readonly').all()
# filters = []
# if positive_value_exists(issue_search):
# new_filter = Q(issue_name__icontains=issue_search)
# filters.append(new_filter)
#
# new_filter = Q(issue_description__icontains=issue_search)
# filters.append(new_filter)
#
# new_filter = Q(we_vote_id__icontains=issue_search)
# filters.append(new_filter)
#
# # Add the first query
# if len(filters):
# final_filters = filters.pop()
#
# # ...and "OR" the remaining items in the list
# for item in filters:
# final_filters |= item
#
# issue_list = issue_list.filter(final_filters)
issue_list_dict = issue_list.values('issue_we_vote_id', 'organization_we_vote_id',
'link_active', 'reason_for_link', 'link_blocked', 'reason_link_is_blocked')
if issue_list_dict:
issue_list_json = list(issue_list_dict)
return HttpResponse(json.dumps(issue_list_json), content_type='application/json')
except Exception as e:
pass
json_data = {
'success': False,
'status': 'ORGANIZATION_LINK_TO_ISSUE_LIST_MISSING'
}
return HttpResponse(json.dumps(json_data), content_type='application/json')
# Open to the web
def issue_partisan_analysis_view(request):
google_civic_election_id = convert_to_int(request.GET.get('google_civic_election_id', 0))
state_code = request.GET.get('state_code', '')
state_list = STATE_CODE_MAP
sorted_state_list = sorted(state_list.items())
issue_search = request.GET.get('issue_search', '')
show_hidden_issues = False
show_all_elections = False
organization_we_vote_id_in_this_election_list = []
organization_retrieved_list = {}
organization_link_to_issue_list = []
organizations_attached_to_this_issue = {}
if positive_value_exists(google_civic_election_id):
# If we are just looking at one election, then we want to retrieve a list of the voter guides associated
# with this election. This way we can order the issues based on the number of organizations with positions
# in this election linked to issues.
voter_guide_list_manager = VoterGuideListManager()
organization_manager = OrganizationManager()
results = voter_guide_list_manager.retrieve_voter_guides_for_election(google_civic_election_id)
if results['voter_guide_list_found']:
voter_guide_list = results['voter_guide_list']
for one_voter_guide in voter_guide_list:
organization_we_vote_id_in_this_election_list.append(one_voter_guide.organization_we_vote_id)
if positive_value_exists(len(organization_we_vote_id_in_this_election_list)):
organization_link_to_issue_list_query = OrganizationLinkToIssue.objects.all()
organization_link_to_issue_list_query = organization_link_to_issue_list_query.filter(
organization_we_vote_id__in=organization_we_vote_id_in_this_election_list)
organization_link_to_issue_list = list(organization_link_to_issue_list_query)
for one_organization_link_to_issue in organization_link_to_issue_list:
if one_organization_link_to_issue.organization_we_vote_id not in organization_retrieved_list:
# If here, we need to retrieve the organization
organization_results = organization_manager.retrieve_organization_from_we_vote_id(
one_organization_link_to_issue.organization_we_vote_id)
if organization_results['organization_found']:
organization_object = organization_results['organization']
organization_retrieved_list[one_organization_link_to_issue.organization_we_vote_id] = \
organization_object
if one_organization_link_to_issue.issue_we_vote_id not in organizations_attached_to_this_issue:
organizations_attached_to_this_issue[one_organization_link_to_issue.issue_we_vote_id] = []
organizations_attached_to_this_issue[one_organization_link_to_issue.issue_we_vote_id].\
append(organization_retrieved_list[one_organization_link_to_issue.organization_we_vote_id])
issue_list_left = []
issue_list_right = []
organization_list_left = []
organization_we_vote_id_list_left = []
organization_list_right = []
organization_we_vote_id_list_right = []
try:
issue_list_query = Issue.objects.using('readonly').all()
issue_list_query = issue_list_query.filter(hide_issue=False)
issue_list_query = issue_list_query.order_by('issue_name')
issue_list_count = issue_list_query.count()
issue_list = list(issue_list_query)
if issue_list_count:
altered_issue_list = []
for one_issue in issue_list:
if one_issue.we_vote_id not in organizations_attached_to_this_issue:
organizations_attached_to_this_issue[one_issue.we_vote_id] = []
one_issue.linked_organization_count = len(organizations_attached_to_this_issue[one_issue.we_vote_id])
altered_issue_list.append(one_issue)
if one_issue.considered_left:
issue_list_left.append(one_issue)
for one_organization in organizations_attached_to_this_issue[one_issue.we_vote_id]:
if one_organization.we_vote_id not in organization_we_vote_id_list_left:
organization_list_left.append(one_organization)
organization_we_vote_id_list_left.append(one_organization.we_vote_id)
if one_issue.considered_right:
issue_list_right.append(one_issue)
for one_organization in organizations_attached_to_this_issue[one_issue.we_vote_id]:
if one_organization.we_vote_id not in organization_we_vote_id_list_right:
organization_list_right.append(one_organization)
organization_we_vote_id_list_right.append(one_organization.we_vote_id)
else:
altered_issue_list = issue_list
except Issue.DoesNotExist:
# This is fine
altered_issue_list = []
# Order based on number of organizations per issue
altered_issue_list.sort(key=lambda x: x.linked_organization_count, reverse=True)
issue_list_left.sort(key=lambda x: x.linked_organization_count, reverse=True)
issue_list_right.sort(key=lambda x: x.linked_organization_count, reverse=True)
position_list_manager = PositionListManager()
retrieve_public_positions = True
endorsement_count_left = position_list_manager.fetch_positions_count_for_voter_guide(
organization_we_vote_id_list_left, google_civic_election_id, state_code,
retrieve_public_positions)
endorsement_count_right = position_list_manager.fetch_positions_count_for_voter_guide(
organization_we_vote_id_list_right, google_civic_election_id, state_code,
retrieve_public_positions)
messages_on_stage = get_messages(request)
election_manager = ElectionManager()
if positive_value_exists(show_all_elections):
results = election_manager.retrieve_elections()
election_list = results['election_list']
else:
results = election_manager.retrieve_upcoming_elections()
election_list = results['election_list']
template_values = {
'election_list': election_list,
'endorsement_count_left': endorsement_count_left,
'endorsement_count_right': endorsement_count_right,
'google_civic_election_id': google_civic_election_id,
'issue_list': altered_issue_list,
'issue_list_left': issue_list_left,
'issue_list_right': issue_list_right,
'issue_search': issue_search,
'messages_on_stage': messages_on_stage,
'organization_list_left': organization_list_left,
'organization_list_right': organization_list_right,
'show_all_elections': show_all_elections,
'show_hidden_issues': positive_value_exists(show_hidden_issues),
'state_code': state_code,
'state_list': sorted_state_list,
}
return render(request, 'issue/issue_partisan_analysis.html', template_values)
def add_issue_followers(issue_list):
follow_models = FollowIssue.objects.all()
issue_to_follow_count = {issue.we_vote_id: 0 for issue in issue_list}
for model in follow_models:
we_vote_id = model.issue_we_vote_id
if model.is_following():
try:
issue_to_follow_count[we_vote_id] += 1
except KeyError:
issue_to_follow_count[we_vote_id] = 1
for issue in issue_list:
issue.issue_followers_count = issue_to_follow_count[issue.we_vote_id]
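# A minimal sketch of how add_issue_followers can be used from a list view to
# annotate issues before rendering (the query and template values below are
# illustrative only, not taken from this module):
#
#     issue_list = list(Issue.objects.order_by('issue_name')[:500])
#     add_issue_followers(issue_list)
#     # each issue now carries an issue_followers_count attribute
#     return render(request, 'issue/issue_list.html', {'issue_list': issue_list})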
|
|
# Copyright 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Presubmit script for Chromium WebUI resources.
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details about the presubmit API built into depot_tools, and see
https://chromium.googlesource.com/chromium/src/+/main/styleguide/web/web.md
for the rules we're checking against here.
"""
# TODO(dbeam): Real CSS parser? https://github.com/danbeam/css-py/tree/css3
class CSSChecker(object):
DISABLE_PREFIX = 'csschecker-disable'
DISABLE_FORMAT = DISABLE_PREFIX + '(-[a-z]+)+ [a-z-]+(-[a-z-]+)*'
DISABLE_LINE = DISABLE_PREFIX + '-line'
def __init__(self, input_api, output_api, file_filter=None):
self.input_api = input_api
self.output_api = output_api
self.file_filter = file_filter
def RemoveAtBlocks(self, s):
re = self.input_api.re
def _remove_comments(s):
return re.sub(r'/\*.*\*/', '', s)
lines = s.splitlines()
i = 0
while i < len(lines):
line = _remove_comments(lines[i]).strip()
if (len(line) > 0 and line[0] == '@' and
not line[1:].startswith(("apply", "page")) and
line[-1] == '{' and not re.match(r"\d+x\b", line[1:])):
j = i
open_brackets = 1
while open_brackets > 0:
j += 1
inner_line = _remove_comments(lines[j]).strip()
if not inner_line:
continue
if inner_line[-1] == '{':
open_brackets += 1
elif inner_line[-1] == '}':
# Ignore single line keyframes (from { height: 0; }).
if not re.match(r'\s*(from|to|\d+%)\s*{', inner_line):
open_brackets -= 1
elif len(inner_line) > 1 and inner_line[-2:] == '};':
# End of mixin. TODO(dbeam): worth detecting ": {" start?
open_brackets -= 1
del lines[j] # Later index first, as indices shift with deletion.
del lines[i]
else:
i += 1
return '\n'.join(lines)
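# Example of what RemoveAtBlocks strips (illustrative CSS, not from any file):
#
#     @media (max-width: 100px) {
#       .a { color: red; }
#     }
#
# becomes just ".a { color: red; }": the "@... {" line and its matching closing
# brace are deleted so the per-rule checks below only see plain rules.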
def RunChecks(self):
# We use this a lot, so make a nick name variable.
re = self.input_api.re
def _collapseable_hex(s):
return (len(s) == 6 and s[0] == s[1] and s[2] == s[3] and s[4] == s[5])
def _is_gray(s):
return s[0] == s[1] == s[2] if len(s) == 3 else s[0:2] == s[2:4] == s[4:6]
def _extract_inline_style(s):
return '\n'.join(re.findall(r'<style\b[^>]*>([^<]*)<\/style>', s))
def _remove_comments_except_for_disables(s):
return re.sub(r'/\*(?! %s \*/$).*?\*/' % self.DISABLE_FORMAT, '', s,
flags=re.DOTALL | re.MULTILINE)
def _remove_grit(s):
return re.sub(r"""
<if[^>]+>.*?<\s*/\s*if[^>]*>| # <if> contents </if>
<include[^>]+> # <include>
""", '', s, flags=re.DOTALL | re.VERBOSE)
mixin_shim_reg = r'[\w-]+_-_[\w-]+'
def _remove_valid_vars(s):
valid_vars = r'--(?!' + mixin_shim_reg + r')[\w-]+:\s*([^;{}]+);\s*'
return re.sub(valid_vars, '', s, flags=re.DOTALL)
def _remove_disable(content, lstrip=False):
prefix_reg = (r'\s*' if lstrip else '')
disable_reg = r'/\* %s \*/' % self.DISABLE_FORMAT
return re.sub(prefix_reg + disable_reg, '', content, flags=re.MULTILINE)
def _remove_template_expressions(s):
return re.sub(r'\$i18n(Raw)?{[^}]*}', '', s, flags=re.DOTALL)
def _rgb_from_hex(s):
if len(s) == 3:
r, g, b = s[0] + s[0], s[1] + s[1], s[2] + s[2]
else:
r, g, b = s[0:2], s[2:4], s[4:6]
return int(r, base=16), int(g, base=16), int(b, base=16)
def _strip_prefix(s):
return re.sub(r'^-(?:o|ms|moz|khtml|webkit)-', '', s)
def alphabetize_props(contents):
errors = []
# TODO(dbeam): make this smart enough to detect issues in mixins.
strip_rule = lambda t: _remove_disable(t).strip()
for rule in re.finditer(r'{(.*?)}', contents, re.DOTALL):
semis = [strip_rule(r) for r in rule.group(1).split(';')][:-1]
rules = [r for r in semis if ': ' in r]
props = [r[0:r.find(':')] for r in rules]
if props != sorted(props):
errors.append(' %s;\n' % (';\n '.join(rules)))
return errors
def braces_have_space_before_and_nothing_after(line):
brace_space_reg = re.compile(r"""
(?:^|\S){| # selector{ or selector\n{ or
{\s*\S+\s* # selector { with stuff after it
$ # must be at the end of a line
""",
re.VERBOSE)
return brace_space_reg.search(line)
def classes_use_dashes(line):
# Intentionally dumbed down version of CSS 2.1 grammar for class without
# non-ASCII, escape chars, or whitespace.
class_reg = re.compile(r"""
(?<!')\.(-?[\w-]+).* # ., then maybe -, then alpha numeric and -
[,{]\s*$ # selectors should end with a , or {
""",
re.VERBOSE)
m = class_reg.search(line)
if not m:
return False
class_name = m.group(1)
return class_name.lower() != class_name or '_' in class_name
end_mixin_reg = re.compile(r'\s*};\s*$')
def close_brace_on_new_line(line):
# Ignore single frames in a @keyframe, i.e. 0% { margin: 50px; }
frame_reg = re.compile(r"""
\s*(from|to|\d+%)\s*{ # 50% {
\s*[\w-]+: # rule:
(\s*[\w\(\), -\.]+)+\s*; # value;
\s*}\s* # }
""",
re.VERBOSE)
return ('}' in line and re.search(r'[^ }]', line) and
not frame_reg.match(line) and not end_mixin_reg.match(line))
def colons_have_space_after(line):
colon_space_reg = re.compile(r"""
(?<!data) # ignore data URIs
:(?!//) # ignore url(http://), etc.
\S[^;]+;\s* # only catch one-line rules for now
""",
re.VERBOSE)
return colon_space_reg.search(line)
def favor_single_quotes(line):
return '"' in line
# Shared between hex_could_be_shorter and rgb_if_not_gray.
hex_reg = re.compile(r"""
\#([a-fA-F0-9]{3}|[a-fA-F0-9]{6}) # pound followed by 3 or 6 hex digits
(?=[^\w-]|$) # no more alphanum chars or at EOL
(?!.*(?:{.*|,\s*)$) # not in a selector
""",
re.VERBOSE)
def hex_could_be_shorter(line):
m = hex_reg.search(line)
return (m and _is_gray(m.group(1)) and _collapseable_hex(m.group(1)))
def rgb_if_not_gray(line):
m = hex_reg.search(line)
return (m and not _is_gray(m.group(1)))
small_seconds_reg = re.compile(r"""
(?:^|[^\w-]) # start of a line or a non-alphanumeric char
(0?\.[0-9]+)s # 1.0s
(?!-?[\w-]) # no following - or alphanumeric chars
""",
re.VERBOSE)
def milliseconds_for_small_times(line):
return small_seconds_reg.search(line)
def suggest_ms_from_s(line):
ms = int(float(small_seconds_reg.search(line).group(1)) * 1000)
return ' (replace with %dms)' % ms
def no_data_uris_in_source_files(line):
return re.search(r'\(\s*data:', line)
def no_mixin_shims(line):
return re.search(r'--' + mixin_shim_reg + r'\s*:', line)
def no_quotes_in_url(line):
return re.search(r'url\s*\(\s*["\']', line, re.IGNORECASE)
def one_rule_per_line(line):
line = _remove_disable(line)
one_rule_reg = re.compile(r"""
[\w-](?<!data): # a rule: but no data URIs
(?!//)[^;]+; # value; ignoring colons in protocols:// and };
\s*[^ }]\s* # any non-space after the end colon
""",
re.VERBOSE)
return one_rule_reg.search(line) and not end_mixin_reg.match(line)
def pseudo_elements_double_colon(contents):
pseudo_elements = ['after',
'before',
'calendar-picker-indicator',
'color-swatch',
'color-swatch-wrapper',
'date-and-time-container',
'date-and-time-value',
'datetime-edit',
'datetime-edit-ampm-field',
'datetime-edit-day-field',
'datetime-edit-hour-field',
'datetime-edit-millisecond-field',
'datetime-edit-minute-field',
'datetime-edit-month-field',
'datetime-edit-second-field',
'datetime-edit-text',
'datetime-edit-week-field',
'datetime-edit-year-field',
'details-marker',
'file-upload-button',
'first-letter',
'first-line',
'inner-spin-button',
'input-placeholder',
'input-speech-button',
'media-slider-container',
'media-slider-thumb',
'meter-bar',
'meter-even-less-good-value',
'meter-inner-element',
'meter-optimum-value',
'meter-suboptimum-value',
'progress-bar',
'progress-inner-element',
'progress-value',
'resizer',
'scrollbar',
'scrollbar-button',
'scrollbar-corner',
'scrollbar-thumb',
'scrollbar-track',
'scrollbar-track-piece',
'search-cancel-button',
'search-decoration',
'search-results-button',
'search-results-decoration',
'selection',
'slider-container',
'slider-runnable-track',
'slider-thumb',
'textfield-decoration-container',
'validation-bubble',
'validation-bubble-arrow',
'validation-bubble-arrow-clipper',
'validation-bubble-heading',
'validation-bubble-message',
'validation-bubble-text-block']
pseudo_reg = re.compile(r"""
(?<!:): # a single colon, i.e. :after but not ::after
([a-zA-Z-]+) # a pseudo element, class, or function
(?=[^{}]+?{) # make sure a selector, not inside { rules }
""",
re.MULTILINE | re.VERBOSE)
errors = []
for p in re.finditer(pseudo_reg, contents):
pseudo = p.group(1).strip().splitlines()[0]
if _strip_prefix(pseudo.lower()) in pseudo_elements:
errors.append(' :%s (should be ::%s)' % (pseudo, pseudo))
return errors
def one_selector_per_line(contents):
# Ignore all patterns nested in :any(), :is().
any_reg = re.compile(
r"""
:(?:
(?:-webkit-)?any # :-webkit-any(a, b, i) selector
|is # :is(...) selector
)\(
""", re.DOTALL | re.VERBOSE)
# Iteratively remove nested :is(), :any() patterns from |contents|.
while True:
m = re.search(any_reg, contents)
if m is None:
break
start, end = m.span()
# Find corresponding right parenthesis.
pcount = 1
while end < len(contents) and pcount > 0:
if contents[end] == '(':
pcount += 1
elif contents[end] == ')':
pcount -= 1
end += 1
contents = contents[:start] + contents[end:]
multi_sels_reg = re.compile(r"""
(?:}\s*)? # ignore 0% { blah: blah; }, from @keyframes
([^,]+,(?=[^{}]+?{) # selector junk {, not in a { rule }
.*[,{])\s*$ # has to end with , or {
""",
re.MULTILINE | re.VERBOSE)
errors = []
for b in re.finditer(multi_sels_reg, contents):
errors.append(' ' + b.group(1).strip().splitlines()[-1:][0])
return errors
def suggest_rgb_from_hex(line):
suggestions = ['rgb(%d, %d, %d)' % _rgb_from_hex(h.group(1))
for h in re.finditer(hex_reg, line)]
return ' (replace with %s)' % ', '.join(suggestions)
def suggest_short_hex(line):
h = hex_reg.search(line).group(1)
return ' (replace with #%s)' % (h[0] + h[2] + h[4])
prefixed_logical_axis_reg = re.compile(r"""
-webkit-(min-|max-|)logical-(height|width):
""", re.VERBOSE)
def suggest_unprefixed_logical_axis(line):
prefix, prop = prefixed_logical_axis_reg.search(line).groups()
block_or_inline = 'block' if prop == 'height' else 'inline'
return ' (replace with %s)' % (prefix + block_or_inline + '-size')
def prefixed_logical_axis(line):
return prefixed_logical_axis_reg.search(line)
prefixed_logical_side_reg = re.compile(r"""
-webkit-(margin|padding|border)-(before|after|start|end)
(?!-collapse)(-\w+|):
""", re.VERBOSE)
def suggest_unprefixed_logical_side(line):
prop, pos, suffix = prefixed_logical_side_reg.search(line).groups()
if pos == 'before' or pos == 'after':
block_or_inline = 'block'
else:
block_or_inline = 'inline'
if pos == 'start' or pos == 'before':
start_or_end = 'start'
else:
start_or_end = 'end'
return ' (replace with %s)' % (
prop + '-' + block_or_inline + '-' + start_or_end + suffix)
def prefixed_logical_side(line):
return prefixed_logical_side_reg.search(line)
_LEFT_RIGHT_REG = '(?:(border|margin|padding)-|(text-align): )' \
'(left|right)' \
'(?:(-[a-z-^:]+):)?(?!.*/\* %s left-right \*/)' % \
self.DISABLE_LINE
def start_end_instead_of_left_right(line):
return re.search(_LEFT_RIGHT_REG, line, re.IGNORECASE)
def suggest_start_end_from_left_right(line):
groups = re.search(_LEFT_RIGHT_REG, line, re.IGNORECASE).groups()
prop_start, text_align, left_right, prop_end = groups
start_end = {'left': 'start', 'right': 'end'}[left_right]
if text_align:
return ' (replace with text-align: %s)' % start_end
prop = '%s-inline-%s%s' % (prop_start, start_end, prop_end or '')
return ' (replace with %s)' % prop
def zero_width_lengths(contents):
hsl_reg = re.compile(r"""
hsl\([^\)]* # hsl(maybestuff
(?:[, ]|(?<=\()) # a comma, a space, or directly after the (
(?:0?\.?)?0% # some equivalent to 0%
""",
re.VERBOSE)
zeros_reg = re.compile(r"""
^.*(?:^|[^0-9.]) # start/non-number
(?:\.0|0(?:\.0? # .0, 0, or 0.0
|px|em|%|in|cm|mm|pc|pt|ex)) # a length unit
(?!svg|png|jpg)(?:\D|$) # non-number/end
(?=[^{}]+?}).*$ # only { rules }
""",
re.MULTILINE | re.VERBOSE)
errors = []
for z in re.finditer(zeros_reg, contents):
first_line = z.group(0).strip().splitlines()[0]
if not hsl_reg.search(first_line):
errors.append(' ' + first_line)
return errors
def mixins(line):
return re.search(r'--[\w-]+:\s*({.*?)', line) or re.search(
r'@apply', line)
# NOTE: Currently multi-line checks don't support 'after'. Instead, add
# suggestions while parsing the file so another pass isn't necessary.
added_or_modified_files_checks = [
{ 'desc': 'Alphabetize properties and list vendor specific (i.e. '
'-webkit) above standard.',
'test': alphabetize_props,
'multiline': True,
},
{ 'desc': 'Start braces ({) end a selector, have a space before them '
'and no rules after.',
'test': braces_have_space_before_and_nothing_after,
},
{ 'desc': 'Classes use .dash-form.',
'test': classes_use_dashes,
},
{ 'desc': 'Always put a rule closing brace (}) on a new line.',
'test': close_brace_on_new_line,
},
{ 'desc': 'Colons (:) should have a space after them.',
'test': colons_have_space_after,
},
{ 'desc': 'Use single quotes (\') instead of double quotes (") in '
'strings.',
'test': favor_single_quotes,
},
{ 'desc': 'Use abbreviated hex (#rgb) when in form #rrggbb.',
'test': hex_could_be_shorter,
'after': suggest_short_hex,
},
{ 'desc': 'Use milliseconds for time measurements under 1 second.',
'test': milliseconds_for_small_times,
'after': suggest_ms_from_s,
},
{ 'desc': "Don't use data URIs in source files. Use grit instead.",
'test': no_data_uris_in_source_files,
},
{ 'desc': "Don't override custom properties created by Polymer's mixin "
"shim. Set mixins or documented custom properties directly.",
'test': no_mixin_shims,
},
{ 'desc': "Don't use quotes in url().",
'test': no_quotes_in_url,
},
{ 'desc': 'One rule per line (what not to do: color: red; margin: 0;).',
'test': one_rule_per_line,
},
{ 'desc': 'One selector per line (what not to do: a, b {}).',
'test': one_selector_per_line,
'multiline': True,
},
{ 'desc': 'Pseudo-elements should use double colon (i.e. ::after).',
'test': pseudo_elements_double_colon,
'multiline': True,
},
{ 'desc': 'Use rgb() over #hex when not a shade of gray (like #333).',
'test': rgb_if_not_gray,
'after': suggest_rgb_from_hex,
},
{ 'desc': 'Unprefix logical axis property.',
'test': prefixed_logical_axis,
'after': suggest_unprefixed_logical_axis,
},
{ 'desc': 'Unprefix logical side property.',
'test': prefixed_logical_side,
'after': suggest_unprefixed_logical_side,
},
{
'desc': 'Use -start/end instead of -left/right ' \
'(https://goo.gl/gQYY7z, add /* %s left-right */ to ' \
'suppress)' % self.DISABLE_LINE,
'test': start_end_instead_of_left_right,
'after': suggest_start_end_from_left_right,
},
{ 'desc': 'Use "0" for zero-width lengths (i.e. 0px -> 0)',
'test': zero_width_lengths,
'multiline': True,
},
{ 'desc': 'Avoid using CSS mixins. Use CSS shadow parts, CSS ' \
'variables, or common CSS classes instead.',
'test': mixins,
},
]
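# Each check is a dict with a 'desc' shown to the author, a 'test' callable
# (run per line unless 'multiline' is set, in which case it receives the whole
# file and returns a list of offending snippets), and an optional 'after'
# callable that appends a suggestion to each per-line error.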
results = []
affected_files = self.input_api.AffectedFiles(include_deletes=False,
file_filter=self.file_filter)
files = []
for f in affected_files:
path = f.LocalPath()
is_html = path.endswith('.html')
if not is_html and not path.endswith('.css'):
continue
file_contents = '\n'.join(f.NewContents())
# Remove all /*comments*/, @at-keywords, and grit <if|include> tags; we're
# not using a real parser. TODO(dbeam): Check alpha in <if> blocks.
file_contents = _remove_grit(file_contents) # Must be done first.
if is_html:
# The <style> extraction regex can't handle <if> nor /* <tag> */.
prepped_html = _remove_comments_except_for_disables(file_contents)
file_contents = _extract_inline_style(prepped_html)
file_contents = self.RemoveAtBlocks(file_contents)
if not is_html:
file_contents = _remove_comments_except_for_disables(file_contents)
file_contents = _remove_valid_vars(file_contents)
file_contents = _remove_template_expressions(file_contents)
files.append((path, file_contents))
for f in files:
file_errors = []
for check in added_or_modified_files_checks:
# If the check is multiline, it receives the whole file and gives us
# back a list of things wrong. If the check isn't multiline, we pass it
# each line and the check returns something truthy if there's an issue.
if ('multiline' in check and check['multiline']):
assert not 'after' in check
check_errors = check['test'](f[1])
if len(check_errors) > 0:
file_errors.append('- %s\n%s' %
(check['desc'], '\n'.join(check_errors).rstrip()))
else:
check_errors = []
lines = f[1].splitlines()
for lnum, line in enumerate(lines):
if check['test'](line):
error = ' ' + _remove_disable(line, lstrip=True).strip()
if 'after' in check:
error += check['after'](line)
check_errors.append(error)
if len(check_errors) > 0:
file_errors.append('- %s\n%s' %
(check['desc'], '\n'.join(check_errors)))
if file_errors:
results.append(self.output_api.PresubmitPromptWarning(
'%s:\n%s' % (f[0], '\n\n'.join(file_errors))))
return results
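# A minimal sketch of how this checker is typically wired into a PRESUBMIT.py
# (the hook name and file filter below are illustrative assumptions, not
# defined in this file):
#
#     def CheckChangeOnUpload(input_api, output_api):
#         def is_resource(f):
#             return f.LocalPath().endswith(('.css', '.html'))
#         return CSSChecker(input_api, output_api,
#                           file_filter=is_resource).RunChecks()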
|
|
# Copyright (c) 2012 OpenStack Foundation.
# Copyright (c) 2014 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo.config import cfg
from oslo import messaging
from oslo.messaging import serializer as om_serializer
from neutron.common import exceptions
from neutron.common import log
from neutron import context
from neutron.openstack.common import log as logging
from neutron.openstack.common import service
LOG = logging.getLogger(__name__)
TRANSPORT = None
NOTIFIER = None
ALLOWED_EXMODS = [
exceptions.__name__,
]
EXTRA_EXMODS = []
TRANSPORT_ALIASES = {
'neutron.openstack.common.rpc.impl_fake': 'fake',
'neutron.openstack.common.rpc.impl_qpid': 'qpid',
'neutron.openstack.common.rpc.impl_kombu': 'rabbit',
'neutron.openstack.common.rpc.impl_zmq': 'zmq',
'neutron.rpc.impl_fake': 'fake',
'neutron.rpc.impl_qpid': 'qpid',
'neutron.rpc.impl_kombu': 'rabbit',
'neutron.rpc.impl_zmq': 'zmq',
}
def init(conf):
global TRANSPORT, NOTIFIER
exmods = get_allowed_exmods()
TRANSPORT = messaging.get_transport(conf,
allowed_remote_exmods=exmods,
aliases=TRANSPORT_ALIASES)
serializer = RequestContextSerializer()
NOTIFIER = messaging.Notifier(TRANSPORT, serializer=serializer)
def cleanup():
global TRANSPORT, NOTIFIER
assert TRANSPORT is not None
assert NOTIFIER is not None
TRANSPORT.cleanup()
TRANSPORT = NOTIFIER = None
def add_extra_exmods(*args):
EXTRA_EXMODS.extend(args)
def clear_extra_exmods():
del EXTRA_EXMODS[:]
def get_allowed_exmods():
return ALLOWED_EXMODS + EXTRA_EXMODS
def get_client(target, version_cap=None, serializer=None):
assert TRANSPORT is not None
serializer = RequestContextSerializer(serializer)
return messaging.RPCClient(TRANSPORT,
target,
version_cap=version_cap,
serializer=serializer)
def get_server(target, endpoints, serializer=None):
assert TRANSPORT is not None
serializer = RequestContextSerializer(serializer)
return messaging.get_rpc_server(TRANSPORT, target, endpoints,
'eventlet', serializer)
def get_notifier(service=None, host=None, publisher_id=None):
assert NOTIFIER is not None
if not publisher_id:
publisher_id = "%s.%s" % (service, host or cfg.CONF.host)
return NOTIFIER.prepare(publisher_id=publisher_id)
class RequestContextSerializer(om_serializer.Serializer):
"""This serializer is used to convert RPC common context into
Neutron Context.
"""
def __init__(self, base=None):
super(RequestContextSerializer, self).__init__()
self._base = base
def serialize_entity(self, ctxt, entity):
if not self._base:
return entity
return self._base.serialize_entity(ctxt, entity)
def deserialize_entity(self, ctxt, entity):
if not self._base:
return entity
return self._base.deserialize_entity(ctxt, entity)
def serialize_context(self, ctxt):
return ctxt.to_dict()
def deserialize_context(self, ctxt):
rpc_ctxt_dict = ctxt.copy()
user_id = rpc_ctxt_dict.pop('user_id', None)
if not user_id:
user_id = rpc_ctxt_dict.pop('user', None)
tenant_id = rpc_ctxt_dict.pop('tenant_id', None)
if not tenant_id:
tenant_id = rpc_ctxt_dict.pop('project_id', None)
return context.Context(user_id, tenant_id,
load_admin_roles=False, **rpc_ctxt_dict)
class RpcProxy(object):
'''
This class is created to facilitate migration from oslo-incubator
RPC layer implementation to oslo.messaging and is intended to
emulate RpcProxy class behaviour using oslo.messaging API once the
migration is applied.
'''
RPC_API_NAMESPACE = None
def __init__(self, topic, default_version, version_cap=None):
super(RpcProxy, self).__init__()
self.topic = topic
target = messaging.Target(topic=topic, version=default_version)
self._client = get_client(target, version_cap=version_cap)
def make_msg(self, method, **kwargs):
return {'method': method,
'namespace': self.RPC_API_NAMESPACE,
'args': kwargs}
@log.log
def call(self, context, msg, **kwargs):
return self.__call_rpc_method(
context, msg, rpc_method='call', **kwargs)
@log.log
def cast(self, context, msg, **kwargs):
self.__call_rpc_method(context, msg, rpc_method='cast', **kwargs)
@log.log
def fanout_cast(self, context, msg, **kwargs):
kwargs['fanout'] = True
self.__call_rpc_method(context, msg, rpc_method='cast', **kwargs)
def __call_rpc_method(self, context, msg, **kwargs):
options = dict(
((opt, kwargs[opt])
for opt in ('fanout', 'timeout', 'topic', 'version')
if kwargs.get(opt))
)
if msg['namespace']:
options['namespace'] = msg['namespace']
if options:
callee = self._client.prepare(**options)
else:
callee = self._client
func = getattr(callee, kwargs['rpc_method'])
return func(context, msg['method'], **msg['args'])
class RpcCallback(object):
'''
This class is created to facilitate migration from oslo-incubator
RPC layer implementation to oslo.messaging and is intended to set
callback version using oslo.messaging API once the migration is
applied.
'''
RPC_API_VERSION = '1.0'
def __init__(self):
super(RpcCallback, self).__init__()
self.target = messaging.Target(version=self.RPC_API_VERSION)
class Service(service.Service):
"""Service object for binaries running on hosts.
A service enables rpc by listening to queues based on topic and host.
"""
def __init__(self, host, topic, manager=None, serializer=None):
super(Service, self).__init__()
self.host = host
self.topic = topic
self.serializer = serializer
if manager is None:
self.manager = self
else:
self.manager = manager
def start(self):
super(Service, self).start()
self.conn = create_connection(new=True)
LOG.debug("Creating Consumer connection for Service %s" %
self.topic)
endpoints = [self.manager]
# Share this same connection for these Consumers
self.conn.create_consumer(self.topic, endpoints, fanout=False)
node_topic = '%s.%s' % (self.topic, self.host)
self.conn.create_consumer(node_topic, endpoints, fanout=False)
self.conn.create_consumer(self.topic, endpoints, fanout=True)
# Hook to allow the manager to do other initializations after
# the rpc connection is created.
if callable(getattr(self.manager, 'initialize_service_hook', None)):
self.manager.initialize_service_hook(self)
# Consume from all consumers in threads
self.conn.consume_in_threads()
def stop(self):
# Try to shut the connection down, but if we get any sort of
# errors, go ahead and ignore them.. as we're shutting down anyway
try:
self.conn.close()
except Exception:
pass
super(Service, self).stop()
class Connection(object):
def __init__(self):
super(Connection, self).__init__()
self.servers = []
def create_consumer(self, topic, endpoints, fanout=False):
target = messaging.Target(
topic=topic, server=cfg.CONF.host, fanout=fanout)
server = get_server(target, endpoints)
self.servers.append(server)
def consume_in_threads(self):
for server in self.servers:
server.start()
return self.servers
# functions
def create_connection(new=True):
return Connection()
# exceptions
RPCException = messaging.MessagingException
RemoteError = messaging.RemoteError
MessagingTimeout = messaging.MessagingTimeout
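# A minimal sketch of a plugin-side notifier built on RpcProxy (the class name,
# topic and method below are illustrative assumptions, not part of this module):
#
#     class AgentNotifierApi(RpcProxy):
#         BASE_RPC_API_VERSION = '1.0'
#
#         def __init__(self, topic):
#             super(AgentNotifierApi, self).__init__(
#                 topic=topic, default_version=self.BASE_RPC_API_VERSION)
#
#         def network_delete(self, context, network_id):
#             self.fanout_cast(context,
#                              self.make_msg('network_delete',
#                                            network_id=network_id))
#
# init(cfg.CONF) must have been called first so that get_client() can build the
# underlying oslo.messaging RPCClient.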
|
|
"""
A variety of distribution optimizers using scipy.optimize's methods.
"""
from __future__ import division
from collections import namedtuple
from itertools import combinations
from debtcollector import removals
import numpy as np
from .maxentropy import marginal_constraints_generic
from .optimization import BaseOptimizer, BaseConvexOptimizer, BaseNonConvexOptimizer
from .optutil import prepare_dist
from .pid_broja import (extra_constraints as broja_extra_constraints,
prepare_dist as broja_prepare_dist)
from .. import Distribution, product_distribution
from ..helpers import RV_MODES
from ..multivariate import coinformation as I
from ..utils import flatten
__all__ = [
'maxent_dist',
'marginal_maxent_dists',
]
def infer_free_values(A, b):
"""
Infer the indices of free values in an optimization vector.
Parameters
----------
A : np.ndarray
The constraint matrix.
b : np.ndarray
The constraint values.
Returns
-------
free : list
The list of free indices.
"""
# find locations of b == 0, since pmf values are non-negative, this means they are identically zero.
free = [i for i, n in enumerate(A[b == 0, :].sum(axis=0)) if n == 0]
while True:
# now find rows of A with only a single free value in them. those values must also be fixed.
fixed = A[:, free].sum(axis=1) == 1
new_fixed = [[i for i, n in enumerate(row) if n and (i in free)][0] for i, row in enumerate(A) if fixed[i]]
free = list(sorted(set(free) - set(new_fixed)))
if not new_fixed:
break
return free
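# A small worked example (the arrays are illustrative only):
#
#     A = np.array([[1, 1, 0, 0],
#                   [0, 0, 1, 1]])
#     b = np.array([0.0, 1.0])
#     infer_free_values(A, b)  # -> [2, 3]
#
# The first constraint has b == 0, so pmf entries 0 and 1 are pinned to zero;
# only indices 2 and 3 remain free.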
class BaseDistOptimizer(BaseOptimizer):
"""
Calculate an optimized distribution consistent with the given marginal constraints.
"""
construct_initial = BaseOptimizer.construct_uniform_initial
def __init__(self, dist, marginals, rv_mode=None):
"""
Initialize the optimizer.
Parameters
----------
dist : Distribution
The distribution from which the corresponding optimal distribution
will be calculated.
marginals : list, None
The list of sets of variables whose marginals will be constrained to
match the given distribution.
rv_mode : str, None
Specifies how to interpret `rvs` and `crvs`. Valid options are:
{'indices', 'names'}. If equal to 'indices', then the elements of
`crvs` and `rvs` are interpreted as random variable indices. If
equal to 'names', the the elements are interpreted as random
variable names. If `None`, then the value of `dist._rv_mode` is
consulted, which defaults to 'indices'.
"""
super(BaseDistOptimizer, self).__init__(dist, dist.rvs, crvs=[], rv_mode='indices')
# todo: actually make this class support crvs?
self._all_vars = self._rvs
self.dist = prepare_dist(dist)
self._vpmf = self.dist.pmf.copy()
self._A, self._b = marginal_constraints_generic(self.dist, marginals, rv_mode)
self._shape = list(map(len, self.dist.alphabet))
self._free = infer_free_values(self._A, self._b)
self.constraints = [{'type': 'eq',
'fun': self.constraint_match_marginals,
},
]
self._optvec_size = len(self._free)
self._default_hops = 50
self._additional_options = {'options': {'maxiter': 1000,
'ftol': 1e-7,
'eps': 1.4901161193847656e-08,
}
}
def optimize(self, x0=None, niter=None, maxiter=None, polish=1e-8, callback=False):
"""
Optimize this distribution w.r.t the objective.
Parameters
----------
x0 : np.ndarray
An initial optimization vector.
niter : int
The number of optimization iterations to perform.
maxiter : int
The number of steps for an optimization subroutine to perform.
polish : float
The threshold for valid optimization elements. If 0, no polishing is
performed.
callback : bool
Whether to use a callback to track the performance of the optimization.
Generally, this should be False as it adds some significant time to the
optimization.
Returns
-------
result : OptimizeResult, None
Return the optimization result, or None if no optimization was needed.
"""
if len(self._free) == 0:
# nothing is free to vary, so no optimization is needed
self._optima = self._vpmf
result = None
else:
if x0 is not None and len(x0) == len(self._vpmf):
# if a full pmf vector was passed in, restrict it to the free
# indices:
x0 = x0[self._free]
result = super(BaseDistOptimizer, self).optimize(x0=x0,
niter=niter,
maxiter=maxiter,
polish=polish,
callback=callback)
return result
def construct_vector(self, x):
"""
Expand the `x` argument to the full pmf.
Parameters
----------
x : np.ndarray
An optimization vector.
Returns
-------
vpmf : np.array
The full pmf as a vector.
"""
if self._free:
self._vpmf[self._free] = x
return self._vpmf
def construct_joint(self, x):
"""
Construct the joint distribution.
Parameters
----------
x : np.ndarray
An optimization vector.
Returns
-------
pmf : np.ndarray
The joint distribution.
"""
vec = self.construct_vector(x)
pmf = vec.reshape(self._shape)
return pmf
def constraint_match_marginals(self, x):
"""
Ensure that the joint distribution represented by the optimization
vector matches that of the distribution.
Parameters
----------
x : np.ndarray
An optimization vector.
Returns
-------
d : float
The deviation from the constraint.
"""
pmf = self.construct_vector(x)
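# The deviation is the squared residual ||A p - b||^2, which is zero exactly
# when every marginal constraint is satisfied.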
return sum((np.dot(self._A, pmf) - self._b)**2)
def construct_dist(self, x=None, cutoff=1e-6, sparse=True):
"""
Construct the optimal distribution.
Parameters
----------
x : np.ndarray
An optimization vector.
cutoff : float
A probability cutoff. Any joint event with probability below
this will be set to zero.
sparse : bool
Whether to make the distribution sparse or not. Defaults to True.
Returns
-------
d : distribution
The optimized distribution.
"""
if x is None:
x = self._optima.copy()
pmf = self.construct_vector(x)
pmf[pmf < cutoff] = 0
pmf /= pmf.sum()
new_dist = self.dist.copy()
new_dist.pmf = pmf.ravel()
if sparse:
new_dist.make_sparse()
new_dist.set_rv_names(self.dist.get_rv_names())
return new_dist
class MaxEntOptimizer(BaseDistOptimizer, BaseConvexOptimizer):
"""
Compute maximum entropy distributions.
"""
def _objective(self):
"""
Compute the negative entropy.
Returns
-------
objective : func
The objective function.
"""
entropy = self._entropy(self._rvs)
def objective(self, x):
"""
Compute -H[rvs]
Parameters
----------
x : np.ndarray
An optimization vector.
Returns
-------
obj : float
The value of the objective.
"""
pmf = self.construct_joint(x)
return -entropy(pmf)
return objective
class MinEntOptimizer(BaseDistOptimizer, BaseNonConvexOptimizer):
"""
Compute minimum entropy distributions.
"""
def _objective(self):
"""
Compute the entropy.
Returns
-------
objective : func
The objective function.
"""
entropy = self._entropy(self._rvs)
def objective(self, x):
"""
Compute H[rvs]
Parameters
----------
x : np.ndarray
An optimization vector.
Returns
-------
obj : float
The value of the objective.
"""
pmf = self.construct_joint(x)
return entropy(pmf)
return objective
class MaxCoInfoOptimizer(BaseDistOptimizer, BaseNonConvexOptimizer):
"""
Compute maximum co-information distributions.
"""
def _objective(self):
"""
Compute the negative co-information.
Returns
-------
objective : func
The objective function.
"""
coinformation = self._coinformation(self._rvs)
def objective(self, x):
"""
Compute -I[rvs]
Parameters
----------
x : np.ndarray
An optimization vector.
Returns
-------
obj : float
The value of the objective.
"""
pmf = self.construct_joint(x)
return -coinformation(pmf)
return objective
class MinCoInfoOptimizer(BaseDistOptimizer, BaseNonConvexOptimizer):
"""
Compute minimum co-information distributions.
"""
def _objective(self):
"""
Compute the co-information.
Returns
-------
objective : func
The objective function.
"""
coinformation = self._coinformation(self._rvs)
def objective(self, x):
"""
Compute I[rvs]
Parameters
----------
x : np.ndarray
An optimization vector.
Returns
-------
obj : float
The value of the objective.
"""
pmf = self.construct_joint(x)
return coinformation(pmf)
return objective
class MaxDualTotalCorrelationOptimizer(BaseDistOptimizer, BaseNonConvexOptimizer):
"""
Compute maximum dual total correlation distributions.
"""
def _objective(self):
"""
Compute the negative dual total correlation.
Returns
-------
objective : func
The objective function.
"""
dual_total_correlation = self._dual_total_correlation(self._rvs)
def objective(self, x):
"""
Compute -B[rvs]
Parameters
----------
x : np.ndarray
An optimization vector.
Returns
-------
obj : float
The value of the objective.
"""
pmf = self.construct_joint(x)
return -dual_total_correlation(pmf)
return objective
class MinDualTotalCorrelationOptimizer(BaseDistOptimizer, BaseNonConvexOptimizer):
"""
Compute minimum dual total correlation distributions.
"""
def _objective(self):
"""
Compute the dual total correlation.
Returns
-------
objective : func
The objective function.
"""
dual_total_correlation = self._dual_total_correlation(self._rvs)
def objective(self, x):
"""
Compute B[rvs]
Parameters
----------
x : np.ndarray
An optimization vector.
Returns
-------
obj : float
The value of the objective.
"""
pmf = self.construct_joint(x)
return dual_total_correlation(pmf)
return objective
class BROJABivariateOptimizer(MaxCoInfoOptimizer):
"""
An optimizer for constructing the maximum co-information distribution
consistent with (source, target) marginals of the given distribution.
Notes
-----
Though maximizing co-information is generically a non-convex optimization,
with the specific constraints involved in this calculation the problem is
convex.
"""
def __init__(self, dist, sources, target, rv_mode=None):
"""
Initialize the optimizer.
Parameters
----------
dist : Distribution
The distribution from which the corresponding optimal distribution
will be calculated.
sources : list, len = 2
List of two source sets of variables.
target : list
The target variables.
rv_mode : str, None
Specifies how to interpret `rvs` and `crvs`. Valid options are:
{'indices', 'names'}. If equal to 'indices', then the elements of
`crvs` and `rvs` are interpreted as random variable indices. If
equal to 'names', then the elements are interpreted as random
variable names. If `None`, then the value of `dist._rv_mode` is
consulted, which defaults to 'indices'.
"""
dist = broja_prepare_dist(dist, sources, target, rv_mode)
super(BROJABivariateOptimizer, self).__init__(dist, [[0, 2], [1, 2]])
extra_free = broja_extra_constraints(self.dist, 2).free
self._free = list(sorted(set(self._free) & set(extra_free)))
self._optvec_size = len(self._free)
def maxent_dist(dist, rvs, x0=None, maxiter=1000, sparse=True, rv_mode=None):
"""
Return the maximum entropy distribution consistent with the marginals from
`dist` specified in `rvs`.
Parameters
----------
dist : Distribution
The distributions whose marginals should be matched.
rvs : list of lists
The marginals from `dist` to constrain.
x0 : np.ndarray
Initial condition for the optimizer.
maxiter : int
The number of optimization iterations to perform.
sparse : bool
Whether the returned distribution should be sparse or dense.
rv_mode : str, None
Specifies how to interpret `rvs` and `crvs`. Valid options are:
{'indices', 'names'}. If equal to 'indices', then the elements of
`crvs` and `rvs` are interpreted as random variable indices. If
equal to 'names', then the elements are interpreted as random
variable names. If `None`, then the value of `dist._rv_mode` is
consulted, which defaults to 'indices'.
Returns
-------
me : Distribution
The maximum entropy distribution.
"""
meo = MaxEntOptimizer(dist, rvs, rv_mode)
meo.optimize(x0=x0, maxiter=maxiter)
dist = meo.construct_dist(sparse=sparse)
return dist
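# Illustrative usage (a sketch, not part of the library; assumes dit's public
# Distribution and shannon.entropy API). Fixing all pairwise marginals of the
# three-bit parity distribution yields the uniform maxent distribution.
def _example_maxent_dist_usage():
    import dit
    d = dit.Distribution(['000', '011', '101', '110'], [0.25] * 4)
    me = maxent_dist(d, [[0, 1], [0, 2], [1, 2]])
    # d has 2 bits of entropy; the pairwise-constrained maxent solution has 3.
    return dit.shannon.entropy(d), dit.shannon.entropy(me)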
def marginal_maxent_dists(dist, k_max=None):
"""
Return the marginal-constrained maximum entropy distributions.
Parameters
----------
dist : distribution
The distribution used to constrain the maxent distributions.
k_max : int
The maximum order to calculate.
Returns
-------
dists : list
A list of distributions, where the `i`th element is the maxent
distribution with the size-`i` marginals fixed.
"""
dist = prepare_dist(dist)
n_variables = dist.outcome_length()
if k_max is None:
k_max = n_variables
outcomes = list(dist._sample_space)
# Optimization for the k=0 and k=1 cases is slow since it requires optimizing
# over the full space, and we already know the answer in these cases.
# This is safe since the distribution must be dense.
k0 = Distribution(outcomes, [1]*len(outcomes), base='linear', validate=False)
k0.normalize()
k1 = product_distribution(dist)
dists = [k0, k1]
for k in range(k_max + 1):
if k in [0, 1, n_variables]:
continue
rv_mode = dist._rv_mode
if rv_mode in [RV_MODES.NAMES, 'names']:
vars = dist.get_rv_names()
rvs = list(combinations(vars, k))
else:
rvs = list(combinations(range(n_variables), k))
dists.append(maxent_dist(dist, rvs, rv_mode=rv_mode))
# To match the all-way marginal is to match itself. Again, this is a time
# savings decision, even though the optimization should be fast.
if k_max == n_variables:
dists.append(dist)
return dists
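# Illustrative usage (a sketch; same assumptions as the maxent_dist example
# above): the entropies of the returned distributions are non-increasing as
# higher-order marginals are fixed.
def _example_marginal_maxent_dists_usage():
    import dit
    d = dit.Distribution(['000', '011', '101', '110'], [0.25] * 4)
    dists = marginal_maxent_dists(d)
    return [dit.shannon.entropy(m) for m in dists]  # roughly [3, 3, 3, 2] bits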
PID = namedtuple('PID', ['R', 'U0', 'U1', 'S'])
@removals.remove(message="Please see dit.pid.PID_BROJA.",
version='1.0.0.dev8')
def pid_broja(dist, sources, target, niter=10, return_opt=False, rv_mode=None):
"""
Compute the BROJA partial information decomposition.
Parameters
----------
dist : Distribution
The distribution to compute the partial information decomposition of.
sources : iterable
The source variables of the distribution.
target : iterable
The target variable of the distribution.
niter : int
The number of optimization steps to perform.
return_opt : bool
If True, return the distribution resulting from the optimization.
Defaults to False.
rv_mode : str, None
Specifies how to interpret `sources` and `target`. Valid options are:
{'indices', 'names'}. If equal to 'indices', then the elements of
`sources` and `target` are interpreted as random variable indices. If
equal to 'names', then the elements are interpreted as random
variable names. If `None`, then the value of `dist._rv_mode` is
consulted, which defaults to 'indices'.
Returns
-------
pid : PID namedtuple
The partial information decomposition.
opt_dist : Distribution
The distribution resulting from the optimization. Note that variable [0]
is sources[0], variable [1] is sources[1], and variable [2] is the target.
"""
broja = BROJABivariateOptimizer(dist, sources, target, rv_mode)
broja.optimize(niter=niter)
opt_dist = broja.construct_dist()
r = -broja.objective(broja._optima)
# in opt_dist, source[0] is [0], sources[1] is [1], and target is [2]
# see broja_prepare_dist() for details
u0 = I(opt_dist, [[0], [2]], [1])
u1 = I(opt_dist, [[1], [2]], [0])
# r = 0.0 if close(r, 0, rtol=1e-6, atol=1e-6) else r
# u0 = 0.0 if close(u0, 0, rtol=1e-6, atol=1e-6) else u0
# u1 = 0.0 if close(u1, 0, rtol=1e-6, atol=1e-6) else u1
s = I(dist, [list(flatten(sources)), target]) - r - u0 - u1
pid = PID(R=r, U0=u0, U1=u1, S=s)
if return_opt:
return pid, opt_dist
else:
return pid
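# Illustrative usage (a sketch only; this function is deprecated in favor of
# dit.pid.PID_BROJA, and the AND-gate distribution below is a hypothetical
# example, not taken from this module).
def _example_pid_broja_usage():
    import dit
    and_gate = dit.Distribution(['000', '010', '100', '111'], [0.25] * 4)
    return pid_broja(and_gate, [[0], [1]], [2])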
|
|
#
# DEPRECATED: implementation for ffi.verify()
#
import sys, imp
from . import model
from .error import VerificationError
class VCPythonEngine(object):
_class_key = 'x'
_gen_python_module = True
def __init__(self, verifier):
self.verifier = verifier
self.ffi = verifier.ffi
self._struct_pending_verification = {}
self._types_of_builtin_functions = {}
def patch_extension_kwds(self, kwds):
pass
def find_module(self, module_name, path, so_suffixes):
try:
f, filename, descr = imp.find_module(module_name, path)
except ImportError:
return None
if f is not None:
f.close()
# Note that after a setuptools installation, there are both .py
# and .so files with the same basename. The code here relies on
# imp.find_module() locating the .so in priority.
if descr[0] not in so_suffixes:
return None
return filename
def collect_types(self):
self._typesdict = {}
self._generate("collecttype")
def _prnt(self, what=''):
self._f.write(what + '\n')
def _gettypenum(self, type):
# a KeyError here is a bug. please report it! :-)
return self._typesdict[type]
def _do_collect_type(self, tp):
if ((not isinstance(tp, model.PrimitiveType)
or tp.name == 'long double')
and tp not in self._typesdict):
num = len(self._typesdict)
self._typesdict[tp] = num
def write_source_to_f(self):
self.collect_types()
#
# The new module will have a _cffi_setup() function that receives
# objects from the ffi world, and that calls some setup code in
# the module. This setup code is split in several independent
# functions, e.g. one per constant. The functions are "chained"
# by ending in a tail call to each other.
#
# This is further split in two chained lists, depending on if we
# can do it at import-time or if we must wait for _cffi_setup() to
# provide us with the <ctype> objects. This is needed because we
# need the values of the enum constants in order to build the
# <ctype 'enum'> that we may have to pass to _cffi_setup().
#
# The following two 'chained_list_constants' items contains
# the head of these two chained lists, as a string that gives the
# call to do, if any.
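# For illustration only (an assumed sketch of the generated C, not emitted
# verbatim): each per-constant setup function tail-calls the previous head of
# its chain, so invoking the current head runs the whole list, e.g.
#     static int _cffi_const_FOO(PyObject *lib) {
#         /* ...set attribute 'FOO' on lib... */
#         return _cffi_const_BAR(lib);   /* previous head of the chain */
#     }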
self._chained_list_constants = ['((void)lib,0)', '((void)lib,0)']
#
prnt = self._prnt
# first paste some standard set of lines that are mostly '#define'
prnt(cffimod_header)
prnt()
# then paste the C source given by the user, verbatim.
prnt(self.verifier.preamble)
prnt()
#
# call generate_cpy_xxx_decl(), for every xxx found from
# ffi._parser._declarations. This generates all the functions.
self._generate("decl")
#
# implement the function _cffi_setup_custom() as calling the
# head of the chained list.
self._generate_setup_custom()
prnt()
#
# produce the method table, including the entries for the
# generated Python->C function wrappers, which are done
# by generate_cpy_function_method().
prnt('static PyMethodDef _cffi_methods[] = {')
self._generate("method")
prnt(' {"_cffi_setup", _cffi_setup, METH_VARARGS, NULL},')
prnt(' {NULL, NULL, 0, NULL} /* Sentinel */')
prnt('};')
prnt()
#
# standard init.
modname = self.verifier.get_module_name()
constants = self._chained_list_constants[False]
prnt('#if PY_MAJOR_VERSION >= 3')
prnt()
prnt('static struct PyModuleDef _cffi_module_def = {')
prnt(' PyModuleDef_HEAD_INIT,')
prnt(' "%s",' % modname)
prnt(' NULL,')
prnt(' -1,')
prnt(' _cffi_methods,')
prnt(' NULL, NULL, NULL, NULL')
prnt('};')
prnt()
prnt('PyMODINIT_FUNC')
prnt('PyInit_%s(void)' % modname)
prnt('{')
prnt(' PyObject *lib;')
prnt(' lib = PyModule_Create(&_cffi_module_def);')
prnt(' if (lib == NULL)')
prnt(' return NULL;')
prnt(' if (%s < 0 || _cffi_init() < 0) {' % (constants,))
prnt(' Py_DECREF(lib);')
prnt(' return NULL;')
prnt(' }')
prnt(' return lib;')
prnt('}')
prnt()
prnt('#else')
prnt()
prnt('PyMODINIT_FUNC')
prnt('init%s(void)' % modname)
prnt('{')
prnt(' PyObject *lib;')
prnt(' lib = Py_InitModule("%s", _cffi_methods);' % modname)
prnt(' if (lib == NULL)')
prnt(' return;')
prnt(' if (%s < 0 || _cffi_init() < 0)' % (constants,))
prnt(' return;')
prnt(' return;')
prnt('}')
prnt()
prnt('#endif')
def load_library(self, flags=None):
# XXX review all usages of 'self' here!
# import it as a new extension module
imp.acquire_lock()
try:
if hasattr(sys, "getdlopenflags"):
previous_flags = sys.getdlopenflags()
try:
if hasattr(sys, "setdlopenflags") and flags is not None:
sys.setdlopenflags(flags)
module = imp.load_dynamic(self.verifier.get_module_name(),
self.verifier.modulefilename)
except ImportError as e:
error = "importing %r: %s" % (self.verifier.modulefilename, e)
raise VerificationError(error)
finally:
if hasattr(sys, "setdlopenflags"):
sys.setdlopenflags(previous_flags)
finally:
imp.release_lock()
#
# call loading_cpy_struct() to get the struct layout inferred by
# the C compiler
self._load(module, 'loading')
#
# the C code will need the <ctype> objects. Collect them in
# order in a list.
revmapping = dict([(value, key)
for (key, value) in self._typesdict.items()])
lst = [revmapping[i] for i in range(len(revmapping))]
lst = list(map(self.ffi._get_cached_btype, lst))
#
# build the FFILibrary class and instance and call _cffi_setup().
# this will set up some fields like '_cffi_types', and only then
# it will invoke the chained list of functions that will really
# build (notably) the constant objects, as <cdata> if they are
# pointers, and store them as attributes on the 'library' object.
class FFILibrary(object):
_cffi_python_module = module
_cffi_ffi = self.ffi
_cffi_dir = []
def __dir__(self):
return FFILibrary._cffi_dir + list(self.__dict__)
library = FFILibrary()
if module._cffi_setup(lst, VerificationError, library):
import warnings
warnings.warn("reimporting %r might overwrite older definitions"
% (self.verifier.get_module_name()))
#
# finally, call the loaded_cpy_xxx() functions. This will perform
# the final adjustments, like copying the Python->C wrapper
# functions from the module to the 'library' object, and setting
# up the FFILibrary class with properties for the global C variables.
self._load(module, 'loaded', library=library)
module._cffi_original_ffi = self.ffi
module._cffi_types_of_builtin_funcs = self._types_of_builtin_functions
return library
def _get_declarations(self):
lst = [(key, tp) for (key, (tp, qual)) in
self.ffi._parser._declarations.items()]
lst.sort()
return lst
def _generate(self, step_name):
for name, tp in self._get_declarations():
kind, realname = name.split(' ', 1)
try:
method = getattr(self, '_generate_cpy_%s_%s' % (kind,
step_name))
except AttributeError:
raise VerificationError(
"not implemented in verify(): %r" % name)
try:
method(tp, realname)
except Exception as e:
model.attach_exception_info(e, name)
raise
def _load(self, module, step_name, **kwds):
for name, tp in self._get_declarations():
kind, realname = name.split(' ', 1)
method = getattr(self, '_%s_cpy_%s' % (step_name, kind))
try:
method(tp, realname, module, **kwds)
except Exception as e:
model.attach_exception_info(e, name)
raise
def _generate_nothing(self, tp, name):
pass
def _loaded_noop(self, tp, name, module, **kwds):
pass
# ----------
def _convert_funcarg_to_c(self, tp, fromvar, tovar, errcode):
extraarg = ''
if isinstance(tp, model.PrimitiveType):
if tp.is_integer_type() and tp.name != '_Bool':
converter = '_cffi_to_c_int'
extraarg = ', %s' % tp.name
else:
converter = '(%s)_cffi_to_c_%s' % (tp.get_c_name(''),
tp.name.replace(' ', '_'))
errvalue = '-1'
#
elif isinstance(tp, model.PointerType):
self._convert_funcarg_to_c_ptr_or_array(tp, fromvar,
tovar, errcode)
return
#
elif isinstance(tp, (model.StructOrUnion, model.EnumType)):
# a struct (not a struct pointer) as a function argument
self._prnt(' if (_cffi_to_c((char *)&%s, _cffi_type(%d), %s) < 0)'
% (tovar, self._gettypenum(tp), fromvar))
self._prnt(' %s;' % errcode)
return
#
elif isinstance(tp, model.FunctionPtrType):
converter = '(%s)_cffi_to_c_pointer' % tp.get_c_name('')
extraarg = ', _cffi_type(%d)' % self._gettypenum(tp)
errvalue = 'NULL'
#
else:
raise NotImplementedError(tp)
#
self._prnt(' %s = %s(%s%s);' % (tovar, converter, fromvar, extraarg))
self._prnt(' if (%s == (%s)%s && PyErr_Occurred())' % (
tovar, tp.get_c_name(''), errvalue))
self._prnt(' %s;' % errcode)
def _extra_local_variables(self, tp, localvars):
if isinstance(tp, model.PointerType):
localvars.add('Py_ssize_t datasize')
def _convert_funcarg_to_c_ptr_or_array(self, tp, fromvar, tovar, errcode):
self._prnt(' datasize = _cffi_prepare_pointer_call_argument(')
self._prnt(' _cffi_type(%d), %s, (char **)&%s);' % (
self._gettypenum(tp), fromvar, tovar))
self._prnt(' if (datasize != 0) {')
self._prnt(' if (datasize < 0)')
self._prnt(' %s;' % errcode)
self._prnt(' %s = alloca((size_t)datasize);' % (tovar,))
self._prnt(' memset((void *)%s, 0, (size_t)datasize);' % (tovar,))
self._prnt(' if (_cffi_convert_array_from_object('
'(char *)%s, _cffi_type(%d), %s) < 0)' % (
tovar, self._gettypenum(tp), fromvar))
self._prnt(' %s;' % errcode)
self._prnt(' }')
def _convert_expr_from_c(self, tp, var, context):
if isinstance(tp, model.PrimitiveType):
if tp.is_integer_type():
return '_cffi_from_c_int(%s, %s)' % (var, tp.name)
elif tp.name != 'long double':
return '_cffi_from_c_%s(%s)' % (tp.name.replace(' ', '_'), var)
else:
return '_cffi_from_c_deref((char *)&%s, _cffi_type(%d))' % (
var, self._gettypenum(tp))
elif isinstance(tp, (model.PointerType, model.FunctionPtrType)):
return '_cffi_from_c_pointer((char *)%s, _cffi_type(%d))' % (
var, self._gettypenum(tp))
elif isinstance(tp, model.ArrayType):
return '_cffi_from_c_pointer((char *)%s, _cffi_type(%d))' % (
var, self._gettypenum(model.PointerType(tp.item)))
elif isinstance(tp, model.StructOrUnion):
if tp.fldnames is None:
raise TypeError("'%s' is used as %s, but is opaque" % (
tp._get_c_name(), context))
return '_cffi_from_c_struct((char *)&%s, _cffi_type(%d))' % (
var, self._gettypenum(tp))
elif isinstance(tp, model.EnumType):
return '_cffi_from_c_deref((char *)&%s, _cffi_type(%d))' % (
var, self._gettypenum(tp))
else:
raise NotImplementedError(tp)
# ----------
# typedefs: generates no code so far
_generate_cpy_typedef_collecttype = _generate_nothing
_generate_cpy_typedef_decl = _generate_nothing
_generate_cpy_typedef_method = _generate_nothing
_loading_cpy_typedef = _loaded_noop
_loaded_cpy_typedef = _loaded_noop
# ----------
# function declarations
def _generate_cpy_function_collecttype(self, tp, name):
assert isinstance(tp, model.FunctionPtrType)
if tp.ellipsis:
self._do_collect_type(tp)
else:
# don't call _do_collect_type(tp) in this common case,
# otherwise test_autofilled_struct_as_argument fails
for type in tp.args:
self._do_collect_type(type)
self._do_collect_type(tp.result)
def _generate_cpy_function_decl(self, tp, name):
assert isinstance(tp, model.FunctionPtrType)
if tp.ellipsis:
# cannot support vararg functions better than this: check for its
# exact type (including the fixed arguments), and build it as a
# constant function pointer (no CPython wrapper)
self._generate_cpy_const(False, name, tp)
return
prnt = self._prnt
numargs = len(tp.args)
if numargs == 0:
argname = 'noarg'
elif numargs == 1:
argname = 'arg0'
else:
argname = 'args'
prnt('static PyObject *')
prnt('_cffi_f_%s(PyObject *self, PyObject *%s)' % (name, argname))
prnt('{')
#
context = 'argument of %s' % name
for i, type in enumerate(tp.args):
prnt(' %s;' % type.get_c_name(' x%d' % i, context))
#
localvars = set()
for type in tp.args:
self._extra_local_variables(type, localvars)
for decl in localvars:
prnt(' %s;' % (decl,))
#
if not isinstance(tp.result, model.VoidType):
result_code = 'result = '
context = 'result of %s' % name
prnt(' %s;' % tp.result.get_c_name(' result', context))
else:
result_code = ''
#
if len(tp.args) > 1:
rng = range(len(tp.args))
for i in rng:
prnt(' PyObject *arg%d;' % i)
prnt()
prnt(' if (!PyArg_ParseTuple(args, "%s:%s", %s))' % (
'O' * numargs, name, ', '.join(['&arg%d' % i for i in rng])))
prnt(' return NULL;')
prnt()
#
for i, type in enumerate(tp.args):
self._convert_funcarg_to_c(type, 'arg%d' % i, 'x%d' % i,
'return NULL')
prnt()
#
prnt(' Py_BEGIN_ALLOW_THREADS')
prnt(' _cffi_restore_errno();')
prnt(' { %s%s(%s); }' % (
result_code, name,
', '.join(['x%d' % i for i in range(len(tp.args))])))
prnt(' _cffi_save_errno();')
prnt(' Py_END_ALLOW_THREADS')
prnt()
#
prnt(' (void)self; /* unused */')
if numargs == 0:
prnt(' (void)noarg; /* unused */')
if result_code:
prnt(' return %s;' %
self._convert_expr_from_c(tp.result, 'result', 'result type'))
else:
prnt(' Py_INCREF(Py_None);')
prnt(' return Py_None;')
prnt('}')
prnt()
def _generate_cpy_function_method(self, tp, name):
if tp.ellipsis:
return
numargs = len(tp.args)
if numargs == 0:
meth = 'METH_NOARGS'
elif numargs == 1:
meth = 'METH_O'
else:
meth = 'METH_VARARGS'
self._prnt(' {"%s", _cffi_f_%s, %s, NULL},' % (name, name, meth))
_loading_cpy_function = _loaded_noop
def _loaded_cpy_function(self, tp, name, module, library):
if tp.ellipsis:
return
func = getattr(module, name)
setattr(library, name, func)
self._types_of_builtin_functions[func] = tp
# ----------
# named structs
_generate_cpy_struct_collecttype = _generate_nothing
def _generate_cpy_struct_decl(self, tp, name):
assert name == tp.name
self._generate_struct_or_union_decl(tp, 'struct', name)
def _generate_cpy_struct_method(self, tp, name):
self._generate_struct_or_union_method(tp, 'struct', name)
def _loading_cpy_struct(self, tp, name, module):
self._loading_struct_or_union(tp, 'struct', name, module)
def _loaded_cpy_struct(self, tp, name, module, **kwds):
self._loaded_struct_or_union(tp)
_generate_cpy_union_collecttype = _generate_nothing
def _generate_cpy_union_decl(self, tp, name):
assert name == tp.name
self._generate_struct_or_union_decl(tp, 'union', name)
def _generate_cpy_union_method(self, tp, name):
self._generate_struct_or_union_method(tp, 'union', name)
def _loading_cpy_union(self, tp, name, module):
self._loading_struct_or_union(tp, 'union', name, module)
def _loaded_cpy_union(self, tp, name, module, **kwds):
self._loaded_struct_or_union(tp)
def _generate_struct_or_union_decl(self, tp, prefix, name):
if tp.fldnames is None:
return # nothing to do with opaque structs
checkfuncname = '_cffi_check_%s_%s' % (prefix, name)
layoutfuncname = '_cffi_layout_%s_%s' % (prefix, name)
cname = ('%s %s' % (prefix, name)).strip()
#
prnt = self._prnt
prnt('static void %s(%s *p)' % (checkfuncname, cname))
prnt('{')
prnt(' /* only to generate compile-time warnings or errors */')
prnt(' (void)p;')
for fname, ftype, fbitsize, fqual in tp.enumfields():
if (isinstance(ftype, model.PrimitiveType)
and ftype.is_integer_type()) or fbitsize >= 0:
# accept all integers, but complain on float or double
prnt(' (void)((p->%s) << 1);' % fname)
else:
# only accept exactly the type declared.
try:
prnt(' { %s = &p->%s; (void)tmp; }' % (
ftype.get_c_name('*tmp', 'field %r'%fname, quals=fqual),
fname))
except VerificationError as e:
prnt(' /* %s */' % str(e)) # cannot verify it, ignore
prnt('}')
prnt('static PyObject *')
prnt('%s(PyObject *self, PyObject *noarg)' % (layoutfuncname,))
prnt('{')
prnt(' struct _cffi_aligncheck { char x; %s y; };' % cname)
prnt(' static Py_ssize_t nums[] = {')
prnt(' sizeof(%s),' % cname)
prnt(' offsetof(struct _cffi_aligncheck, y),')
for fname, ftype, fbitsize, fqual in tp.enumfields():
if fbitsize >= 0:
continue # xxx ignore fbitsize for now
prnt(' offsetof(%s, %s),' % (cname, fname))
if isinstance(ftype, model.ArrayType) and ftype.length is None:
prnt(' 0, /* %s */' % ftype._get_c_name())
else:
prnt(' sizeof(((%s *)0)->%s),' % (cname, fname))
prnt(' -1')
prnt(' };')
prnt(' (void)self; /* unused */')
prnt(' (void)noarg; /* unused */')
prnt(' return _cffi_get_struct_layout(nums);')
prnt(' /* the next line is not executed, but compiled */')
prnt(' %s(0);' % (checkfuncname,))
prnt('}')
prnt()
def _generate_struct_or_union_method(self, tp, prefix, name):
if tp.fldnames is None:
return # nothing to do with opaque structs
layoutfuncname = '_cffi_layout_%s_%s' % (prefix, name)
self._prnt(' {"%s", %s, METH_NOARGS, NULL},' % (layoutfuncname,
layoutfuncname))
def _loading_struct_or_union(self, tp, prefix, name, module):
if tp.fldnames is None:
return # nothing to do with opaque structs
layoutfuncname = '_cffi_layout_%s_%s' % (prefix, name)
#
function = getattr(module, layoutfuncname)
layout = function()
if isinstance(tp, model.StructOrUnion) and tp.partial:
# use the function()'s sizes and offsets to guide the
# layout of the struct
totalsize = layout[0]
totalalignment = layout[1]
fieldofs = layout[2::2]
fieldsize = layout[3::2]
tp.force_flatten()
assert len(fieldofs) == len(fieldsize) == len(tp.fldnames)
tp.fixedlayout = fieldofs, fieldsize, totalsize, totalalignment
else:
cname = ('%s %s' % (prefix, name)).strip()
self._struct_pending_verification[tp] = layout, cname
def _loaded_struct_or_union(self, tp):
if tp.fldnames is None:
return # nothing to do with opaque structs
self.ffi._get_cached_btype(tp) # force 'fixedlayout' to be considered
if tp in self._struct_pending_verification:
# check that the layout sizes and offsets match the real ones
def check(realvalue, expectedvalue, msg):
if realvalue != expectedvalue:
raise VerificationError(
"%s (we have %d, but C compiler says %d)"
% (msg, expectedvalue, realvalue))
ffi = self.ffi
BStruct = ffi._get_cached_btype(tp)
layout, cname = self._struct_pending_verification.pop(tp)
check(layout[0], ffi.sizeof(BStruct), "wrong total size")
check(layout[1], ffi.alignof(BStruct), "wrong total alignment")
i = 2
for fname, ftype, fbitsize, fqual in tp.enumfields():
if fbitsize >= 0:
continue # xxx ignore fbitsize for now
check(layout[i], ffi.offsetof(BStruct, fname),
"wrong offset for field %r" % (fname,))
if layout[i+1] != 0:
BField = ffi._get_cached_btype(ftype)
check(layout[i+1], ffi.sizeof(BField),
"wrong size for field %r" % (fname,))
i += 2
assert i == len(layout)
# ----------
# 'anonymous' declarations. These are produced for anonymous structs
# or unions; the 'name' is obtained by a typedef.
_generate_cpy_anonymous_collecttype = _generate_nothing
def _generate_cpy_anonymous_decl(self, tp, name):
if isinstance(tp, model.EnumType):
self._generate_cpy_enum_decl(tp, name, '')
else:
self._generate_struct_or_union_decl(tp, '', name)
def _generate_cpy_anonymous_method(self, tp, name):
if not isinstance(tp, model.EnumType):
self._generate_struct_or_union_method(tp, '', name)
def _loading_cpy_anonymous(self, tp, name, module):
if isinstance(tp, model.EnumType):
self._loading_cpy_enum(tp, name, module)
else:
self._loading_struct_or_union(tp, '', name, module)
def _loaded_cpy_anonymous(self, tp, name, module, **kwds):
if isinstance(tp, model.EnumType):
self._loaded_cpy_enum(tp, name, module, **kwds)
else:
self._loaded_struct_or_union(tp)
# ----------
# constants, likely declared with '#define'
def _generate_cpy_const(self, is_int, name, tp=None, category='const',
vartp=None, delayed=True, size_too=False,
check_value=None):
prnt = self._prnt
funcname = '_cffi_%s_%s' % (category, name)
prnt('static int %s(PyObject *lib)' % funcname)
prnt('{')
prnt(' PyObject *o;')
prnt(' int res;')
if not is_int:
prnt(' %s;' % (vartp or tp).get_c_name(' i', name))
else:
assert category == 'const'
#
if check_value is not None:
self._check_int_constant_value(name, check_value)
#
if not is_int:
if category == 'var':
realexpr = '&' + name
else:
realexpr = name
prnt(' i = (%s);' % (realexpr,))
prnt(' o = %s;' % (self._convert_expr_from_c(tp, 'i',
'variable type'),))
assert delayed
else:
prnt(' o = _cffi_from_c_int_const(%s);' % name)
prnt(' if (o == NULL)')
prnt(' return -1;')
if size_too:
prnt(' {')
prnt(' PyObject *o1 = o;')
prnt(' o = Py_BuildValue("On", o1, (Py_ssize_t)sizeof(%s));'
% (name,))
prnt(' Py_DECREF(o1);')
prnt(' if (o == NULL)')
prnt(' return -1;')
prnt(' }')
prnt(' res = PyObject_SetAttrString(lib, "%s", o);' % name)
prnt(' Py_DECREF(o);')
prnt(' if (res < 0)')
prnt(' return -1;')
prnt(' return %s;' % self._chained_list_constants[delayed])
self._chained_list_constants[delayed] = funcname + '(lib)'
prnt('}')
prnt()
def _generate_cpy_constant_collecttype(self, tp, name):
is_int = isinstance(tp, model.PrimitiveType) and tp.is_integer_type()
if not is_int:
self._do_collect_type(tp)
def _generate_cpy_constant_decl(self, tp, name):
is_int = isinstance(tp, model.PrimitiveType) and tp.is_integer_type()
self._generate_cpy_const(is_int, name, tp)
_generate_cpy_constant_method = _generate_nothing
_loading_cpy_constant = _loaded_noop
_loaded_cpy_constant = _loaded_noop
# ----------
# enums
def _check_int_constant_value(self, name, value, err_prefix=''):
prnt = self._prnt
if value <= 0:
prnt(' if ((%s) > 0 || (long)(%s) != %dL) {' % (
name, name, value))
else:
prnt(' if ((%s) <= 0 || (unsigned long)(%s) != %dUL) {' % (
name, name, value))
prnt(' char buf[64];')
prnt(' if ((%s) <= 0)' % name)
prnt(' snprintf(buf, 63, "%%ld", (long)(%s));' % name)
prnt(' else')
prnt(' snprintf(buf, 63, "%%lu", (unsigned long)(%s));' %
name)
prnt(' PyErr_Format(_cffi_VerificationError,')
prnt(' "%s%s has the real value %s, not %s",')
prnt(' "%s", "%s", buf, "%d");' % (
err_prefix, name, value))
prnt(' return -1;')
prnt(' }')
def _enum_funcname(self, prefix, name):
# "$enum_$1" => "___D_enum____D_1"
name = name.replace('$', '___D_')
return '_cffi_e_%s_%s' % (prefix, name)
def _generate_cpy_enum_decl(self, tp, name, prefix='enum'):
if tp.partial:
for enumerator in tp.enumerators:
self._generate_cpy_const(True, enumerator, delayed=False)
return
#
funcname = self._enum_funcname(prefix, name)
prnt = self._prnt
prnt('static int %s(PyObject *lib)' % funcname)
prnt('{')
for enumerator, enumvalue in zip(tp.enumerators, tp.enumvalues):
self._check_int_constant_value(enumerator, enumvalue,
"enum %s: " % name)
prnt(' return %s;' % self._chained_list_constants[True])
self._chained_list_constants[True] = funcname + '(lib)'
prnt('}')
prnt()
_generate_cpy_enum_collecttype = _generate_nothing
_generate_cpy_enum_method = _generate_nothing
def _loading_cpy_enum(self, tp, name, module):
if tp.partial:
enumvalues = [getattr(module, enumerator)
for enumerator in tp.enumerators]
tp.enumvalues = tuple(enumvalues)
tp.partial_resolved = True
def _loaded_cpy_enum(self, tp, name, module, library):
for enumerator, enumvalue in zip(tp.enumerators, tp.enumvalues):
setattr(library, enumerator, enumvalue)
# ----------
# macros: for now only for integers
def _generate_cpy_macro_decl(self, tp, name):
if tp == '...':
check_value = None
else:
check_value = tp # an integer
self._generate_cpy_const(True, name, check_value=check_value)
_generate_cpy_macro_collecttype = _generate_nothing
_generate_cpy_macro_method = _generate_nothing
_loading_cpy_macro = _loaded_noop
_loaded_cpy_macro = _loaded_noop
# ----------
# global variables
def _generate_cpy_variable_collecttype(self, tp, name):
if isinstance(tp, model.ArrayType):
tp_ptr = model.PointerType(tp.item)
else:
tp_ptr = model.PointerType(tp)
self._do_collect_type(tp_ptr)
def _generate_cpy_variable_decl(self, tp, name):
if isinstance(tp, model.ArrayType):
tp_ptr = model.PointerType(tp.item)
self._generate_cpy_const(False, name, tp, vartp=tp_ptr,
size_too = (tp.length == '...'))
else:
tp_ptr = model.PointerType(tp)
self._generate_cpy_const(False, name, tp_ptr, category='var')
_generate_cpy_variable_method = _generate_nothing
_loading_cpy_variable = _loaded_noop
def _loaded_cpy_variable(self, tp, name, module, library):
value = getattr(library, name)
if isinstance(tp, model.ArrayType): # int a[5] is "constant" in the
# sense that "a=..." is forbidden
if tp.length == '...':
assert isinstance(value, tuple)
(value, size) = value
BItemType = self.ffi._get_cached_btype(tp.item)
length, rest = divmod(size, self.ffi.sizeof(BItemType))
if rest != 0:
raise VerificationError(
"bad size: %r does not seem to be an array of %s" %
(name, tp.item))
tp = tp.resolve_length(length)
# 'value' is a <cdata 'type *'> which we have to replace with
# a <cdata 'type[N]'> if the N is actually known
if tp.length is not None:
BArray = self.ffi._get_cached_btype(tp)
value = self.ffi.cast(BArray, value)
setattr(library, name, value)
return
# remove ptr=<cdata 'int *'> from the library instance, and replace
# it by a property on the class, which reads/writes into ptr[0].
ptr = value
delattr(library, name)
def getter(library):
return ptr[0]
def setter(library, value):
ptr[0] = value
setattr(type(library), name, property(getter, setter))
type(library)._cffi_dir.append(name)
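# Illustrative effect (an assumed example, not generated here): if the C source
# declares "int foo;", then after load_library() reading lib.foo returns ptr[0]
# and assigning lib.foo = 42 stores 42 into ptr[0], via the property installed
# above on type(library).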
# ----------
def _generate_setup_custom(self):
prnt = self._prnt
prnt('static int _cffi_setup_custom(PyObject *lib)')
prnt('{')
prnt(' return %s;' % self._chained_list_constants[True])
prnt('}')
cffimod_header = r'''
#include <Python.h>
#include <stddef.h>
/* this block of #ifs should be kept exactly identical between
c/_cffi_backend.c, cffi/vengine_cpy.py, cffi/vengine_gen.py */
#if defined(_MSC_VER)
# include <malloc.h> /* for alloca() */
# if _MSC_VER < 1600 /* MSVC < 2010 */
typedef __int8 int8_t;
typedef __int16 int16_t;
typedef __int32 int32_t;
typedef __int64 int64_t;
typedef unsigned __int8 uint8_t;
typedef unsigned __int16 uint16_t;
typedef unsigned __int32 uint32_t;
typedef unsigned __int64 uint64_t;
typedef __int8 int_least8_t;
typedef __int16 int_least16_t;
typedef __int32 int_least32_t;
typedef __int64 int_least64_t;
typedef unsigned __int8 uint_least8_t;
typedef unsigned __int16 uint_least16_t;
typedef unsigned __int32 uint_least32_t;
typedef unsigned __int64 uint_least64_t;
typedef __int8 int_fast8_t;
typedef __int16 int_fast16_t;
typedef __int32 int_fast32_t;
typedef __int64 int_fast64_t;
typedef unsigned __int8 uint_fast8_t;
typedef unsigned __int16 uint_fast16_t;
typedef unsigned __int32 uint_fast32_t;
typedef unsigned __int64 uint_fast64_t;
typedef __int64 intmax_t;
typedef unsigned __int64 uintmax_t;
# else
# include <stdint.h>
# endif
# if _MSC_VER < 1800 /* MSVC < 2013 */
typedef unsigned char _Bool;
# endif
#else
# include <stdint.h>
# if (defined (__SVR4) && defined (__sun)) || defined(_AIX)
# include <alloca.h>
# endif
#endif
#if PY_MAJOR_VERSION < 3
# undef PyCapsule_CheckExact
# undef PyCapsule_GetPointer
# define PyCapsule_CheckExact(capsule) (PyCObject_Check(capsule))
# define PyCapsule_GetPointer(capsule, name) \
(PyCObject_AsVoidPtr(capsule))
#endif
#if PY_MAJOR_VERSION >= 3
# define PyInt_FromLong PyLong_FromLong
#endif
#define _cffi_from_c_double PyFloat_FromDouble
#define _cffi_from_c_float PyFloat_FromDouble
#define _cffi_from_c_long PyInt_FromLong
#define _cffi_from_c_ulong PyLong_FromUnsignedLong
#define _cffi_from_c_longlong PyLong_FromLongLong
#define _cffi_from_c_ulonglong PyLong_FromUnsignedLongLong
#define _cffi_to_c_double PyFloat_AsDouble
#define _cffi_to_c_float PyFloat_AsDouble
#define _cffi_from_c_int_const(x) \
(((x) > 0) ? \
((unsigned long long)(x) <= (unsigned long long)LONG_MAX) ? \
PyInt_FromLong((long)(x)) : \
PyLong_FromUnsignedLongLong((unsigned long long)(x)) : \
((long long)(x) >= (long long)LONG_MIN) ? \
PyInt_FromLong((long)(x)) : \
PyLong_FromLongLong((long long)(x)))
#define _cffi_from_c_int(x, type) \
(((type)-1) > 0 ? /* unsigned */ \
(sizeof(type) < sizeof(long) ? \
PyInt_FromLong((long)x) : \
sizeof(type) == sizeof(long) ? \
PyLong_FromUnsignedLong((unsigned long)x) : \
PyLong_FromUnsignedLongLong((unsigned long long)x)) : \
(sizeof(type) <= sizeof(long) ? \
PyInt_FromLong((long)x) : \
PyLong_FromLongLong((long long)x)))
#define _cffi_to_c_int(o, type) \
((type)( \
sizeof(type) == 1 ? (((type)-1) > 0 ? (type)_cffi_to_c_u8(o) \
: (type)_cffi_to_c_i8(o)) : \
sizeof(type) == 2 ? (((type)-1) > 0 ? (type)_cffi_to_c_u16(o) \
: (type)_cffi_to_c_i16(o)) : \
sizeof(type) == 4 ? (((type)-1) > 0 ? (type)_cffi_to_c_u32(o) \
: (type)_cffi_to_c_i32(o)) : \
sizeof(type) == 8 ? (((type)-1) > 0 ? (type)_cffi_to_c_u64(o) \
: (type)_cffi_to_c_i64(o)) : \
(Py_FatalError("unsupported size for type " #type), (type)0)))
#define _cffi_to_c_i8 \
((int(*)(PyObject *))_cffi_exports[1])
#define _cffi_to_c_u8 \
((int(*)(PyObject *))_cffi_exports[2])
#define _cffi_to_c_i16 \
((int(*)(PyObject *))_cffi_exports[3])
#define _cffi_to_c_u16 \
((int(*)(PyObject *))_cffi_exports[4])
#define _cffi_to_c_i32 \
((int(*)(PyObject *))_cffi_exports[5])
#define _cffi_to_c_u32 \
((unsigned int(*)(PyObject *))_cffi_exports[6])
#define _cffi_to_c_i64 \
((long long(*)(PyObject *))_cffi_exports[7])
#define _cffi_to_c_u64 \
((unsigned long long(*)(PyObject *))_cffi_exports[8])
#define _cffi_to_c_char \
((int(*)(PyObject *))_cffi_exports[9])
#define _cffi_from_c_pointer \
((PyObject *(*)(char *, CTypeDescrObject *))_cffi_exports[10])
#define _cffi_to_c_pointer \
((char *(*)(PyObject *, CTypeDescrObject *))_cffi_exports[11])
#define _cffi_get_struct_layout \
((PyObject *(*)(Py_ssize_t[]))_cffi_exports[12])
#define _cffi_restore_errno \
((void(*)(void))_cffi_exports[13])
#define _cffi_save_errno \
((void(*)(void))_cffi_exports[14])
#define _cffi_from_c_char \
((PyObject *(*)(char))_cffi_exports[15])
#define _cffi_from_c_deref \
((PyObject *(*)(char *, CTypeDescrObject *))_cffi_exports[16])
#define _cffi_to_c \
((int(*)(char *, CTypeDescrObject *, PyObject *))_cffi_exports[17])
#define _cffi_from_c_struct \
((PyObject *(*)(char *, CTypeDescrObject *))_cffi_exports[18])
#define _cffi_to_c_wchar_t \
((wchar_t(*)(PyObject *))_cffi_exports[19])
#define _cffi_from_c_wchar_t \
((PyObject *(*)(wchar_t))_cffi_exports[20])
#define _cffi_to_c_long_double \
((long double(*)(PyObject *))_cffi_exports[21])
#define _cffi_to_c__Bool \
((_Bool(*)(PyObject *))_cffi_exports[22])
#define _cffi_prepare_pointer_call_argument \
((Py_ssize_t(*)(CTypeDescrObject *, PyObject *, char **))_cffi_exports[23])
#define _cffi_convert_array_from_object \
((int(*)(char *, CTypeDescrObject *, PyObject *))_cffi_exports[24])
#define _CFFI_NUM_EXPORTS 25
typedef struct _ctypedescr CTypeDescrObject;
static void *_cffi_exports[_CFFI_NUM_EXPORTS];
static PyObject *_cffi_types, *_cffi_VerificationError;
static int _cffi_setup_custom(PyObject *lib); /* forward */
static PyObject *_cffi_setup(PyObject *self, PyObject *args)
{
PyObject *library;
int was_alive = (_cffi_types != NULL);
(void)self; /* unused */
if (!PyArg_ParseTuple(args, "OOO", &_cffi_types, &_cffi_VerificationError,
&library))
return NULL;
Py_INCREF(_cffi_types);
Py_INCREF(_cffi_VerificationError);
if (_cffi_setup_custom(library) < 0)
return NULL;
return PyBool_FromLong(was_alive);
}
static int _cffi_init(void)
{
PyObject *module, *c_api_object = NULL;
module = PyImport_ImportModule("_cffi_backend");
if (module == NULL)
goto failure;
c_api_object = PyObject_GetAttrString(module, "_C_API");
if (c_api_object == NULL)
goto failure;
if (!PyCapsule_CheckExact(c_api_object)) {
PyErr_SetNone(PyExc_ImportError);
goto failure;
}
memcpy(_cffi_exports, PyCapsule_GetPointer(c_api_object, "cffi"),
_CFFI_NUM_EXPORTS * sizeof(void *));
Py_DECREF(module);
Py_DECREF(c_api_object);
return 0;
failure:
Py_XDECREF(module);
Py_XDECREF(c_api_object);
return -1;
}
#define _cffi_type(num) ((CTypeDescrObject *)PyList_GET_ITEM(_cffi_types, num))
/**********/
'''
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for unit-testing Keras."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import threading
import numpy as np
from tensorflow.python import keras
from tensorflow.python import tf2
from tensorflow.python.eager import context
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework import test_util
from tensorflow.python.keras.engine import base_layer_utils
from tensorflow.python.keras.optimizer_v2 import adadelta as adadelta_v2
from tensorflow.python.keras.optimizer_v2 import adagrad as adagrad_v2
from tensorflow.python.keras.optimizer_v2 import adam as adam_v2
from tensorflow.python.keras.optimizer_v2 import adamax as adamax_v2
from tensorflow.python.keras.optimizer_v2 import gradient_descent as gradient_descent_v2
from tensorflow.python.keras.optimizer_v2 import nadam as nadam_v2
from tensorflow.python.keras.optimizer_v2 import rmsprop as rmsprop_v2
from tensorflow.python.util import tf_contextlib
from tensorflow.python.util import tf_decorator
from tensorflow.python.util import tf_inspect
def get_test_data(train_samples,
test_samples,
input_shape,
num_classes,
random_seed=None):
"""Generates test data to train a model on.
Arguments:
train_samples: Integer, how many training samples to generate.
test_samples: Integer, how many test samples to generate.
input_shape: Tuple of integers, shape of the inputs.
num_classes: Integer, number of classes for the data and targets.
random_seed: Integer, random seed used by numpy to generate data.
Returns:
A tuple of Numpy arrays: `(x_train, y_train), (x_test, y_test)`.
"""
if random_seed is not None:
np.random.seed(random_seed)
num_sample = train_samples + test_samples
templates = 2 * num_classes * np.random.random((num_classes,) + input_shape)
y = np.random.randint(0, num_classes, size=(num_sample,))
x = np.zeros((num_sample,) + input_shape, dtype=np.float32)
for i in range(num_sample):
x[i] = templates[y[i]] + np.random.normal(loc=0, scale=1., size=input_shape)
return ((x[:train_samples], y[:train_samples]),
(x[train_samples:], y[train_samples:]))
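# Illustrative call (hypothetical sizes, not part of the test suite):
def _example_get_test_data():
  (x_train, y_train), (x_test, y_test) = get_test_data(
      train_samples=20, test_samples=10, input_shape=(8,), num_classes=3,
      random_seed=1337)
  # Shapes: x_train (20, 8), y_train (20,), x_test (10, 8), y_test (10,).
  return x_train.shape, y_train.shape, x_test.shape, y_test.shape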
@test_util.use_deterministic_cudnn
def layer_test(layer_cls, kwargs=None, input_shape=None, input_dtype=None,
input_data=None, expected_output=None,
expected_output_dtype=None, expected_output_shape=None,
validate_training=True, adapt_data=None):
"""Test routine for a layer with a single input and single output.
Arguments:
layer_cls: Layer class object.
kwargs: Optional dictionary of keyword arguments for instantiating the
layer.
input_shape: Input shape tuple.
input_dtype: Data type of the input data.
input_data: Numpy array of input data.
expected_output: Numpy array of the expected output.
expected_output_dtype: Data type expected for the output.
expected_output_shape: Shape tuple for the expected shape of the output.
validate_training: Whether to attempt to validate training on this layer.
This might be set to False for non-differentiable layers that output
string or integer values.
adapt_data: Optional data for an 'adapt' call. If None, adapt() will not
be tested for this layer. This is only relevant for PreprocessingLayers.
Returns:
The output data (Numpy array) returned by the layer, for additional
checks to be done by the calling code.
Raises:
ValueError: if `input_shape is None`.
"""
if input_data is None:
if input_shape is None:
raise ValueError('input_shape is None')
if not input_dtype:
input_dtype = 'float32'
input_data_shape = list(input_shape)
for i, e in enumerate(input_data_shape):
if e is None:
input_data_shape[i] = np.random.randint(1, 4)
input_data = 10 * np.random.random(input_data_shape)
if input_dtype[:5] == 'float':
input_data -= 0.5
input_data = input_data.astype(input_dtype)
elif input_shape is None:
input_shape = input_data.shape
if input_dtype is None:
input_dtype = input_data.dtype
if expected_output_dtype is None:
expected_output_dtype = input_dtype
# instantiation
kwargs = kwargs or {}
layer = layer_cls(**kwargs)
# Test adapt, if data was passed.
if adapt_data is not None:
layer.adapt(adapt_data)
# test get_weights, set_weights at layer level
weights = layer.get_weights()
layer.set_weights(weights)
# test instantiation from weights
if 'weights' in tf_inspect.getargspec(layer_cls.__init__):
kwargs['weights'] = weights
layer = layer_cls(**kwargs)
# test in functional API
x = keras.layers.Input(shape=input_shape[1:], dtype=input_dtype)
y = layer(x)
if keras.backend.dtype(y) != expected_output_dtype:
raise AssertionError('When testing layer %s, for input %s, found output '
'dtype=%s but expected to find %s.\nFull kwargs: %s' %
(layer_cls.__name__,
x,
keras.backend.dtype(y),
expected_output_dtype,
kwargs))
def assert_shapes_equal(expected, actual):
"""Asserts that the output shape from the layer matches the actual shape."""
if len(expected) != len(actual):
raise AssertionError(
'When testing layer %s, for input %s, found output_shape='
'%s but expected to find %s.\nFull kwargs: %s' %
(layer_cls.__name__, x, actual, expected, kwargs))
for expected_dim, actual_dim in zip(expected, actual):
if isinstance(expected_dim, tensor_shape.Dimension):
expected_dim = expected_dim.value
if isinstance(actual_dim, tensor_shape.Dimension):
actual_dim = actual_dim.value
if expected_dim is not None and expected_dim != actual_dim:
raise AssertionError(
'When testing layer %s, for input %s, found output_shape='
'%s but expected to find %s.\nFull kwargs: %s' %
(layer_cls.__name__, x, actual, expected, kwargs))
if expected_output_shape is not None:
assert_shapes_equal(tensor_shape.TensorShape(expected_output_shape),
y.shape)
# check shape inference
model = keras.models.Model(x, y)
computed_output_shape = tuple(
layer.compute_output_shape(
tensor_shape.TensorShape(input_shape)).as_list())
computed_output_signature = layer.compute_output_signature(
tensor_spec.TensorSpec(shape=input_shape, dtype=input_dtype))
actual_output = model.predict(input_data)
actual_output_shape = actual_output.shape
assert_shapes_equal(computed_output_shape, actual_output_shape)
assert_shapes_equal(computed_output_signature.shape, actual_output_shape)
if computed_output_signature.dtype != actual_output.dtype:
raise AssertionError(
'When testing layer %s, for input %s, found output_dtype='
'%s but expected to find %s.\nFull kwargs: %s' %
(layer_cls.__name__, x, actual_output.dtype,
computed_output_signature.dtype, kwargs))
if expected_output is not None:
np.testing.assert_allclose(actual_output, expected_output,
rtol=1e-3, atol=1e-6)
# test serialization, weight setting at model level
model_config = model.get_config()
recovered_model = keras.models.Model.from_config(model_config)
if model.weights:
weights = model.get_weights()
recovered_model.set_weights(weights)
output = recovered_model.predict(input_data)
np.testing.assert_allclose(output, actual_output, rtol=1e-3, atol=1e-6)
# test training mode (e.g. useful for dropout tests)
# Rebuild the model to avoid the graph being reused between predict() and
# train_on_batch(). See b/120160788 for more details. This should be
# mitigated after 2.0.
if validate_training:
model = keras.models.Model(x, layer(x))
if _thread_local_data.run_eagerly is not None:
model.compile(
'rmsprop',
'mse',
weighted_metrics=['acc'],
run_eagerly=should_run_eagerly())
else:
model.compile('rmsprop', 'mse', weighted_metrics=['acc'])
model.train_on_batch(input_data, actual_output)
# test as first layer in Sequential API
layer_config = layer.get_config()
layer_config['batch_input_shape'] = input_shape
layer = layer.__class__.from_config(layer_config)
# Test adapt, if data was passed.
if adapt_data is not None:
layer.adapt(adapt_data)
model = keras.models.Sequential()
model.add(layer)
actual_output = model.predict(input_data)
actual_output_shape = actual_output.shape
for expected_dim, actual_dim in zip(computed_output_shape,
actual_output_shape):
if expected_dim is not None:
if expected_dim != actual_dim:
raise AssertionError(
'When testing layer %s **after deserialization**, '
'for input %s, found output_shape='
'%s but expected to find inferred shape %s.\nFull kwargs: %s' %
(layer_cls.__name__,
x,
actual_output_shape,
computed_output_shape,
kwargs))
if expected_output is not None:
np.testing.assert_allclose(actual_output, expected_output,
rtol=1e-3, atol=1e-6)
# test serialization, weight setting at model level
model_config = model.get_config()
recovered_model = keras.models.Sequential.from_config(model_config)
if model.weights:
weights = model.get_weights()
recovered_model.set_weights(weights)
output = recovered_model.predict(input_data)
np.testing.assert_allclose(output, actual_output, rtol=1e-3, atol=1e-6)
# for further checks in the caller function
return actual_output
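# Illustrative usage (a sketch with hypothetical layer arguments): exercise a
# Dense layer end to end through the functional, serialization, and training
# checks above.
def _example_layer_test():
  return layer_test(
      keras.layers.Dense,
      kwargs={'units': 3},
      input_shape=(2, 4),
      expected_output_shape=(None, 3))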
_thread_local_data = threading.local()
_thread_local_data.model_type = None
_thread_local_data.run_eagerly = None
_thread_local_data.experimental_run_tf_function = None
_thread_local_data.saved_model_format = None
@tf_contextlib.contextmanager
def model_type_scope(value):
"""Provides a scope within which the model type to test is equal to `value`.
The model type gets restored to its original value upon exiting the scope.
Arguments:
value: model type value
Yields:
The provided value.
"""
previous_value = _thread_local_data.model_type
try:
_thread_local_data.model_type = value
yield value
finally:
# Restore model type to initial value.
_thread_local_data.model_type = previous_value
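# Illustrative usage (a sketch): inside the scope, helpers such as
# get_model_type() and get_small_mlp() below honor the requested model type.
def _example_model_type_scope():
  with model_type_scope('functional'):
    return get_model_type()  # 'functional'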
@tf_contextlib.contextmanager
def run_eagerly_scope(value):
"""Provides a scope within which we compile models to run eagerly or not.
The boolean gets restored to its original value upon exiting the scope.
Arguments:
value: Bool specifying if we should run models eagerly in the active test.
Should be True or False.
Yields:
The provided value.
"""
previous_value = _thread_local_data.run_eagerly
try:
_thread_local_data.run_eagerly = value
yield value
finally:
# Restore run_eagerly to its initial value.
_thread_local_data.run_eagerly = previous_value
def should_run_eagerly():
"""Returns whether the models we are testing should be run eagerly."""
if _thread_local_data.run_eagerly is None:
raise ValueError('Cannot call `should_run_eagerly()` outside of a '
'`run_eagerly_scope()` or `run_all_keras_modes` '
'decorator.')
return _thread_local_data.run_eagerly and context.executing_eagerly()
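# Illustrative usage (a sketch): should_run_eagerly() is only meaningful inside
# run_eagerly_scope(); outside of it the call raises ValueError.
def _example_run_eagerly_scope():
  with run_eagerly_scope(True):
    return should_run_eagerly()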
@tf_contextlib.contextmanager
def experimental_run_tf_function_scope(value):
"""Provides a scope within which we compile models to run with distribution.
The boolean gets restored to its original value upon exiting the scope.
Arguments:
value: Bool specifying if we should run models with default distribution
in the active test. Should be True or False.
Yields:
The provided value.
"""
previous_value = _thread_local_data.experimental_run_tf_function
try:
_thread_local_data.experimental_run_tf_function = value
yield value
finally:
# Restore experimental_run_tf_function to its initial value.
_thread_local_data.experimental_run_tf_function = previous_value
def should_run_tf_function():
"""Returns whether the models we are testing should be run distributed."""
if _thread_local_data.experimental_run_tf_function is None:
raise ValueError(
'Cannot call `should_run_tf_function()` outside of a '
'`experimental_run_tf_function_scope()` or `run_all_keras_modes` '
'decorator.')
return (_thread_local_data.experimental_run_tf_function and
context.executing_eagerly())
@tf_contextlib.contextmanager
def saved_model_format_scope(value):
"""Provides a scope within which the savde model format to test is `value`.
The saved model format gets restored to its original value upon exiting the
scope.
Arguments:
value: saved model format value
Yields:
The provided value.
"""
previous_value = _thread_local_data.saved_model_format
try:
_thread_local_data.saved_model_format = value
yield value
finally:
# Restore saved model format to initial value.
_thread_local_data.saved_model_format = previous_value
def get_saved_model_format():
"""Gets the saved model format that should be tested."""
if _thread_local_data.saved_model_format is None:
raise ValueError(
'Cannot call `get_saved_model_format()` outside of a '
'`saved_model_format_scope()` or `run_with_all_saved_model_formats` '
'decorator.')
return _thread_local_data.saved_model_format
def get_save_format():
if _thread_local_data.saved_model_format is None:
raise ValueError(
'Cannot call `get_save_format()` outside of a '
'`saved_model_format_scope()` or `run_with_all_saved_model_formats` '
'decorator.')
return _thread_local_data.saved_model_format
def get_model_type():
"""Gets the model type that should be tested."""
if _thread_local_data.model_type is None:
raise ValueError('Cannot call `get_model_type()` outside of a '
'`model_type_scope()` or `run_with_all_model_types` '
'decorator.')
return _thread_local_data.model_type
def get_small_sequential_mlp(num_hidden, num_classes, input_dim=None):
model = keras.models.Sequential()
if input_dim:
model.add(keras.layers.Dense(num_hidden, activation='relu',
input_dim=input_dim))
else:
model.add(keras.layers.Dense(num_hidden, activation='relu'))
activation = 'sigmoid' if num_classes == 1 else 'softmax'
model.add(keras.layers.Dense(num_classes, activation=activation))
return model
def get_small_functional_mlp(num_hidden, num_classes, input_dim):
inputs = keras.Input(shape=(input_dim,))
outputs = keras.layers.Dense(num_hidden, activation='relu')(inputs)
activation = 'sigmoid' if num_classes == 1 else 'softmax'
outputs = keras.layers.Dense(num_classes, activation=activation)(outputs)
return keras.Model(inputs, outputs)
class _SmallSubclassMLP(keras.Model):
"""A subclass model based small MLP."""
def __init__(self, num_hidden, num_classes):
super(_SmallSubclassMLP, self).__init__()
self.layer_a = keras.layers.Dense(num_hidden, activation='relu')
activation = 'sigmoid' if num_classes == 1 else 'softmax'
self.layer_b = keras.layers.Dense(num_classes, activation=activation)
def call(self, inputs, **kwargs):
x = self.layer_a(inputs)
return self.layer_b(x)
class _SmallSubclassMLPCustomBuild(keras.Model):
"""A subclass model small MLP that uses a custom build method."""
def __init__(self, num_hidden, num_classes):
super(_SmallSubclassMLPCustomBuild, self).__init__()
self.layer_a = None
self.layer_b = None
self.num_hidden = num_hidden
self.num_classes = num_classes
def build(self, input_shape):
self.layer_a = keras.layers.Dense(self.num_hidden, activation='relu')
activation = 'sigmoid' if self.num_classes == 1 else 'softmax'
self.layer_b = keras.layers.Dense(self.num_classes, activation=activation)
def call(self, inputs, **kwargs):
x = self.layer_a(inputs)
return self.layer_b(x)
def get_small_subclass_mlp(num_hidden, num_classes):
return _SmallSubclassMLP(num_hidden, num_classes)
def get_small_subclass_mlp_with_custom_build(num_hidden, num_classes):
return _SmallSubclassMLPCustomBuild(num_hidden, num_classes)
def get_small_mlp(num_hidden, num_classes, input_dim):
"""Get a small mlp of the model type specified by `get_model_type`."""
model_type = get_model_type()
if model_type == 'subclass':
return get_small_subclass_mlp(num_hidden, num_classes)
if model_type == 'subclass_custom_build':
return get_small_subclass_mlp_with_custom_build(num_hidden, num_classes)
if model_type == 'sequential':
return get_small_sequential_mlp(num_hidden, num_classes, input_dim)
if model_type == 'functional':
return get_small_functional_mlp(num_hidden, num_classes, input_dim)
raise ValueError('Unknown model type {}'.format(model_type))
class _SubclassModel(keras.Model):
"""A Keras subclass model."""
def __init__(self, layers, *args, **kwargs):
"""Instantiate a model.
Args:
layers: a list of layers to be added to the model.
*args: Model's args
**kwargs: Model's keyword args, at most one of
input_tensor -> the input tensor required for ragged/sparse input.
"""
inputs = kwargs.pop('input_tensor', None)
super(_SubclassModel, self).__init__(*args, **kwargs)
# Note that clone and build don't support lists of layers in subclassed
# models, so each layer is added directly here.
for i, layer in enumerate(layers):
setattr(self, self._layer_name_for_i(i), layer)
self.num_layers = len(layers)
if inputs is not None:
self._set_inputs(inputs)
def _layer_name_for_i(self, i):
return 'layer{}'.format(i)
def call(self, inputs, **kwargs):
x = inputs
for i in range(self.num_layers):
layer = getattr(self, self._layer_name_for_i(i))
x = layer(x)
return x
class _SubclassModelCustomBuild(keras.Model):
"""A Keras subclass model that uses a custom build method."""
def __init__(self, layer_generating_func, *args, **kwargs):
super(_SubclassModelCustomBuild, self).__init__(*args, **kwargs)
self.all_layers = None
self._layer_generating_func = layer_generating_func
def build(self, input_shape):
layers = []
for layer in self._layer_generating_func():
layers.append(layer)
self.all_layers = layers
def call(self, inputs, **kwargs):
x = inputs
for layer in self.all_layers:
x = layer(x)
return x
def get_model_from_layers(layers,
input_shape=None,
input_dtype=None,
name=None,
input_ragged=None,
input_sparse=None):
"""Builds a model from a sequence of layers.
Args:
layers: The layers used to build the network.
input_shape: Shape tuple of the input or 'TensorShape' instance.
input_dtype: Datatype of the input.
name: Name for the model.
input_ragged: Boolean, whether the input data is a ragged tensor.
input_sparse: Boolean, whether the input data is a sparse tensor.
Returns:
A Keras model.
"""
model_type = get_model_type()
if model_type == 'subclass':
inputs = None
if input_ragged or input_sparse:
inputs = keras.Input(
shape=input_shape,
dtype=input_dtype,
ragged=input_ragged,
sparse=input_sparse)
return _SubclassModel(layers, name=name, input_tensor=inputs)
if model_type == 'subclass_custom_build':
layer_generating_func = lambda: layers
return _SubclassModelCustomBuild(layer_generating_func, name=name)
if model_type == 'sequential':
model = keras.models.Sequential(name=name)
if input_shape:
model.add(
keras.layers.InputLayer(
input_shape=input_shape,
dtype=input_dtype,
ragged=input_ragged,
sparse=input_sparse))
for layer in layers:
model.add(layer)
return model
if model_type == 'functional':
if not input_shape:
raise ValueError('Cannot create a functional model from layers with no '
'input shape.')
inputs = keras.Input(
shape=input_shape,
dtype=input_dtype,
ragged=input_ragged,
sparse=input_sparse)
outputs = inputs
for layer in layers:
outputs = layer(outputs)
return keras.Model(inputs, outputs, name=name)
raise ValueError('Unknown model type {}'.format(model_type))
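# Illustrative usage (a sketch with hypothetical layer sizes): build the same
# two-layer stack as whichever model type is under test.
def _example_get_model_from_layers():
  layers = [keras.layers.Dense(4, activation='relu'),
            keras.layers.Dense(1)]
  with model_type_scope('functional'):
    return get_model_from_layers(layers, input_shape=(8,))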
class _MultiIOSubclassModel(keras.Model):
"""Multi IO Keras subclass model."""
def __init__(self, branch_a, branch_b, shared_input_branch=None,
shared_output_branch=None):
super(_MultiIOSubclassModel, self).__init__()
self._shared_input_branch = shared_input_branch
self._branch_a = branch_a
self._branch_b = branch_b
self._shared_output_branch = shared_output_branch
def call(self, inputs, **kwargs):
if self._shared_input_branch:
for layer in self._shared_input_branch:
inputs = layer(inputs)
a = inputs
b = inputs
else:
a, b = inputs
for layer in self._branch_a:
a = layer(a)
for layer in self._branch_b:
b = layer(b)
outs = [a, b]
if self._shared_output_branch:
for layer in self._shared_output_branch:
outs = layer(outs)
return outs
class _MultiIOSubclassModelCustomBuild(keras.Model):
"""Multi IO Keras subclass model that uses a custom build method."""
def __init__(self, branch_a_func, branch_b_func,
shared_input_branch_func=None,
shared_output_branch_func=None):
super(_MultiIOSubclassModelCustomBuild, self).__init__()
self._shared_input_branch_func = shared_input_branch_func
self._branch_a_func = branch_a_func
self._branch_b_func = branch_b_func
self._shared_output_branch_func = shared_output_branch_func
self._shared_input_branch = None
self._branch_a = None
self._branch_b = None
self._shared_output_branch = None
def build(self, input_shape):
if self._shared_input_branch_func():
self._shared_input_branch = self._shared_input_branch_func()
self._branch_a = self._branch_a_func()
self._branch_b = self._branch_b_func()
if self._shared_output_branch_func():
self._shared_output_branch = self._shared_output_branch_func()
def call(self, inputs, **kwargs):
if self._shared_input_branch:
for layer in self._shared_input_branch:
inputs = layer(inputs)
a = inputs
b = inputs
else:
a, b = inputs
for layer in self._branch_a:
a = layer(a)
for layer in self._branch_b:
b = layer(b)
outs = a, b
if self._shared_output_branch:
for layer in self._shared_output_branch:
outs = layer(outs)
return outs
def get_multi_io_model(
branch_a,
branch_b,
shared_input_branch=None,
shared_output_branch=None):
"""Builds a multi-io model that contains two branches.
The produced model will be of the type specified by `get_model_type`.
To build a two-input, two-output model:
Specify a list of layers for branch a and branch b, but do not specify any
shared input branch or shared output branch. The resulting model will apply
each branch to a different input, to produce two outputs.
The first value in branch_a must be the Keras 'Input' layer for branch a,
and the first value in branch_b must be the Keras 'Input' layer for
branch b.
example usage:
```
branch_a = [Input(shape=(2,), name='a'), Dense(), Dense()]
branch_b = [Input(shape=(3,), name='b'), Dense(), Dense()]
model = get_multi_io_model(branch_a, branch_b)
```
To build a two-input, one-output model:
Specify a list of layers for branch a and branch b, and specify a
shared output branch. The resulting model will apply
each branch to a different input. It will then apply the shared output
branch to a tuple containing the intermediate outputs of each branch,
to produce a single output. The first layer in the shared_output_branch
must be able to merge a tuple of two tensors.
The first value in branch_a must be the Keras 'Input' layer for branch a,
and the first value in branch_b must be the Keras 'Input' layer for
branch b.
example usage:
```
input_branch_a = [Input(shape=(2,), name='a'), Dense(), Dense()]
input_branch_b = [Input(shape=(3,), name='b'), Dense(), Dense()]
shared_output_branch = [Concatenate(), Dense(), Dense()]
model = get_multi_io_model(input_branch_a, input_branch_b,
shared_output_branch=shared_output_branch)
```
To build a one-input, two-output model:
Specify a list of layers for branch a and branch b, and specify a
shared input branch. The resulting model will take one input, and apply
the shared input branch to it. It will then respectively apply each branch
to that intermediate result in parallel, to produce two outputs.
The first value in the shared_input_branch must be the Keras 'Input' layer
for the whole model. Branch a and branch b should not contain any Input
layers.
example usage:
```
shared_input_branch = [Input(shape=(2,), name='in'), Dense(), Dense()]
output_branch_a = [Dense(), Dense()]
output_branch_b = [Dense(), Dense()]
  model = get_multi_io_model(output_branch_a, output_branch_b,
shared_input_branch=shared_input_branch)
```
Args:
branch_a: A sequence of layers for branch a of the model.
branch_b: A sequence of layers for branch b of the model.
shared_input_branch: An optional sequence of layers to apply to a single
input, before applying both branches to that intermediate result. If set,
the model will take only one input instead of two. Defaults to None.
shared_output_branch: An optional sequence of layers to merge the
intermediate results produced by branch a and branch b. If set,
the model will produce only one output instead of two. Defaults to None.
Returns:
    A multi-io model of the type specified by `get_model_type`, constructed
    from the given branches.
"""
# Extract the functional inputs from the layer lists
if shared_input_branch:
inputs = shared_input_branch[0]
shared_input_branch = shared_input_branch[1:]
else:
inputs = branch_a[0], branch_b[0]
branch_a = branch_a[1:]
branch_b = branch_b[1:]
model_type = get_model_type()
if model_type == 'subclass':
return _MultiIOSubclassModel(branch_a, branch_b, shared_input_branch,
shared_output_branch)
if model_type == 'subclass_custom_build':
return _MultiIOSubclassModelCustomBuild((lambda: branch_a),
(lambda: branch_b),
(lambda: shared_input_branch),
(lambda: shared_output_branch))
if model_type == 'sequential':
raise ValueError('Cannot use `get_multi_io_model` to construct '
'sequential models')
if model_type == 'functional':
if shared_input_branch:
a_and_b = inputs
for layer in shared_input_branch:
a_and_b = layer(a_and_b)
a = a_and_b
b = a_and_b
else:
a, b = inputs
for layer in branch_a:
a = layer(a)
for layer in branch_b:
b = layer(b)
outputs = a, b
if shared_output_branch:
for layer in shared_output_branch:
outputs = layer(outputs)
return keras.Model(inputs, outputs)
raise ValueError('Unknown model type {}'.format(model_type))
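# A minimal, hedged usage sketch of `get_multi_io_model` (never called in this
# module): the layer sizes below are arbitrary illustration values, and the
# concrete model class still depends on `get_model_type()`.
def _example_two_input_two_output_model():
  branch_a = [keras.Input(shape=(2,), name='a'),
              keras.layers.Dense(4), keras.layers.Dense(1)]
  branch_b = [keras.Input(shape=(3,), name='b'),
              keras.layers.Dense(4), keras.layers.Dense(1)]
  return get_multi_io_model(branch_a, branch_b)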
_V2_OPTIMIZER_MAP = {
'adadelta': adadelta_v2.Adadelta,
'adagrad': adagrad_v2.Adagrad,
'adam': adam_v2.Adam,
'adamax': adamax_v2.Adamax,
'nadam': nadam_v2.Nadam,
'rmsprop': rmsprop_v2.RMSprop,
'sgd': gradient_descent_v2.SGD
}
def get_v2_optimizer(name, **kwargs):
"""Get the v2 optimizer requested.
  This is only necessary until v2 optimizers are the default, as we are testing
  in Eager, and Eager + v1 optimizers fail tests. When we are in v2, the string
  names alone should be sufficient, and this mapping can theoretically be removed.
Args:
name: string name of Keras v2 optimizer.
**kwargs: any kwargs to pass to the optimizer constructor.
Returns:
Initialized Keras v2 optimizer.
Raises:
ValueError: if an unknown name was passed.
"""
try:
return _V2_OPTIMIZER_MAP[name](**kwargs)
except KeyError:
raise ValueError(
'Could not find requested v2 optimizer: {}\nValid choices: {}'.format(
name, list(_V2_OPTIMIZER_MAP.keys())))
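# Hedged example of `get_v2_optimizer` (illustrative values only): the string
# name is resolved through _V2_OPTIMIZER_MAP and kwargs are forwarded to the
# optimizer constructor.
def _example_get_sgd_optimizer():
  return get_v2_optimizer('sgd', learning_rate=0.01, momentum=0.9)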
def get_expected_metric_variable_names(var_names, name_suffix=''):
"""Returns expected metric variable names given names and prefix/suffix."""
if tf2.enabled() or context.executing_eagerly():
    # In V1 eager mode and in V2, variable names are not made unique.
return [n + ':0' for n in var_names]
# In V1 graph mode variable names are made unique using a suffix.
return [n + name_suffix + ':0' for n in var_names]
def enable_v2_dtype_behavior(fn):
"""Decorator for enabling the layer V2 dtype behavior on a test."""
return _set_v2_dtype_behavior(fn, True)
def disable_v2_dtype_behavior(fn):
"""Decorator for disabling the layer V2 dtype behavior on a test."""
return _set_v2_dtype_behavior(fn, False)
def _set_v2_dtype_behavior(fn, enabled):
"""Returns version of 'fn' that runs with v2 dtype behavior on or off."""
@functools.wraps(fn)
def wrapper(*args, **kwargs):
v2_dtype_behavior = base_layer_utils.V2_DTYPE_BEHAVIOR
base_layer_utils.V2_DTYPE_BEHAVIOR = enabled
try:
return fn(*args, **kwargs)
finally:
base_layer_utils.V2_DTYPE_BEHAVIOR = v2_dtype_behavior
return tf_decorator.make_decorator(fn, wrapper)
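# Hedged illustration of the dtype-behavior decorators above (the test body is
# a placeholder): wrapping a function toggles base_layer_utils.V2_DTYPE_BEHAVIOR
# for the duration of the call and restores the previous value afterwards.
@enable_v2_dtype_behavior
def _example_v2_dtype_test():
  layer = keras.layers.Dense(1, dtype='float64')
  return layer.dtype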
|
|
import os
import json
import time
import uuid
import signal
import logging
from threading import Thread
from addict import Dict
from six.moves.http_client import HTTPConnection
from .process import Process
from .interface import ExecutorDriver
from .utils import DAY, parse_duration, encode_data, decode_data
logger = logging.getLogger(__name__)
class MesosExecutorDriver(Process, ExecutorDriver):
def __init__(self, executor, use_addict=False, timeout=DAY,
http_timeout=10):
env = os.environ
agent_endpoint = env['MESOS_AGENT_ENDPOINT']
super(MesosExecutorDriver, self).__init__(master=agent_endpoint,
timeout=timeout)
framework_id = env['MESOS_FRAMEWORK_ID']
assert framework_id
self.framework_id = dict(value=framework_id)
executor_id = env['MESOS_EXECUTOR_ID']
self.executor_id = dict(value=executor_id)
grace_shutdown_period = env.get('MESOS_EXECUTOR_SHUTDOWN_GRACE_PERIOD')
if grace_shutdown_period:
self.grace_shutdown_period = parse_duration(grace_shutdown_period)
else:
self.grace_shutdown_period = 0.0
self.checkpoint = bool(env.get('MESOS_CHECKPOINT'))
self.local = bool(env.get('MESOS_LOCAL'))
self.executor = executor
self.framework_info = None
self.executor_info = None
self.tasks = {}
self.updates = {}
self._conn = None
self._dict_cls = Dict if use_addict else dict
self._http_timeout = http_timeout
def _delay_kill(self):
def _():
try:
time.sleep(self.grace_shutdown_period)
os.killpg(0, signal.SIGKILL)
except Exception:
logger.exception('Failed to force kill executor')
t = Thread(target=_)
t.daemon = True
t.start()
def gen_request(self):
body = json.dumps(dict(
type='SUBSCRIBE',
framework_id=self.framework_id,
executor_id=self.executor_id,
subscribe=dict(
unacknowledged_tasks=list(self.tasks.values()),
unacknowledged_updates=list(self.updates.values()),
),
))
request = ('POST /api/v1/executor HTTP/1.1\r\nHost: %s\r\n'
'Content-Type: application/json\r\n'
'Accept: application/json\r\n'
'Connection: close\r\nContent-Length: %s\r\n\r\n%s') % (
self.master, len(body), body
)
return request.encode('utf-8')
def start(self):
super(MesosExecutorDriver, self).start()
self._notify()
def on_close(self):
if self._conn is not None:
self._conn.close()
self._conn = None
self.version = None
self.executor.disconnected(self)
if not self.checkpoint:
if not self.local:
self._delay_kill()
self.executor.shutdown(self)
self.abort()
def on_event(self, event):
if 'type' in event:
_type = event['type'].lower()
if _type == 'shutdown':
self.on_shutdown()
return
if _type == 'heartbeat':
return
if _type not in event:
logger.error(
'Missing `%s` in event %s' %
(_type, event))
return
event = event[_type]
func_name = 'on_%s' % (_type,)
func = getattr(self, func_name, None)
if func is not None:
func(event)
else:
logger.error('Unknown type:%s, event:%s' % (_type, event))
else:
logger.error('Unknown event:%s' % (event,))
def on_subscribed(self, info):
executor_info = info['executor_info']
framework_info = info['framework_info']
agent_info = info['agent_info']
assert executor_info['executor_id'] == self.executor_id
assert framework_info['id'] == self.framework_id
if self.executor_info is None or self.framework_info is None:
self.executor_info = executor_info
self.framework_info = framework_info
self.executor.registered(
self, self._dict_cls(executor_info),
self._dict_cls(framework_info), self._dict_cls(agent_info)
)
else:
self.executor.reregistered(self, self._dict_cls(agent_info))
def on_launch(self, event):
task_info = event['task']
task_id = task_info['task_id']['value']
assert task_id not in self.tasks
self.tasks[task_id] = task_info
self.executor.launchTask(self, self._dict_cls(task_info))
def on_launch_group(self, event):
task_group = event['task_group']
task_infos = []
for task_info in task_group['tasks']:
task_id = task_info['task_id']['value']
assert task_id not in self.tasks
self.tasks[task_id] = task_info
task_infos.append(self._dict_cls(task_info))
self.executor.launchTaskGroup(self, task_infos)
def on_kill(self, event):
task_id = event['task_id']
self.executor.killTask(self, self._dict_cls(task_id))
def on_acknowledged(self, event):
task_id = event['task_id']['value']
uuid_ = uuid.UUID(bytes=decode_data(event['uuid']))
self.updates.pop(uuid_, None)
self.tasks.pop(task_id, None)
def on_message(self, event):
data = event['data']
self.executor.frameworkMessage(self, data)
def on_error(self, event):
message = event['message']
self.executor.error(self, message)
def on_shutdown(self):
if not self.local:
self._delay_kill()
self.executor.shutdown(self)
self.abort()
def _get_conn(self):
if not self.connected:
return None
if self._conn is not None:
return self._conn
host, port = self.master.split(':', 2)
port = int(port)
self._conn = HTTPConnection(host, port, timeout=self._http_timeout)
return self._conn
    def _send(self, body, path='/api/v1/executor', method='POST', headers=None):
        # Use a fresh dict per call; a shared mutable default would keep the
        # Content-Type and Mesos-Stream-Id headers across unrelated requests.
        headers = dict(headers or {})
with self._lock:
conn = self._get_conn()
if conn is None:
raise RuntimeError('Not connected yet')
if body != '':
data = json.dumps(body).encode('utf-8')
headers['Content-Type'] = 'application/json'
else:
data = ''
stream_id = self.stream_id
if stream_id:
headers['Mesos-Stream-Id'] = stream_id
try:
conn.request(method, path, body=data, headers=headers)
resp = conn.getresponse()
except Exception:
self._conn.close()
self._conn = None
raise
if resp.status < 200 or resp.status >= 300:
raise RuntimeError(
'Failed to send request code=%s, message=%s' % (
resp.status, resp.read()
)
)
result = resp.read()
if not result:
return {}
try:
return json.loads(result.decode('utf-8'))
except Exception:
return {}
def sendStatusUpdate(self, status):
if 'timestamp' not in status:
status['timestamp'] = int(time.time())
if 'uuid' not in status:
status['uuid'] = encode_data(uuid.uuid4().bytes)
if 'source' not in status:
status['source'] = 'SOURCE_EXECUTOR'
body = dict(
type='UPDATE',
executor_id=self.executor_id,
framework_id=self.framework_id,
update=dict(
status=status,
),
)
self._send(body)
def sendFrameworkMessage(self, data):
body = dict(
type='MESSAGE',
executor_id=self.executor_id,
framework_id=self.framework_id,
message=dict(
data=data,
),
)
self._send(body)
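# Hypothetical wiring sketch (not part of this module, never called here): it
# assumes an `executor` object implementing the callbacks used above
# (registered, launchTask, frameworkMessage, shutdown, ...) and relies only on
# the MesosExecutorDriver API visible in this file.
def _example_run_executor(executor):
    driver = MesosExecutorDriver(executor, use_addict=True)
    driver.start()  # subscribes to the agent endpoint from MESOS_AGENT_ENDPOINT
    return driver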
|
|
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Extensions supporting OAuth1."""
from oslo_config import cfg
from oslo_serialization import jsonutils
from oslo_utils import timeutils
from keystone.common import controller
from keystone.common import dependency
from keystone.common import wsgi
from keystone.contrib.oauth1 import core as oauth1
from keystone.contrib.oauth1 import validator
from keystone import exception
from keystone.i18n import _
from keystone.models import token_model
from keystone import notifications
CONF = cfg.CONF
@notifications.internal(notifications.INVALIDATE_USER_OAUTH_CONSUMER_TOKENS,
resource_id_arg_index=0)
def _emit_user_oauth_consumer_token_invalidate(payload):
    # This is a special-case notification that expects the payload to be a dict
    # containing the user_id and the consumer_id. This is so that the token
    # provider can invalidate any tokens in the token persistence backend if
    # token persistence is enabled.
pass
@dependency.requires('oauth_api', 'token_provider_api')
class ConsumerCrudV3(controller.V3Controller):
collection_name = 'consumers'
member_name = 'consumer'
@classmethod
def base_url(cls, context, path=None):
"""Construct a path and pass it to V3Controller.base_url method."""
# NOTE(stevemar): Overriding path to /OS-OAUTH1/consumers so that
# V3Controller.base_url handles setting the self link correctly.
path = '/OS-OAUTH1/' + cls.collection_name
return controller.V3Controller.base_url(context, path=path)
@controller.protected()
def create_consumer(self, context, consumer):
ref = self._assign_unique_id(self._normalize_dict(consumer))
consumer_ref = self.oauth_api.create_consumer(ref)
return ConsumerCrudV3.wrap_member(context, consumer_ref)
@controller.protected()
def update_consumer(self, context, consumer_id, consumer):
self._require_matching_id(consumer_id, consumer)
ref = self._normalize_dict(consumer)
self._validate_consumer_ref(ref)
ref = self.oauth_api.update_consumer(consumer_id, ref)
return ConsumerCrudV3.wrap_member(context, ref)
@controller.protected()
def list_consumers(self, context):
ref = self.oauth_api.list_consumers()
return ConsumerCrudV3.wrap_collection(context, ref)
@controller.protected()
def get_consumer(self, context, consumer_id):
ref = self.oauth_api.get_consumer(consumer_id)
return ConsumerCrudV3.wrap_member(context, ref)
@controller.protected()
def delete_consumer(self, context, consumer_id):
user_token_ref = token_model.KeystoneToken(
token_id=context['token_id'],
token_data=self.token_provider_api.validate_token(
context['token_id']))
payload = {'user_id': user_token_ref.user_id,
'consumer_id': consumer_id}
_emit_user_oauth_consumer_token_invalidate(payload)
self.oauth_api.delete_consumer(consumer_id)
def _validate_consumer_ref(self, consumer):
if 'secret' in consumer:
msg = _('Cannot change consumer secret')
raise exception.ValidationError(message=msg)
@dependency.requires('oauth_api')
class AccessTokenCrudV3(controller.V3Controller):
collection_name = 'access_tokens'
member_name = 'access_token'
@classmethod
def _add_self_referential_link(cls, context, ref):
# NOTE(lwolf): overriding method to add proper path to self link
ref.setdefault('links', {})
path = '/users/%(user_id)s/OS-OAUTH1/access_tokens' % {
'user_id': cls._get_user_id(ref)
}
ref['links']['self'] = cls.base_url(context, path) + '/' + ref['id']
@controller.protected()
def get_access_token(self, context, user_id, access_token_id):
access_token = self.oauth_api.get_access_token(access_token_id)
if access_token['authorizing_user_id'] != user_id:
raise exception.NotFound()
access_token = self._format_token_entity(context, access_token)
return AccessTokenCrudV3.wrap_member(context, access_token)
@controller.protected()
def list_access_tokens(self, context, user_id):
auth_context = context.get('environment',
{}).get('KEYSTONE_AUTH_CONTEXT', {})
if auth_context.get('is_delegated_auth'):
raise exception.Forbidden(
_('Cannot list request tokens'
' with a token issued via delegation.'))
refs = self.oauth_api.list_access_tokens(user_id)
formatted_refs = ([self._format_token_entity(context, x)
for x in refs])
return AccessTokenCrudV3.wrap_collection(context, formatted_refs)
@controller.protected()
def delete_access_token(self, context, user_id, access_token_id):
access_token = self.oauth_api.get_access_token(access_token_id)
consumer_id = access_token['consumer_id']
payload = {'user_id': user_id, 'consumer_id': consumer_id}
_emit_user_oauth_consumer_token_invalidate(payload)
return self.oauth_api.delete_access_token(
user_id, access_token_id)
@staticmethod
def _get_user_id(entity):
return entity.get('authorizing_user_id', '')
def _format_token_entity(self, context, entity):
formatted_entity = entity.copy()
access_token_id = formatted_entity['id']
user_id = self._get_user_id(formatted_entity)
if 'role_ids' in entity:
formatted_entity.pop('role_ids')
if 'access_secret' in entity:
formatted_entity.pop('access_secret')
url = ('/users/%(user_id)s/OS-OAUTH1/access_tokens/%(access_token_id)s'
'/roles' % {'user_id': user_id,
'access_token_id': access_token_id})
formatted_entity.setdefault('links', {})
formatted_entity['links']['roles'] = (self.base_url(context, url))
return formatted_entity
@dependency.requires('oauth_api', 'role_api')
class AccessTokenRolesV3(controller.V3Controller):
collection_name = 'roles'
member_name = 'role'
@controller.protected()
def list_access_token_roles(self, context, user_id, access_token_id):
access_token = self.oauth_api.get_access_token(access_token_id)
if access_token['authorizing_user_id'] != user_id:
raise exception.NotFound()
authed_role_ids = access_token['role_ids']
authed_role_ids = jsonutils.loads(authed_role_ids)
refs = ([self._format_role_entity(x) for x in authed_role_ids])
return AccessTokenRolesV3.wrap_collection(context, refs)
@controller.protected()
def get_access_token_role(self, context, user_id,
access_token_id, role_id):
access_token = self.oauth_api.get_access_token(access_token_id)
if access_token['authorizing_user_id'] != user_id:
raise exception.Unauthorized(_('User IDs do not match'))
authed_role_ids = access_token['role_ids']
authed_role_ids = jsonutils.loads(authed_role_ids)
for authed_role_id in authed_role_ids:
if authed_role_id == role_id:
role = self._format_role_entity(role_id)
return AccessTokenRolesV3.wrap_member(context, role)
raise exception.RoleNotFound(_('Could not find role'))
def _format_role_entity(self, role_id):
role = self.role_api.get_role(role_id)
formatted_entity = role.copy()
if 'description' in role:
formatted_entity.pop('description')
if 'enabled' in role:
formatted_entity.pop('enabled')
return formatted_entity
@dependency.requires('assignment_api', 'oauth_api', 'token_provider_api')
class OAuthControllerV3(controller.V3Controller):
collection_name = 'not_used'
member_name = 'not_used'
def create_request_token(self, context):
headers = context['headers']
oauth_headers = oauth1.get_oauth_headers(headers)
consumer_id = oauth_headers.get('oauth_consumer_key')
requested_project_id = headers.get('Requested-Project-Id')
if not consumer_id:
raise exception.ValidationError(
attribute='oauth_consumer_key', target='request')
if not requested_project_id:
raise exception.ValidationError(
attribute='requested_project_id', target='request')
url = self.base_url(context, context['path'])
req_headers = {'Requested-Project-Id': requested_project_id}
req_headers.update(headers)
request_verifier = oauth1.RequestTokenEndpoint(
request_validator=validator.OAuthValidator(),
token_generator=oauth1.token_generator)
h, b, s = request_verifier.create_request_token_response(
url,
http_method='POST',
body=context['query_string'],
headers=req_headers)
if (not b) or int(s) > 399:
msg = _('Invalid signature')
raise exception.Unauthorized(message=msg)
request_token_duration = CONF.oauth1.request_token_duration
token_ref = self.oauth_api.create_request_token(consumer_id,
requested_project_id,
request_token_duration)
result = ('oauth_token=%(key)s&oauth_token_secret=%(secret)s'
% {'key': token_ref['id'],
'secret': token_ref['request_secret']})
if CONF.oauth1.request_token_duration:
expiry_bit = '&oauth_expires_at=%s' % token_ref['expires_at']
result += expiry_bit
        headers = [('Content-Type', 'application/x-www-form-urlencoded')]
response = wsgi.render_response(result,
status=(201, 'Created'),
headers=headers)
return response
def create_access_token(self, context):
headers = context['headers']
oauth_headers = oauth1.get_oauth_headers(headers)
consumer_id = oauth_headers.get('oauth_consumer_key')
request_token_id = oauth_headers.get('oauth_token')
oauth_verifier = oauth_headers.get('oauth_verifier')
if not consumer_id:
raise exception.ValidationError(
attribute='oauth_consumer_key', target='request')
if not request_token_id:
raise exception.ValidationError(
attribute='oauth_token', target='request')
if not oauth_verifier:
raise exception.ValidationError(
attribute='oauth_verifier', target='request')
req_token = self.oauth_api.get_request_token(
request_token_id)
expires_at = req_token['expires_at']
if expires_at:
now = timeutils.utcnow()
expires = timeutils.normalize_time(
timeutils.parse_isotime(expires_at))
if now > expires:
raise exception.Unauthorized(_('Request token is expired'))
url = self.base_url(context, context['path'])
access_verifier = oauth1.AccessTokenEndpoint(
request_validator=validator.OAuthValidator(),
token_generator=oauth1.token_generator)
h, b, s = access_verifier.create_access_token_response(
url,
http_method='POST',
body=context['query_string'],
headers=headers)
params = oauth1.extract_non_oauth_params(b)
if len(params) != 0:
msg = _('There should not be any non-oauth parameters')
raise exception.Unauthorized(message=msg)
if req_token['consumer_id'] != consumer_id:
msg = _('provided consumer key does not match stored consumer key')
raise exception.Unauthorized(message=msg)
if req_token['verifier'] != oauth_verifier:
msg = _('provided verifier does not match stored verifier')
raise exception.Unauthorized(message=msg)
if req_token['id'] != request_token_id:
msg = _('provided request key does not match stored request key')
raise exception.Unauthorized(message=msg)
if not req_token.get('authorizing_user_id'):
msg = _('Request Token does not have an authorizing user id')
raise exception.Unauthorized(message=msg)
access_token_duration = CONF.oauth1.access_token_duration
token_ref = self.oauth_api.create_access_token(request_token_id,
access_token_duration)
result = ('oauth_token=%(key)s&oauth_token_secret=%(secret)s'
% {'key': token_ref['id'],
'secret': token_ref['access_secret']})
if CONF.oauth1.access_token_duration:
expiry_bit = '&oauth_expires_at=%s' % (token_ref['expires_at'])
result += expiry_bit
        headers = [('Content-Type', 'application/x-www-form-urlencoded')]
response = wsgi.render_response(result,
status=(201, 'Created'),
headers=headers)
return response
@controller.protected()
def authorize_request_token(self, context, request_token_id, roles):
"""An authenticated user is going to authorize a request token.
As a security precaution, the requested roles must match those in
the request token. Because this is in a CLI-only world at the moment,
there is not another easy way to make sure the user knows which roles
are being requested before authorizing.
"""
auth_context = context.get('environment',
{}).get('KEYSTONE_AUTH_CONTEXT', {})
if auth_context.get('is_delegated_auth'):
raise exception.Forbidden(
_('Cannot authorize a request token'
' with a token issued via delegation.'))
req_token = self.oauth_api.get_request_token(request_token_id)
expires_at = req_token['expires_at']
if expires_at:
now = timeutils.utcnow()
expires = timeutils.normalize_time(
timeutils.parse_isotime(expires_at))
if now > expires:
raise exception.Unauthorized(_('Request token is expired'))
# put the roles in a set for easy comparison
authed_roles = set()
for role in roles:
authed_roles.add(role['id'])
# verify the authorizing user has the roles
user_token = token_model.KeystoneToken(
token_id=context['token_id'],
token_data=self.token_provider_api.validate_token(
context['token_id']))
user_id = user_token.user_id
project_id = req_token['requested_project_id']
user_roles = self.assignment_api.get_roles_for_user_and_project(
user_id, project_id)
cred_set = set(user_roles)
if not cred_set.issuperset(authed_roles):
msg = _('authorizing user does not have role required')
raise exception.Unauthorized(message=msg)
# create list of just the id's for the backend
role_list = list(authed_roles)
# verify the user has the project too
req_project_id = req_token['requested_project_id']
user_projects = self.assignment_api.list_projects_for_user(user_id)
for user_project in user_projects:
if user_project['id'] == req_project_id:
break
else:
msg = _("User is not a member of the requested project")
raise exception.Unauthorized(message=msg)
# finally authorize the token
authed_token = self.oauth_api.authorize_request_token(
request_token_id, user_id, role_list)
to_return = {'token': {'oauth_verifier': authed_token['verifier']}}
return to_return
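# Rough outline of the OAuth1 flow these controllers implement (the URL routes
# are wired up in the extension's router, not in this file, so treat this as an
# orientation aid only):
#   1. A consumer sends a signed request handled by create_request_token(),
#      including a Requested-Project-Id header, and receives
#      oauth_token/oauth_token_secret for a request token.
#   2. A user of that project calls authorize_request_token() with the roles to
#      grant, which yields an oauth_verifier.
#   3. The consumer exchanges the authorized request token plus verifier via
#      create_access_token() for an access token scoped to the project.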
|
|
# encoding: utf-8
# Copyright 2011 Tree.io Limited
# This file is part of Treeio.
# License www.tree.io/license
"""
Administration module forms
"""
from django.db import models
from django import forms
from django.forms import ModelChoiceField
from treeio.core.conf import settings
from django.db.models import Q
from django.core.files.storage import default_storage
import django.contrib.auth.models as django_auth
from django.utils.translation import ugettext as _
from treeio.core.decorators import preprocess_form
from treeio.core.models import User, Group, Perspective, ModuleSetting, Page, PageFolder, user_autocreate_handler
import hashlib
import random
import re
preprocess_form()
PERMISSION_CHOICES = (
('everyone', 'Everyone'),
('usergroup', 'Automatic, User and Default Group'),
('usergroupreadonly', 'Automatic, User and Default Group. READ ONLY'),
('userallgroups', 'Automatic, User and All Their Groups'),
('userallgroupsreadonly',
'Automatic, User and All Their Groups. READ ONLY'),
('user', 'Automatic, User Only'),
('userreadonly', 'Automatic, User Only. READ ONLY'),
('nomoduleusergroup', 'Automatic, Skip Module, User and Default Group'),
('nomoduleusergroupreadonly',
'Automatic, Skip Module, User and Default Group. READ ONLY'),
('nomoduleuserallgroups',
'Automatic, Skip Module, User and All Their Groups'),
('nomoduleuserallgroupsreadonly',
'Automatic, Skip Module, User and All Their Groups. READ ONLY'),
('nomoduleuser', 'Automatic, Skip Module, User Only'),
('nomoduleuserreadonly', 'Automatic, Skip Module, User Only. READ ONLY'),
('forceusergroup', 'Force User and Default Group'),
('forceuserallgroups', 'Force User and All Their Groups'),
('forceuser', 'Force User Only'),
)
class SettingsForm(forms.Form):
""" Global settings form """
default_perspective = forms.ModelChoiceField(
label='Default Perspective', queryset=[])
default_permissions = forms.ChoiceField(label='Default Permissions',
choices=PERMISSION_CHOICES)
language = forms.ChoiceField(label='Language', choices=[])
default_timezone = forms.ChoiceField(label='Time Zone', choices=[])
logo = forms.ImageField(
label='Logo', required=False, widget=forms.FileInput)
def __init__(self, user, *args, **kwargs):
"Sets choices and initial value"
super(SettingsForm, self).__init__(*args, **kwargs)
self.fields['default_perspective'].queryset = Perspective.objects.all()
try:
conf = ModuleSetting.get_for_module(
'treeio.core', 'default_perspective')[0]
default_perspective = Perspective.objects.get(pk=long(conf.value))
self.fields['default_perspective'].initial = default_perspective.id
except:
pass
try:
conf = ModuleSetting.get_for_module(
'treeio.core', 'default_permissions')[0]
self.fields['default_permissions'].initial = conf.value
except:
self.fields['default_permissions'].initial = getattr(
settings, 'HARDTREE_DEFAULT_PERMISSIONS', 'everyone')
self.fields['default_timezone'].choices = getattr(
settings, 'HARDTREE_SERVER_TIMEZONE')
timezone = settings.HARDTREE_SERVER_DEFAULT_TIMEZONE
try:
conf = ModuleSetting.get_for_module(
'treeio.core', 'default_timezone')[0]
timezone = conf.value
except Exception:
pass
self.fields['default_timezone'].initial = timezone
self.fields['language'].choices = getattr(
settings, 'HARDTREE_LANGUAGES', [('en', 'English')])
language = getattr(settings, 'HARDTREE_LANGUAGES_DEFAULT', '')
try:
conf = ModuleSetting.get_for_module('treeio.core', 'language')[0]
language = conf.value
except IndexError:
pass
self.fields['language'].initial = language
if getattr(settings, 'HARDTREE_SUBSCRIPTION_CUSTOMIZATION', True):
logopath = ''
try:
conf = ModuleSetting.get_for_module(
'treeio.core', 'logopath')[0]
logopath = conf.value
except:
pass
if logopath:
match = re.match('.*[a-z0-9]{32}__(?P<filename>.+)$', logopath)
if match:
logopath = match.group('filename')
form_field = forms.ChoiceField(
label=_("Logo"), widget=forms.RadioSelect())
form_field.choices = ((logopath, _("Keep existing: ") + unicode(logopath)),
('delete', "Delete "))
form_field.initial = logopath
form_field.required = False
self.fields['logo'] = form_field
self.fields['logo'].label = _("Logo")
else:
del self.fields['logo']
self.fields['default_perspective'].label = _("Default Perspective")
self.fields['default_permissions'].label = _("Default Permissions")
self.fields['default_timezone'].label = _("Time Zone")
self.fields['language'].label = _("Language")
def _get_upload_name(self, filename):
"Returns an upload_to path to a new file"
while True:
hasher = hashlib.md5()
hasher.update(str(random.random()))
filepath = u"core/" + hasher.hexdigest() + u"__" + filename
fullpath = settings.MEDIA_ROOT + filepath
if not default_storage.exists(fullpath):
return filepath
def _handle_uploaded_file(self, field_name):
"Process an uploaded file"
try:
file = self.files[field_name]
filepath = self._get_upload_name(file.name)
except KeyError:
return ''
destination = open(settings.MEDIA_ROOT + filepath, 'wb+')
for chunk in file.chunks():
destination.write(chunk)
destination.close()
return filepath
def save(self):
"Form processor"
try:
ModuleSetting.set_for_module('default_perspective',
self.cleaned_data[
'default_perspective'].id,
'treeio.core')
ModuleSetting.set_for_module('default_permissions',
self.cleaned_data[
'default_permissions'],
'treeio.core')
ModuleSetting.set_for_module('default_timezone',
self.cleaned_data['default_timezone'],
'treeio.core')
ModuleSetting.set_for_module('language',
self.cleaned_data['language'],
'treeio.core')
if getattr(settings, 'HARDTREE_SUBSCRIPTION_CUSTOMIZATION', True):
if isinstance(self.fields['logo'], forms.FileField):
logopath = self._handle_uploaded_file('logo')
ModuleSetting.set_for_module(
'logopath', logopath, 'treeio.core')
elif isinstance(self.fields['logo'], forms.ChoiceField):
if self.cleaned_data['logo'] == 'delete':
try:
ModuleSetting.get_for_module(
'treeio.core', 'logopath').delete()
except:
pass
return True
except:
return False
class PerspectiveForm(forms.ModelForm):
""" Perspective form """
name = forms.CharField(widget=forms.TextInput(attrs={'size': '50'}))
def __init__(self, user, *args, **kwargs):
super(PerspectiveForm, self).__init__(*args, **kwargs)
self.fields['modules'].help_text = ""
self.fields['name'].label = _("Name")
self.fields['modules'].label = _("Modules")
self.fields['details'].label = _("Details")
class Meta:
"Perspective Form"
model = Perspective
fields = ('name', 'modules', 'details')
class UserForm(forms.ModelForm):
""" User form """
perspective = ModelChoiceField(
label='Perspective', queryset=[], required=False)
def __init__(self, *args, **kwargs):
super(UserForm, self).__init__(*args, **kwargs)
if 'instance' in kwargs:
self.instance = kwargs['instance']
else:
self.fields['password'] = forms.CharField(max_length=255, label=_("Password"),
widget=forms.PasswordInput(render_value=False))
self.fields['password_again'] = forms.CharField(max_length=255, label=_("Confirm Password"),
widget=forms.PasswordInput(render_value=False))
self.fields['name'].label = _("Username")
self.fields['name'].help_text = _("Used to log in")
self.fields['default_group'].label = _("Default group")
self.fields['other_groups'].label = _("Other groups")
self.fields['other_groups'].help_text = ""
self.fields['perspective'].label = _("Perspective")
self.fields['perspective'].queryset = Perspective.objects.all()
if self.instance:
try:
self.fields[
'perspective'].initial = self.instance.get_perspective()
except:
pass
def clean_name(self):
"Clean Name"
data = self.cleaned_data['name']
query = Q(name=data)
if self.instance and self.instance.id:
query = query & ~Q(id=self.instance.id)
existing = User.objects.filter(query)
if existing:
raise forms.ValidationError(
_("User with username %s already exists.") % data)
if self.instance and not self.instance.id:
# Check Hardtree Subscription user limit
user_limit = getattr(
settings, 'HARDTREE_SUBSCRIPTION_USER_LIMIT', 0)
if user_limit > 0:
user_number = User.objects.filter(disabled=False).count()
if user_number >= user_limit:
raise forms.ValidationError(
_("Sorry, but your subscription does not allow more than %d users. You're currently at your limit.") % (user_limit))
return data
def clean_password_again(self):
"Clean password again"
password1 = self.cleaned_data['password']
password2 = self.cleaned_data['password_again']
if not password1 == password2:
raise forms.ValidationError(_("Passwords do not match"))
return password2
def clean_disabled(self):
"Ensure the admin does not go over subscription limit by re-enabling users"
enable = not self.cleaned_data['disabled']
if self.instance and self.instance.id and enable and self.instance.disabled:
user_limit = getattr(
settings, 'HARDTREE_SUBSCRIPTION_USER_LIMIT', 0)
if user_limit > 0:
user_number = User.objects.filter(disabled=False).count()
if user_number >= user_limit:
raise forms.ValidationError(
_("Sorry, but your subscription does not allow more than %d users. You're currently at your limit.") % (user_limit))
return self.cleaned_data['disabled']
def save(self, *args, **kwargs):
"Form processor"
if self.instance.id:
self.instance.user.username = self.instance.name
self.instance.user.save()
super(UserForm, self).save(*args, **kwargs)
else:
new_user = django_auth.User(
username=self.cleaned_data['name'], password='')
new_user.set_password(self.cleaned_data['password'])
models.signals.post_save.disconnect(user_autocreate_handler, sender=django_auth.User)
new_user.save()
if getattr(settings, 'HARDTREE_SIGNALS_AUTOCREATE_USER', False):
models.signals.post_save.connect(user_autocreate_handler, sender=django_auth.User)
self.instance.user = new_user
super(UserForm, self).save(*args, **kwargs)
if self.cleaned_data['perspective']:
self.instance.set_perspective(self.cleaned_data['perspective'])
return self.instance
class Meta:
"User Form"
model = User
fields = ('name', 'default_group', 'other_groups', 'disabled')
class PasswordForm(forms.Form):
""" Password form """
new_password = forms.CharField(max_length=255, label=_("New Password"),
widget=forms.PasswordInput(render_value=False))
new_password_again = forms.CharField(max_length=255, label=_("Confirm Password"),
widget=forms.PasswordInput(render_value=False))
user = None
def __init__(self, user, *args, **kwargs):
super(PasswordForm, self).__init__(*args, **kwargs)
self.user = user
self.fields['new_password'].label = _("New Password")
self.fields['new_password_again'].label = _("Confirm Password")
def clean_new_password_again(self):
"Clean New Password Again"
password1 = self.cleaned_data['new_password']
password2 = self.cleaned_data['new_password_again']
if not password1 == password2:
raise forms.ValidationError(_("Passwords do not match"))
return password2
def save(self):
"Save"
password1 = self.cleaned_data['new_password']
self.user.set_password(password1)
return self.user.save()
class GroupForm(forms.ModelForm):
""" Group form """
perspective = ModelChoiceField(
label=_('Perspective'), queryset=[], required=False)
def __init__(self, *args, **kwargs):
super(GroupForm, self).__init__(*args, **kwargs)
self.fields['perspective'].label = _('Perspective')
self.fields['perspective'].queryset = Perspective.objects.all()
if self.instance:
try:
self.fields[
'perspective'].initial = self.instance.get_perspective()
except:
pass
def save(self, *args, **kwargs):
instance = super(GroupForm, self).save(*args, **kwargs)
if instance.id and self.cleaned_data['perspective']:
instance.set_perspective(self.cleaned_data['perspective'])
return instance
class Meta:
"Group Form"
model = Group
fields = ('name', 'parent', 'details')
class PageForm(forms.ModelForm):
""" Static Page form """
title = forms.CharField(widget=forms.TextInput(attrs={'size': '50'}))
def __init__(self, *args, **kwargs):
super(PageForm, self).__init__(*args, **kwargs)
class Meta:
"Page Form"
model = Page
fields = ('name', 'title', 'folder', 'published', 'body')
class PageFolderForm(forms.ModelForm):
""" PageFolder for Static Pages form """
name = forms.CharField(widget=forms.TextInput(attrs={'size': '50'}))
def __init__(self, *args, **kwargs):
super(PageFolderForm, self).__init__(*args, **kwargs)
class Meta:
"Page Folder Form"
model = PageFolder
fields = ('name', 'details')
class FilterForm(forms.ModelForm):
""" Filter form for Modules definition """
def __init__(self, user, _type=None, *args, **kwargs):
if _type is None:
_type = []
super(FilterForm, self).__init__(*args, **kwargs)
if 'perspective' in _type:
del self.fields['name']
self.fields['modules'].help_text = ""
if 'module' in _type:
del self.fields['name']
self.fields['modules'].help_text = ""
class Meta:
"Filter"
model = Perspective
fields = ('name', 'modules')
from treeio.identities.forms import ContactForm # todo: move import to top of file
class ContactSetupForm(ContactForm):
""" ContactSetupForm """
name = forms.CharField(
max_length=256, widget=forms.TextInput(attrs={'size': '50'}))
instance = None
files = {}
def __init__(self, contact_type, instance=None, *args, **kwargs):
"Populates form with fields from given ContactType"
if instance:
self.instance = instance
values = instance.contactvalue_set.all()
super(ContactForm, self).__init__(*args, **kwargs)
self.fields['name'].label = _('Name')
if 'files' in kwargs:
self.files = kwargs['files']
for field in contact_type.fields.all():
if self.instance:
initial_field_name = self._get_free_field_name(field)
self.fields[initial_field_name] = self._get_form_field(field)
for value in values:
if value.field == field:
field_name = self._get_free_field_name(field)
self.fields[field_name] = self._get_form_field(
field, value)
if initial_field_name in self.fields:
del self.fields[initial_field_name]
else:
field_name = self._get_free_field_name(field)
self.fields[field_name] = self._get_form_field(field)
if self.instance:
self.fields['name'].initial = self.instance.name
|
|
#!/usr/bin/env python
# Copyright 2018 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
If should_use_hermetic_xcode.py emits "1", and the current toolchain is out of
date:
* Downloads the hermetic mac toolchain
* Requires CIPD authentication. Run `cipd auth-login`, use Google account.
* Accepts the license.
* If xcode-select and xcodebuild are not passwordless in sudoers, requires
user interaction.
* Downloads standalone binaries from [a possibly different version of Xcode].
The toolchain version can be overridden by setting MAC_TOOLCHAIN_REVISION with
the full revision, e.g. 9A235.
"""
from __future__ import print_function
import argparse
import os
import pkg_resources
import platform
import plistlib
import shutil
import subprocess
import sys
def LoadPList(path):
"""Loads Plist at |path| and returns it as a dictionary."""
if sys.version_info.major == 2:
return plistlib.readPlist(path)
with open(path, 'rb') as f:
return plistlib.load(f)
# This contains binaries from Xcode 12.5 12E262, along with the macOS 11 SDK.
# To build these packages, see comments in build/xcode_binaries.yaml
MAC_BINARIES_LABEL = 'infra_internal/ios/xcode/xcode_binaries/mac-amd64'
MAC_BINARIES_TAG = 'pBipKbKSkYGXpuOBm4-8zuvfIGeFtpGbQ4IHM9YW0xMC'
# The toolchain will not be downloaded if the minimum OS version is not met. 19
# is the major version number for macOS 10.15. 12B5044c (Xcode 12.2rc) only runs
# on 10.15.4 and newer.
MAC_MINIMUM_OS_VERSION = [19, 4]
BASE_DIR = os.path.abspath(os.path.dirname(__file__))
TOOLCHAIN_ROOT = os.path.join(BASE_DIR, 'mac_files')
TOOLCHAIN_BUILD_DIR = os.path.join(TOOLCHAIN_ROOT, 'Xcode.app')
# Always integrity-check the entire SDK. Mac SDK packages are complex and often
# hit edge cases in cipd (eg https://crbug.com/1033987,
# https://crbug.com/915278), and generally when this happens it requires manual
# intervention to fix.
# Note the trailing \n!
PARANOID_MODE = '$ParanoidMode CheckIntegrity\n'
def PlatformMeetsHermeticXcodeRequirements():
if sys.platform != 'darwin':
return True
needed = MAC_MINIMUM_OS_VERSION
major_version = [int(v) for v in platform.release().split('.')[:len(needed)]]
return major_version >= needed
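# For example (hypothetical values): on macOS 10.15.4 platform.release() returns
# a Darwin kernel version such as '19.4.0', which parses to [19, 4] and
# satisfies MAC_MINIMUM_OS_VERSION = [19, 4]; '19.3.0' -> [19, 3] would not.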
def _UseHermeticToolchain():
current_dir = os.path.dirname(os.path.realpath(__file__))
script_path = os.path.join(current_dir, 'mac/should_use_hermetic_xcode.py')
proc = subprocess.Popen([script_path, 'mac'], stdout=subprocess.PIPE)
return '1' in proc.stdout.readline().decode()
def RequestCipdAuthentication():
"""Requests that the user authenticate to access Xcode CIPD packages."""
print('Access to Xcode CIPD package requires authentication.')
print('-----------------------------------------------------------------')
print()
print('You appear to be a Googler.')
print()
print('I\'m sorry for the hassle, but you may need to do a one-time manual')
print('authentication. Please run:')
print()
print(' cipd auth-login')
print()
print('and follow the instructions.')
print()
print('NOTE: Use your google.com credentials, not chromium.org.')
print()
print('-----------------------------------------------------------------')
print()
sys.stdout.flush()
def PrintError(message):
# Flush buffers to ensure correct output ordering.
sys.stdout.flush()
sys.stderr.write(message + '\n')
sys.stderr.flush()
def InstallXcodeBinaries():
"""Installs the Xcode binaries needed to build Chrome and accepts the license.
This is the replacement for InstallXcode that installs a trimmed down version
of Xcode that is OS-version agnostic.
"""
# First make sure the directory exists. It will serve as the cipd root. This
# also ensures that there will be no conflicts of cipd root.
binaries_root = os.path.join(TOOLCHAIN_ROOT, 'xcode_binaries')
if not os.path.exists(binaries_root):
os.makedirs(binaries_root)
# 'cipd ensure' is idempotent.
args = ['cipd', 'ensure', '-root', binaries_root, '-ensure-file', '-']
p = subprocess.Popen(args,
universal_newlines=True,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = p.communicate(input=PARANOID_MODE + MAC_BINARIES_LABEL +
' ' + MAC_BINARIES_TAG)
if p.returncode != 0:
print(stdout)
print(stderr)
RequestCipdAuthentication()
return 1
if sys.platform != 'darwin':
return 0
# Accept the license for this version of Xcode if it's newer than the
# currently accepted version.
cipd_xcode_version_plist_path = os.path.join(binaries_root,
'Contents/version.plist')
cipd_xcode_version_plist = LoadPList(cipd_xcode_version_plist_path)
cipd_xcode_version = cipd_xcode_version_plist['CFBundleShortVersionString']
cipd_license_path = os.path.join(binaries_root,
'Contents/Resources/LicenseInfo.plist')
cipd_license_plist = LoadPList(cipd_license_path)
cipd_license_version = cipd_license_plist['licenseID']
should_overwrite_license = True
current_license_path = '/Library/Preferences/com.apple.dt.Xcode.plist'
if os.path.exists(current_license_path):
current_license_plist = LoadPList(current_license_path)
xcode_version = current_license_plist.get(
'IDEXcodeVersionForAgreedToGMLicense')
if (xcode_version is not None and pkg_resources.parse_version(xcode_version)
>= pkg_resources.parse_version(cipd_xcode_version)):
should_overwrite_license = False
if not should_overwrite_license:
return 0
  # Use puppet's sudoers script to accept the license if it's available.
license_accept_script = '/usr/local/bin/xcode_accept_license.py'
if os.path.exists(license_accept_script):
args = [
'sudo', license_accept_script, '--xcode-version', cipd_xcode_version,
'--license-version', cipd_license_version
]
subprocess.check_call(args)
return 0
# Otherwise manually accept the license. This will prompt for sudo.
print('Accepting new Xcode license. Requires sudo.')
sys.stdout.flush()
args = [
'sudo', 'defaults', 'write', current_license_path,
'IDEXcodeVersionForAgreedToGMLicense', cipd_xcode_version
]
subprocess.check_call(args)
args = [
'sudo', 'defaults', 'write', current_license_path,
'IDELastGMLicenseAgreedTo', cipd_license_version
]
subprocess.check_call(args)
args = ['sudo', 'plutil', '-convert', 'xml1', current_license_path]
subprocess.check_call(args)
return 0
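# For reference, the ensure file piped to `cipd ensure` above expands to
# something like (tag abbreviated here):
#   $ParanoidMode CheckIntegrity
#   infra_internal/ios/xcode/xcode_binaries/mac-amd64 pBipKbKSkYGX...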
def main():
if not _UseHermeticToolchain():
print('Skipping Mac toolchain installation for mac')
return 0
parser = argparse.ArgumentParser(description='Download hermetic Xcode.')
args = parser.parse_args()
if not PlatformMeetsHermeticXcodeRequirements():
print('OS version does not support toolchain.')
return 0
return InstallXcodeBinaries()
if __name__ == '__main__':
sys.exit(main())
|
|
#!/usr/bin/env python
#coding: utf-8
#file : server_modules.py
#author : ning
#date : 2014-02-24 13:00:28
import os
import sys
from utils import *
import conf
class Base:
'''
    Subclasses should implement:
    _alive, _pre_deploy, status, and initialize self.args
'''
def __init__(self, name, host, port, path):
self.args = {
'name' : name,
'host' : host,
'port' : port,
'path' : path,
            #startcmd and runcmd will be used to generate the control script
#used for the start cmd
'startcmd' : '',
            #process name you see in `ps -aux`, use this to generate the stop cmd
'runcmd' : '',
'logfile' : '',
}
def __str__(self):
return TT('[$name:$host:$port]', self.args)
def deploy(self):
logging.info('deploy %s' % self)
self._run(TTCMD('mkdir -p $path/bin && \
mkdir -p $path/conf && \
mkdir -p $path/log && \
mkdir -p $path/data',
self.args))
self._pre_deploy()
self._gen_control_script()
def _gen_control_script(self):
content = file(os.path.join(WORKDIR, 'conf/control.sh')).read()
content = TT(content, self.args)
control_filename = TT('${path}/${name}_control', self.args)
fout = open(control_filename, 'w+')
fout.write(content)
fout.close()
os.chmod(control_filename, 0755)
def start(self):
if self._alive():
logging.warn('%s already running' %(self) )
return
logging.debug('starting %s' % self)
t1 = time.time()
sleeptime = .1
cmd = TT("cd $path && ./${name}_control start", self.args)
self._run(cmd)
while not self._alive():
lets_sleep(sleeptime)
if sleeptime < 5:
sleeptime *= 2
else:
sleeptime = 5
logging.warn('%s still not alive' % self)
t2 = time.time()
logging.info('%s start ok in %.2f seconds' %(self, t2-t1) )
def stop(self):
if not self._alive():
logging.warn('%s already stop' %(self) )
return
cmd = TT("cd $path && ./${name}_control stop", self.args)
self._run(cmd)
t1 = time.time()
while self._alive():
lets_sleep()
t2 = time.time()
logging.info('%s stop ok in %.2f seconds' %(self, t2-t1) )
def pid(self):
cmd = TT("pgrep -f '^$runcmd'", self.args)
return self._run(cmd)
def status(self):
logging.warn("status: not implement")
def _alive(self):
logging.warn("_alive: not implement")
def _run(self, raw_cmd):
ret = system(raw_cmd, logging.debug)
logging.debug('return : [%d] [%s] ' % (len(ret), shorten(ret)) )
return ret
def clean(self):
cmd = TT("rm -rf $path", self.args)
self._run(cmd)
def host(self):
return self.args['host']
def port(self):
return self.args['port']
class RedisServer(Base):
def __init__(self, host, port, path, cluster_name, server_name, auth = None):
Base.__init__(self, 'redis', host, port, path)
self.args['startcmd'] = TT('bin/redis-server conf/redis.conf', self.args)
self.args['runcmd'] = TT('redis-server \*:$port', self.args)
self.args['conf'] = TT('$path/conf/redis.conf', self.args)
self.args['pidfile'] = TT('$path/log/redis.pid', self.args)
self.args['logfile'] = TT('$path/log/redis.log', self.args)
self.args['dir'] = TT('$path/data', self.args)
self.args['REDIS_CLI'] = conf.BINARYS['REDIS_CLI']
self.args['cluster_name'] = cluster_name
self.args['server_name'] = server_name
self.args['auth'] = auth
def _info_dict(self):
cmd = TT('$REDIS_CLI -h $host -p $port INFO', self.args)
if self.args['auth']:
cmd = TT('$REDIS_CLI -h $host -p $port -a $auth INFO', self.args)
info = self._run(cmd)
info = [line.split(':', 1) for line in info.split('\r\n')
if not line.startswith('#')]
info = [i for i in info if len(i) > 1]
        return defaultdict(str, info)  # note: this returns a defaultdict
def _ping(self):
cmd = TT('$REDIS_CLI -h $host -p $port PING', self.args)
if self.args['auth']:
cmd = TT('$REDIS_CLI -h $host -p $port -a $auth PING', self.args)
return self._run(cmd)
def _alive(self):
return strstr(self._ping(), 'PONG')
def _gen_conf(self):
content = file(os.path.join(WORKDIR, 'conf/redis.conf')).read()
content = TT(content, self.args)
if self.args['auth']:
content += '\r\nrequirepass %s' % self.args['auth']
return content
def _pre_deploy(self):
self.args['BINS'] = conf.BINARYS['REDIS_SERVER_BINS']
self._run(TT('cp $BINS $path/bin/', self.args))
fout = open(TT('$path/conf/redis.conf', self.args), 'w+')
fout.write(self._gen_conf())
fout.close()
def status(self):
uptime = self._info_dict()['uptime_in_seconds']
if uptime:
logging.info('%s uptime %s seconds' % (self, uptime))
else:
logging.error('%s is down' % self)
def isslaveof(self, master_host, master_port):
info = self._info_dict()
if info['master_host'] == master_host and \
int(info['master_port']) == master_port:
logging.debug('already slave of %s:%s' % (master_host, master_port))
return True
def slaveof(self, master_host, master_port):
cmd = 'SLAVEOF %s %s' % (master_host, master_port)
return self.rediscmd(cmd)
def rediscmd(self, cmd):
args = copy.deepcopy(self.args)
args['cmd'] = cmd
cmd = TT('$REDIS_CLI -h $host -p $port $cmd', args)
logging.info('%s %s' % (self, cmd))
return self._run(cmd)
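# Hedged usage sketch for RedisServer (never called here): host/port/path and
# names are placeholder values; it assumes conf.BINARYS and the conf/ templates
# are set up the way this module expects.
def _example_deploy_redis():
    r = RedisServer('127.0.0.1', 2000, '/tmp/r/redis-2000',
                    'cluster0', 'redis-2000')
    r.deploy()   # creates bin/, conf/, log/, data/ and writes redis.conf
    r.start()    # starts via the generated ${name}_control script
    r.status()
    return r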
class Memcached(Base):
def __init__(self, host, port, path, cluster_name, server_name):
Base.__init__(self, 'memcached', host, port, path)
self.args['startcmd'] = TT('bin/memcached -d -p $port', self.args)
self.args['runcmd'] = self.args['startcmd']
self.args['cluster_name'] = cluster_name
self.args['server_name'] = server_name
def _alive(self):
cmd = TT('echo "stats" | socat - TCP:$host:$port', self.args)
ret = self._run(cmd)
return strstr(ret, 'END')
def _pre_deploy(self):
self.args['BINS'] = conf.BINARYS['MEMCACHED_BINS']
self._run(TT('cp $BINS $path/bin/', self.args))
class NutCracker(Base):
def __init__(self, host, port, path, cluster_name, masters, mbuf=512,
verbose=5, is_redis=True, redis_auth=None):
Base.__init__(self, 'nutcracker', host, port, path)
self.masters = masters
self.args['mbuf'] = mbuf
self.args['verbose'] = verbose
self.args['redis_auth'] = redis_auth
self.args['conf'] = TT('$path/conf/nutcracker.conf', self.args)
self.args['pidfile'] = TT('$path/log/nutcracker.pid', self.args)
self.args['logfile'] = TT('$path/log/nutcracker.log', self.args)
self.args['status_port'] = self.args['port'] + 1000
self.args['startcmd'] = TTCMD('bin/nutcracker -d -c $conf -o $logfile \
-p $pidfile -s $status_port \
-v $verbose -m $mbuf -i 1', self.args)
self.args['runcmd'] = TTCMD('bin/nutcracker -d -c $conf -o $logfile \
-p $pidfile -s $status_port', self.args)
self.args['cluster_name']= cluster_name
self.args['is_redis']= str(is_redis).lower()
def _alive(self):
return self._info_dict()
def _gen_conf_section(self):
template = ' - $host:$port:1 $server_name'
cfg = '\n'.join([TT(template, master.args) for master in self.masters])
return cfg
def _gen_conf(self):
content = '''
$cluster_name:
listen: 0.0.0.0:$port
hash: fnv1a_64
distribution: modula
preconnect: true
auto_eject_hosts: false
redis: $is_redis
backlog: 512
timeout: 400
client_connections: 0
server_connections: 1
server_retry_timeout: 2000
server_failure_limit: 2
servers:
'''
if self.args['redis_auth']:
content = content.replace('redis: $is_redis',
'redis: $is_redis\r\n redis_auth: $redis_auth')
content = TT(content, self.args)
return content + self._gen_conf_section()
def _pre_deploy(self):
self.args['BINS'] = conf.BINARYS['NUTCRACKER_BINS']
self._run(TT('cp $BINS $path/bin/', self.args))
fout = open(TT('$path/conf/nutcracker.conf', self.args), 'w+')
fout.write(self._gen_conf())
fout.close()
def version(self):
#This is nutcracker-0.4.0
s = self._run(TT('$BINS --version', self.args))
return s.strip().replace('This is nutcracker-', '')
def _info_dict(self):
try:
c = telnetlib.Telnet(self.args['host'], self.args['status_port'])
ret = c.read_all()
return json_decode(ret)
except Exception, e:
logging.debug('can not get _info_dict of nutcracker, \
[Exception: %s]' % (e, ))
return None
def reconfig(self, masters):
self.masters = masters
self.stop()
self.deploy()
self.start()
logging.info('proxy %s:%s is updated' % (self.args['host'], self.args['port']))
def logfile(self):
return self.args['logfile']
def cleanlog(self):
cmd = TT("rm '$logfile'", self.args)
self._run(cmd)
def signal(self, signo):
self.args['signo'] = signo
cmd = TT("pkill -$signo -f '^$runcmd'", self.args)
self._run(cmd)
def reload(self):
self.signal('USR1')
def set_config(self, content):
fout = open(TT('$path/conf/nutcracker.conf', self.args), 'w+')
fout.write(content)
fout.close()
self.reload()
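# Hedged proxy wiring sketch (never called here): two redis masters behind one
# nutcracker proxy, using only the constructors defined above; all hosts,
# ports and paths are placeholder values.
def _example_deploy_proxy():
    masters = [
        RedisServer('127.0.0.1', 2000, '/tmp/r/redis-2000', 'cluster0', 'redis-2000'),
        RedisServer('127.0.0.1', 2001, '/tmp/r/redis-2001', 'cluster0', 'redis-2001'),
    ]
    for m in masters:
        m.deploy()
        m.start()
    proxy = NutCracker('127.0.0.1', 4000, '/tmp/r/nutcracker-4000', 'cluster0', masters)
    proxy.deploy()
    proxy.start()
    return proxy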
|
|
#!/usr/bin/python
#
# An autodetection utility for the Blackberry NDK
#
# WARNING: the paths to qde, project and project name must not contain any
# spaces for the tools to work correctly
import os, sys, platform, subprocess, pprint, shutil, tempfile, telnetlib, time
from optparse import OptionParser
class Device:
''' TODO Mac: Look at how qde works with sim for this class '''
pass
# def __init__(self, name, port=-1, emulator=False, offline=False):
# self.name = name
# self.port = port
# self.emulator = emulator
# self.offline = offline
#
# def get_name(self):
# return self.name
#
# def get_port(self):
# return self.port
#
# def is_emulator(self):
# return self.emulator
#
# def is_device(self):
# return not self.emulator
#
# def is_offline(self):
# return self.offline
class BlackberryNDK:
def __init__(self, blackberryNdk, log = None):
if platform.system() == 'Windows':
self.packagerProgram = 'blackberry-nativepackager.bat'
self.deployProgram = 'blackberry-deploy.bat'
self.barSignerProgram = 'blackberry-signer.bat'
else:
self.packagerProgram = 'blackberry-nativepackager'
self.deployProgram = 'blackberry-deploy'
self.barSignerProgram = 'blackberry-signer'
self.log = log
self.blackberryNdk = self._findNdk(blackberryNdk)
if self.blackberryNdk is None:
raise Exception('No Blackberry NDK directory found')
self.version = self._findVersion()
self._sourceEnvironment()
self.qde = self._findQde()
self.lastLineCount = 0
def getVersion(self):
return self.version
def getBlackberryNdk(self):
return self.blackberryNdk
def _findNdk(self, supplied):
if supplied is not None:
if os.path.exists(supplied):
return supplied
else:
return None
if platform.system() == 'Windows':
default_dirs = ['C:\\bbndk']
else:
default_dirs = ['/Applications/bbndk', '/Developer/SDKs/bbndk', '/opt/bbndk', '~/bbndk', '~/opt/bbndk']
for default_dir in default_dirs:
if os.path.exists(default_dir):
return default_dir
return None
def _findVersion(self):
infoPath = os.path.join(self.blackberryNdk, 'install', 'info.txt')
if os.path.exists(infoPath):
try:
f = open(infoPath, 'rU')
for line in f:
(key, val) = line.split('=', 1)
if key == 'host':
f.close()
return val.strip()
except IOError, e:
print >>sys.stderr, e
return None
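    # For _findVersion above: install/info.txt is expected to be a simple
    # key=value file, and the method returns the value of the 'host' entry
    # (for example a line such as 'host=10.2.0.1234' -- the exact value
    # depends on the installed NDK).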
def _sourceEnvironment(self):
if platform.system() == 'Windows':
envFile = os.path.join(self.blackberryNdk, 'bbndk-env.bat')
command = '%s ' % envFile + '&& set'
else:
envFile = os.path.join(self.blackberryNdk, 'bbndk-env.sh')
command = ['bash', '-c', 'source %s && env' % envFile]
try:
proc = subprocess.Popen(command, stdout = subprocess.PIPE)
except OSError, e:
print >>sys.stderr, e
return
for line in proc.stdout:
# This leaks memory on mac osx, see man putenv
(key, _, value) = line.partition("=")
os.environ[key] = value.strip()
proc.communicate()
self.log and self.log.debug('os.environ:\n' + pprint.pformat(dict(os.environ)))
def _findQde(self):
cmd = 'qde'
qnxHost = os.environ.get('QNX_HOST')
if qnxHost == None:
return None
if platform.system() == 'Windows':
dir = os.path.join(qnxHost, 'usr', 'qde', 'eclipse')
cmd += '.exe'
elif platform.system() == 'Darwin':
dir = os.path.join(qnxHost, 'usr', 'qde', 'eclipse', 'qde.app', 'Contents', 'MacOS')
elif platform.system() == 'Linux':
dir = os.path.join(qnxHost, 'usr', 'bin')
qde = os.path.join(dir, cmd)
if os.path.exists(qde):
return qde
return None
def _run(self, command, echoCommand = True):
assert type(command) is list
try:
# if no log, don't output
if self.log == None:
logfile = os.devnull
else:
logfile = self.log.getLogfile()
if echoCommand:
self.log.info('Command: ' + ' '.join(command))
# if no logfile, output to stdout
if logfile == None:
subprocess.check_call(command)
return 0
else:
with open(logfile, 'a') as f:
# Need this write() or else subprocess will overwrite for some reason
f.write('\n')
subprocess.check_call(command, stdout = f, stderr = f)
return 0
except subprocess.CalledProcessError, cpe:
print >>sys.stderr, cpe, cpe.output
return cpe.returncode
except OSError, e:
print >>sys.stderr, e
return e.errno
def importProject(self, project, workspace = None):
assert os.path.exists(project)
if workspace is None:
workspace = os.path.dirname(project)
command = [self.qde, '-nosplash', '-application', 'org.eclipse.cdt.managedbuilder.core.headlessbuild', '-consoleLog', '-data', workspace, '-import', project]
self._run(command)
visited = []
def _copy_file(self, src, dest):
for path, dirs, files in os.walk(src, topdown=True):
if path not in self.visited:
for di in dirs:
print dest, di
self._copy_file(os.path.join(path, di), os.path.join(dest, di))
if not os.path.exists(dest):
os.makedirs(dest)
for fi in files:
shutil.copy(os.path.join(path, fi), dest)
self.visited.append(path)
def build(self, project, cpu, variant, name):
assert os.path.exists(project)
templateDir = os.path.abspath(os.path.dirname(sys._getframe(0).f_code.co_filename))
# BB NDK makefiles do not allow spaces in path names; spaces cause build problems.
# The solution is to use python temporary directories without spaces to do builds.
tmpPathSDK = tempfile.mkdtemp()
self._copy_file(os.path.join(templateDir, "ticore"), os.path.join(tmpPathSDK, "ticore"))
self._copy_file(os.path.join(templateDir, "tibb"), os.path.join(tmpPathSDK, "tibb"))
self._copy_file(os.path.join(templateDir, "libv8"), os.path.join(tmpPathSDK, "libv8"))
tiappName = 'TIAPP_NAME=' + name
cpuList = 'CPULIST=' + cpu
bbRoot = 'BB_ROOT=' + tmpPathSDK
variant = 'VARIANTLIST=' + ('g' if variant.endswith('-g') else '')
oldPath = os.getcwd()
os.chdir(project)
tmpPathProj = tempfile.mkdtemp()
self._copy_file(os.getcwd(), tmpPathProj)
projPath = os.getcwd()
os.chdir(tmpPathProj)
command = ['make', tiappName, cpuList, bbRoot, variant]
retCode = self._run(command)
self._copy_file(tmpPathProj, projPath)
try:
shutil.rmtree(tmpPathSDK)
shutil.rmtree(tmpPathProj)
except:
print "Error removing temporary file"
os.chdir(oldPath)
return retCode
def _getBuildID(self, buildDir):
manifestPath = os.path.join(buildDir, 'Resources', 'blackberry', 'Ti.Manifest')
if os.path.exists(manifestPath):
try:
f = open(manifestPath, 'rU')
for line in f:
(key, val) = line.split('=', 1)
if key == 'buildID':
f.close()
return val.strip()
except IOError, e:
print >>sys.stderr, e
return '1'
def package(self, package, appFile, projectName, type, debugToken, isUnitTest = False):
templateDir = os.path.abspath(os.path.dirname(sys._getframe(0).f_code.co_filename))
buildDir = os.path.abspath(os.path.join(appFile, '..', '..', '..'))
projectDir = os.path.abspath(os.path.join(buildDir, '..', '..'))
# BuildID is a 0-65535 value that identifies this package; it must be incremented before bar signing
buildID = self._getBuildID(projectDir)
# Copy the framework's JavaScript
frameworkDir = os.path.join(buildDir, 'framework')
if os.path.exists(frameworkDir):
shutil.rmtree(frameworkDir)
shutil.copytree(os.path.join(templateDir, 'tibb', 'titanium', 'javascript'), frameworkDir)
# Copy all needed resources to assets
assetsDir = os.path.join(buildDir, 'assets')
resourcesDir = os.path.join(projectDir, 'Resources')
i18n = os.path.join(projectDir, 'i18n')
i18nAssetsDir = os.path.join(assetsDir, 'i18n')
blackberryResourcesDir = os.path.join(resourcesDir, 'blackberry')
if os.path.exists(assetsDir):
shutil.rmtree(assetsDir)
os.makedirs(assetsDir)
if os.path.exists(resourcesDir):
for entry in os.listdir(resourcesDir):
fullEntry = os.path.join(resourcesDir, entry)
if os.path.isdir(fullEntry):
if entry != "android" and entry != "iphone" and entry != "mobileweb" and entry != "blackberry":
shutil.copytree(fullEntry, os.path.join(assetsDir, entry))
else:
shutil.copy2(fullEntry, os.path.join(assetsDir, entry))
# Copy the i18n folder to assets
if os.path.exists(i18n):
for entry in os.listdir(i18n):
fullEntry = os.path.join(i18n, entry)
if os.path.isdir(fullEntry):
shutil.copytree(fullEntry, os.path.join(i18nAssetsDir, entry))
else:
shutil.copy2(fullEntry, os.path.join(i18nAssetsDir, entry))
# Copy application properties file into assets.
shutil.copy(os.path.join(buildDir, 'app_properties.ini'), assetsDir)
# copy the blackberry dir after so it will overwrite, if necessary
if os.path.exists(blackberryResourcesDir):
for root, dirs, files in os.walk(blackberryResourcesDir):
destRoot = root.replace(blackberryResourcesDir, assetsDir, 1)
if not os.path.exists(destRoot):
os.makedirs(destRoot)
for filename in files:
fullFilenameSrc = os.path.join(root, filename)
fullFilenameDest = fullFilenameSrc.replace(blackberryResourcesDir, assetsDir, 1)
shutil.copy2(fullFilenameSrc, fullFilenameDest)
# TODO: minimize .js files in Release mode
command = [self.packagerProgram, '-package', package, 'bar-descriptor.xml', '-e', appFile, projectName, '-buildID', buildID, 'assets', 'framework']
if isUnitTest:
command.append('icon.png')
if type != 'distribute':
command.append('-devMode')
if debugToken != None:
command.extend(['-debugToken', debugToken])
return self._run(command)
def distribute(self, projectName, appFile, storePass, outputDir = None):
command = [self.barSignerProgram, '-storepass', storePass, appFile+'.bar']
if outputDir != None:
os.path.join(outputDir, projectName)
shutil.copy2(appFile+'.bar', os.path.join(outputDir, projectName+'.bar'))
return self._run(command)
def deploy(self, deviceIP, package, password = None):
if deviceIP == None:
return 0
command = [self.deployProgram, '-installApp', '-launchApp', '-device', deviceIP, '-package', package]
if password != None:
command.append('-password')
command.append(password)
return self._run(command)
def uninstallApp(self, deviceIP, package, password = None):
command = [self.deployProgram, '-uninstallApp', '-device', deviceIP, '-package', package]
if password != None:
command.append('-password')
command.append(password)
return self._run(command)
def terminateApp(self, deviceIP, package, password = None):
command = [self.deployProgram, '-terminateApp', '-device', deviceIP, '-package', package]
if password != None:
command.append('-password')
command.append(password)
return self._run(command)
def isAppRunning(self, deviceIP, package, password = None):
command = [self.deployProgram, '-isAppRunning', '-device', deviceIP, '-package', package]
if password != None:
command.append('-password')
command.append(password)
return self._run(command, echoCommand = False)
def printExitCode(self, deviceIP, package, password = None):
command = [self.deployProgram, '-printExitCode', '-device', deviceIP, '-package', package]
if password != None:
command.append('-password')
command.append(password)
return self._run(command, echoCommand = False)
def getFile(self, deviceIP, package, hostFile, deviceFile, password = None):
command = [self.deployProgram, '-getFile', deviceFile, hostFile, '-device', deviceIP, '-package', package]
if password != None:
command.append('-password')
command.append(password)
return self._run(command)
def putFile(self, deviceIP, package, hostFile, deviceFile, password = None):
command = [self.deployProgram, '-putFile', hostFile, deviceFile, '-device', deviceIP, '-package', package]
if password != None:
command.append('-password')
command.append(password)
return self._run(command)
def _isAppRunning(self, deviceIP, package, password = None):
command = [self.deployProgram, '-isAppRunning', '-device', deviceIP, '-package', package]
if password != None:
command.append('-password')
command.append(password)
output = subprocess.check_output(command)
return output.find("result::true") != -1
def _printAppLog(self, deviceIP, package, password = None):
hostFile = "-"
deviceFile = "logs/log"
command = [self.deployProgram, '-getFile', deviceFile, hostFile, '-device', deviceIP, '-package', package]
if password != None:
command.append('-password')
command.append(password)
output = subprocess.check_output(command)
output = output.split('\n')
for k in range (len(output)):
if k > self.lastLineCount:
print output[k]
self.lastLineCount = k
return 0
def appLog(self, deviceIP, package, password = None):
while self._isAppRunning(deviceIP, package, password):
time.sleep(2)
self._printAppLog(deviceIP, package, password)
def buildTibb(self, tibbPath, buildType):
assert os.path.exists(tibbPath)
oldPath = os.getcwd()
os.chdir(tibbPath)
command = ['make', buildType]
retCode = self._run(command)
os.chdir(oldPath)
return retCode
def __runUnitTests(ipAddress = None):
# on Windows the double dirname needs to be done on 2 lines
baseDir = os.path.abspath(os.path.dirname(sys.argv[0]))
baseDir = os.path.dirname(baseDir)
sys.path.append(os.path.join(baseDir, 'common'))
from tiunittest import UnitTest
import tempfile
# if there are spaces in the temp directory, try to use the working directory instead
if tempfile.gettempdir().find(' ') != -1:
if os.getcwd().find(' ') != -1:
print 'Please run the unit tests from a directory with no spaces'
sys.exit(1)
else:
tempfile.tempdir = os.getcwd()
os.environ['TEMP'] = tempfile.tempdir
os.environ['TMP'] = tempfile.tempdir
os.environ['TMPDIR'] = tempfile.tempdir
import shutil
print '\nRunning Unit Tests...\n'
with UnitTest('Test source environment..'):
ndk._sourceEnvironment()
for key in ['QNX_TARGET', 'QNX_HOST', 'QNX_CONFIGURATION', 'MAKEFLAGS', 'PATH']:
assert key in os.environ
with UnitTest('Test find qde..'):
qde = ndk._findQde()
assert os.path.exists(qde)
with UnitTest('Test import project with workspace..'):
workspace = tempfile.mkdtemp()
projectSrc = os.path.join(ndk.blackberryNdk, 'target', 'qnx6', 'usr', 'share', 'samples', 'ndk', 'HelloWorldDisplay')
projectName = 'HelloWorldDisplayMakefile'
project = os.path.join(workspace, projectName)
shutil.copytree(projectSrc, project)
ndk.importProject(project, workspace)
passed = os.path.exists(os.path.join(workspace, '.metadata'))
shutil.rmtree(workspace)
assert passed
with UnitTest('Test import project no workspace..'):
workspace = tempfile.mkdtemp()
projectSrc = os.path.join(ndk.blackberryNdk, 'target', 'qnx6', 'usr', 'share', 'samples', 'ndk', 'HelloWorldDisplay')
project = os.path.join(workspace, projectName)
shutil.copytree(projectSrc, project)
ndk.importProject(project)
passed = os.path.exists(os.path.join(workspace, '.metadata'))
assert passed
with UnitTest('Test build project (x86)..'):
cpu = 'x86'
ndk.build(project, cpu, projectName)
assert os.path.exists(os.path.join(project, 'x86', 'o', projectName))
assert os.path.exists(os.path.join(project, 'x86', 'o-g', projectName))
oldDir = os.getcwd()
os.chdir(project)
with UnitTest('Test package project..'):
cpu = 'x86'
variant = 'o-g'
barPath = os.path.join(project, cpu, variant, '%s.bar' % projectName)
savePath = os.path.join(project, cpu, variant, projectName)
assert 0 == ndk.package(barPath, savePath, os.path.basename(project), 'simulator', None, isUnitTest = True)
assert os.path.exists(barPath)
os.chdir(oldDir)
if ipAddress != None:
with UnitTest('Test deploy project to simulator..'):
assert 0 == ndk.deploy(ipAddress, barPath)
with UnitTest('Test build project (arm)..'):
cpu = 'arm'
ndk.build(project, cpu, projectName)
assert os.path.exists(os.path.join(project, 'arm', 'o.le-v7', projectName))
assert os.path.exists(os.path.join(project, 'arm', 'o.le-v7-g', projectName))
shutil.rmtree(workspace)
print '\nFinished Running Unit Tests'
UnitTest.printDetails()
if __name__ == "__main__":
# Setup script usage using optparse
parser = OptionParser(usage='[ndk_path] [-t] [--ip_address IP ADDRESS]', description='Prints the NDK directory and version')
parser.add_option('-t', '--test', help='run unit tests', action='store_true', dest='test')
parser.add_option('--ip_address', help='simulator IP address for unit tests', dest='ip_address')
(options, args) = parser.parse_args()
try:
ndk = BlackberryNDK(args[0].decode('utf-8') if len(args) != 0 else None)
print "BLACKBERRY_NDK=%s" % ndk.getBlackberryNdk()
print "BLACKBERRY_NDK_VERSION=%s" % ndk.getVersion()
except Exception, e:
print >>sys.stderr, e
sys.exit(1)
if options.test:
__runUnitTests(options.ip_address.decode('utf-8') if options.ip_address != None else None)
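# Illustrative usage sketch (not part of the original tool): roughly how the
# BlackberryNDK class above could be driven from another script. The paths,
# device IP, project name, and password below are hypothetical placeholders.
#
# ndk = BlackberryNDK(None)                  # autodetect the NDK location
# ndk.importProject('/path/to/project')      # import into a headless workspace
# ndk.build('/path/to/project', 'x86', 'o-g', 'MyApp')
# ndk.package('MyApp.bar', 'MyApp', 'MyApp', 'test', None)
# ndk.deploy('192.168.1.100', 'MyApp.bar', password='devicepass')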
|
|
import copy
import json
import logging
import os
import pprint
import subprocess
import tempfile, shutil
import time
from bson.objectid import ObjectId
from django.core.mail import send_mail
from django.core.urlresolvers import reverse
from django.http import HttpResponse
from django.shortcuts import render_to_response
from django.template import RequestContext
from hashlib import md5
from mongoengine.base import ValidationError
from crits.backdoors.backdoor import Backdoor
from crits.campaigns.forms import CampaignForm
from crits.config.config import CRITsConfig
from crits.core import form_consts
from crits.core.class_mapper import class_from_value, class_from_id
from crits.core.crits_mongoengine import EmbeddedSource, EmbeddedCampaign
from crits.core.crits_mongoengine import json_handler, create_embedded_source
from crits.core.data_tools import convert_string_to_bool, validate_md5_checksum
from crits.core.data_tools import validate_sha1_checksum, validate_sha256_checksum
from crits.core.exceptions import ZipFileError
from crits.core.forms import DownloadFileForm
from crits.core.handlers import build_jtable, jtable_ajax_list, jtable_ajax_delete
from crits.core.handlers import csv_export
from crits.core.handsontable_tools import convert_handsontable_to_rows, parse_bulk_upload
from crits.core.mongo_tools import get_file
from crits.core.source_access import SourceAccess
from crits.core.user_tools import is_admin, user_sources, get_user_organization
from crits.core.user_tools import is_user_subscribed, is_user_favorite
from crits.notifications.handlers import remove_user_from_notification
from crits.objects.handlers import object_array_to_dict
from crits.objects.handlers import validate_and_add_new_handler_object
from crits.samples.forms import XORSearchForm, UnzipSampleForm, UploadFileForm
from crits.samples.sample import Sample
from crits.samples.yarahit import YaraHit
from crits.services.analysis_result import AnalysisResult
from crits.services.handlers import run_triage, get_supported_services
from crits.stats.handlers import generate_yara_hits
from crits.vocabulary.relationships import RelationshipTypes
logger = logging.getLogger(__name__)
def generate_sample_csv(request):
"""
Generate a CSV file of the Sample information
:param request: The request for this CSV.
:type request: :class:`django.http.HttpRequest`
:returns: :class:`django.http.HttpResponse`
"""
response = csv_export(request, Sample)
return response
def get_sample_details(sample_md5, analyst, format_=None):
"""
Generate the data to render the Sample details template.
:param sample_md5: The MD5 of the Sample to get details for.
:type sample_md5: str
:param analyst: The user requesting this information.
:type analyst: str
:param format_: The format of the details page.
:type format_: str
:returns: template (str), arguments (dict)
"""
template = None
sources = user_sources(analyst)
sample = Sample.objects(md5=sample_md5,
source__name__in=sources).first()
if not sample:
return ('error.html', {'error': "File not yet available or you do not have access to view it."})
sample.sanitize_sources(username=analyst)
if format_:
exclude = [
"source",
"relationships",
"schema_version",
"campaign",
"analysis",
"bucket_list",
"ticket",
"releasability",
"unsupported_attrs",
"status",
"objects",
"modified",
"analyst",
"_id"
]
if format_ == "yaml":
data = sample.to_yaml(exclude)
return "yaml", data
if format_ == "json":
data = sample.to_json(exclude)
return "json", data
if not sample:
template = "error.html"
args = {'error': "No sample found"}
elif format_ == "text":
template = "samples_detail_text.html"
args = {'sample': sample}
else:
#create forms
xor_search_form = XORSearchForm()
campaign_form = CampaignForm()
unzip_sample_form = UnzipSampleForm()
download_form = DownloadFileForm(initial={"obj_type":'Sample',
"obj_id":sample.id,
"meta_format": "none"})
# do we have the binary?
if isinstance(sample.filedata.grid_id, ObjectId):
binary_exists = 1
else:
binary_exists = 0
sample.sanitize("%s" % analyst)
# remove pending notifications for user
remove_user_from_notification("%s" % analyst, sample.id, 'Sample')
# subscription
subscription = {
'type': 'Sample',
'id': sample.id,
'subscribed': is_user_subscribed("%s" % analyst,
'Sample',
sample.id),
}
#objects
objects = sample.sort_objects()
#relationships
relationships = sample.sort_relationships("%s" % analyst,
meta=True)
# relationship
relationship = {
'type': 'Sample',
'value': sample.id
}
#comments
comments = {'comments': sample.get_comments(),
'url_key': sample_md5}
#screenshots
screenshots = sample.get_screenshots(analyst)
# favorites
favorite = is_user_favorite("%s" % analyst, 'Sample', sample.id)
# services
service_list = get_supported_services('Sample')
# analysis results
service_results = sample.get_analysis_results()
# template
from crits.services.core import ServiceManager
service_manager = ServiceManager()
tmp_service_results = []
for result in service_results:
result.template = service_manager.get_service_class(result.service_name).template
tmp_service_results.append(result)
service_results = tmp_service_results
args = {'objects': objects,
'relationships': relationships,
'comments': comments,
'relationship': relationship,
'subscription': subscription,
'sample': sample, 'sources': sources,
'campaign_form': campaign_form,
'download_form': download_form,
'xor_search_form': xor_search_form,
'unzip_sample_form': unzip_sample_form,
'binary_exists': binary_exists,
'favorite': favorite,
'screenshots': screenshots,
'service_list': service_list,
'service_results': service_results}
return template, args
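# Illustrative sketch (an assumption, not original CRITs code): callers have to
# handle both return shapes of get_sample_details(), since it returns
# ("yaml"/"json", serialized data) for API formats and (template name, args dict)
# for HTML rendering. Roughly:
#
# template, args = get_sample_details(sample_md5, analyst, format_)
# if template in ("yaml", "json"):
#     return HttpResponse(args, content_type="text/plain")
# return render_to_response(template, args, RequestContext(request))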
def generate_sample_jtable(request, option):
"""
Generate the jtable data for rendering in the list template.
:param request: The request for this jtable.
:type request: :class:`django.http.HttpRequest`
:param option: Action to take.
:type option: str of either 'jtlist', 'jtdelete', or 'inline'.
:returns: :class:`django.http.HttpResponse`
"""
obj_type = Sample
type_ = "sample"
mapper = obj_type._meta['jtable_opts']
if option == "jtlist":
# Sets display url
details_url = mapper['details_url']
details_url_key = mapper['details_url_key']
fields = mapper['fields']
response = jtable_ajax_list(obj_type, details_url, details_url_key,
request, includes=fields)
return HttpResponse(json.dumps(response,
default=json_handler),
content_type="application/json")
if option == "jtlist_by_org":
# Sets display url
details_url = mapper['details_url']
details_url_key = mapper['details_url_key']
get_values = request.GET.copy()
get_values['source'] = get_user_organization("%s" % request.user.username)
request.GET = get_values
fields = mapper['fields']
response = jtable_ajax_list(obj_type,details_url,details_url_key,
request, includes=fields)
return HttpResponse(json.dumps(response, default=json_handler),
content_type="application/json")
if option == "jtdelete":
response = {"Result": "ERROR"}
if jtable_ajax_delete(obj_type,request):
response = {"Result": "OK"}
return HttpResponse(json.dumps(response,
default=json_handler),
content_type="application/json")
jtopts = {
'title': "Samples",
'default_sort': mapper['default_sort'],
'listurl': reverse('crits.%ss.views.%ss_listing' %
(type_, type_), args=('jtlist',)),
'deleteurl': reverse('crits.%ss.views.%ss_listing' %
(type_, type_), args=('jtdelete',)),
'searchurl': reverse(mapper['searchurl']),
'fields': mapper['jtopts_fields'],
'hidden_fields': mapper['hidden_fields'],
'linked_fields': mapper['linked_fields'],
'details_link': mapper['details_link'],
'no_sort': mapper['no_sort']
}
jtable = build_jtable(jtopts,request)
jtable['toolbar'] = [
{
'tooltip': "'All Samples'",
'text': "'All'",
'click': "function () {$('#sample_listing').jtable('load', {'refresh': 'yes'});}",
'cssClass': "'jtable-toolbar-center'",
},
{
'tooltip': "'New Samples'",
'text': "'New'",
'click': "function () {$('#sample_listing').jtable('load', {'refresh': 'yes', 'status': 'New'});}",
'cssClass': "'jtable-toolbar-center'",
},
{
'tooltip': "'In Progress Samples'",
'text': "'In Progress'",
'click': "function () {$('#sample_listing').jtable('load', {'refresh': 'yes', 'status': 'In Progress'});}",
'cssClass': "'jtable-toolbar-center'",
},
{
'tooltip': "'Analyzed Samples'",
'text': "'Analyzed'",
'click': "function () {$('#sample_listing').jtable('load', {'refresh': 'yes', 'status': 'Analyzed'});}",
'cssClass': "'jtable-toolbar-center'",
},
{
'tooltip': "'Deprecated Samples'",
'text': "'Deprecated'",
'click': "function () {$('#sample_listing').jtable('load', {'refresh': 'yes', 'status': 'Deprecated'});}",
'cssClass': "'jtable-toolbar-center'",
},
{
'tooltip': "'Add Sample'",
'text': "'Add Sample'",
'click': "function () {$('#new-sample').click()}",
},
]
if option == "inline":
return render_to_response("jtable.html",
{'jtable': jtable,
'jtid': '%s_listing' % type_,
'button' : '%ss_tab' % type_},
RequestContext(request))
else:
return render_to_response("%s_listing.html" % type_,
{'jtable': jtable,
'jtid': '%s_listing' % type_},
RequestContext(request))
def generate_yarahit_jtable(request, option):
"""
Generate the jtable data for rendering in the list template.
:param request: The request for this jtable.
:type request: :class:`django.http.HttpRequest`
:param option: Action to take.
:type option: str of either 'jtlist', 'jtdelete', or 'inline'.
:returns: :class:`django.http.HttpResponse`
"""
refresh = request.GET.get("refresh", "no")
if refresh == "yes":
generate_yara_hits()
obj_type = YaraHit
type_ = "yarahit"
if option == "jtlist":
# Sets display url
details_url = 'crits.samples.views.samples_listing'
details_url_key = "detectexact"
response = jtable_ajax_list(obj_type,
details_url,
details_url_key,
request)
return HttpResponse(json.dumps(response,
default=json_handler),
content_type="application/json")
jtopts = {
'title': "Yara Hits",
'default_sort': "result ASC",
'listurl': reverse('crits.samples.views.%ss_listing' % (type_,),
args=('jtlist',)),
'deleteurl': "",
'searchurl': reverse('crits.samples.views.%ss_listing' % (type_,)),
'fields': ["result", "engine", "version", "sample_count","_id"],
'hidden_fields': ["_id"],
'linked_fields': []
}
jtable = build_jtable(jtopts,request)
jtable['toolbar'] = [
{
'tooltip': "'Refresh Yara Hits'",
'text': "'Refresh Stats'",
'click': "function () {$.get('"+reverse('crits.samples.views.%ss_listing' % type_)+"', {'refresh': 'yes'}, function () { $('#yarahits_listing').jtable('reload');});}"
},
]
if option == "inline":
return render_to_response("jtable.html",
{'jtable': jtable,
'jtid': '%ss_listing' % type_,
'button' : '%ss_button' % type_},
RequestContext(request))
else:
return render_to_response("%ss_listing.html" % type_,
{'jtable': jtable,
'jtid': '%ss_listing' % type_},
RequestContext(request))
def get_filename(md5=None):
"""
Get the filename of a sample by MD5.
:param md5: The MD5 of the sample to get the filename of.
:type md5: str
:returns: None, str
"""
if not md5:
return None
sample = Sample.objects(md5=md5).first()
if not sample:
return None
return sample.filename
def get_md5_hash(oid=None):
"""
Get the MD5 of a sample by ObjectId.
:param oid: The ObjectId of the sample to get the MD5 of.
:type oid: str
:returns: None, str
"""
if oid is None:
return None
else:
sample = Sample.objects(id=oid).first()
if not sample:
return None
return sample.md5
def delete_sample(sample_md5, username=None):
"""
Delete a sample from CRITs.
:param sample_md5: The MD5 of the sample to delete.
:type sample_md5: str
:param username: The user deleting this sample.
:type username: str
:returns: bool
"""
if is_admin(username):
sample = Sample.objects(md5=sample_md5).first()
if sample:
sample.delete(username=username)
return True
else:
return False
else:
return False
def mail_sample(sample_md5, recips=None):
"""
Mail a sample to a list of recipients.
:param sample_md5: The MD5 of the sample to send.
:type sample_md5: str
:param recips: List of recipients.
:type recips: list
:returns: None, str
"""
crits_config = CRITsConfig.objects().first()
if recips is not None:
sample = Sample.objects(md5=sample_md5).first()
if not sample:
return None
try:
send_mail('Details for %s' % sample_md5,
'%s' % pprint.pformat(sample.to_json()),
crits_config.crits_email,
recips,
fail_silently=False)
except Exception as e:
logger.error(e)
return str(e.args)
return None
def get_source_counts(analyst):
"""
Get the sources for a user.
:param analyst: The user to get sources for.
:type analyst: str
:returns: :class:`crits.core.crits_mongoengine.CritsQuerySet`
"""
allowed = user_sources(analyst)
sources = SourceAccess.objects(name__in=allowed)
return sources
def get_yara_hits(version=None):
"""
Get the yara hits in the database.
:param version: The yara hit version to search for.
:type version: str
:returns: :class:`crits.core.crits_mongoengine.CritsQuerySet`
"""
if version:
hits = YaraHit.objects(version=version).order_by('+result')
else:
hits = YaraHit.objects().order_by('+result')
return hits
def handle_unzip_file(md5, user=None, password=None):
"""
Unzip a sample.
:param md5: The MD5 of the sample to unzip.
:type md5: str
:param user: The user unzipping this sample.
:type user: str
:param password: Password to use to unzip the sample.
:type password: str
:returns: list
:raises: ZipFileError, Exception
"""
sample = class_from_value('Sample', md5)
if not sample:
return None
data = sample.filedata.read()
source = sample.source[0].name
campaign = sample.campaign
reference = ''
return unzip_file(md5, user, password, data, source, method="Unzip Existing Sample",
reference=reference, campaign=campaign, related_md5=md5, )
def unzip_file(filename, user=None, password=None, data=None, source=None,
method='Zip', reference='', campaign=None, confidence='low',
related_md5=None, related_id=None, related_type='Sample',
bucket_list=None, ticket=None, inherited_source=None,
is_return_only_md5=True, backdoor_name=None,
backdoor_version=None):
"""
Unzip a file.
:param filename: The name of the file to unzip.
:type filename: str
:param user: The user unzipping the file.
:type user: str
:param password: The password to use to unzip the file.
:type password: str
:param data: The filedata.
:type data: str
:param source: The name of the source that provided the data.
:type source: str
:param method: The source method to assign to the data.
:type method: str
:param reference: A reference to the data source.
:type reference: str
:param campaign: The campaign to attribute to the data.
:type campaign: str
:param confidence: The confidence level of the campaign attribution.
:type confidence: str ('low', 'medium', 'high')
:param related_md5: The MD5 of a related sample.
:type related_md5: str
:param related_id: The ObjectId of a related top-level object.
:type related_id: str
:param related_type: The type of the related top-level object.
:type related_type: str
:param bucket_list: The bucket(s) to assign to this data.
:type bucket_list: str
:param ticket: The ticket to assign to this data.
:type ticket: str
:param inherited_source: Source(s) to be inherited by the new Sample
:type inherited_source: list, :class:`crits.core.crits_mongoengine.EmbeddedSource`
:param is_return_only_md5: Only return the MD5s.
:type is_return_only_md5: bool
:param backdoor_name: Name of backdoor to relate this object to.
:type backdoor_name: str
:param backdoor_version: Version of backdoor to relate this object to.
:type backdoor_version: str
:returns: list
:raises: ZipFileError, Exception
"""
crits_config = CRITsConfig.objects().first()
temproot = crits_config.temp_dir
samples = []
zipdir = ""
extractdir = ""
try:
zip_md5 = md5(data).hexdigest()
# 7z doesn't decompress archives via stdin, therefore
# we need to write it out as a file first
zipdir = tempfile.mkdtemp(dir=temproot)
zipfile = open(zipdir + "/" + filename, "wb")
zipfile.write(data)
zipfile.close()
# Build argument string to popen()
args = [crits_config.zip7_path]
if not os.access(crits_config.zip7_path, os.X_OK):
errmsg = "7z is not executable at path specified in the config setting: %s\n" % crits_config.zip7_path
raise ZipFileError, errmsg
args.append("e")
extractdir = tempfile.mkdtemp(dir=temproot)
args.append("-o" + extractdir) # Set output directory
# Apparently 7z doesn't mind being handed a password to an
# archive that isn't encrypted - but blocks for the opposite
# case, so we'll always give it something for a password argument
if password is None:
args.append("-pNone")
else:
args.append("-p" + password)
args.append("-y") # 'Yes' on all queries - avoid blocking
args.append(zipdir + "/" + filename)
proc = subprocess.Popen(args, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
# Give the process 30 seconds to complete, otherwise kill it
waitSeconds = 30
while (proc.poll() is None and waitSeconds):
time.sleep(1)
waitSeconds -= 1
if proc.returncode: # 7z spit out an error
errmsg = "Error while extracting archive\n" + proc.stdout.read()
raise ZipFileError, errmsg
elif not waitSeconds: # Process timed out
proc.terminate()
raise ZipFileError, "Unzip process failed to terminate"
else:
if related_md5 and related_md5 == zip_md5:
relationship = RelationshipTypes.COMPRESSED_INTO
else:
relationship = RelationshipTypes.RELATED_TO
for root, dirs, files in os.walk(extractdir):
for filename in files:
filepath = extractdir + "/" + filename
filehandle = open(filepath, 'rb')
new_sample = handle_file(filename, filehandle.read(),
source, method, reference,
related_md5=related_md5,
related_id=related_id,
related_type=related_type, backdoor='',
user=user, campaign=campaign,
confidence=confidence,
bucket_list=bucket_list,
ticket=ticket,
inherited_source=inherited_source,
relationship=relationship,
is_return_only_md5=is_return_only_md5,
backdoor_name=backdoor_name,
backdoor_version=backdoor_version)
if new_sample:
samples.append(new_sample)
filehandle.close()
except ZipFileError: # Pass this error up the chain
raise
except Exception, ex:
errmsg = ''
for err in ex.args:
errmsg = errmsg + " " + str(err)
raise ZipFileError, errmsg
finally:
if os.path.isdir(zipdir):
shutil.rmtree(zipdir)
if os.path.isdir(extractdir):
shutil.rmtree(extractdir)
return samples
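# Illustrative call (an assumption, not original code): by default unzip_file()
# returns a list of MD5 strings for the extracted samples; with
# is_return_only_md5=False it returns the handle_file() result dicts instead.
# The filename, source, password, and reference below are hypothetical.
#
# md5s = unzip_file('dropper.zip', user='analyst1', password='infected',
#                   data=zip_bytes, source='SomeSource', reference='ticket-42')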
def handle_file(filename, data, source, method='Generic', reference='',
related_md5=None, related_id=None, related_type='Sample',
backdoor=None, user='', campaign=None, confidence='low',
md5_digest=None, sha1_digest=None, sha256_digest=None,
size=None, mimetype=None, bucket_list=None, ticket=None,
relationship=None, inherited_source=None, is_validate_only=False,
is_return_only_md5=True, cache={}, backdoor_name=None,
backdoor_version=None):
"""
Handle adding a file.
:param filename: The name of the file.
:type filename: str
:param data: The filedata.
:type data: str
:param source: The name of the source that provided the data.
:type source: list, str, :class:`crits.core.crits_mongoengine.EmbeddedSource`
:param method: The source method to assign to the data.
:type method: str
:param reference: A reference to the data source.
:type reference: str
:param related_md5: The MD5 of a related sample.
:type related_md5: str
:param related_id: The ObjectId of a related top-level object.
:type related_id: str
:param related_type: The type of the related top-level object.
:type related_type: str
:param backdoor: The backdoor to assign to this sample.
:type backdoor: str
:param user: The user uploading this sample.
:type user: str
:param campaign: The campaign to attribute to the data.
:type campaign: str
:param confidence: The confidence level of the campaign attribution.
:type confidence: str ('low', 'medium', 'high')
:param md5_digest: The MD5 of this sample.
:type md5_digest: str
:param sha1_digest: The SHA1 of this sample.
:type sha1_digest: str
:param sha256_digest: The SHA256 of this sample.
:type sha256_digest: str
:param size: The size of this sample.
:type size: str
:param mimetype: The Mimetype of this sample.
:type mimetype: str
:param bucket_list: The bucket(s) to assign to this data.
:type bucket_list: str
:param ticket: The ticket to assign to this data.
:type ticket: str
:param relationship: The relationship between this sample and the parent.
:type relationship: str
:param inherited_source: Source(s) to be inherited by the new Sample
:type inherited_source: list, :class:`crits.core.crits_mongoengine.EmbeddedSource`
:param is_validate_only: Only validate, do not add.
:type is_validate_only: bool
:param is_return_only_md5: Only return the MD5s.
:type is_return_only_md5: bool
:param cache: Cached data, typically for performance enhancements
during bulk operations.
:type cache: dict
:param backdoor_name: Name of the backdoor to relate the file to.
:type backdoor_name: str
:param backdoor_version: Version of the backdoor to relate the file to.
:type backdoor_version: str
:returns: str,
dict with keys:
"success" (boolean),
"message" (str),
"object" (the sample),
"""
retVal = {}
retVal['success'] = True
retVal['message'] = ""
is_sample_new = False
# get sample from database, or create it if one doesn't exist
if not data and not md5_digest:
retVal['success'] = False
retVal['message'] = "At least MD5 hash is required."
return retVal
if md5_digest:
# validate md5
md5_digest = md5_digest.lower().strip()
validate_md5_result = validate_md5_checksum(md5_digest)
retVal['message'] += validate_md5_result.get('message')
retVal['success'] = validate_md5_result.get('success')
if retVal['success'] == False:
if is_return_only_md5 == True:
return None
else:
return retVal
if sha1_digest != None and sha1_digest != "":
sha1_digest = sha1_digest.lower().strip()
validate_sha1_result = validate_sha1_checksum(sha1_digest)
retVal['message'] += validate_sha1_result.get('message')
retVal['success'] = validate_sha1_result.get('success')
if retVal['success'] == False:
if is_return_only_md5 == True:
return None
else:
return retVal
if sha256_digest != None and sha256_digest != "":
sha256_digest = sha256_digest.lower().strip()
validate_sha256_result = validate_sha256_checksum(sha256_digest)
retVal['message'] += validate_sha256_result.get('message')
retVal['success'] = validate_sha256_result.get('success')
if retVal['success'] == False:
if is_return_only_md5 == True:
return None
else:
return retVal
if data:
md5_digest = md5(data).hexdigest()
validate_md5_result = validate_md5_checksum(md5_digest)
retVal['message'] += validate_md5_result.get('message')
retVal['success'] = validate_md5_result.get('success')
if retVal['success'] == False:
if is_return_only_md5 == True:
return None
else:
return retVal
if related_id or related_md5:
if related_id:
related_obj = class_from_id(related_type, related_id)
else:
related_obj = class_from_value(related_type, related_md5)
if not related_obj:
retVal['message'] += (' Related %s not found. Sample not uploaded.'
% (related_type))
retVal['success'] = False
else:
related_obj = None
cached_results = cache.get(form_consts.Sample.CACHED_RESULTS)
if cached_results != None:
sample = cached_results.get(md5_digest)
else:
sample = Sample.objects(md5=md5_digest).first()
if not sample:
is_sample_new = True
sample = Sample()
sample.filename = filename or md5_digest
sample.md5 = md5_digest
sample.sha1 = sha1_digest
sample.sha256 = sha256_digest
sample.size = size
sample.mimetype = mimetype
else:
if filename not in sample.filenames and filename != sample.filename:
sample.filenames.append(filename)
if cached_results != None:
cached_results[md5_digest] = sample
# attempt to discover binary in GridFS before assuming we don't
# have it
sample.discover_binary()
if data:
# we already have this binary so generate metadata from it
if sample.filedata.grid_id:
sample._generate_file_metadata(data)
# add the binary to gridfs and generate metadata
else:
sample.add_file_data(data)
# if we didn't get data:
else:
if sample.filedata:
# get data from db and add metadata in case it doesn't exist
data = sample.filedata.read()
sample._generate_file_metadata(data)
else:
if md5_digest:
# no data and no binary, add limited metadata
sample.md5 = md5_digest
else:
retVal['message'] += ("The MD5 digest and data, or the file "
"data itself, need to be supplied.")
retVal['success'] = False
if sha1_digest:
sample.sha1 = sha1_digest
if sha256_digest:
sample.sha256 = sha256_digest
#add copy of inherited source(s) to Sample
if isinstance(inherited_source, EmbeddedSource):
sample.add_source(copy.copy(inherited_source))
elif isinstance(inherited_source, list) and len(inherited_source) > 0:
for s in inherited_source:
if isinstance(s, EmbeddedSource):
sample.add_source(copy.copy(s))
# generate new source information and add to sample
if isinstance(source, basestring) and len(source) > 0:
s = create_embedded_source(source,
method=method,
reference=reference,
analyst=user)
# this will handle adding a new source, or an instance automatically
sample.add_source(s)
elif isinstance(source, EmbeddedSource):
sample.add_source(source, method=method, reference=reference)
elif isinstance(source, list) and len(source) > 0:
for s in source:
if isinstance(s, EmbeddedSource):
sample.add_source(s, method=method, reference=reference)
if bucket_list:
sample.add_bucket_list(bucket_list, user)
if ticket:
sample.add_ticket(ticket, user)
# if no proper source has been provided, don't add the sample
if len(sample.source) == 0:
retVal['message'] += "The sample does not have a source."
retVal['success'] = False
elif is_validate_only == False:
# assume it's a list of EmbeddedCampaign, but check if it's a string
# if it is a string then create a new EmbeddedCampaign
if campaign != None:
campaign_array = campaign
if isinstance(campaign, basestring):
campaign_array = [EmbeddedCampaign(name=campaign, confidence=confidence, analyst=user)]
for campaign_item in campaign_array:
sample.add_campaign(campaign_item)
# save sample to get an id since the rest of the processing needs it
sample.save(username=user)
sources = user_sources(user)
if backdoor_name:
# Relate this to the backdoor family if there is one.
backdoor = Backdoor.objects(name=backdoor_name,
source__name__in=sources).first()
if backdoor:
backdoor.add_relationship(sample,
RelationshipTypes.RELATED_TO,
analyst=user)
backdoor.save()
# Also relate to the specific instance backdoor.
if backdoor_version:
backdoor = Backdoor.objects(name=backdoor_name,
version=backdoor_version,
source__name__in=sources).first()
if backdoor:
backdoor.add_relationship(sample,
RelationshipTypes.RELATED_TO,
analyst=user)
backdoor.save()
# reloading clears the _changed_fields of the sample object. this prevents
# situations where we save again below and the shard key (md5) is
# still marked as changed.
sample.reload()
# run sample triage:
if len(AnalysisResult.objects(object_id=str(sample.id))) < 1 and data:
run_triage(sample, user)
# update relationship if a related top-level object is supplied
if related_obj and sample:
if related_obj.id != sample.id: #don't form relationship to itself
if not relationship:
if related_obj._meta['crits_type'] == 'Email':
relationship = RelationshipTypes.CONTAINED_WITHIN
else:
relationship = RelationshipTypes.RELATED_TO
sample.add_relationship(related_obj,
relationship,
analyst=user,
get_rels=False)
sample.save(username=user)
if is_sample_new == True:
# New sample, and successfully uploaded
if is_validate_only == False:
retVal['message'] += ('Success: Added new sample <a href="%s">%s.</a>'
% (reverse('crits.samples.views.detail',
args=[sample.md5.lower()]),
sample.md5.lower()))
# Update Cache
if cached_results != None:
cached_results[sample.md5] = sample
else:
# Duplicate sample, but uploaded anyways
if is_validate_only == False:
message = ('Success: Updated sample <a href="%s">%s.</a>'
% (reverse('crits.samples.views.detail',
args=[sample.md5.lower()]),
sample.md5.lower()))
retVal['message'] += message
retVal['status'] = form_consts.Status.DUPLICATE
retVal['warning'] = message
# Duplicate sample, but only validation
else:
if sample.id != None:
warning_message = ('Warning: Trying to add file [' +
filename + ']'
' when MD5 already exists as file [' +
sample.filename + ']'
'<a href="%s">%s.</a>'
% (reverse('crits.samples.views.detail',
args=[sample.md5.lower()]),
sample.md5.lower()))
retVal['message'] += warning_message
retVal['status'] = form_consts.Status.DUPLICATE
retVal['warning'] = warning_message
if is_return_only_md5 == True:
return md5_digest
else:
retVal['object'] = sample
return retVal
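# Illustrative call (an assumption, not original code): a minimal handle_file()
# invocation with raw bytes. With is_return_only_md5=False it returns the
# retVal dict described in the docstring ('success', 'message', 'object').
# The filename, source name, and user below are hypothetical.
#
# result = handle_file('malware.exe', file_bytes, 'SomeSource',
#                      method='File Upload', user='analyst1',
#                      is_return_only_md5=False)
# if result['success']:
#     sample = result['object']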
def handle_uploaded_file(f, source, method='', reference='', file_format=None,
password=None, user=None, campaign=None, confidence='low',
related_md5=None, related_id=None, related_type='Sample',
filename=None, md5=None, sha1=None, sha256=None, size=None,
mimetype=None, bucket_list=None, ticket=None,
inherited_source=None, is_validate_only=False,
is_return_only_md5=True, cache={}, backdoor_name=None,
backdoor_version=None):
"""
Handle an uploaded file.
:param f: The uploaded file.
:type f: file handle
:param source: The name of the source that provided the data.
:type source: list, str, :class:`crits.core.crits_mongoengine.EmbeddedSource`
:param method: The source method to assign to the data.
:type method: str
:param reference: A reference to the data source.
:type reference: str
:param file_format: The format the file was uploaded in.
:type file_format: str
:param password: A password necessary to access the file data.
:type password: str
:param user: The user uploading this sample.
:type user: str
:param campaign: The campaign to attribute to the data.
:type campaign: str
:param confidence: The confidence level of the campaign attribution.
:type confidence: str ('low', 'medium', 'high')
:param related_md5: The MD5 of a related sample.
:type related_md5: str
:param related_id: The ObjectId of a related top-level object.
:type related_id: str
:param related_type: The type of the related top-level object.
:type related_type: str
:param filename: The filename of the sample.
:type filename: str
:param md5: The MD5 of the sample.
:type md5: str
:param sha1: The SHA1 of the sample.
:type sha1: str
:param sha256: The SHA256 of the sample.
:type sha256: str
:param size: The size of the sample.
:type size: str
:param mimetype: The mimetype of the sample.
:type mimetype: str
:param bucket_list: The bucket(s) to assign to this data.
:type bucket_list: str
:param ticket: The ticket to assign to this data.
:type ticket: str
:param inherited_source: Source(s) to be inherited by the new Sample
:type inherited_source: list, :class:`crits.core.crits_mongoengine.EmbeddedSource`
:param is_validate_only: Only validate, do not add.
:type is_validate_only: bool
:param is_return_only_md5: Only return the MD5s.
:type is_return_only_md5: bool
:param cache: Cached data, typically for performance enhancements
during bulk operations.
:type cache: dict
:param backdoor_name: Name of backdoor to relate this object to.
:type backdoor_name: str
:param backdoor_version: Version of backdoor to relate this object to.
:type backdoor_version: str
:returns: list
"""
samples = list()
if not source:
return [{'success': False, 'message': "Missing source information."}]
if method:
method = " - " + method
if f:
method = "File Upload" + method
elif md5:
method = "Metadata Upload" + method
else:
method = "Upload" + method
try:
data = f.read()
except AttributeError:
data = f
if not filename:
filename = getattr(f, 'name', None)
if not filename:
try:
filename = md5(data).hexdigest()
except:
filename = "unknown"
if file_format == "zip" and f:
return unzip_file(
filename,
user=user,
password=password,
data=data,
source=source,
method=method,
reference=reference,
campaign=campaign,
confidence=confidence,
related_md5=related_md5,
related_id=related_id,
related_type=related_type,
bucket_list=bucket_list,
ticket=ticket,
inherited_source=inherited_source,
is_return_only_md5=is_return_only_md5,
backdoor_name=backdoor_name,
backdoor_version=backdoor_version)
else:
new_sample = handle_file(filename, data, source, method, reference,
related_md5=related_md5, related_id=related_id,
related_type=related_type, backdoor='',
user=user, campaign=campaign,
confidence=confidence, md5_digest=md5,
sha1_digest=sha1, sha256_digest=sha256,
size=size, mimetype=mimetype,
bucket_list=bucket_list, ticket=ticket,
inherited_source=inherited_source,
is_validate_only=is_validate_only,
is_return_only_md5=is_return_only_md5,
cache=cache, backdoor_name=backdoor_name,
backdoor_version=backdoor_version)
if new_sample:
samples.append(new_sample)
return samples
def add_new_sample_via_bulk(data, rowData, request, errors, is_validate_only=False, cache={}):
"""
Add a new sample from bulk upload.
:param data: The data about the sample.
:type data: dict
:param rowData: Object data in the row.
:type rowData: dict
:param request: The Django request.
:type request: :class:`django.http.HttpRequest`
:param errors: List of existing errors to append to.
:type errors: list
:param is_validate_only: Only validate, do not add.
:type is_validate_only: bool
:param cache: Cached data, typically for performance enhancements
during bulk operations.
:type cache: dict
:returns: tuple of result, errors, return value
"""
username = request.user.username
result = False
retVal = {}
retVal['success'] = True
files = None
if request.FILES:
files = request.FILES
#upload_type = data.get('upload_type')
#filedata = data.get('filedata')
filename = data.get('filename')
campaign = data.get('campaign')
confidence = data.get('confidence')
md5 = data.get('md5')
sha1 = data.get('sha1')
sha256 = data.get('sha256')
size = data.get('size')
mimetype = data.get('mimetype')
fileformat = data.get('file_format')
password = data.get('password')
#is_email_results = data.get('email')
related_md5 = data.get('related_md5')
source = data.get('source')
method = data.get('method', '')
reference = data.get('reference')
bucket_list = data.get(form_consts.Common.BUCKET_LIST_VARIABLE_NAME)
ticket = data.get(form_consts.Common.TICKET_VARIABLE_NAME)
samples = handle_uploaded_file(files, source, method, reference,
file_format=fileformat,
password=password,
user=username,
campaign=campaign,
confidence=confidence,
related_md5=related_md5,
filename=filename,
md5=md5,
sha1=sha1,
sha256=sha256,
size=size,
mimetype=mimetype,
bucket_list=bucket_list,
ticket=ticket,
is_validate_only=is_validate_only,
is_return_only_md5=False,
cache=cache)
# This block tries to add objects to the item
if not errors or is_validate_only == True:
result = True
objectsData = rowData.get(form_consts.Common.OBJECTS_DATA)
for sample in samples:
# repack message field into top of structure
if retVal.get('message'):
if sample.get('success') == False:
retVal['success'] = False
result = False
errors.append(sample.get('message'))
else:
retVal['message'] += sample.get('message')
else:
if sample.get('success') == False:
retVal['success'] = False
result = False
errors.append(sample.get('message'))
else:
retVal['message'] = sample.get('message')
if sample.get('warning'):
retVal['warning'] = sample.get('warning')
if sample.get('status'):
retVal['status'] = sample.get('status')
# add new objects if they exist
if objectsData:
objectsData = json.loads(objectsData)
for object_row_counter, objectData in enumerate(objectsData, 1):
if sample.get('object') != None and is_validate_only == False:
objectDict = object_array_to_dict(objectData, "Sample",
sample.get('object').id)
else:
if sample.get('object'):
if sample.get('object').id:
objectDict = object_array_to_dict(objectData, "Sample",
sample.get('object').id)
else:
objectDict = object_array_to_dict(objectData, "Sample", "")
else:
objectDict = object_array_to_dict(objectData, "Sample", "")
(object_result, object_errors, object_retVal) = validate_and_add_new_handler_object(
None, objectDict, request, errors, object_row_counter,
is_validate_only=is_validate_only, cache=cache)
# if there was an error, mark the overall
# operation as failed
if object_retVal.get('success') == False:
retVal['success'] = False
result = False
if object_retVal.get('message'):
errors.append(object_retVal['message'])
else:
errors += "Failed to add Sample: " + md5
return result, errors, retVal
def parse_row_to_bound_sample_form(request, rowData, cache, upload_type="File Upload"):
"""
Parse a mass upload row into an UploadFileForm.
:param request: The Django request.
:type request: :class:`django.http.HttpRequest`
:param rowData: The data in the row.
:type rowData: dict
:param cache: Cached data, typically for performance enhancements
during bulk operations.
:type cache: dict
:param upload_type: The type of upload.
:type upload_type: str
:returns: :class:`crits.samples.forms.UploadFileForm`
"""
filedata = None
fileformat = None
password = None
filename = None
md5 = None
sha1 = None
sha256 = None
size = None
mimetype = None
if not upload_type:
upload_type = rowData.get(form_consts.Sample.UPLOAD_TYPE, "")
if upload_type == form_consts.Sample.UploadType.FILE_UPLOAD:
filedata = rowData.get(form_consts.Sample.FILE_DATA, "")
fileformat = rowData.get(form_consts.Sample.FILE_FORMAT, "")
password = rowData.get(form_consts.Sample.PASSWORD, "")
elif upload_type == form_consts.Sample.UploadType.METADATA_UPLOAD:
filename = rowData.get(form_consts.Sample.FILE_NAME, "")
md5 = rowData.get(form_consts.Sample.MD5, "")
sha1 = rowData.get(form_consts.Sample.SHA1, "")
sha256 = rowData.get(form_consts.Sample.SHA256, "")
size = rowData.get(form_consts.Sample.SIZE, 0)
mimetype = rowData.get(form_consts.Sample.MIMETYPE, "")
campaign = rowData.get(form_consts.Sample.CAMPAIGN, "")
confidence = rowData.get(form_consts.Sample.CAMPAIGN_CONFIDENCE, "")
is_email_results = convert_string_to_bool(rowData.get(form_consts.Sample.EMAIL_RESULTS, ""))
related_md5 = rowData.get(form_consts.Sample.RELATED_MD5, "")
source = rowData.get(form_consts.Sample.SOURCE, "")
method = rowData.get(form_consts.Sample.SOURCE_METHOD, "")
reference = rowData.get(form_consts.Sample.SOURCE_REFERENCE, "")
bucket_list = rowData.get(form_consts.Sample.BUCKET_LIST, "")
ticket = rowData.get(form_consts.Common.TICKET, "")
data = {
'upload_type': upload_type,
'filedata': filedata,
'filename': filename,
'md5': md5,
'sha1': sha1,
'sha256': sha256,
'size': size,
'mimetype': mimetype,
'file_format': fileformat,
'campaign': campaign,
'confidence': confidence,
'password': password,
'email': is_email_results,
'related_md5': related_md5,
'source': source,
'method': method,
'reference': reference,
'bucket_list': bucket_list,
'ticket': ticket
}
bound_md5_sample_form = cache.get('sample_form')
if bound_md5_sample_form == None:
bound_md5_sample_form = UploadFileForm(request.user, data, request.FILES)
cache['sample_form'] = bound_md5_sample_form
else:
bound_md5_sample_form.data = data
bound_md5_sample_form.full_clean()
return bound_md5_sample_form
def parse_row_to_bound_md5_sample_form(request, rowData, cache):
"""
Parse a mass upload row into an UploadFileForm.
:param request: The Django request.
:type request: :class:`django.http.HttpRequest`
:param rowData: The data in the row.
:type rowData: dict
:param cache: Cached data, typically for performance enhancements
during bulk operations.
:type cache: dict
:returns: :class:`crits.samples.forms.UploadFileForm`
"""
upload_type = form_consts.Sample.UploadType.METADATA_UPLOAD
return parse_row_to_bound_sample_form(request, rowData, cache, upload_type=upload_type)
def process_bulk_add_md5_sample(request, formdict):
"""
Performs the bulk add of MD5 samples by parsing the request data. Batches
some data into a cache object for performance, reducing the number of
single database queries during bulk operations.
:param request: Django request.
:type request: :class:`django.http.HttpRequest`
:param formdict: The form representing the bulk uploaded data.
:type formdict: dict
:returns: :class:`django.http.HttpResponse`
"""
md5_samples = []
cached_results = {}
cleanedRowsData = convert_handsontable_to_rows(request)
for rowData in cleanedRowsData:
if rowData != None and rowData.get(form_consts.Sample.MD5) != None:
md5_samples.append(rowData.get(form_consts.Sample.MD5).lower())
md5_results = Sample.objects(md5__in=md5_samples)
for md5_result in md5_results:
cached_results[md5_result.md5] = md5_result
cache = {form_consts.Sample.CACHED_RESULTS: cached_results, 'cleaned_rows_data': cleanedRowsData}
response = parse_bulk_upload(request, parse_row_to_bound_md5_sample_form, add_new_sample_via_bulk, formdict, cache)
return response
def update_sample_filename(id_, filename, analyst):
"""
Update a Sample filename.
:param id_: ObjectId of the Sample.
:type id_: str
:param filename: The new filename.
:type filename: str
:param analyst: The user setting the new filename.
:type analyst: str
:returns: dict with key 'success' (boolean) and 'message' (str) if failed.
"""
if not filename:
return {'success': False, 'message': "No filename to change"}
sample = Sample.objects(id=id_).first()
if not sample:
return {'success': False, 'message': "No sample to change"}
sample.filename = filename.strip()
try:
sample.save(username=analyst)
return {'success': True}
except ValidationError, e:
return {'success': False, 'message': e}
def modify_sample_filenames(id_, tags, analyst):
"""
Modify the filenames for a Sample.
:param id_: ObjectId of the Sample.
:type id_: str
:param tags: The new filenames.
:type tags: list
:param analyst: The user setting the new filenames.
:type analyst: str
:returns: dict with key 'success' (boolean) and 'message' (str) if failed.
"""
sample = Sample.objects(id=id_).first()
if sample:
sample.set_filenames(tags)
try:
sample.save(username=analyst)
return {'success': True}
except ValidationError, e:
return {'success': False, 'message': "Invalid value: %s" % e}
else:
return {'success': False}
|
|
# B's Battle Ship - this version creates player board and randomly generates fleet for the enemy.
######################### SET UP #################################
# import appropriate modules, initialize program,
# and set initial values for parameters
import pygame
from pygame.locals import *
import random
import sys
import math
pygame.init()
pygame.font.init()
canvas_size = (1250, 750)
canv_center = [int(canvas_size[0]/2),int(canvas_size[1]/2)]
GRID_SIZE = 25 # We define the canvas in terms of a 50 x 30 grid, each square is 25x25 pixels
running = True
# Color calls:
GREEN = (0,128,0)
WHITE = (255,255,255)
RED = (255,0,0)
BLUE = (0,0,255)
BLACK = (0,0,0)
YELLOW = (255,255,0)
GREY = (96,96,96)
ship = set() # this will hold the currently selected ship
message = "B's Battleship Game"
# Fonts
# Bf = pygame.font.SysFont("Fonts\times.ttf", 80, False, False )
# mf = pygame.font.SysFont("Fonts\times.ttf", 40, False, False )
# sf = pygame.font.SysFont("Fonts\times.ttf", 20, False, False )
Bf = pygame.font.SysFont(None, 80, False, False )
mf = pygame.font.SysFont(None, 40, False, False )
sf = pygame.font.SysFont(None, 25, False, False )
# helper functions
def text(string, font = sf, color = WHITE):
### returns surface consisting of text in the font and color specified. May then be blitted.
return font.render(string,0,color)
# Class Definitions
class Ship:
"""
Ship class defines a single ship. It requires a length, a starting (berth) position, a color, and an orientation: 0 = horizontal, 1 = vertical.
"""
def __init__(self,length,berth_pos, color = WHITE,orientation = 0, name = "a_ship"):
self.berth_pos = berth_pos #location of the helm in pixels ############################### change this to reference the grid?
self.helm_pos = list(berth_pos)
self.lilength = length
self.length = length*GRID_SIZE
self.orientation = orientation # 0 = default horizontal orientation, 1 will mean vertical
self.init_color = color
self.color = tuple(color)
self.width = GRID_SIZE
self.coord = set() # ship's coordinates are the set of numbers in 0..99 that belong to the squares occupied by the ship
self.name = name
def rect(self): # returns a rectangular representation of the ship
o = self.orientation
g = GRID_SIZE
return Rect(self.helm_pos,[(1-o)*self.length + o*g,o*self.length + (1-o)*g])
def __str__(self): # returns a string describing the ship's helm position
string = self.name + ": ship's helm = [" + str(self.helm_pos[0]) + ", " + str(self.helm_pos[1]) + "]"
return string
def draw(self): # draws a rectangle with the helm as one fixed corner using correct orientation
o = self.orientation
g = GRID_SIZE
rect = Rect(self.helm_pos,[(1-o)*self.length + o*g,o*self.length + (1-o)*g])
pygame.draw.rect(canvas,self.color,rect)
def flip(self): # changes orientation
self.orientation = (self.orientation + 1)%2
def inside(self,pos): # is the position pos inside the ship?
ships_inside = self.rect()
return ships_inside.collidepoint(pos[0],pos[1])
def nset(self,n): # returns the set of squares (0..99 numbering) the ship occupies when its helm is at square n
lset = set()
for i in range(0,self.lilength):
lset.add(n + i*10**self.orientation)
return lset
def get_coordinates(self):
return self.coord
def update_coordinates(self, lset): #ships coordinates are the set of numbers in 0..99 that belong to the squares occupied by the ship
self.coord.update(lset)
class Fleet:
"""
Fleet class creates a fleet: for the player it is berthed to the side for placement,
and for the enemy (the computer) it is placed randomly onto the enemy board.
"""
def __init__(self,visible = True,name = "a_fleet"):
if visible == True:
self.carrier = Ship(5,[3*GRID_SIZE,8*GRID_SIZE],GREEN,0, "Air-craft carrier")
self.battleship = Ship(4,[3*GRID_SIZE,10*GRID_SIZE],WHITE,0, "Battleship")
self.destroyer = Ship(3,[3*GRID_SIZE,12*GRID_SIZE],GREY,0, "Destroyer")
self.submarine = Ship(3,[3*GRID_SIZE,14*GRID_SIZE],BLACK,0,"Submarine")
self.patrol = Ship(2,[3*GRID_SIZE,16*GRID_SIZE],YELLOW,0,"Patrol Boat")
self.name = name
else:
self.carrier = create_ship(5, enemy_board, "Air-craft carrier")
self.battleship = create_ship(4, enemy_board,"Battleship" )
self.destroyer = create_ship(3, enemy_board,"Destroyer")
self.submarine = create_ship(3, enemy_board,"Submarine")
self.patrol = create_ship(2, enemy_board,"Patrol Boat")
self.name = name
self.fleet = set([self.carrier,self.battleship,self.destroyer,self.submarine,self.patrol])
def __str__(self):
for ship in self.fleet:
print(ship)
return self.name
def draw(self):
for boat in self.fleet:
boat.draw()
def fleet_used(self): # the set of squares used by the fleet
lset = set()
for boat in self.fleet:
lset.update(boat.coord)
return lset
def select(self,pos):
# checks to see if the position given by pos is inside one of the fleet's boats and returns that boat
for boat in self.fleet:
if boat.inside(pos):
return boat
return set()
class Board:
"""
Board class defines a 10 x 10 grid using GRID_SIZE to determine the size of the squares.
Methods draw the board and keep track of which squares are occupied.
Grid positions x, y lie in [0..9, 0..9].
"""
def __init__(self,origin = [0,0]):
self.origin = origin
self.outline = Rect(self.origin,[10*GRID_SIZE,10*GRID_SIZE])
self.unused = set()
for i in range(0,100):
self.unused.add(i)
self.used = set()
def draw(self):
for i in range(0,11):
pygame.draw.line(canvas,WHITE,self.v(i,0),self.v(i,10))
pygame.draw.line(canvas,WHITE,self.v(0,i),self.v(10,i))
pygame.draw.rect(canvas,RED,self.outline,2)
def chk_if_available(self,list):
value = set(list) <= self.unused
return value
def grid(self,pos): # Given pixel coordinates return grid position
return [(pos[0] - self.origin[0])//GRID_SIZE,(pos[1] - self.origin[1])//GRID_SIZE]
def invgrid(self,pos): # Given grid position return pixel coordinates pos is a list
return [pos[0]*GRID_SIZE + self.origin[0],pos[1]*GRID_SIZE + self.origin[1]]
def v(self,x,y): # returns the pixel position for the grid coordinates x, y
return [self.origin[0] + x*GRID_SIZE, self.origin[1] + y*GRID_SIZE]
def get_used(self):
return self.used
def get_unused(self):
return self.unused
def update_used(self,lset):
self.used.update(lset)
def update_unused(self,lset):
self.unused.update(lset)
def remove_used(self,lset):
self.used -= lset
def remove_unused(self,lset):
self.unused -= lset
def select_square(self,pos): # return True if the square is occupied
x = pos[0]
y = pos[1]
# completed sketch (assumption): pos is a grid position [x, y] in 0..9
return (10*y + x) in self.used
###### Event Handlers and Needed Functions
def redraw_screen():
### blits the objects to the frame
global canvas, ship
canvas.fill(BLUE)
canvas.blit(text(message,Bf,WHITE),[10*GRID_SIZE,GRID_SIZE])
canvas.blit(text(player_header,sf,WHITE),[9*GRID_SIZE,6*GRID_SIZE])
canvas.blit(text(player_header2,sf,WHITE),[8 *GRID_SIZE,7 * GRID_SIZE])
canvas.blit(text(enemy_header,sf,WHITE),[27*GRID_SIZE,6*GRID_SIZE])
if ship != set():
cursor_pos = pygame.mouse.get_pos()
for i in range(0,2):
ship.helm_pos[i] = cursor_pos[i]-move_vector[i]
friendly_board.draw()
friendly.draw()
enemy_board.draw()
enemy.draw()
canvas.blit(text(big_message,Bf,WHITE),[5*GRID_SIZE,20*GRID_SIZE])
pygame.display.flip()
def create_ship(n, board = Board([25*GRID_SIZE,8*GRID_SIZE]), name = "a_ship"):
# generates a list of all possible placements for a ship of size n
# on the given board, then randomly chooses one of them as the position
# of the ship. It updates the used and unused squares on the board and
# returns the newly created Ship object.
avail = []
for y in range(0,10):
for x in range(0,11-n):
poss = []
for j in range(0,n):
poss.append(y*10 + x +j)
if set(poss) <= board.unused:
avail.append(poss)
for x in range(0,10):
for y in range(0,11-n):
poss = []
for j in range(0,n):
poss.append(x + 10*(y+j))
if set(poss) <= board.unused:
avail.append(poss)
lset = random.choice(avail)
pos = [lset[0]%10,lset[0]//10]
lship = Ship(n,board.invgrid(pos),(67 + lset[0],80 + lset[0],80 + lset[0]),1-(lset[1]%10 - lset[0]%10), name) # all the fancy formulas are to get shades of grey
#########when we play we need to change this to blue
lship.update_coordinates(lship.nset(lset[0])) # ships coordinates are the set of numbers in 0..99 that belong to the squares occupied by the ship
board.update_used(lship.coord)
board.remove_unused(lship.coord) # this would have been better as a single function which updates and removes
# print(name, lset)
return lship
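# Illustrative example of the 0..99 board numbering used by create_ship and
# Ship.nset (not executed by the game): square n sits at column n % 10 and row
# n // 10, so a length-3 ship with its helm at square 42 occupies
# {42, 43, 44} when horizontal and {42, 52, 62} when vertical (each row below
# adds 10).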
############### RUN THE PROGRAM ######################
canvas = pygame.display.set_mode(canvas_size,DOUBLEBUF|RESIZABLE)
pygame.display.set_caption("B's Battleship Game")
canvas.fill((255,0,0))
friendly_board = Board([10*GRID_SIZE,8*GRID_SIZE])
enemy_board = Board([25*GRID_SIZE,8*GRID_SIZE])
message = "B's Battleship Game"
player_header = "Friendly Waters - drag your boats here"
player_header2 = "Press <space> to change orientation of boat."
enemy_header = "Enemy Waters"
friendly = Fleet(True,"Friendly Fleet")
print( friendly)
print()
enemy = Fleet(False, "Enemy Fleet")
print( enemy)
ready = False
big_message = ""
redraw_screen()
while running == True:
if len(friendly_board.get_used()) == 17:
big_message = "Press the 'p' key to begin your sea battle."
ready = True
#redraw_screen()
pygame.event.pump()
event=pygame.event.wait()
if event.type==QUIT:
pygame.display.quit()
running = False
sys.exit(0)
elif event.type==VIDEORESIZE:
canvas=pygame.display.set_mode(event.dict['size'],DOUBLEBUF|RESIZABLE)
#redraw_screen()
# canvas.fill((0,128,0))
# canvas.blit(text(message,mf),(25,25))
elif event.type == pygame.KEYDOWN:
if event.key == K_SPACE and ship != set():
ship.flip()
elif event.key == K_p and ready == True:
running = False
big_message = ""
elif event.type == MOUSEBUTTONDOWN:
new_pos = pygame.mouse.get_pos()
ship = friendly.select(new_pos)
if ship == set():
pass
else:
if ship.coord != set():
friendly_board.used.difference_update(ship.coord)
friendly_board.unused.update(ship.coord)
print("ship coord = ",ship.coord)
print("used = ", friendly_board.used)
print("unused = ",friendly_board.unused)
ship.color = RED
move_vector = [0,0]
for i in range(0,2):
move_vector[i] = new_pos[i] - ship.helm_pos[i]
elif event.type == MOUSEBUTTONUP and ship != set():
# if one of the ships is selected then deselect and determine where it should rest
ship.color = ship.init_color
#col = (col +1)%2
if friendly_board.outline.contains(ship.rect()):
x = (ship.helm_pos[0]+10 - friendly_board.origin[0])//GRID_SIZE
y = (ship.helm_pos[1]+10 - friendly_board.origin[1])//GRID_SIZE
pos = 10*y+x
if friendly_board.chk_if_available(ship.nset(pos)):
# if x,y available for ship's helm then place them there
print(friendly_board.chk_if_available(ship.nset(pos)))
ship.helm_pos[0] = friendly_board.origin[0] + GRID_SIZE*x
ship.helm_pos[1] = friendly_board.origin[1] + GRID_SIZE*y
friendly_board.unused.difference_update(ship.nset(pos))
friendly_board.used.update(ship.nset(pos))
ship.coord = ship.nset(pos)
print("ship coord = ",ship.coord)
print("used = ", friendly_board.used)
print("unused = ",friendly_board.unused)
ship = set()
move_vector = [0,0]
else:
ship.helm_pos = list(ship.berth_pos)
ship.coord = set()
ship.orientation = 0
ship = set()
else:
ship.helm_pos = list(ship.berth_pos)
ship.coord = set()
ship.orientation = 0
ship = set()
redraw_screen()
game_play = True
while game_play == True:
pygame.event.pump()
event=pygame.event.wait()
if event.type==QUIT:
pygame.display.quit()
running = game_play = False
sys.exit(0)
|
|
"""
The ``modelchain`` module contains functions and classes that combine
many of the PV power modeling steps. These tools make it easy to
get started with pvlib and demonstrate standard ways to use the
library. With great power comes great responsibility: users should take
the time to read the source code for the module.
"""
from functools import partial
import warnings
from pvlib import (atmosphere, clearsky, pvsystem, solarposition, temperature,
tools)
from pvlib.tracking import SingleAxisTracker
import pvlib.irradiance # avoid name conflict with full import
from pvlib.pvsystem import _DC_MODEL_PARAMS
from pvlib._deprecation import pvlibDeprecationWarning
def basic_chain(times, latitude, longitude,
module_parameters, temperature_model_parameters,
inverter_parameters,
irradiance=None, weather=None,
surface_tilt=None, surface_azimuth=None,
orientation_strategy=None,
transposition_model='haydavies',
solar_position_method='nrel_numpy',
airmass_model='kastenyoung1989',
altitude=None, pressure=None,
**kwargs):
"""
An experimental function that computes all of the modeling steps
necessary for calculating power or energy for a PV system at a given
location.
Parameters
----------
times : DatetimeIndex
Times at which to evaluate the model.
latitude : float.
Positive is north of the equator.
Use decimal degrees notation.
longitude : float.
Positive is east of the prime meridian.
Use decimal degrees notation.
module_parameters : None, dict or Series
Module parameters as defined by the SAPM. See pvsystem.sapm for
details.
temperature_model_parameters : None, dict or Series.
Temperature model parameters as defined by the SAPM.
See temperature.sapm_cell for details.
inverter_parameters : None, dict or Series
Inverter parameters as defined by the CEC. See pvsystem.snlinverter for
details.
irradiance : None or DataFrame, default None
If None, calculates clear sky data.
Columns must be 'dni', 'ghi', 'dhi'.
weather : None or DataFrame, default None
If None, assumes air temperature is 20 C and
wind speed is 0 m/s.
Columns must be 'wind_speed', 'temp_air'.
surface_tilt : None, float or Series, default None
Surface tilt angles in decimal degrees.
The tilt angle is defined as degrees from horizontal
(e.g. surface facing up = 0, surface facing horizon = 90)
surface_azimuth : None, float or Series, default None
Surface azimuth angles in decimal degrees.
The azimuth convention is defined
as degrees east of north
(North=0, South=180, East=90, West=270).
orientation_strategy : None or str, default None
The strategy for aligning the modules.
If not None, sets the ``surface_azimuth`` and ``surface_tilt``
properties of the ``system``. Allowed strategies include 'flat',
'south_at_latitude_tilt'. Ignored for SingleAxisTracker systems.
transposition_model : str, default 'haydavies'
Passed to system.get_irradiance.
solar_position_method : str, default 'nrel_numpy'
Passed to solarposition.get_solarposition.
airmass_model : str, default 'kastenyoung1989'
Passed to atmosphere.get_relative_airmass.
altitude : None or float, default None
If None, computed from pressure. Assumed to be 0 m
if pressure is also None.
pressure : None or float, default None
If None, computed from altitude. Assumed to be 101325 Pa
if altitude is also None.
**kwargs
Arbitrary keyword arguments.
See code for details.
Returns
-------
output : (dc, ac)
Tuple of DC power (with SAPM parameters) (DataFrame) and AC
power (Series).
"""
# use surface_tilt and surface_azimuth if provided,
# otherwise set them using the orientation_strategy
if surface_tilt is not None and surface_azimuth is not None:
pass
elif orientation_strategy is not None:
surface_tilt, surface_azimuth = \
get_orientation(orientation_strategy, latitude=latitude)
else:
raise ValueError('orientation_strategy or surface_tilt and '
'surface_azimuth must be provided')
if altitude is None and pressure is None:
altitude = 0.
pressure = 101325.
elif altitude is None:
altitude = atmosphere.pres2alt(pressure)
elif pressure is None:
pressure = atmosphere.alt2pres(altitude)
solar_position = solarposition.get_solarposition(
times, latitude, longitude, altitude=altitude, pressure=pressure,
method=solar_position_method, **kwargs)
# possible error with using apparent zenith with some models
airmass = atmosphere.get_relative_airmass(
solar_position['apparent_zenith'], model=airmass_model)
airmass = atmosphere.get_absolute_airmass(airmass, pressure)
dni_extra = pvlib.irradiance.get_extra_radiation(solar_position.index)
aoi = pvlib.irradiance.aoi(surface_tilt, surface_azimuth,
solar_position['apparent_zenith'],
solar_position['azimuth'])
if irradiance is None:
linke_turbidity = clearsky.lookup_linke_turbidity(
solar_position.index, latitude, longitude)
irradiance = clearsky.ineichen(
solar_position['apparent_zenith'],
airmass,
linke_turbidity,
altitude=altitude,
dni_extra=dni_extra
)
total_irrad = pvlib.irradiance.get_total_irradiance(
surface_tilt,
surface_azimuth,
solar_position['apparent_zenith'],
solar_position['azimuth'],
irradiance['dni'],
irradiance['ghi'],
irradiance['dhi'],
model=transposition_model,
dni_extra=dni_extra)
if weather is None:
weather = {'wind_speed': 0, 'temp_air': 20}
cell_temperature = temperature.sapm_cell(
total_irrad['poa_global'], weather['temp_air'], weather['wind_speed'],
temperature_model_parameters['a'], temperature_model_parameters['b'],
temperature_model_parameters['deltaT'])
effective_irradiance = pvsystem.sapm_effective_irradiance(
total_irrad['poa_direct'], total_irrad['poa_diffuse'], airmass, aoi,
module_parameters)
dc = pvsystem.sapm(effective_irradiance, cell_temperature,
module_parameters)
ac = pvsystem.snlinverter(dc['v_mp'], dc['p_mp'], inverter_parameters)
return dc, ac
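# A minimal usage sketch for basic_chain (illustrative only; the module and
# inverter names are placeholders that must exist in the retrieved databases,
# and temperature.TEMPERATURE_MODEL_PARAMETERS is assumed to be available as
# in recent pvlib versions):
#
# >>> import pandas as pd  # doctest: +SKIP
# >>> from pvlib import pvsystem, temperature  # doctest: +SKIP
# >>> from pvlib.modelchain import basic_chain  # doctest: +SKIP
# >>> times = pd.date_range('2020-06-01', periods=24, freq='1h',
# ...                       tz='US/Arizona')  # doctest: +SKIP
# >>> module = pvsystem.retrieve_sam('SandiaMod')['Canadian_Solar_CS5P_220M___2009_']  # doctest: +SKIP
# >>> inverter = pvsystem.retrieve_sam('cecinverter')['ABB__MICRO_0_25_I_OUTD_US_208__208V_']  # doctest: +SKIP
# >>> temp_params = temperature.TEMPERATURE_MODEL_PARAMETERS['sapm']['open_rack_glass_glass']  # doctest: +SKIP
# >>> dc, ac = basic_chain(times, 32.2, -110.9, module, temp_params, inverter,
# ...                      surface_tilt=32.2, surface_azimuth=180)  # doctest: +SKIP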
def get_orientation(strategy, **kwargs):
"""
Determine a PV system's surface tilt and surface azimuth
using a named strategy.
Parameters
----------
strategy: str
The orientation strategy.
Allowed strategies include 'flat', 'south_at_latitude_tilt'.
**kwargs:
Strategy-dependent keyword arguments. See code for details.
Returns
-------
surface_tilt, surface_azimuth
"""
if strategy == 'south_at_latitude_tilt':
surface_azimuth = 180
surface_tilt = kwargs['latitude']
elif strategy == 'flat':
surface_azimuth = 180
surface_tilt = 0
else:
raise ValueError('invalid orientation strategy. strategy must '
'be one of south_at_latitude_tilt, flat.')
return surface_tilt, surface_azimuth
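# For example (illustrative): get_orientation('south_at_latitude_tilt',
# latitude=32.2) returns (32.2, 180), and get_orientation('flat') returns
# (0, 180).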
class ModelChain(object):
"""
The ModelChain class provides a standardized, high-level
interface for all of the modeling steps necessary for calculating PV
power from a time series of weather inputs.
See https://pvlib-python.readthedocs.io/en/stable/modelchain.html
for examples.
Parameters
----------
system : PVSystem
A :py:class:`~pvlib.pvsystem.PVSystem` object that represents
the connected set of modules, inverters, etc.
location : Location
A :py:class:`~pvlib.location.Location` object that represents
the physical location at which to evaluate the model.
orientation_strategy : None or str, default None
The strategy for aligning the modules. If not None, sets the
``surface_azimuth`` and ``surface_tilt`` properties of the
``system``. Allowed strategies include 'flat',
'south_at_latitude_tilt'. Ignored for SingleAxisTracker systems.
clearsky_model : str, default 'ineichen'
Passed to location.get_clearsky.
transposition_model : str, default 'haydavies'
Passed to system.get_irradiance.
solar_position_method : str, default 'nrel_numpy'
Passed to location.get_solarposition.
airmass_model : str, default 'kastenyoung1989'
Passed to location.get_airmass.
dc_model: None, str, or function, default None
If None, the model will be inferred from the contents of
system.module_parameters. Valid strings are 'sapm',
'desoto', 'cec', 'pvsyst', 'pvwatts'. The ModelChain instance will
be passed as the first argument to a user-defined function.
ac_model: None, str, or function, default None
If None, the model will be inferred from the contents of
system.inverter_parameters and system.module_parameters. Valid
strings are 'snlinverter', 'adrinverter', 'pvwatts'. The
ModelChain instance will be passed as the first argument to a
user-defined function.
aoi_model: None, str, or function, default None
If None, the model will be inferred from the contents of
system.module_parameters. Valid strings are 'physical',
'ashrae', 'sapm', 'martin_ruiz', 'no_loss'. The ModelChain instance
will be passed as the first argument to a user-defined function.
spectral_model: None, str, or function, default None
If None, the model will be inferred from the contents of
system.module_parameters. Valid strings are 'sapm',
'first_solar', 'no_loss'. The ModelChain instance will be passed
as the first argument to a user-defined function.
temperature_model: None, str or function, default None
Valid strings are 'sapm' and 'pvsyst'. The ModelChain instance will be
passed as the first argument to a user-defined function.
losses_model: str or function, default 'no_loss'
Valid strings are 'pvwatts', 'no_loss'. The ModelChain instance
will be passed as the first argument to a user-defined function.
name: None or str, default None
Name of ModelChain instance.
**kwargs
Arbitrary keyword arguments. Included for compatibility, but not
used.
"""
def __init__(self, system, location,
orientation_strategy=None,
clearsky_model='ineichen',
transposition_model='haydavies',
solar_position_method='nrel_numpy',
airmass_model='kastenyoung1989',
dc_model=None, ac_model=None, aoi_model=None,
spectral_model=None, temperature_model=None,
losses_model='no_loss', name=None, **kwargs):
self.name = name
self.system = system
self.location = location
self.clearsky_model = clearsky_model
self.transposition_model = transposition_model
self.solar_position_method = solar_position_method
self.airmass_model = airmass_model
# calls setters
self.dc_model = dc_model
self.ac_model = ac_model
self.aoi_model = aoi_model
self.spectral_model = spectral_model
# TODO: deprecated kwarg temp_model. Remove use of temp_model in v0.8
temp_model = kwargs.pop('temp_model', None)
if temp_model is not None:
if temperature_model is None:
warnings.warn('The temp_model keyword argument is deprecated.'
' Use temperature_model instead',
pvlibDeprecationWarning)
temperature_model = temp_model
elif temp_model == temperature_model:
warnings.warn('Provide only one of temperature_model or '
'temp_model (deprecated).',
pvlibDeprecationWarning)
else:
raise ValueError(
'Conflicting temperature_model {} and temp_model {}. '
'temp_model is deprecated. Specify only temperature_model.'
.format(temperature_model, temp_model))
self.temperature_model = temperature_model
self.losses_model = losses_model
self.orientation_strategy = orientation_strategy
self.weather = None
self.times = None
self.solar_position = None
def __repr__(self):
attrs = [
'name', 'orientation_strategy', 'clearsky_model',
'transposition_model', 'solar_position_method',
'airmass_model', 'dc_model', 'ac_model', 'aoi_model',
'spectral_model', 'temperature_model', 'losses_model'
]
def getmcattr(self, attr):
"""needed to avoid recursion in property lookups"""
out = getattr(self, attr)
try:
out = out.__name__
except AttributeError:
pass
return out
return ('ModelChain: \n ' + '\n '.join(
('{}: {}'.format(attr, getmcattr(self, attr)) for attr in attrs)))
@property
def orientation_strategy(self):
return self._orientation_strategy
@orientation_strategy.setter
def orientation_strategy(self, strategy):
if strategy == 'None':
strategy = None
if strategy is not None:
self.system.surface_tilt, self.system.surface_azimuth = \
get_orientation(strategy, latitude=self.location.latitude)
self._orientation_strategy = strategy
@property
def dc_model(self):
return self._dc_model
@dc_model.setter
def dc_model(self, model):
# guess at model if None
if model is None:
self._dc_model, model = self.infer_dc_model()
# Set model and validate parameters
if isinstance(model, str):
model = model.lower()
if model in _DC_MODEL_PARAMS.keys():
# validate module parameters
missing_params = _DC_MODEL_PARAMS[model] - \
set(self.system.module_parameters.keys())
if missing_params: # some parameters are not in module.keys()
raise ValueError(model + ' selected for the DC model but '
'one or more required parameters are '
'missing : ' + str(missing_params))
if model == 'sapm':
self._dc_model = self.sapm
elif model == 'desoto':
self._dc_model = self.desoto
elif model == 'cec':
self._dc_model = self.cec
elif model == 'pvsyst':
self._dc_model = self.pvsyst
elif model == 'pvwatts':
self._dc_model = self.pvwatts_dc
else:
raise ValueError(model + ' is not a valid DC power model')
else:
self._dc_model = partial(model, self)
def infer_dc_model(self):
params = set(self.system.module_parameters.keys())
if set(['A0', 'A1', 'C7']) <= params:
return self.sapm, 'sapm'
elif set(['a_ref', 'I_L_ref', 'I_o_ref', 'R_sh_ref',
'R_s', 'Adjust']) <= params:
return self.cec, 'cec'
elif set(['a_ref', 'I_L_ref', 'I_o_ref', 'R_sh_ref',
'R_s']) <= params:
return self.desoto, 'desoto'
elif set(['gamma_ref', 'mu_gamma', 'I_L_ref', 'I_o_ref',
'R_sh_ref', 'R_sh_0', 'R_sh_exp', 'R_s']) <= params:
return self.pvsyst, 'pvsyst'
elif set(['pdc0', 'gamma_pdc']) <= params:
return self.pvwatts_dc, 'pvwatts'
else:
raise ValueError('could not infer DC model from '
'system.module_parameters. Check '
'system.module_parameters or explicitly '
'set the model with the dc_model kwarg.')
def sapm(self):
self.dc = self.system.sapm(self.effective_irradiance/1000.,
self.cell_temperature)
self.dc = self.system.scale_voltage_current_power(self.dc)
return self
def _singlediode(self, calcparams_model_function):
(photocurrent, saturation_current, resistance_series,
resistance_shunt, nNsVth) = (
calcparams_model_function(self.effective_irradiance,
self.cell_temperature))
self.diode_params = (photocurrent, saturation_current,
resistance_series,
resistance_shunt, nNsVth)
self.dc = self.system.singlediode(
photocurrent, saturation_current, resistance_series,
resistance_shunt, nNsVth)
self.dc = self.system.scale_voltage_current_power(self.dc).fillna(0)
return self
def desoto(self):
return self._singlediode(self.system.calcparams_desoto)
def cec(self):
return self._singlediode(self.system.calcparams_cec)
def pvsyst(self):
return self._singlediode(self.system.calcparams_pvsyst)
def pvwatts_dc(self):
self.dc = self.system.pvwatts_dc(self.effective_irradiance,
self.cell_temperature)
return self
@property
def ac_model(self):
return self._ac_model
@ac_model.setter
def ac_model(self, model):
if model is None:
self._ac_model = self.infer_ac_model()
elif isinstance(model, str):
model = model.lower()
if model == 'snlinverter':
self._ac_model = self.snlinverter
elif model == 'adrinverter':
self._ac_model = self.adrinverter
elif model == 'pvwatts':
self._ac_model = self.pvwatts_inverter
else:
raise ValueError(model + ' is not a valid AC power model')
else:
self._ac_model = partial(model, self)
def infer_ac_model(self):
inverter_params = set(self.system.inverter_parameters.keys())
if set(['C0', 'C1', 'C2']) <= inverter_params:
return self.snlinverter
elif set(['ADRCoefficients']) <= inverter_params:
return self.adrinverter
elif set(['pdc0']) <= inverter_params:
return self.pvwatts_inverter
else:
raise ValueError('could not infer AC model from '
'system.inverter_parameters. Check '
'system.inverter_parameters or explicitly '
'set the model with the ac_model kwarg.')
def snlinverter(self):
self.ac = self.system.snlinverter(self.dc['v_mp'], self.dc['p_mp'])
return self
def adrinverter(self):
self.ac = self.system.adrinverter(self.dc['v_mp'], self.dc['p_mp'])
return self
def pvwatts_inverter(self):
self.ac = self.system.pvwatts_ac(self.dc).fillna(0)
return self
@property
def aoi_model(self):
return self._aoi_model
@aoi_model.setter
def aoi_model(self, model):
if model is None:
self._aoi_model = self.infer_aoi_model()
elif isinstance(model, str):
model = model.lower()
if model == 'ashrae':
self._aoi_model = self.ashrae_aoi_loss
elif model == 'physical':
self._aoi_model = self.physical_aoi_loss
elif model == 'sapm':
self._aoi_model = self.sapm_aoi_loss
elif model == 'martin_ruiz':
self._aoi_model = self.martin_ruiz_aoi_loss
elif model == 'no_loss':
self._aoi_model = self.no_aoi_loss
else:
raise ValueError(model + ' is not a valid aoi loss model')
else:
self._aoi_model = partial(model, self)
def infer_aoi_model(self):
params = set(self.system.module_parameters.keys())
if set(['K', 'L', 'n']) <= params:
return self.physical_aoi_loss
elif set(['B5', 'B4', 'B3', 'B2', 'B1', 'B0']) <= params:
return self.sapm_aoi_loss
elif set(['b']) <= params:
return self.ashrae_aoi_loss
elif set(['a_r']) <= params:
return self.martin_ruiz_aoi_loss
else:
raise ValueError('could not infer AOI model from '
'system.module_parameters. Check that the '
'system.module_parameters contain parameters for '
'the physical, sapm, ashrae or martin_ruiz model; '
'explicitly set the model with the aoi_model '
'kwarg; or set aoi_model="no_loss".')
def ashrae_aoi_loss(self):
self.aoi_modifier = self.system.get_iam(self.aoi, iam_model='ashrae')
return self
def physical_aoi_loss(self):
self.aoi_modifier = self.system.get_iam(self.aoi, iam_model='physical')
return self
def sapm_aoi_loss(self):
self.aoi_modifier = self.system.get_iam(self.aoi, iam_model='sapm')
return self
def martin_ruiz_aoi_loss(self):
self.aoi_modifier = self.system.get_iam(self.aoi,
iam_model='martin_ruiz')
return self
def no_aoi_loss(self):
self.aoi_modifier = 1.0
return self
@property
def spectral_model(self):
return self._spectral_model
@spectral_model.setter
def spectral_model(self, model):
if model is None:
self._spectral_model = self.infer_spectral_model()
elif isinstance(model, str):
model = model.lower()
if model == 'first_solar':
self._spectral_model = self.first_solar_spectral_loss
elif model == 'sapm':
self._spectral_model = self.sapm_spectral_loss
elif model == 'no_loss':
self._spectral_model = self.no_spectral_loss
else:
raise ValueError(model + ' is not a valid spectral loss model')
else:
self._spectral_model = partial(model, self)
def infer_spectral_model(self):
params = set(self.system.module_parameters.keys())
if set(['A4', 'A3', 'A2', 'A1', 'A0']) <= params:
return self.sapm_spectral_loss
elif ((('Technology' in params or
'Material' in params) and
(self.system._infer_cell_type() is not None)) or
'first_solar_spectral_coefficients' in params):
return self.first_solar_spectral_loss
else:
raise ValueError('could not infer spectral model from '
'system.module_parameters. Check that the '
'system.module_parameters contain valid '
'first_solar_spectral_coefficients, a valid '
'Material or Technology value, or set '
'spectral_model="no_loss".')
def first_solar_spectral_loss(self):
self.spectral_modifier = self.system.first_solar_spectral_loss(
self.weather['precipitable_water'],
self.airmass['airmass_absolute'])
return self
def sapm_spectral_loss(self):
self.spectral_modifier = self.system.sapm_spectral_loss(
self.airmass['airmass_absolute'])
return self
def no_spectral_loss(self):
self.spectral_modifier = 1
return self
@property
def temperature_model(self):
return self._temperature_model
@temperature_model.setter
def temperature_model(self, model):
if model is None:
self._temperature_model = self.infer_temperature_model()
elif isinstance(model, str):
model = model.lower()
if model == 'sapm':
self._temperature_model = self.sapm_temp
elif model == 'pvsyst':
self._temperature_model = self.pvsyst_temp
else:
raise ValueError(model + ' is not a valid temperature model')
# check system.temperature_model_parameters for consistency
name_from_params = self.infer_temperature_model().__name__
if self._temperature_model.__name__ != name_from_params:
raise ValueError(
'Temperature model {} is inconsistent with '
'PVsystem.temperature_model_parameters {}'.format(
self._temperature_model.__name__,
self.system.temperature_model_parameters))
else:
self._temperature_model = partial(model, self)
def infer_temperature_model(self):
params = set(self.system.temperature_model_parameters.keys())
if set(['a', 'b', 'deltaT']) <= params:
return self.sapm_temp
elif set(['u_c', 'u_v']) <= params:
return self.pvsyst_temp
else:
raise ValueError('could not infer temperature model from '
'system.temperature_model_parameters {}.'
.format(self.system.temperature_model_parameters))
def sapm_temp(self):
self.cell_temperature = self.system.sapm_celltemp(
self.total_irrad['poa_global'], self.weather['temp_air'],
self.weather['wind_speed'])
return self
def pvsyst_temp(self):
self.cell_temperature = self.system.pvsyst_celltemp(
self.total_irrad['poa_global'], self.weather['temp_air'],
self.weather['wind_speed'])
return self
@property
def losses_model(self):
return self._losses_model
@losses_model.setter
def losses_model(self, model):
if model is None:
self._losses_model = self.infer_losses_model()
elif isinstance(model, str):
model = model.lower()
if model == 'pvwatts':
self._losses_model = self.pvwatts_losses
elif model == 'no_loss':
self._losses_model = self.no_extra_losses
else:
raise ValueError(model + ' is not a valid losses model')
else:
self._losses_model = partial(model, self)
def infer_losses_model(self):
raise NotImplementedError
def pvwatts_losses(self):
self.losses = (100 - self.system.pvwatts_losses()) / 100.
self.dc *= self.losses
return self
def no_extra_losses(self):
self.losses = 1
return self
def effective_irradiance_model(self):
fd = self.system.module_parameters.get('FD', 1.)
self.effective_irradiance = self.spectral_modifier * (
self.total_irrad['poa_direct']*self.aoi_modifier +
fd*self.total_irrad['poa_diffuse'])
return self
def complete_irradiance(self, weather, times=None):
"""
Determine the missing irradiation columns. Only two of the
following data columns (dni, ghi, dhi) are needed to calculate
the missing data.
This function is not safe at the moment. Results can be too high
or negative. Please contribute and help to improve this function
on https://github.com/pvlib/pvlib-python
Parameters
----------
weather : DataFrame
Column names must be ``'dni'``, ``'ghi'``, ``'dhi'``,
``'wind_speed'``, ``'temp_air'``. All irradiance components
are required. Air temperature of 20 C and wind speed
of 0 m/s will be added to the DataFrame if not provided.
times : None, deprecated
Deprecated argument included for API compatibility, but not
used internally. The index of the weather DataFrame is used
for times.
Returns
-------
self
Assigns attributes: weather
Examples
--------
This example does not work until the parameters `my_system`,
`my_location`, `my_datetime` and `my_weather` are defined
properly, but it shows the basic idea of how this method can be used.
>>> from pvlib.modelchain import ModelChain
>>> # my_weather containing 'dhi' and 'ghi'.
>>> mc = ModelChain(my_system, my_location) # doctest: +SKIP
>>> mc.complete_irradiance(my_weather) # doctest: +SKIP
>>> mc.run_model(mc.weather) # doctest: +SKIP
>>> # my_weather containing 'dhi', 'ghi' and 'dni'.
>>> mc = ModelChain(my_system, my_location) # doctest: +SKIP
>>> mc.run_model(my_weather) # doctest: +SKIP
"""
self.weather = weather
if times is not None:
warnings.warn('times keyword argument is deprecated and will be '
'removed in 0.8. The index of the weather DataFrame '
'is used for times.', pvlibDeprecationWarning)
self.solar_position = self.location.get_solarposition(
self.weather.index, method=self.solar_position_method)
icolumns = set(self.weather.columns)
wrn_txt = ("This function is not safe at the moment.\n" +
"Results can be too high or negative.\n" +
"Help to improve this function on github:\n" +
"https://github.com/pvlib/pvlib-python \n")
if {'ghi', 'dhi'} <= icolumns and 'dni' not in icolumns:
clearsky = self.location.get_clearsky(
self.weather.index, solar_position=self.solar_position)
self.weather.loc[:, 'dni'] = pvlib.irradiance.dni(
self.weather.loc[:, 'ghi'], self.weather.loc[:, 'dhi'],
self.solar_position.zenith,
clearsky_dni=clearsky['dni'],
clearsky_tolerance=1.1)
elif {'dni', 'dhi'} <= icolumns and 'ghi' not in icolumns:
warnings.warn(wrn_txt, UserWarning)
self.weather.loc[:, 'ghi'] = (
self.weather.dni * tools.cosd(self.solar_position.zenith) +
self.weather.dhi)
elif {'dni', 'ghi'} <= icolumns and 'dhi' not in icolumns:
warnings.warn(wrn_txt, UserWarning)
self.weather.loc[:, 'dhi'] = (
self.weather.ghi - self.weather.dni *
tools.cosd(self.solar_position.zenith))
return self
def prepare_inputs(self, weather, times=None):
"""
Prepare the solar position, irradiance, and weather inputs to
the model.
Parameters
----------
weather : DataFrame
Column names must be ``'dni'``, ``'ghi'``, ``'dhi'``,
``'wind_speed'``, ``'temp_air'``. All irradiance components
are required. Air temperature of 20 C and wind speed
of 0 m/s will be added to the DataFrame if not provided.
times : None, deprecated
Deprecated argument included for API compatibility, but not
used internally. The index of the weather DataFrame is used
for times.
Notes
-----
Assigns attributes: ``solar_position``, ``airmass``,
``total_irrad``, ``aoi``
See also
--------
ModelChain.complete_irradiance
"""
if not {'ghi', 'dni', 'dhi'} <= set(weather.columns):
raise ValueError(
"Uncompleted irradiance data set. Please check your input "
"data.\nData set needs to have 'dni', 'dhi' and 'ghi'.\n"
"Detected data: {0}".format(list(weather.columns)))
self.weather = weather
if times is not None:
warnings.warn('times keyword argument is deprecated and will be '
'removed in 0.8. The index of the weather DataFrame '
'is used for times.', pvlibDeprecationWarning)
self.times = self.weather.index
self.solar_position = self.location.get_solarposition(
self.weather.index, method=self.solar_position_method)
self.airmass = self.location.get_airmass(
solar_position=self.solar_position, model=self.airmass_model)
# PVSystem.get_irradiance and SingleAxisTracker.get_irradiance
# and PVSystem.get_aoi and SingleAxisTracker.get_aoi
# have different method signatures. Use partial to handle
# the differences.
if isinstance(self.system, SingleAxisTracker):
self.tracking = self.system.singleaxis(
self.solar_position['apparent_zenith'],
self.solar_position['azimuth'])
self.tracking['surface_tilt'] = (
self.tracking['surface_tilt']
.fillna(self.system.axis_tilt))
self.tracking['surface_azimuth'] = (
self.tracking['surface_azimuth']
.fillna(self.system.axis_azimuth))
self.aoi = self.tracking['aoi']
get_irradiance = partial(
self.system.get_irradiance,
self.tracking['surface_tilt'],
self.tracking['surface_azimuth'],
self.solar_position['apparent_zenith'],
self.solar_position['azimuth'])
else:
self.aoi = self.system.get_aoi(
self.solar_position['apparent_zenith'],
self.solar_position['azimuth'])
get_irradiance = partial(
self.system.get_irradiance,
self.solar_position['apparent_zenith'],
self.solar_position['azimuth'])
self.total_irrad = get_irradiance(
self.weather['dni'],
self.weather['ghi'],
self.weather['dhi'],
airmass=self.airmass['airmass_relative'],
model=self.transposition_model)
if self.weather.get('wind_speed') is None:
self.weather['wind_speed'] = 0
if self.weather.get('temp_air') is None:
self.weather['temp_air'] = 20
return self
def run_model(self, weather, times=None):
"""
Run the model.
Parameters
----------
weather : DataFrame
Column names must be ``'dni'``, ``'ghi'``, ``'dhi'``,
``'wind_speed'``, ``'temp_air'``. All irradiance components
are required. Air temperature of 20 C and wind speed
of 0 m/s will be added to the DataFrame if not provided.
times : None, deprecated
Deprecated argument included for API compatibility, but not
used internally. The index of the weather DataFrame is used
for times.
Returns
-------
self
Assigns attributes: solar_position, airmass, irradiance,
total_irrad, effective_irradiance, weather, cell_temperature, aoi,
aoi_modifier, spectral_modifier, dc, ac, losses.
"""
if times is not None:
warnings.warn('times keyword argument is deprecated and will be '
'removed in 0.8. The index of the weather DataFrame '
'is used for times.', pvlibDeprecationWarning)
self.prepare_inputs(weather)
self.aoi_model()
self.spectral_model()
self.effective_irradiance_model()
self.temperature_model()
self.dc_model()
self.losses_model()
self.ac_model()
return self
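# A minimal ModelChain usage sketch (illustrative only; ``system`` and
# ``location`` are assumed to be a fully configured PVSystem and Location,
# and ``weather`` a DataFrame with 'ghi', 'dni', 'dhi', 'temp_air' and
# 'wind_speed' columns indexed by time):
#
# >>> from pvlib.modelchain import ModelChain  # doctest: +SKIP
# >>> mc = ModelChain(system, location)  # doctest: +SKIP
# >>> mc.run_model(weather)  # doctest: +SKIP
# >>> mc.ac.head()  # doctest: +SKIP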
|
|
import numpy as np
import os
from PySide import QtGui, QtCore
import sharppy.sharptab as tab
import sharppy.databases.inset_data as inset_data
from sharppy.sharptab.constants import *
## routine written by Kelton Halbert and Greg Blumberg
## keltonhalbert@ou.edu and wblumberg@ou.edu
__all__ = ['backgroundVROT', 'plotVROT']
class backgroundVROT(QtGui.QFrame):
'''
Draw the background frame and lines for the conditional EF-scale probability (Vrot) inset
'''
def __init__(self):
super(backgroundVROT, self).__init__()
self.initUI()
def initUI(self):
## window configuration settings,
## such as padding, width, height, and
## min/max plot axes
self.setStyleSheet("QFrame {"
" background-color: rgb(0, 0, 0);"
" border-width: 1px;"
" border-style: solid;"
" border-color: #3399CC;}")
if self.physicalDpiX() > 75:
fsize = 10
else:
fsize = 11
self.plot_font = QtGui.QFont('Helvetica', fsize + 1)
self.box_font = QtGui.QFont('Helvetica', fsize)
self.plot_metrics = QtGui.QFontMetrics( self.plot_font )
self.box_metrics = QtGui.QFontMetrics(self.box_font)
self.plot_height = self.plot_metrics.xHeight() + 5
self.box_height = self.box_metrics.xHeight() + 5
self.vrot_inset_data = inset_data.vrotData()
self.lpad = 5.; self.rpad = 0.
self.tpad = 25.; self.bpad = 15.
self.wid = self.size().width() - self.rpad
self.hgt = self.size().height() - self.bpad
self.tlx = self.rpad; self.tly = self.tpad
self.brx = self.wid; self.bry = self.hgt
self.probmax = 70.; self.probmin = 0.
self.vrotmax = 110.; self.vrotmin = 0
self.EF01_color = "#006600"
self.EF23_color = "#FFCC33"
self.EF45_color = "#FF00FF"
self.plotBitMap = QtGui.QPixmap(self.width()-2, self.height()-2)
self.plotBitMap.fill(QtCore.Qt.black)
self.plotBackground()
def resizeEvent(self, e):
'''
Handles the event the window is resized
'''
self.initUI()
def plotBackground(self):
'''
Handles painting the frame.
'''
## initialize a painter object and draw the frame
qp = QtGui.QPainter()
qp.begin(self.plotBitMap)
qp.setRenderHint(qp.Antialiasing)
qp.setRenderHint(qp.TextAntialiasing)
self.draw_frame(qp)
qp.end()
def setBlackPen(self, qp):
color = QtGui.QColor('#000000')
color.setAlphaF(.5)
pen = QtGui.QPen(color, 0, QtCore.Qt.SolidLine)
brush = QtGui.QBrush(QtCore.Qt.SolidPattern)
qp.setPen(pen)
qp.setBrush(brush)
return qp
def draw_frame(self, qp):
'''
Draw the background frame.
qp: QtGui.QPainter object
'''
## set a new pen to draw with
pen = QtGui.QPen(QtCore.Qt.white, 2, QtCore.Qt.SolidLine)
qp.setPen(pen)
qp.setFont(self.plot_font)
rect1 = QtCore.QRectF(1.5, 2, self.brx, self.plot_height)
qp.drawText(rect1, QtCore.Qt.TextDontClip | QtCore.Qt.AlignCenter,
'Conditional EF-scale Probs based on Vrot')
qp.setFont(QtGui.QFont('Helvetica', 9))
color = QtGui.QColor(self.EF01_color)
pen = QtGui.QPen(color, 2, QtCore.Qt.SolidLine)
qp.setPen(pen)
rect1 = QtCore.QRectF(self.vrot_to_pix(25), 2 + self.plot_height, 10, self.plot_height)
qp.drawText(rect1, QtCore.Qt.TextDontClip | QtCore.Qt.AlignCenter,
'EF0-EF1')
color = QtGui.QColor(self.EF23_color)
pen = QtGui.QPen(color, 2, QtCore.Qt.SolidLine)
qp.setPen(pen)
rect1 = QtCore.QRectF(self.vrot_to_pix(50), 2 + self.plot_height, 10, self.plot_height)
qp.drawText(rect1, QtCore.Qt.TextDontClip | QtCore.Qt.AlignCenter,
'EF2-EF3')
color = QtGui.QColor(self.EF45_color)
pen = QtGui.QPen(color, 2, QtCore.Qt.SolidLine)
qp.setPen(pen)
rect1 = QtCore.QRectF(self.vrot_to_pix(75), 2 + self.plot_height, 10, self.plot_height)
qp.drawText(rect1, QtCore.Qt.TextDontClip | QtCore.Qt.AlignCenter,
'EF4-EF5')
pen = QtGui.QPen(QtCore.Qt.blue, 1, QtCore.Qt.DashLine)
qp.setPen(pen)
# Plot all of the Y-ticks for the probabilities
ytick_fontsize = 10
y_ticks_font = QtGui.QFont('Helvetica', ytick_fontsize)
qp.setFont(y_ticks_font)
texts = self.vrot_inset_data['ytexts']
spacing = self.bry / 10.
y_ticks = np.arange(self.tpad, self.bry+spacing, spacing)
for i in xrange(len(y_ticks)):
pen = QtGui.QPen(QtGui.QColor("#0080FF"), 1, QtCore.Qt.DashLine)
qp.setPen(pen)
try:
qp.drawLine(self.tlx, self.prob_to_pix(int(texts[i])), self.brx, self.prob_to_pix(int(texts[i])))
except:
continue
color = QtGui.QColor('#000000')
pen = QtGui.QPen(color, 1, QtCore.Qt.SolidLine)
qp.setPen(pen)
ypos = spacing*(i+1) - (spacing/4.)
ypos = self.prob_to_pix(int(texts[i])) - ytick_fontsize/2
rect = QtCore.QRect(self.tlx, ypos, 20, ytick_fontsize)
pen = QtGui.QPen(QtCore.Qt.white, 1, QtCore.Qt.SolidLine)
qp.setPen(pen)
qp.drawText(rect, QtCore.Qt.TextDontClip | QtCore.Qt.AlignCenter, texts[i])
width = self.brx / 12
texts = np.arange(10, 110, 10)
# Draw the x tick marks
qp.setFont(QtGui.QFont('Helvetica', 8))
for i in xrange(texts.shape[0]):
color = QtGui.QColor('#000000')
color.setAlpha(0)
pen = QtGui.QPen(color, 1, QtCore.Qt.SolidLine)
rect = QtCore.QRectF(self.vrot_to_pix(texts[i]) - width/2, self.prob_to_pix(-2), width, 4)
# Change to a white pen to draw the text below the box and whisker plot
pen = QtGui.QPen(QtCore.Qt.white, 1, QtCore.Qt.SolidLine)
qp.setPen(pen)
qp.drawText(rect, QtCore.Qt.TextDontClip | QtCore.Qt.AlignCenter, str(texts[i]))
xpts = self.vrot_inset_data['xpts']
# Draw the EF1+ stuff
ef01 = self.vrot_inset_data['EF0-EF1']
color = QtGui.QColor(self.EF01_color)
lastprob = ef01[0]
if lastprob > 70:
lastprob = 70
for i in xrange(1, np.asarray(xpts).shape[0], 1):
if ef01[i] > 70:
prob = 70
pen = QtGui.QPen(color, 2.5, QtCore.Qt.DotLine)
qp.setPen(pen)
else:
pen = QtGui.QPen(color, 2.5, QtCore.Qt.SolidLine)
qp.setPen(pen)
prob = ef01[i]
qp.drawLine(self.vrot_to_pix(xpts[i-1]), self.prob_to_pix(lastprob), self.vrot_to_pix(xpts[i]), self.prob_to_pix(prob))
lastprob = prob
# Draw the EF2-EF3 stuff
ef23 = self.vrot_inset_data['EF2-EF3']
color = QtGui.QColor(self.EF23_color)
lastprob = ef23[0]
if lastprob > 70:
lastprob = 70
for i in xrange(1, np.asarray(xpts).shape[0], 1):
if ef23[i] > 70:
prob = 70
pen = QtGui.QPen(color, 2.5, QtCore.Qt.DotLine)
qp.setPen(pen)
else:
pen = QtGui.QPen(color, 2.5, QtCore.Qt.SolidLine)
qp.setPen(pen)
prob = ef23[i]
qp.drawLine(self.vrot_to_pix(xpts[i-1]), self.prob_to_pix(lastprob), self.vrot_to_pix(xpts[i]), self.prob_to_pix(prob))
lastprob = prob
# Draw the EF4-EF5 stuff
ef45 = self.vrot_inset_data['EF4-EF5']
color = QtGui.QColor(self.EF45_color)
lastprob = ef45[0]
for i in xrange(1, np.asarray(xpts).shape[0], 1):
pen = QtGui.QPen(color, 2.5, QtCore.Qt.SolidLine)
qp.setPen(pen)
prob = ef45[i]
qp.drawLine(self.vrot_to_pix(xpts[i-1]), self.prob_to_pix(lastprob), self.vrot_to_pix(xpts[i]), self.prob_to_pix(prob))
lastprob = prob
def prob_to_pix(self, prob):
scl1 = self.probmax - self.probmin
scl2 = self.probmin + prob
return self.bry - (scl2 / scl1) * (self.bry - self.tpad)
def vrot_to_pix(self, vrot):
'''
Function to convert a rotational velocity value to an X pixel.
Parameters
----------
vrot: rotational velocity (Vrot) in kts
'''
scl1 = self.vrotmax - self.vrotmin
scl2 = self.vrotmax - vrot
return self.lpad + self.brx - (scl2 / scl1) * (self.brx - self.rpad)
class plotVROT(backgroundVROT):
'''
Plot the data on the frame. Inherits the background class that
plots the frame.
'''
def __init__(self):
super(plotVROT, self).__init__()
self.prof = None
self.vrot = 0
def resizeEvent(self, e):
'''
Handles when the window is resized
'''
super(plotVROT, self).resizeEvent(e)
self.interp_vrot()
self.plotData()
def paintEvent(self, e):
super(plotVROT, self).paintEvent(e)
qp = QtGui.QPainter()
qp.begin(self)
qp.drawPixmap(1, 1, self.plotBitMap)
qp.end()
def setProf(self, prof):
return
def plotData(self):
'''
Handles painting on the frame
'''
## this function handles painting the plot
## create a new painter object
qp = QtGui.QPainter()
self.draw_vrot(qp)
def interp_vrot(self):
self.probef01 = self.vrot_inset_data['EF0-EF1'][np.argmin(np.abs(self.vrot - self.vrot_inset_data['xpts']))]
self.probef23 = self.vrot_inset_data['EF2-EF3'][np.argmin(np.abs(self.vrot - self.vrot_inset_data['xpts']))]
self.probef45 = self.vrot_inset_data['EF4-EF5'][np.argmin(np.abs(self.vrot - self.vrot_inset_data['xpts']))]
def mouseDoubleClickEvent(self, e):
super(plotVROT, self).resizeEvent(e)
self.openInputDialog()
self.interp_vrot()
self.plotData()
self.update()
def openInputDialog(self):
"""
Opens the text version of the input dialog
"""
text, result = QtGui.QInputDialog.getText(None, "VROT Input",
"Enter the VROT:")
if result:
self.vrot = int(text)
def draw_vrot(self, qp):
qp.begin(self.plotBitMap)
qp.setRenderHint(qp.Antialiasing)
qp.setRenderHint(qp.TextAntialiasing)
vrot_pix = self.vrot_to_pix(self.vrot)
# plot the white dashed line
pen = QtGui.QPen(QtGui.QColor("#FFFFFF"), 1.5, QtCore.Qt.DotLine)
qp.setPen(pen)
qp.drawLine(vrot_pix, self.prob_to_pix(0), vrot_pix, self.prob_to_pix(70))
# Draw the probabilities.
color = QtGui.QColor(self.EF01_color)
pen = QtGui.QPen(color, 1, QtCore.Qt.SolidLine)
qp.setPen(pen)
rect = QtCore.QRectF(self.vrot_to_pix(self.vrot-7), self.prob_to_pix(self.probef01), 4, 7)
qp.drawText(rect, QtCore.Qt.TextDontClip | QtCore.Qt.AlignLeft, tab.utils.INT2STR(self.probef01))
color = QtGui.QColor(self.EF23_color)
pen = QtGui.QPen(color, 1, QtCore.Qt.SolidLine)
qp.setPen(pen)
rect = QtCore.QRectF(self.vrot_to_pix(self.vrot), self.prob_to_pix(self.probef23), 4, 7)
qp.drawText(rect, QtCore.Qt.TextDontClip | QtCore.Qt.AlignLeft, tab.utils.INT2STR(self.probef23))
color = QtGui.QColor(self.EF45_color)
pen = QtGui.QPen(color, 1, QtCore.Qt.SolidLine)
qp.setPen(pen)
rect = QtCore.QRectF(self.vrot_to_pix(self.vrot), self.prob_to_pix(self.probef45), 4, 7)
qp.drawText(rect, QtCore.Qt.TextDontClip | QtCore.Qt.AlignLeft, tab.utils.INT2STR(self.probef45))
qp.end()
|
|
#!/usr/bin/python
# Copyright (c) 2011 Netcetera.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# MIT License: http://www.opensource.org/licenses/mit-license.php
#
# Contributors:
# Netcetera AG
# Michael Pellaton
#
import glob
import os
import re
import shutil
import subprocess
REL_PATH_SRC_DIR = 'source'
REL_PATH_DONE_DIR = 'done'
REL_PATH_WORK_DIR = 'work'
REL_PATH_TARGET_DIR = 'target'
def _read_file_and_join_lines(file_name):
'''
Reads a file and joins its lines into one ','-separated string.
Leading and trailing whitespace is removed. Lines whose first non-whitespace
character is '#' are treated as comments and are ignored in the output.
@param file_name: the file to read
'''
def _normalizeString(line):
return line.replace('\n', '').strip()
def _isNotComment(line):
return not line.startswith("#")
def _isNotEmpty(line):
return line != ""
with open(file_name, 'r') as file:
return ','.join(filter(_isNotEmpty, filter(_isNotComment, map(_normalizeString, file.readlines()))))
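# Illustrative example (hypothetical file contents): a repolist containing the
# lines "# release repos", "http://download.eclipse.org/releases/indigo" and
# "http://example.org/updates" would be joined into
# 'http://download.eclipse.org/releases/indigo,http://example.org/updates'.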
def _get_tag_name(iulist):
'''
Gets the tag name of the iulist which is the file name excluding the leading sort number and
the trailing iulist extension.
@param iulist: the file name to get the tag name from
'''
parts = iulist.split('.')
if len(parts) == 3:
return parts[1]
return 'unknown'
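# For example (illustrative): _get_tag_name('10.webtools.iulist') returns
# 'webtools'; a file name that does not split into exactly three parts on '.'
# falls back to 'unknown'.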
def _get_distribution_info(source_dir, work_dir):
'''
Finds all Eclipse SDK archives in 'source_dir' and extracts the information used from the filename.
'''
distribution_info = []
for distribution in glob.glob(os.path.join(source_dir, 'eclipse-SDK-*')):
a, b, s, variant = distribution.split('-', 3)
platform, filetype = variant.split('.', 1)
distribution_info.append((os.path.abspath(distribution), platform, filetype,
os.path.abspath(os.path.join(work_dir, platform))))
return distribution_info
def _extract_archive(archivefile, filetype, platform_workdir):
'''
Extracts the Eclipse archive file into a working directory.
@param archivefile: the archive file to extract
@param filetype: the file type of the archive
@param platform_workdir: the platform working directory where the archive shall be extracted
'''
if os.path.exists(platform_workdir):
shutil.rmtree(platform_workdir)
os.mkdir(platform_workdir)
if filetype == 'tar.gz':
_call_executable(['tar', '-xzf', archivefile], platform_workdir)
elif filetype == 'zip':
_call_executable(['unzip', archivefile], platform_workdir)
else:
print('Error: unknown file type \'{ftype}\''.format(ftype=filetype))
exit(-1)
def _create_archive(platform_workdir, filetype, platform, distribution_name, target_dir):
'''
Creates an archive of the type specified.
@param platform_workdir: the working directory to create an archive from
@param filetype: the file type of the archive
@param platform: the platform string
@param distribution_name: the name of the distribution
'''
archiveFile = os.path.join(os.path.abspath(target_dir), distribution_name + '-' + platform + '.' + filetype)
if os.path.exists(archiveFile):
os.remove(archiveFile)
if filetype == 'zip' or platform == 'macosx-cocoa-x86_64' :
_call_executable(['zip', '-r', '-q', archiveFile, distribution_name], platform_workdir)
elif filetype == 'tar.gz':
_call_executable(['tar', '-czf', archiveFile, distribution_name], platform_workdir)
else:
print('Error: unknown file type \'{ftype}\''.format(ftype=filetype))
exit(-1)
def _install(destination, configpath, eclipse_binary):
'''
Installs the IUs into Eclipse.
@param destination: the eclipse to install to
@param configpath: the path to the directory containing the iu and repo lists
@param eclipse_binary: the Eclipse binary used as installer
'''
iulist_list = glob.glob(os.path.join(configpath, '*.iulist'))
iulist_list.sort()
for iulist in iulist_list :
repolist = iulist.replace('.iulist', '.repolist')
if os.path.exists(repolist):
print(' -IUs: ' + iulist + ' from: ' + repolist)
# Sometimes there are problems that can be fixed by retrying
retry_count=0
while retry_count < 3:
try:
_call_executable([os.path.abspath(eclipse_binary),
'-application', 'org.eclipse.equinox.p2.director',
'-nosplash',
'-repository', _read_file_and_join_lines(repolist),
'-installIU', _read_file_and_join_lines(iulist),
'-destination', destination,
'-tag', _get_tag_name(iulist),
'-profile', 'SDKProfile'])
break
except:
retry_count+=1
print " -WARNING: An error occured while downloading. Nr of retry: "+ str(retry_count)
pass
def _call_executable(commandline, command_workdir=os.getcwd()):
'''
Executes an operating system command.
@param commandline: the command line to execute
@param command_workdir: the working directory of the command (optional)
'''
with open(os.devnull, 'w') as devnull:
returncode = subprocess.call(commandline, cwd=command_workdir, stdout=devnull)
if not returncode == 0:
print('I am terribly sorry but due to an error this run was aborted.')
exit(returncode)
def _manipulate_splash(basedir, distribution_description):
'''
Writes the distribution description into the splash.bmp file
@param basedir: the base directory in which to look for the splash file
@param distribution_description: the textual name to be printed into the splash screen
'''
splash_files = glob.glob(basedir + '/plugins/org.eclipse.platform*/splash.bmp')
if not len(splash_files) == 1:
print('Error: splash.bmp not found in {d}'.format(d=basedir))
exit(-1)
os.popen('convert -background \'#00000000\' -transparent \'#00000000\' -pointsize 14 -font Nimbus-Sans-Regular -fill white label:\'{d}\' miff:- | composite -gravity northeast -geometry +10+200 - {f} {f}'.format(d=distribution_description, f=os.path.abspath(splash_files[0])))
def _move_archive(archive, done_dir):
'''
Moves the source archive to the 'done' directory.
@param archive: the path to the archive
@param done_dir: the directory where the archive will be moved
'''
if not os.path.exists(done_dir):
os.mkdir(done_dir)
shutil.move(archive, done_dir)
def assemble(configpath, distribution_name, distribution_description, eclipse_binary):
# Make sure the download dir exists.
download_dir = os.path.abspath(os.path.join(configpath, REL_PATH_SRC_DIR))
if not (os.path.exists(download_dir) and os.path.isdir(download_dir)):
exit('The download source directory does not exist: {dir}'.format(dir=download_dir))
# Make sure the work dir exists. Delete and re-create it if it exists.
work_dir = os.path.abspath(os.path.join(configpath, REL_PATH_WORK_DIR))
if os.path.exists(work_dir):
_call_executable(['rm', '-rf', work_dir])
os.mkdir(work_dir)
# Make sure the target dir exists
target_dir = os.path.abspath(os.path.join(configpath, REL_PATH_TARGET_DIR))
if not os.path.exists(target_dir):
os.mkdir(target_dir)
for (archive, platform, filetype, platform_workdir) in _get_distribution_info(download_dir, work_dir):
print('Assembling {dist} for {plat}...'.format(dist=distribution_name, plat=platform))
print(' -extracting archive')
_extract_archive(archive, filetype, platform_workdir)
print(' -renaming target')
destination = os.path.join(platform_workdir, distribution_name)
os.rename(os.path.join(platform_workdir, 'eclipse'), destination)
print(' -installing IUs')
_install(destination, configpath, eclipse_binary)
print(' -manipulating splash screen')
_manipulate_splash(destination, distribution_description)
print(' -creating archive')
_create_archive(platform_workdir, filetype, platform, distribution_name, target_dir)
print(' -moving source archive')
done_dir=os.path.abspath(os.path.join(configpath, REL_PATH_DONE_DIR))
_move_archive(archive, done_dir)
print(' -done.')
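# A minimal usage sketch (illustrative only; paths and names below are
# placeholders):
#
#   assemble(configpath='./my-distro',
#            distribution_name='my-eclipse',
#            distribution_description='My Eclipse Distribution',
#            eclipse_binary='./installer/eclipse/eclipse')
#
# The config directory is expected to contain a 'source' folder holding the
# eclipse-SDK-* archives plus the *.iulist / *.repolist files used by _install.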
|
|
# Copyright (C) 2012 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# vim: tabstop=4 shiftwidth=4 softtabstop=4
import unittest
import logging
import struct
import array
import inspect
from nose.tools import *
from ryu.ofproto import ether, inet
from ryu.lib.packet import *
from ryu.lib import addrconv
LOG = logging.getLogger('test_packet')
class TestPacket(unittest.TestCase):
""" Test case for packet
"""
dst_mac = 'aa:aa:aa:aa:aa:aa'
src_mac = 'bb:bb:bb:bb:bb:bb'
dst_mac_bin = addrconv.mac.text_to_bin(dst_mac)
src_mac_bin = addrconv.mac.text_to_bin(src_mac)
dst_ip = '192.168.128.10'
src_ip = '192.168.122.20'
dst_ip_bin = addrconv.ipv4.text_to_bin(dst_ip)
src_port = 50001
dst_port = 50002
src_ip_bin = addrconv.ipv4.text_to_bin(src_ip)
payload = '\x06\x06\x47\x50\x00\x00\x00\x00' \
+ '\xcd\xc5\x00\x00\x00\x00\x00\x00' \
+ '\x10\x11\x12\x13\x14\x15\x16\x17' \
+ '\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f'
def get_protocols(self, pkt):
protocols = {}
for p in pkt:
if hasattr(p, 'protocol_name'):
protocols[p.protocol_name] = p
else:
protocols['payload'] = p
return protocols
def setUp(self):
pass
def tearDown(self):
pass
def test_arp(self):
# build packet
e = ethernet.ethernet(self.dst_mac, self.src_mac,
ether.ETH_TYPE_ARP)
a = arp.arp(1, ether.ETH_TYPE_IP, 6, 4, 2,
self.src_mac, self.src_ip, self.dst_mac,
self.dst_ip)
p = packet.Packet()
p.add_protocol(e)
p.add_protocol(a)
p.serialize()
# ethernet !6s6sH
e_buf = self.dst_mac_bin \
+ self.src_mac_bin \
+ '\x08\x06'
# arp !HHBBH6sI6sI
a_buf = '\x00\x01' \
+ '\x08\x00' \
+ '\x06' \
+ '\x04' \
+ '\x00\x02' \
+ self.src_mac_bin \
+ self.src_ip_bin \
+ self.dst_mac_bin \
+ self.dst_ip_bin
buf = e_buf + a_buf
eq_(buf, p.data)
# parse
pkt = packet.Packet(array.array('B', p.data))
protocols = self.get_protocols(pkt)
p_eth = protocols['ethernet']
p_arp = protocols['arp']
# ethernet
ok_(p_eth)
eq_(self.dst_mac, p_eth.dst)
eq_(self.src_mac, p_eth.src)
eq_(ether.ETH_TYPE_ARP, p_eth.ethertype)
# arp
ok_(p_arp)
eq_(1, p_arp.hwtype)
eq_(ether.ETH_TYPE_IP, p_arp.proto)
eq_(6, p_arp.hlen)
eq_(4, p_arp.plen)
eq_(2, p_arp.opcode)
eq_(self.src_mac, p_arp.src_mac)
eq_(self.src_ip, p_arp.src_ip)
eq_(self.dst_mac, p_arp.dst_mac)
eq_(self.dst_ip, p_arp.dst_ip)
# to string
eth_values = {'dst': self.dst_mac,
'src': self.src_mac,
'ethertype': ether.ETH_TYPE_ARP}
_eth_str = ','.join(['%s=%s' % (k, repr(eth_values[k]))
for k, v in inspect.getmembers(p_eth)
if k in eth_values])
eth_str = '%s(%s)' % (ethernet.ethernet.__name__, _eth_str)
arp_values = {'hwtype': 1,
'proto': ether.ETH_TYPE_IP,
'hlen': 6,
'plen': 4,
'opcode': 2,
'src_mac': self.src_mac,
'dst_mac': self.dst_mac,
'src_ip': self.src_ip,
'dst_ip': self.dst_ip}
_arp_str = ','.join(['%s=%s' % (k, repr(arp_values[k]))
for k, v in inspect.getmembers(p_arp)
if k in arp_values])
arp_str = '%s(%s)' % (arp.arp.__name__, _arp_str)
pkt_str = '%s, %s' % (eth_str, arp_str)
eq_(eth_str, str(p_eth))
eq_(eth_str, repr(p_eth))
eq_(arp_str, str(p_arp))
eq_(arp_str, repr(p_arp))
eq_(pkt_str, str(pkt))
eq_(pkt_str, repr(pkt))
def test_vlan_arp(self):
        # build packet
e = ethernet.ethernet(self.dst_mac, self.src_mac,
ether.ETH_TYPE_8021Q)
v = vlan.vlan(0b111, 0b1, 3, ether.ETH_TYPE_ARP)
a = arp.arp(1, ether.ETH_TYPE_IP, 6, 4, 2,
self.src_mac, self.src_ip, self.dst_mac,
self.dst_ip)
p = packet.Packet()
p.add_protocol(e)
p.add_protocol(v)
p.add_protocol(a)
p.serialize()
# ethernet !6s6sH
e_buf = self.dst_mac_bin \
+ self.src_mac_bin \
+ '\x81\x00'
# vlan !HH
v_buf = '\xF0\x03' \
+ '\x08\x06'
# arp !HHBBH6sI6sI
a_buf = '\x00\x01' \
+ '\x08\x00' \
+ '\x06' \
+ '\x04' \
+ '\x00\x02' \
+ self.src_mac_bin \
+ self.src_ip_bin \
+ self.dst_mac_bin \
+ self.dst_ip_bin
buf = e_buf + v_buf + a_buf
eq_(buf, p.data)
# parse
pkt = packet.Packet(array.array('B', p.data))
protocols = self.get_protocols(pkt)
p_eth = protocols['ethernet']
p_vlan = protocols['vlan']
p_arp = protocols['arp']
# ethernet
ok_(p_eth)
eq_(self.dst_mac, p_eth.dst)
eq_(self.src_mac, p_eth.src)
eq_(ether.ETH_TYPE_8021Q, p_eth.ethertype)
# vlan
ok_(p_vlan)
eq_(0b111, p_vlan.pcp)
eq_(0b1, p_vlan.cfi)
eq_(3, p_vlan.vid)
eq_(ether.ETH_TYPE_ARP, p_vlan.ethertype)
# arp
ok_(p_arp)
eq_(1, p_arp.hwtype)
eq_(ether.ETH_TYPE_IP, p_arp.proto)
eq_(6, p_arp.hlen)
eq_(4, p_arp.plen)
eq_(2, p_arp.opcode)
eq_(self.src_mac, p_arp.src_mac)
eq_(self.src_ip, p_arp.src_ip)
eq_(self.dst_mac, p_arp.dst_mac)
eq_(self.dst_ip, p_arp.dst_ip)
# to string
eth_values = {'dst': self.dst_mac,
'src': self.src_mac,
'ethertype': ether.ETH_TYPE_8021Q}
_eth_str = ','.join(['%s=%s' % (k, repr(eth_values[k]))
for k, v in inspect.getmembers(p_eth)
if k in eth_values])
eth_str = '%s(%s)' % (ethernet.ethernet.__name__, _eth_str)
vlan_values = {'pcp': 0b111,
'cfi': 0b1,
'vid': 3,
'ethertype': ether.ETH_TYPE_ARP}
_vlan_str = ','.join(['%s=%s' % (k, repr(vlan_values[k]))
for k, v in inspect.getmembers(p_vlan)
if k in vlan_values])
vlan_str = '%s(%s)' % (vlan.vlan.__name__, _vlan_str)
arp_values = {'hwtype': 1,
'proto': ether.ETH_TYPE_IP,
'hlen': 6,
'plen': 4,
'opcode': 2,
'src_mac': self.src_mac,
'dst_mac': self.dst_mac,
'src_ip': self.src_ip,
'dst_ip': self.dst_ip}
_arp_str = ','.join(['%s=%s' % (k, repr(arp_values[k]))
for k, v in inspect.getmembers(p_arp)
if k in arp_values])
arp_str = '%s(%s)' % (arp.arp.__name__, _arp_str)
pkt_str = '%s, %s, %s' % (eth_str, vlan_str, arp_str)
eq_(eth_str, str(p_eth))
eq_(eth_str, repr(p_eth))
eq_(vlan_str, str(p_vlan))
eq_(vlan_str, repr(p_vlan))
eq_(arp_str, str(p_arp))
eq_(arp_str, repr(p_arp))
eq_(pkt_str, str(pkt))
eq_(pkt_str, repr(pkt))
def test_ipv4_udp(self):
        # build packet
e = ethernet.ethernet(self.dst_mac, self.src_mac,
ether.ETH_TYPE_IP)
ip = ipv4.ipv4(4, 5, 1, 0, 3, 1, 4, 64, inet.IPPROTO_UDP, 0,
self.src_ip, self.dst_ip)
u = udp.udp(0x190F, 0x1F90, 0, 0)
p = packet.Packet()
p.add_protocol(e)
p.add_protocol(ip)
p.add_protocol(u)
p.add_protocol(self.payload)
p.serialize()
# ethernet !6s6sH
e_buf = self.dst_mac_bin \
+ self.src_mac_bin \
+ '\x08\x00'
# ipv4 !BBHHHBBHII
ip_buf = '\x45' \
+ '\x01' \
+ '\x00\x3C' \
+ '\x00\x03' \
+ '\x20\x04' \
+ '\x40' \
+ '\x11' \
+ '\x00\x00' \
+ self.src_ip_bin \
+ self.dst_ip_bin
# udp !HHHH
u_buf = '\x19\x0F' \
+ '\x1F\x90' \
+ '\x00\x28' \
+ '\x00\x00'
buf = e_buf + ip_buf + u_buf + self.payload
# parse
pkt = packet.Packet(array.array('B', p.data))
protocols = self.get_protocols(pkt)
p_eth = protocols['ethernet']
p_ipv4 = protocols['ipv4']
p_udp = protocols['udp']
# ethernet
ok_(p_eth)
eq_(self.dst_mac, p_eth.dst)
eq_(self.src_mac, p_eth.src)
eq_(ether.ETH_TYPE_IP, p_eth.ethertype)
# ipv4
ok_(p_ipv4)
eq_(4, p_ipv4.version)
eq_(5, p_ipv4.header_length)
eq_(1, p_ipv4.tos)
l = len(ip_buf) + len(u_buf) + len(self.payload)
eq_(l, p_ipv4.total_length)
eq_(3, p_ipv4.identification)
eq_(1, p_ipv4.flags)
eq_(64, p_ipv4.ttl)
eq_(inet.IPPROTO_UDP, p_ipv4.proto)
eq_(self.src_ip, p_ipv4.src)
eq_(self.dst_ip, p_ipv4.dst)
t = bytearray(ip_buf)
struct.pack_into('!H', t, 10, p_ipv4.csum)
eq_(packet_utils.checksum(t), 0)
# udp
ok_(p_udp)
eq_(0x190f, p_udp.src_port)
eq_(0x1F90, p_udp.dst_port)
eq_(len(u_buf) + len(self.payload), p_udp.total_length)
eq_(0x77b2, p_udp.csum)
t = bytearray(u_buf)
struct.pack_into('!H', t, 6, p_udp.csum)
ph = struct.pack('!4s4sBBH', self.src_ip_bin, self.dst_ip_bin, 0,
17, len(u_buf) + len(self.payload))
t = ph + t + self.payload
eq_(packet_utils.checksum(t), 0)
# payload
ok_('payload' in protocols)
eq_(self.payload, protocols['payload'].tostring())
# to string
eth_values = {'dst': self.dst_mac,
'src': self.src_mac,
'ethertype': ether.ETH_TYPE_IP}
_eth_str = ','.join(['%s=%s' % (k, repr(eth_values[k]))
for k, v in inspect.getmembers(p_eth)
if k in eth_values])
eth_str = '%s(%s)' % (ethernet.ethernet.__name__, _eth_str)
ipv4_values = {'version': 4,
'header_length': 5,
'tos': 1,
'total_length': l,
'identification': 3,
'flags': 1,
'offset': p_ipv4.offset,
'ttl': 64,
'proto': inet.IPPROTO_UDP,
'csum': p_ipv4.csum,
'src': self.src_ip,
'dst': self.dst_ip,
'option': None}
_ipv4_str = ','.join(['%s=%s' % (k, repr(ipv4_values[k]))
for k, v in inspect.getmembers(p_ipv4)
if k in ipv4_values])
ipv4_str = '%s(%s)' % (ipv4.ipv4.__name__, _ipv4_str)
udp_values = {'src_port': 0x190f,
'dst_port': 0x1F90,
'total_length': len(u_buf) + len(self.payload),
'csum': 0x77b2}
_udp_str = ','.join(['%s=%s' % (k, repr(udp_values[k]))
for k, v in inspect.getmembers(p_udp)
if k in udp_values])
udp_str = '%s(%s)' % (udp.udp.__name__, _udp_str)
pkt_str = '%s, %s, %s, %s' % (eth_str, ipv4_str, udp_str,
repr(protocols['payload']))
eq_(eth_str, str(p_eth))
eq_(eth_str, repr(p_eth))
eq_(ipv4_str, str(p_ipv4))
eq_(ipv4_str, repr(p_ipv4))
eq_(udp_str, str(p_udp))
eq_(udp_str, repr(p_udp))
eq_(pkt_str, str(pkt))
eq_(pkt_str, repr(pkt))
def test_ipv4_tcp(self):
        # build packet
e = ethernet.ethernet(self.dst_mac, self.src_mac,
ether.ETH_TYPE_IP)
ip = ipv4.ipv4(4, 5, 0, 0, 0, 0, 0, 64, inet.IPPROTO_TCP, 0,
self.src_ip, self.dst_ip)
t = tcp.tcp(0x190F, 0x1F90, 0x123, 1, 6, 0b101010, 2048, 0, 0x6f,
'\x01\x02')
p = packet.Packet()
p.add_protocol(e)
p.add_protocol(ip)
p.add_protocol(t)
p.add_protocol(self.payload)
p.serialize()
# ethernet !6s6sH
e_buf = self.dst_mac_bin \
+ self.src_mac_bin \
+ '\x08\x00'
# ipv4 !BBHHHBBHII
ip_buf = '\x45' \
+ '\x00' \
+ '\x00\x4C' \
+ '\x00\x00' \
+ '\x00\x00' \
+ '\x40' \
+ '\x06' \
+ '\x00\x00' \
+ self.src_ip_bin \
+ self.dst_ip_bin
# tcp !HHIIBBHHH + option
t_buf = '\x19\x0F' \
+ '\x1F\x90' \
+ '\x00\x00\x01\x23' \
+ '\x00\x00\x00\x01' \
+ '\x60' \
+ '\x2A' \
+ '\x08\x00' \
+ '\x00\x00' \
+ '\x00\x6F' \
+ '\x01\x02\x00\x00'
buf = e_buf + ip_buf + t_buf + self.payload
# parse
pkt = packet.Packet(array.array('B', p.data))
protocols = self.get_protocols(pkt)
p_eth = protocols['ethernet']
p_ipv4 = protocols['ipv4']
p_tcp = protocols['tcp']
# ethernet
ok_(p_eth)
eq_(self.dst_mac, p_eth.dst)
eq_(self.src_mac, p_eth.src)
eq_(ether.ETH_TYPE_IP, p_eth.ethertype)
# ipv4
ok_(p_ipv4)
eq_(4, p_ipv4.version)
eq_(5, p_ipv4.header_length)
eq_(0, p_ipv4.tos)
l = len(ip_buf) + len(t_buf) + len(self.payload)
eq_(l, p_ipv4.total_length)
eq_(0, p_ipv4.identification)
eq_(0, p_ipv4.flags)
eq_(64, p_ipv4.ttl)
eq_(inet.IPPROTO_TCP, p_ipv4.proto)
eq_(self.src_ip, p_ipv4.src)
eq_(self.dst_ip, p_ipv4.dst)
t = bytearray(ip_buf)
struct.pack_into('!H', t, 10, p_ipv4.csum)
eq_(packet_utils.checksum(t), 0)
# tcp
ok_(p_tcp)
eq_(0x190f, p_tcp.src_port)
eq_(0x1F90, p_tcp.dst_port)
eq_(0x123, p_tcp.seq)
eq_(1, p_tcp.ack)
eq_(6, p_tcp.offset)
eq_(0b101010, p_tcp.bits)
eq_(2048, p_tcp.window_size)
eq_(0x6f, p_tcp.urgent)
eq_(len(t_buf), len(p_tcp))
t = bytearray(t_buf)
struct.pack_into('!H', t, 16, p_tcp.csum)
ph = struct.pack('!4s4sBBH', self.src_ip_bin, self.dst_ip_bin, 0,
6, len(t_buf) + len(self.payload))
t = ph + t + self.payload
eq_(packet_utils.checksum(t), 0)
# payload
ok_('payload' in protocols)
eq_(self.payload, protocols['payload'].tostring())
# to string
eth_values = {'dst': self.dst_mac,
'src': self.src_mac,
'ethertype': ether.ETH_TYPE_IP}
_eth_str = ','.join(['%s=%s' % (k, repr(eth_values[k]))
for k, v in inspect.getmembers(p_eth)
if k in eth_values])
eth_str = '%s(%s)' % (ethernet.ethernet.__name__, _eth_str)
ipv4_values = {'version': 4,
'header_length': 5,
'tos': 0,
'total_length': l,
'identification': 0,
'flags': 0,
'offset': p_ipv4.offset,
'ttl': 64,
'proto': inet.IPPROTO_TCP,
'csum': p_ipv4.csum,
'src': self.src_ip,
'dst': self.dst_ip,
'option': None}
_ipv4_str = ','.join(['%s=%s' % (k, repr(ipv4_values[k]))
for k, v in inspect.getmembers(p_ipv4)
if k in ipv4_values])
ipv4_str = '%s(%s)' % (ipv4.ipv4.__name__, _ipv4_str)
tcp_values = {'src_port': 0x190f,
'dst_port': 0x1F90,
'seq': 0x123,
'ack': 1,
'offset': 6,
'bits': 0b101010,
'window_size': 2048,
'csum': p_tcp.csum,
'urgent': 0x6f,
'option': p_tcp.option}
_tcp_str = ','.join(['%s=%s' % (k, repr(tcp_values[k]))
for k, v in inspect.getmembers(p_tcp)
if k in tcp_values])
tcp_str = '%s(%s)' % (tcp.tcp.__name__, _tcp_str)
pkt_str = '%s, %s, %s, %s' % (eth_str, ipv4_str, tcp_str,
repr(protocols['payload']))
eq_(eth_str, str(p_eth))
eq_(eth_str, repr(p_eth))
eq_(ipv4_str, str(p_ipv4))
eq_(ipv4_str, repr(p_ipv4))
eq_(tcp_str, str(p_tcp))
eq_(tcp_str, repr(p_tcp))
eq_(pkt_str, str(pkt))
eq_(pkt_str, repr(pkt))
def test_ipv4_sctp(self):
# build packet
e = ethernet.ethernet()
ip = ipv4.ipv4(proto=inet.IPPROTO_SCTP)
s = sctp.sctp(chunks=[sctp.chunk_data(payload_data=self.payload)])
p = e / ip / s
p.serialize()
ipaddr = addrconv.ipv4.text_to_bin('0.0.0.0')
# ethernet !6s6sH
e_buf = '\xff\xff\xff\xff\xff\xff' \
+ '\x00\x00\x00\x00\x00\x00' \
+ '\x08\x00'
# ipv4 !BBHHHBBHII
ip_buf = '\x45' \
+ '\x00' \
+ '\x00\x50' \
+ '\x00\x00' \
+ '\x00\x00' \
+ '\xff' \
+ '\x84' \
+ '\x00\x00' \
+ ipaddr \
+ ipaddr
# sctp !HHII + chunk_data !BBHIHHI + payload
s_buf = '\x00\x00' \
+ '\x00\x00' \
+ '\x00\x00\x00\x00' \
+ '\x00\x00\x00\x00' \
+ '\x00' \
+ '\x00' \
+ '\x00\x00' \
+ '\x00\x00\x00\x00' \
+ '\x00\x00' \
+ '\x00\x00' \
+ '\x00\x00\x00\x00' \
+ self.payload
buf = e_buf + ip_buf + s_buf
# parse
pkt = packet.Packet(array.array('B', p.data))
protocols = self.get_protocols(pkt)
p_eth = protocols['ethernet']
p_ipv4 = protocols['ipv4']
p_sctp = protocols['sctp']
# ethernet
ok_(p_eth)
eq_('ff:ff:ff:ff:ff:ff', p_eth.dst)
eq_('00:00:00:00:00:00', p_eth.src)
eq_(ether.ETH_TYPE_IP, p_eth.ethertype)
# ipv4
ok_(p_ipv4)
eq_(4, p_ipv4.version)
eq_(5, p_ipv4.header_length)
eq_(0, p_ipv4.tos)
l = len(ip_buf) + len(s_buf)
eq_(l, p_ipv4.total_length)
eq_(0, p_ipv4.identification)
eq_(0, p_ipv4.flags)
eq_(255, p_ipv4.ttl)
eq_(inet.IPPROTO_SCTP, p_ipv4.proto)
eq_('10.0.0.1', p_ipv4.src)
eq_('10.0.0.2', p_ipv4.dst)
t = bytearray(ip_buf)
struct.pack_into('!H', t, 10, p_ipv4.csum)
eq_(packet_utils.checksum(t), 0x1403)
# sctp
ok_(p_sctp)
eq_(1, p_sctp.src_port)
eq_(1, p_sctp.dst_port)
eq_(0, p_sctp.vtag)
assert isinstance(p_sctp.chunks[0], sctp.chunk_data)
eq_(0, p_sctp.chunks[0]._type)
eq_(0, p_sctp.chunks[0].unordered)
eq_(0, p_sctp.chunks[0].begin)
eq_(0, p_sctp.chunks[0].end)
eq_(16 + len(self.payload), p_sctp.chunks[0].length)
eq_(0, p_sctp.chunks[0].tsn)
eq_(0, p_sctp.chunks[0].sid)
eq_(0, p_sctp.chunks[0].seq)
eq_(0, p_sctp.chunks[0].payload_id)
eq_(self.payload, p_sctp.chunks[0].payload_data)
eq_(len(s_buf), len(p_sctp))
# to string
eth_values = {'dst': 'ff:ff:ff:ff:ff:ff',
'src': '00:00:00:00:00:00',
'ethertype': ether.ETH_TYPE_IP}
_eth_str = ','.join(['%s=%s' % (k, repr(eth_values[k]))
for k, v in inspect.getmembers(p_eth)
if k in eth_values])
eth_str = '%s(%s)' % (ethernet.ethernet.__name__, _eth_str)
ipv4_values = {'version': 4,
'header_length': 5,
'tos': 0,
'total_length': l,
'identification': 0,
'flags': 0,
'offset': 0,
'ttl': 255,
'proto': inet.IPPROTO_SCTP,
'csum': p_ipv4.csum,
'src': '10.0.0.1',
'dst': '10.0.0.2',
'option': None}
_ipv4_str = ','.join(['%s=%s' % (k, repr(ipv4_values[k]))
for k, v in inspect.getmembers(p_ipv4)
if k in ipv4_values])
ipv4_str = '%s(%s)' % (ipv4.ipv4.__name__, _ipv4_str)
data_values = {'unordered': 0,
'begin': 0,
'end': 0,
'length': 16 + len(self.payload),
'tsn': 0,
'sid': 0,
'seq': 0,
'payload_id': 0,
'payload_data': self.payload}
_data_str = ','.join(['%s=%s' % (k, repr(data_values[k]))
for k in sorted(data_values.keys())])
data_str = '[%s(%s)]' % (sctp.chunk_data.__name__, _data_str)
sctp_values = {'src_port': 1,
'dst_port': 1,
'vtag': 0,
'csum': p_sctp.csum,
'chunks': data_str}
_sctp_str = ','.join(['%s=%s' % (k, sctp_values[k])
for k, _ in inspect.getmembers(p_sctp)
if k in sctp_values])
sctp_str = '%s(%s)' % (sctp.sctp.__name__, _sctp_str)
pkt_str = '%s, %s, %s' % (eth_str, ipv4_str, sctp_str)
eq_(eth_str, str(p_eth))
eq_(eth_str, repr(p_eth))
eq_(ipv4_str, str(p_ipv4))
eq_(ipv4_str, repr(p_ipv4))
eq_(sctp_str, str(p_sctp))
eq_(sctp_str, repr(p_sctp))
eq_(pkt_str, str(pkt))
eq_(pkt_str, repr(pkt))
def test_ipv4_icmp(self):
        # build packet
e = ethernet.ethernet()
ip = ipv4.ipv4(proto=inet.IPPROTO_ICMP)
ic = icmp.icmp()
p = e / ip / ic
p.serialize()
ipaddr = addrconv.ipv4.text_to_bin('0.0.0.0')
# ethernet !6s6sH
e_buf = '\xff\xff\xff\xff\xff\xff' \
+ '\x00\x00\x00\x00\x00\x00' \
+ '\x08\x00'
# ipv4 !BBHHHBBHII
ip_buf = '\x45' \
+ '\x00' \
+ '\x00\x1c' \
+ '\x00\x00' \
+ '\x00\x00' \
+ '\xff' \
+ '\x01' \
+ '\x00\x00' \
+ ipaddr \
+ ipaddr
# icmp !BBH + echo !HH
ic_buf = '\x08' \
+ '\x00' \
+ '\x00\x00' \
+ '\x00\x00' \
+ '\x00\x00'
buf = e_buf + ip_buf + ic_buf
# parse
pkt = packet.Packet(array.array('B', p.data))
protocols = self.get_protocols(pkt)
p_eth = protocols['ethernet']
p_ipv4 = protocols['ipv4']
p_icmp = protocols['icmp']
# ethernet
ok_(p_eth)
eq_('ff:ff:ff:ff:ff:ff', p_eth.dst)
eq_('00:00:00:00:00:00', p_eth.src)
eq_(ether.ETH_TYPE_IP, p_eth.ethertype)
# ipv4
ok_(p_ipv4)
eq_(4, p_ipv4.version)
eq_(5, p_ipv4.header_length)
eq_(0, p_ipv4.tos)
l = len(ip_buf) + len(ic_buf)
eq_(l, p_ipv4.total_length)
eq_(0, p_ipv4.identification)
eq_(0, p_ipv4.flags)
eq_(255, p_ipv4.ttl)
eq_(inet.IPPROTO_ICMP, p_ipv4.proto)
eq_('10.0.0.1', p_ipv4.src)
eq_('10.0.0.2', p_ipv4.dst)
t = bytearray(ip_buf)
struct.pack_into('!H', t, 10, p_ipv4.csum)
eq_(packet_utils.checksum(t), 0x1403)
# icmp
ok_(p_icmp)
eq_(8, p_icmp.type)
eq_(0, p_icmp.code)
eq_(0, p_icmp.data.id)
eq_(0, p_icmp.data.seq)
eq_(len(ic_buf), len(p_icmp))
t = bytearray(ic_buf)
struct.pack_into('!H', t, 2, p_icmp.csum)
eq_(packet_utils.checksum(t), 0)
# to string
eth_values = {'dst': 'ff:ff:ff:ff:ff:ff',
'src': '00:00:00:00:00:00',
'ethertype': ether.ETH_TYPE_IP}
_eth_str = ','.join(['%s=%s' % (k, repr(eth_values[k]))
for k, _ in inspect.getmembers(p_eth)
if k in eth_values])
eth_str = '%s(%s)' % (ethernet.ethernet.__name__, _eth_str)
ipv4_values = {'version': 4,
'header_length': 5,
'tos': 0,
'total_length': l,
'identification': 0,
'flags': 0,
'offset': p_ipv4.offset,
'ttl': 255,
'proto': inet.IPPROTO_ICMP,
'csum': p_ipv4.csum,
'src': '10.0.0.1',
'dst': '10.0.0.2',
'option': None}
_ipv4_str = ','.join(['%s=%s' % (k, repr(ipv4_values[k]))
for k, _ in inspect.getmembers(p_ipv4)
if k in ipv4_values])
ipv4_str = '%s(%s)' % (ipv4.ipv4.__name__, _ipv4_str)
echo_values = {'id': 0,
'seq': 0,
'data': None}
_echo_str = ','.join(['%s=%s' % (k, repr(echo_values[k]))
for k in sorted(echo_values.keys())])
echo_str = '%s(%s)' % (icmp.echo.__name__, _echo_str)
icmp_values = {'type': 8,
'code': 0,
'csum': p_icmp.csum,
'data': echo_str}
_icmp_str = ','.join(['%s=%s' % (k, icmp_values[k])
for k, _ in inspect.getmembers(p_icmp)
if k in icmp_values])
icmp_str = '%s(%s)' % (icmp.icmp.__name__, _icmp_str)
pkt_str = '%s, %s, %s' % (eth_str, ipv4_str, icmp_str)
eq_(eth_str, str(p_eth))
eq_(eth_str, repr(p_eth))
eq_(ipv4_str, str(p_ipv4))
eq_(ipv4_str, repr(p_ipv4))
eq_(icmp_str, str(p_icmp))
eq_(icmp_str, repr(p_icmp))
eq_(pkt_str, str(pkt))
eq_(pkt_str, repr(pkt))
def test_ipv6_udp(self):
# build packet
e = ethernet.ethernet(ethertype=ether.ETH_TYPE_IPV6)
ip = ipv6.ipv6(nxt=inet.IPPROTO_UDP)
u = udp.udp()
p = e / ip / u / self.payload
p.serialize()
ipaddr = addrconv.ipv6.text_to_bin('::')
# ethernet !6s6sH
e_buf = '\xff\xff\xff\xff\xff\xff' \
+ '\x00\x00\x00\x00\x00\x00' \
+ '\x86\xdd'
# ipv6 !IHBB16s16s'
ip_buf = '\x60\x00\x00\x00' \
+ '\x00\x00' \
+ '\x11' \
+ '\xff' \
+ '\x00\x00' \
+ ipaddr \
+ ipaddr
# udp !HHHH
u_buf = '\x00\x00' \
+ '\x00\x00' \
+ '\x00\x28' \
+ '\x00\x00'
buf = e_buf + ip_buf + u_buf + self.payload
# parse
pkt = packet.Packet(array.array('B', p.data))
protocols = self.get_protocols(pkt)
p_eth = protocols['ethernet']
p_ipv6 = protocols['ipv6']
p_udp = protocols['udp']
# ethernet
ok_(p_eth)
eq_('ff:ff:ff:ff:ff:ff', p_eth.dst)
eq_('00:00:00:00:00:00', p_eth.src)
eq_(ether.ETH_TYPE_IPV6, p_eth.ethertype)
# ipv6
ok_(p_ipv6)
eq_(6, p_ipv6.version)
eq_(0, p_ipv6.traffic_class)
eq_(0, p_ipv6.flow_label)
eq_(len(u_buf) + len(self.payload), p_ipv6.payload_length)
eq_(inet.IPPROTO_UDP, p_ipv6.nxt)
eq_(255, p_ipv6.hop_limit)
eq_('10::10', p_ipv6.src)
eq_('20::20', p_ipv6.dst)
# udp
ok_(p_udp)
eq_(1, p_udp.src_port)
eq_(1, p_udp.dst_port)
eq_(len(u_buf) + len(self.payload), p_udp.total_length)
eq_(0x2B60, p_udp.csum)
t = bytearray(u_buf)
struct.pack_into('!H', t, 6, p_udp.csum)
ph = struct.pack('!16s16sI3xB', ipaddr, ipaddr,
len(u_buf) + len(self.payload), 17)
t = ph + t + self.payload
eq_(packet_utils.checksum(t), 0x62)
# payload
ok_('payload' in protocols)
eq_(self.payload, protocols['payload'].tostring())
# to string
eth_values = {'dst': 'ff:ff:ff:ff:ff:ff',
'src': '00:00:00:00:00:00',
'ethertype': ether.ETH_TYPE_IPV6}
_eth_str = ','.join(['%s=%s' % (k, repr(eth_values[k]))
for k, v in inspect.getmembers(p_eth)
if k in eth_values])
eth_str = '%s(%s)' % (ethernet.ethernet.__name__, _eth_str)
ipv6_values = {'version': 6,
'traffic_class': 0,
'flow_label': 0,
'payload_length': len(u_buf) + len(self.payload),
'nxt': inet.IPPROTO_UDP,
'hop_limit': 255,
'src': '10::10',
'dst': '20::20',
'ext_hdrs': []}
_ipv6_str = ','.join(['%s=%s' % (k, repr(ipv6_values[k]))
for k, v in inspect.getmembers(p_ipv6)
if k in ipv6_values])
ipv6_str = '%s(%s)' % (ipv6.ipv6.__name__, _ipv6_str)
udp_values = {'src_port': 1,
'dst_port': 1,
'total_length': len(u_buf) + len(self.payload),
'csum': 0x2B60}
_udp_str = ','.join(['%s=%s' % (k, repr(udp_values[k]))
for k, v in inspect.getmembers(p_udp)
if k in udp_values])
udp_str = '%s(%s)' % (udp.udp.__name__, _udp_str)
pkt_str = '%s, %s, %s, %s' % (eth_str, ipv6_str, udp_str,
repr(protocols['payload']))
eq_(eth_str, str(p_eth))
eq_(eth_str, repr(p_eth))
eq_(ipv6_str, str(p_ipv6))
eq_(ipv6_str, repr(p_ipv6))
eq_(udp_str, str(p_udp))
eq_(udp_str, repr(p_udp))
eq_(pkt_str, str(pkt))
eq_(pkt_str, repr(pkt))
def test_ipv6_tcp(self):
# build packet
e = ethernet.ethernet(ethertype=ether.ETH_TYPE_IPV6)
ip = ipv6.ipv6()
t = tcp.tcp(option='\x01\x02')
p = e / ip / t / self.payload
p.serialize()
ipaddr = addrconv.ipv6.text_to_bin('::')
# ethernet !6s6sH
e_buf = '\xff\xff\xff\xff\xff\xff' \
+ '\x00\x00\x00\x00\x00\x00' \
+ '\x86\xdd'
# ipv6 !IHBB16s16s'
ip_buf = '\x60\x00\x00\x00' \
+ '\x00\x00' \
+ '\x06' \
+ '\xff' \
+ '\x00\x00' \
+ ipaddr \
+ ipaddr
# tcp !HHIIBBHHH + option
t_buf = '\x00\x00' \
+ '\x00\x00' \
+ '\x00\x00\x00\x00' \
+ '\x00\x00\x00\x00' \
+ '\x60' \
+ '\x00' \
+ '\x00\x00' \
+ '\x00\x00' \
+ '\x00\x00' \
+ '\x01\x02\x00\x00'
buf = e_buf + ip_buf + t_buf + self.payload
# parse
pkt = packet.Packet(array.array('B', p.data))
protocols = self.get_protocols(pkt)
p_eth = protocols['ethernet']
p_ipv6 = protocols['ipv6']
p_tcp = protocols['tcp']
# ethernet
ok_(p_eth)
eq_('ff:ff:ff:ff:ff:ff', p_eth.dst)
eq_('00:00:00:00:00:00', p_eth.src)
eq_(ether.ETH_TYPE_IPV6, p_eth.ethertype)
# ipv6
ok_(p_ipv6)
eq_(6, p_ipv6.version)
eq_(0, p_ipv6.traffic_class)
eq_(0, p_ipv6.flow_label)
eq_(len(t_buf) + len(self.payload), p_ipv6.payload_length)
eq_(inet.IPPROTO_TCP, p_ipv6.nxt)
eq_(255, p_ipv6.hop_limit)
eq_('10::10', p_ipv6.src)
eq_('20::20', p_ipv6.dst)
# tcp
ok_(p_tcp)
eq_(1, p_tcp.src_port)
eq_(1, p_tcp.dst_port)
eq_(0, p_tcp.seq)
eq_(0, p_tcp.ack)
eq_(6, p_tcp.offset)
eq_(0, p_tcp.bits)
eq_(0, p_tcp.window_size)
eq_(0, p_tcp.urgent)
eq_(len(t_buf), len(p_tcp))
t = bytearray(t_buf)
struct.pack_into('!H', t, 16, p_tcp.csum)
ph = struct.pack('!16s16sI3xB', ipaddr, ipaddr,
len(t_buf) + len(self.payload), 6)
t = ph + t + self.payload
eq_(packet_utils.checksum(t), 0x62)
# payload
ok_('payload' in protocols)
eq_(self.payload, protocols['payload'].tostring())
# to string
eth_values = {'dst': 'ff:ff:ff:ff:ff:ff',
'src': '00:00:00:00:00:00',
'ethertype': ether.ETH_TYPE_IPV6}
_eth_str = ','.join(['%s=%s' % (k, repr(eth_values[k]))
for k, v in inspect.getmembers(p_eth)
if k in eth_values])
eth_str = '%s(%s)' % (ethernet.ethernet.__name__, _eth_str)
ipv6_values = {'version': 6,
'traffic_class': 0,
'flow_label': 0,
'payload_length': len(t_buf) + len(self.payload),
'nxt': inet.IPPROTO_TCP,
'hop_limit': 255,
'src': '10::10',
'dst': '20::20',
'ext_hdrs': []}
_ipv6_str = ','.join(['%s=%s' % (k, repr(ipv6_values[k]))
for k, v in inspect.getmembers(p_ipv6)
if k in ipv6_values])
ipv6_str = '%s(%s)' % (ipv6.ipv6.__name__, _ipv6_str)
tcp_values = {'src_port': 1,
'dst_port': 1,
'seq': 0,
'ack': 0,
'offset': 6,
'bits': 0,
'window_size': 0,
'csum': p_tcp.csum,
'urgent': 0,
'option': p_tcp.option}
_tcp_str = ','.join(['%s=%s' % (k, repr(tcp_values[k]))
for k, v in inspect.getmembers(p_tcp)
if k in tcp_values])
tcp_str = '%s(%s)' % (tcp.tcp.__name__, _tcp_str)
pkt_str = '%s, %s, %s, %s' % (eth_str, ipv6_str, tcp_str,
repr(protocols['payload']))
eq_(eth_str, str(p_eth))
eq_(eth_str, repr(p_eth))
eq_(ipv6_str, str(p_ipv6))
eq_(ipv6_str, repr(p_ipv6))
eq_(tcp_str, str(p_tcp))
eq_(tcp_str, repr(p_tcp))
eq_(pkt_str, str(pkt))
eq_(pkt_str, repr(pkt))
def test_ipv6_sctp(self):
# build packet
e = ethernet.ethernet(ethertype=ether.ETH_TYPE_IPV6)
ip = ipv6.ipv6(nxt=inet.IPPROTO_SCTP)
s = sctp.sctp(chunks=[sctp.chunk_data(payload_data=self.payload)])
p = e / ip / s
p.serialize()
ipaddr = addrconv.ipv6.text_to_bin('::')
# ethernet !6s6sH
e_buf = '\xff\xff\xff\xff\xff\xff' \
+ '\x00\x00\x00\x00\x00\x00' \
+ '\x86\xdd'
# ipv6 !IHBB16s16s'
ip_buf = '\x60\x00\x00\x00' \
+ '\x00\x00' \
+ '\x84' \
+ '\xff' \
+ '\x00\x00' \
+ ipaddr \
+ ipaddr
# sctp !HHII + chunk_data !BBHIHHI + payload
s_buf = '\x00\x00' \
+ '\x00\x00' \
+ '\x00\x00\x00\x00' \
+ '\x00\x00\x00\x00' \
+ '\x00' \
+ '\x00' \
+ '\x00\x00' \
+ '\x00\x00\x00\x00' \
+ '\x00\x00' \
+ '\x00\x00' \
+ '\x00\x00\x00\x00' \
+ self.payload
buf = e_buf + ip_buf + s_buf
# parse
pkt = packet.Packet(array.array('B', p.data))
protocols = self.get_protocols(pkt)
p_eth = protocols['ethernet']
p_ipv6 = protocols['ipv6']
p_sctp = protocols['sctp']
# ethernet
ok_(p_eth)
eq_('ff:ff:ff:ff:ff:ff', p_eth.dst)
eq_('00:00:00:00:00:00', p_eth.src)
eq_(ether.ETH_TYPE_IPV6, p_eth.ethertype)
# ipv6
ok_(p_ipv6)
eq_(6, p_ipv6.version)
eq_(0, p_ipv6.traffic_class)
eq_(0, p_ipv6.flow_label)
eq_(len(s_buf), p_ipv6.payload_length)
eq_(inet.IPPROTO_SCTP, p_ipv6.nxt)
eq_(255, p_ipv6.hop_limit)
eq_('10::10', p_ipv6.src)
eq_('20::20', p_ipv6.dst)
# sctp
ok_(p_sctp)
eq_(1, p_sctp.src_port)
eq_(1, p_sctp.dst_port)
eq_(0, p_sctp.vtag)
assert isinstance(p_sctp.chunks[0], sctp.chunk_data)
eq_(0, p_sctp.chunks[0]._type)
eq_(0, p_sctp.chunks[0].unordered)
eq_(0, p_sctp.chunks[0].begin)
eq_(0, p_sctp.chunks[0].end)
eq_(16 + len(self.payload), p_sctp.chunks[0].length)
eq_(0, p_sctp.chunks[0].tsn)
eq_(0, p_sctp.chunks[0].sid)
eq_(0, p_sctp.chunks[0].seq)
eq_(0, p_sctp.chunks[0].payload_id)
eq_(self.payload, p_sctp.chunks[0].payload_data)
eq_(len(s_buf), len(p_sctp))
# to string
eth_values = {'dst': 'ff:ff:ff:ff:ff:ff',
'src': '00:00:00:00:00:00',
'ethertype': ether.ETH_TYPE_IPV6}
_eth_str = ','.join(['%s=%s' % (k, repr(eth_values[k]))
for k, v in inspect.getmembers(p_eth)
if k in eth_values])
eth_str = '%s(%s)' % (ethernet.ethernet.__name__, _eth_str)
ipv6_values = {'version': 6,
'traffic_class': 0,
'flow_label': 0,
'payload_length': len(s_buf),
'nxt': inet.IPPROTO_SCTP,
'hop_limit': 255,
'src': '10::10',
'dst': '20::20',
'ext_hdrs': []}
_ipv6_str = ','.join(['%s=%s' % (k, repr(ipv6_values[k]))
for k, v in inspect.getmembers(p_ipv6)
if k in ipv6_values])
ipv6_str = '%s(%s)' % (ipv6.ipv6.__name__, _ipv6_str)
data_values = {'unordered': 0,
'begin': 0,
'end': 0,
'length': 16 + len(self.payload),
'tsn': 0,
'sid': 0,
'seq': 0,
'payload_id': 0,
'payload_data': self.payload}
_data_str = ','.join(['%s=%s' % (k, repr(data_values[k]))
for k in sorted(data_values.keys())])
data_str = '[%s(%s)]' % (sctp.chunk_data.__name__, _data_str)
sctp_values = {'src_port': 1,
'dst_port': 1,
'vtag': 0,
'csum': p_sctp.csum,
'chunks': data_str}
_sctp_str = ','.join(['%s=%s' % (k, sctp_values[k])
for k, _ in inspect.getmembers(p_sctp)
if k in sctp_values])
sctp_str = '%s(%s)' % (sctp.sctp.__name__, _sctp_str)
pkt_str = '%s, %s, %s' % (eth_str, ipv6_str, sctp_str)
eq_(eth_str, str(p_eth))
eq_(eth_str, repr(p_eth))
eq_(ipv6_str, str(p_ipv6))
eq_(ipv6_str, repr(p_ipv6))
eq_(sctp_str, str(p_sctp))
eq_(sctp_str, repr(p_sctp))
eq_(pkt_str, str(pkt))
eq_(pkt_str, repr(pkt))
def test_ipv6_icmpv6(self):
# build packet
e = ethernet.ethernet(ethertype=ether.ETH_TYPE_IPV6)
ip = ipv6.ipv6(nxt=inet.IPPROTO_ICMPV6)
ic = icmpv6.icmpv6()
p = e / ip / ic
p.serialize()
ipaddr = addrconv.ipv6.text_to_bin('::')
# ethernet !6s6sH
e_buf = '\xff\xff\xff\xff\xff\xff' \
+ '\x00\x00\x00\x00\x00\x00' \
+ '\x86\xdd'
# ipv6 !IHBB16s16s'
ip_buf = '\x60\x00\x00\x00' \
+ '\x00\x00' \
+ '\x3a' \
+ '\xff' \
+ '\x00\x00' \
+ ipaddr \
+ ipaddr
# icmpv6 !BBH
ic_buf = '\x00' \
+ '\x00' \
+ '\x00\x00'
buf = e_buf + ip_buf + ic_buf
# parse
pkt = packet.Packet(array.array('B', p.data))
protocols = self.get_protocols(pkt)
p_eth = protocols['ethernet']
p_ipv6 = protocols['ipv6']
p_icmpv6 = protocols['icmpv6']
# ethernet
ok_(p_eth)
eq_('ff:ff:ff:ff:ff:ff', p_eth.dst)
eq_('00:00:00:00:00:00', p_eth.src)
eq_(ether.ETH_TYPE_IPV6, p_eth.ethertype)
# ipv6
ok_(p_ipv6)
eq_(6, p_ipv6.version)
eq_(0, p_ipv6.traffic_class)
eq_(0, p_ipv6.flow_label)
eq_(len(ic_buf), p_ipv6.payload_length)
eq_(inet.IPPROTO_ICMPV6, p_ipv6.nxt)
eq_(255, p_ipv6.hop_limit)
eq_('10::10', p_ipv6.src)
eq_('20::20', p_ipv6.dst)
# icmpv6
ok_(p_icmpv6)
eq_(0, p_icmpv6.type_)
eq_(0, p_icmpv6.code)
eq_(len(ic_buf), len(p_icmpv6))
t = bytearray(ic_buf)
struct.pack_into('!H', t, 2, p_icmpv6.csum)
ph = struct.pack('!16s16sI3xB', ipaddr, ipaddr, len(ic_buf), 58)
t = ph + t
eq_(packet_utils.checksum(t), 0x60)
# to string
eth_values = {'dst': 'ff:ff:ff:ff:ff:ff',
'src': '00:00:00:00:00:00',
'ethertype': ether.ETH_TYPE_IPV6}
_eth_str = ','.join(['%s=%s' % (k, repr(eth_values[k]))
for k, _ in inspect.getmembers(p_eth)
if k in eth_values])
eth_str = '%s(%s)' % (ethernet.ethernet.__name__, _eth_str)
ipv6_values = {'version': 6,
'traffic_class': 0,
'flow_label': 0,
'payload_length': len(ic_buf),
'nxt': inet.IPPROTO_ICMPV6,
'hop_limit': 255,
'src': '10::10',
'dst': '20::20',
'ext_hdrs': []}
_ipv6_str = ','.join(['%s=%s' % (k, repr(ipv6_values[k]))
for k, _ in inspect.getmembers(p_ipv6)
if k in ipv6_values])
ipv6_str = '%s(%s)' % (ipv6.ipv6.__name__, _ipv6_str)
icmpv6_values = {'type_': 0,
'code': 0,
'csum': p_icmpv6.csum,
'data': None}
_icmpv6_str = ','.join(['%s=%s' % (k, repr(icmpv6_values[k]))
for k, _ in inspect.getmembers(p_icmpv6)
if k in icmpv6_values])
icmpv6_str = '%s(%s)' % (icmpv6.icmpv6.__name__, _icmpv6_str)
pkt_str = '%s, %s, %s' % (eth_str, ipv6_str, icmpv6_str)
eq_(eth_str, str(p_eth))
eq_(eth_str, repr(p_eth))
eq_(ipv6_str, str(p_ipv6))
eq_(ipv6_str, repr(p_ipv6))
eq_(icmpv6_str, str(p_icmpv6))
eq_(icmpv6_str, repr(p_icmpv6))
eq_(pkt_str, str(pkt))
eq_(pkt_str, repr(pkt))
def test_llc_bpdu(self):
        # build packet
e = ethernet.ethernet(self.dst_mac, self.src_mac,
ether.ETH_TYPE_IEEE802_3)
llc_control = llc.ControlFormatU(0, 0, 0)
l = llc.llc(llc.SAP_BPDU, llc.SAP_BPDU, llc_control)
b = bpdu.ConfigurationBPDUs(flags=0,
root_priority=32768,
root_system_id_extension=0,
root_mac_address=self.src_mac,
root_path_cost=0,
bridge_priority=32768,
bridge_system_id_extension=0,
bridge_mac_address=self.dst_mac,
port_priority=128,
port_number=4,
message_age=1,
max_age=20,
hello_time=2,
forward_delay=15)
p = packet.Packet()
p.add_protocol(e)
p.add_protocol(l)
p.add_protocol(b)
p.serialize()
# ethernet !6s6sH
        e_buf = self.dst_mac_bin + self.src_mac_bin + '\x05\xdc'
# llc !BBB
l_buf = ('\x42'
'\x42'
'\x03')
# bpdu !HBBBQIQHHHHH
b_buf = ('\x00\x00'
'\x00'
'\x00'
'\x00'
'\x80\x64\xaa\xaa\xaa\xaa\xaa\xaa'
'\x00\x00\x00\x04'
'\x80\x64\xbb\xbb\xbb\xbb\xbb\xbb'
'\x80\x04'
'\x01\x00'
'\x14\x00'
'\x02\x00'
'\x0f\x00')
buf = e_buf + l_buf + b_buf
# parse
pkt = packet.Packet(array.array('B', p.data))
protocols = self.get_protocols(pkt)
p_eth = protocols['ethernet']
p_llc = protocols['llc']
p_bpdu = protocols['ConfigurationBPDUs']
# ethernet
ok_(p_eth)
eq_(self.dst_mac, p_eth.dst)
eq_(self.src_mac, p_eth.src)
eq_(ether.ETH_TYPE_IEEE802_3, p_eth.ethertype)
# llc
ok_(p_llc)
eq_(llc.SAP_BPDU, p_llc.dsap_addr)
eq_(llc.SAP_BPDU, p_llc.ssap_addr)
eq_(0, p_llc.control.modifier_function1)
eq_(0, p_llc.control.pf_bit)
eq_(0, p_llc.control.modifier_function2)
# bpdu
ok_(p_bpdu)
eq_(bpdu.PROTOCOL_IDENTIFIER, p_bpdu._protocol_id)
eq_(bpdu.PROTOCOLVERSION_ID_BPDU, p_bpdu._version_id)
eq_(bpdu.TYPE_CONFIG_BPDU, p_bpdu._bpdu_type)
eq_(0, p_bpdu.flags)
eq_(32768, p_bpdu.root_priority)
eq_(0, p_bpdu.root_system_id_extension)
eq_(self.src_mac, p_bpdu.root_mac_address)
eq_(0, p_bpdu.root_path_cost)
eq_(32768, p_bpdu.bridge_priority)
eq_(0, p_bpdu.bridge_system_id_extension)
eq_(self.dst_mac, p_bpdu.bridge_mac_address)
eq_(128, p_bpdu.port_priority)
eq_(4, p_bpdu.port_number)
eq_(1, p_bpdu.message_age)
eq_(20, p_bpdu.max_age)
eq_(2, p_bpdu.hello_time)
eq_(15, p_bpdu.forward_delay)
# to string
eth_values = {'dst': self.dst_mac,
'src': self.src_mac,
'ethertype': ether.ETH_TYPE_IEEE802_3}
_eth_str = ','.join(['%s=%s' % (k, repr(eth_values[k]))
for k, v in inspect.getmembers(p_eth)
if k in eth_values])
eth_str = '%s(%s)' % (ethernet.ethernet.__name__, _eth_str)
ctrl_values = {'modifier_function1': 0,
'pf_bit': 0,
'modifier_function2': 0}
_ctrl_str = ','.join(['%s=%s' % (k, repr(ctrl_values[k]))
for k, v in inspect.getmembers(p_llc.control)
if k in ctrl_values])
ctrl_str = '%s(%s)' % (llc.ControlFormatU.__name__, _ctrl_str)
llc_values = {'dsap_addr': repr(llc.SAP_BPDU),
'ssap_addr': repr(llc.SAP_BPDU),
'control': ctrl_str}
_llc_str = ','.join(['%s=%s' % (k, llc_values[k])
for k, v in inspect.getmembers(p_llc)
if k in llc_values])
llc_str = '%s(%s)' % (llc.llc.__name__, _llc_str)
bpdu_values = {'flags': 0,
'root_priority': long(32768),
'root_system_id_extension': long(0),
'root_mac_address': self.src_mac,
'root_path_cost': 0,
'bridge_priority': long(32768),
'bridge_system_id_extension': long(0),
'bridge_mac_address': self.dst_mac,
'port_priority': 128,
'port_number': 4,
'message_age': float(1),
'max_age': float(20),
'hello_time': float(2),
'forward_delay': float(15)}
_bpdu_str = ','.join(['%s=%s' % (k, repr(bpdu_values[k]))
for k, v in inspect.getmembers(p_bpdu)
if k in bpdu_values])
bpdu_str = '%s(%s)' % (bpdu.ConfigurationBPDUs.__name__, _bpdu_str)
pkt_str = '%s, %s, %s' % (eth_str, llc_str, bpdu_str)
eq_(eth_str, str(p_eth))
eq_(eth_str, repr(p_eth))
eq_(llc_str, str(p_llc))
eq_(llc_str, repr(p_llc))
eq_(bpdu_str, str(p_bpdu))
eq_(bpdu_str, repr(p_bpdu))
eq_(pkt_str, str(pkt))
eq_(pkt_str, repr(pkt))
def test_div_api(self):
e = ethernet.ethernet(self.dst_mac, self.src_mac, ether.ETH_TYPE_IP)
i = ipv4.ipv4()
u = udp.udp(self.src_port, self.dst_port)
pkt = e / i / u
ok_(isinstance(pkt, packet.Packet))
ok_(isinstance(pkt.protocols[0], ethernet.ethernet))
ok_(isinstance(pkt.protocols[1], ipv4.ipv4))
ok_(isinstance(pkt.protocols[2], udp.udp))
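# Minimal round-trip sketch of the pattern the tests above exercise
# (illustrative only, not part of the original test suite): build a packet
# with the '/' operator, serialize it, then re-parse the raw bytes.
def _example_round_trip():
    pkt = ethernet.ethernet() / ipv4.ipv4(proto=inet.IPPROTO_UDP) / udp.udp()
    pkt.serialize()
    return packet.Packet(array.array('B', pkt.data))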
|
|
import os
import re
import shutil
import subprocess
import projectconfig
class Project:
def __init__(self, project_dir, config = None):
self.project_dir = project_dir
self.config = config or projectconfig
def process(self, command):
err = subprocess.Popen(command).wait()
return err
def element_tree(self):
import xml.etree.ElementTree
return xml.etree.ElementTree
def msbuild_path(self):
return self.config.msbuild_path
def nuget_path(self):
return self.config.nuget_path
def test_command(self):
return self.config.test_command
def project_name(self):
return os.path.basename(os.path.normpath(self.project_dir))
def project_file(self):
return os.path.join(self.project_dir, self.project_name() + '.csproj')
def project_file_namespace(self):
return self.config.project_file_namespace
def test_project(self):
test_project_dir = os.path.join(self.project_dir, '..', self.project_name() + '.Tests')
return Project(test_project_dir, self.config)
def assembly_info_file(self):
return os.path.join(self.project_dir, 'Properties', 'AssemblyInfo.cs')
def assembly_file(self):
return os.path.join(self.build_dir(), self.project_name() + '.dll')
def build_configuration(self):
return self.config.build_configuration
def build_dir(self):
return os.path.join(self.project_dir, 'bin', self.build_configuration())
def nuget_dir(self):
return os.path.join(self.project_dir, 'nuget')
def nuspec_file(self):
return os.path.join(self.nuget_dir(), self.project_name() + '.nuspec')
def nuget_source(self):
return self.config.nuget_source
def nuget_api_key(self):
return self.config.nuget_api_key
def target_framework_versions(self):
return self.config.target_framework_versions
def framework_version_nuget_dir_map(self):
return self.config.framework_version_nuget_dir_map
def get_assembly_info_file_contents(self):
with open(self.assembly_info_file(), 'r') as f:
return f.read()
def set_assembly_info_file_contents(self, contents):
with open(self.assembly_info_file(), 'w') as f:
f.write(contents)
def get_version(self, version_type = 'AssemblyVersion'):
contents = self.get_assembly_info_file_contents()
lines = contents.split('\n')
for line in lines:
if 'assembly: ' + version_type in line:
match = re.findall(r'\"(.+?)\"', line)[0]
split = match.split('.')
return split
return []
def increment_version(self, version_type = 'AssemblyFileVersion', version_index = 3, version_increment = 1):
version = self.get_version(version_type)
version[version_index] = str(int(version[version_index]) + version_increment)
lines = self.get_assembly_info_file_contents().split('\n')
for i, line in enumerate(lines):
if 'assembly: ' + version_type in line:
lines[i] = '[assembly: {}("{}")]'.format(version_type, '.'.join(version))
self.set_assembly_info_file_contents('\n'.join(lines))
def build(self, target_framework_version):
project_xml_namespace = self.project_file_namespace()
et = self.element_tree()
et.register_namespace('', project_xml_namespace)
project_file = self.project_file()
project_xml_doc = et.parse(project_file)
project_xml_root = project_xml_doc.getroot()
for version_element in project_xml_root.iter('{' + project_xml_namespace + '}TargetFrameworkVersion'):
version_element.text = target_framework_version
project_xml_doc.write(project_file)
cmd = '{} "{}" /p:configuration={}'.format(self.msbuild_path(), project_file, self.build_configuration())
err = self.process(cmd)
return err
def test(self, target_framework_version):
test_project = self.test_project()
test_project.build(target_framework_version)
test_cmd = self.test_command()
test_assembly_file = test_project.assembly_file()
cmd = '{} "{}"'.format(test_cmd, test_assembly_file)
err = self.process(cmd)
return err
def pack(self):
self.increment_version()
versions = self.target_framework_versions()
for version in versions:
err = self.build(version)
if err:
return err
err = self.test(version)
if err:
return err
nuget_map = self.framework_version_nuget_dir_map()
nuget_dir = nuget_map[version]
nuget_dir = os.path.join(self.nuget_dir(), 'lib', nuget_dir)
if os.path.exists(nuget_dir):
shutil.rmtree(nuget_dir)
build_dir = self.build_dir()
shutil.copytree(build_dir, nuget_dir)
nuget_dir = self.nuget_dir()
nuget_path = self.nuget_path()
nuspec_file = self.nuspec_file()
version = self.get_version('AssemblyInformationalVersion')
version = '.'.join(version)
cmd = '{} pack "{}" -Version {} -OutputDirectory "{}"'.format(nuget_path, nuspec_file, version, nuget_dir)
err = self.process(cmd)
return err
def push(self):
err = self.pack()
if err:
return err
version = self.get_version('AssemblyInformationalVersion')
version = '.'.join(version)
nuget_dir = self.nuget_dir()
project_name = self.project_name()
package_file_name = '{}.{}.nupkg'.format(project_name, version)
package_file = os.path.join(nuget_dir, package_file_name)
nuget_path = self.nuget_path()
cmd = '{} push "{}"'.format(nuget_path, package_file)
nuget_source = self.nuget_source()
if nuget_source:
cmd = '{} -Source {}'.format(cmd, nuget_source)
nuget_api_key = self.nuget_api_key()
if nuget_api_key:
cmd = '{} -ApiKey {}'.format(cmd, nuget_api_key)
err = self.process(cmd)
return err
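# Minimal usage sketch (illustrative only, not part of the original module).
# Assumes projectconfig supplies the msbuild/nuget paths, build configuration
# and target framework settings consumed by the methods above.
def _example_release(project_dir):
    project = Project(project_dir)
    # push() increments the version, builds and tests each target framework,
    # packs the nuget package and finally pushes it to the configured source.
    err = project.push()
    if err:
        raise RuntimeError('release failed with exit code {}'.format(err))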
|
|
# -*- coding: utf-8 -*-
import json
from .. import base
from girder.api.v1 import resource
from girder.constants import AccessType
from girder.models.model_base import AccessControlledModel
from girder.models.assetstore import Assetstore
from girder.models.collection import Collection
from girder.models.item import Item
from girder.models.user import User
from girder.utility.acl_mixin import AccessControlMixin
from girder.utility import search
def setUpModule():
base.startServer()
def tearDownModule():
base.stopServer()
class SearchTestCase(base.TestCase):
def testResourceSearch(self):
"""
Test resource/search endpoint
"""
# get expected models from the database
admin = User().findOne({'login': 'adminlogin'})
user = User().findOne({'login': 'goodlogin'})
coll1 = Collection().findOne({'name': 'Test Collection'})
coll2 = Collection().findOne({'name': 'Magic collection'})
item1 = Item().findOne({'name': 'Public object'})
# set user read permissions on the private collection
Collection().setUserAccess(coll2, user, level=AccessType.READ, save=True)
# Grab the default user folders
resp = self.request(
path='/folder', method='GET', user=user, params={
'parentType': 'user',
'parentId': user['_id'],
'sort': 'name',
'sortdir': 1
})
privateFolder = resp.json[0]
# First test all of the required parameters.
self.ensureRequiredParams(path='/resource/search', required=['q', 'types'])
# Now test parameter validation
resp = self.request(path='/resource/search', params={
'q': 'query',
'types': ',,invalid;json!'
})
self.assertStatus(resp, 400)
self.assertEqual('Parameter types must be valid JSON.', resp.json['message'])
# Test searching with no results
resp = self.request(path='/resource/search', params={
'q': 'gibberish',
'types': '["folder", "user", "collection", "group"]'
})
self.assertStatusOk(resp)
self.assertEqual(resp.json, {
'folder': [],
'user': [],
'collection': [],
'group': []
})
# Ensure searching respects permissions
resp = self.request(path='/resource/search', params={
'q': 'private',
'types': '["folder", "user", "collection"]'
})
self.assertStatusOk(resp)
self.assertEqual(resp.json, {
'folder': [],
'user': [],
'collection': []
})
resp = self.request(path='/resource/search', params={
'q': 'pr',
'mode': 'prefix',
'types': '["folder", "user", "collection"]'
})
self.assertStatusOk(resp)
self.assertEqual(resp.json, {
'folder': [],
'user': [],
'collection': []
})
resp = self.request(path='/resource/search', params={
'q': 'private',
'types': '["folder", "user", "collection"]'
}, user=user)
self.assertStatusOk(resp)
self.assertEqual(1, len(resp.json['folder']))
self.assertDictContainsSubset({
'_id': str(privateFolder['_id']),
'name': 'Private'
}, resp.json['folder'][0])
self.assertEqual(1, len(resp.json['collection']))
self.assertDictContainsSubset({
'_id': str(coll2['_id']),
'name': coll2['name']
}, resp.json['collection'][0])
self.assertEqual(0, len(resp.json['user']))
resp = self.request(path='/resource/search', params={
'q': 'pr',
'mode': 'prefix',
'types': '["folder", "user", "collection", "item"]'
}, user=user)
self.assertStatusOk(resp)
self.assertEqual(1, len(resp.json['folder']))
self.assertDictContainsSubset({
'_id': str(privateFolder['_id']),
'name': 'Private'
}, resp.json['folder'][0])
self.assertEqual(0, len(resp.json['collection']))
self.assertEqual(0, len(resp.json['item']))
self.assertEqual(0, len(resp.json['user']))
# Ensure that weights are respected, e.g. description should be
# weighted less than name.
resp = self.request(path='/resource/search', params={
'q': 'magic',
'types': '["collection"]'
}, user=admin)
self.assertStatusOk(resp)
self.assertEqual(2, len(resp.json['collection']))
self.assertDictContainsSubset({
'_id': str(coll2['_id']),
'name': coll2['name']
}, resp.json['collection'][0])
self.assertDictContainsSubset({
'_id': str(coll1['_id']),
'name': coll1['name']
}, resp.json['collection'][1])
self.assertTrue(resp.json['collection'][0]['_textScore']
> resp.json['collection'][1]['_textScore'])
# Exercise user search by login
resp = self.request(path='/resource/search', params={
'q': 'goodlogin',
'types': '["user"]'
}, user=admin)
self.assertStatusOk(resp)
self.assertEqual(1, len(resp.json['user']))
self.assertDictContainsSubset({
'_id': str(user['_id']),
'firstName': user['firstName'],
'lastName': user['lastName'],
'login': user['login']
}, resp.json['user'][0])
# check item search with proper permissions
resp = self.request(path='/resource/search', params={
'q': 'object',
'types': '["item"]'
}, user=user)
self.assertStatusOk(resp)
self.assertEqual(1, len(resp.json['item']))
self.assertDictContainsSubset({
'_id': str(item1['_id']),
'name': item1['name']
}, resp.json['item'][0])
# Check search for model that is not access controlled
self.assertNotIsInstance(Assetstore(), AccessControlledModel)
self.assertNotIsInstance(Assetstore(), AccessControlMixin)
resource.allowedSearchTypes.add('assetstore')
resp = self.request(path='/resource/search', params={
'q': 'Test',
'mode': 'prefix',
'types': '["assetstore"]'
}, user=user)
self.assertStatusOk(resp)
self.assertEqual(1, len(resp.json['assetstore']))
def testResourceFileSearch(self):
admin = User().findOne({'login': 'adminlogin'})
resp = self.request(path='/resource/search', params={
'q': 'public',
'types': '["file"]'
})
self.assertStatusOk(resp)
self.assertEqual(len(resp.json['file']), 1)
resp = self.request(path='/resource/search', params={
'q': 'personal',
'types': '["file"]'
})
self.assertStatusOk(resp)
self.assertEqual(len(resp.json['file']), 0)
resp = self.request(path='/resource/search', params={
'q': 'file',
'types': '["file"]'
})
self.assertStatusOk(resp)
self.assertEqual(len(resp.json['file']), 1)
resp = self.request(path='/resource/search', params={
'q': 'personal',
'types': '["file"]'
}, user=admin)
self.assertStatusOk(resp)
self.assertEqual(len(resp.json['file']), 1)
resp = self.request(path='/resource/search', params={
'q': 'file',
'types': '["file"]'
}, user=admin)
self.assertStatusOk(resp)
self.assertEqual(len(resp.json['file']), 2)
def testSearchModeRegistry(self):
def testSearchHandler(query, types, user, level, limit, offset):
return {
'query': query,
'types': types
}
search.addSearchMode('testSearch', testSearchHandler)
# Use the new search mode.
resp = self.request(path='/resource/search', params={
'q': 'Test',
'mode': 'testSearch',
'types': json.dumps(['collection'])
})
self.assertStatusOk(resp)
self.assertDictEqual(resp.json, {
'query': 'Test',
'types': ['collection']
})
search.removeSearchMode('testSearch')
# Use the deleted search mode.
resp = self.request(path='/resource/search', params={
'q': 'Test',
'mode': 'testSearch',
'types': json.dumps(['collection'])
})
self.assertStatus(resp, 400)
|
|
#!/usr/bin/python
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Classes for processing various .yaml files in CourseBuilder installations."""
__author__ = 'Mike Gainer (mgainer@google.com)'
import copy
import re
import yaml
NEWLINE_BEFORE_YAML_SECTIONS = set([
'env_variables',
'includes',
'inbound_services',
'builtins',
'libraries',
'handlers',
])
class CourseBuilderYamlFormatter(yaml.Dumper):
"""Custom formatter to generate CourseBuilder style in yaml files."""
def __init__(self, *args, **kwargs):
super(CourseBuilderYamlFormatter, self).__init__(*args, **kwargs)
self.best_width = 0 # Minimize line merging
# Add newlines before major sections for good visual parsing.
def emit(self, item):
if (isinstance(item, yaml.ScalarEvent) and
str(item.value) in NEWLINE_BEFORE_YAML_SECTIONS):
self.write_line_break()
self.write_line_break()
super(CourseBuilderYamlFormatter, self).emit(item)
    # For very long lines, don't leave the first item of an element on the
    # same line as the element's name; instead, move it to the next line so
    # all parts share the same indent. (E.g., the GCB_REGISTERED_MODULES list)
def write_plain(self, text, split):
if len(text) > 80 or ' ' in text:
self.write_line_break()
self.write_indent()
super(CourseBuilderYamlFormatter, self).write_plain(text, split)
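# Minimal usage sketch (illustrative only, not part of the original module):
# dumping plain Python data with the custom Dumper applies the CourseBuilder
# style rules above (blank lines before major sections, long values wrapped).
def _example_dump_course_builder_style(data):
    return yaml.dump(data, Dumper=CourseBuilderYamlFormatter,
                     default_flow_style=False)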
class AppYamlFile(object):
"""Parse, modify, and write app.yaml file."""
def __init__(self, name):
self._name = name
self._loaded = False
def _lazy_load(self):
if self._loaded:
return
with open(self._name) as fp:
self._root = yaml.compose(fp)
# Root value is a list of 2-tuples for name/value of top-level
# items in yaml file.
for item in self._root.value:
if item[0].value == 'env_variables':
self._env_vars = item[1].value
if item[0].value == 'libraries':
self._library_list = item[1].value
if item[0].value == 'application':
self._application = item[1].value
# Libraries item is a list of name/value 2-tuples.
# Extract name and version for each library.
self._lib_versions = {}
for lib_spec in self._library_list:
name = None
vers = None
for lib_item in lib_spec.value:
if lib_item[0].value == 'name':
name = lib_item[1].value
elif lib_item[0].value == 'version':
vers = lib_item[1].value
if name and vers:
self._lib_versions[name] = vers
self._loaded = True
def write(self):
self._lazy_load()
content = yaml.serialize(self._root, stream=None,
Dumper=CourseBuilderYamlFormatter)
with open(self._name, 'w') as fp:
fp.write(content)
def require_library(self, library, version):
"""Add tree nodes for new library if it is not already called for."""
self._lazy_load()
if library in self._lib_versions:
if version != self._lib_versions[library]:
raise ValueError(
'Library "%s" is already required ' % library +
'at version "%s". ' % self._lib_versions[library] +
'Cannot satisfy request for version "%s".' % version)
return False
added_lib = copy.deepcopy(self._library_list[0])
added_lib.value[0][1].value = library
added_lib.value[1][1].value = version
self._library_list.append(added_lib)
self._library_list.sort(key=lambda x: x.value[0][1].value)
return True
def set_env(self, var_name, var_value):
self._lazy_load()
var_value = var_value.strip()
env_var = None
for member in self._env_vars:
if member[0].value == var_name:
env_var = member
break
if var_value:
if not env_var:
env_var_name = yaml.ScalarNode('tag:yaml.org,2002:str',
var_name)
env_var_value = yaml.ScalarNode('tag:yaml.org,2002:str',
var_value)
env_var = (env_var_name, env_var_value)
self._env_vars.append(env_var)
else:
env_var[1].value = var_value
else:
if env_var:
self._env_vars.remove(env_var)
def get_env(self, var_name):
self._lazy_load()
for env_var in self._env_vars:
if env_var[0].value == var_name:
return env_var[1].value
return None
def get_all_env(self):
self._lazy_load()
ret = {}
for env_var in self._env_vars:
ret[env_var[0].value] = env_var[1].value
return ret
@property
def application(self):
self._lazy_load()
return self._application
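# Minimal usage sketch (illustrative only, not part of the original module).
# The file name, library name/version and env variable are assumed values.
def _example_update_app_yaml(path='app.yaml'):
    app_yaml = AppYamlFile(path)
    app_yaml.require_library('jinja2', '2.6')         # add the library if missing
    app_yaml.set_env('CUSTOM_ENV_VAR', 'some-value')  # add or replace an env var
    app_yaml.write()                                   # write changes back to disk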
class ModuleManifest(object):
"""Parse module.yaml files into object providing convienent properties."""
def __init__(self, path):
self._path = path
self._loaded = False
def _lazy_load(self):
if self._loaded:
return
with open(self._path) as fp:
module_spec = yaml.load(fp)
self._main_module = module_spec['module_name']
parts = self._main_module.split('.')
if parts[0] != 'modules' or len(parts) < 2:
raise ValueError(
'module_name is expected to name the main python file '
'under CourseBuilder as: modules.<module>.<filename>')
self._module_name = parts[1]
self._required_version = module_spec['container_version']
self._third_party_libraries = module_spec.get(
'third_party_libraries', {})
self._appengine_libraries = module_spec.get(
'appengine_libraries', {})
self._tests = module_spec['tests']
self._loaded = True
def assert_version_compatibility(self, actual_version):
self._lazy_load()
for required, actual in zip(re.split(r'[-.]', self._required_version),
re.split(r'[-.]', actual_version)):
if int(required) < int(actual):
break
if int(required) > int(actual):
raise ValueError(
'Current CourseBuilder version %s ' % actual_version +
'is less than the version %s ' % self._required_version +
'required by module %s' % self._module_name)
@property
def module_name(self):
self._lazy_load()
return self._module_name
@property
def main_module(self):
self._lazy_load()
return self._main_module
@property
def third_party_libraries(self):
self._lazy_load()
return self._third_party_libraries
@property
def appengine_libraries(self):
self._lazy_load()
return self._appengine_libraries
@property
def tests(self):
self._lazy_load()
return self._tests
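# Minimal usage sketch (illustrative only, not part of the original module).
# The manifest path and CourseBuilder version string are assumed values.
def _example_check_module(path='modules/example/module.yaml'):
    manifest = ModuleManifest(path)
    # Raises ValueError when the running CourseBuilder is older than the
    # container_version declared in the manifest.
    manifest.assert_version_compatibility('1.9.0')
    return manifest.main_module, manifest.tests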
|
|
"""Tests for deCONZ config flow."""
import asyncio
import pydeconz
from homeassistant import data_entry_flow
from homeassistant.components import ssdp
from homeassistant.components.deconz import config_flow
from homeassistant.components.deconz.config_flow import (
CONF_MANUAL_INPUT,
CONF_SERIAL,
DECONZ_MANUFACTURERURL,
)
from homeassistant.components.deconz.const import (
CONF_ALLOW_CLIP_SENSOR,
CONF_ALLOW_DECONZ_GROUPS,
CONF_MASTER_GATEWAY,
DOMAIN,
)
from homeassistant.const import CONF_API_KEY, CONF_HOST, CONF_PORT
from .test_gateway import API_KEY, BRIDGEID, setup_deconz_integration
async def test_flow_discovered_bridges(hass, aioclient_mock):
"""Test that config flow works for discovered bridges."""
aioclient_mock.get(
pydeconz.utils.URL_DISCOVER,
json=[
{"id": BRIDGEID, "internalipaddress": "1.2.3.4", "internalport": 80},
{"id": "1234E567890A", "internalipaddress": "5.6.7.8", "internalport": 80},
],
headers={"content-type": "application/json"},
)
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": "user"}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "user"
result = await hass.config_entries.flow.async_configure(
result["flow_id"], user_input={CONF_HOST: "1.2.3.4"}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "link"
aioclient_mock.post(
"http://1.2.3.4:80/api",
json=[{"success": {"username": API_KEY}}],
headers={"content-type": "application/json"},
)
result = await hass.config_entries.flow.async_configure(
result["flow_id"], user_input={}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["title"] == BRIDGEID
assert result["data"] == {
CONF_HOST: "1.2.3.4",
CONF_PORT: 80,
CONF_API_KEY: API_KEY,
}
async def test_flow_manual_configuration_decision(hass, aioclient_mock):
"""Test that config flow for one discovered bridge works."""
aioclient_mock.get(
pydeconz.utils.URL_DISCOVER,
json=[{"id": BRIDGEID, "internalipaddress": "1.2.3.4", "internalport": 80}],
headers={"content-type": "application/json"},
)
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": "user"}
)
result = await hass.config_entries.flow.async_configure(
result["flow_id"], user_input={CONF_HOST: CONF_MANUAL_INPUT}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "manual_input"
result = await hass.config_entries.flow.async_configure(
result["flow_id"], user_input={CONF_HOST: "1.2.3.4", CONF_PORT: 80},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "link"
aioclient_mock.post(
"http://1.2.3.4:80/api",
json=[{"success": {"username": API_KEY}}],
headers={"content-type": "application/json"},
)
aioclient_mock.get(
f"http://1.2.3.4:80/api/{API_KEY}/config",
json={"bridgeid": BRIDGEID},
headers={"content-type": "application/json"},
)
result = await hass.config_entries.flow.async_configure(
result["flow_id"], user_input={}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["title"] == BRIDGEID
assert result["data"] == {
CONF_HOST: "1.2.3.4",
CONF_PORT: 80,
CONF_API_KEY: API_KEY,
}
async def test_flow_manual_configuration(hass, aioclient_mock):
"""Test that config flow works with manual configuration after no discovered bridges."""
aioclient_mock.get(
pydeconz.utils.URL_DISCOVER,
json=[],
headers={"content-type": "application/json"},
)
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": "user"}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "manual_input"
result = await hass.config_entries.flow.async_configure(
result["flow_id"], user_input={CONF_HOST: "1.2.3.4", CONF_PORT: 80},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "link"
aioclient_mock.post(
"http://1.2.3.4:80/api",
json=[{"success": {"username": API_KEY}}],
headers={"content-type": "application/json"},
)
aioclient_mock.get(
f"http://1.2.3.4:80/api/{API_KEY}/config",
json={"bridgeid": BRIDGEID},
headers={"content-type": "application/json"},
)
result = await hass.config_entries.flow.async_configure(
result["flow_id"], user_input={}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["title"] == BRIDGEID
assert result["data"] == {
CONF_HOST: "1.2.3.4",
CONF_PORT: 80,
CONF_API_KEY: API_KEY,
}
async def test_manual_configuration_after_discovery_timeout(hass, aioclient_mock):
"""Test failed discovery fallbacks to manual configuration."""
aioclient_mock.get(pydeconz.utils.URL_DISCOVER, exc=asyncio.TimeoutError)
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": "user"}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "manual_input"
assert not hass.config_entries.flow._progress[result["flow_id"]].bridges
async def test_manual_configuration_after_discovery_ResponseError(hass, aioclient_mock):
"""Test failed discovery fallbacks to manual configuration."""
aioclient_mock.get(pydeconz.utils.URL_DISCOVER, exc=config_flow.ResponseError)
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": "user"}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "manual_input"
assert not hass.config_entries.flow._progress[result["flow_id"]].bridges
async def test_manual_configuration_update_configuration(hass, aioclient_mock):
"""Test that manual configuration can update existing config entry."""
gateway = await setup_deconz_integration(hass)
aioclient_mock.get(
pydeconz.utils.URL_DISCOVER,
json=[],
headers={"content-type": "application/json"},
)
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": "user"}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "manual_input"
result = await hass.config_entries.flow.async_configure(
result["flow_id"], user_input={CONF_HOST: "2.3.4.5", CONF_PORT: 80},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "link"
aioclient_mock.post(
"http://2.3.4.5:80/api",
json=[{"success": {"username": API_KEY}}],
headers={"content-type": "application/json"},
)
aioclient_mock.get(
f"http://2.3.4.5:80/api/{API_KEY}/config",
json={"bridgeid": BRIDGEID},
headers={"content-type": "application/json"},
)
result = await hass.config_entries.flow.async_configure(
result["flow_id"], user_input={}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result["reason"] == "already_configured"
assert gateway.config_entry.data[CONF_HOST] == "2.3.4.5"
async def test_manual_configuration_dont_update_configuration(hass, aioclient_mock):
"""Test that _create_entry work and that bridgeid can be requested."""
await setup_deconz_integration(hass)
aioclient_mock.get(
pydeconz.utils.URL_DISCOVER,
json=[],
headers={"content-type": "application/json"},
)
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": "user"}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "manual_input"
result = await hass.config_entries.flow.async_configure(
result["flow_id"], user_input={CONF_HOST: "1.2.3.4", CONF_PORT: 80},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "link"
aioclient_mock.post(
"http://1.2.3.4:80/api",
json=[{"success": {"username": API_KEY}}],
headers={"content-type": "application/json"},
)
aioclient_mock.get(
f"http://1.2.3.4:80/api/{API_KEY}/config",
json={"bridgeid": BRIDGEID},
headers={"content-type": "application/json"},
)
result = await hass.config_entries.flow.async_configure(
result["flow_id"], user_input={}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result["reason"] == "already_configured"
async def test_manual_configuration_timeout_get_bridge(hass, aioclient_mock):
"""Test that _create_entry handles a timeout."""
aioclient_mock.get(
pydeconz.utils.URL_DISCOVER,
json=[],
headers={"content-type": "application/json"},
)
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": "user"}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "manual_input"
result = await hass.config_entries.flow.async_configure(
result["flow_id"], user_input={CONF_HOST: "1.2.3.4", CONF_PORT: 80},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "link"
aioclient_mock.post(
"http://1.2.3.4:80/api",
json=[{"success": {"username": API_KEY}}],
headers={"content-type": "application/json"},
)
aioclient_mock.get(
f"http://1.2.3.4:80/api/{API_KEY}/config", exc=asyncio.TimeoutError
)
result = await hass.config_entries.flow.async_configure(
result["flow_id"], user_input={}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result["reason"] == "no_bridges"
async def test_link_get_api_key_ResponseError(hass, aioclient_mock):
"""Test config flow should abort if no API key was possible to retrieve."""
aioclient_mock.get(
pydeconz.utils.URL_DISCOVER,
json=[{"id": BRIDGEID, "internalipaddress": "1.2.3.4", "internalport": 80}],
headers={"content-type": "application/json"},
)
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": "user"}
)
result = await hass.config_entries.flow.async_configure(
result["flow_id"], user_input={CONF_HOST: "1.2.3.4"}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "link"
aioclient_mock.post("http://1.2.3.4:80/api", exc=pydeconz.errors.ResponseError)
result = await hass.config_entries.flow.async_configure(
result["flow_id"], user_input={}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "link"
assert result["errors"] == {"base": "no_key"}
async def test_flow_ssdp_discovery(hass, aioclient_mock):
"""Test that config flow for one discovered bridge works."""
result = await hass.config_entries.flow.async_init(
DOMAIN,
data={
ssdp.ATTR_SSDP_LOCATION: "http://1.2.3.4:80/",
ssdp.ATTR_UPNP_MANUFACTURER_URL: DECONZ_MANUFACTURERURL,
ssdp.ATTR_UPNP_SERIAL: BRIDGEID,
},
context={"source": "ssdp"},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "link"
aioclient_mock.post(
"http://1.2.3.4:80/api",
json=[{"success": {"username": API_KEY}}],
headers={"content-type": "application/json"},
)
result = await hass.config_entries.flow.async_configure(
result["flow_id"], user_input={}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["title"] == BRIDGEID
assert result["data"] == {
CONF_HOST: "1.2.3.4",
CONF_PORT: 80,
CONF_API_KEY: API_KEY,
}
async def test_ssdp_discovery_not_deconz_bridge(hass):
"""Test a non deconz bridge being discovered over ssdp."""
result = await hass.config_entries.flow.async_init(
DOMAIN,
data={ssdp.ATTR_UPNP_MANUFACTURER_URL: "not deconz bridge"},
context={"source": "ssdp"},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result["reason"] == "not_deconz_bridge"
async def test_ssdp_discovery_update_configuration(hass):
"""Test if a discovered bridge is configured but updates with new attributes."""
gateway = await setup_deconz_integration(hass)
result = await hass.config_entries.flow.async_init(
DOMAIN,
data={
ssdp.ATTR_SSDP_LOCATION: "http://2.3.4.5:80/",
ssdp.ATTR_UPNP_MANUFACTURER_URL: DECONZ_MANUFACTURERURL,
ssdp.ATTR_UPNP_SERIAL: BRIDGEID,
},
context={"source": "ssdp"},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result["reason"] == "already_configured"
assert gateway.config_entry.data[CONF_HOST] == "2.3.4.5"
async def test_ssdp_discovery_dont_update_configuration(hass):
"""Test if a discovered bridge has already been configured."""
gateway = await setup_deconz_integration(hass)
result = await hass.config_entries.flow.async_init(
DOMAIN,
data={
ssdp.ATTR_SSDP_LOCATION: "http://1.2.3.4:80/",
ssdp.ATTR_UPNP_MANUFACTURER_URL: DECONZ_MANUFACTURERURL,
ssdp.ATTR_UPNP_SERIAL: BRIDGEID,
},
context={"source": "ssdp"},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result["reason"] == "already_configured"
assert gateway.config_entry.data[CONF_HOST] == "1.2.3.4"
async def test_ssdp_discovery_dont_update_existing_hassio_configuration(hass):
"""Test to ensure the SSDP discovery does not update an Hass.io entry."""
gateway = await setup_deconz_integration(hass, source="hassio")
result = await hass.config_entries.flow.async_init(
DOMAIN,
data={
ssdp.ATTR_SSDP_LOCATION: "http://1.2.3.4:80/",
ssdp.ATTR_UPNP_MANUFACTURER_URL: DECONZ_MANUFACTURERURL,
ssdp.ATTR_UPNP_SERIAL: BRIDGEID,
},
context={"source": "ssdp"},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result["reason"] == "already_configured"
assert gateway.config_entry.data[CONF_HOST] == "1.2.3.4"
async def test_flow_hassio_discovery(hass):
"""Test hassio discovery flow works."""
result = await hass.config_entries.flow.async_init(
DOMAIN,
data={
"addon": "Mock Addon",
CONF_HOST: "mock-deconz",
CONF_PORT: 80,
CONF_SERIAL: BRIDGEID,
CONF_API_KEY: API_KEY,
},
context={"source": "hassio"},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "hassio_confirm"
assert result["description_placeholders"] == {"addon": "Mock Addon"}
result = await hass.config_entries.flow.async_configure(
result["flow_id"], user_input={}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["result"].data == {
CONF_HOST: "mock-deconz",
CONF_PORT: 80,
CONF_API_KEY: API_KEY,
}
async def test_hassio_discovery_update_configuration(hass):
"""Test we can update an existing config entry."""
gateway = await setup_deconz_integration(hass)
result = await hass.config_entries.flow.async_init(
DOMAIN,
data={
CONF_HOST: "2.3.4.5",
CONF_PORT: 8080,
CONF_API_KEY: "updated",
CONF_SERIAL: BRIDGEID,
},
context={"source": "hassio"},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result["reason"] == "already_configured"
assert gateway.config_entry.data[CONF_HOST] == "2.3.4.5"
assert gateway.config_entry.data[CONF_PORT] == 8080
assert gateway.config_entry.data[CONF_API_KEY] == "updated"
async def test_hassio_discovery_dont_update_configuration(hass):
"""Test we can update an existing config entry."""
await setup_deconz_integration(hass)
result = await hass.config_entries.flow.async_init(
DOMAIN,
data={
CONF_HOST: "1.2.3.4",
CONF_PORT: 80,
CONF_API_KEY: API_KEY,
CONF_SERIAL: BRIDGEID,
},
context={"source": "hassio"},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result["reason"] == "already_configured"
async def test_option_flow(hass):
"""Test config flow options."""
gateway = await setup_deconz_integration(hass)
result = await hass.config_entries.options.async_init(gateway.config_entry.entry_id)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "deconz_devices"
result = await hass.config_entries.options.async_configure(
result["flow_id"],
user_input={CONF_ALLOW_CLIP_SENSOR: False, CONF_ALLOW_DECONZ_GROUPS: False},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["data"] == {
CONF_ALLOW_CLIP_SENSOR: False,
CONF_ALLOW_DECONZ_GROUPS: False,
CONF_MASTER_GATEWAY: True,
}
|
|
#!/usr/bin/env python
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import re
import requests
import time
from datetime import datetime
from resource_management.core.exceptions import Fail
from resource_management.core.logger import Logger
from resource_management.core.resources.system import Execute, File
from resource_management.libraries.functions import format as ambari_format
from resource_management.libraries.functions.format import format
import metron_service
import metron_security
# Wrap major operations and functionality in this class
class IndexingCommands:
__params = None
__indexing_topic = None
__random_access_indexing_topology = None
__batch_indexing_topology = None
__configured = False
__acl_configured = False
__hdfs_perm_configured = False
__hbase_configured = False
__hbase_acl_configured = False
def __init__(self, params):
if params is None:
raise ValueError("params argument is required for initialization")
self.__params = params
self.__random_access_indexing_topology = params.metron_random_access_indexing_topology
self.__batch_indexing_topology = params.metron_batch_indexing_topology
self.__indexing_topic = params.indexing_input_topic
self.__configured = os.path.isfile(self.__params.indexing_configured_flag_file)
self.__acl_configured = os.path.isfile(self.__params.indexing_acl_configured_flag_file)
self.__hbase_configured = os.path.isfile(self.__params.indexing_hbase_configured_flag_file)
self.__hbase_acl_configured = os.path.isfile(self.__params.indexing_hbase_acl_configured_flag_file)
self.__elasticsearch_template_installed = os.path.isfile(self.__params.elasticsearch_template_installed_flag_file)
self.__solr_schema_installed = os.path.isfile(self.__params.solr_schema_installed_flag_file)
self.__hdfs_perm_configured = os.path.isfile(self.__params.indexing_hdfs_perm_configured_flag_file)
def __get_topics(self):
return [self.__indexing_topic]
def __get_kafka_acl_groups(self):
# Kafka consumer group names used by the batch and random access indexing topologies
return ['indexing-batch', 'indexing-ra']
def get_templates(self):
"""
Defines the Elasticsearch index templates.
:return: Dict where key is the name of an index template and the
value is a path to file containing the index template definition.
"""
from params import params
return {
"bro_index": params.bro_index_path,
"yaf_index": params.yaf_index_path,
"snort_index": params.snort_index_path,
"error_index": params.error_index_path,
"metaalert_index": params.meta_index_path
}
def get_solr_schemas(self):
"""
Defines the Solr schemas.
:return: List of collection names for which Solr schemas should be installed.
"""
return [
"bro",
"yaf",
"snort",
"error",
"metaalert"
]
def is_configured(self):
return self.__configured
def is_acl_configured(self):
return self.__acl_configured
def is_hdfs_perm_configured(self):
return self.__hdfs_perm_configured
def is_hbase_configured(self):
return self.__hbase_configured
def is_hbase_acl_configured(self):
return self.__hbase_acl_configured
def is_elasticsearch_template_installed(self):
return self.__elasticsearch_template_installed
def is_solr_schema_installed(self):
return self.__solr_schema_installed
def set_configured(self):
metron_service.set_configured(self.__params.metron_user, self.__params.indexing_configured_flag_file, "Setting Indexing configured to True")
def set_hbase_configured(self):
metron_service.set_configured(self.__params.metron_user, self.__params.indexing_hbase_configured_flag_file, "Setting HBase configured to True for indexing")
def set_hbase_acl_configured(self):
metron_service.set_configured(self.__params.metron_user, self.__params.indexing_hbase_acl_configured_flag_file, "Setting HBase ACL configured to True for indexing")
def set_acl_configured(self):
metron_service.set_configured(self.__params.metron_user, self.__params.indexing_acl_configured_flag_file, "Setting Indexing ACL configured to True")
def set_hdfs_perm_configured(self):
metron_service.set_configured(self.__params.metron_user, self.__params.indexing_hdfs_perm_configured_flag_file, "Setting HDFS perm configured to True")
def set_elasticsearch_template_installed(self):
metron_service.set_configured(self.__params.metron_user, self.__params.elasticsearch_template_installed_flag_file, "Setting Elasticsearch template installed to True")
def set_solr_schema_installed(self):
metron_service.set_configured(self.__params.metron_user, self.__params.solr_schema_installed_flag_file, "Setting Solr schema installed to True")
def create_hbase_tables(self):
Logger.info("Creating HBase Tables for indexing")
metron_service.create_hbase_table(self.__params,
self.__params.update_hbase_table,
self.__params.update_hbase_cf)
Logger.info("Done creating HBase Tables for indexing")
self.set_hbase_configured()
def set_hbase_acls(self):
Logger.info("Setting HBase ACLs for indexing")
if self.__params.security_enabled:
metron_security.kinit(self.__params.kinit_path_local,
self.__params.hbase_keytab_path,
self.__params.hbase_principal_name,
execute_user=self.__params.hbase_user)
cmd = "echo \"grant '{0}', 'RW', '{1}'\" | hbase shell -n"
add_update_acl_cmd = cmd.format(self.__params.metron_user, self.__params.update_hbase_table)
Execute(add_update_acl_cmd,
tries=3,
try_sleep=5,
logoutput=False,
path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
user=self.__params.hbase_user
)
Logger.info("Done setting HBase ACLs for indexing")
self.set_hbase_acl_configured()
def init_kafka_topics(self):
Logger.info('Creating Kafka topics for indexing')
metron_service.init_kafka_topics(self.__params, self.__get_topics())
def init_kafka_acls(self):
Logger.info('Creating Kafka ACLs for indexing')
metron_service.init_kafka_acls(self.__params, self.__get_topics())
metron_service.init_kafka_acl_groups(self.__params, self.__get_kafka_acl_groups())
def init_hdfs_dir(self):
Logger.info('Setting up HDFS indexing directory')
# Non-Kerberized Metron runs under 'storm', which needs group write access for the 'hadoop' group (mode 0775).
# Kerberized Metron runs under its own user, so owner-only write is sufficient (mode 0755).
ownership = 0755 if self.__params.security_enabled else 0775
Logger.info('HDFS indexing directory ownership is: ' + str(ownership))
self.__params.HdfsResource(self.__params.metron_apps_indexed_hdfs_dir,
type="directory",
action="create_on_execute",
owner=self.__params.metron_user,
group=self.__params.hadoop_group,
mode=ownership,
)
Logger.info('Done creating HDFS indexing directory')
def check_elasticsearch_templates(self):
for template_name in self.get_templates():
# check for the index template
cmd = "curl -s -XGET \"http://{0}/_template/{1}\" | grep -o {1}"
err_msg="Missing Elasticsearch index template: name={0}"
metron_service.execute(
cmd=cmd.format(self.__params.es_http_url, template_name),
user=self.__params.metron_user,
err_msg=err_msg.format(template_name))
def solr_schema_install(self, env):
from params import params
env.set_params(params)
Logger.info("Installing Solr schemas")
if self.__params.security_enabled:
metron_security.kinit(self.__params.kinit_path_local,
self.__params.solr_keytab_path,
self.__params.solr_principal_name,
self.__params.solr_user)
commands = IndexingCommands(params)
for collection_name in commands.get_solr_schemas():
# install the schema
cmd = format((
"export ZOOKEEPER={solr_zookeeper_url};"
"export SECURITY_ENABLED={security_enabled};"
))
cmd += "{0}/bin/create_collection.sh {1};"
Execute(
cmd.format(params.metron_home, collection_name),
user=self.__params.solr_user)
def solr_schema_delete(self, env):
from params import params
env.set_params(params)
Logger.info("Deleting Solr schemas")
if self.__params.security_enabled:
metron_security.kinit(self.__params.kinit_path_local,
self.__params.solr_keytab_path,
self.__params.solr_principal_name,
self.__params.solr_user)
commands = IndexingCommands(params)
for collection_name in commands.get_solr_schemas():
# delete the schema
cmd = format((
"export ZOOKEEPER={solr_zookeeper_url};"
"export SECURITY_ENABLED={security_enabled};"
))
cmd += "{0}/bin/delete_collection.sh {1};"
Execute(
cmd.format(params.metron_home, collection_name),
user=self.__params.solr_user)
def start_batch_indexing_topology(self, env):
Logger.info('Starting ' + self.__batch_indexing_topology)
if not self.is_batch_topology_active(env):
if self.__params.security_enabled:
metron_security.kinit(self.__params.kinit_path_local,
self.__params.metron_keytab_path,
self.__params.metron_principal_name,
execute_user=self.__params.metron_user)
start_cmd_template = """{0}/bin/start_hdfs_topology.sh"""
start_cmd = start_cmd_template.format(self.__params.metron_home)
Execute(start_cmd, user=self.__params.metron_user, tries=3, try_sleep=5, logoutput=True)
else:
Logger.info('Batch Indexing topology already running')
Logger.info('Finished starting batch indexing topology')
def start_random_access_indexing_topology(self, env):
Logger.info('Starting ' + self.__random_access_indexing_topology)
if not self.is_random_access_topology_active(env):
if self.__params.security_enabled:
metron_security.kinit(self.__params.kinit_path_local,
self.__params.metron_keytab_path,
self.__params.metron_principal_name,
execute_user=self.__params.metron_user)
start_cmd_template = """{0}/bin/start_elasticsearch_topology.sh"""
if self.__params.ra_indexing_writer == 'Solr':
start_cmd_template = """{0}/bin/start_solr_topology.sh"""
start_cmd = start_cmd_template.format(self.__params.metron_home)
Execute(start_cmd, user=self.__params.metron_user, tries=3, try_sleep=5, logoutput=True)
else:
Logger.info('Random Access Indexing topology already running')
Logger.info('Finished starting random access indexing topology')
def start_indexing_topology(self, env):
self.start_batch_indexing_topology(env)
self.start_random_access_indexing_topology(env)
Logger.info('Finished starting indexing topologies')
def stop_batch_indexing_topology(self, env):
Logger.info('Stopping ' + self.__batch_indexing_topology)
if self.is_batch_topology_active(env):
if self.__params.security_enabled:
metron_security.kinit(self.__params.kinit_path_local,
self.__params.metron_keytab_path,
self.__params.metron_principal_name,
execute_user=self.__params.metron_user)
stop_cmd = 'storm kill ' + self.__batch_indexing_topology
Execute(stop_cmd, user=self.__params.metron_user, tries=3, try_sleep=5, logoutput=True)
else:
Logger.info("Batch Indexing topology already stopped")
Logger.info('Done stopping batch indexing topologies')
def stop_random_access_indexing_topology(self, env):
Logger.info('Stopping ' + self.__random_access_indexing_topology)
if self.is_random_access_topology_active(env):
if self.__params.security_enabled:
metron_security.kinit(self.__params.kinit_path_local,
self.__params.metron_keytab_path,
self.__params.metron_principal_name,
execute_user=self.__params.metron_user)
stop_cmd = 'storm kill ' + self.__random_access_indexing_topology
Execute(stop_cmd, user=self.__params.metron_user, tries=3, try_sleep=5, logoutput=True)
else:
Logger.info("Random Access Indexing topology already stopped")
Logger.info('Done stopping random access indexing topologies')
def stop_indexing_topology(self, env):
self.stop_batch_indexing_topology(env)
self.stop_random_access_indexing_topology(env)
Logger.info('Done stopping indexing topologies')
def restart_indexing_topology(self, env):
Logger.info('Restarting the indexing topologies')
self.restart_batch_indexing_topology(env)
self.restart_random_access_indexing_topology(env)
def restart_batch_indexing_topology(self, env):
Logger.info('Restarting the batch indexing topology')
self.stop_batch_indexing_topology(env)
# Wait for old topology to be cleaned up by Storm, before starting again.
retries = 0
topology_active = self.is_batch_topology_active(env)
while topology_active and retries < 3:
Logger.info('Existing batch topology still active. Will wait and retry')
time.sleep(10)
retries += 1
topology_active = self.is_batch_topology_active(env)
if not topology_active:
Logger.info('Waiting for storm kill to complete')
time.sleep(30)
self.start_batch_indexing_topology(env)
Logger.info('Done restarting the batch indexing topology')
else:
Logger.warning('Retries exhausted. Existing topology not cleaned up. Aborting topology start.')
def restart_random_access_indexing_topology(self, env):
Logger.info('Restarting the random access indexing topology')
self.stop_random_access_indexing_topology(env)
# Wait for old topology to be cleaned up by Storm, before starting again.
retries = 0
topology_active = self.is_random_access_topology_active(env)
while topology_active and retries < 3:
Logger.info('Existing random access topology still active. Will wait and retry')
time.sleep(10)
retries += 1
topology_active = self.is_random_access_topology_active(env)
if not topology_active:
Logger.info('Waiting for storm kill to complete')
time.sleep(30)
self.start_random_access_indexing_topology(env)
Logger.info('Done restarting the random access indexing topology')
else:
Logger.warning('Retries exhausted. Existing topology not cleaned up. Aborting topology start.')
def is_batch_topology_active(self, env):
env.set_params(self.__params)
topologies = metron_service.get_running_topologies(self.__params)
is_batch_running = False
if self.__batch_indexing_topology in topologies:
is_batch_running = topologies[self.__batch_indexing_topology] in ['ACTIVE', 'REBALANCING']
return is_batch_running
def is_random_access_topology_active(self, env):
env.set_params(self.__params)
topologies = metron_service.get_running_topologies(self.__params)
is_random_access_running = False
if self.__random_access_indexing_topology in topologies:
is_random_access_running = topologies[self.__random_access_indexing_topology] in ['ACTIVE', 'REBALANCING']
return is_random_access_running
def is_topology_active(self, env):
return self.is_batch_topology_active(env) and self.is_random_access_topology_active(env)
def service_check(self, env):
"""
Performs a service check for Indexing.
:param env: Environment
"""
metron_service.check_indexer_parameters()
Logger.info('Checking Kafka topics for Indexing')
metron_service.check_kafka_topics(self.__params, self.__get_topics())
Logger.info("Checking HBase for Indexing")
metron_service.check_hbase_table(self.__params, self.__params.update_hbase_table)
metron_service.check_hbase_column_family(self.__params, self.__params.update_hbase_table, self.__params.update_hbase_cf)
Logger.info('Checking Elasticsearch templates for Indexing')
self.check_elasticsearch_templates()
if self.__params.security_enabled:
Logger.info('Checking Kafka ACLs for Indexing')
metron_service.check_kafka_acls(self.__params, self.__get_topics())
metron_service.check_kafka_acl_groups(self.__params, self.__get_kafka_acl_groups())
Logger.info("Checking HBase ACLs for Indexing")
metron_service.check_hbase_acls(self.__params, self.__params.update_hbase_table)
Logger.info("Checking for Indexing topology")
if not self.is_topology_active(env):
raise Fail("Indexing topology not running")
Logger.info("Indexing service check completed successfully")
def get_zeppelin_auth_details(self, ses, zeppelin_server_url, env):
"""
With Ambari 2.5+, Zeppelin server is enabled to work with Shiro authentication, which requires user/password
for authentication (see https://zeppelin.apache.org/docs/0.6.0/security/shiroauthentication.html for details).
This method checks whether Shiro authentication is enabled on the Zeppelin server and, if it is, returns the
session connection details to be used for importing Zeppelin notebooks.
:param ses: Session handle
:param zeppelin_server_url: Zeppelin Server URL
:return: ses
"""
from params import params
env.set_params(params)
# Check if authentication is enabled on the Zeppelin server
try:
ses.get(ambari_format('http://{zeppelin_server_url}/api/login'))
# Establish connection if authentication is enabled
try:
Logger.info("Shiro authentication is found to be enabled on the Zeppelin server.")
# Read the Shiro admin user credentials from Zeppelin config in Ambari
seen_users = False
username = None
password = None
if re.search(r'^\[users\]', params.zeppelin_shiro_ini_content, re.MULTILINE):
seen_users = True
tokens = re.search(r'^admin\ =.*', params.zeppelin_shiro_ini_content, re.MULTILINE).group()
userpassword = tokens.split(',')[0].strip()
username = userpassword.split('=')[0].strip()
password = userpassword.split('=')[1].strip()
else:
Logger.error("ERROR: Admin credentials config was not found in shiro.ini. Notebook import may fail.")
zeppelin_payload = {'userName': username, 'password' : password}
ses.post(ambari_format('http://{zeppelin_server_url}/api/login'), data=zeppelin_payload)
except:
pass
# If authentication is not enabled, fall back to the default method of importing notebooks
except requests.exceptions.RequestException:
ses.get(ambari_format('http://{zeppelin_server_url}/api/notebook'))
return ses
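# A minimal sketch of how this class is typically driven from an Ambari service
# script; the Indexing(Script) wrapper and its lifecycle hooks below are
# assumptions for illustration, not part of this module:
#
#     from resource_management.libraries.script import Script
#     from params import params
#
#     class Indexing(Script):
#         def configure(self, env, upgrade_type=None, config_dir=None):
#             commands = IndexingCommands(params)
#             if not commands.is_configured():
#                 commands.init_kafka_topics()
#                 commands.init_hdfs_dir()
#                 commands.set_configured()
#
#         def start(self, env, upgrade_type=None):
#             self.configure(env)
#             IndexingCommands(params).start_indexing_topology(env)
#
#         def stop(self, env, upgrade_type=None):
#             IndexingCommands(params).stop_indexing_topology(env)
#
#     if __name__ == "__main__":
#         Indexing().execute()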
|
|
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import collections
from matplotlib.patches import Circle
import networkx as nx
from skimage import img_as_float, morphology
from skimage.color import gray2rgb
def _normalise_image(image, *, image_cmap=None):
image = img_as_float(image)
if image.ndim == 2:
if image_cmap is None:
image = gray2rgb(image)
else:
image = plt.get_cmap(image_cmap)(image)[..., :3]
return image
def pixel_perfect_figsize(image, dpi=80):
"""Return the Matplotlib figure size tuple (w, h) for given image and dpi.
Parameters
----------
image : array, shape (M, N[, 3])
The image to be plotted.
dpi : int, optional
The desired figure dpi.
Returns
-------
figsize : tuple of float
The desired figure size.
Examples
--------
>>> image = np.empty((768, 1024))
>>> pixel_perfect_figsize(image)
(12.8, 9.6)
"""
hpix, wpix = image.shape[:2]
return wpix / dpi, hpix / dpi
def overlay_skeleton_2d(
image,
skeleton,
*,
image_cmap=None,
color=(1, 0, 0),
alpha=1,
dilate=0,
axes=None
):
"""Overlay the skeleton pixels on the input image.
Parameters
----------
image : array, shape (M, N[, 3])
The input image. Can be grayscale or RGB.
skeleton : array, shape (M, N)
The input 1-pixel-wide skeleton.
Other Parameters
----------------
image_cmap : matplotlib colormap name or object, optional
If the input image is grayscale, colormap it with this colormap.
The default is grayscale.
color : tuple of float in [0, 1], optional
The RGB color for the skeleton pixels.
alpha : float, optional
Blend the skeleton pixels with the given alpha.
dilate : int, optional
Dilate the skeleton by this amount. This is useful when rendering
large images where aliasing may cause some pixels of the skeleton
not to be drawn.
axes : matplotlib Axes
The Axes on which to plot the image. If None, new ones are created.
Returns
-------
axes : matplotlib Axes
The Axes on which the image is drawn.
"""
image = _normalise_image(image, image_cmap=image_cmap)
skeleton = skeleton.astype(bool)
if dilate > 0:
selem = morphology.disk(dilate)
skeleton = morphology.binary_dilation(skeleton, selem)
if axes is None:
fig, axes = plt.subplots()
image[skeleton] = alpha * np.array(color) + (1-alpha) * image[skeleton]
axes.imshow(image)
axes.axis('off')
return axes
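# Example usage (a minimal sketch; the demo image and the skeletonize call are
# assumptions, not part of this module):
#
#     from skimage import data, morphology
#     blobs = data.binary_blobs(length=256, seed=0)
#     skel = morphology.skeletonize(blobs)
#     ax = overlay_skeleton_2d(blobs, skel, dilate=1)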
def overlay_euclidean_skeleton_2d(
image,
stats,
*,
image_cmap=None,
skeleton_color_source='branch-type',
skeleton_colormap='viridis',
axes=None
):
"""Plot the image, and overlay the straight-line skeleton over it.
Parameters
----------
image : array, shape (M, N)
The input image.
stats : array, shape (M, N)
Skeleton statistics.
Other Parameters
----------------
image_cmap : matplotlib colormap name or object, optional
The colormap to use for the input image. Defaults to grayscale.
skeleton_color_source : string, optional
The name of the column to use for the skeleton edge color. See the
output of `skan.summarise` for valid choices. Most common choices
would be:
- skeleton-id: each individual skeleton (connected component) will
have a different colour.
- branch-type: each branch type (tip-tip, tip-junction,
junction-junction, path-path). This is the default.
- branch-distance: the curved length of the skeleton branch.
- euclidean-distance: the straight-line length of the skeleton branch.
skeleton_colormap : matplotlib colormap name or object, optional
The colormap for the skeleton values.
axes : matplotlib Axes object, optional
An Axes object on which to draw. If `None`, a new one is created.
Returns
-------
axes : matplotlib Axes object
The Axes on which the plot is drawn.
"""
image = _normalise_image(image, image_cmap=image_cmap)
summary = stats
# transforming from row, col to x, y
coords_cols = (['image-coord-src-%i' % i for i in [1, 0]]
+ ['image-coord-dst-%i' % i for i in [1, 0]])
coords = summary[coords_cols].values.reshape((-1, 2, 2))
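# coords now has shape (n_branches, 2, 2): one (x, y) endpoint pair for the
# source and destination of each branch, the segment format that
# LineCollection expects below.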
if axes is None:
fig, axes = plt.subplots()
axes.imshow(image)
axes.axis('off')
color_values = summary[skeleton_color_source]
cmap = plt.get_cmap(
skeleton_colormap, min(len(np.unique(color_values)), 256)
)
colormapped = cmap((color_values - np.min(color_values)) /
(np.max(color_values) - np.min(color_values)))
linecoll = collections.LineCollection(coords, colors=colormapped)
axes.add_collection(linecoll)
return axes
def overlay_skeleton_2d_class(
skeleton,
*,
image_cmap='gray',
skeleton_color_source='path_means',
skeleton_colormap='viridis',
vmin=None,
vmax=None,
axes=None
):
"""Plot the image, and overlay the skeleton over it.
Parameters
----------
skeleton : skan.Skeleton object
The input skeleton, which contains both the skeleton and the source
image.
Other Parameters
----------------
image_cmap : matplotlib colormap name or object, optional
The colormap to use for the input image. Defaults to grayscale.
skeleton_color_source : string or callable, optional
The name of the method to use for the skeleton edge color. See the
documentation of `skan.Skeleton` for valid choices. Most common choices
would be:
- path_means: the mean value of the skeleton along each path.
- path_lengths: the length of each path.
- path_stdev: the standard deviation of pixel values along the path.
Alternatively, a callable can be provided that takes as input a
Skeleton object and outputs a list of floating point values of the same
length as the number of paths.
skeleton_colormap : matplotlib colormap name or object, optional
The colormap for the skeleton values.
vmin, vmax : float, optional
The minimum and maximum values for the colormap. Use this to pin the
colormapped values to a certain range.
axes : matplotlib Axes object, optional
An Axes object on which to draw. If `None`, a new one is created.
Returns
-------
axes : matplotlib Axes object
The Axes on which the plot is drawn.
mappable : matplotlib ScalarMappable object
The mappable values corresponding to the line colors. This can be used
to create a colorbar for the plot.
"""
if axes is None:
fig, axes = plt.subplots()
if skeleton.source_image is not None:
axes.imshow(skeleton.source_image, cmap=image_cmap)
if callable(skeleton_color_source):
values = skeleton_color_source(skeleton)
elif hasattr(skeleton, skeleton_color_source):
values = getattr(skeleton, skeleton_color_source)()
else:
raise ValueError(
'Unknown skeleton color source: %s. Provide an '
'attribute of skan.csr.Skeleton or a callable.'
% skeleton_color_source
)
cmap = plt.get_cmap(skeleton_colormap, min(len(np.unique(values)), 256))
if vmin is None:
vmin = np.min(values)
if vmax is None:
vmax = np.max(values)
mapping_values = (values-vmin) / (vmax-vmin)
mappable = plt.cm.ScalarMappable(plt.Normalize(vmin, vmax), cmap)
mappable._A = mapping_values
colors = cmap(mapping_values)
coordinates = [
skeleton.path_coordinates(i)[:, ::-1]
for i in range(skeleton.n_paths)
]
linecoll = collections.LineCollection(coordinates, colors=colors)
axes.add_collection(linecoll)
return axes, mappable
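# Example usage (a minimal sketch; `skel` is assumed to be a skan.Skeleton
# built elsewhere from a skeleton image):
#
#     ax, mappable = overlay_skeleton_2d_class(skel)
#     plt.colorbar(mappable, ax=ax)  # colorbar for the per-path values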
def sholl_shells(center, radii, *, axes=None, **kwargs):
"""Draw concentric circles around a center point.
Parameters
----------
center : array of float, shape (2,)
The center of the circles. This should be in NumPy-style row/column
coordinates.
radii : array of float, shape (N,)
The radii of the concentric circles.
axes : matplotlib Axes, optional
The axes on which to draw the circles. If None, create a new instance.
Returns
-------
axes : matplotlib Axes
The axes on which the circles were drawn
patches : list of matplotlib Patches
The patch objects that were drawn.
Notes
-----
Additional keyword arguments are passed directly to the
`matplotlib.patches.Circle` call. Valid keywords include ``edgecolor``,
``linestyle``, and ``linewidth``. See matplotlib documentation for details.
"""
row, col = center
color = kwargs.pop('edgecolor', 'cornflowerblue')
circles = [
Circle((col, row), radius=r, fill=False, edgecolor=color, **kwargs)
for r in radii
]
if axes is None:
_, axes = plt.subplots()
for c in circles:
axes.add_patch(c)
return axes, circles
def pipeline_plot(
image,
thresholded,
skeleton,
stats,
*,
figure=None,
axes=None,
figsize=(9, 9)
):
"""Draw the image, the thresholded version, and its skeleton.
Parameters
----------
image : array, shape (M, N, ...[, 3])
Input image, conformant with scikit-image data type
specification [1]_.
thresholded : array, same shape as image
Binarized version of the input image.
skeleton : array, same shape as image
Skeletonized version of the input image.
stats : pandas DataFrame
Skeleton statistics from the input image/skeleton.
Other Parameters
----------------
figure : matplotlib Figure, optional
If given, where to make the plots.
axes : array of matplotlib Axes, optional
If given, use these axes to draw the plots. Should have length 4.
figsize : 2-tuple of float, optional
The width and height of the figure.
Returns
-------
fig : matplotlib Figure
The Figure containing all the plots
axes : array of matplotlib Axes
The four axes containing the drawn images.
References
----------
.. [1] http://scikit-image.org/docs/dev/user_guide/data_types.html
"""
if figure is None:
fig, axes = plt.subplots(
2, 2, figsize=figsize, sharex=True, sharey=True
)
axes = np.ravel(axes)
else:
fig = figure
if axes is None:
ax0 = fig.add_subplot(2, 2, 1)
axes = [ax0] + [
fig.add_subplot(2, 2, i, sharex=ax0, sharey=ax0)
for i in range(2, 5)
]
axes = np.ravel(axes)
axes[0].imshow(image, cmap='gray')
axes[0].axis('off')
axes[1].imshow(thresholded, cmap='gray')
axes[1].axis('off')
overlay_skeleton_2d(image, skeleton, axes=axes[2])
overlay_euclidean_skeleton_2d(image, stats, axes=axes[3])
fig.subplots_adjust(0, 0, 1, 1, 0, 0)
return fig, axes
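# Example usage (a minimal sketch; the thresholding, skeletonization, and
# skan.summarize calls are assumptions about a typical upstream pipeline):
#
#     from skimage import filters, morphology
#     from skan import Skeleton, summarize
#     thresholded = image > filters.threshold_otsu(image)
#     skeleton = morphology.skeletonize(thresholded)
#     fig, axes = pipeline_plot(
#         image, thresholded, skeleton, summarize(Skeleton(skeleton))
#     )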
def _clean_positions_dict(d, g):
for k in list(d.keys()):
if k not in g:
del d[k]
elif g.degree(k) == 0:
g.remove_node(k)
def overlay_skeleton_networkx(
csr_graph, coordinates, *, axis=None, image=None, cmap=None, **kwargs
):
"""Draw the skeleton as a NetworkX graph, optionally overlaid on an image.
Due to the size of NetworkX drawing elements, this is only recommended
for very small skeletons.
Parameters
----------
csr_graph : SciPy Sparse matrix
The skeleton graph in SciPy CSR format.
coordinates : array, shape (N_points, 2)
The coordinates of each point in the skeleton. ``coordinates.shape[0]``
should be equal to ``csr_graph.shape[0]``.
Other Parameters
----------------
axis : Matplotlib Axes object, optional
The Axes on which to plot the data. If None, a new figure and axes will
be created.
image : array, shape (M, N[, 3])
An image on which to overlay the skeleton. ``image.shape`` should be
greater than ``np.max(coordinates, axis=0)``.
**kwargs : keyword arguments
Arguments passed on to `nx.draw_networkx`. Particularly useful ones
include ``node_size=`` and ``font_size=``.
"""
if axis is None:
_, axis = plt.subplots()
if image is not None:
cmap = cmap or 'gray'
axis.imshow(image, cmap=cmap)
gnx = nx.from_scipy_sparse_matrix(csr_graph)
# Note: we invert the positions because Matplotlib plots in x/y order,
# while the coordinates are in row/column (NumPy) order.
positions = dict(zip(range(coordinates.shape[0]), coordinates[:, ::-1]))
_clean_positions_dict(positions, gnx) # remove nodes not in Graph
nx.draw_networkx(gnx, pos=positions, ax=axis, **kwargs)
return axis
|
|
"""The lookin integration climate platform."""
from __future__ import annotations
from collections.abc import Coroutine
from datetime import timedelta
import logging
from typing import Any, Callable, Final, cast
from aiolookin import Climate, MeteoSensor, SensorID
from homeassistant.components.climate import ClimateEntity
from homeassistant.components.climate.const import (
FAN_AUTO,
FAN_HIGH,
FAN_LOW,
FAN_MIDDLE,
HVAC_MODE_AUTO,
HVAC_MODE_COOL,
HVAC_MODE_DRY,
HVAC_MODE_FAN_ONLY,
HVAC_MODE_HEAT,
HVAC_MODE_OFF,
SUPPORT_FAN_MODE,
SUPPORT_SWING_MODE,
SUPPORT_TARGET_TEMPERATURE,
SWING_BOTH,
SWING_OFF,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import ATTR_TEMPERATURE, PRECISION_WHOLE, TEMP_CELSIUS
from homeassistant.core import HomeAssistant, callback
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.update_coordinator import DataUpdateCoordinator
from .const import DOMAIN
from .entity import LookinCoordinatorEntity
from .models import LookinData
SUPPORT_FLAGS: int = SUPPORT_TARGET_TEMPERATURE | SUPPORT_FAN_MODE | SUPPORT_SWING_MODE
LOOKIN_FAN_MODE_IDX_TO_HASS: Final = [FAN_AUTO, FAN_LOW, FAN_MIDDLE, FAN_HIGH]
LOOKIN_SWING_MODE_IDX_TO_HASS: Final = [SWING_OFF, SWING_BOTH]
LOOKIN_HVAC_MODE_IDX_TO_HASS: Final = [
HVAC_MODE_OFF,
HVAC_MODE_AUTO,
HVAC_MODE_COOL,
HVAC_MODE_HEAT,
HVAC_MODE_DRY,
HVAC_MODE_FAN_ONLY,
]
HASS_TO_LOOKIN_HVAC_MODE: dict[str, int] = {
mode: idx for idx, mode in enumerate(LOOKIN_HVAC_MODE_IDX_TO_HASS)
}
HASS_TO_LOOKIN_FAN_MODE: dict[str, int] = {
mode: idx for idx, mode in enumerate(LOOKIN_FAN_MODE_IDX_TO_HASS)
}
HASS_TO_LOOKIN_SWING_MODE: dict[str, int] = {
mode: idx for idx, mode in enumerate(LOOKIN_SWING_MODE_IDX_TO_HASS)
}
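# The device protocol addresses modes by list index, so the HASS_TO_LOOKIN_*
# dicts above are just inverse lookups of the *_IDX_TO_HASS lists, e.g.
# HASS_TO_LOOKIN_HVAC_MODE[HVAC_MODE_COOL] == 2 and
# LOOKIN_HVAC_MODE_IDX_TO_HASS[2] == HVAC_MODE_COOL.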
MIN_TEMP: Final = 16
MAX_TEMP: Final = 30
LOGGER = logging.getLogger(__name__)
async def async_setup_entry(
hass: HomeAssistant,
config_entry: ConfigEntry,
async_add_entities: AddEntitiesCallback,
) -> None:
"""Set up the climate platform for lookin from a config entry."""
lookin_data: LookinData = hass.data[DOMAIN][config_entry.entry_id]
entities = []
for remote in lookin_data.devices:
if remote["Type"] != "EF":
continue
uuid = remote["UUID"]
def _wrap_async_update(
uuid: str,
) -> Callable[[], Coroutine[None, Any, Climate]]:
"""Create a function to capture the uuid cell variable."""
async def _async_update() -> Climate:
return await lookin_data.lookin_protocol.get_conditioner(uuid)
return _async_update
coordinator = DataUpdateCoordinator(
hass,
LOGGER,
name=f"{config_entry.title} {uuid}",
update_method=_wrap_async_update(uuid),
update_interval=timedelta(
seconds=60
), # Updates are pushed (fallback is polling)
)
await coordinator.async_refresh()
device: Climate = coordinator.data
entities.append(
ConditionerEntity(
uuid=uuid,
device=device,
lookin_data=lookin_data,
coordinator=coordinator,
)
)
async_add_entities(entities)
class ConditionerEntity(LookinCoordinatorEntity, ClimateEntity):
"""An aircon or heat pump."""
_attr_temperature_unit = TEMP_CELSIUS
_attr_supported_features: int = SUPPORT_FLAGS
_attr_fan_modes: list[str] = LOOKIN_FAN_MODE_IDX_TO_HASS
_attr_swing_modes: list[str] = LOOKIN_SWING_MODE_IDX_TO_HASS
_attr_hvac_modes: list[str] = LOOKIN_HVAC_MODE_IDX_TO_HASS
_attr_min_temp = MIN_TEMP
_attr_max_temp = MAX_TEMP
_attr_target_temperature_step = PRECISION_WHOLE
def __init__(
self,
uuid: str,
device: Climate,
lookin_data: LookinData,
coordinator: DataUpdateCoordinator,
) -> None:
"""Init the ConditionerEntity."""
super().__init__(coordinator, uuid, device, lookin_data)
self._async_update_from_data()
@property
def _climate(self) -> Climate:
return cast(Climate, self.coordinator.data)
async def async_set_hvac_mode(self, hvac_mode: str) -> None:
"""Set the hvac mode of the device."""
if (mode := HASS_TO_LOOKIN_HVAC_MODE.get(hvac_mode)) is None:
return
self._climate.hvac_mode = mode
await self._async_update_conditioner()
async def async_set_temperature(self, **kwargs: Any) -> None:
"""Set the temperature of the device."""
if (temperature := kwargs.get(ATTR_TEMPERATURE)) is None:
return
self._climate.temp_celsius = int(temperature)
await self._async_update_conditioner()
async def async_set_fan_mode(self, fan_mode: str) -> None:
"""Set the fan mode of the device."""
if (mode := HASS_TO_LOOKIN_FAN_MODE.get(fan_mode)) is None:
return
self._climate.fan_mode = mode
await self._async_update_conditioner()
async def async_set_swing_mode(self, swing_mode: str) -> None:
"""Set the swing mode of the device."""
if (mode := HASS_TO_LOOKIN_SWING_MODE.get(swing_mode)) is None:
return
self._climate.swing_mode = mode
await self._async_update_conditioner()
async def _async_update_conditioner(self) -> None:
"""Update the conditioner state from the climate data."""
self.coordinator.async_set_updated_data(self._climate)
await self._lookin_protocol.update_conditioner(climate=self._climate)
def _async_update_from_data(self) -> None:
"""Update attrs from data."""
meteo_data: MeteoSensor = self._meteo_coordinator.data
self._attr_current_temperature = meteo_data.temperature
self._attr_current_humidity = int(meteo_data.humidity)
self._attr_target_temperature = self._climate.temp_celsius
self._attr_fan_mode = LOOKIN_FAN_MODE_IDX_TO_HASS[self._climate.fan_mode]
self._attr_swing_mode = LOOKIN_SWING_MODE_IDX_TO_HASS[self._climate.swing_mode]
self._attr_hvac_mode = LOOKIN_HVAC_MODE_IDX_TO_HASS[self._climate.hvac_mode]
@callback
def _handle_coordinator_update(self) -> None:
"""Handle updated data from the coordinator."""
self._async_update_from_data()
super()._handle_coordinator_update()
@callback
def _async_push_update(self, msg: dict[str, str]) -> None:
"""Process an update pushed via UDP."""
LOGGER.debug("Processing push message for %s: %s", self.entity_id, msg)
self._climate.update_from_status(msg["value"])
self.coordinator.async_set_updated_data(self._climate)
async def async_added_to_hass(self) -> None:
"""Call when the entity is added to hass."""
self.async_on_remove(
self._lookin_udp_subs.subscribe_sensor(
self._lookin_device.id, SensorID.IR, self._uuid, self._async_push_update
)
)
self.async_on_remove(
self._meteo_coordinator.async_add_listener(self._handle_coordinator_update)
)
return await super().async_added_to_hass()
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
| This file is part of the web2py Web Framework
| Created by niphlod@gmail.com
| License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html)
Scheduler with redis backend
---------------------------------
"""
import os
import time
import socket
import datetime
import logging
from gluon.utils import web2py_uuid
from gluon.storage import Storage
from gluon.scheduler import *
from gluon.scheduler import _decode_dict
from gluon.contrib.redis_utils import RWatchError
USAGE = """
## Example
For any existing app
Create File: app/models/scheduler.py ======
from gluon.contrib.redis_utils import RConn
from gluon.contrib.redis_scheduler import RScheduler
def demo1(*args,**vars):
print 'you passed args=%s and vars=%s' % (args, vars)
return 'done!'
def demo2():
1/0
rconn = RConn()
mysched = RScheduler(db, dict(demo1=demo1,demo2=demo2), ...., redis_conn=rconn)
## run worker nodes with:
cd web2py
python web2py.py -K app
"""
path = os.getcwd()
if 'WEB2PY_PATH' not in os.environ:
os.environ['WEB2PY_PATH'] = path
try:
# try external module
from simplejson import loads, dumps
except ImportError:
try:
# try stdlib (Python >= 2.6)
from json import loads, dumps
except:
# fallback to pure-Python module
from gluon.contrib.simplejson import loads, dumps
IDENTIFIER = "%s#%s" % (socket.gethostname(), os.getpid())
logger = logging.getLogger('web2py.scheduler.%s' % IDENTIFIER)
POLLING = 'POLLING'
class RScheduler(Scheduler):
def __init__(self, db, tasks=None, migrate=True,
worker_name=None, group_names=None, heartbeat=HEARTBEAT,
max_empty_runs=0, discard_results=False, utc_time=False,
redis_conn=None, mode=1):
"""
Highly-experimental coordination with redis
Takes all args from Scheduler except redis_conn which
must be something like a StrictRedis instance.
My only regret - and the reason why I kept this under the hood for a
while - is that it's hard to hook up in web2py to something happening
right after the commit to a table, which would enable this version of the
scheduler to process "immediate" tasks right away instead of waiting a
few seconds (see FIXME in queue_task())
mode is reserved for future usage patterns.
Right now it moves the coordination (which is the most intensive
routine in the scheduler in matters of IPC) of workers to redis.
I'd like to have incrementally redis-backed modes of operations,
such as e.g.:
- 1: IPC through redis (which is the current implementation)
- 2: Store task results in redis (which will relieve further pressure
from the db leaving the scheduler_run table empty and possibly
keep things smooth as tasks results can be set to expire
after a bit of time)
- 3: Move all the logic for storing and queueing tasks to redis
itself - which means no scheduler_task usage either - and use
the database only for historical record-keeping
(e.g. for reporting)
As usual, I'm eager to see your comments.
"""
Scheduler.__init__(self, db, tasks=tasks, migrate=migrate,
worker_name=worker_name, group_names=group_names,
heartbeat=heartbeat, max_empty_runs=max_empty_runs,
discard_results=discard_results, utc_time=utc_time)
self.r_server = redis_conn
from gluon import current
self._application = current.request.application or 'appname'
def _nkey(self, key):
"""Helper to restrict all keys to a namespace and track them."""
prefix = 'w2p:rsched:%s' % self._application
allkeys = '%s:allkeys' % prefix
newkey = "%s:%s" % (prefix, key)
self.r_server.sadd(allkeys, newkey)
return newkey
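# For example, with application name "myapp":
#   _nkey('queued:main') -> 'w2p:rsched:myapp:queued:main'
# The key is also recorded in the 'w2p:rsched:myapp:allkeys' set so that
# prune_all() can later delete every key this scheduler created.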
def prune_all(self):
"""Global housekeeping."""
all_keys = self._nkey('allkeys')
with self.r_server.pipeline() as pipe:
while True:
try:
pipe.watch('PRUNE_ALL')
while True:
k = pipe.spop(all_keys)
if k is None:
break
pipe.delete(k)
pipe.execute()
break
except RWatchError:
time.sleep(0.1)
continue
def dt2str(self, value):
return value.strftime('%Y-%m-%d %H:%M:%S')
def str2date(self, value):
return datetime.datetime.strptime(value, '%Y-%m-%d %H:%M:%S')
def send_heartbeat(self, counter):
"""
Worker coordination in redis.
It has evolved into something that is not that easy.
Here we try to do what we need in a single transaction,
and retry that transaction if something goes wrong
"""
with self.r_server.pipeline() as pipe:
while True:
try:
pipe.watch('SEND_HEARTBEAT')
self.inner_send_heartbeat(counter, pipe)
pipe.execute()
self.adj_hibernation()
self.sleep()
break
except RWatchError:
time.sleep(0.1)
continue
def inner_send_heartbeat(self, counter, pipe):
"""
Do a few things in the "maintenance" thread.
Specifically:
- registers the workers
- accepts commands sent to workers (KILL, TERMINATE, PICK, DISABLED, etc)
- adjusts sleep
- saves stats
- elects master
- does "housecleaning" for dead workers
- triggers tasks assignment
"""
r_server = pipe
status_keyset = self._nkey('worker_statuses')
status_key = self._nkey('worker_status:%s' % (self.worker_name))
now = self.now()
mybackedstatus = r_server.hgetall(status_key)
if not mybackedstatus:
r_server.hmset(
status_key,
dict(
status=ACTIVE, worker_name=self.worker_name,
first_heartbeat=self.dt2str(now),
last_heartbeat=self.dt2str(now),
group_names=dumps(self.group_names), is_ticker=False,
worker_stats=dumps(self.w_stats))
)
r_server.sadd(status_keyset, status_key)
if not self.w_stats.status == POLLING:
self.w_stats.status = ACTIVE
self.w_stats.sleep = self.heartbeat
mybackedstatus = ACTIVE
else:
mybackedstatus = mybackedstatus['status']
if mybackedstatus == DISABLED:
# keep sleeping
self.w_stats.status = DISABLED
r_server.hmset(
status_key,
dict(last_heartbeat=self.dt2str(now),
worker_stats=dumps(self.w_stats))
)
elif mybackedstatus == TERMINATE:
self.w_stats.status = TERMINATE
logger.debug("Waiting to terminate the current task")
self.give_up()
elif mybackedstatus == KILL:
self.w_stats.status = KILL
self.die()
else:
if mybackedstatus == STOP_TASK:
logger.info('Asked to kill the current task')
self.terminate_process()
logger.info('........recording heartbeat (%s)',
self.w_stats.status)
r_server.hmset(
status_key,
dict(
last_heartbeat=self.dt2str(now), status=ACTIVE,
worker_stats=dumps(self.w_stats)
)
)
# let the status key expire so records of dead workers eventually disappear
r_server.expire(status_key, self.heartbeat * 3 * 15)
self.w_stats.sleep = self.heartbeat # re-activating the process
if self.w_stats.status not in (RUNNING, POLLING):
self.w_stats.status = ACTIVE
self.do_assign_tasks = False
if counter % 5 == 0 or mybackedstatus == PICK:
try:
logger.info(
' freeing workers that have not sent heartbeat')
registered_workers = r_server.smembers(status_keyset)
allkeys = self._nkey('allkeys')
for worker in registered_workers:
w = r_server.hgetall(worker)
w = Storage(w)
if not w:
r_server.srem(status_keyset, worker)
logger.info('removing %s from %s', worker, allkeys)
r_server.srem(allkeys, worker)
continue
try:
self.is_a_ticker = self.being_a_ticker(pipe)
except:
pass
if self.w_stats.status in (ACTIVE, POLLING):
self.do_assign_tasks = True
if self.is_a_ticker and self.do_assign_tasks:
# I'm a ticker, and 5 loops passed without reassigning tasks,
# let's do that and loop again
if not self.db_thread:
logger.debug('thread building own DAL object')
self.db_thread = DAL(
self.db._uri, folder=self.db._adapter.folder)
self.define_tables(self.db_thread, migrate=False)
db = self.db_thread
self.wrapped_assign_tasks(db)
return None
except:
logger.error('Error assigning tasks')
def being_a_ticker(self, pipe):
"""
Elects a ticker.
This is slightly more convoluted than the original
but if far more efficient
"""
r_server = pipe
status_keyset = self._nkey('worker_statuses')
registered_workers = r_server.smembers(status_keyset)
ticker = None
all_active = []
all_workers = []
for worker in registered_workers:
w = r_server.hgetall(worker)
if w['worker_name'] != self.worker_name and w['status'] == ACTIVE:
all_active.append(w)
if w['is_ticker'] == 'True' and ticker is None:
ticker = w
all_workers.append(w)
not_busy = self.w_stats.status in (ACTIVE, POLLING)
if not ticker:
if not_busy:
# only if this worker isn't busy, otherwise wait for a free one
for worker in all_workers:
key = self._nkey('worker_status:%s' % worker['worker_name'])
if worker['worker_name'] == self.worker_name:
r_server.hset(key, 'is_ticker', True)
else:
r_server.hset(key, 'is_ticker', False)
logger.info("TICKER: I'm a ticker")
else:
# giving up, only if I'm not alone
if len(all_active) > 1:
key = self._nkey('worker_status:%s' % (self.worker_name))
r_server.hset(key, 'is_ticker', False)
else:
not_busy = True
return not_busy
else:
logger.info(
"%s is a ticker, I'm a poor worker" % ticker['worker_name'])
return False
def assign_tasks(self, db):
"""
The real beauty.
We don't need to ASSIGN tasks, we just put
them into the relevant queue
"""
st, sd = db.scheduler_task, db.scheduler_task_deps
r_server = self.r_server
now = self.now()
status_keyset = self._nkey('worker_statuses')
with r_server.pipeline() as pipe:
while 1:
try:
# making sure we're the only one doing the job
pipe.watch('ASSIGN_TASKS')
registered_workers = pipe.smembers(status_keyset)
all_workers = []
for worker in registered_workers:
w = pipe.hgetall(worker)
if w['status'] == ACTIVE:
all_workers.append(Storage(w))
pipe.execute()
break
except RWatchError:
time.sleep(0.1)
continue
# build workers as dict of groups
wkgroups = {}
for w in all_workers:
group_names = loads(w.group_names)
for gname in group_names:
if gname not in wkgroups:
wkgroups[gname] = dict(
workers=[{'name': w.worker_name, 'c': 0}])
else:
wkgroups[gname]['workers'].append(
{'name': w.worker_name, 'c': 0})
# mark queued tasks whose stop_time passed between "runs" (i.e. while the
# scheduler was turned off) as EXPIRED: they weren't expired then, but they are now
db(
(st.status.belongs((QUEUED, ASSIGNED))) &
(st.stop_time < now)
).update(status=EXPIRED)
# calculate dependencies
deps_with_no_deps = db(
(sd.can_visit == False) &
(~sd.task_child.belongs(
db(sd.can_visit == False)._select(sd.task_parent)
)
)
)._select(sd.task_child)
no_deps = db(
(st.status.belongs((QUEUED, ASSIGNED))) &
(
(sd.id == None) | (st.id.belongs(deps_with_no_deps))
)
)._select(st.id, distinct=True, left=sd.on(
(st.id == sd.task_parent) &
(sd.can_visit == False)
)
)
all_available = db(
(st.status.belongs((QUEUED, ASSIGNED))) &
(st.next_run_time <= now) &
(st.enabled == True) &
(st.id.belongs(no_deps))
)
limit = len(all_workers) * (50 / (len(wkgroups) or 1))
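# Heuristic cap: roughly 50 tasks per worker, divided across the groups
# (integer division under Python 2, which this module targets).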
# let's freeze it up
db.commit()
x = 0
r_server = self.r_server
for group in wkgroups.keys():
queued_list = self._nkey('queued:%s' % group)
queued_set = self._nkey('queued_set:%s' % group)
# if tasks are already running, don't assign them again
running_list = self._nkey('running:%s' % group)
while True:
# the joys of rpoplpush!
t = r_server.rpoplpush(running_list, queued_list)
if not t:
# no more
break
r_server.sadd(queued_set, t)
tasks = all_available(st.group_name == group).select(
limitby=(0, limit), orderby = st.next_run_time)
# put tasks in the processing list
for task in tasks:
x += 1
gname = task.group_name
if r_server.sismember(queued_set, task.id):
# already queued, don't put it on the list again
continue
r_server.sadd(queued_set, task.id)
r_server.lpush(queued_list, task.id)
d = dict(status=QUEUED)
if not task.task_name:
d['task_name'] = task.function_name
db(
(st.id == task.id) &
(st.status.belongs((QUEUED, ASSIGNED)))
).update(**d)
db.commit()
# I didn't report tasks but I'm working nonetheless!!!!
if x > 0:
self.w_stats.empty_runs = 0
self.w_stats.queue = x
self.w_stats.distribution = wkgroups
self.w_stats.workers = len(all_workers)
# I'll be greedy only if tasks queued are equal to the limit
# (meaning there could be others ready to be queued)
self.greedy = x >= limit
logger.info('TICKER: workers are %s', len(all_workers))
logger.info('TICKER: tasks are %s', x)
def pop_task(self, db):
"""Lift a task off a queue."""
r_server = self.r_server
st = self.db.scheduler_task
task = None
# ready to process something
for group in self.group_names:
queued_set = self._nkey('queued_set:%s' % group)
queued_list = self._nkey('queued:%s' % group)
running_list = self._nkey('running:%s' % group)
running_dict = self._nkey('running_dict:%s' % group)
self.w_stats.status = POLLING
# poll for 1 minute in total: the 60-second timeout below is split
# evenly across all of this worker's groups
logger.debug(' polling on %s', group)
task_id = r_server.brpoplpush(queued_list, running_list,
timeout=60 / len(self.group_names))
logger.debug(' finished polling')
self.w_stats.status = ACTIVE
if task_id:
r_server.hset(running_dict, task_id, self.worker_name)
r_server.srem(queued_set, task_id)
task = db(
(st.id == task_id) &
(st.status == QUEUED)
).select().first()
if not task:
r_server.lrem(running_list, 0, task_id)
r_server.hdel(running_dict, task_id)
r_server.lrem(queued_list, 0, task_id)
logger.error("we received a task that isn't there (%s)",
task_id)
return None
break
now = self.now()
if task:
task.update_record(status=RUNNING, last_run_time=now)
# no one will touch my task now
db.commit()
logger.debug(' work to do %s', task.id)
else:
logger.info('nothing to do')
return None
times_run = task.times_run + 1
if not task.prevent_drift:
next_run_time = task.last_run_time + datetime.timedelta(
seconds=task.period
)
else:
# calc next_run_time based on available slots
# see #1191
next_run_time = task.start_time
secondspassed = self.total_seconds(now - next_run_time)
steps = secondspassed // task.period + 1
next_run_time += datetime.timedelta(seconds=task.period * steps)
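            # Anti-drift sketch: slots stay anchored at start_time, so e.g.
            # with start_time=10:00, period=300s and now=10:23 we get
            # steps = 1380 // 300 + 1 = 5 and next_run_time = 10:25,
            # i.e. the next slot on the original grid rather than now + period.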
if times_run < task.repeats or task.repeats == 0:
# need to run (repeating task)
run_again = True
else:
# no need to run again
run_again = False
run_id = 0
while True and not self.discard_results:
logger.debug(' new scheduler_run record')
try:
run_id = db.scheduler_run.insert(
task_id=task.id,
status=RUNNING,
start_time=now,
worker_name=self.worker_name)
db.commit()
break
except:
time.sleep(0.5)
db.rollback()
logger.info('new task %(id)s "%(task_name)s"'
' %(application_name)s.%(function_name)s' % task)
return Task(
app=task.application_name,
function=task.function_name,
timeout=task.timeout,
args=task.args, # in json
vars=task.vars, # in json
task_id=task.id,
run_id=run_id,
run_again=run_again,
next_run_time=next_run_time,
times_run=times_run,
stop_time=task.stop_time,
retry_failed=task.retry_failed,
times_failed=task.times_failed,
sync_output=task.sync_output,
uuid=task.uuid,
group_name=task.group_name)
def report_task(self, task, task_report):
"""
        Overridden only because we also need to pop the task from the
        running-tasks structures kept in redis.
"""
r_server = self.r_server
db = self.db
now = self.now()
st = db.scheduler_task
sr = db.scheduler_run
if not self.discard_results:
if task_report.result != 'null' or task_report.tb:
# result is 'null' as a string if task completed
# if it's stopped it's None as NoneType, so we record
# the STOPPED "run" anyway
logger.debug(' recording task report in db (%s)',
task_report.status)
db(sr.id == task.run_id).update(
status=task_report.status,
stop_time=now,
run_result=task_report.result,
run_output=task_report.output,
traceback=task_report.tb)
else:
logger.debug(' deleting task report in db because of no result')
db(sr.id == task.run_id).delete()
        # if there is a stop_time and the following run would exceed it
        is_expired = bool(task.stop_time and
                          task.next_run_time > task.stop_time)
        if task.run_again:
            status = EXPIRED if is_expired else QUEUED
        else:
            status = COMPLETED
if task_report.status == COMPLETED:
# assigned calculations
d = dict(status=status,
next_run_time=task.next_run_time,
times_run=task.times_run,
times_failed=0,
assigned_worker_name=self.worker_name
)
db(st.id == task.task_id).update(**d)
if status == COMPLETED:
self.update_dependencies(db, task.task_id)
else:
st_mapping = {'FAILED': 'FAILED',
'TIMEOUT': 'TIMEOUT',
'STOPPED': 'FAILED'}[task_report.status]
            if ((task.retry_failed and
                 task.times_failed < task.retry_failed) or
                    task.retry_failed == -1):
                status = QUEUED
            else:
                status = st_mapping
db(st.id == task.task_id).update(
times_failed=st.times_failed + 1,
next_run_time=task.next_run_time,
status=status,
assigned_worker_name=self.worker_name
)
logger.info('task completed (%s)', task_report.status)
running_list = self._nkey('running:%s' % task.group_name)
running_dict = self._nkey('running_dict:%s' % task.group_name)
r_server.lrem(running_list, 0, task.task_id)
r_server.hdel(running_dict, task.task_id)
def wrapped_pop_task(self):
"""Commodity function to call `pop_task` and trap exceptions.
If an exception is raised, assume it happened because of database
contention and retries `pop_task` after 0.5 seconds
"""
db = self.db
        db.commit()  # another nifty db.commit(), needed only for MySQL
x = 0
while x < 10:
try:
                return self.pop_task(db)
            # this is here to "interrupt" any brpoplpush op easily
except KeyboardInterrupt:
self.give_up()
break
except:
self.w_stats.errors += 1
db.rollback()
logger.error(' error popping tasks')
x += 1
time.sleep(0.5)
def get_workers(self, only_ticker=False):
"""Return a dict holding worker_name : {**columns}
representing all "registered" workers.
        If only_ticker is True, return only the worker currently acting
        as TICKER, if there is any.
"""
r_server = self.r_server
status_keyset = self._nkey('worker_statuses')
registered_workers = r_server.smembers(status_keyset)
all_workers = {}
for worker in registered_workers:
w = r_server.hgetall(worker)
w = Storage(w)
if not w:
continue
all_workers[w.worker_name] = Storage(
status=w.status,
first_heartbeat=self.str2date(w.first_heartbeat),
last_heartbeat=self.str2date(w.last_heartbeat),
group_names=loads(w.group_names, object_hook=_decode_dict),
                is_ticker=(w.is_ticker == 'True'),
worker_stats=loads(w.worker_stats, object_hook=_decode_dict)
)
if only_ticker:
for k, v in all_workers.iteritems():
if v['is_ticker']:
return {k: v}
return {}
return all_workers
def set_worker_status(self, group_names=None, action=ACTIVE,
exclude=None, limit=None, worker_name=None):
"""Internal function to set worker's status"""
r_server = self.r_server
all_workers = self.get_workers()
if not group_names:
group_names = self.group_names
elif isinstance(group_names, str):
group_names = [group_names]
        exclusion = (exclude + [action]) if exclude else [action]
workers = []
if worker_name is not None:
if worker_name in all_workers.keys():
workers = [worker_name]
else:
for k, v in all_workers.iteritems():
if v.status not in exclusion and set(group_names) & set(v.group_names):
workers.append(k)
if limit and worker_name is None:
workers = workers[:limit]
if workers:
with r_server.pipeline() as pipe:
while True:
try:
pipe.watch('SET_WORKER_STATUS')
for w in workers:
worker_key = self._nkey('worker_status:%s' % w)
pipe.hset(worker_key, 'status', action)
pipe.execute()
break
except RWatchError:
time.sleep(0.1)
continue
def queue_task(self, function, pargs=[], pvars={}, **kwargs):
"""
FIXME: immediate should put item in queue. The hard part is
        that currently there are no hooks happening at post-commit time.
Queue tasks. This takes care of handling the validation of all
parameters
Args:
function: the function (anything callable with a __name__)
pargs: "raw" args to be passed to the function. Automatically
jsonified.
pvars: "raw" kwargs to be passed to the function. Automatically
jsonified
kwargs: all the parameters available (basically, every
`scheduler_task` column). If args and vars are here, they should
be jsonified already, and they will override pargs and pvars
Returns:
a dict just as a normal validate_and_insert(), plus a uuid key
holding the uuid of the queued task. If validation is not passed
( i.e. some parameters are invalid) both id and uuid will be None,
and you'll get an "error" dict holding the errors found.
"""
if hasattr(function, '__name__'):
function = function.__name__
targs = 'args' in kwargs and kwargs.pop('args') or dumps(pargs)
tvars = 'vars' in kwargs and kwargs.pop('vars') or dumps(pvars)
tuuid = 'uuid' in kwargs and kwargs.pop('uuid') or web2py_uuid()
tname = 'task_name' in kwargs and kwargs.pop('task_name') or function
immediate = 'immediate' in kwargs and kwargs.pop('immediate') or None
rtn = self.db.scheduler_task.validate_and_insert(
function_name=function,
task_name=tname,
args=targs,
vars=tvars,
uuid=tuuid,
**kwargs)
if not rtn.errors:
rtn.uuid = tuuid
if immediate:
r_server = self.r_server
ticker = self.get_workers(only_ticker=True)
if ticker.keys():
ticker = ticker.keys()[0]
with r_server.pipeline() as pipe:
while True:
try:
pipe.watch('SET_WORKER_STATUS')
worker_key = self._nkey('worker_status:%s' % ticker)
pipe.hset(worker_key, 'status', 'PICK')
pipe.execute()
break
except RWatchError:
time.sleep(0.1)
continue
else:
rtn.uuid = None
return rtn
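    # Hedged usage sketch (not from the original sources; "demo_func" and the
    # keyword values are made-up examples): queue_task validates and inserts a
    # scheduler_task row, returning the usual validate_and_insert dict plus a
    # "uuid" key.
    #
    #   result = scheduler.queue_task(demo_func, pargs=[1, 2],
    #                                 pvars={'mode': 'fast'},
    #                                 timeout=60, immediate=True)
    #   if result.errors:
    #       pass  # handle validation errors; result.uuid is None here
    #   else:
    #       print(result.uuid)  # uuid of the queued task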
def stop_task(self, ref):
"""Shortcut for task termination.
If the task is RUNNING it will terminate it, meaning that status
will be set as FAILED.
If the task is QUEUED, its stop_time will be set as to "now",
the enabled flag will be set to False, and the status to STOPPED
Args:
ref: can be
- an integer : lookup will be done by scheduler_task.id
- a string : lookup will be done by scheduler_task.uuid
Returns:
- 1 if task was stopped (meaning an update has been done)
- None if task was not found, or if task was not RUNNING or QUEUED
Note:
Experimental
"""
r_server = self.r_server
st = self.db.scheduler_task
if isinstance(ref, int):
q = st.id == ref
elif isinstance(ref, str):
q = st.uuid == ref
else:
raise SyntaxError(
"You can retrieve results only by id or uuid")
task = self.db(q).select(st.id, st.status, st.group_name)
task = task.first()
rtn = None
if not task:
return rtn
running_dict = self._nkey('running_dict:%s' % task.group_name)
if task.status == 'RUNNING':
worker_key = r_server.hget(running_dict, task.id)
worker_key = self._nkey('worker_status:%s' % (worker_key))
r_server.hset(worker_key, 'status', STOP_TASK)
elif task.status == 'QUEUED':
rtn = self.db(q).update(
stop_time=self.now(),
enabled=False,
status=STOPPED)
return rtn
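    # Hedged usage sketch (illustrative only): stop_task accepts either the
    # integer id or the string uuid of a task.
    #
    #   scheduler.stop_task(42)                  # lookup by scheduler_task.id
    #   scheduler.stop_task('a-task-uuid-here')  # lookup by scheduler_task.uuid
    #
    # A QUEUED task is stopped right away (returns 1); for a RUNNING task the
    # assigned worker is asked to terminate it and None is returned.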
|
|
"""
Provide user facing operators for doing the split part of the
split-apply-combine paradigm.
"""
from __future__ import annotations
from typing import Hashable
import warnings
import numpy as np
from pandas._typing import (
ArrayLike,
FrameOrSeries,
final,
)
from pandas.errors import InvalidIndexError
from pandas.util._decorators import cache_readonly
from pandas.core.dtypes.cast import sanitize_to_nanoseconds
from pandas.core.dtypes.common import (
is_categorical_dtype,
is_list_like,
is_scalar,
)
import pandas.core.algorithms as algorithms
from pandas.core.arrays import (
Categorical,
ExtensionArray,
)
import pandas.core.common as com
from pandas.core.frame import DataFrame
from pandas.core.groupby import ops
from pandas.core.groupby.categorical import (
recode_for_groupby,
recode_from_groupby,
)
from pandas.core.indexes.api import (
CategoricalIndex,
Index,
MultiIndex,
)
from pandas.core.series import Series
from pandas.io.formats.printing import pprint_thing
class Grouper:
"""
A Grouper allows the user to specify a groupby instruction for an object.
This specification will select a column via the key parameter, or if the
level and/or axis parameters are given, a level of the index of the target
object.
If `axis` and/or `level` are passed as keywords to both `Grouper` and
`groupby`, the values passed to `Grouper` take precedence.
Parameters
----------
key : str, defaults to None
Groupby key, which selects the grouping column of the target.
level : name/number, defaults to None
The level for the target index.
freq : str / frequency object, defaults to None
This will groupby the specified frequency if the target selection
(via key or level) is a datetime-like object. For full specification
of available frequencies, please see `here
<https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases>`_.
axis : str, int, defaults to 0
Number/name of the axis.
    sort : bool, default False
Whether to sort the resulting labels.
closed : {'left' or 'right'}
Closed end of interval. Only when `freq` parameter is passed.
label : {'left' or 'right'}
Interval boundary to use for labeling.
Only when `freq` parameter is passed.
convention : {'start', 'end', 'e', 's'}
If grouper is PeriodIndex and `freq` parameter is passed.
base : int, default 0
Only when `freq` parameter is passed.
For frequencies that evenly subdivide 1 day, the "origin" of the
aggregated intervals. For example, for '5min' frequency, base could
range from 0 through 4. Defaults to 0.
.. deprecated:: 1.1.0
The new arguments that you should use are 'offset' or 'origin'.
loffset : str, DateOffset, timedelta object
Only when `freq` parameter is passed.
.. deprecated:: 1.1.0
loffset is only working for ``.resample(...)`` and not for
Grouper (:issue:`28302`).
However, loffset is also deprecated for ``.resample(...)``
See: :class:`DataFrame.resample`
origin : {{'epoch', 'start', 'start_day', 'end', 'end_day'}}, Timestamp
or str, default 'start_day'
The timestamp on which to adjust the grouping. The timezone of origin must
match the timezone of the index.
If a timestamp is not used, these values are also supported:
- 'epoch': `origin` is 1970-01-01
- 'start': `origin` is the first value of the timeseries
- 'start_day': `origin` is the first day at midnight of the timeseries
.. versionadded:: 1.1.0
- 'end': `origin` is the last value of the timeseries
- 'end_day': `origin` is the ceiling midnight of the last day
.. versionadded:: 1.3.0
offset : Timedelta or str, default is None
An offset timedelta added to the origin.
.. versionadded:: 1.1.0
dropna : bool, default True
If True, and if group keys contain NA values, NA values together with
row/column will be dropped. If False, NA values will also be treated as
the key in groups.
.. versionadded:: 1.2.0
Returns
-------
A specification for a groupby instruction
Examples
--------
Syntactic sugar for ``df.groupby('A')``
>>> df = pd.DataFrame(
... {
... "Animal": ["Falcon", "Parrot", "Falcon", "Falcon", "Parrot"],
... "Speed": [100, 5, 200, 300, 15],
... }
... )
>>> df
Animal Speed
0 Falcon 100
1 Parrot 5
2 Falcon 200
3 Falcon 300
4 Parrot 15
>>> df.groupby(pd.Grouper(key="Animal")).mean()
Speed
Animal
Falcon 200.0
Parrot 10.0
Specify a resample operation on the column 'Publish date'
>>> df = pd.DataFrame(
... {
... "Publish date": [
... pd.Timestamp("2000-01-02"),
... pd.Timestamp("2000-01-02"),
... pd.Timestamp("2000-01-09"),
... pd.Timestamp("2000-01-16")
... ],
... "ID": [0, 1, 2, 3],
... "Price": [10, 20, 30, 40]
... }
... )
>>> df
Publish date ID Price
0 2000-01-02 0 10
1 2000-01-02 1 20
2 2000-01-09 2 30
3 2000-01-16 3 40
>>> df.groupby(pd.Grouper(key="Publish date", freq="1W")).mean()
ID Price
Publish date
2000-01-02 0.5 15.0
2000-01-09 2.0 30.0
2000-01-16 3.0 40.0
If you want to adjust the start of the bins based on a fixed timestamp:
>>> start, end = '2000-10-01 23:30:00', '2000-10-02 00:30:00'
>>> rng = pd.date_range(start, end, freq='7min')
>>> ts = pd.Series(np.arange(len(rng)) * 3, index=rng)
>>> ts
2000-10-01 23:30:00 0
2000-10-01 23:37:00 3
2000-10-01 23:44:00 6
2000-10-01 23:51:00 9
2000-10-01 23:58:00 12
2000-10-02 00:05:00 15
2000-10-02 00:12:00 18
2000-10-02 00:19:00 21
2000-10-02 00:26:00 24
Freq: 7T, dtype: int64
>>> ts.groupby(pd.Grouper(freq='17min')).sum()
2000-10-01 23:14:00 0
2000-10-01 23:31:00 9
2000-10-01 23:48:00 21
2000-10-02 00:05:00 54
2000-10-02 00:22:00 24
Freq: 17T, dtype: int64
>>> ts.groupby(pd.Grouper(freq='17min', origin='epoch')).sum()
2000-10-01 23:18:00 0
2000-10-01 23:35:00 18
2000-10-01 23:52:00 27
2000-10-02 00:09:00 39
2000-10-02 00:26:00 24
Freq: 17T, dtype: int64
>>> ts.groupby(pd.Grouper(freq='17min', origin='2000-01-01')).sum()
2000-10-01 23:24:00 3
2000-10-01 23:41:00 15
2000-10-01 23:58:00 45
2000-10-02 00:15:00 45
Freq: 17T, dtype: int64
If you want to adjust the start of the bins with an `offset` Timedelta, the two
following lines are equivalent:
>>> ts.groupby(pd.Grouper(freq='17min', origin='start')).sum()
2000-10-01 23:30:00 9
2000-10-01 23:47:00 21
2000-10-02 00:04:00 54
2000-10-02 00:21:00 24
Freq: 17T, dtype: int64
>>> ts.groupby(pd.Grouper(freq='17min', offset='23h30min')).sum()
2000-10-01 23:30:00 9
2000-10-01 23:47:00 21
2000-10-02 00:04:00 54
2000-10-02 00:21:00 24
Freq: 17T, dtype: int64
To replace the use of the deprecated `base` argument, you can now use `offset`,
in this example it is equivalent to have `base=2`:
>>> ts.groupby(pd.Grouper(freq='17min', offset='2min')).sum()
2000-10-01 23:16:00 0
2000-10-01 23:33:00 9
2000-10-01 23:50:00 36
2000-10-02 00:07:00 39
2000-10-02 00:24:00 24
Freq: 17T, dtype: int64
"""
axis: int
sort: bool
dropna: bool
_gpr_index: Index | None
_grouper: Index | None
_attributes: tuple[str, ...] = ("key", "level", "freq", "axis", "sort")
def __new__(cls, *args, **kwargs):
if kwargs.get("freq") is not None:
from pandas.core.resample import TimeGrouper
_check_deprecated_resample_kwargs(kwargs, origin=cls)
cls = TimeGrouper
return super().__new__(cls)
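    # Illustrative note (behavior already documented in the class docstring):
    # passing ``freq`` reroutes construction to TimeGrouper, so
    #   type(pd.Grouper(key="Publish date", freq="1W")).__name__ == "TimeGrouper"
    # while a plain key gives a Grouper instance:
    #   type(pd.Grouper(key="Animal")).__name__ == "Grouper"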
def __init__(
self,
key=None,
level=None,
freq=None,
axis: int = 0,
sort: bool = False,
dropna: bool = True,
):
self.key = key
self.level = level
self.freq = freq
self.axis = axis
self.sort = sort
self.grouper = None
self._gpr_index = None
self.obj = None
self.indexer = None
self.binner = None
self._grouper = None
self._indexer = None
self.dropna = dropna
@final
@property
def ax(self) -> Index:
index = self._gpr_index
if index is None:
raise ValueError("_set_grouper must be called before ax is accessed")
return index
def _get_grouper(self, obj: FrameOrSeries, validate: bool = True):
"""
Parameters
----------
obj : Series or DataFrame
validate : bool, default True
if True, validate the grouper
Returns
-------
a tuple of binner, grouper, obj (possibly sorted)
"""
self._set_grouper(obj)
# error: Value of type variable "FrameOrSeries" of "get_grouper" cannot be
# "Optional[Any]"
# error: Incompatible types in assignment (expression has type "BaseGrouper",
# variable has type "None")
self.grouper, _, self.obj = get_grouper( # type: ignore[type-var,assignment]
self.obj,
[self.key],
axis=self.axis,
level=self.level,
sort=self.sort,
validate=validate,
dropna=self.dropna,
)
return self.binner, self.grouper, self.obj
@final
def _set_grouper(self, obj: FrameOrSeries, sort: bool = False):
"""
given an object and the specifications, setup the internal grouper
for this particular specification
Parameters
----------
obj : Series or DataFrame
sort : bool, default False
whether the resulting grouper should be sorted
"""
assert obj is not None
if self.key is not None and self.level is not None:
raise ValueError("The Grouper cannot specify both a key and a level!")
# Keep self.grouper value before overriding
if self._grouper is None:
# TODO: What are we assuming about subsequent calls?
self._grouper = self._gpr_index
self._indexer = self.indexer
# the key must be a valid info item
if self.key is not None:
key = self.key
# The 'on' is already defined
if getattr(self._gpr_index, "name", None) == key and isinstance(
obj, Series
):
# Sometimes self._grouper will have been resorted while
# obj has not. In this case there is a mismatch when we
# call self._grouper.take(obj.index) so we need to undo the sorting
# before we call _grouper.take.
assert self._grouper is not None
if self._indexer is not None:
reverse_indexer = self._indexer.argsort()
unsorted_ax = self._grouper.take(reverse_indexer)
ax = unsorted_ax.take(obj.index)
else:
ax = self._grouper.take(obj.index)
else:
if key not in obj._info_axis:
raise KeyError(f"The grouper name {key} is not found")
ax = Index(obj[key], name=key)
else:
ax = obj._get_axis(self.axis)
if self.level is not None:
level = self.level
# if a level is given it must be a mi level or
# equivalent to the axis name
if isinstance(ax, MultiIndex):
level = ax._get_level_number(level)
ax = Index(ax._get_level_values(level), name=ax.names[level])
else:
if level not in (0, ax.name):
raise ValueError(f"The level {level} is not valid")
# possibly sort
if (self.sort or sort) and not ax.is_monotonic:
# use stable sort to support first, last, nth
# TODO: why does putting na_position="first" fix datetimelike cases?
indexer = self.indexer = ax.array.argsort(
kind="mergesort", na_position="first"
)
ax = ax.take(indexer)
obj = obj.take(indexer, axis=self.axis)
# error: Incompatible types in assignment (expression has type
# "FrameOrSeries", variable has type "None")
self.obj = obj # type: ignore[assignment]
self._gpr_index = ax
return self._gpr_index
@final
@property
def groups(self):
# error: "None" has no attribute "groups"
return self.grouper.groups # type: ignore[attr-defined]
@final
def __repr__(self) -> str:
attrs_list = (
f"{attr_name}={repr(getattr(self, attr_name))}"
for attr_name in self._attributes
if getattr(self, attr_name) is not None
)
attrs = ", ".join(attrs_list)
cls_name = type(self).__name__
return f"{cls_name}({attrs})"
@final
class Grouping:
"""
Holds the grouping information for a single key
Parameters
----------
index : Index
grouper :
obj : DataFrame or Series
name : Label
level :
observed : bool, default False
If we are a Categorical, use the observed values
in_axis : if the Grouping is a column in self.obj and hence among
Groupby.exclusions list
Returns
-------
**Attributes**:
* indices : dict of {group -> index_list}
* codes : ndarray, group codes
* group_index : unique groups
* groups : dict of {group -> label_list}
"""
_codes: np.ndarray | None = None
_group_index: Index | None = None
_passed_categorical: bool
_all_grouper: Categorical | None
_index: Index
def __init__(
self,
index: Index,
grouper=None,
obj: FrameOrSeries | None = None,
level=None,
sort: bool = True,
observed: bool = False,
in_axis: bool = False,
dropna: bool = True,
):
self.level = level
self._orig_grouper = grouper
self.grouping_vector = _convert_grouper(index, grouper)
self._all_grouper = None
self._index = index
self._sort = sort
self.obj = obj
self._observed = observed
self.in_axis = in_axis
self._dropna = dropna
self._passed_categorical = False
        # we have a single grouper which may be a myriad of things,
        # some of which depend on whether a level was passed in
ilevel = self._ilevel
if ilevel is not None:
mapper = self.grouping_vector
# In extant tests, the new self.grouping_vector matches
# `index.get_level_values(ilevel)` whenever
# mapper is None and isinstance(index, MultiIndex)
(
self.grouping_vector, # Index
self._codes,
self._group_index,
) = index._get_grouper_for_level(mapper, level=ilevel)
        # a passed Grouper-like object: directly get the grouper in the same
        # way as a single-grouper groupby, and use the group_info to get codes
elif isinstance(self.grouping_vector, Grouper):
# get the new grouper; we already have disambiguated
# what key/level refer to exactly, don't need to
# check again as we have by this point converted these
# to an actual value (rather than a pd.Grouper)
_, newgrouper, newobj = self.grouping_vector._get_grouper(
# error: Value of type variable "FrameOrSeries" of "_get_grouper"
# of "Grouper" cannot be "Optional[FrameOrSeries]"
self.obj, # type: ignore[type-var]
validate=False,
)
self.obj = newobj
ng = newgrouper._get_grouper()
if isinstance(newgrouper, ops.BinGrouper):
# in this case we have `ng is newgrouper`
self.grouping_vector = ng
else:
# ops.BaseGrouper
# use Index instead of ndarray so we can recover the name
self.grouping_vector = Index(ng, name=newgrouper.result_index.name)
elif is_categorical_dtype(self.grouping_vector):
# a passed Categorical
self._passed_categorical = True
self.grouping_vector, self._all_grouper = recode_for_groupby(
self.grouping_vector, sort, observed
)
elif not isinstance(
self.grouping_vector, (Series, Index, ExtensionArray, np.ndarray)
):
# no level passed
if getattr(self.grouping_vector, "ndim", 1) != 1:
t = self.name or str(type(self.grouping_vector))
raise ValueError(f"Grouper for '{t}' not 1-dimensional")
self.grouping_vector = index.map(self.grouping_vector)
if not (
hasattr(self.grouping_vector, "__len__")
and len(self.grouping_vector) == len(index)
):
grper = pprint_thing(self.grouping_vector)
errmsg = (
"Grouper result violates len(labels) == "
f"len(data)\nresult: {grper}"
)
self.grouping_vector = None # Try for sanity
raise AssertionError(errmsg)
if isinstance(self.grouping_vector, np.ndarray):
# if we have a date/time-like grouper, make sure that we have
# Timestamps like
self.grouping_vector = sanitize_to_nanoseconds(self.grouping_vector)
def __repr__(self) -> str:
return f"Grouping({self.name})"
def __iter__(self):
return iter(self.indices)
@cache_readonly
def name(self) -> Hashable:
ilevel = self._ilevel
if ilevel is not None:
return self._index.names[ilevel]
if isinstance(self._orig_grouper, (Index, Series)):
return self._orig_grouper.name
elif isinstance(self.grouping_vector, ops.BaseGrouper):
return self.grouping_vector.result_index.name
elif isinstance(self.grouping_vector, Index):
return self.grouping_vector.name
# otherwise we have ndarray or ExtensionArray -> no name
return None
@cache_readonly
def _ilevel(self) -> int | None:
"""
        If necessary, convert index level name to index level position.
"""
level = self.level
if level is None:
return None
if not isinstance(level, int):
index = self._index
if level not in index.names:
raise AssertionError(f"Level {level} not in index")
return index.names.index(level)
return level
@property
def ngroups(self) -> int:
return len(self.group_index)
@cache_readonly
def indices(self):
# we have a list of groupers
if isinstance(self.grouping_vector, ops.BaseGrouper):
return self.grouping_vector.indices
values = Categorical(self.grouping_vector)
return values._reverse_indexer()
@property
def codes(self) -> np.ndarray:
if self._codes is not None:
# _codes is set in __init__ for MultiIndex cases
return self._codes
return self._codes_and_uniques[0]
@cache_readonly
def group_arraylike(self) -> ArrayLike:
"""
        Analogous to result_index, but holding an ArrayLike to ensure
        we can retain ExtensionDtypes.
"""
return self._codes_and_uniques[1]
@cache_readonly
def result_index(self) -> Index:
# TODO: what's the difference between result_index vs group_index?
if self._all_grouper is not None:
group_idx = self.group_index
assert isinstance(group_idx, CategoricalIndex)
return recode_from_groupby(self._all_grouper, self._sort, group_idx)
return self.group_index
@cache_readonly
def group_index(self) -> Index:
if self._group_index is not None:
# _group_index is set in __init__ for MultiIndex cases
return self._group_index
uniques = self.group_arraylike
return Index(uniques, name=self.name)
@cache_readonly
def _codes_and_uniques(self) -> tuple[np.ndarray, ArrayLike]:
if self._passed_categorical:
# we make a CategoricalIndex out of the cat grouper
# preserving the categories / ordered attributes
cat = self.grouping_vector
categories = cat.categories
if self._observed:
ucodes = algorithms.unique1d(cat.codes)
ucodes = ucodes[ucodes != -1]
if self._sort or cat.ordered:
ucodes = np.sort(ucodes)
else:
ucodes = np.arange(len(categories))
uniques = Categorical.from_codes(
codes=ucodes, categories=categories, ordered=cat.ordered
)
return cat.codes, uniques
elif isinstance(self.grouping_vector, ops.BaseGrouper):
# we have a list of groupers
codes = self.grouping_vector.codes_info
uniques = self.grouping_vector.result_arraylike
else:
# GH35667, replace dropna=False with na_sentinel=None
if not self._dropna:
na_sentinel = None
else:
na_sentinel = -1
codes, uniques = algorithms.factorize(
self.grouping_vector, sort=self._sort, na_sentinel=na_sentinel
)
return codes, uniques
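    # Rough illustration of the dropna handling above (assumed values, not
    # taken from a test suite):
    #   factorize(['a', None, 'a'], na_sentinel=-1)   -> codes [0, -1, 0]
    #   factorize(['a', None, 'a'], na_sentinel=None) -> codes [0, 1, 0]
    # i.e. with dropna=False the NA values get their own group code.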
@cache_readonly
def groups(self) -> dict[Hashable, np.ndarray]:
return self._index.groupby(Categorical.from_codes(self.codes, self.group_index))
def get_grouper(
obj: FrameOrSeries,
key=None,
axis: int = 0,
level=None,
sort: bool = True,
observed: bool = False,
mutated: bool = False,
validate: bool = True,
dropna: bool = True,
) -> tuple[ops.BaseGrouper, frozenset[Hashable], FrameOrSeries]:
"""
Create and return a BaseGrouper, which is an internal
mapping of how to create the grouper indexers.
This may be composed of multiple Grouping objects, indicating
multiple groupers
Groupers are ultimately index mappings. They can originate as:
index mappings, keys to columns, functions, or Groupers
    Groupers enable local references to axis, level, and sort, while
    the passed-in axis, level, and sort are 'global'.
    This routine tries to figure out what the passed-in references
are and then creates a Grouping for each one, combined into
a BaseGrouper.
If observed & we have a categorical grouper, only show the observed
values.
If validate, then check for key/level overlaps.
"""
group_axis = obj._get_axis(axis)
# validate that the passed single level is compatible with the passed
# axis of the object
if level is not None:
# TODO: These if-block and else-block are almost same.
# MultiIndex instance check is removable, but it seems that there are
# some processes only for non-MultiIndex in else-block,
# eg. `obj.index.name != level`. We have to consider carefully whether
# these are applicable for MultiIndex. Even if these are applicable,
# we need to check if it makes no side effect to subsequent processes
# on the outside of this condition.
# (GH 17621)
if isinstance(group_axis, MultiIndex):
if is_list_like(level) and len(level) == 1:
level = level[0]
if key is None and is_scalar(level):
# Get the level values from group_axis
key = group_axis.get_level_values(level)
level = None
else:
# allow level to be a length-one list-like object
# (e.g., level=[0])
# GH 13901
if is_list_like(level):
nlevels = len(level)
if nlevels == 1:
level = level[0]
elif nlevels == 0:
raise ValueError("No group keys passed!")
else:
raise ValueError("multiple levels only valid with MultiIndex")
if isinstance(level, str):
if obj._get_axis(axis).name != level:
raise ValueError(
f"level name {level} is not the name "
f"of the {obj._get_axis_name(axis)}"
)
elif level > 0 or level < -1:
raise ValueError("level > 0 or level < -1 only valid with MultiIndex")
# NOTE: `group_axis` and `group_axis.get_level_values(level)`
# are same in this section.
level = None
key = group_axis
# a passed-in Grouper, directly convert
if isinstance(key, Grouper):
binner, grouper, obj = key._get_grouper(obj, validate=False)
if key.key is None:
return grouper, frozenset(), obj
else:
return grouper, frozenset({key.key}), obj
# already have a BaseGrouper, just return it
elif isinstance(key, ops.BaseGrouper):
return key, frozenset(), obj
if not isinstance(key, list):
keys = [key]
match_axis_length = False
else:
keys = key
match_axis_length = len(keys) == len(group_axis)
# what are we after, exactly?
any_callable = any(callable(g) or isinstance(g, dict) for g in keys)
any_groupers = any(isinstance(g, Grouper) for g in keys)
any_arraylike = any(
isinstance(g, (list, tuple, Series, Index, np.ndarray)) for g in keys
)
# is this an index replacement?
if (
not any_callable
and not any_arraylike
and not any_groupers
and match_axis_length
and level is None
):
if isinstance(obj, DataFrame):
all_in_columns_index = all(
g in obj.columns or g in obj.index.names for g in keys
)
else:
assert isinstance(obj, Series)
all_in_columns_index = all(g in obj.index.names for g in keys)
if not all_in_columns_index:
keys = [com.asarray_tuplesafe(keys)]
if isinstance(level, (tuple, list)):
if key is None:
keys = [None] * len(level)
levels = level
else:
levels = [level] * len(keys)
groupings: list[Grouping] = []
exclusions: set[Hashable] = set()
# if the actual grouper should be obj[key]
def is_in_axis(key) -> bool:
if not _is_label_like(key):
# items -> .columns for DataFrame, .index for Series
items = obj.axes[-1]
try:
items.get_loc(key)
except (KeyError, TypeError, InvalidIndexError):
# TypeError shows up here if we pass e.g. Int64Index
return False
return True
# if the grouper is obj[name]
def is_in_obj(gpr) -> bool:
if not hasattr(gpr, "name"):
return False
try:
return gpr is obj[gpr.name]
except (KeyError, IndexError):
# IndexError reached in e.g. test_skip_group_keys when we pass
# lambda here
return False
for gpr, level in zip(keys, levels):
if is_in_obj(gpr): # df.groupby(df['name'])
in_axis = True
exclusions.add(gpr.name)
elif is_in_axis(gpr): # df.groupby('name')
if gpr in obj:
if validate:
obj._check_label_or_level_ambiguity(gpr, axis=axis)
in_axis, name, gpr = True, gpr, obj[gpr]
if gpr.ndim != 1:
# non-unique columns; raise here to get the name in the
# exception message
raise ValueError(f"Grouper for '{name}' not 1-dimensional")
exclusions.add(name)
elif obj._is_level_reference(gpr, axis=axis):
in_axis, level, gpr = False, gpr, None
else:
raise KeyError(gpr)
elif isinstance(gpr, Grouper) and gpr.key is not None:
# Add key to exclusions
exclusions.add(gpr.key)
in_axis = False
else:
in_axis = False
if is_categorical_dtype(gpr) and len(gpr) != obj.shape[axis]:
raise ValueError(
f"Length of grouper ({len(gpr)}) and axis ({obj.shape[axis]}) "
"must be same length"
)
# create the Grouping
        # allow passing an actual Grouping as the gpr
ping = (
Grouping(
group_axis,
gpr,
obj=obj,
level=level,
sort=sort,
observed=observed,
in_axis=in_axis,
dropna=dropna,
)
if not isinstance(gpr, Grouping)
else gpr
)
groupings.append(ping)
if len(groupings) == 0 and len(obj):
raise ValueError("No group keys passed!")
elif len(groupings) == 0:
groupings.append(Grouping(Index([], dtype="int"), np.array([], dtype=np.intp)))
# create the internals grouper
grouper = ops.BaseGrouper(
group_axis, groupings, sort=sort, mutated=mutated, dropna=dropna
)
return grouper, frozenset(exclusions), obj
def _is_label_like(val) -> bool:
return isinstance(val, (str, tuple)) or (val is not None and is_scalar(val))
def _convert_grouper(axis: Index, grouper):
if isinstance(grouper, dict):
return grouper.get
elif isinstance(grouper, Series):
if grouper.index.equals(axis):
return grouper._values
else:
return grouper.reindex(axis)._values
elif isinstance(grouper, MultiIndex):
return grouper._values
elif isinstance(grouper, (list, tuple, Series, Index, np.ndarray)):
if len(grouper) != len(axis):
raise ValueError("Grouper and axis must be same length")
if isinstance(grouper, (list, tuple)):
grouper = com.asarray_tuplesafe(grouper)
return grouper
else:
return grouper
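# _convert_grouper quick reference (informal summary of the branches above):
#   dict               -> its .get method (maps axis labels to group keys)
#   Series             -> its values, reindexed to the axis if indexes differ
#   list/tuple/ndarray -> an array-like that must match the axis length
#   MultiIndex         -> its values
#   anything else      -> returned unchanged (e.g. a callable)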
def _check_deprecated_resample_kwargs(kwargs, origin):
"""
Check for use of deprecated parameters in ``resample`` and related functions.
Raises the appropriate warnings if these parameters are detected.
Only sets an approximate ``stacklevel`` for the warnings (see #37603, #36629).
Parameters
----------
kwargs : dict
Dictionary of keyword arguments to check for deprecated parameters.
origin : object
From where this function is being called; either Grouper or TimeGrouper. Used
to determine an approximate stacklevel.
"""
from pandas.core.resample import TimeGrouper
# Deprecation warning of `base` and `loffset` since v1.1.0:
# we are raising the warning here to be able to set the `stacklevel`
# properly since we need to raise the `base` and `loffset` deprecation
# warning from three different cases:
# core/generic.py::NDFrame.resample
# core/groupby/groupby.py::GroupBy.resample
# core/groupby/grouper.py::Grouper
# raising these warnings from TimeGrouper directly would fail the test:
# tests/resample/test_deprecated.py::test_deprecating_on_loffset_and_base
# hacky way to set the stacklevel: if cls is TimeGrouper it means
# that the call comes from a pandas internal call of resample,
# otherwise it comes from pd.Grouper
stacklevel = (5 if origin is TimeGrouper else 2) + 1
# the + 1 is for this helper function, check_deprecated_resample_kwargs
if kwargs.get("base", None) is not None:
warnings.warn(
"'base' in .resample() and in Grouper() is deprecated.\n"
"The new arguments that you should use are 'offset' or 'origin'.\n"
'\n>>> df.resample(freq="3s", base=2)\n'
"\nbecomes:\n"
'\n>>> df.resample(freq="3s", offset="2s")\n',
FutureWarning,
stacklevel=stacklevel,
)
if kwargs.get("loffset", None) is not None:
warnings.warn(
"'loffset' in .resample() and in Grouper() is deprecated.\n"
'\n>>> df.resample(freq="3s", loffset="8H")\n'
"\nbecomes:\n"
"\n>>> from pandas.tseries.frequencies import to_offset"
'\n>>> df = df.resample(freq="3s").mean()'
'\n>>> df.index = df.index.to_timestamp() + to_offset("8H")\n',
FutureWarning,
stacklevel=stacklevel,
)
|
|
# coding=utf-8
# Copyright (c) 2016 Janusz Skonieczny
from __future__ import absolute_import
from __future__ import unicode_literals
import json
import logging
import os
import threading
from datetime import date, datetime, timedelta
import six
from django.conf import settings
from django.contrib import admin
from django.core.mail import EmailMultiAlternatives
from django.core.mail import send_mail
from django.core.management import call_command, execute_from_command_line
from django.db import transaction
from django.test.testcases import TransactionTestCase
from django.test.utils import modify_settings, override_settings
from django.utils import timezone
from django.utils.timezone import is_naive, is_aware
from mock import patch, MagicMock
from django.test import TestCase
from django_tasker import exceptions
from django_tasker.models import TaskInfo
from django_tasker.decoration import queueable
from django_tasker import models, admin
from . import factories
class TaskInfoDecoratorTests(TestCase):
def test_no_queue(self):
@queueable
def foo(a):
return a
self.assertEqual(1, foo(1))
self.assertEqual('a', foo('a'))
def test_decorate_no_args(self):
@queueable
def foo():
return 1
self.assertIsNotNone(foo.queue)
self.assertTrue(callable(foo.queue))
def test_decorate_with_options(self):
@queueable(queue="some")
def foo():
return 1
self.assertIsNotNone(foo.queue)
self.assertTrue(callable(foo.queue))
@patch("django_tasker.models.TaskInfo.queue")
def test_queue(self, queue):
@queueable(queue="some")
def foo():
return 1
foo.queue(1, 2, a='b')
queue.assert_called_with(1, 2, a='b')
@patch("django_tasker.models.TaskInfo.setup")
def test_setup(self, setup):
@queueable(queue="some")
def foo():
return 1
foo.queue(1, 2, a='b')
setup.assert_called_with(foo.__wrapped__, None, queue='some')
def test_no_call_no_create_missing_queue(self):
@queueable(queue="some")
def foo():
return 1
self.assertIsNone(models.TaskQueue.objects.filter(name='some').first())
def test_create_missing_queue(self):
@queueable(queue="some")
def foo():
return 1
foo.queue(1, 2, a='b')
queue = models.TaskQueue.objects.filter(name='some').first()
self.assertIsNotNone(queue)
self.assertIsNone(queue.rate_limit)
def test_set_rate_limit(self):
@queueable(queue="some", rate_limit=12)
def foo():
return 1
foo.queue(1, 2, a='b')
queue = models.TaskQueue.objects.filter(name='some').first()
self.assertIsNotNone(queue)
self.assertEqual(12, queue.rate_limit)
class TaskInfoInstanceTests(TestCase):
def test_queue_on_model_instance(self):
stub = models.TaskQueue.objects.create()
queueable(stub.process_batch).queue(1, 2, some='foo') # Re-use existing model as decorator target
o = TaskInfo.objects.last()
self.assertEqual('django_tasker.models.TaskQueue.process_batch', o.target.name)
self.assertEqual(stub.pk, json.loads(o.payload)['pk'])
self.assertEqual(json.dumps({'args': [1, 2], 'kwargs': {'some': 'foo'}, "pk": 1}), o.payload)
self.assertIsNotNone(o.eta)
self.assertEqual(models.TaskStatus.queued, o.status)
def test_execute_arguments(self):
stub = models.TaskQueue.objects.create()
queueable(stub.process_batch).queue(1, 2, some='foo') # Re-use existing model as decorator target
o = TaskInfo.objects.last()
with patch("django_tasker.models.TaskQueue.process_batch") as method:
o.execute()
method.assert_called_with(1, 2, some='foo')
def test_execute_smoke(self):
stub = models.TaskQueue.objects.create()
queueable(stub.throttle).queue('ignored but needed by test') # Re-use existing model as decorator target
o = TaskInfo.objects.last()
o.execute()
self.assertEqual(None, o.status_message)
self.assertEqual(o.status, models.TaskStatus.success)
def test_success_status(self):
queueable(models.TaskQueue.throttle).queue() # Re-use existing model as decorator target
o = TaskInfo.objects.last()
with patch("django_tasker.models.TaskQueue.throttle") as method:
o.execute()
self.assertIsNone(o.status_message)
self.assertEqual(models.TaskStatus.success, o.status)
class TaskInfoNonInstanceTests(TestCase):
def test_queue_on_class_method(self):
queueable(models.TaskQueue.throttle).queue(1, 2, some='foo') # Re-use existing model as decorator target
o = TaskInfo.objects.last()
self.assertEqual('django_tasker.models.TaskQueue.throttle', o.target.name)
self.assertEqual(json.dumps({'args': [1, 2], 'kwargs': {'some': 'foo'}}), o.payload)
self.assertIsNotNone(o.eta)
self.assertEqual(models.TaskStatus.queued, o.status)
def test_execute(self):
queueable(models.TaskQueue.throttle).queue(1, 2, some='foo')
o = TaskInfo.objects.first()
with patch("django_tasker.models.TaskQueue.throttle") as method:
o.execute()
method.assert_called_with(1, 2, some='foo')
def test_execute_smoke(self):
t = queueable(models.TaskInfo.process_one).queue(213412) # Re-use existing model as decorator target
self.assertEqual('django_tasker.models.TaskInfo.process_one', t.target.name)
o = TaskInfo.objects.last()
o.execute()
self.assertEqual(None, o.status_message)
self.assertEqual(o.status, models.TaskStatus.success)
class TaskInfoModuleFunction(TestCase):
def test_queue(self):
@queueable(queue="some")
def foo():
return 1
foo.queue(1, 2, a='b')
o = TaskInfo.objects.last()
self.assertEqual('tests.tests.TaskInfoModuleFunction.test_queue.<locals>.foo', o.target.name)
self.assertEqual(json.dumps({'args': [1, 2], 'kwargs': {'a': 'b'}}), o.payload)
self.assertIsNotNone(o.eta)
self.assertEqual(models.TaskStatus.queued, o.status)
class TaskInfoTests(TestCase):
@patch('django_tasker.models.TaskInfo.execute')
def test_process_one(self, execute):
t = factories.TaskInfoFactory(status=models.TaskStatus.queued)
models.TaskInfo.process_one(t.pk)
execute.assert_called_with()
models.TaskInfo.process_one(t.pk)
self.assertEqual(1, execute.call_count)
@patch('django_tasker.models.TaskInfo.execute')
def test_process_one_retry(self, execute):
t = factories.TaskInfoFactory(status=models.TaskStatus.retry)
models.TaskInfo.process_one(t.pk)
execute.assert_called_with()
models.TaskInfo.process_one(t.pk)
self.assertEqual(1, execute.call_count)
@patch('django_tasker.models.TaskInfo.execute')
def test_process_one_busy(self, execute):
t = factories.TaskInfoFactory(status=models.TaskStatus.busy)
models.TaskInfo.process_one(t.pk)
self.assertFalse(execute.called)
def test_retry(self):
t = factories.TaskInfoFactory()
t._execute_call(1, None, None)
self.assertEqual(models.TaskStatus.retry, t.status)
self.assertEqual(1, t.retry_count)
t._execute_call(1, None, None)
self.assertEqual(2, t.retry_count)
self.assertEqual(models.TaskStatus.retry, t.status)
t.retry_count = 5
t._execute_call(1, None, None)
self.assertEqual(models.TaskStatus.error, t.status)
def test_get_target_name_from_subclass_instance(self):
from tests.app.models import SomeModel
t = SomeModel.objects.create().process_me.queue()
self.assertEqual('tests.app.models.SomeModel.process_me', t.target.name)
def test_get_target_name_subclass_instance(self):
from tests.app.models import SomeModel
v = models.TaskInfo.get_target_name(SomeModel.process_me, SomeModel())
self.assertEqual('tests.app.models.SomeModel.process_me', v)
def test_get_target_name_subclass_instance2(self):
from tests.app.models import SomeModel
v = models.TaskInfo.get_target_name(SomeModel().process_me, None)
self.assertEqual('tests.app.models.BaseModel.process_me', v)
def test_get_target_name_subclass_class_method(self):
from tests.app.models import SomeModel
v = models.TaskInfo.get_target_name(SomeModel.process_me, None)
self.assertEqual('tests.app.models.BaseModel.process_me', v)
def test_get_target_name_subclass_instance_plain(self):
from tests.app.models import SomeModel
v = models.TaskInfo.get_target_name(SomeModel.no_queable, SomeModel())
self.assertEqual('tests.app.models.SomeModel.no_queable', v)
def test_get_target_name_subclass_instance2_plain(self):
from tests.app.models import SomeModel
v = models.TaskInfo.get_target_name(SomeModel().no_queable, None)
self.assertEqual('tests.app.models.SomeModel.no_queable', v)
def test_get_target_name_subclass_class_method_plain(self):
from tests.app.models import SomeModel
v = models.TaskInfo.get_target_name(SomeModel.no_queable, None)
        # Unbound (or plain) functions don't know where they were taken from
self.assertEqual('tests.app.models.BaseModel.no_queable', v)
def test_is_not_unique(self):
a = factories.TaskInfoFactory.create(status=models.TaskStatus.queued, payload="{}")
b = factories.TaskInfoFactory.build(status=models.TaskStatus.queued, payload=a.payload, eta=a.eta, target=a.target)
self.assertFalse(b.is_unique('{}'))
def test_is_unique_fail(self):
a = factories.TaskInfoFactory.create()
self.assertRaises(AssertionError, a.is_unique, '{}')
@patch('django_tasker.models.TaskInfo._get_payload')
def test_queue_once_fail(self, get_payload):
get_payload.return_value = None
a = factories.TaskInfoFactory.create()
self.assertRaises(AssertionError, a.queue_once)
def test_is_unique(self):
a = factories.TaskInfoFactory.create(status=models.TaskStatus.queued, payload="{}")
b = factories.TaskInfoFactory.build(status=a.status, payload="{a}", eta=a.eta)
self.assertTrue(b.is_unique('{}'))
b = factories.TaskInfoFactory.build(status=a.status, target=a.target, payload="{}", eta=a.eta + timedelta(seconds=1))
self.assertNotEqual(a.eta, b.eta)
self.assertTrue(b.is_unique('{}'))
@override_settings(TASKER_ALWAYS_EAGER=False)
def test_queue_once(self):
dummy = factories.TaskQueueFactory()
eta = timezone.now()
models.TaskInfo.setup(dummy.throttle, dummy, eta=eta).queue()
models.TaskInfo.setup(dummy.throttle, dummy, eta=eta).queue_once()
self.assertEqual(1, TaskInfo.objects.count())
def test_countdown(self):
dummy = factories.TaskInfoFactory.create()
when = timezone.now()
task = models.TaskInfo.setup(dummy.is_unique, dummy, countdown=5)
self.assertAlmostEqual(when + timedelta(seconds=5), task.eta, delta=timedelta(milliseconds=2))
class ChangesNotReflectedInMigrations(TestCase):
def test_missing_migrations(self):
from django.db import connection
from django.apps.registry import apps
from django.db.migrations.executor import MigrationExecutor
executor = MigrationExecutor(connection)
from django.db.migrations.autodetector import MigrationAutodetector
from django.db.migrations.state import ProjectState
autodetector = MigrationAutodetector(
executor.loader.project_state(),
ProjectState.from_apps(apps),
)
changes = autodetector.changes(graph=executor.loader.graph)
self.assertEqual({}, changes)
# TODO: Test select_for_update
# class TaskInfoTestsTx(TransactionTestCase):
# @patch('django_tasker.models.TaskInfo.execute')
# def test_process_one(self, execute):
# t = factories.TaskInfoFactory()
# models.TaskInfo.process_one(t.pk)
# execute.assert_called_with()
# def another_call():
# models.TaskInfo.process_one(t.pk)
# thread = threading.Thread(target=another_call)
# thread.start()
# thread.join()
# self.assertEqual(1, execute.call_count)
class TaskInfoAdminTests(TestCase):
@patch("django_tasker.admin.messages")
@patch("django_tasker.models.TaskInfo.execute")
def test_execute_tasks(self, execute, messages):
factories.TaskInfoFactory.create_batch(9)
admin.TaskInfoAdmin.execute_tasks(None, None, models.TaskInfo.objects.all()[3:6])
self.assertEqual(3, execute.call_count)
self.assertTrue(messages.info.called)
@patch("django_tasker.admin.messages")
def test_set_retry_status(self, messages):
tasks = factories.TaskInfoFactory.create_batch(9, status=models.TaskStatus.error)
factories.TaskInfoFactory.create_batch(3, status=models.TaskStatus.retry)
admin.TaskInfoAdmin.set_retry_status(None, None, models.TaskInfo.objects.filter(id__in=[t.pk for t in tasks[3:6]]))
self.assertEqual(6, models.TaskInfo.objects.filter(status=models.TaskStatus.retry).count())
self.assertTrue(messages.info.called)
@patch("django_tasker.admin.messages")
def test_reset_retry_count(self, messages):
tasks = factories.TaskInfoFactory.create_batch(9, retry_count=5)
admin.TaskInfoAdmin.reset_retry_count(None, None, models.TaskInfo.objects.filter(id__in=[t.pk for t in tasks[3:6]]))
self.assertEqual(3, models.TaskInfo.objects.filter(retry_count=0).count())
self.assertTrue(messages.info.called)
@patch("django_tasker.admin.messages")
def test_delete_completed(self, messages):
factories.TaskInfoFactory.create_batch(10)
factories.TaskInfoFactory.create_batch(3, status=models.TaskStatus.success)
count = models.TaskInfo.objects.filter(status=models.TaskStatus.success).count()
admin.TaskInfoAdmin.delete_completed(None, None, None)
self.assertEqual(13 - count, models.TaskInfo.objects.count())
class TaskQueueTests(TestCase):
@patch("time.sleep")
def test_no_throttling(self, sleep):
q = models.TaskQueue()
q.throttle(timedelta(seconds=1))
self.assertFalse(sleep.called)
@patch("django_tasker.models.sleep")
def test_throttle(self, sleep):
q = models.TaskQueue(rate_limit=60)
q.throttle(timedelta(seconds=1))
sleep.assert_called_with(59)
def test_time_interval(self):
q = models.TaskQueue(rate_limit=1800)
self.assertEqual(q.time_interval.total_seconds(), 2)
@patch("django_tasker.models.sleep")
    def test_remaining_throttle_empty(self, sleep):
q = models.TaskQueue(max_tasks_per_hour=3600)
q.throttle(timedelta(seconds=1))
self.assertFalse(sleep.called)
@patch("django_tasker.models.sleep")
    def test_remaining_throttle_empty(self, sleep):
q = models.TaskQueue(rate_limit=3600)
q.throttle(timedelta(seconds=2))
self.assertFalse(sleep.called)
@patch('django_tasker.models.TaskInfo.process_one')
def test_process_batch(self, process_one):
task = factories.TaskInfoFactory(status=models.TaskStatus.queued, eta=datetime.now())
q = task.target.queue
empty_run = q.process_batch()
self.assertFalse(empty_run)
process_one.assert_called_with(task.pk)
@override_settings(DEBUG=True)
def test_get_batch(self):
t1 = factories.TaskInfoFactory(status=models.TaskStatus.queued, eta=datetime.now())
t2 = factories.TaskInfoFactory(status=models.TaskStatus.queued, eta=datetime.now() - timedelta(seconds=5), target=t1.target)
t3 = factories.TaskInfoFactory(status=models.TaskStatus.queued, eta=datetime.now() - timedelta(seconds=2), target=t1.target)
queue = t1.target.queue
queue.refresh_from_db()
batch = queue.get_batch(3)
self.assertEqual(list(batch), [t2.pk, t3.pk, t1.pk])
def test_get_batch_full(self):
t1 = factories.TaskInfoFactory(status=models.TaskStatus.queued, eta=datetime.now())
t2 = factories.TaskInfoFactory(status=models.TaskStatus.queued, eta=datetime.now() - timedelta(seconds=5), target=t1.target)
t3 = factories.TaskInfoFactory(status=models.TaskStatus.queued, eta=datetime.now() - timedelta(seconds=2), target=t1.target)
batch = t1.target.queue.get_batch(3, flat=False)
self.assertEqual(list(batch), [t2, t3, t1])
@patch('django_tasker.models.TaskInfo.process_one')
def test_process_future(self, process_one):
task = factories.TaskInfoFactory(status=models.TaskStatus.queued, eta=datetime.now() + timedelta(hours=1))
q = task.target.queue
empty_run = q.process_batch()
self.assertTrue(empty_run)
self.assertFalse(process_one.called)
@patch('django_tasker.models.TaskInfo.process_one')
    def test_process_not_queued(self, process_one):
task = factories.TaskInfoFactory(status=models.TaskStatus.created, eta=datetime.now())
q = task.target.queue
q.process_batch()
self.assertFalse(process_one.called)
@override_settings(USE_TZ=True)
def test_queue(self):
task = models.TaskInfo.setup(lambda: 1, None)
self.assertTrue(is_aware(task.eta))
@patch('django_tasker.models.sleep')
def test_back_off_time(self, sleep):
worker = factories.TaskQueueFactory()
s = worker.on_error_back_off(None, Exception())
base_seconds = models.TaskQueue._meta.get_field('back_off_base_seconds').default
back_off_max_seconds = models.TaskQueue._meta.get_field('back_off_max_seconds').default
back_off_multiplier = models.TaskQueue._meta.get_field('back_off_multiplier').default
sleep.assert_called_with(base_seconds)
s = worker.on_error_back_off(s, Exception())
sleep.assert_called_with(base_seconds * back_off_multiplier)
s = worker.on_error_back_off(s, Exception())
sleep.assert_called_with(base_seconds * back_off_multiplier ** 2)
s = worker.on_error_back_off(s, Exception())
sleep.assert_called_with(base_seconds * back_off_multiplier ** 3)
s = worker.on_error_back_off(s, Exception())
sleep.assert_called_with(base_seconds * back_off_multiplier ** 4)
s = worker.on_error_back_off(s, Exception())
sleep.assert_called_with(base_seconds * back_off_multiplier ** 5)
s = worker.on_error_back_off(s, Exception())
sleep.assert_called_with(back_off_max_seconds)
def test_retry_busy_timeouts(self):
queue = factories.TaskQueueFactory()
factories.TaskInfoFactory.create_batch(3, status=models.TaskStatus.busy, target__queue=queue)
when = timezone.now() - timedelta(seconds=models.TaskQueue._meta.get_field('busy_max_seconds').default)
        # Update in the db, because ts has auto_now=True
models.TaskInfo.objects.filter(status=models.TaskStatus.busy).update(ts=when)
factories.TaskInfoFactory.create_batch(10, target__queue=queue)
rows = queue.retry_busy_timeouts()
self.assertEqual(3, rows)
class TestAppTests(TestCase):
def test_queue_base_method_runs_on_subclass(self):
from tests.app.models import SomeModel
o = SomeModel.objects.create()
o.process_me.setup_task(max_retries=0).queue()
t = models.TaskInfo.objects.first()
t.execute()
self.assertEqual(None, t.status_message)
self.assertEqual(models.TaskStatus.success, t.status)
def test_do_stuff(self):
from tests.app.models import SomeModel
o = SomeModel.objects.create()
o.do_stuff.setup_task(max_retries=0).queue()
t = models.TaskInfo.objects.first()
t.execute()
self.assertEqual(None, t.status_message)
self.assertEqual(models.TaskStatus.success, t.status)
def test_do_stuff_fails_not_saved_instance(self):
from tests.app.models import SomeModel
o = SomeModel()
self.assertRaises(AssertionError, o.do_stuff.queue)
def test_do_whole_other_stuff(self):
from tests.app.models import SomeModel
SomeModel.do_whole_other_stuff.setup_task(max_retries=0).queue()
t = models.TaskInfo.objects.first()
t.execute()
self.assertEqual(None, t.status_message)
self.assertEqual(models.TaskStatus.success, t.status)
class TestWorker(TestCase):
@patch('django_tasker.models.sleep')
def test_sleep_on_no_work(self, sleep):
worker = factories.TaskWorkerFactory()
worker.run_once()
sleep.assert_called_with(60)
@patch('django_tasker.models.sleep')
def test_no_sleep_when_work_done(self, sleep):
from tests.app.models import SomeModel
task = SomeModel.objects.create().do_stuff.queue()
worker = factories.TaskWorkerFactory(queue=task.target.queue)
worker.run_once()
self.assertFalse(sleep.called)
@patch('django_tasker.models.TaskQueue.on_error_back_off')
    def test_sleep_on_error(self, on_error_back_off):
worker = factories.TaskWorkerFactory()
worker.queue.process_batch = MagicMock()
ex = Exception()
worker.queue.process_batch.side_effect = ex
worker.run_once()
on_error_back_off.assert_called_with(None, ex)
@patch('django_tasker.models.TaskQueue.process_batch')
@patch('django_tasker.models.TaskQueue.retry_busy_timeouts')
def test_retry_busy_timeouts_called(self, retry_busy_timeouts, process_batch):
worker = factories.TaskWorkerFactory()
process_batch.return_value = False
worker.run_once()
self.assertTrue(retry_busy_timeouts.called)
class RetryLaterExceptionTests(TestCase):
@override_settings(USE_TZ=True)
def test_naive_eta_tz(self):
ex = exceptions.RetryLaterException('', eta=datetime.now())
self.assertIsNotNone(ex.eta.tzinfo)
@override_settings(USE_TZ=True)
def test_no_eta_tz(self):
ex = exceptions.RetryLaterException('', countdown=3)
self.assertIsNotNone(ex.eta.tzinfo)
@override_settings(USE_TZ=True)
def test_aware_eta_tz(self):
ex = exceptions.RetryLaterException('', eta=timezone.now())
self.assertIsNotNone(ex.eta.tzinfo)
@override_settings(USE_TZ=False)
def test_naive_eta_no_tz(self):
ex = exceptions.RetryLaterException('', eta=datetime.now())
self.assertIsNone(ex.eta.tzinfo)
@override_settings(USE_TZ=False)
def test_aware_eta_no_tz(self):
ex = exceptions.RetryLaterException('', eta=timezone.now())
self.assertIsNone(ex.eta.tzinfo)
class MigrationsCheck(TestCase):
def test_missing_migrations(self):
from django.db import connection
from django.apps.registry import apps
from django.db.migrations.executor import MigrationExecutor
executor = MigrationExecutor(connection)
from django.db.migrations.autodetector import MigrationAutodetector
from django.db.migrations.state import ProjectState
autodetector = MigrationAutodetector(
executor.loader.project_state(),
ProjectState.from_apps(apps),
)
changes = autodetector.changes(graph=executor.loader.graph)
self.assertEqual({}, changes)
|
|
# PyVision License
#
# Copyright (c) 2006-2008 David S. Bolme
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither name of copyright holders nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# TODO: Needs some work.
import math
def lg(x):
return math.log(x)/math.log(2)
def entropy(labels):
label_set = set(labels)
# setup variables needed for statistics
sums = {}
count = 0.0
for each in label_set:
sums[each] = 0.0
for each in labels:
sums[each] += 1.0
count += 1.0
ent = 0.0
for each in sums.values():
p_i = each/count
ent -= p_i * lg (p_i)
return ent
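# Illustrative check (not part of the original module): two equally likely
# labels give one bit of entropy, while a single repeated label gives zero.
#   entropy(['a', 'a', 'b', 'b'])  # -> 1.0
#   entropy(['a', 'a', 'a'])       # -> 0.0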
def maxValue(labels):
label_set = set(labels)
# setup variables needed for statistics
sums = {}
count = 0.0
for each in label_set:
sums[each] = 0.0
for each in labels:
sums[each] += 1.0
count += 1.0
highVal = 0.0
highLab = labels[0]
for key,value in sums.iteritems():
if value > highVal:
highVal = value
highLab = key
return highLab
def getLabels(features):
labels = [ each[0] for each in features ]
return labels
def splitFeatures(feature,features):
split = {}
for label,values in features:
key = values[feature]
if not split.has_key(key):
split[key] = []
split[key].append([label,values])
return split
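# Illustrative example (not part of the original module): splitting on feature
# index 0 groups the (label, values) pairs by the value found at that index.
#   splitFeatures(0, [('x', [1, 2]), ('y', [1, 3]), ('z', [2, 2])])
#   # -> {1: [['x', [1, 2]], ['y', [1, 3]]], 2: [['z', [2, 2]]]}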
class ID3:
def __init__(self):
self.training_data = []
self.testing_data = []
self.labels = set()
self.top = None
def addTraining(self,label,feature):
'''Training Data'''
self.training_data.append((label,feature))
self.labels |= set([label])
def addTesting(self,label,feature):
'''Testing Data'''
self.testing_data.append((label,feature))
#self.labels |= self.labels | set([label])
def train(self):
'''Train the classifier on the current data'''
self.top = Node(self.training_data)
def classify(self,feature):
'''Classify the feature vector'''
return self.top.classify(feature)
def test(self, data = None):
if data is None:
data = self.testing_data
#_logger.info("Running test.")
correct = 0
wrong = 0
for label,feature in data:
c,w = self.classify(feature)
if c == label:
correct += 1
else:
wrong += 1
print "Test: %d/%d"%(correct,correct+wrong)
return float(correct)/float(correct+wrong)
class Node:
def __init__(self,features):
self.cutoff = 2
self.min_entropy = 0.2
self.feature = None
self.entropy = None
self.label = None #
self.children = None
self.train(features)
def train(self,features):
labels = getLabels(features)
print "Ent:",entropy(labels)
print "Max:",maxValue(labels)
self.label = maxValue(labels)
self.entropy = entropy(labels)
if len(features) < self.cutoff or self.entropy < self.min_entropy:
return
no_feature = len(features[0][1])
max_gain = 0.0
max_feature = 0
max_children = {}
for i in range(no_feature):
gain = self.entropy
s = splitFeatures(i,features)
for key,vals in s.iteritems():
scale = float(len(vals))/float(len(features))
e = entropy(getLabels(vals))
#print "Split %3d:"%i,key,len(vals), e
gain -= scale*e
if max_gain < gain:
max_gain = gain
max_feature = i
max_children = s
print "Gain: ",max_gain,max_feature
self.feature = max_feature
self.gain = max_gain
self.children = {}
for label,features in max_children.iteritems():
self.children[label] = Node(features)
#for i in range(features):
def classify(self,feature):
'''Classify the feature vector'''
if self.feature is not None:
val = feature[self.feature]
if self.children.has_key(val):
return self.children[val].classify(feature)
return self.label,None
def toBits(val,bits = 4):
result = []
for i in range(bits):
result.append(val&1)
val = val >> 1
result.reverse()
return result
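# Illustrative example (not part of the original module): the value is emitted
# most-significant bit first, padded to the requested width.
#   toBits(5, 4)  # -> [0, 1, 0, 1]
#   toBits(5, 3)  # -> [1, 0, 1]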
|
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import frappe
import os, json
from frappe import _
from frappe.modules import scrub, get_module_path
from frappe.utils import flt, cint, get_html_format, cstr
from frappe.model.utils import render_include
from frappe.translate import send_translations
import frappe.desk.reportview
from frappe.permissions import get_role_permissions
from six import string_types
def get_report_doc(report_name):
doc = frappe.get_doc("Report", report_name)
if not doc.is_permitted():
frappe.throw(_("You don't have access to Report: {0}").format(report_name), frappe.PermissionError)
if not frappe.has_permission(doc.ref_doctype, "report"):
frappe.throw(_("You don't have permission to get a report on: {0}").format(doc.ref_doctype),
frappe.PermissionError)
if doc.disabled:
frappe.throw(_("Report {0} is disabled").format(report_name))
return doc
@frappe.whitelist()
def get_script(report_name):
report = get_report_doc(report_name)
module = report.module or frappe.db.get_value("DocType", report.ref_doctype, "module")
module_path = get_module_path(module)
report_folder = os.path.join(module_path, "report", scrub(report.name))
script_path = os.path.join(report_folder, scrub(report.name) + ".js")
print_path = os.path.join(report_folder, scrub(report.name) + ".html")
script = None
if os.path.exists(script_path):
with open(script_path, "r") as f:
script = f.read()
html_format = get_html_format(print_path)
if not script and report.javascript:
script = report.javascript
if not script:
script = "frappe.query_reports['%s']={}" % report_name
# load translations
if frappe.lang != "en":
send_translations(frappe.get_lang_dict("report", report_name))
return {
"script": render_include(script),
"html_format": html_format
}
@frappe.whitelist()
def run(report_name, filters=None, user=None):
report = get_report_doc(report_name)
if not user:
user = frappe.session.user
if not filters:
filters = []
if filters and isinstance(filters, string_types):
filters = json.loads(filters)
if not frappe.has_permission(report.ref_doctype, "report"):
frappe.msgprint(_("Must have report permission to access this report."),
raise_exception=True)
columns, result, message, chart, data_to_be_printed = [], [], None, None, None
if report.report_type=="Query Report":
if not report.query:
frappe.msgprint(_("Must specify a Query to run"), raise_exception=True)
if not report.query.lower().startswith("select"):
frappe.msgprint(_("Query must be a SELECT"), raise_exception=True)
result = [list(t) for t in frappe.db.sql(report.query, filters)]
columns = [cstr(c[0]) for c in frappe.db.get_description()]
else:
module = report.module or frappe.db.get_value("DocType", report.ref_doctype, "module")
if report.is_standard=="Yes":
method_name = get_report_module_dotted_path(module, report.name) + ".execute"
res = frappe.get_attr(method_name)(frappe._dict(filters))
columns, result = res[0], res[1]
if len(res) > 2:
message = res[2]
if len(res) > 3:
chart = res[3]
if len(res) > 4:
data_to_be_printed = res[4]
if report.apply_user_permissions and result:
result = get_filtered_data(report.ref_doctype, columns, result, user)
if cint(report.add_total_row) and result:
result = add_total_row(result, columns)
return {
"result": result,
"columns": columns,
"message": message,
"chart": chart,
"data_to_be_printed": data_to_be_printed
}
@frappe.whitelist()
def export_query():
"""export from query reports"""
data = frappe._dict(frappe.local.form_dict)
del data["cmd"]
if "csrf_token" in data:
del data["csrf_token"]
if isinstance(data.get("filters"), string_types):
filters = json.loads(data["filters"])
if isinstance(data.get("report_name"), string_types):
report_name = data["report_name"]
if isinstance(data.get("file_format_type"), string_types):
file_format_type = data["file_format_type"]
if isinstance(data.get("visible_idx"), string_types):
visible_idx = json.loads(data.get("visible_idx"))
else:
visible_idx = None
if file_format_type == "Excel":
data = run(report_name, filters)
data = frappe._dict(data)
columns = get_columns_dict(data.columns)
result = [[]]
# add column headings
for idx in range(len(data.columns)):
result[0].append(columns[idx]["label"])
# build table from dict
if isinstance(data.result[0], dict):
for i,row in enumerate(data.result):
# only rows which are visible in the report
if row and (i+1 in visible_idx):
row_list = []
for idx in range(len(data.columns)):
row_list.append(row.get(columns[idx]["fieldname"],""))
result.append(row_list)
elif not row:
result.append([])
else:
result = result + [d for i,d in enumerate(data.result) if (i+1 in visible_idx)]
from frappe.utils.xlsxutils import make_xlsx
xlsx_file = make_xlsx(result, "Query Report")
frappe.response['filename'] = report_name + '.xlsx'
frappe.response['filecontent'] = xlsx_file.getvalue()
frappe.response['type'] = 'binary'
def get_report_module_dotted_path(module, report_name):
return frappe.local.module_app[scrub(module)] + "." + scrub(module) \
+ ".report." + scrub(report_name) + "." + scrub(report_name)
def add_total_row(result, columns, meta = None):
total_row = [""]*len(columns)
has_percent = []
for i, col in enumerate(columns):
fieldtype, options = None, None
if isinstance(col, string_types):
if meta:
# get fieldtype from the meta
field = meta.get_field(col)
if field:
fieldtype = meta.get_field(col).fieldtype
else:
col = col.split(":")
if len(col) > 1:
if col[1]:
fieldtype = col[1]
if "/" in fieldtype:
fieldtype, options = fieldtype.split("/")
else:
fieldtype = "Data"
else:
fieldtype = col.get("fieldtype")
options = col.get("options")
for row in result:
if fieldtype in ["Currency", "Int", "Float", "Percent"] and flt(row[i]):
total_row[i] = flt(total_row[i]) + flt(row[i])
if fieldtype == "Percent" and i not in has_percent:
has_percent.append(i)
if fieldtype=="Link" and options == "Currency":
total_row[i] = result[0][i]
for i in has_percent:
total_row[i] = flt(total_row[i]) / len(result)
first_col_fieldtype = None
if isinstance(columns[0], string_types):
first_col = columns[0].split(":")
if len(first_col) > 1:
first_col_fieldtype = first_col[1].split("/")[0]
else:
first_col_fieldtype = columns[0].get("fieldtype")
if first_col_fieldtype not in ["Currency", "Int", "Float", "Percent", "Date"]:
if first_col_fieldtype == "Link":
total_row[0] = "'" + _("Total") + "'"
else:
total_row[0] = _("Total")
result.append(total_row)
return result
def get_filtered_data(ref_doctype, columns, data, user):
result = []
linked_doctypes = get_linked_doctypes(columns, data)
match_filters_per_doctype = get_user_match_filters(linked_doctypes, ref_doctype)
shared = frappe.share.get_shared(ref_doctype, user)
columns_dict = get_columns_dict(columns)
role_permissions = get_role_permissions(frappe.get_meta(ref_doctype), user)
if_owner = role_permissions.get("if_owner", {}).get("report")
if match_filters_per_doctype:
for row in data:
# Use linked_doctypes.get(ref_doctype) because the key is removed from linked_doctypes when its column is empty
if linked_doctypes.get(ref_doctype) and shared and row[linked_doctypes[ref_doctype]] in shared:
result.append(row)
elif has_match(row, linked_doctypes, match_filters_per_doctype, ref_doctype, if_owner, columns_dict, user):
result.append(row)
else:
result = list(data)
return result
def has_match(row, linked_doctypes, doctype_match_filters, ref_doctype, if_owner, columns_dict, user):
"""Returns True if after evaluating permissions for each linked doctype
- There is an owner match for the ref_doctype
- `and` There is a user permission match for all linked doctypes
Returns True if the row is empty
Note:
Each doctype could have multiple conflicting user permission doctypes.
Hence a match by any one of those permission sets is treated as a match.
This behavior is equivalent to the trickling of user permissions of linked doctypes to the ref doctype.
"""
resultant_match = True
if not row:
# allow empty rows :)
return resultant_match
for doctype, filter_list in doctype_match_filters.items():
matched_for_doctype = False
if doctype==ref_doctype and if_owner:
idx = linked_doctypes.get("User")
if (idx is not None
and row[idx]==user
and columns_dict[idx]==columns_dict.get("owner")):
# owner match is true
matched_for_doctype = True
if not matched_for_doctype:
for match_filters in filter_list:
match = True
for dt, idx in linked_doctypes.items():
# case handled above
if dt=="User" and columns_dict[idx]==columns_dict.get("owner"):
continue
cell_value = None
if isinstance(row, dict):
cell_value = row.get(idx)
elif isinstance(row, list):
cell_value = row[idx]
if dt in match_filters and cell_value not in match_filters.get(dt) and frappe.db.exists(dt, cell_value):
match = False
break
# each doctype could have multiple conflicting user permission doctypes, hence using OR
# so that even if one of the sets allows a match, it is true
matched_for_doctype = matched_for_doctype or match
if matched_for_doctype:
break
# each doctype's user permissions should match the row! hence using AND
resultant_match = resultant_match and matched_for_doctype
if not resultant_match:
break
return resultant_match
def get_linked_doctypes(columns, data):
linked_doctypes = {}
columns_dict = get_columns_dict(columns)
for idx, col in enumerate(columns):
df = columns_dict[idx]
if df.get("fieldtype")=="Link":
if isinstance(col, string_types):
linked_doctypes[df["options"]] = idx
else:
# dict
linked_doctypes[df["options"]] = df["fieldname"]
# remove doctype if column is empty
columns_with_value = []
for row in data:
if row:
if len(row) != len(columns_with_value):
if isinstance(row, (list, tuple)):
row = enumerate(row)
elif isinstance(row, dict):
row = row.items()
for col, val in row:
if val and col not in columns_with_value:
columns_with_value.append(col)
for doctype, key in list(linked_doctypes.items()):
if key not in columns_with_value:
del linked_doctypes[doctype]
return linked_doctypes
def get_columns_dict(columns):
"""Returns a dict with column docfield values as dict
The keys for the dict are both idx and fieldname,
so either index or fieldname can be used to search for a column's docfield properties
"""
columns_dict = frappe._dict()
for idx, col in enumerate(columns):
col_dict = frappe._dict()
# string
if isinstance(col, string_types):
col = col.split(":")
if len(col) > 1:
if "/" in col[1]:
col_dict["fieldtype"], col_dict["options"] = col[1].split("/")
else:
col_dict["fieldtype"] = col[1]
col_dict["label"] = col[0]
col_dict["fieldname"] = frappe.scrub(col[0])
# dict
else:
col_dict.update(col)
if "fieldname" not in col_dict:
col_dict["fieldname"] = frappe.scrub(col_dict["label"])
columns_dict[idx] = col_dict
columns_dict[col_dict["fieldname"]] = col_dict
return columns_dict
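# Illustrative example (hypothetical columns, not part of the original module):
# a string column "Posting Date:Date:100" yields
#   {"label": "Posting Date", "fieldtype": "Date", "fieldname": "posting_date"}
# and "Customer:Link/Customer:120" additionally yields "options": "Customer";
# each entry is stored under both its numeric index and its fieldname.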
def get_user_match_filters(doctypes, ref_doctype):
match_filters = {}
for dt in doctypes:
filter_list = frappe.desk.reportview.build_match_conditions(dt, False)
if filter_list:
match_filters[dt] = filter_list
return match_filters
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2019, The OpenThread Commissioner Authors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
Thread 1.2 commissioner interface implementation
"""
from future.utils import raise_
import serial
import time
import re
import uuid
import json
import base64
import binascii
import sys
import commissioner
from commissioner import TLV_TYPE_BORDER_AGENT_LOCATOR
from commissioner import TLV_TYPE_COMM_SESSION_ID
from commissioner import TLV_TYPE_STEERING_DATA
from commissioner import TLV_TYPE_AE_STEERING_DATA
from commissioner import TLV_TYPE_NMKP_STEERING_DATA
from commissioner import TLV_TYPE_JOINER_UDP_PORT
from commissioner import TLV_TYPE_AE_UDP_PORT
from commissioner import TLV_TYPE_NMKP_UDP_PORT
from commissioner import TLV_TYPE_ACTIVE_TIMESTAMP
from commissioner import TLV_TYPE_CHANNEL
from commissioner import TLV_TYPE_CHANNEL_MASK
from commissioner import TLV_TYPE_EXTENDED_PAN_ID
from commissioner import TLV_TYPE_MESH_LOCAL_PREFIX
from commissioner import TLV_TYPE_NETWORK_MASTER_KEY
from commissioner import TLV_TYPE_NETWORK_NAME
from commissioner import TLV_TYPE_PAN_ID
from commissioner import TLV_TYPE_PSKC
from commissioner import TLV_TYPE_SECURITY_POLICY
from commissioner import TLV_TYPE_DELAY_TIMER
from commissioner import TLV_TYPE_PENDING_TIMESTAMP
from commissioner import TLV_TYPE_TRI_HOSTNAME
from commissioner import TLV_TYPE_REGISTRAR_HOSTNAME
from commissioner import TLV_TYPE_REGISTRAR_IPv6_ADDR
from commissioner import JOINER_TYPE_MESHCOP
from commissioner import JOINER_TYPE_CCM_AE
from commissioner import JOINER_TYPE_CCM_NMKP
from commissioner import ICommissioner
from GRLLibs.UtilityModules.enums import PlatformDiagnosticPacket_Direction, PlatformDiagnosticPacket_Type
from GRLLibs.ThreadPacket.PlatformPackets import PlatformDiagnosticPacket, PlatformPackets
NEW_LINE = re.compile(r'\r\n|\n')
COMMISSIONER_USER = 'pi'
COMMISSIONER_PASSWORD = 'raspberry'
COMMISSIONER_PROMPT = 'pi@raspberry'
COMMISSIONER_CTL = 'sudo commissioner_ctl.py'
# Command line length cannot exceed this
TTY_COLS = 4096
CONTROL_SEQUENCE = re.compile(r'\x1b' # ESC
r'\[' # CSI
r'[0-?]*' # Parameter bytes
r'[!-/]*' # Intermediate bytes
r'[@-~]' # Final byte
)
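# Illustrative example (not part of the original module): this pattern strips
# ANSI colour codes from terminal output before it is parsed.
#   CONTROL_SEQUENCE.sub('', '\x1b[32mdone\x1b[0m')  # -> 'done'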
TLV_TYPE_TO_STRING = {
TLV_TYPE_BORDER_AGENT_LOCATOR: 'BorderAgentLocator',
TLV_TYPE_COMM_SESSION_ID: 'SessionId',
TLV_TYPE_STEERING_DATA: 'SteeringData',
TLV_TYPE_AE_STEERING_DATA: 'AeSteeringData',
TLV_TYPE_NMKP_STEERING_DATA: 'NmkpSteeringData',
TLV_TYPE_JOINER_UDP_PORT: 'JoinerUdpPort',
TLV_TYPE_AE_UDP_PORT: 'AeUdpPort',
TLV_TYPE_NMKP_UDP_PORT: 'NmkpUdpPort',
TLV_TYPE_ACTIVE_TIMESTAMP: 'ActiveTimestamp',
TLV_TYPE_CHANNEL: 'Channel',
TLV_TYPE_CHANNEL_MASK: 'ChannelMask',
TLV_TYPE_EXTENDED_PAN_ID: 'ExtendedPanId',
TLV_TYPE_MESH_LOCAL_PREFIX: 'MeshLocalPrefix',
TLV_TYPE_NETWORK_MASTER_KEY: 'NetworkMasterKey',
TLV_TYPE_NETWORK_NAME: 'NetworkName',
TLV_TYPE_PAN_ID: 'PanId',
TLV_TYPE_PSKC: 'PSKc',
TLV_TYPE_SECURITY_POLICY: 'SecurityPolicy',
TLV_TYPE_DELAY_TIMER: 'DelayTimer',
TLV_TYPE_PENDING_TIMESTAMP: 'PendingTimestamp',
TLV_TYPE_TRI_HOSTNAME: 'TriHostname',
TLV_TYPE_REGISTRAR_HOSTNAME: 'RegistrarHostname',
TLV_TYPE_REGISTRAR_IPv6_ADDR: 'RegistrarIpv6Addr',
}
TLV_TYPE_FROM_STRING = {
TLV_TYPE_TO_STRING[key]: key for key in TLV_TYPE_TO_STRING
}
JOINER_TYPE_TO_STRING = {
JOINER_TYPE_MESHCOP: 'meshcop',
JOINER_TYPE_CCM_AE: 'ae',
JOINER_TYPE_CCM_NMKP: 'nmkp',
}
class OTCommissioner(ICommissioner):
def __init__(self, config, handler, simulator=None):
super(OTCommissioner, self).__init__(config)
self.enable_dtls_debug_logging = True
self.logging_level = 'debug'
self.keep_alive_interval = 40
self.max_connection_num = 100
self.log_file = '/tmp/commissioner.log'
self._simulator = simulator
self._handler = handler
self._lines = []
self._command('stty cols {}'.format(TTY_COLS))
config_path = '/tmp/commissioner.{}.json'.format(uuid.uuid4())
self._write_config(config_path=config_path, config=config)
response = self._command('{} init "{}"'.format(COMMISSIONER_CTL,
config_path))
if self._command('echo $?')[0] != '0':
raise commissioner.Error('Failed to init, error:\n{}'.format(
'\n'.join(response)))
@staticmethod
def makeLocalCommissioner(config, simulator):
import pexpect
handler = pexpect.spawn("/bin/bash")
return OTCommissioner(config, handler, simulator)
@staticmethod
def makeHarnessCommissioner(config, serial_handler):
if not isinstance(serial_handler, serial.Serial):
raise commissioner.Error("expect a serial handler")
return OTCommissioner(config, serial_handler)
def start(self, borderAgentAddr, borderAgentPort):
self._command(f'sudo rm {self.log_file}')
self._command(f'sudo touch {self.log_file}')
self._execute_and_check('start {} {}'.format(
borderAgentAddr,
borderAgentPort,
))
def stop(self):
self._execute_and_check('stop')
def isActive(self):
response = self._execute_and_check('active')
if 'true' in response[0]:
return True
elif 'false' in response[0]:
return False
else:
raise commissioner.Error('Unrecognized result "{}"'.format(
response[0]))
def getSessionId(self):
response = self._execute_and_check('sessionid')
try:
return int(response[0])
except ValueError as e:
raise_(commissioner.Error, repr(e), sys.exc_info()[2])
def MGMT_COMMISSIONER_GET(self, tlvTypes):
types = ' '.join(map(lambda x: TLV_TYPE_TO_STRING[x], tlvTypes))
command = 'commdataset get ' + types
response = self._execute_and_check(command)
try:
result = json.loads(' '.join(response[:-1]))
result = {TLV_TYPE_FROM_STRING[key]: result[key] for key in result}
for key in [
TLV_TYPE_STEERING_DATA, TLV_TYPE_AE_STEERING_DATA,
TLV_TYPE_NMKP_STEERING_DATA
]:
if key in result:
result[key] = OTCommissioner._hex_to_bytes(result[key])
return result
except Exception as e:
raise_(commissioner.Error, repr(e), sys.exc_info()[2])
def MGMT_COMMISSIONER_SET(self, commDataset):
for key in [
TLV_TYPE_STEERING_DATA, TLV_TYPE_AE_STEERING_DATA,
TLV_TYPE_NMKP_STEERING_DATA
]:
if key in commDataset:
commDataset[key] = OTCommissioner._bytes_to_hex(
commDataset[key])
dataset = {
TLV_TYPE_TO_STRING[key]: commDataset[key] for key in commDataset
}
data = json.dumps(dataset)
self._execute_and_check("commdataset set '{}'".format(data))
def enableJoiner(self, joinerType, eui64=None, password=None):
command = ['joiner', 'enable', JOINER_TYPE_TO_STRING[joinerType]]
if eui64:
command.append(str(eui64))
else:
command[1] = 'enableall'
if password:
command.append(password)
self._execute_and_check(' '.join(command))
def disableJoiner(self, joinerType, eui64=None):
command = ['joiner', 'disable', JOINER_TYPE_TO_STRING[joinerType]]
if eui64:
command.append(str(eui64))
else:
command[1] = 'disableall'
self._execute_and_check(' '.join(command))
def MGMT_ACTIVE_GET(self, tlvTypes):
types = ' '.join(map(lambda x: TLV_TYPE_TO_STRING[x], tlvTypes))
result = self._execute_and_check(
'opdataset get active {}'.format(types))
try:
return OTCommissioner._active_op_dataset_from_json(' '.join(
result[:-1]))
except Exception as e:
raise_(commissioner.Error, repr(e), sys.exc_info()[2])
def MGMT_ACTIVE_SET(self, activeOpDataset):
self._execute_and_check("opdataset set active '{}'".format(
OTCommissioner._active_op_dataset_to_json(activeOpDataset)))
def MGMT_PENDING_GET(self, tlvTypes):
types = ' '.join(map(lambda x: TLV_TYPE_TO_STRING[x], tlvTypes))
result = self._execute_and_check(
'opdataset get pending {}'.format(types))
try:
return OTCommissioner._pending_op_dataset_from_json(' '.join(
result[:-1]))
except Exception as e:
raise_(commissioner.Error, repr(e), sys.exc_info()[2])
def MGMT_PENDING_SET(self, pendingOpDataset):
self._execute_and_check("opdataset set pending '{}'".format(
OTCommissioner._pending_op_dataset_to_json(pendingOpDataset)))
def MGMT_BBR_GET(self, tlvTypes):
types = ' '.join(map(lambda x: TLV_TYPE_TO_STRING[x], tlvTypes))
result = self._execute_and_check('bbrdataset get {}'.format(types))
try:
result = json.loads(' '.join(result[:-1]))
return {TLV_TYPE_FROM_STRING[key]: result[key] for key in result}
except Exception as e:
raise_(commissioner.Error, repr(e), sys.exc_info()[2])
def MGMT_BBR_SET(self, bbrDataset):
dataset = {
TLV_TYPE_TO_STRING[key]: bbrDataset[key] for key in bbrDataset
}
dataset = json.dumps(dataset)
self._execute_and_check("bbrdataset set '{}'".format(dataset))
def MLR(self, multicastAddrs, timeout):
self._execute_and_check('mlr {} {}'.format(
' '.join(multicastAddrs),
timeout,
),
check=False)
def MGMT_ANNOUNCE_BEGIN(self, channelMask, count, period, dstAddr):
self._execute_and_check('announce {} {} {} {}'.format(
channelMask,
count,
period,
dstAddr,
))
def MGMT_PANID_QUERY(self, channelMask, panId, dstAddr, timeout):
self._execute_and_check('panid query {} {} {}'.format(
channelMask,
panId,
dstAddr,
))
self._sleep(timeout)
result = self._execute_and_check('panid conflict {}'.format(panId))
result = int(result[0])
return False if result == 0 else True
def MGMT_ED_SCAN(self, channelMask, count, period, scanDuration, dstAddr,
timeout):
self._execute_and_check('energy scan {} {} {} {} {}'.format(
channelMask,
count,
period,
scanDuration,
dstAddr,
))
self._sleep(timeout)
result = self._execute_and_check('energy report {}'.format(dstAddr))
if result[0] == 'null':
raise commissioner.Error(
'No energy report found for {}'.format(dstAddr))
try:
result = json.loads(' '.join(result[:-1]))
return OTCommissioner.EnergyReport(
channelMask=self._channel_mask_from_json_obj(
result['ChannelMask']),
energyList=self._hex_to_bytes(result['EnergyList']),
)
except Exception as e:
raise_(commissioner.Error, repr(e), sys.exc_info()[2])
def MGMT_REENROLL(self, dstAddr):
self._execute_and_check('reenroll {}'.format(dstAddr))
def MGMT_DOMAIN_RESET(self, dstAddr):
self._execute_and_check('domainreset {}'.format(dstAddr))
def MGMT_NET_MIGRATE(self, dstAddr, designatedNetwork):
self._execute_and_check('migrate {} {}'.format(
dstAddr,
designatedNetwork,
))
def requestCOM_TOK(self, registrarAddr, registrarPort):
self._execute_and_check('token request {} {}'.format(
registrarAddr,
registrarPort,
))
def setCOM_TOK(self, signedCOM_TOK):
path_token = '/tmp/commissioner.token.{}'.format(uuid.uuid4())
step = 40
for i in range(0, len(signedCOM_TOK), step):
data = self._bytes_to_hex(signedCOM_TOK[i:i + step])
self._command('echo {} >> "{}"'.format(data, path_token))
self._execute_and_check('token set {}'.format(path_token))
def getCOM_TOK(self):
result = self._execute_and_check('token print')
return self._hex_to_bytes(result[0])
def getCommissioningLogs(self):
processed_logs = []
for log in self._getThciLogs():
if "JOIN_FIN.req:" in log:
encrypted_packet = PlatformDiagnosticPacket()
hex_value = encrypted_packet.split("JOIN_FIN.req:")[-1].strip()
payload = list(bytearray.fromhex(hex_value))
encrypted_packet.Direction = PlatformDiagnosticPacket_Direction.IN
encrypted_packet.Type = PlatformDiagnosticPacket_Type.JOIN_FIN_req
encrypted_packet.TLVsLength = len(payload)
encrypted_packet.TLVs = PlatformPackets.read(
encrypted_packet.Type, payload)
processed_logs.append(encrypted_packet)
elif "JOIN_FIN.rsp:" in log:
encrypted_packet = PlatformDiagnosticPacket()
hex_value = encrypted_packet.split("JOIN_FIN.rsp:")[-1].strip()
payload = list(bytearray.fromhex(hex_value))
encrypted_packet.Direction = PlatformDiagnosticPacket_Direction.OUT
encrypted_packet.Type = PlatformDiagnosticPacket_Type.JOIN_FIN_rsp
encrypted_packet.TLVsLength = len(payload)
encrypted_packet.TLVs = PlatformPackets.read(
encrypted_packet.Type, payload)
processed_logs.append(encrypted_packet)
return processed_logs
def getMlrLogs(self):
processed_logs = []
for mlr in [log for log in self._getThciLogs() if "MLR.rsp" in log]:
encrypted_packet = PlatformDiagnosticPacket()
hex_value = mlr.split("MLR.rsp:")[-1].strip()
payload = list(bytearray.fromhex(hex_value))
encrypted_packet.Direction = PlatformDiagnosticPacket_Direction.OUT
encrypted_packet.Type = PlatformDiagnosticPacket_Type.MLR_rsp
encrypted_packet.TLVsLength = len(payload)
encrypted_packet.TLVs = PlatformPackets.read(
encrypted_packet.Type, payload)
processed_logs.append(encrypted_packet)
return processed_logs
def _getThciLogs(self):
return self._command("grep \"\\[ thci \\]\" {}".format(self.log_file))
def _execute_and_check(self, command, check=True):
# Escape quotes for bash
command = command.replace('"', r'"\""')
response = self._command('{} execute "{}"'.format(
COMMISSIONER_CTL, command))
if check:
response = OTCommissioner._check_response(response)
return response
def _sleep(self, duration):
time.sleep(duration)
def _command(self, cmd, timeout=10):
lines = self._handler.bash(cmd, timeout=timeout)
lines = [re.sub(r'\x1b\[\d+m', '', l) for l in lines]
lines = [l for l in lines if l]
return lines
def _write_config(self, config_path, config):
data = {
'EnableCcm': config.isCcmMode,
'Id': config.id,
'PSKc': binascii.hexlify(config.pskc).decode(),
'DomainName': config.domainName,
'EnableDtlsDebugLogging': self.enable_dtls_debug_logging,
'LogLevel': self.logging_level,
'KeepAliveInterval': self.keep_alive_interval,
'MaxConnectionNum': self.max_connection_num,
'LogFile': self.log_file,
}
if config.isCcmMode:
if config.privateKey:
path = '/tmp/commissioner.private_key.{}'.format(uuid.uuid4())
self._send_file(local_path=config.privateKey, remote_path=path)
data['PrivateKeyFile'] = path
if config.cert:
path = '/tmp/commissioner.cert.{}'.format(uuid.uuid4())
self._send_file(local_path=config.cert, remote_path=path)
data['CertificateFile'] = path
if config.trustAnchor:
path = '/tmp/commissioner.trust_anchor.{}'.format(uuid.uuid4())
self._send_file(local_path=config.trustAnchor, remote_path=path)
data['TrustAnchorFile'] = path
self._command("echo '{}' >> '{}'".format(json.dumps(data), config_path))
def _send_file(self, local_path, remote_path):
with open(local_path, 'rb') as f:
b64 = base64.b64encode(f.read()).decode()
self._command('echo "{}" | base64 -d - > "{}"'.format(b64, remote_path))
@staticmethod
def _check_response(response):
if response[-1] != '[done]':
raise commissioner.Error('Error message:\n{!r}'.format(response))
return response
@staticmethod
def _hex_to_bytes(hex_string):
return bytes(bytearray.fromhex(hex_string))
@staticmethod
def _bytes_to_hex(byte_string):
return binascii.hexlify(bytearray(byte_string)).decode()
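# Illustrative round trip (not part of the original module):
#   OTCommissioner._bytes_to_hex(b'\x01\x02')  # -> '0102'
#   OTCommissioner._hex_to_bytes('0102')       # -> b'\x01\x02'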
@staticmethod
def _active_op_dataset_from_json(json_str):
result = json.loads(json_str)
result = {TLV_TYPE_FROM_STRING[key]: result[key] for key in result}
if TLV_TYPE_ACTIVE_TIMESTAMP in result:
timestamp = result[TLV_TYPE_ACTIVE_TIMESTAMP]
result[TLV_TYPE_ACTIVE_TIMESTAMP] = \
OTCommissioner._timestamp_from_json_obj(timestamp)
if TLV_TYPE_CHANNEL in result:
channel = result[TLV_TYPE_CHANNEL]
result[TLV_TYPE_CHANNEL] = OTCommissioner.Channel(
number=channel['Number'],
page=channel['Page'],
)
if TLV_TYPE_CHANNEL_MASK in result:
result[TLV_TYPE_CHANNEL_MASK] = \
OTCommissioner._channel_mask_from_json_obj(
result[TLV_TYPE_CHANNEL_MASK])
if TLV_TYPE_EXTENDED_PAN_ID in result:
xpanid = result[TLV_TYPE_EXTENDED_PAN_ID]
result[TLV_TYPE_EXTENDED_PAN_ID] = OTCommissioner._hex_to_bytes(
xpanid)
if TLV_TYPE_NETWORK_MASTER_KEY in result:
key = result[TLV_TYPE_NETWORK_MASTER_KEY]
result[TLV_TYPE_NETWORK_MASTER_KEY] = OTCommissioner._hex_to_bytes(
key)
if TLV_TYPE_SECURITY_POLICY in result:
security_policy = result[TLV_TYPE_SECURITY_POLICY]
result[TLV_TYPE_SECURITY_POLICY] = OTCommissioner.SecurityPolicy(
flags=security_policy['Flags'],
rotationTime=security_policy['RotationTime'],
)
return result
@staticmethod
def _active_op_dataset_to_json(dataset):
if TLV_TYPE_ACTIVE_TIMESTAMP in dataset:
dataset[TLV_TYPE_ACTIVE_TIMESTAMP] = \
OTCommissioner._timestamp_to_json_obj(
dataset[TLV_TYPE_ACTIVE_TIMESTAMP])
if TLV_TYPE_CHANNEL in dataset:
channel = dataset[TLV_TYPE_CHANNEL]
dataset[TLV_TYPE_CHANNEL] = {
'Number': channel.number,
'Page': channel.page,
}
if TLV_TYPE_CHANNEL_MASK in dataset:
dataset[TLV_TYPE_CHANNEL_MASK] = \
OTCommissioner._channel_mask_to_json_obj(
dataset[TLV_TYPE_CHANNEL_MASK])
if TLV_TYPE_EXTENDED_PAN_ID in dataset:
xpanid = dataset[TLV_TYPE_EXTENDED_PAN_ID]
dataset[TLV_TYPE_EXTENDED_PAN_ID] = OTCommissioner._bytes_to_hex(
xpanid)
if TLV_TYPE_NETWORK_MASTER_KEY in dataset:
key = dataset[TLV_TYPE_NETWORK_MASTER_KEY]
dataset[TLV_TYPE_NETWORK_MASTER_KEY] = OTCommissioner._bytes_to_hex(
key)
if TLV_TYPE_SECURITY_POLICY in dataset:
security_policy = dataset[TLV_TYPE_SECURITY_POLICY]
dataset[TLV_TYPE_SECURITY_POLICY] = {
'Flags': security_policy.flags,
'RotationTime': security_policy.rotationTime,
}
dataset = {TLV_TYPE_TO_STRING[key]: dataset[key] for key in dataset}
return json.dumps(dataset)
@staticmethod
def _pending_op_dataset_from_json(json_str):
result = OTCommissioner._active_op_dataset_from_json(json_str)
if TLV_TYPE_PENDING_TIMESTAMP in result:
timestamp = result[TLV_TYPE_PENDING_TIMESTAMP]
result[TLV_TYPE_PENDING_TIMESTAMP] = \
OTCommissioner._timestamp_from_json_obj(timestamp)
return result
@staticmethod
def _pending_op_dataset_to_json(dataset):
if TLV_TYPE_PENDING_TIMESTAMP in dataset:
dataset[TLV_TYPE_PENDING_TIMESTAMP] = \
OTCommissioner._timestamp_to_json_obj(
dataset[TLV_TYPE_PENDING_TIMESTAMP])
return OTCommissioner._active_op_dataset_to_json(dataset)
@staticmethod
def _timestamp_from_json_obj(json_obj):
return OTCommissioner.Timestamp(
seconds=json_obj['Seconds'],
ticks=json_obj['Ticks'],
u=False if json_obj['U'] == 0 else True,
)
@staticmethod
def _timestamp_to_json_obj(timestamp):
return {
'Seconds': timestamp.seconds,
'Ticks': timestamp.ticks,
'U': 1 if timestamp.u else 0,
}
@staticmethod
def _channel_mask_from_json_obj(json_obj):
result = []
for entry in json_obj:
result.append(
OTCommissioner.ChannelMaskEntry(
masks=OTCommissioner._hex_to_bytes(entry['Masks']),
page=entry['Page'],
))
return result
@staticmethod
def _channel_mask_to_json_obj(channel_mask):
result = []
for entry in channel_mask:
result.append({
'Masks': OTCommissioner._bytes_to_hex(entry.masks),
'Page': entry.page,
})
return result
|
|
"""Script to check the configuration file."""
from __future__ import annotations
import argparse
import asyncio
from collections import OrderedDict
from collections.abc import Mapping, Sequence
from glob import glob
import logging
import os
from typing import Any, Callable
from unittest.mock import patch
from homeassistant import core
from homeassistant.config import get_default_config_dir
from homeassistant.exceptions import HomeAssistantError
from homeassistant.helpers.check_config import async_check_ha_config_file
from homeassistant.util.yaml import Secrets
import homeassistant.util.yaml.loader as yaml_loader
# mypy: allow-untyped-calls, allow-untyped-defs
REQUIREMENTS = ("colorlog==5.0.1",)
_LOGGER = logging.getLogger(__name__)
# pylint: disable=protected-access
MOCKS: dict[str, tuple[str, Callable]] = {
"load": ("homeassistant.util.yaml.loader.load_yaml", yaml_loader.load_yaml),
"load*": ("homeassistant.config.load_yaml", yaml_loader.load_yaml),
"secrets": ("homeassistant.util.yaml.loader.secret_yaml", yaml_loader.secret_yaml),
}
PATCHES: dict[str, Any] = {}
C_HEAD = "bold"
ERROR_STR = "General Errors"
def color(the_color, *args, reset=None):
"""Color helper."""
# pylint: disable=import-outside-toplevel
from colorlog.escape_codes import escape_codes, parse_colors
try:
if not args:
assert reset is None, "You cannot reset if nothing is being printed"
return parse_colors(the_color)
return parse_colors(the_color) + " ".join(args) + escape_codes[reset or "reset"]
except KeyError as k:
raise ValueError(f"Invalid color {k!s} in {the_color}") from k
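# Illustrative usage (not part of the original script): color("red", "failed")
# returns the ANSI red escape, the text "failed", and a reset code, while
# color("bold") with no extra arguments returns just the escape sequence.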
def run(script_args: list) -> int:
"""Handle check config commandline script."""
parser = argparse.ArgumentParser(description="Check Home Assistant configuration.")
parser.add_argument("--script", choices=["check_config"])
parser.add_argument(
"-c",
"--config",
default=get_default_config_dir(),
help="Directory that contains the Home Assistant configuration",
)
parser.add_argument(
"-i",
"--info",
nargs="?",
default=None,
const="all",
help="Show a portion of the config",
)
parser.add_argument(
"-f", "--files", action="store_true", help="Show used configuration files"
)
parser.add_argument(
"-s", "--secrets", action="store_true", help="Show secret information"
)
args, unknown = parser.parse_known_args()
if unknown:
print(color("red", "Unknown arguments:", ", ".join(unknown)))
config_dir = os.path.join(os.getcwd(), args.config)
print(color("bold", "Testing configuration at", config_dir))
res = check(config_dir, args.secrets)
domain_info: list[str] = []
if args.info:
domain_info = args.info.split(",")
if args.files:
print(color(C_HEAD, "yaml files"), "(used /", color("red", "not used") + ")")
deps = os.path.join(config_dir, "deps")
yaml_files = [
f
for f in glob(os.path.join(config_dir, "**/*.yaml"), recursive=True)
if not f.startswith(deps)
]
for yfn in sorted(yaml_files):
the_color = "" if yfn in res["yaml_files"] else "red"
print(color(the_color, "-", yfn))
if res["except"]:
print(color("bold_white", "Failed config"))
for domain, config in res["except"].items():
domain_info.append(domain)
print(" ", color("bold_red", domain + ":"), color("red", "", reset="red"))
dump_dict(config, reset="red")
print(color("reset"))
if domain_info:
if "all" in domain_info:
print(color("bold_white", "Successful config (all)"))
for domain, config in res["components"].items():
print(" ", color(C_HEAD, domain + ":"))
dump_dict(config)
else:
print(color("bold_white", "Successful config (partial)"))
for domain in domain_info:
if domain == ERROR_STR:
continue
print(" ", color(C_HEAD, domain + ":"))
dump_dict(res["components"].get(domain))
if args.secrets:
flatsecret: dict[str, str] = {}
for sfn, sdict in res["secret_cache"].items():
sss = []
for skey in sdict:
if skey in flatsecret:
_LOGGER.error(
"Duplicated secrets in files %s and %s", flatsecret[skey], sfn
)
flatsecret[skey] = sfn
sss.append(color("green", skey) if skey in res["secrets"] else skey)
print(color(C_HEAD, "Secrets from", sfn + ":"), ", ".join(sss))
print(color(C_HEAD, "Used Secrets:"))
for skey, sval in res["secrets"].items():
if sval is None:
print(" -", skey + ":", color("red", "not found"))
continue
print(" -", skey + ":", sval)
return len(res["except"])
def check(config_dir, secrets=False):
"""Perform a check by mocking hass load functions."""
logging.getLogger("homeassistant.loader").setLevel(logging.CRITICAL)
res: dict[str, Any] = {
"yaml_files": OrderedDict(), # yaml_files loaded
"secrets": OrderedDict(), # secret cache and secrets loaded
"except": OrderedDict(), # exceptions raised (with config)
#'components' is a HomeAssistantConfig # noqa: E265
"secret_cache": {},
}
# pylint: disable=possibly-unused-variable
def mock_load(filename, secrets=None):
"""Mock hass.util.load_yaml to save config file names."""
res["yaml_files"][filename] = True
return MOCKS["load"][1](filename, secrets)
# pylint: disable=possibly-unused-variable
def mock_secrets(ldr, node):
"""Mock _get_secrets."""
try:
val = MOCKS["secrets"][1](ldr, node)
except HomeAssistantError:
val = None
res["secrets"][node.value] = val
return val
# Patches with local mock functions
for key, val in MOCKS.items():
if not secrets and key == "secrets":
continue
# The * in the key is removed to find the mock_function (side_effect)
# This allows us to use one side_effect to patch multiple locations
mock_function = locals()[f"mock_{key.replace('*', '')}"]
PATCHES[key] = patch(val[0], side_effect=mock_function)
# Start all patches
for pat in PATCHES.values():
pat.start()
if secrets:
# Ensure !secrets point to the patched function
yaml_loader.SafeLineLoader.add_constructor("!secret", yaml_loader.secret_yaml)
def secrets_proxy(*args):
secrets = Secrets(*args)
res["secret_cache"] = secrets._cache
return secrets
try:
with patch.object(yaml_loader, "Secrets", secrets_proxy):
res["components"] = asyncio.run(async_check_config(config_dir))
res["secret_cache"] = {
str(key): val for key, val in res["secret_cache"].items()
}
for err in res["components"].errors:
domain = err.domain or ERROR_STR
res["except"].setdefault(domain, []).append(err.message)
if err.config:
res["except"].setdefault(domain, []).append(err.config)
except Exception as err: # pylint: disable=broad-except
print(color("red", "Fatal error while loading config:"), str(err))
res["except"].setdefault(ERROR_STR, []).append(str(err))
finally:
# Stop all patches
for pat in PATCHES.values():
pat.stop()
if secrets:
# Ensure !secrets point to the original function
yaml_loader.SafeLineLoader.add_constructor(
"!secret", yaml_loader.secret_yaml
)
return res
async def async_check_config(config_dir):
"""Check the HA config."""
hass = core.HomeAssistant()
hass.config.config_dir = config_dir
components = await async_check_ha_config_file(hass)
await hass.async_stop(force=True)
return components
def line_info(obj, **kwargs):
"""Display line config source."""
if hasattr(obj, "__config_file__"):
return color(
"cyan", f"[source {obj.__config_file__}:{obj.__line__ or '?'}]", **kwargs
)
return "?"
def dump_dict(layer, indent_count=3, listi=False, **kwargs):
"""Display a dict.
A friendly version of print(yaml_loader.yaml.dump(config)).
"""
def sort_dict_key(val):
"""Return the dict key for sorting."""
key = str(val[0]).lower()
return "0" if key == "platform" else key
indent_str = indent_count * " "
if listi or isinstance(layer, list):
indent_str = indent_str[:-1] + "-"
if isinstance(layer, Mapping):
for key, value in sorted(layer.items(), key=sort_dict_key):
if isinstance(value, (dict, list)):
print(indent_str, str(key) + ":", line_info(value, **kwargs))
dump_dict(value, indent_count + 2)
else:
print(indent_str, str(key) + ":", value)
indent_str = indent_count * " "
if isinstance(layer, Sequence):
for i in layer:
if isinstance(i, dict):
dump_dict(i, indent_count + 2, True)
else:
print(" ", indent_str, i)
|
|
# Copyright 2017-present Open Networking Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import os
import unittest
from xosconfig import Config
from xosconfig import Config as Config2
basic_conf = os.path.abspath(
os.path.dirname(os.path.realpath(__file__)) + "/confs/basic_conf.yaml"
)
yaml_not_valid = os.path.abspath(
os.path.dirname(os.path.realpath(__file__)) + "/confs/yaml_not_valid.yaml"
)
invalid_format = os.path.abspath(
os.path.dirname(os.path.realpath(__file__)) + "/confs/invalid_format.yaml"
)
sample_conf = os.path.abspath(
os.path.dirname(os.path.realpath(__file__)) + "/confs/sample_conf.yaml"
)
override_conf = os.path.abspath(
os.path.dirname(os.path.realpath(__file__)) + "/confs/override_conf.yaml"
)
extend_conf = os.path.abspath(
os.path.dirname(os.path.realpath(__file__)) + "/confs/extend_conf.yaml"
)
small_schema = os.path.abspath(
os.path.dirname(os.path.realpath(__file__)) + "/schemas/small_schema.yaml"
)
services_list = {"xos-ws": [], "xos-db": []}
db_service = [
{
"ModifyIndex": 6,
"CreateIndex": 6,
"Node": "0152982c3159",
"Address": "172.19.0.2",
"ServiceID": "0d53ce210785:frontend_xos_db_1:5432",
"ServiceName": "xos-db",
"ServiceTags": [],
"ServiceAddress": "172.18.0.4",
"ServicePort": 5432,
"ServiceEnableTagOverride": "false",
}
]
class XOSConfigTest(unittest.TestCase):
"""
Testing the XOS Config Module
"""
def setUp(self):
# In case some other testcase in nose has left config in an unclean state
Config.clear()
def tearDown(self):
# NOTE clear the config after each test
Config.clear()
def test_initialize_only_once(self):
"""
[XOS-Config] Raise if initialized twice
"""
with self.assertRaises(Exception) as e:
Config.init(sample_conf)
Config2.init(sample_conf)
self.assertEqual(str(e.exception), "[XOS-Config] Module already initialized")
def test_config_not_initialized(self):
"""
[XOS-Config] Raise if accessing properties without initialization
"""
with self.assertRaises(Exception) as e:
Config.get("database")
self.assertEqual(
str(e.exception), "[XOS-Config] Module has not been initialized"
)
def test_missing_file_exception(self):
"""
[XOS-Config] Raise if file not found
"""
with self.assertRaises(Exception) as e:
Config.init("missing_conf")
self.assertEqual(
str(e.exception), "[XOS-Config] Config file not found at: missing_conf"
)
def test_yaml_not_valid(self):
"""
[XOS-Config] Raise if yaml is not valid
"""
with self.assertRaises(Exception) as e:
Config.init(yaml_not_valid)
self.assertTrue(
str(e.exception).startswith("[XOS-Config] The config format is wrong:")
)
def test_invalid_format(self):
"""
[XOS-Config] Raise if format is not valid (we expect a dictionary)
"""
with self.assertRaises(Exception) as e:
Config.init(invalid_format)
self.assertEqual(
str(e.exception),
(
"[XOS-Config] The config format is wrong: Schema validation failed:\n"
" - Value '['I am', 'a yaml', 'but the', 'format is not', 'correct']' is not a dict. Value path: ''."
),
)
def test_env_override(self):
"""
[XOS-Config] the XOS_CONFIG_FILE environment variable should override the config_file
"""
os.environ["XOS_CONFIG_FILE"] = "env.yaml"
with self.assertRaises(Exception) as e:
Config.init("missing_conf")
self.assertEqual(
str(e.exception), "[XOS-Config] Config file not found at: env.yaml"
)
del os.environ["XOS_CONFIG_FILE"]
def test_schema_override(self):
"""
[XOS-Config] the XOS_CONFIG_SCHEMA environment variable should override the config_schema
"""
os.environ["XOS_CONFIG_SCHEMA"] = "env-schema.yaml"
with self.assertRaises(Exception) as e:
Config.init(basic_conf)
self.assertRegexpMatches(
str(e.exception),
r"\[XOS\-Config\] Config schema not found at: (.+)env-schema\.yaml",
)
# self.assertEqual(str(e.exception), "[XOS-Config] Config schema not found at: env-schema.yaml")
del os.environ["XOS_CONFIG_SCHEMA"]
def test_schema_override_usage(self):
"""
[XOS-Config] the XOS_CONFIG_SCHEMA should be used to validate a config
"""
os.environ["XOS_CONFIG_SCHEMA"] = small_schema
with self.assertRaises(Exception) as e:
Config.init(basic_conf)
self.assertEqual(
str(e.exception),
(
"[XOS-Config] The config format is wrong: Schema validation failed:\n"
" - Key 'database' was not defined. Path: ''."
),
)
del os.environ["XOS_CONFIG_SCHEMA"]
def test_get_cli_param(self):
"""
[XOS-Config] Should read CLI -C param
"""
args = ["-A", "Foo", "-c", "Bar", "-C", "config.yaml"]
res = Config.get_cli_param(args)
self.assertEqual(res, "config.yaml")
def test_get_default_val_for_missing_param(self):
"""
[XOS-Config] Should get the default value if nothing is specified
"""
Config.init(basic_conf)
dir = Config.get("xos_dir")
self.assertEqual(dir, "/opt/xos")
def test_get_config_file(self):
"""
[XOS-Config] Should return the config file in use
"""
Config.init(sample_conf)
res = Config.get_config_file()
self.assertEqual(res, sample_conf)
def test_get_missing_param(self):
"""
[XOS-Config] Should return None reading a missing param
"""
Config.init(sample_conf)
res = Config.get("foo")
self.assertEqual(res, None)
def test_get_first_level(self):
"""
[XOS-Config] Should return a first level param
"""
Config.init(sample_conf)
# NOTE we are using Config2 here to be sure that the configuration is readable from any import,
# not only from the one that has been used to initialize it
res = Config2.get("database")
self.assertEqual(res, {"name": "xos", "username": "test", "password": "safe"})
def test_get_child_level(self):
"""
[XOS-Config] Should return a child level param
"""
Config.init(sample_conf)
res = Config.get("database.name")
self.assertEqual(res, "xos")
def test_config_override(self):
"""
[XOS-Config] If an override is provided for the config, it should return the overridden value
"""
Config.init(sample_conf, "xos-config-schema.yaml", override_conf)
res = Config.get("logging.level")
self.assertEqual(res, "info")
res = Config.get("database.password")
self.assertEqual(res, "overridden_password")
def test_config_extend(self):
"""
[XOS-Config] If an override is provided for the config, it should
return the overridden value (also if not defined in the base one)
"""
Config.init(sample_conf, "xos-config-schema.yaml", extend_conf)
res = Config.get("xos_dir")
self.assertEqual(res, "/opt/xos")
res = Config.get("database.password")
self.assertEqual(res, "safe")
if __name__ == "__main__":
unittest.main()
|
|
# Copyright 2018-present Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""This is a self-profiling tool."""
import inspect
import itertools
import os
import sys
import threading
import time
from collections import namedtuple
PROFILING_TIMER_DELAY = 0.01
BLACKLIST = [
"copy.py:_deepcopy_dict",
"copy.py:_deepcopy_list",
"__init__.py:query",
"encoder.py:_iterencode",
"encoder.py:_iterencode_list",
"encoder.py:_iterencode_dict",
]
PROFILING_USEFULNESS_THRESHOLD_DELAY = 0.02
HIGHLIGHTS_THRESHOLD_DELAY = 0.5
StackFrame = namedtuple("StackFrame", "filename line function")
class AggregatedStackNode(object):
def __init__(self, frame, callstack):
self.frame = frame
self.callstack = callstack
self.duration = 0
self.children = {}
self.useful = True
def add_children(self, frame, callstack, duration):
key = frame.filename + ":" + str(frame.line)
if key in self.children:
subnode = self.children[key]
subnode.duration += duration
else:
subnode = AggregatedStackNode(frame, callstack)
self.children[key] = subnode
subnode.duration += duration
return subnode
def add_callstack(self, callstack, duration, reverse_callstack):
current_node = self
if reverse_callstack:
callstack = reversed(callstack)
for frame in callstack:
next_node = current_node.add_children(frame, callstack, duration)
current_node = next_node
class Profiler(object):
def __init__(self, reverse_callstack):
# The callstack storage
self._aggregated_callstack = AggregatedStackNode(None, None)
self._stopped = False
self.total_time = 0
self.start_time = 0
self._thread = None
self._last_start_time = 0
self._stopthread = None
self._reverse_callstack = reverse_callstack
def start(self):
"""Start the profiler."""
self.start_time = time.time()
self._stopthread = threading.Event()
self._last_start_time = time.time()
frame = inspect.currentframe()
frames_items = sys._current_frames().items() # pylint: disable=protected-access
tid = [k for k, f in frames_items if f == frame][0]
self._thread = threading.Thread(
target=self._stack_trace_collection_thread,
args=(tid,),
name="sampler thread",
)
self._thread.start()
def stop(self):
"""Stop the profiler."""
self.total_time = time.time() - self.start_time
self._stopped = True
self._stopthread.set()
self._thread.join()
self._stopthread = None
def _stack_trace_collection_thread(self, tid):
while not self._stopthread.is_set():
now = time.time()
frame = sys._current_frames()[tid] # pylint: disable=protected-access
callstack = Profiler._frame_stack_to_call_stack_frame(frame)
self._aggregated_callstack.add_callstack(
callstack, now - self._last_start_time, self._reverse_callstack
)
self._last_start_time = now
time.sleep(PROFILING_TIMER_DELAY)
self._stopthread.clear()
@staticmethod
def _frame_stack_to_call_stack_frame(frame):
result = []
while frame:
stack_frame = StackFrame(
frame.f_code.co_filename, frame.f_lineno, frame.f_code.co_name
)
frame = frame.f_back
result.append(stack_frame)
result.reverse()
return list(
itertools.takewhile(
lambda x: not Profiler._is_stack_frame_in_blacklist(x), result
)
)
@staticmethod
def _is_stack_frame_in_blacklist(frame):
"""Returns wether the stack frame should be ignored."""
filename = os.path.basename(frame.filename)
call_identifier = "{}:{}".format(filename, frame.function)
return call_identifier in BLACKLIST
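# Illustrative example (not part of the original module): a frame pointing at
# _iterencode in json's encoder.py produces the identifier
# "encoder.py:_iterencode", which is in BLACKLIST, so that frame and the
# frames below it are excluded from the collected call stack.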
@staticmethod
def _items_sorted_by_duration(items):
return sorted(
items, key=lambda current_child: current_child.duration, reverse=True
)
def generate_report(self):
# type: () -> str
"""Generate string with a nice visualization of the result of the profiling."""
Profiler._recursive_mark_useful_leaf(self._aggregated_callstack)
content = ["# Highlights\n"]
highlights = []
if self._reverse_callstack:
for node in self._aggregated_callstack.children.values():
if node.duration > HIGHLIGHTS_THRESHOLD_DELAY:
highlights.append(node)
else:
Profiler._recursive_collect_highlights_report(
self._aggregated_callstack, highlights
)
highlights = Profiler._items_sorted_by_duration(highlights)
content += Profiler._generate_highlights_report(highlights)
content += ["\n", "# More details\n"]
content += self._generate_callstack_report(highlights)
return "".join(content)
@staticmethod
def _is_useful_leaf(node):
"""Returns whether a leaf needs to be highlighted."""
if node.frame is None:
return True
filename = os.path.basename(node.frame.filename)
dirname = os.path.dirname(node.frame.filename)
if node.duration < PROFILING_USEFULNESS_THRESHOLD_DELAY:
return False
return True
@staticmethod
def _recursive_mark_useful_leaf(node):
"""Mark the node as needing to be shown."""
useful = Profiler._is_useful_leaf(node)
children = node.children.values()
for child in Profiler._items_sorted_by_duration(children):
Profiler._recursive_mark_useful_leaf(child)
if child.useful:
useful = True
else:
frame = child.frame
key = frame.filename + ":" + str(frame.line)
node.children.pop(key, None)
node.useful = useful
def _generate_callstack_report(self, highlights):
"""Generate a string with a vizualisation of the aggregated call stack tree."""
highlights_set = set()
if not self._reverse_callstack:
for item in highlights:
frame = item.frame
highlights_set.add(frame.filename + ":" + str(frame.line))
return Profiler._recursive_write_callstack_report(
self._aggregated_callstack, "", highlights_set
)
@staticmethod
def _recursive_write_callstack_report(node, prefix_str, highlights_set):
"""Generate an aggregated call stack tree that looks like this one:
|-0.04 glob_watchman (glob_watchman.py:103)
| \-0.04 wrapped (util.py:76)
| \-0.04 glob (buck.py:328)
| \-0.04 _glob (buck.py:689)
| \-0.04 <module> (BUCK:58)
| \-0.04 _process (buck.py:930)
| \-0.04 _process_build_file (buck.py:970)
| \-0.04 process (buck.py:976)
| \-0.04 process_with_diagnostics (buck.py:1085)
| \-0.04 main (buck.py:1379)
| \-0.04 <module> (__main__.py:11)
|-0.03 _update_functions (buck.py:620)
| \-0.02 _set_build_env (buck.py:772)
| \-0.02 __enter__ (contextlib.py:17)
| \-0.02 _process (buck.py:889)
| \-0.02 _process_build_file (buck.py:970)
| \-0.02 process (buck.py:976)
| \-0.02 process_with_diagnostics (buck.py:1085)
| \-0.02 main (buck.py:1379)
| \-0.02 <module> (__main__.py:11)
\-0.02 encode (encoder.py:209)
\-0.02 encode_result (buck.py:1051)
\-0.02 java_process_send_result (buck.py:1112)
\-0.02 process_with_diagnostics (buck.py:1104)
\-0.02 main (buck.py:1379)
\-0.02 <module> (__main__.py:11)
"""
children = node.children.values()
nodes_count = len(children)
result = []
for i, child in enumerate(Profiler._items_sorted_by_duration(children)):
frame = child.frame
highlight_key = frame.filename + ":" + str(frame.line)
highlighted = highlight_key in highlights_set
if i == nodes_count - 1:
node_prefix_str = prefix_str + "\\-"
next_prefix_str = prefix_str + " "
else:
node_prefix_str = prefix_str + "|-"
next_prefix_str = prefix_str + "| "
str_to_write = node_prefix_str + "{:.2f} {} ({}:{})".format(
child.duration,
frame.function,
os.path.basename(frame.filename),
frame.line,
)
if highlighted:
if len(str_to_write) < 120:
highlighted_str = "-" * (120 - len(str_to_write))
else:
highlighted_str = "-" * 10
else:
highlighted_str = ""
result += [str_to_write, highlighted_str, "\n"]
result += Profiler._recursive_write_callstack_report(
child, next_prefix_str, highlights_set
)
return result
@staticmethod
def _recursive_collect_highlights_report(node, highlights):
"""Store into `highlights` the frames that needs to be highlighted."""
children = node.children.values()
nodes_count = len(children)
if nodes_count == 0:
if node.duration > HIGHLIGHTS_THRESHOLD_DELAY:
highlights.append(node)
else:
children = Profiler._items_sorted_by_duration(children)
for child in children:
Profiler._recursive_collect_highlights_report(child, highlights)
@staticmethod
def _generate_highlights_report(highlights):
"""Write the list of highlights to the given file."""
result = []
for node in highlights:
frame = node.frame
filename = os.path.basename(frame.filename)
result.append(
"{:.2f} {} ({}:{})\n".format(
node.duration, frame.function, filename, frame.line
)
)
# Output the stack frame of that highlight.
for current_frame in node.callstack:
if current_frame.function == "<module>":
continue
item_filename = os.path.basename(current_frame.filename)
result.append(
" {} ({}:{})\n".format(
current_frame.function, item_filename, current_frame.line
)
)
if (
frame.filename == current_frame.filename
and frame.line == current_frame.line
and frame.function == current_frame.function
):
break
result.append("\n")
return result
|
|
# coding: utf-8
#
# Copyright 2017 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for learner goals services."""
from __future__ import annotations
from core import feconf
from core.constants import constants
from core.domain import learner_goals_services
from core.domain import learner_progress_services
from core.domain import topic_domain
from core.domain import topic_services
from core.platform import models
from core.tests import test_utils
(user_models,) = models.Registry.import_models([models.NAMES.user])
MAX_CURRENT_GOALS_COUNT = (
feconf.MAX_CURRENT_GOALS_COUNT)
class LearnerGoalsTests(test_utils.GenericTestBase):
"""Test the services related to learner goals services."""
OWNER_EMAIL = 'owner@example.com'
OWNER_USERNAME = 'owner'
TOPIC_ID_1 = 'Topic_id_1'
TOPIC_NAME_1 = 'Topic name 1'
TOPIC_ID_2 = 'Topic_id_2'
TOPIC_NAME_2 = 'Topic name 2'
TOPIC_ID_3 = 'Topic_id_3'
TOPIC_NAME_3 = 'Topic name 3'
TOPIC_ID_4 = 'Topic_id_4'
TOPIC_NAME_4 = 'Topic name 4'
subtopic_1 = topic_domain.Subtopic(
0, 'Title 1', ['skill_id_1'], 'image.svg',
constants.ALLOWED_THUMBNAIL_BG_COLORS['subtopic'][0], 21131,
'dummy-subtopic-zero')
subtopic_2 = topic_domain.Subtopic(
0, 'Title 1', ['skill_id_1'], 'image.svg',
constants.ALLOWED_THUMBNAIL_BG_COLORS['subtopic'][0], 21131,
'dummy-subtopic-zero')
subtopic_3 = topic_domain.Subtopic(
0, 'Title 1', ['skill_id_1'], 'image.svg',
constants.ALLOWED_THUMBNAIL_BG_COLORS['subtopic'][0], 21131,
'dummy-subtopic-zero')
subtopic_4 = topic_domain.Subtopic(
0, 'Title 1', ['skill_id_1'], 'image.svg',
constants.ALLOWED_THUMBNAIL_BG_COLORS['subtopic'][0], 21131,
'dummy-subtopic-zero'
)
def setUp(self):
super(LearnerGoalsTests, self).setUp()
self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME)
self.signup(self.VIEWER_EMAIL, self.VIEWER_USERNAME)
self.signup(self.CURRICULUM_ADMIN_EMAIL, self.CURRICULUM_ADMIN_USERNAME)
self.viewer_id = self.get_user_id_from_email(self.VIEWER_EMAIL)
self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL)
self.curriculum_admin_id = self.get_user_id_from_email(
self.CURRICULUM_ADMIN_EMAIL)
self.set_curriculum_admins([self.CURRICULUM_ADMIN_USERNAME])
# Save the topics.
self.save_new_topic(
self.TOPIC_ID_1, self.owner_id, name=self.TOPIC_NAME_1,
url_fragment='topic-one',
description='A new topic', canonical_story_ids=[],
additional_story_ids=[], uncategorized_skill_ids=[],
subtopics=[self.subtopic_1], next_subtopic_id=1)
topic_services.publish_topic(self.TOPIC_ID_1, self.curriculum_admin_id)
self.save_new_topic(
self.TOPIC_ID_2, self.owner_id, name=self.TOPIC_NAME_2,
url_fragment='topic-two',
description='A new topic', canonical_story_ids=[],
additional_story_ids=[], uncategorized_skill_ids=[],
subtopics=[self.subtopic_2], next_subtopic_id=1)
topic_services.publish_topic(self.TOPIC_ID_2, self.curriculum_admin_id)
self.save_new_topic(
self.TOPIC_ID_3, self.owner_id, name=self.TOPIC_NAME_3,
url_fragment='topic-three',
description='A new topic', canonical_story_ids=[],
additional_story_ids=[], uncategorized_skill_ids=[],
subtopics=[self.subtopic_3], next_subtopic_id=1)
topic_services.publish_topic(self.TOPIC_ID_3, self.curriculum_admin_id)
self.save_new_topic(
self.TOPIC_ID_4, self.owner_id, name=self.TOPIC_NAME_4,
url_fragment='topic-four',
description='A new topic', canonical_story_ids=[],
additional_story_ids=[], uncategorized_skill_ids=[],
subtopics=[self.subtopic_4], next_subtopic_id=1)
topic_services.publish_topic(self.TOPIC_ID_4, self.curriculum_admin_id)
def _get_all_topic_ids_to_learn(self, user_id):
"""Returns the list of all the topic ids to learn
corresponding to the given user id.
"""
learner_goals_model = user_models.LearnerGoalsModel.get(
user_id, strict=False)
return (
learner_goals_model.topic_ids_to_learn if
learner_goals_model else [])
def test_single_topic_is_added_correctly_to_learn(self):
# Test adding a single topic_id to learn.
self.assertEqual(
self._get_all_topic_ids_to_learn(self.viewer_id), [])
learner_progress_services.validate_and_add_topic_to_learn_goal(
self.viewer_id, self.TOPIC_ID_1)
self.assertEqual(
self._get_all_topic_ids_to_learn(
self.viewer_id), [self.TOPIC_ID_1])
def test_multiple_topics_are_added_correctly_to_learn(self):
# Test adding two topics to the learner goals.
self.assertEqual(
self._get_all_topic_ids_to_learn(
self.viewer_id), [])
learner_progress_services.validate_and_add_topic_to_learn_goal(
self.viewer_id, self.TOPIC_ID_1)
self.assertEqual(
self._get_all_topic_ids_to_learn(
self.viewer_id), [self.TOPIC_ID_1])
learner_progress_services.validate_and_add_topic_to_learn_goal(
self.viewer_id, self.TOPIC_ID_2)
self.assertEqual(
self._get_all_topic_ids_to_learn(
self.viewer_id), [self.TOPIC_ID_1, self.TOPIC_ID_2])
def test_adding_existing_topic_is_not_added_again(self):
# Test adding a topic_id that is already present in the
# learner goals.
learner_progress_services.validate_and_add_topic_to_learn_goal(
self.viewer_id, self.TOPIC_ID_1)
learner_progress_services.validate_and_add_topic_to_learn_goal(
self.viewer_id, self.TOPIC_ID_2)
self.assertEqual(
self._get_all_topic_ids_to_learn(
self.viewer_id), [self.TOPIC_ID_1, self.TOPIC_ID_2])
with self.assertRaisesRegex(
Exception,
'The topic id Topic_id_1 is already present in the learner goals'):
learner_progress_services.validate_and_add_topic_to_learn_goal(
self.viewer_id, self.TOPIC_ID_1)
def test_completed_topic_is_not_added_to_learner_goals(self):
learner_progress_services.validate_and_add_topic_to_learn_goal(
self.viewer_id, self.TOPIC_ID_1)
self.assertEqual(
self._get_all_topic_ids_to_learn(
self.viewer_id), [self.TOPIC_ID_1])
learner_progress_services.mark_topic_as_learnt(
self.viewer_id, self.TOPIC_ID_2)
# Test that the topic added to the learnt list doesn't get
# added to the learner goals.
self.assertEqual(
self._get_all_topic_ids_to_learn(
self.viewer_id), [self.TOPIC_ID_1])
def test_number_of_topics_cannot_exceed_max(self):
# Add MAX_CURRENT_GOALS_COUNT topics.
topic_ids = ['SAMPLE_TOPIC_ID_%s' % index for index in (
range(0, MAX_CURRENT_GOALS_COUNT))]
for topic_id in topic_ids:
learner_progress_services.validate_and_add_topic_to_learn_goal(
self.viewer_id, topic_id)
self.assertEqual(
self._get_all_topic_ids_to_learn(self.viewer_id), topic_ids)
# Now if we try to add another topic at the end of the list,
# it shouldn't be added as the list length would exceed
# MAX_CURRENT_GOALS_COUNT.
learner_goals_services.mark_topic_to_learn(
self.viewer_id, 'SAMPLE_TOPIC_ID_MAX')
self.assertEqual(
self._get_all_topic_ids_to_learn(self.viewer_id), topic_ids)
def test_remove_topic_from_learner_goals(self):
self.assertEqual(self._get_all_topic_ids_to_learn(
self.viewer_id), [])
# Add topic to learner goals.
learner_goals_services.mark_topic_to_learn(
self.viewer_id, self.TOPIC_ID_1)
learner_goals_services.mark_topic_to_learn(
self.viewer_id, self.TOPIC_ID_2)
self.assertEqual(self._get_all_topic_ids_to_learn(
self.viewer_id), [self.TOPIC_ID_1, self.TOPIC_ID_2])
# Removing a topic.
learner_goals_services.remove_topics_from_learn_goal(
self.viewer_id, [self.TOPIC_ID_1])
self.assertEqual(self._get_all_topic_ids_to_learn(
self.viewer_id), [self.TOPIC_ID_2])
# Removing the same topic again raises an error.
with self.assertRaisesRegex(
Exception,
'The topic id Topic_id_1 is not present in LearnerGoalsModel'):
learner_goals_services.remove_topics_from_learn_goal(
self.viewer_id, [self.TOPIC_ID_1])
# Removing the second topic.
learner_goals_services.remove_topics_from_learn_goal(
self.viewer_id, [self.TOPIC_ID_2])
self.assertEqual(self._get_all_topic_ids_to_learn(
self.viewer_id), [])
def test_get_all_topic_ids_in_learn(self):
self.assertEqual(
self._get_all_topic_ids_to_learn(
self.viewer_id), [])
# Add a topic to the learner goals.
learner_goals_services.mark_topic_to_learn(
self.viewer_id, self.TOPIC_ID_1)
self.assertEqual(
self._get_all_topic_ids_to_learn(
self.viewer_id), [self.TOPIC_ID_1])
# Add another topic.
learner_goals_services.mark_topic_to_learn(
self.viewer_id, self.TOPIC_ID_2)
self.assertEqual(
self._get_all_topic_ids_to_learn(
self.viewer_id), [self.TOPIC_ID_1, self.TOPIC_ID_2])
|
|
from direct.directnotify.DirectNotifyGlobal import *
from otp.ai.AIBaseGlobal import *
from toontown.building import DistributedBuildingAI
from toontown.building import GagshopBuildingAI
from toontown.building import HQBuildingAI
from toontown.building import KartShopBuildingAI
from toontown.building import PetshopBuildingAI
from toontown.building import BankBuildingAI
from toontown.building import LibraryBuildingAI
from toontown.hood import ZoneUtil
# from toontown.building import DistributedAnimBuildingAI
class DistributedBuildingMgrAI:
notify = directNotify.newCategory('DistributedBuildingMgrAI')
def __init__(self, air, branchId, dnaStore, trophyMgr):
self.air = air
self.branchId = branchId
self.canonicalBranchId = ZoneUtil.getCanonicalZoneId(self.branchId)
self.dnaStore = dnaStore
self.trophyMgr = trophyMgr
self.__buildings = {}
self.findAllLandmarkBuildings()
def cleanup(self):
for building in self.__buildings.values():
building.cleanup()
self.__buildings = {}
def isValidBlockNumber(self, blockNumber):
return blockNumber in self.__buildings
def isSuitBlock(self, blockNumber):
if not self.isValidBlockNumber(blockNumber):
return False
return self.__buildings[blockNumber].isSuitBlock()
def getSuitBlocks(self):
blocks = []
for blockNumber, building in self.__buildings.items():
if building.isSuitBlock():
blocks.append(blockNumber)
return blocks
def getEstablishedSuitBlocks(self):
blocks = []
for blockNumber, building in self.__buildings.items():
if building.isEstablishedSuitBlock():
blocks.append(blockNumber)
return blocks
def getToonBlocks(self):
blocks = []
for blockNumber, building in self.__buildings.items():
if isinstance(building, HQBuildingAI.HQBuildingAI):
continue
if isinstance(building, GagshopBuildingAI.GagshopBuildingAI):
continue
if isinstance(building, PetshopBuildingAI.PetshopBuildingAI):
continue
if isinstance(building, KartShopBuildingAI.KartShopBuildingAI):
continue
if isinstance(building, BankBuildingAI.BankBuildingAI):
continue
if isinstance(building, LibraryBuildingAI.LibraryBuildingAI):
continue
if not building.isSuitBlock():
blocks.append(blockNumber)
return blocks
def getBuildings(self):
return self.__buildings.values()
def getFrontDoorPoint(self, blockNumber):
if self.isValidBlockNumber(blockNumber):
return self.__buildings[blockNumber].getFrontDoorPoint()
def getBuildingTrack(self, blockNumber):
if self.isValidBlockNumber(blockNumber):
return self.__buildings[blockNumber].track
def getBuilding(self, blockNumber):
if self.isValidBlockNumber(blockNumber):
return self.__buildings[blockNumber]
def setFrontDoorPoint(self, blockNumber, point):
if self.isValidBlockNumber(blockNumber):
return self.__buildings[blockNumber].setFrontDoorPoint(point)
def getDNABlockLists(self):
blocks = []
hqBlocks = []
gagshopBlocks = []
petshopBlocks = []
kartshopBlocks = []
bankBlocks = []
libraryBlocks = []
animBldgBlocks = []
for i in xrange(self.dnaStore.getNumBlockNumbers()):
blockNumber = self.dnaStore.getBlockNumberAt(i)
buildingType = self.dnaStore.getBlockBuildingType(blockNumber)
if buildingType == 'hq':
hqBlocks.append(blockNumber)
elif buildingType == 'gagshop':
gagshopBlocks.append(blockNumber)
elif buildingType == 'petshop':
if self.air.wantPets:
petshopBlocks.append(blockNumber)
elif buildingType == 'kartshop':
kartshopBlocks.append(blockNumber)
elif buildingType == 'bank':
bankBlocks.append(blockNumber)
elif buildingType == 'library':
libraryBlocks.append(blockNumber)
elif buildingType == 'animbldg':
animBldgBlocks.append(blockNumber)
else:
blocks.append(blockNumber)
return (blocks, hqBlocks, gagshopBlocks, petshopBlocks, kartshopBlocks,
bankBlocks, libraryBlocks, animBldgBlocks)
def findAllLandmarkBuildings(self):
backups = simbase.backups.load('block-info', (self.air.districtId, self.branchId), default={})
(blocks, hqBlocks, gagshopBlocks, petshopBlocks, kartshopBlocks,
bankBlocks, libraryBlocks, animBldgBlocks) = self.getDNABlockLists()
for blockNumber in blocks:
self.newBuilding(blockNumber, backup=backups.get(blockNumber, None))
for blockNumber in animBldgBlocks:
self.newAnimBuilding(blockNumber, backup=backups.get(blockNumber, None))
for blockNumber in hqBlocks:
self.newHQBuilding(blockNumber)
for blockNumber in gagshopBlocks:
self.newGagshopBuilding(blockNumber)
for block in petshopBlocks:
self.newPetshopBuilding(block)
for block in kartshopBlocks:
self.newKartShopBuilding(block)
for block in bankBlocks:
self.newBankBuilding(block)
for block in libraryBlocks:
self.newLibraryBuilding(block)
def newBuilding(self, blockNumber, backup=None):
building = DistributedBuildingAI.DistributedBuildingAI(
self.air, blockNumber, self.branchId, self.trophyMgr)
building.generateWithRequired(self.branchId)
if backup is not None:
state = backup.get('state', 'toon')
if ((state == 'suit') and simbase.air.wantCogbuildings) or (
(state == 'cogdo') and simbase.air.wantCogdominiums):
building.track = backup.get('track', 'c')
building.difficulty = backup.get('difficulty', 1)
building.numFloors = backup.get('numFloors', 1)
building.updateSavedBy(backup.get('savedBy'))
building.becameSuitTime = backup.get('becameSuitTime', time.time())
if (state == 'suit') and simbase.air.wantCogbuildings:
building.setState('suit')
elif (state == 'cogdo') and simbase.air.wantCogdominiums:
building.setState('cogdo')
else:
building.setState('toon')
else:
building.setState('toon')
else:
building.setState('toon')
self.__buildings[blockNumber] = building
return building
def newAnimBuilding(self, blockNumber, backup=None):
return self.newBuilding(blockNumber, backup=backup)
def newHQBuilding(self, blockNumber):
dnaStore = self.air.dnaStoreMap[self.canonicalBranchId]
exteriorZoneId = dnaStore.getZoneFromBlockNumber(blockNumber)
exteriorZoneId = ZoneUtil.getTrueZoneId(exteriorZoneId, self.branchId)
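# The interior zone below is derived from the street branch; e.g. with
# hypothetical values branchId=2100 and blockNumber=8 this gives
# (2100 - 0) + 500 + 8 = 2608.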
interiorZoneId = (self.branchId - (self.branchId%100)) + 500 + blockNumber
building = HQBuildingAI.HQBuildingAI(
self.air, exteriorZoneId, interiorZoneId, blockNumber)
self.__buildings[blockNumber] = building
return building
def newGagshopBuilding(self, blockNumber):
dnaStore = self.air.dnaStoreMap[self.canonicalBranchId]
exteriorZoneId = dnaStore.getZoneFromBlockNumber(blockNumber)
exteriorZoneId = ZoneUtil.getTrueZoneId(exteriorZoneId, self.branchId)
interiorZoneId = (self.branchId - (self.branchId%100)) + 500 + blockNumber
building = GagshopBuildingAI.GagshopBuildingAI(
self.air, exteriorZoneId, interiorZoneId, blockNumber)
self.__buildings[blockNumber] = building
return building
def newPetshopBuilding(self, blockNumber):
dnaStore = self.air.dnaStoreMap[self.canonicalBranchId]
exteriorZoneId = dnaStore.getZoneFromBlockNumber(blockNumber)
exteriorZoneId = ZoneUtil.getTrueZoneId(exteriorZoneId, self.branchId)
interiorZoneId = (self.branchId - (self.branchId%100)) + 500 + blockNumber
building = PetshopBuildingAI.PetshopBuildingAI(
self.air, exteriorZoneId, interiorZoneId, blockNumber)
self.__buildings[blockNumber] = building
return building
def newKartShopBuilding(self, blockNumber):
dnaStore = self.air.dnaStoreMap[self.canonicalBranchId]
exteriorZoneId = dnaStore.getZoneFromBlockNumber(blockNumber)
exteriorZoneId = ZoneUtil.getTrueZoneId(exteriorZoneId, self.branchId)
interiorZoneId = (self.branchId - (self.branchId%100)) + 500 + blockNumber
building = KartShopBuildingAI.KartShopBuildingAI(
self.air, exteriorZoneId, interiorZoneId, blockNumber)
self.__buildings[blockNumber] = building
return building
def newBankBuilding(self, blockNumber):
dnaStore = self.air.dnaStoreMap[self.canonicalBranchId]
exteriorZoneId = dnaStore.getZoneFromBlockNumber(blockNumber)
exteriorZoneId = ZoneUtil.getTrueZoneId(exteriorZoneId, self.branchId)
interiorZoneId = (self.branchId - (self.branchId%100)) + 500 + blockNumber
building = BankBuildingAI.BankBuildingAI(
self.air, exteriorZoneId, interiorZoneId, blockNumber)
self.__buildings[blockNumber] = building
return building
def newLibraryBuilding(self, blockNumber):
dnaStore = self.air.dnaStoreMap[self.canonicalBranchId]
exteriorZoneId = dnaStore.getZoneFromBlockNumber(blockNumber)
exteriorZoneId = ZoneUtil.getTrueZoneId(exteriorZoneId, self.branchId)
interiorZoneId = (self.branchId - (self.branchId%100)) + 500 + blockNumber
building = LibraryBuildingAI.LibraryBuildingAI(
self.air, exteriorZoneId, interiorZoneId, blockNumber)
self.__buildings[blockNumber] = building
return building
def save(self):
backups = {}
for blockNumber in self.getSuitBlocks():
building = self.getBuilding(blockNumber)
backup = {
'state': building.fsm.getCurrentState().getName(),
'block': building.block,
'track': building.track,
'difficulty': building.difficulty,
'numFloors': building.numFloors,
'savedBy': building.savedBy,
'becameSuitTime': building.becameSuitTime
}
backups[blockNumber] = backup
simbase.backups.save('block-info', (self.air.districtId, self.branchId), backups)
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.linalg import linear_operator_util
from tensorflow.python.platform import test
random_seed.set_random_seed(23)
rng = np.random.RandomState(0)
class AssertZeroImagPartTest(test.TestCase):
def test_real_tensor_doesnt_raise(self):
x = ops.convert_to_tensor([0., 2, 3])
with self.test_session():
# Should not raise.
linear_operator_util.assert_zero_imag_part(x, message="ABC123").run()
def test_complex_tensor_with_imag_zero_doesnt_raise(self):
x = ops.convert_to_tensor([1., 0, 3])
y = ops.convert_to_tensor([0., 0, 0])
z = math_ops.complex(x, y)
with self.test_session():
# Should not raise.
linear_operator_util.assert_zero_imag_part(z, message="ABC123").run()
def test_complex_tensor_with_nonzero_imag_raises(self):
x = ops.convert_to_tensor([1., 2, 0])
y = ops.convert_to_tensor([1., 2, 0])
z = math_ops.complex(x, y)
with self.test_session():
with self.assertRaisesOpError("ABC123"):
linear_operator_util.assert_zero_imag_part(z, message="ABC123").run()
class AssertNoEntriesWithModulusZeroTest(test.TestCase):
def test_nonzero_real_tensor_doesnt_raise(self):
x = ops.convert_to_tensor([1., 2, 3])
with self.test_session():
# Should not raise.
linear_operator_util.assert_no_entries_with_modulus_zero(
x, message="ABC123").run()
def test_nonzero_complex_tensor_doesnt_raise(self):
x = ops.convert_to_tensor([1., 0, 3])
y = ops.convert_to_tensor([1., 2, 0])
z = math_ops.complex(x, y)
with self.test_session():
# Should not raise.
linear_operator_util.assert_no_entries_with_modulus_zero(
z, message="ABC123").run()
def test_zero_real_tensor_raises(self):
x = ops.convert_to_tensor([1., 0, 3])
with self.test_session():
with self.assertRaisesOpError("ABC123"):
linear_operator_util.assert_no_entries_with_modulus_zero(
x, message="ABC123").run()
def test_zero_complex_tensor_raises(self):
x = ops.convert_to_tensor([1., 2, 0])
y = ops.convert_to_tensor([1., 2, 0])
z = math_ops.complex(x, y)
with self.test_session():
with self.assertRaisesOpError("ABC123"):
linear_operator_util.assert_no_entries_with_modulus_zero(
z, message="ABC123").run()
class BroadcastMatrixBatchDimsTest(test.TestCase):
def test_zero_batch_matrices_returned_as_empty_list(self):
self.assertAllEqual(
[], linear_operator_util.broadcast_matrix_batch_dims([]))
def test_one_batch_matrix_returned_after_tensor_conversion(self):
arr = rng.rand(2, 3, 4)
tensor, = linear_operator_util.broadcast_matrix_batch_dims([arr])
self.assertTrue(isinstance(tensor, ops.Tensor))
with self.test_session():
self.assertAllClose(arr, tensor.eval())
def test_static_dims_broadcast(self):
# x.batch_shape = [3, 1, 2]
# y.batch_shape = [4, 1]
# broadcast batch shape = [3, 4, 2]
x = rng.rand(3, 1, 2, 1, 5)
y = rng.rand(4, 1, 3, 7)
batch_of_zeros = np.zeros((3, 4, 2, 1, 1))
x_bc_expected = x + batch_of_zeros
y_bc_expected = y + batch_of_zeros
x_bc, y_bc = linear_operator_util.broadcast_matrix_batch_dims([x, y])
with self.test_session() as sess:
self.assertAllEqual(x_bc_expected.shape, x_bc.get_shape())
self.assertAllEqual(y_bc_expected.shape, y_bc.get_shape())
x_bc_, y_bc_ = sess.run([x_bc, y_bc])
self.assertAllClose(x_bc_expected, x_bc_)
self.assertAllClose(y_bc_expected, y_bc_)
def test_static_dims_broadcast_second_arg_higher_rank(self):
# x.batch_shape = [1, 2]
# y.batch_shape = [1, 3, 2]
# broadcast batch shape = [1, 3, 2]
x = rng.rand(1, 2, 1, 5)
y = rng.rand(1, 3, 2, 3, 7)
batch_of_zeros = np.zeros((1, 3, 2, 1, 1))
x_bc_expected = x + batch_of_zeros
y_bc_expected = y + batch_of_zeros
x_bc, y_bc = linear_operator_util.broadcast_matrix_batch_dims([x, y])
with self.test_session() as sess:
self.assertAllEqual(x_bc_expected.shape, x_bc.get_shape())
self.assertAllEqual(y_bc_expected.shape, y_bc.get_shape())
x_bc_, y_bc_ = sess.run([x_bc, y_bc])
self.assertAllClose(x_bc_expected, x_bc_)
self.assertAllClose(y_bc_expected, y_bc_)
def test_dynamic_dims_broadcast_32bit(self):
# x.batch_shape = [3, 1, 2]
# y.batch_shape = [4, 1]
# broadcast batch shape = [3, 4, 2]
x = rng.rand(3, 1, 2, 1, 5).astype(np.float32)
y = rng.rand(4, 1, 3, 7).astype(np.float32)
batch_of_zeros = np.zeros((3, 4, 2, 1, 1)).astype(np.float32)
x_bc_expected = x + batch_of_zeros
y_bc_expected = y + batch_of_zeros
x_ph = array_ops.placeholder(dtypes.float32)
y_ph = array_ops.placeholder(dtypes.float32)
x_bc, y_bc = linear_operator_util.broadcast_matrix_batch_dims([x_ph, y_ph])
with self.test_session() as sess:
x_bc_, y_bc_ = sess.run([x_bc, y_bc], feed_dict={x_ph: x, y_ph: y})
self.assertAllClose(x_bc_expected, x_bc_)
self.assertAllClose(y_bc_expected, y_bc_)
def test_dynamic_dims_broadcast_32bit_second_arg_higher_rank(self):
# x.batch_shape = [1, 2]
# y.batch_shape = [3, 4, 1]
# broadcast batch shape = [3, 4, 2]
x = rng.rand(1, 2, 1, 5).astype(np.float32)
y = rng.rand(3, 4, 1, 3, 7).astype(np.float32)
batch_of_zeros = np.zeros((3, 4, 2, 1, 1)).astype(np.float32)
x_bc_expected = x + batch_of_zeros
y_bc_expected = y + batch_of_zeros
x_ph = array_ops.placeholder(dtypes.float32)
y_ph = array_ops.placeholder(dtypes.float32)
x_bc, y_bc = linear_operator_util.broadcast_matrix_batch_dims([x_ph, y_ph])
with self.test_session() as sess:
x_bc_, y_bc_ = sess.run([x_bc, y_bc], feed_dict={x_ph: x, y_ph: y})
self.assertAllClose(x_bc_expected, x_bc_)
self.assertAllClose(y_bc_expected, y_bc_)
def test_less_than_two_dims_raises_static(self):
x = rng.rand(3)
y = rng.rand(1, 1)
with self.assertRaisesRegexp(ValueError, "at least two dimensions"):
linear_operator_util.broadcast_matrix_batch_dims([x, y])
with self.assertRaisesRegexp(ValueError, "at least two dimensions"):
linear_operator_util.broadcast_matrix_batch_dims([y, x])
class MatmulWithBroadcastTest(test.TestCase):
def test_static_dims_broadcast(self):
# batch_shape = [2]
# for each batch member, we have a 1x3 matrix times a 3x7 matrix ==> 1x7
x = rng.rand(2, 1, 3)
y = rng.rand(3, 7)
y_broadcast = y + np.zeros((2, 1, 1))
with self.test_session():
result = linear_operator_util.matmul_with_broadcast(x, y)
self.assertAllEqual((2, 1, 7), result.get_shape())
expected = math_ops.matmul(x, y_broadcast)
self.assertAllEqual(expected.eval(), result.eval())
def test_dynamic_dims_broadcast_32bit(self):
# batch_shape = [2]
# for each batch member, we have a 1x3 matrix times a 3x7 matrix ==> 1x7
x = rng.rand(2, 1, 3)
y = rng.rand(3, 7)
y_broadcast = y + np.zeros((2, 1, 1))
x_ph = array_ops.placeholder(dtypes.float64)
y_ph = array_ops.placeholder(dtypes.float64)
with self.test_session() as sess:
result, expected = sess.run(
[linear_operator_util.matmul_with_broadcast(x_ph, y_ph),
math_ops.matmul(x, y_broadcast)],
feed_dict={x_ph: x, y_ph: y})
self.assertAllEqual(expected, result)
class DomainDimensionStubOperator(object):
def __init__(self, domain_dimension):
self._domain_dimension = ops.convert_to_tensor(domain_dimension)
def domain_dimension_tensor(self):
return self._domain_dimension
class AssertCompatibleMatrixDimensionsTest(test.TestCase):
def test_compatible_dimensions_do_not_raise(self):
with self.test_session():
x = ops.convert_to_tensor(rng.rand(2, 3, 4))
operator = DomainDimensionStubOperator(3)
# Should not raise
linear_operator_util.assert_compatible_matrix_dimensions(
operator, x).run()
def test_incompatible_dimensions_raise(self):
with self.test_session():
x = ops.convert_to_tensor(rng.rand(2, 4, 4))
operator = DomainDimensionStubOperator(3)
with self.assertRaisesOpError("Incompatible matrix dimensions"):
linear_operator_util.assert_compatible_matrix_dimensions(
operator, x).run()
if __name__ == "__main__":
test.main()
|
|
#
# Copyright 2014 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Requires Python 2.6+
#
import os
import re
import sys
import socket
import traceback
import time
import datetime
import psutil
import string
import urlparse
from azure.storage import TableService, Entity
from Utils.WAAgentUtil import waagent, AddExtensionEvent
import Utils.HandlerUtil as Util
FAILED_TO_RETRIEVE_MDS_DATA="(03100)Failed to retrieve mds data"
FAILED_TO_RETRIEVE_LOCAL_DATA="(03101)Failed to retrieve local data"
FAILED_TO_RETRIEVE_STORAGE_DATA="(03102)Failed to retrieve storage data"
FAILED_TO_SERIALIZE_PERF_COUNTERS="(03103)Failed to serialize perf counters"
def timedelta_total_seconds(delta):
if not hasattr(datetime.timedelta, 'total_seconds'):
return delta.days * 86400 + delta.seconds
else:
return delta.total_seconds()
def get_host_base_from_uri(blob_uri):
uri = urlparse.urlparse(blob_uri)
netloc = uri.netloc
if netloc is None:
return None
return netloc[netloc.find('.'):]
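# Illustrative example (hypothetical account name): for a blob URI such as
# "https://myaccount.blob.core.windows.net/vhds/disk0.vhd", urlparse yields the
# netloc "myaccount.blob.core.windows.net", so get_host_base_from_uri returns
# ".blob.core.windows.net", i.e. everything from the first dot onwards.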
MonitoringIntervalInMinute = 1 #One minute
MonitoringInterval = 60 * MonitoringIntervalInMinute
#It takes some time before the performance data reaches the Azure table.
AzureTableDelayInMinute = 5 #Five minutes
AzureTableDelay = 60 * AzureTableDelayInMinute
AzureEnhancedMonitorVersion = "1.0.0"
LibDir = "/var/lib/AzureEnhancedMonitor"
def printable(s):
return filter(lambda c : c in string.printable, str(s))
def easyHash(s):
"""
MDSD uses the following hash algorithm to calculate the first part of the partition key.
"""
strHash = 0
multiplier = 37
for c in s:
strHash = strHash * multiplier + ord(c)
#Only keep the low 64 bits of the running hash; the final value is taken mod 100 below
strHash = strHash % (1<<64)
return strHash % 100 #Assume eventVolume is Large
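# Descriptive note: easyHash maps any string to a bucket in [0, 100); that bucket
# becomes the first, zero-padded field of the MDS partition key built by
# getMDSPartitionKey below. The exact bucket for a given hostname depends on the
# polynomial hash, so no concrete value is assumed here.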
Epoch = datetime.datetime(1, 1, 1)
tickInOneSecond = 1000 * 10000 # 1s = 1000 * 10000 ticks
def getMDSTimestamp(unixTimestamp):
unixTime = datetime.datetime.utcfromtimestamp(unixTimestamp)
startTimestamp = int(timedelta_total_seconds(unixTime - Epoch))
return startTimestamp * tickInOneSecond
def getIdentity():
identity = socket.gethostname()
return identity
def getMDSPartitionKey(identity, timestamp):
hashVal = easyHash(identity)
return "{0:0>19d}___{1:0>19d}".format(hashVal, timestamp)
def getAzureDiagnosticKeyRange():
#Round down by MonitoringInterval
endTime = (int(time.time()) / MonitoringInterval) * MonitoringInterval
endTime = endTime - AzureTableDelay
startTime = endTime - MonitoringInterval
identity = getIdentity()
startKey = getMDSPartitionKey(identity, getMDSTimestamp(startTime))
endKey = getMDSPartitionKey(identity, getMDSTimestamp(endTime))
return startKey, endKey
def getAzureDiagnosticCPUData(accountName, accountKey, hostBase,
startKey, endKey, hostname):
try:
waagent.Log("Retrieve diagnostic data(CPU).")
table = "LinuxPerfCpuVer1v0"
tableService = TableService(account_name = accountName,
account_key = accountKey,
host_base = hostBase)
ofilter = ("PartitionKey ge '{0}' and PartitionKey lt '{1}' "
"and Host eq '{2}'").format(startKey, endKey, hostname)
oselect = ("PercentProcessorTime,Host")
data = tableService.query_entities(table, ofilter, oselect, 1)
if data is None or len(data) == 0:
return None
cpuPercent = float(data[0].PercentProcessorTime)
return cpuPercent
except Exception, e:
waagent.Error(("Failed to retrieve diagnostic data(CPU): {0} {1}"
"").format(printable(e), traceback.format_exc()))
AddExtensionEvent(message=FAILED_TO_RETRIEVE_MDS_DATA)
return None
def getAzureDiagnosticMemoryData(accountName, accountKey, hostBase,
startKey, endKey, hostname):
try:
waagent.Log("Retrieve diagnostic data: Memory")
table = "LinuxPerfMemVer1v0"
tableService = TableService(account_name = accountName,
account_key = accountKey,
host_base = hostBase)
ofilter = ("PartitionKey ge '{0}' and PartitionKey lt '{1}' "
"and Host eq '{2}'").format(startKey, endKey, hostname)
oselect = ("PercentAvailableMemory,Host")
data = tableService.query_entities(table, ofilter, oselect, 1)
if data is None or len(data) == 0:
return None
memoryPercent = 100 - float(data[0].PercentAvailableMemory)
return memoryPercent
except Exception, e:
waagent.Error(("Failed to retrieve diagnostic data(Memory): {0} {1}"
"").format(printable(e), traceback.format_exc()))
AddExtensionEvent(message=FAILED_TO_RETRIEVE_MDS_DATA)
return None
class AzureDiagnosticData(object):
def __init__(self, config):
self.config = config
accountName = config.getLADName()
accountKey = config.getLADKey()
hostBase = config.getLADHostBase()
hostname = socket.gethostname()
startKey, endKey = getAzureDiagnosticKeyRange()
self.cpuPercent = getAzureDiagnosticCPUData(accountName,
accountKey,
hostBase,
startKey,
endKey,
hostname)
self.memoryPercent = getAzureDiagnosticMemoryData(accountName,
accountKey,
hostBase,
startKey,
endKey,
hostname)
def getCPUPercent(self):
return self.cpuPercent
def getMemoryPercent(self):
return self.memoryPercent
class AzureDiagnosticMetric(object):
def __init__(self, config):
self.config = config
self.linux = LinuxMetric(self.config)
self.azure = AzureDiagnosticData(self.config)
self.timestamp = int(time.time()) - AzureTableDelay
def getTimestamp(self):
return self.timestamp
def getCurrHwFrequency(self):
return self.linux.getCurrHwFrequency()
def getMaxHwFrequency(self):
return self.linux.getMaxHwFrequency()
def getCurrVMProcessingPower(self):
return self.linux.getCurrVMProcessingPower()
def getGuaranteedVMProcessingPower(self):
return self.linux.getGuaranteedVMProcessingPower()
def getMaxVMProcessingPower(self):
return self.linux.getMaxVMProcessingPower()
def getNumOfCoresPerCPU(self):
return self.linux.getNumOfCoresPerCPU()
def getNumOfThreadsPerCore(self):
return self.linux.getNumOfThreadsPerCore()
def getPhysProcessingPowerPerVCPU(self):
return self.linux.getPhysProcessingPowerPerVCPU()
def getProcessorType(self):
return self.linux.getProcessorType()
def getReferenceComputeUnit(self):
return self.linux.getReferenceComputeUnit()
def getVCPUMapping(self):
return self.linux.getVCPUMapping()
def getVMProcessingPowerConsumption(self):
return self.azure.getCPUPercent()
def getCurrMemAssigned(self):
return self.linux.getCurrMemAssigned()
def getGuaranteedMemAssigned(self):
return self.linux.getGuaranteedMemAssigned()
def getMaxMemAssigned(self):
return self.linux.getMaxMemAssigned()
def getVMMemConsumption(self):
return self.azure.getMemoryPercent()
def getNetworkAdapterIds(self):
return self.linux.getNetworkAdapterIds()
def getNetworkAdapterMapping(self, adapterId):
return self.linux.getNetworkAdapterMapping(adapterId)
def getMaxNetworkBandwidth(self, adapterId):
return self.linux.getMaxNetworkBandwidth(adapterId)
def getMinNetworkBandwidth(self, adapterId):
return self.linux.getMinNetworkBandwidth(adapterId)
def getNetworkReadBytes(self):
return self.linux.getNetworkReadBytes()
def getNetworkWriteBytes(self):
return self.linux.getNetworkWriteBytes()
def getNetworkPacketRetransmitted(self):
return self.linux.getNetworkPacketRetransmitted()
def getLastHardwareChange(self):
return self.linux.getLastHardwareChange()
class CPUInfo(object):
@staticmethod
def getCPUInfo():
cpuinfo = waagent.GetFileContents("/proc/cpuinfo")
ret, lscpu = waagent.RunGetOutput("lscpu")
return CPUInfo(cpuinfo, lscpu)
def __init__(self, cpuinfo, lscpu):
self.cpuinfo = cpuinfo
self.lscpu = lscpu
self.cores = 1
self.coresPerCpu = 1
self.threadsPerCore = 1
coresMatch = re.search(r"CPU\(s\):\s+(\d+)", self.lscpu)
if coresMatch:
self.cores = int(coresMatch.group(1))
coresPerCpuMatch = re.search(r"Core\(s\) per socket:\s+(\d+)", self.lscpu)
if coresPerCpuMatch:
self.coresPerCpu = int(coresPerCpuMatch.group(1))
threadsPerCoreMatch = re.search(r"Thread\(s\) per core:\s+(\d+)", self.lscpu)
if threadsPerCoreMatch:
self.threadsPerCore = int(threadsPerCoreMatch.group(1))
model = re.search("model name\s+:\s+(.*)\s", self.cpuinfo)
vendorId = re.search("vendor_id\s+:\s+(.*)\s", self.cpuinfo)
if model and vendorId:
self.processorType = "{0}, {1}".format(model.group(1),
vendorId.group(1))
else:
self.processorType = None
freqMatch = re.search("CPU MHz:\s+(.*)\s", self.lscpu)
if freqMatch:
self.frequency = float(freqMatch.group(1))
else:
self.frequency = None
ht = re.match("flags\s.*\sht\s", self.cpuinfo)
self.isHTon = ht is not None
def getNumOfCoresPerCPU(self):
return self.coresPerCpu
def getNumOfCores(self):
return self.cores
def getNumOfThreadsPerCore(self):
return self.threadsPerCore
def getProcessorType(self):
return self.processorType
def getFrequency(self):
return self.frequency
def isHyperThreadingOn(self):
return self.isHTon
def getCPUPercent(self):
return psutil.cpu_percent()
class MemoryInfo(object):
def __init__(self):
self.memInfo = psutil.virtual_memory()
def getMemSize(self):
return self.memInfo[0] / 1024 / 1024 #MB
def getMemPercent(self):
return self.memInfo[2] #%
def getMacAddress(adapterId):
nicAddrPath = os.path.join("/sys/class/net", adapterId, "address")
mac = waagent.GetFileContents(nicAddrPath)
mac = mac.strip()
mac = mac.replace(":", "-")
return mac
def sameList(l1, l2):
if l1 is None or l2 is None:
return l1 == l2
if len(l1) != len(l2):
return False
for i in range(0, len(l1)):
if l1[i] != l2[i]:
return False
return True
class NetworkInfo(object):
def __init__(self):
self.nics = psutil.net_io_counters(pernic=True)
self.nicNames = []
self.readBytes = 0
self.writeBytes = 0
for nicName, stat in self.nics.iteritems():
if nicName != 'lo':
self.nicNames.append(nicName)
self.readBytes = self.readBytes + stat[1] #bytes_recv
self.writeBytes = self.writeBytes + stat[0] #bytes_sent
def getAdapterIds(self):
return self.nicNames
def getNetworkReadBytes(self):
return self.readBytes
def getNetworkWriteBytes(self):
return self.writeBytes
def getNetstat(self):
retCode, output = waagent.RunGetOutput("netstat -s", chk_err=False)
return output
def getNetworkPacketRetransmitted(self):
netstat = self.getNetstat()
match = re.search("(\d+)\s*segments retransmited", netstat)
if match != None:
return int(match.group(1))
else:
waagent.Error("Failed to parse netstat output: {0}".format(netstat))
AddExtensionEvent(message=FAILED_TO_RETRIEVE_LOCAL_DATA)
return None
HwInfoFile = os.path.join(LibDir, "HwInfo")
class HardwareChangeInfo(object):
def __init__(self, networkInfo):
self.networkInfo = networkInfo
def getHwInfo(self):
if not os.path.isfile(HwInfoFile):
return None, None
hwInfo = waagent.GetFileContents(HwInfoFile).split("\n")
return int(hwInfo[0]), hwInfo[1:]
def setHwInfo(self, timestamp, hwInfo):
content = str(timestamp)
content = content + "\n" + "\n".join(hwInfo)
waagent.SetFileContents(HwInfoFile, content)
def getLastHardwareChange(self):
oldTime, oldMacs = self.getHwInfo()
newMacs = map(lambda x : getMacAddress(x),
self.networkInfo.getAdapterIds())
newTime = int(time.time())
newMacs.sort()
if oldMacs is None or not sameList(newMacs, oldMacs):
#Hardware changed
if newTime < oldTime:
waagent.Warn(("Hardware change detected. But the old timestamp "
"is greater than now, {0}>{1}.").format(oldTime,
newTime))
self.setHwInfo(newTime, newMacs)
return newTime
else:
return oldTime
class LinuxMetric(object):
def __init__(self, config):
self.config = config
#CPU
self.cpuInfo = CPUInfo.getCPUInfo()
#Memory
self.memInfo = MemoryInfo()
#Network
self.networkInfo = NetworkInfo()
#Detect hardware change
self.hwChangeInfo = HardwareChangeInfo(self.networkInfo)
self.timestamp = int(time.time())
def getTimestamp(self):
return self.timestamp
def getCurrHwFrequency(self):
return self.cpuInfo.getFrequency()
def getMaxHwFrequency(self):
return self.getCurrHwFrequency()
def getCurrVMProcessingPower(self):
if self.config.isCpuOverCommitted():
return None
else:
return self.cpuInfo.getNumOfCores()
def getGuaranteedVMProcessingPower(self):
return self.getCurrVMProcessingPower()
def getMaxVMProcessingPower(self):
return self.getCurrVMProcessingPower()
def getNumOfCoresPerCPU(self):
return self.cpuInfo.getNumOfCoresPerCPU()
def getNumOfThreadsPerCore(self):
return self.cpuInfo.getNumOfThreadsPerCore()
def getPhysProcessingPowerPerVCPU(self):
return 1 / float(self.getNumOfThreadsPerCore())
def getProcessorType(self):
return self.cpuInfo.getProcessorType()
def getReferenceComputeUnit(self):
return self.getProcessorType()
def getVCPUMapping(self):
return "thread" if self.cpuInfo.isHyperThreadingOn() else "core"
def getVMProcessingPowerConsumption(self):
return self.cpuInfo.getCPUPercent()
def getCurrMemAssigned(self):
if self.config.isMemoryOverCommitted():
return None
else:
return self.memInfo.getMemSize()
def getGuaranteedMemAssigned(self):
return self.getCurrMemAssigned()
def getMaxMemAssigned(self):
return self.getCurrMemAssigned()
def getVMMemConsumption(self):
return self.memInfo.getMemPercent()
def getNetworkAdapterIds(self):
return self.networkInfo.getAdapterIds()
def getNetworkAdapterMapping(self, adapterId):
return getMacAddress(adapterId)
def getMaxNetworkBandwidth(self, adapterId):
return 1000 #Mbit/s
def getMinNetworkBandwidth(self, adapterId):
return 1000 #Mbit/s
def getNetworkReadBytes(self):
return self.networkInfo.getNetworkReadBytes()
def getNetworkWriteBytes(self):
return self.networkInfo.getNetworkWriteBytes()
def getNetworkPacketRetransmitted(self):
return self.networkInfo.getNetworkPacketRetransmitted()
def getLastHardwareChange(self):
return self.hwChangeInfo.getLastHardwareChange()
class VMDataSource(object):
def __init__(self, config):
self.config = config
def collect(self):
counters = []
if self.config.isLADEnabled():
metrics = AzureDiagnosticMetric(self.config)
else:
metrics = LinuxMetric(self.config)
#CPU
counters.append(self.createCounterCurrHwFrequency(metrics))
counters.append(self.createCounterMaxHwFrequency(metrics))
counters.append(self.createCounterCurrVMProcessingPower(metrics))
counters.append(self.createCounterGuaranteedVMProcessingPower(metrics))
counters.append(self.createCounterMaxVMProcessingPower(metrics))
counters.append(self.createCounterNumOfCoresPerCPU(metrics))
counters.append(self.createCounterNumOfThreadsPerCore(metrics))
counters.append(self.createCounterPhysProcessingPowerPerVCPU(metrics))
counters.append(self.createCounterProcessorType(metrics))
counters.append(self.createCounterReferenceComputeUnit(metrics))
counters.append(self.createCounterVCPUMapping(metrics))
counters.append(self.createCounterVMProcessingPowerConsumption(metrics))
#Memory
counters.append(self.createCounterCurrMemAssigned(metrics))
counters.append(self.createCounterGuaranteedMemAssigned(metrics))
counters.append(self.createCounterMaxMemAssigned(metrics))
counters.append(self.createCounterVMMemConsumption(metrics))
#Network
adapterIds = metrics.getNetworkAdapterIds()
for adapterId in adapterIds:
counters.append(self.createCounterAdapterId(adapterId))
counters.append(self.createCounterNetworkMapping(metrics, adapterId))
counters.append(self.createCounterMinNetworkBandwidth(metrics,
adapterId))
counters.append(self.createCounterMaxNetworkBandwidth(metrics,
adapterId))
counters.append(self.createCounterNetworkReadBytes(metrics))
counters.append(self.createCounterNetworkWriteBytes(metrics))
counters.append(self.createCounterNetworkPacketRetransmitted(metrics))
#Hardware change
counters.append(self.createCounterLastHardwareChange(metrics))
return counters
def createCounterLastHardwareChange(self, metrics):
return PerfCounter(counterType = PerfCounterType.COUNTER_TYPE_LARGE,
category = "config",
name = "Last Hardware Change",
value = metrics.getLastHardwareChange(),
unit="posixtime")
def createCounterCurrHwFrequency(self, metrics):
return PerfCounter(counterType = PerfCounterType.COUNTER_TYPE_DOUBLE,
category = "cpu",
name = "Current Hw Frequency",
value = metrics.getCurrHwFrequency(),
unit = "MHz",
refreshInterval = 60)
def createCounterMaxHwFrequency(self, metrics):
return PerfCounter(counterType = PerfCounterType.COUNTER_TYPE_DOUBLE,
category = "cpu",
name = "Max Hw Frequency",
value = metrics.getMaxHwFrequency(),
unit = "MHz")
def createCounterCurrVMProcessingPower(self, metrics):
return PerfCounter(counterType = PerfCounterType.COUNTER_TYPE_INT,
category = "cpu",
name = "Current VM Processing Power",
value = metrics.getCurrVMProcessingPower(),
unit = "compute unit")
def createCounterMaxVMProcessingPower(self, metrics):
return PerfCounter(counterType = PerfCounterType.COUNTER_TYPE_INT,
category = "cpu",
name = "Max. VM Processing Power",
value = metrics.getMaxVMProcessingPower(),
unit = "compute unit")
def createCounterGuaranteedVMProcessingPower(self, metrics):
return PerfCounter(counterType = PerfCounterType.COUNTER_TYPE_INT,
category = "cpu",
name = "Guaranteed VM Processing Power",
value = metrics.getGuaranteedVMProcessingPower(),
unit = "compute unit")
def createCounterNumOfCoresPerCPU(self, metrics):
return PerfCounter(counterType = PerfCounterType.COUNTER_TYPE_INT,
category = "cpu",
name = "Number of Cores per CPU",
value = metrics.getNumOfCoresPerCPU())
def createCounterNumOfThreadsPerCore(self, metrics):
return PerfCounter(counterType = PerfCounterType.COUNTER_TYPE_INT,
category = "cpu",
name = "Number of Threads per Core",
value = metrics.getNumOfThreadsPerCore())
def createCounterPhysProcessingPowerPerVCPU(self, metrics):
return PerfCounter(counterType = PerfCounterType.COUNTER_TYPE_DOUBLE,
category = "cpu",
name = "Phys. Processing Power per vCPU",
value = metrics.getPhysProcessingPowerPerVCPU())
def createCounterProcessorType(self, metrics):
return PerfCounter(counterType = PerfCounterType.COUNTER_TYPE_STRING,
category = "cpu",
name = "Processor Type",
value = metrics.getProcessorType())
def createCounterReferenceComputeUnit(self, metrics):
return PerfCounter(counterType = PerfCounterType.COUNTER_TYPE_STRING,
category = "cpu",
name = "Reference Compute Unit",
value = metrics.getReferenceComputeUnit())
def createCounterVCPUMapping(self, metrics):
return PerfCounter(counterType = PerfCounterType.COUNTER_TYPE_STRING,
category = "cpu",
name = "vCPU Mapping",
value = metrics.getVCPUMapping())
def createCounterVMProcessingPowerConsumption(self, metrics):
return PerfCounter(counterType = PerfCounterType.COUNTER_TYPE_DOUBLE,
category = "cpu",
name = "VM Processing Power Consumption",
value = metrics.getVMProcessingPowerConsumption(),
unit = "%",
timestamp = metrics.getTimestamp(),
refreshInterval = 60)
def createCounterCurrMemAssigned(self, metrics):
return PerfCounter(counterType = PerfCounterType.COUNTER_TYPE_INT,
category = "memory",
name = "Current Memory assigned",
value = metrics.getCurrMemAssigned(),
unit = "MB")
def createCounterMaxMemAssigned(self, metrics):
return PerfCounter(counterType = PerfCounterType.COUNTER_TYPE_INT,
category = "memory",
name = "Max Memory assigned",
value = metrics.getMaxMemAssigned(),
unit = "MB")
def createCounterGuaranteedMemAssigned(self, metrics):
return PerfCounter(counterType = PerfCounterType.COUNTER_TYPE_INT,
category = "memory",
name = "Guaranteed Memory assigned",
value = metrics.getGuaranteedMemAssigned(),
unit = "MB")
def createCounterVMMemConsumption(self, metrics):
return PerfCounter(counterType = PerfCounterType.COUNTER_TYPE_DOUBLE,
category = "memory",
name = "VM Memory Consumption",
value = metrics.getVMMemConsumption(),
unit = "%",
timestamp = metrics.getTimestamp(),
refreshInterval = 60)
def createCounterAdapterId(self, adapterId):
return PerfCounter(counterType = PerfCounterType.COUNTER_TYPE_STRING,
category = "network",
name = "Adapter Id",
instance = adapterId,
value = adapterId)
def createCounterNetworkMapping(self, metrics, adapterId):
return PerfCounter(counterType = PerfCounterType.COUNTER_TYPE_STRING,
category = "network",
name = "Mapping",
instance = adapterId,
value = metrics.getNetworkAdapterMapping(adapterId))
def createCounterMaxNetworkBandwidth(self, metrics, adapterId):
return PerfCounter(counterType = PerfCounterType.COUNTER_TYPE_INT,
category = "network",
name = "Maximum Network Bandwidth",
instance = adapterId,
value = metrics.getMaxNetworkBandwidth(adapterId),
unit = "Mbit/s")
def createCounterMinNetworkBandwidth(self, metrics, adapterId):
return PerfCounter(counterType = PerfCounterType.COUNTER_TYPE_INT,
category = "network",
name = "Minimum Network Bandwidth",
instance = adapterId,
value = metrics.getMinNetworkBandwidth(adapterId),
unit = "Mbit/s")
def createCounterNetworkReadBytes(self, metrics):
return PerfCounter(counterType = PerfCounterType.COUNTER_TYPE_LARGE,
category = "network",
name = "Network Read Bytes",
value = metrics.getNetworkReadBytes(),
unit = "byte/s")
def createCounterNetworkWriteBytes(self, metrics):
return PerfCounter(counterType = PerfCounterType.COUNTER_TYPE_LARGE,
category = "network",
name = "Network Write Bytes",
value = metrics.getNetworkWriteBytes(),
unit = "byte/s")
def createCounterNetworkPacketRetransmitted(self, metrics):
return PerfCounter(counterType = PerfCounterType.COUNTER_TYPE_INT,
category = "network",
name = "Packets Retransmitted",
value = metrics.getNetworkPacketRetransmitted(),
unit = "packets/min")
def getStorageTimestamp(unixTimestamp):
tformat = "{0:0>4d}{1:0>2d}{2:0>2d}T{3:0>2d}{4:0>2d}"
ts = time.gmtime(unixTimestamp)
return tformat.format(ts.tm_year,
ts.tm_mon,
ts.tm_mday,
ts.tm_hour,
ts.tm_min)
def getStorageTableKeyRange():
#Round down by MonitoringInterval
endTime = int(time.time()) / MonitoringInterval * MonitoringInterval
endTime = endTime - AzureTableDelay
startTime = endTime - MonitoringInterval
return getStorageTimestamp(startTime), getStorageTimestamp(endTime)
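# Example (illustrative only): getStorageTimestamp(0) is "19700101T0000", so the
# storage-analytics partition keys produced here are minute-granularity strings
# such as "20140527T1200"; the range spans one MonitoringInterval ending
# AzureTableDelay seconds in the past, mirroring getAzureDiagnosticKeyRange.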
def getStorageMetrics(account, key, hostBase, table, startKey, endKey):
try:
waagent.Log("Retrieve storage metrics data.")
tableService = TableService(account_name = account,
account_key = key,
host_base = hostBase)
ofilter = ("PartitionKey ge '{0}' and PartitionKey lt '{1}'"
"").format(startKey, endKey)
oselect = ("TotalRequests,TotalIngress,TotalEgress,AverageE2ELatency,"
"AverageServerLatency,RowKey")
metrics = tableService.query_entities(table, ofilter, oselect)
waagent.Log("{0} records returned.".format(len(metrics)))
return metrics
except Exception, e:
waagent.Error(("Failed to retrieve storage metrics data: {0} {1}"
"").format(printable(e), traceback.format_exc()))
AddExtensionEvent(message=FAILED_TO_RETRIEVE_STORAGE_DATA)
return None
def getDataDisks():
blockDevs = os.listdir('/sys/block')
dataDisks = filter(lambda d : re.match("sd[c-z]", d), blockDevs)
return dataDisks
def getFirstLun(dev):
path = os.path.join("/sys/block", dev, "device/scsi_disk")
for lun in os.listdir(path):
return int(lun[-1])
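# Note: getFirstLun assumes the scsi_disk entry is named "host:channel:target:lun"
# (e.g. "5:0:0:1" -> LUN 1) and that the LUN is a single digit, since only the
# last character of the directory name is parsed.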
class DiskInfo(object):
def __init__(self, config):
self.config = config
def getDiskMapping(self):
osdiskVhd = "{0} {1}".format(self.config.getOSDiskAccount(),
self.config.getOSDiskName())
diskMapping = {
"/dev/sda": osdiskVhd,
"/dev/sdb": "not mapped to vhd"
}
dataDisks = getDataDisks()
if dataDisks is None or len(dataDisks) == 0:
return diskMapping
lunToDevMap = {}
for dev in dataDisks:
lun = getFirstLun(dev)
lunToDevMap[lun] = dev
diskCount = self.config.getDataDiskCount()
for i in range(0, diskCount):
lun = self.config.getDataDiskLun(i)
vhd = "{0} {1}".format(self.config.getDataDiskAccount(i),
self.config.getDataDiskName(i))
if lun in lunToDevMap:
dev = lunToDevMap[lun]
diskMapping[dev] = vhd
else:
waagent.Warn("Couldn't find disk with lun: {0}".format(lun))
return diskMapping
def isUserRead(op):
if not op.startswith("user;"):
return False
op = op[5:]
for prefix in ["Get", "List", "Preflight"]:
if op.startswith(prefix):
return True
return False
def isUserWrite(op):
if not op.startswith("user;"):
return False
op = op[5:]
for prefix in ["Put" ,"Set" ,"Clear" ,"Delete" ,"Create" ,"Snapshot"]:
if op.startswith(prefix):
return True
return False
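# Classification examples (illustrative operation names in the "user;<Operation>"
# convention used above): isUserRead("user;GetBlob") and isUserRead("user;ListBlobs")
# are True, isUserWrite("user;PutPage") is True, while "system;GetBlob" matches
# neither because it lacks the "user;" prefix.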
def storageStat(metrics, opFilter):
stat = {}
stat['bytes'] = None
stat['ops'] = None
stat['e2eLatency'] = None
stat['serverLatency'] = None
stat['throughput'] = None
if metrics is None:
return stat
metrics = filter(lambda x : opFilter(x.RowKey), metrics)
stat['bytes'] = sum(map(lambda x : x.TotalIngress + x.TotalEgress,
metrics))
stat['ops'] = sum(map(lambda x : x.TotalRequests, metrics))
if stat['ops'] != 0:
stat['e2eLatency'] = sum(map(lambda x : x.TotalRequests * \
x.AverageE2ELatency,
metrics)) / stat['ops']
stat['serverLatency'] = sum(map(lambda x : x.TotalRequests * \
x.AverageServerLatency,
metrics)) / stat['ops']
#Convert to MB/s
stat['throughput'] = float(stat['bytes']) / (1024 * 1024) / 60
return stat
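# Throughput sketch: 'bytes' is the total ingress plus egress over one 60-second
# MonitoringInterval, so dividing by (1024*1024) and by 60 yields MB/s; e.g. a
# minute with 6 MiB of traffic gives 6/60 = 0.1 MB/s (illustrative numbers).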
class AzureStorageStat(object):
def __init__(self, metrics):
self.metrics = metrics
self.rStat = storageStat(metrics, isUserRead)
self.wStat = storageStat(metrics, isUserWrite)
def getReadBytes(self):
return self.rStat['bytes']
def getReadOps(self):
return self.rStat['ops']
def getReadOpE2ELatency(self):
return self.rStat['e2eLatency']
def getReadOpServerLatency(self):
return self.rStat['serverLatency']
def getReadOpThroughput(self):
return self.rStat['throughput']
def getWriteBytes(self):
return self.wStat['bytes']
def getWriteOps(self):
return self.wStat['ops']
def getWriteOpE2ELatency(self):
return self.wStat['e2eLatency']
def getWriteOpServerLatency(self):
return self.wStat['serverLatency']
def getWriteOpThroughput(self):
return self.wStat['throughput']
class StorageDataSource(object):
def __init__(self, config):
self.config = config
def collect(self):
counters = []
diskMapping = DiskInfo(self.config).getDiskMapping()
for dev, vhd in diskMapping.iteritems():
counters.append(self.createCounterDiskMapping(dev, vhd))
accounts = self.config.getStorageAccountNames()
startKey, endKey = getStorageTableKeyRange()
for account in accounts:
tableName = self.config.getStorageAccountMinuteTable(account)
accountKey = self.config.getStorageAccountKey(account)
hostBase = self.config.getStorageHostBase(account)
metrics = getStorageMetrics(account,
accountKey,
hostBase,
tableName,
startKey,
endKey)
stat = AzureStorageStat(metrics)
counters.append(self.createCounterStorageId(account))
counters.append(self.createCounterReadBytes(account, stat))
counters.append(self.createCounterReadOps(account, stat))
counters.append(self.createCounterReadOpE2ELatency(account, stat))
counters.append(self.createCounterReadOpServerLatency(account,
stat))
counters.append(self.createCounterReadOpThroughput(account, stat))
counters.append(self.createCounterWriteBytes(account, stat))
counters.append(self.createCounterWriteOps(account, stat))
counters.append(self.createCounterWriteOpE2ELatency(account, stat))
counters.append(self.createCounterWriteOpServerLatency(account,
stat))
counters.append(self.createCounterWriteOpThroughput(account, stat))
return counters
def createCounterReadBytes(self, account, stat):
return PerfCounter(counterType = PerfCounterType.COUNTER_TYPE_LARGE,
category = "storage",
name = "Storage Read Bytes",
instance = account,
value = stat.getReadBytes(),
unit = 'byte',
refreshInterval = 60)
def createCounterReadOps(self, account, stat):
return PerfCounter(counterType = PerfCounterType.COUNTER_TYPE_INT,
category = "storage",
name = "Storage Read Ops",
instance = account,
value = stat.getReadOps(),
refreshInterval = 60)
def createCounterReadOpE2ELatency(self, account, stat):
return PerfCounter(counterType = PerfCounterType.COUNTER_TYPE_DOUBLE,
category = "storage",
name = "Storage Read Op Latency E2E msec",
instance = account,
value = stat.getReadOpE2ELatency(),
unit = 'ms',
refreshInterval = 60)
def createCounterReadOpServerLatency(self, account, stat):
return PerfCounter(counterType = PerfCounterType.COUNTER_TYPE_DOUBLE,
category = "storage",
name = "Storage Read Op Latency Server msec",
instance = account,
value = stat.getReadOpServerLatency(),
unit = 'ms',
refreshInterval = 60)
def createCounterReadOpThroughput(self, account, stat):
return PerfCounter(counterType = PerfCounterType.COUNTER_TYPE_DOUBLE,
category = "storage",
name = "Storage Read Throughput E2E MB/sec",
instance = account,
value = stat.getReadOpThroughput(),
unit = 'MB/s',
refreshInterval = 60)
def createCounterWriteBytes(self, account, stat):
return PerfCounter(counterType = PerfCounterType.COUNTER_TYPE_LARGE,
category = "storage",
name = "Storage Write Bytes",
instance = account,
value = stat.getWriteBytes(),
unit = 'byte',
refreshInterval = 60)
def createCounterWriteOps(self, account, stat):
return PerfCounter(counterType = PerfCounterType.COUNTER_TYPE_INT,
category = "storage",
name = "Storage Write Ops",
instance = account,
value = stat.getWriteOps(),
refreshInterval = 60)
def createCounterWriteOpE2ELatency(self, account, stat):
return PerfCounter(counterType = PerfCounterType.COUNTER_TYPE_DOUBLE,
category = "storage",
name = "Storage Write Op Latency E2E msec",
instance = account,
value = stat.getWriteOpE2ELatency(),
unit = 'ms',
refreshInterval = 60)
def createCounterWriteOpServerLatency(self, account, stat):
return PerfCounter(counterType = PerfCounterType.COUNTER_TYPE_DOUBLE,
category = "storage",
name = "Storage Write Op Latency Server msec",
instance = account,
value = stat.getWriteOpServerLatency(),
unit = 'ms',
refreshInterval = 60)
def createCounterWriteOpThroughput(self, account, stat):
return PerfCounter(counterType = PerfCounterType.COUNTER_TYPE_DOUBLE,
category = "storage",
name = "Storage Write Throughput E2E MB/sec",
instance = account,
value = stat.getWriteOpThroughput(),
unit = 'MB/s',
refreshInterval = 60)
def createCounterStorageId(self, account):
return PerfCounter(counterType = PerfCounterType.COUNTER_TYPE_STRING,
category = "storage",
name = "Storage ID",
instance = account,
value = account)
def createCounterDiskMapping(self, dev, vhd):
return PerfCounter(counterType = PerfCounterType.COUNTER_TYPE_STRING,
category = "storage",
name = "Phys. Disc to Storage Mapping",
instance = dev,
value = vhd)
class HvInfo(object):
def __init__(self):
self.hvName = None
self.hvVersion = None
root_dir = os.path.dirname(__file__)
cmd = os.path.join(root_dir, "bin/hvinfo")
ret, output = waagent.RunGetOutput(cmd, chk_err=False)
print ret
if ret == 0 and output is not None:
lines = output.split("\n")
if len(lines) >= 2:
self.hvName = lines[0]
self.hvVersion = lines[1]
def getHvName(self):
return self.hvName
def getHvVersion(self):
return self.hvVersion
class StaticDataSource(object):
def __init__(self, config):
self.config = config
def collect(self):
counters = []
hvInfo = HvInfo()
counters.append(self.createCounterCloudProvider())
counters.append(self.createCounterCpuOverCommitted())
counters.append(self.createCounterMemoryOverCommitted())
counters.append(self.createCounterDataProviderVersion())
counters.append(self.createCounterDataSources())
counters.append(self.createCounterInstanceType())
counters.append(self.createCounterVirtSln(hvInfo.getHvName()))
counters.append(self.createCounterVirtSlnVersion(hvInfo.getHvVersion()))
return counters
def createCounterCloudProvider(self):
return PerfCounter(counterType = PerfCounterType.COUNTER_TYPE_STRING,
category = "config",
name = "Cloud Provider",
value = "Microsoft Azure")
def createCounterVirtSlnVersion(self, hvVersion):
return PerfCounter(counterType = PerfCounterType.COUNTER_TYPE_STRING,
category = "config",
name = "Virtualization Solution Version",
value = hvVersion)
def createCounterVirtSln(self, hvName):
return PerfCounter(counterType = PerfCounterType.COUNTER_TYPE_STRING,
category = "config",
name = "Virtualization Solution",
value = hvName)
def createCounterInstanceType(self):
return PerfCounter(counterType = PerfCounterType.COUNTER_TYPE_STRING,
category = "config",
name = "Instance Type",
value = self.config.getVmSize())
def createCounterDataSources(self):
dataSource = "wad" if self.config.isLADEnabled() else "local"
return PerfCounter(counterType = PerfCounterType.COUNTER_TYPE_STRING,
category = "config",
name = "Data Sources",
value = dataSource)
def createCounterDataProviderVersion(self):
return PerfCounter(counterType = PerfCounterType.COUNTER_TYPE_STRING,
category = "config",
name = "Data Provider Version",
value = AzureEnhancedMonitorVersion)
def createCounterMemoryOverCommitted(self):
value = "yes" if self.config.isMemoryOverCommitted() else "no"
return PerfCounter(counterType = PerfCounterType.COUNTER_TYPE_STRING,
category = "config",
name = "Memory Over-Provisioning",
value = value)
def createCounterCpuOverCommitted(self):
value = "yes" if self.config.isCpuOverCommitted() else "no"
return PerfCounter(counterType = PerfCounterType.COUNTER_TYPE_STRING,
category = "config",
name = "CPU Over-Provisioning",
value = value)
class PerfCounterType(object):
COUNTER_TYPE_INVALID = 0
COUNTER_TYPE_INT = 1
COUNTER_TYPE_DOUBLE = 2
COUNTER_TYPE_LARGE = 3
COUNTER_TYPE_STRING = 4
class PerfCounter(object):
def __init__(self,
counterType,
category,
name,
value,
instance="",
unit="none",
timestamp = None,
refreshInterval=0):
self.counterType = counterType
self.category = category
self.name = name
self.instance = instance
self.value = value
self.unit = unit
self.refreshInterval = refreshInterval
        if timestamp:
self.timestamp = timestamp
else:
self.timestamp = int(time.time())
self.machine = socket.gethostname()
def __str__(self):
return (u"{0};{1};{2};{3};{4};{5};{6};{7};{8};{9};\n"
"").format(self.counterType,
self.category,
self.name,
self.instance,
0 if self.value is not None else 1,
self.value if self.value is not None else "",
self.unit,
self.refreshInterval,
self.timestamp,
self.machine)
__repr__ = __str__
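# Illustrative sketch, not part of the extension: each PerfCounter serializes
# (via __str__ above) to one semicolon-delimited line in the field order
# counterType;category;name;instance;isEmpty;value;unit;refreshInterval;timestamp;machine;
# where isEmpty is 0 when a value is present.  The account name and latency
# below are made-up sample data.
def _example_counter_line():
    counter = PerfCounter(counterType=PerfCounterType.COUNTER_TYPE_DOUBLE,
                          category="storage",
                          name="Storage Read Op Latency E2E msec",
                          instance="sampleaccount",
                          value=12.5,
                          unit='ms',
                          refreshInterval=60)
    # Yields something like:
    # 2;storage;Storage Read Op Latency E2E msec;sampleaccount;0;12.5;ms;60;<timestamp>;<hostname>;
    return str(counter)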
class EnhancedMonitor(object):
def __init__(self, config):
self.dataSources = []
self.dataSources.append(VMDataSource(config))
self.dataSources.append(StorageDataSource(config))
self.dataSources.append(StaticDataSource(config))
self.writer = PerfCounterWriter()
def run(self):
counters = []
for dataSource in self.dataSources:
counters.extend(dataSource.collect())
self.writer.write(counters)
EventFile=os.path.join(LibDir, "PerfCounters")
class PerfCounterWriter(object):
    def write(self, counters, maxRetry = 3, eventFile=EventFile):
        last_error = None
        for i in range(0, maxRetry):
            try:
                self._write(counters, eventFile)
                waagent.Log(("Wrote {0} counters to event file."
                             "").format(len(counters)))
                return
            except IOError, e:
                last_error = e
                waagent.Warn(("Write to perf counters file failed: {0}"
                              "").format(printable(e)))
                waagent.Log("Retry: {0}".format(i))
                time.sleep(1)
        waagent.Error(("Failed to serialize perf counters to file: "
                       "{0}").format(eventFile))
        AddExtensionEvent(message=FAILED_TO_SERIALIZE_PERF_COUNTERS)
        raise last_error
def _write(self, counters, eventFile):
with open(eventFile, "w+") as F:
F.write("".join(map(lambda c : str(c), counters)).encode("utf8"))
class EnhancedMonitorConfig(object):
def __init__(self, privateConfig, publicConfig):
self.configData = {}
diskCount = 0
accountNames = []
for item in publicConfig["cfg"]:
self.configData[item["key"]] = item["value"]
if item["key"].startswith("disk.lun"):
diskCount = diskCount + 1
for item in privateConfig["cfg"]:
self.configData[item["key"]] = item["value"]
if item["key"].endswith("minute.name"):
accountNames.append(item["value"])
self.configData["disk.count"] = diskCount
self.configData["account.names"] = accountNames
def getVmSize(self):
return self.configData["vmsize"]
def getVmRoleInstance(self):
return self.configData["vm.roleinstance"]
def getVmDeploymentId(self):
return self.configData["vm.depoymentId"]
def isMemoryOverCommitted(self):
return self.configData["vm.memory.isovercommitted"]
def isCpuOverCommitted(self):
return self.configData["vm.cpu.isovercommitted"]
def getScriptVersion(self):
return self.configData["script.version"]
def isVerbose(self):
flag = self.configData["verbose"]
return flag == "1" or flag == 1
def getOSDiskName(self):
return self.configData["osdisk.name"]
def getOSDiskAccount(self):
osdiskConnMinute = self.getOSDiskConnMinute()
return self.configData["{0}.name".format(osdiskConnMinute)]
def getOSDiskConnMinute(self):
return self.configData["osdisk.connminute"]
def getOSDiskConnHour(self):
return self.configData["osdisk.connhour"]
def getDataDiskCount(self):
return self.configData["disk.count"]
def getDataDiskLun(self, index):
return self.configData["disk.lun.{0}".format(index)]
def getDataDiskName(self, index):
return self.configData["disk.name.{0}".format(index)]
def getDataDiskAccount(self, index):
return self.configData["disk.account.{0}".format(index)]
def getDataDiskConnMinute(self, index):
return self.configData["disk.connminute.{0}".format(index)]
def getDataDiskConnHour(self, index):
return self.configData["disk.connhour.{0}".format(index)]
def getStorageAccountNames(self):
return self.configData["account.names"]
def getStorageAccountKey(self, name):
return self.configData["{0}.minute.key".format(name)]
def getStorageHostBase(self, name):
return get_host_base_from_uri(self.getStorageAccountMinuteUri(name))
def getStorageAccountMinuteUri(self, name):
return self.configData["{0}.minute.uri".format(name)]
def getStorageAccountMinuteTable(self, name):
uri = self.getStorageAccountMinuteUri(name)
pos = uri.rfind('/')
tableName = uri[pos+1:]
return tableName
def getStorageAccountHourUri(self, name):
return self.configData["{0}.hour.uri".format(name)]
def isLADEnabled(self):
flag = self.configData["wad.isenabled"]
return flag == "1" or flag == 1
def getLADKey(self):
return self.configData["wad.key"]
def getLADName(self):
return self.configData["wad.name"]
def getLADHostBase(self):
return get_host_base_from_uri(self.getLADUri())
def getLADUri(self):
return self.configData["wad.uri"]
|
|
from swiftype import swiftype
import os
import time
import unittest2 as unittest
from six.moves.urllib_parse import urlparse, parse_qs
import vcr
from mock import Mock
class TestClientFunctions(unittest.TestCase):
def setUp(self):
try:
api_key = os.environ['API_KEY']
        except KeyError:
api_key = "a-test-api-key"
self.client = swiftype.Client(api_key=api_key, host='localhost:3000')
self.engine = 'api-test'
self.document_type = 'books'
def test_engines(self):
with vcr.use_cassette('fixtures/engines.yaml'):
self.__is_expected_collection(self.client.engines, 200, 3, {'slug': 'api-test'})
def test_engine(self):
with vcr.use_cassette('fixtures/engine.yaml'):
slug = self.client.engine(self.engine)['body']['slug']
self.assertEqual(slug, self.engine)
def test_engine_create(self):
with vcr.use_cassette('fixtures/engine_create.yaml'):
engine = 'myengine'
slug = self.client.create_engine(engine)['body']['slug']
self.assertEqual(slug, engine)
def test_engine_destroy(self):
with vcr.use_cassette('fixtures/engine_destroy.yaml'):
engine = 'myengine'
response = self.client.destroy_engine(engine)
self.assertEqual(response['status'], 204)
def test_document_types(self):
with vcr.use_cassette('fixtures/document_types.yaml'):
self.__is_expected_collection(self.client.document_types, 200, 2, {'slug': 'books'}, self.engine)
def test_document_type(self):
with vcr.use_cassette('fixtures/document_type.yaml'):
self.__is_expected_result(self.client.document_type, 200, {'slug': self.document_type}, self.engine, self.document_type)
def test_create_document_type(self):
with vcr.use_cassette('fixtures/create_document_type.yaml'):
document_type = 'videos'
slug = self.client.create_document_type(self.engine, document_type)['body']['slug']
self.assertEqual(slug, document_type)
def test_destroy_document_type(self):
with vcr.use_cassette('fixtures/destroy_document_type.yaml'):
document_type = 'videos'
response = self.client.destroy_document_type(self.engine, document_type)
self.assertEqual(response['status'], 204)
def test_documents(self):
with vcr.use_cassette('fixtures/documents.yaml'):
self.__is_expected_collection(self.client.documents, 200, 2, {'external_id': '1'}, self.engine, self.document_type)
def test_documents_pagination(self):
with vcr.use_cassette('fixtures/documents_pagination.yaml'):
self.__is_expected_collection(self.client.documents, 200, 2, {'external_id': '1'}, self.engine, self.document_type, 2, 10)
def test_document(self):
with vcr.use_cassette('fixtures/document.yaml'):
external_id = '1'
id = self.client.document(self.engine, self.document_type, external_id)['body']['external_id']
self.assertEqual(id, external_id)
def test_create_document(self):
with vcr.use_cassette('fixtures/create_document.yaml'):
doc_id = 'doc_id'
id = self.client.create_document(self.engine, self.document_type, {'external_id': doc_id})['body']['external_id']
self.assertEqual(id, doc_id)
def test_create_documents(self):
with vcr.use_cassette('fixtures/create_documents.yaml'):
docs = [{'external_id': 'doc_id1'}, {'external_id': 'doc_id2'}]
stati = self.client.create_documents(self.engine, self.document_type, docs)['body']
self.assertEqual(stati, [True, True])
def test_create_or_update_document(self):
with vcr.use_cassette('fixtures/create_or_update_document.yaml'):
id = '1'
external_id = self.client.create_or_update_document(self.engine, self.document_type, {'external_id': id, 'fields': {}})['body']['external_id']
self.assertEqual(external_id, id)
def test_create_or_update_documents(self):
with vcr.use_cassette('fixtures/create_or_update_documents.yaml'):
docs = [{'external_id': '1'}, {'external_id': '2'}]
stati = self.client.create_or_update_documents(self.engine, self.document_type, docs)['body']
self.assertEqual(stati, [True, True])
def test_create_or_update_documents_failure(self):
with vcr.use_cassette('fixtures/create_or_update_documents_failure.yaml'):
docs = [{'external_id': '1', 'fields': [{'type': 'string', 'name': 'title'}]}] # <= missing 'value'
stati = self.client.create_or_update_documents(self.engine, self.document_type, docs)['body']
self.assertEqual(stati, [False])
def test_create_or_update_documents_verbose(self):
with vcr.use_cassette('fixtures/create_or_update_documents_verbose.yaml'):
docs = [{'external_id': '1'}, {'external_id': '2'}]
stati = self.client.create_or_update_documents_verbose(self.engine, self.document_type, docs)['body']
self.assertEqual(stati, [True, True])
def test_create_or_update_documents_verbose_failure(self):
with vcr.use_cassette('fixtures/create_or_update_documents_verbose_failure.yaml'):
docs = [{'external_id': '1', 'fields': [{'type': 'string', 'name': 'title'}]}] # <= missing 'value'
stati = self.client.create_or_update_documents_verbose(self.engine, self.document_type, docs)['body']
self.assertRegexpMatches(stati[0], r'^Invalid field definition')
def test_update_document(self):
with vcr.use_cassette('fixtures/update_document.yaml'):
document_id = '2'
id = self.client.update_document(self.engine, self.document_type, document_id, {'title': 'a new title'})['body']['external_id']
self.assertEqual(id, document_id)
def test_update_documents(self):
with vcr.use_cassette('fixtures/update_documents.yaml'):
documents = [ {'external_id': '1', 'fields': { 'myfieldthathasnotbeencreated': 'foobar' }},
{'external_id': '2', 'fields': { 'title': 'new title' }} ]
stati = self.client.update_documents(self.engine, self.document_type, documents)['body']
self.assertEqual(stati, [False, True])
def test_destroy_document(self):
with vcr.use_cassette('fixtures/destroy_document.yaml'):
response = self.client.destroy_document(self.engine, self.document_type, 'doc_id')
self.assertEqual(response['status'], 204)
def test_destroy_documents(self):
with vcr.use_cassette('fixtures/destroy_documents.yaml'):
documents = ['doc_id1', 'doc_id2']
stati = self.client.destroy_documents(self.engine, self.document_type, documents)['body']
self.assertEqual(stati, [True, True])
def test_search(self):
with vcr.use_cassette('fixtures/search.yaml'):
total_count = len(self.client.document_types(self.engine)['body'])
self.assertTrue(total_count > 1)
self.__is_expected_search_result(self.client.search, total_count)
def test_search_with_options(self):
with vcr.use_cassette('fixtures/search_with_options.yaml'):
total_count = len(self.client.document_types(self.engine)['body'])
self.assertTrue(total_count > 1)
response = self.client.search(self.engine, 'query', {'page': 2})
self.assertEqual(len(response['body']['records']), total_count)
def test_search_document_type(self):
with vcr.use_cassette('fixtures/search_document_type.yaml'):
self.__is_expected_search_result(self.client.search_document_type, 1, [self.document_type])
def test_search_document_type_with_options(self):
with vcr.use_cassette('fixtures/search_document_type_with_options.yaml'):
response = self.client.search_document_type(self.engine, self.document_type, "query", {'page': 2})
self.assertEqual(len(response['body']['records']), 1)
def test_suggest(self):
with vcr.use_cassette('fixtures/suggest.yaml'):
total_count = len(self.client.document_types(self.engine)['body'])
self.assertTrue(total_count > 1)
self.__is_expected_search_result(self.client.suggest, total_count)
def test_suggest_with_options(self):
with vcr.use_cassette('fixtures/suggest_with_options.yaml'):
total_count = len(self.client.document_types(self.engine)['body'])
self.assertTrue(total_count > 1)
response = self.client.suggest(self.engine, 'query', {'page': 2})
self.assertEqual(len(response['body']['records']), total_count)
def test_suggest_document_type(self):
with vcr.use_cassette('fixtures/suggest_document_type.yaml'):
self.__is_expected_search_result(self.client.suggest_document_type, 1, [self.document_type])
def test_suggest_document_type_with_options(self):
with vcr.use_cassette('fixtures/suggest_document_type_with_options.yaml'):
response = self.client.suggest_document_type(self.engine, self.document_type, "query", {'page': 2})
self.assertEqual(len(response['body']['records']), 1)
def test_analytics_searches(self):
with vcr.use_cassette('fixtures/analytics_searches.yaml'):
searches = self.client.analytics_searches(self.engine)['body']
self.assertTrue(len(searches) == 15)
def test_analytics_searches_pagination(self):
with vcr.use_cassette('fixtures/analytics_searches_pagination.yaml'):
searches = self.client.analytics_searches(self.engine, '2013-12-31', '2014-01-01')['body']
self.assertTrue(len(searches) == 2)
def test_analytics_autoselects(self):
with vcr.use_cassette('fixtures/analytics_autoselects.yaml'):
autoselects = self.client.analytics_autoselects(self.engine)['body']
self.assertTrue(len(autoselects) == 15)
def test_analytics_autoselects_pagination(self):
with vcr.use_cassette('fixtures/analytics_autoselects_pagination.yaml'):
autoselects = self.client.analytics_autoselects(self.engine, '2013-12-31', '2014-01-01')['body']
self.assertTrue(len(autoselects) == 2)
def test_analytics_top_queries(self):
with vcr.use_cassette('fixtures/analytics_top_queries.yaml'):
top_queries = self.client.analytics_top_queries(self.engine)['body']
self.assertTrue(len(top_queries) == 2)
def test_analytics_top_queries_pagination(self):
with vcr.use_cassette('fixtures/analytics_top_queries_pagination.yaml'):
top_queries = self.client.analytics_top_queries(self.engine, 2, 10)['body']
self.assertTrue(len(top_queries) == 2)
def test_analytics_top_queries_in_range(self):
with vcr.use_cassette('fixtures/analytics_top_queries_in_range.yaml'):
top_queries = self.client.analytics_top_queries_in_range(self.engine, '2013-12-31', '2014-01-01')['body']
self.assertTrue(len(top_queries) == 2)
def test_analytics_top_no_result_queries(self):
with vcr.use_cassette('fixtures/analytics_top_no_result_queries.yaml'):
autoselects = self.client.analytics_top_no_result_queries(self.engine)['body']
self.assertTrue(len(autoselects) == 2)
def test_analytics_top_no_result_queries_with_dates(self):
with vcr.use_cassette('fixtures/analytics_top_no_result_queries_with_dates.yaml'):
autoselects = self.client.analytics_top_no_result_queries(self.engine, '2013-12-31', '2014-01-01')['body']
self.assertTrue(len(autoselects) == 2)
def test_domains(self):
with vcr.use_cassette('fixtures/domains.yaml'):
domains = self.client.domains('crawler-demo')['body']
self.assertTrue(len(domains) == 2)
def test_domain(self):
with vcr.use_cassette('fixtures/domain.yaml'):
domain_id = '52c759423ae7403ec900003b'
domain = self.client.domain('crawler-demo', domain_id)['body']
self.assertEqual(domain['id'], domain_id)
def test_create_domain(self):
with vcr.use_cassette('fixtures/create_domain.yaml'):
url = 'http://www.example.com'
domain_url = self.client.create_domain('crawler-demo', url)['body']['submitted_url']
self.assertEqual(domain_url, url)
def test_destroy_domain(self):
with vcr.use_cassette('fixtures/destroy_domain.yaml'):
status = self.client.destroy_domain('crawler-demo', '52c759423ae7403ec900003b')['status']
self.assertEqual(status, 204)
def test_recrawl_domain(self):
with vcr.use_cassette('fixtures/recrawl_domain.yaml'):
domain_id = '52c754fb3ae7406fd3000001'
domain = self.client.recrawl_domain('crawler-demo', domain_id)['body']
self.assertEqual(domain['id'], domain_id)
def test_crawl_url(self):
with vcr.use_cassette('fixtures/crawl_domain.yaml'):
domain_id = '52c754fb3ae7406fd3000001'
url = 'http://crawler-demo-site.herokuapp.com/2012/01/01/first-post.html'
crawled_url = self.client.crawl_url('crawler-demo', domain_id, url)['body']['url']
self.assertEqual(crawled_url, url)
def __is_expected_search_result(self, request, document_type_count, args=[]):
response = request(self.engine, *(args + ['*']))
self.assertEqual(len(response['body']['records']), document_type_count)
def __is_expected_result(self, request, status_code, expected_values, *args):
response = request(*args)
self.assertEqual(response['status'], status_code)
for k,v in expected_values.items():
self.assertEqual(response['body'][k], v)
def __is_expected_collection(self, request, status_code, collection_length, expected_values, *args):
response = request(*args)
self.assertEqual(response['status'], status_code)
self.assertEqual(len(response['body']), collection_length)
for k,v in expected_values.items():
self.assertEqual(len([item for item in response['body'] if item[k] == v]), 1)
def __time_name(self):
return str(int(time.mktime(time.gmtime())))
def __create_temporary_engine(self, name = None):
name = name if name else self.__time_name()
return
class TestClientUsernameAndPassword(unittest.TestCase):
def setUp(self):
self.client = swiftype.Client(
username='some_user',
password='some_pasword',
host='localhost:3000'
)
def test_engine_create(self):
with vcr.use_cassette('fixtures/engine_create.yaml'):
engine = 'myengine'
slug = self.client.create_engine(engine)['body']['slug']
self.assertEqual(slug, engine)
class TestPlatformUsers(unittest.TestCase):
def setUp(self):
try:
api_key = os.environ['API_KEY']
        except KeyError:
api_key = "a-test-api-key"
client_id = '3e4fd842fc99aecb4dc50e5b88a186c1e206ddd516cdd336da3622c4afd7e2e9'
client_secret = '4441879b5e2a9c3271f5b1a4bc223b715f091e5ed20fe75d1352e1290c7a6dfb'
self.client = swiftype.Client(api_key=api_key, client_id=client_id, client_secret=client_secret, host='localhost:3000')
def test_users(self):
with vcr.use_cassette('fixtures/users.yaml'):
response = self.client.users()
self.assertEqual(response['status'], 200)
self.assertEqual(len(response['body']), 2)
def test_users_pagination(self):
with vcr.use_cassette('fixtures/users_pagination.yaml'):
response = self.client.users(page=2)
self.assertEqual(response['status'], 200)
self.assertEqual(len(response['body']), 0)
def test_user(self):
with vcr.use_cassette('fixtures/user.yaml'):
user_id = '12345'
response = self.client.user(user_id)
self.assertEqual(response['body']['id'], user_id)
def test_create_user(self):
with vcr.use_cassette('fixtures/create_user.yaml'):
response = self.client.create_user()
self.assertEqual(response['status'], 200)
def test_sso_token(self):
timestamp = 1379382520
user_id = '5064a7de2ed960e715000276'
token = self.client._sso_token(user_id, timestamp)
self.assertEqual(token, '81033d182ad51f231cc9cda9fb24f2298a411437')
def test_sso_url(self):
self.client._get_timestamp = Mock(return_value=1379382520)
user_id = '5064a7de2ed960e715000276'
url = self.client.sso_url(user_id)
self.assertEqual(
parse_qs(urlparse(url).query),
{
'user_id': ['5064a7de2ed960e715000276'],
'client_id': ['3e4fd842fc99aecb4dc50e5b88a186c1e206ddd516cdd336da3622c4afd7e2e9'],
'token': ['81033d182ad51f231cc9cda9fb24f2298a411437'],
'timestamp': ['1379382520'],
},
)
class TestPlatformResources(unittest.TestCase):
def setUp(self):
access_token = '6cf7fbd297f00a8e3863a0595f55ff7d141cbef2fcbe00159d0f7403649b384e'
self.engine = 'myusersengine'
self.document_type = 'videos'
self.client = swiftype.Client(access_token=access_token, host='localhost:3000')
def test_platform_engine_create(self):
with vcr.use_cassette('fixtures/platform_engine_create.yaml'):
response = self.client.create_engine(self.engine)
self.assertEqual(response['body']['name'], self.engine)
def test_platform_create_document_type(self):
with vcr.use_cassette('fixtures/platform_create_document_type.yaml'):
response = self.client.create_document_type(self.engine, self.document_type)
self.assertEqual(response['body']['slug'], self.document_type)
def test_platform_create_document(self):
with vcr.use_cassette('fixtures/platform_create_document.yaml'):
doc_id = 'doc_id'
id = self.client.create_document(self.engine, self.document_type, {'external_id': doc_id})['body']['external_id']
self.assertEqual(id, doc_id)
def test_platform_create_documents(self):
with vcr.use_cassette('fixtures/platform_create_documents.yaml'):
docs = [{'external_id': 'doc_id1'}, {'external_id': 'doc_id2'}]
stati = self.client.create_documents(self.engine, self.document_type, docs)['body']
self.assertEqual(stati, [True, True])
if __name__ == '__main__':
unittest.main()
|
|
# Copyright [2015] Hewlett-Packard Development Company, L.P.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import ConfigParser
import os
import subprocess
import tempfile
from oslo_log import log as logging
from oslo_utils import netutils
from trove.common import cfg
from trove.common import exception
from trove.common.i18n import _
from trove.common.i18n import _LI
from trove.common import instance as rd_instance
from trove.common.stream_codecs import PropertiesCodec
from trove.common import utils as utils
from trove.guestagent.common.configuration import ConfigurationManager
from trove.guestagent.common.configuration import ImportOverrideStrategy
from trove.guestagent.common import guestagent_utils
from trove.guestagent.common import operating_system
from trove.guestagent.common.operating_system import FileMode
from trove.guestagent.datastore.experimental.vertica import system
from trove.guestagent.datastore import service
from trove.guestagent.db import models
from trove.guestagent import pkg
from trove.guestagent import volume
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
packager = pkg.Package()
DB_NAME = 'db_srvr'
MOUNT_POINT = CONF.vertica.mount_point
# We will use a fake configuration file for the options managed through
# configuration groups that we apply directly with ALTER DB ... SET ...
FAKE_CFG = os.path.join(MOUNT_POINT, "vertica.cfg.fake")
class VerticaAppStatus(service.BaseDbStatus):
def _get_actual_db_status(self):
"""Get the status of dbaas and report it back."""
try:
out, err = system.shell_execute(system.STATUS_ACTIVE_DB,
system.VERTICA_ADMIN)
if out.strip() == DB_NAME:
# UP status is confirmed
LOG.info(_("Service Status is RUNNING."))
return rd_instance.ServiceStatuses.RUNNING
else:
LOG.info(_("Service Status is SHUTDOWN."))
return rd_instance.ServiceStatuses.SHUTDOWN
except exception.ProcessExecutionError:
LOG.exception(_("Failed to get database status."))
return rd_instance.ServiceStatuses.CRASHED
class VerticaApp(object):
"""Prepares DBaaS on a Guest container."""
def __init__(self, status):
self.state_change_wait_time = CONF.state_change_wait_time
self.status = status
revision_dir = \
guestagent_utils.build_file_path(
os.path.join(MOUNT_POINT,
os.path.dirname(system.VERTICA_ADMIN)),
ConfigurationManager.DEFAULT_STRATEGY_OVERRIDES_SUB_DIR)
if not operating_system.exists(FAKE_CFG):
operating_system.write_file(FAKE_CFG, '', as_root=True)
operating_system.chown(FAKE_CFG, system.VERTICA_ADMIN,
system.VERTICA_ADMIN_GRP, as_root=True)
operating_system.chmod(FAKE_CFG, FileMode.ADD_GRP_RX_OTH_RX(),
as_root=True)
self.configuration_manager = \
ConfigurationManager(FAKE_CFG, system.VERTICA_ADMIN,
system.VERTICA_ADMIN_GRP,
PropertiesCodec(delimiter='='),
requires_root=True,
override_strategy=ImportOverrideStrategy(
revision_dir, "cnf"))
def update_overrides(self, context, overrides, remove=False):
if overrides:
self.apply_overrides(overrides)
def remove_overrides(self):
config = self.configuration_manager.get_user_override()
self._reset_config(config)
self.configuration_manager.remove_user_override()
def apply_overrides(self, overrides):
self.configuration_manager.apply_user_override(overrides)
self._apply_config(overrides)
def _reset_config(self, config):
try:
db_password = self._get_database_password()
for k, v in config.iteritems():
alter_db_cmd = system.ALTER_DB_RESET_CFG % (DB_NAME, str(k))
out, err = system.exec_vsql_command(db_password, alter_db_cmd)
if err:
if err.is_warning():
LOG.warning(err)
else:
LOG.error(err)
raise RuntimeError(_("Failed to remove config %s") % k)
except Exception:
LOG.exception(_("Vertica configuration remove failed."))
raise RuntimeError(_("Vertica configuration remove failed."))
LOG.info(_("Vertica configuration reset completed."))
def _apply_config(self, config):
try:
db_password = self._get_database_password()
for k, v in config.iteritems():
alter_db_cmd = system.ALTER_DB_CFG % (DB_NAME, str(k), str(v))
out, err = system.exec_vsql_command(db_password, alter_db_cmd)
if err:
if err.is_warning():
LOG.warning(err)
else:
LOG.error(err)
raise RuntimeError(_("Failed to apply config %s") % k)
except Exception:
LOG.exception(_("Vertica configuration apply failed"))
raise RuntimeError(_("Vertica configuration apply failed"))
LOG.info(_("Vertica config apply completed."))
def _enable_db_on_boot(self):
try:
command = ["sudo", "su", "-", system.VERTICA_ADMIN, "-c",
(system.SET_RESTART_POLICY % (DB_NAME, "always"))]
subprocess.Popen(command)
command = ["sudo", "su", "-", "root", "-c",
(system.VERTICA_AGENT_SERVICE_COMMAND % "enable")]
subprocess.Popen(command)
except Exception:
LOG.exception(_("Failed to enable db on boot."))
raise RuntimeError("Could not enable db on boot.")
def _disable_db_on_boot(self):
try:
command = (system.SET_RESTART_POLICY % (DB_NAME, "never"))
system.shell_execute(command, system.VERTICA_ADMIN)
command = (system.VERTICA_AGENT_SERVICE_COMMAND % "disable")
system.shell_execute(command)
except exception.ProcessExecutionError:
LOG.exception(_("Failed to disable db on boot."))
raise RuntimeError("Could not disable db on boot.")
def stop_db(self, update_db=False, do_not_start_on_reboot=False):
"""Stop the database."""
LOG.info(_("Stopping Vertica."))
if do_not_start_on_reboot:
self._disable_db_on_boot()
try:
# Stop vertica-agent service
command = (system.VERTICA_AGENT_SERVICE_COMMAND % "stop")
system.shell_execute(command)
# Using Vertica adminTools to stop db.
db_password = self._get_database_password()
stop_db_command = (system.STOP_DB % (DB_NAME, db_password))
out, err = system.shell_execute(system.STATUS_ACTIVE_DB,
system.VERTICA_ADMIN)
if out.strip() == DB_NAME:
system.shell_execute(stop_db_command, system.VERTICA_ADMIN)
if not self.status._is_restarting:
if not self.status.wait_for_real_status_to_change_to(
rd_instance.ServiceStatuses.SHUTDOWN,
self.state_change_wait_time, update_db):
LOG.error(_("Could not stop Vertica."))
self.status.end_restart()
raise RuntimeError("Could not stop Vertica!")
LOG.debug("Database stopped.")
else:
LOG.debug("Database is not running.")
except exception.ProcessExecutionError:
LOG.exception(_("Failed to stop database."))
raise RuntimeError("Could not stop database.")
def start_db(self, update_db=False):
"""Start the database."""
LOG.info(_("Starting Vertica."))
try:
self._enable_db_on_boot()
# Start vertica-agent service
command = ["sudo", "su", "-", "root", "-c",
(system.VERTICA_AGENT_SERVICE_COMMAND % "start")]
subprocess.Popen(command)
# Using Vertica adminTools to start db.
db_password = self._get_database_password()
start_db_command = ["sudo", "su", "-", system.VERTICA_ADMIN, "-c",
(system.START_DB % (DB_NAME, db_password))]
subprocess.Popen(start_db_command)
if not self.status._is_restarting:
self.status.end_restart()
LOG.debug("Database started.")
except Exception as e:
raise RuntimeError(_("Could not start Vertica due to %s") % e)
def start_db_with_conf_changes(self, config_contents):
"""
        Save the given configuration and start Vertica. This method must be
        implemented to enable volume resize on the guestagent side.
"""
LOG.info(_("Starting Vertica with configuration changes."))
if self.status.is_running:
format = 'Cannot start_db_with_conf_changes because status is %s.'
LOG.debug(format, self.status)
raise RuntimeError(format % self.status)
LOG.info(_("Initiating config."))
self.configuration_manager.save_configuration(config_contents)
self.start_db(True)
def restart(self):
"""Restart the database."""
try:
self.status.begin_restart()
self.stop_db()
self.start_db()
finally:
self.status.end_restart()
def add_db_to_node(self, members=netutils.get_my_ipv4()):
"""Add db to host with admintools"""
LOG.info(_("Calling admintools to add DB to host"))
try:
            # Add the db to this node
db_password = self._get_database_password()
create_db_command = (system.ADD_DB_TO_NODE % (members,
DB_NAME,
db_password))
system.shell_execute(create_db_command, "dbadmin")
except exception.ProcessExecutionError:
            # Give vertica some time to bring the node up - it won't be
            # available by the time adminTools -t db_add_node completes
LOG.info(_("adminTools failed as expected - wait for node"))
self.wait_for_node_status()
LOG.info(_("Vertica add db to host completed."))
def remove_db_from_node(self, members=netutils.get_my_ipv4()):
"""Remove db from node with admintools"""
LOG.info(_("Removing db from node"))
try:
            # Remove the db from this node
db_password = self._get_database_password()
            remove_db_command = (system.REMOVE_DB_FROM_NODE % (members,
DB_NAME,
db_password))
            system.shell_execute(remove_db_command, "dbadmin")
except exception.ProcessExecutionError:
            # adminTools is expected to fail here; the node is not fully
            # removed by the time the command returns
LOG.info(_("adminTools failed as expected - wait for node"))
# Give vertica some time to take the node down - it won't be available
        # by the time adminTools completes
self.wait_for_node_status()
LOG.info(_("Vertica remove host from db completed."))
def create_db(self, members=netutils.get_my_ipv4()):
"""Prepare the guest machine with a Vertica db creation."""
LOG.info(_("Creating database on Vertica host."))
try:
# Create db after install
db_password = self._get_database_password()
create_db_command = (system.CREATE_DB % (members, DB_NAME,
MOUNT_POINT, MOUNT_POINT,
db_password))
system.shell_execute(create_db_command, system.VERTICA_ADMIN)
except Exception:
LOG.exception(_("Vertica database create failed."))
raise RuntimeError(_("Vertica database create failed."))
LOG.info(_("Vertica database create completed."))
def install_vertica(self, members=netutils.get_my_ipv4()):
"""Prepare the guest machine with a Vertica db creation."""
LOG.info(_("Installing Vertica Server."))
try:
            # Run the Vertica installer
install_vertica_cmd = (system.INSTALL_VERTICA % (members,
MOUNT_POINT))
system.shell_execute(install_vertica_cmd)
except exception.ProcessExecutionError:
LOG.exception(_("install_vertica failed."))
raise RuntimeError(_("install_vertica failed."))
self._generate_database_password()
LOG.info(_("install_vertica completed."))
def update_vertica(self, command, members=netutils.get_my_ipv4()):
LOG.info(_("Calling update_vertica with command %s") % command)
try:
update_vertica_cmd = (system.UPDATE_VERTICA % (command, members,
MOUNT_POINT))
system.shell_execute(update_vertica_cmd)
except exception.ProcessExecutionError:
LOG.exception(_("update_vertica failed."))
raise RuntimeError(_("update_vertica failed."))
# self._generate_database_password()
LOG.info(_("update_vertica completed."))
def add_udls(self):
"""Load the user defined load libraries into the database."""
LOG.info(_("Adding configured user defined load libraries."))
password = self._get_database_password()
loaded_udls = []
for lib in system.UDL_LIBS:
func_name = lib['func_name']
lib_name = lib['lib_name']
language = lib['language']
factory = lib['factory']
path = lib['path']
if os.path.isfile(path):
LOG.debug("Adding the %s library as %s." %
(func_name, lib_name))
out, err = system.exec_vsql_command(
password,
system.CREATE_LIBRARY % (lib_name, path)
)
if err:
if err.is_warning():
LOG.warning(err)
else:
LOG.error(err)
raise RuntimeError(_("Failed to create library %s.")
% lib_name)
out, err = system.exec_vsql_command(
password,
system.CREATE_SOURCE % (func_name, language,
factory, lib_name)
)
if err:
if err.is_warning():
LOG.warning(err)
else:
LOG.error(err)
raise RuntimeError(_("Failed to create source %s.")
% func_name)
loaded_udls.append(func_name)
else:
LOG.warning("Skipping %s as path %s not found." %
(func_name, path))
LOG.info(_("The following UDL functions are available for use: %s")
% loaded_udls)
def _generate_database_password(self):
"""Generate and write the password to vertica.cnf file."""
config = ConfigParser.ConfigParser()
config.add_section('credentials')
config.set('credentials', 'dbadmin_password',
utils.generate_random_password())
self.write_config(config)
def write_config(self, config,
unlink_function=os.unlink,
temp_function=tempfile.NamedTemporaryFile):
"""Write the configuration contents to vertica.cnf file."""
LOG.debug('Defining config holder at %s.' % system.VERTICA_CONF)
tempfile = temp_function(delete=False)
try:
config.write(tempfile)
tempfile.close()
command = (("install -o root -g root -m 644 %(source)s %(target)s"
) % {'source': tempfile.name,
'target': system.VERTICA_CONF})
system.shell_execute(command)
unlink_function(tempfile.name)
except Exception:
unlink_function(tempfile.name)
raise
def read_config(self):
"""Reads and returns the Vertica config."""
try:
config = ConfigParser.ConfigParser()
config.read(system.VERTICA_CONF)
return config
except Exception:
LOG.exception(_("Failed to read config %s.") % system.VERTICA_CONF)
raise RuntimeError
def _get_database_password(self):
"""Read the password from vertica.cnf file and return it."""
return self.read_config().get('credentials', 'dbadmin_password')
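    # For reference (illustrative sketch only): the vertica.cnf file produced
    # by _generate_database_password() and write_config() above is plain
    # ConfigParser/INI content of the form
    #
    #     [credentials]
    #     dbadmin_password = <randomly generated password>
    #
    # stored at system.VERTICA_CONF and read back by _get_database_password().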
def install_if_needed(self, packages):
"""Install Vertica package if needed."""
LOG.info(_("Preparing Guest as Vertica Server."))
if not packager.pkg_is_installed(packages):
LOG.debug("Installing Vertica Package.")
packager.pkg_install(packages, None, system.INSTALL_TIMEOUT)
def _set_readahead_for_disks(self):
"""This method sets readhead size for disks as needed by Vertica."""
device = volume.VolumeDevice(CONF.device_path)
device.set_readahead_size(CONF.vertica.readahead_size)
LOG.debug("Set readhead size as required by Vertica.")
def prepare_for_install_vertica(self):
"""This method executes preparatory methods before
executing install_vertica.
"""
command = ("VERT_DBA_USR=%s VERT_DBA_HOME=/home/dbadmin "
"VERT_DBA_GRP=%s /opt/vertica/oss/python/bin/python"
" -m vertica.local_coerce" %
(system.VERTICA_ADMIN, system.VERTICA_ADMIN_GRP))
try:
self._set_readahead_for_disks()
system.shell_execute(command)
except exception.ProcessExecutionError:
LOG.exception(_("Failed to prepare for install_vertica."))
raise
def mark_design_ksafe(self, k):
"""Wrapper for mark_design_ksafe function for setting k-safety """
LOG.info(_("Setting Vertica k-safety to %s") % str(k))
out, err = system.exec_vsql_command(self._get_database_password(),
system.MARK_DESIGN_KSAFE % k)
# Only fail if we get an ERROR as opposed to a warning complaining
# about setting k = 0
if "ERROR" in err:
LOG.error(err)
raise RuntimeError(_("Failed to set k-safety level %s.") % k)
def _create_user(self, username, password, role=None):
"""Creates a user, granting and enabling the given role for it."""
LOG.info(_("Creating user in Vertica database."))
out, err = system.exec_vsql_command(self._get_database_password(),
system.CREATE_USER %
(username, password))
if err:
if err.is_warning():
LOG.warning(err)
else:
LOG.error(err)
raise RuntimeError(_("Failed to create user %s.") % username)
if role:
self._grant_role(username, role)
def _grant_role(self, username, role):
"""Grants a role to the user on the schema."""
out, err = system.exec_vsql_command(self._get_database_password(),
system.GRANT_TO_USER
% (role, username))
if err:
if err.is_warning():
LOG.warning(err)
else:
LOG.error(err)
raise RuntimeError(_("Failed to grant role %(r)s to user "
"%(u)s.")
% {'r': role, 'u': username})
out, err = system.exec_vsql_command(self._get_database_password(),
system.ENABLE_FOR_USER
% (username, role))
if err:
LOG.warning(err)
def enable_root(self, root_password=None):
"""Resets the root password."""
LOG.info(_LI("Enabling root."))
user = models.RootUser()
user.name = "root"
user.host = "%"
user.password = root_password or utils.generate_random_password()
if not self.is_root_enabled():
self._create_user(user.name, user.password, 'pseudosuperuser')
else:
LOG.debug("Updating %s password." % user.name)
try:
out, err = system.exec_vsql_command(
self._get_database_password(),
system.ALTER_USER_PASSWORD % (user.name, user.password))
if err:
if err.is_warning():
LOG.warning(err)
else:
LOG.error(err)
raise RuntimeError(_("Failed to update %s "
"password.") % user.name)
except exception.ProcessExecutionError:
LOG.error(_("Failed to update %s password.") % user.name)
raise RuntimeError(_("Failed to update %s password.")
% user.name)
return user.serialize()
def is_root_enabled(self):
"""Return True if root access is enabled else False."""
LOG.debug("Checking is root enabled.")
try:
out, err = system.shell_execute(system.USER_EXISTS %
(self._get_database_password(),
'root'), system.VERTICA_ADMIN)
if err:
LOG.error(err)
raise RuntimeError(_("Failed to query for root user."))
except exception.ProcessExecutionError:
raise RuntimeError(_("Failed to query for root user."))
return out.rstrip() == "1"
def get_public_keys(self, user):
"""Generates key (if not found), and sends public key for user."""
LOG.debug("Public keys requested for user: %s." % user)
user_home_directory = os.path.expanduser('~' + user)
public_key_file_name = user_home_directory + '/.ssh/id_rsa.pub'
try:
key_generate_command = (system.SSH_KEY_GEN % user_home_directory)
system.shell_execute(key_generate_command, user)
except exception.ProcessExecutionError:
LOG.debug("Cannot generate key.")
try:
read_key_cmd = ("cat %(file)s" % {'file': public_key_file_name})
out, err = system.shell_execute(read_key_cmd)
except exception.ProcessExecutionError:
LOG.exception(_("Cannot read public key."))
raise
return out.strip()
def authorize_public_keys(self, user, public_keys):
"""Adds public key to authorized_keys for user."""
LOG.debug("public keys to be added for user: %s." % (user))
user_home_directory = os.path.expanduser('~' + user)
authorized_file_name = user_home_directory + '/.ssh/authorized_keys'
try:
read_key_cmd = ("cat %(file)s" % {'file': authorized_file_name})
out, err = system.shell_execute(read_key_cmd)
public_keys.append(out.strip())
except exception.ProcessExecutionError:
LOG.debug("Cannot read authorized_keys.")
all_keys = '\n'.join(public_keys) + "\n"
try:
with tempfile.NamedTemporaryFile(delete=False) as tempkeyfile:
tempkeyfile.write(all_keys)
copy_key_cmd = (("install -o %(user)s -m 600 %(source)s %(target)s"
) % {'user': user, 'source': tempkeyfile.name,
'target': authorized_file_name})
system.shell_execute(copy_key_cmd)
os.remove(tempkeyfile.name)
except exception.ProcessExecutionError:
LOG.exception(_("Cannot install public keys."))
os.remove(tempkeyfile.name)
raise
def _export_conf_to_members(self, members):
"""This method exports conf files to other members."""
try:
for member in members:
COPY_CMD = (system.SEND_CONF_TO_SERVER % (system.VERTICA_CONF,
member,
system.VERTICA_CONF))
system.shell_execute(COPY_CMD)
except exception.ProcessExecutionError:
LOG.exception(_("Cannot export configuration."))
raise
def install_cluster(self, members):
"""Installs & configures cluster."""
cluster_members = ','.join(members)
LOG.debug("Installing cluster with members: %s." % cluster_members)
self.install_vertica(cluster_members)
self._export_conf_to_members(members)
LOG.debug("Creating database with members: %s." % cluster_members)
self.create_db(cluster_members)
LOG.debug("Cluster configured on members: %s." % cluster_members)
def grow_cluster(self, members):
"""Adds nodes to cluster."""
cluster_members = ','.join(members)
LOG.debug("Growing cluster with members: %s." % cluster_members)
self.update_vertica("--add-hosts", cluster_members)
self._export_conf_to_members(members)
LOG.debug("Creating database with members: %s." % cluster_members)
self.add_db_to_node(cluster_members)
LOG.debug("Cluster configured on members: %s." % cluster_members)
def shrink_cluster(self, members):
"""Removes nodes from cluster."""
cluster_members = ','.join(members)
LOG.debug("Shrinking cluster with members: %s." % cluster_members)
self.remove_db_from_node(cluster_members)
self.update_vertica("--remove-hosts", cluster_members)
def wait_for_node_status(self, status='UP'):
"""Wait until all nodes are the same status"""
# select node_state from nodes where node_state <> 'UP'
def _wait_for_node_status():
out, err = system.exec_vsql_command(self._get_database_password(),
system.NODE_STATUS % status)
LOG.debug("Polled vertica node states: %s" % out)
if err:
LOG.error(err)
raise RuntimeError(_("Failed to query for root user."))
return "0 rows" in out
try:
utils.poll_until(_wait_for_node_status, time_out=600,
sleep_time=15)
except exception.PollTimeOut:
raise RuntimeError(_("Timed out waiting for cluster to"
"change to status %s") % status)
|
|
import os
from ctypes import CDLL, c_char_p, c_int, c_ulonglong, POINTER, byref, create_string_buffer
lib = CDLL('%s/../lib/0/libpqcrypto.so' % os.path.dirname(os.path.abspath(__file__)))
class wrap_hash:
def __init__(self,prefix,hlen):
self.hlen = hlen
self.c_hash = getattr(lib,'pqcrypto_hash_%s_impl' % prefix)
self.c_hash.argtypes = [c_char_p,c_char_p,c_ulonglong]
self.c_hash.restype = c_int
def hash(self,m):
mlen = c_ulonglong(len(m))
m = create_string_buffer(m)
h = create_string_buffer(self.hlen)
if self.c_hash(h,m,mlen): raise Exception('hash failed')
return h.raw
__call__ = hash
class wrap_onetimeauth:
def __init__(self,prefix,klen,alen):
self.klen = klen
self.alen = alen
self.c_auth = getattr(lib,'pqcrypto_onetimeauth_%s_impl' % prefix)
self.c_auth.argtypes = [c_char_p,c_char_p,c_ulonglong,c_char_p]
self.c_auth.restype = c_int
self.c_verify = getattr(lib,'pqcrypto_onetimeauth_%s_impl_verify' % prefix)
self.c_verify.argtypes = [c_char_p,c_char_p,c_ulonglong,c_char_p]
self.c_verify.restype = c_int
def auth(self,m,k):
assert len(k) == self.klen
mlen = c_ulonglong(len(m))
m = create_string_buffer(m)
k = create_string_buffer(k)
a = create_string_buffer(self.alen)
if self.c_auth(a,m,mlen,k): raise Exception('auth failed')
return a.raw
def verify(self,a,m,k):
assert len(k) == self.klen
assert len(a) == self.alen
mlen = c_ulonglong(len(m))
m = create_string_buffer(m)
k = create_string_buffer(k)
a = create_string_buffer(a)
if self.c_verify(a,m,mlen,k): raise Exception('verify failed')
__call__ = auth
class wrap_stream:
def __init__(self,prefix,klen,nlen):
self.klen = klen
self.nlen = nlen
self.c_stream = getattr(lib,'pqcrypto_stream_%s_impl' % prefix)
self.c_stream.argtypes = [c_char_p,c_ulonglong,c_char_p,c_char_p]
self.c_stream.restype = c_int
self.c_xor = getattr(lib,'pqcrypto_stream_%s_impl_xor' % prefix)
self.c_xor.argtypes = [c_char_p,c_char_p,c_ulonglong,c_char_p,c_char_p]
self.c_xor.restype = c_int
def stream(self,mlen,n,k):
assert len(n) == self.nlen
assert len(k) == self.klen
m = create_string_buffer(mlen)
mlen = c_ulonglong(mlen)
n = create_string_buffer(n)
k = create_string_buffer(k)
if self.c_stream(m,mlen,n,k): raise Exception('stream failed')
return m.raw
def xor(self,m,n,k):
assert len(n) == self.nlen
assert len(k) == self.klen
mlen = c_ulonglong(len(m))
c = create_string_buffer(len(m))
m = create_string_buffer(m)
n = create_string_buffer(n)
k = create_string_buffer(k)
# XXX: overlapping c with m doesn't seem to work with openssl aes
if self.c_xor(c,m,mlen,n,k): raise Exception('xor failed')
return c.raw
__call__ = stream
class wrap_scalarmult:
def __init__(self,prefix,pklen,sklen):
self.pklen = pklen
self.sklen = sklen
self.c_base = getattr(lib,'pqcrypto_scalarmult_%s_impl_base' % prefix)
self.c_base.argtypes = [c_char_p,c_char_p]
self.c_base.restype = c_int
self.c_scalarmult = getattr(lib,'pqcrypto_scalarmult_%s_impl' % prefix)
self.c_scalarmult.argtypes = [c_char_p,c_char_p,c_char_p]
self.c_scalarmult.restype = c_int
def base(self,sk):
assert len(sk) == self.sklen
sk = create_string_buffer(sk)
out = create_string_buffer(self.pklen)
if self.c_base(out,sk): raise Exception('scalarmult_base failed')
return out.raw
def scalarmult(self,sk,pk):
assert len(pk) == self.pklen
assert len(sk) == self.sklen
pk = create_string_buffer(pk)
sk = create_string_buffer(sk)
out = create_string_buffer(self.pklen)
if self.c_scalarmult(out,sk,pk): raise Exception('scalarmult failed')
return out.raw
__call__ = scalarmult
class wrap_sign:
def __init__(self,prefix,pklen,sklen,slen):
self.pklen = pklen
self.sklen = sklen
self.slen = slen
self.c_keypair = getattr(lib,'pqcrypto_sign_%s_impl_keypair' % prefix)
self.c_keypair.argtypes = [c_char_p,c_char_p]
self.c_keypair.restype = c_int
self.c_sign = getattr(lib,'pqcrypto_sign_%s_impl' % prefix)
self.c_sign.argtypes = [c_char_p,POINTER(c_ulonglong),c_char_p,c_ulonglong,c_char_p]
self.c_sign.restype = c_int
self.c_open = getattr(lib,'pqcrypto_sign_%s_impl_open' % prefix)
self.c_open.argtypes = [c_char_p,POINTER(c_ulonglong),c_char_p,c_ulonglong,c_char_p]
self.c_open.restype = c_int
def keypair(self):
pk = create_string_buffer(self.pklen)
sk = create_string_buffer(self.sklen)
if self.c_keypair(pk,sk): raise Exception('keypair failed')
return pk.raw,sk.raw
def sign(self,m,sk):
assert len(sk) == self.sklen
mlen = c_ulonglong(len(m))
smlen = c_ulonglong(0)
sm = create_string_buffer(len(m) + self.slen)
m = create_string_buffer(m)
sk = create_string_buffer(sk)
if self.c_sign(sm,byref(smlen),m,mlen,sk): raise Exception('sign failed')
return sm.raw[:smlen.value]
def open(self,sm,pk):
assert len(pk) == self.pklen
smlen = c_ulonglong(len(sm))
m = create_string_buffer(len(sm))
mlen = c_ulonglong(0)
pk = create_string_buffer(pk)
if self.c_open(m,byref(mlen),sm,smlen,pk): raise Exception('open failed')
return m.raw[:mlen.value]
__call__ = keypair
class wrap_kem:
def __init__(self,prefix,pklen,sklen,clen,klen):
self.pklen = pklen
self.sklen = sklen
self.clen = clen
self.klen = klen
self.c_keypair = getattr(lib,'pqcrypto_kem_%s_impl_keypair' % prefix)
self.c_keypair.argtypes = [c_char_p,c_char_p]
self.c_keypair.restype = c_int
self.c_enc = getattr(lib,'pqcrypto_kem_%s_impl_enc' % prefix)
self.c_enc.argtypes = [c_char_p,c_char_p,c_char_p]
self.c_enc.restype = c_int
self.c_dec = getattr(lib,'pqcrypto_kem_%s_impl_dec' % prefix)
self.c_dec.argtypes = [c_char_p,c_char_p,c_char_p]
self.c_dec.restype = c_int
def keypair(self):
pk = create_string_buffer(self.pklen)
sk = create_string_buffer(self.sklen)
if self.c_keypair(pk,sk): raise Exception('keypair failed')
return pk.raw,sk.raw
def enc(self,pk):
assert len(pk) == self.pklen
c = create_string_buffer(self.clen)
k = create_string_buffer(self.klen)
pk = create_string_buffer(pk)
if self.c_enc(c,k,pk): raise Exception('enc failed')
return c.raw,k.raw
def dec(self,c,sk):
assert len(c) == self.clen
assert len(sk) == self.sklen
k = create_string_buffer(self.klen)
c = create_string_buffer(c)
sk = create_string_buffer(sk)
if self.c_dec(k,c,sk): raise Exception('dec failed')
return k.raw
__call__ = keypair
class wrap_randombytes:
def __init__(self):
self.c_randombytes = getattr(lib,'pqrandombytes_impl')
self.c_randombytes.argtypes = [c_char_p,c_ulonglong]
self.c_randombytes.restype = None
def __call__(self,rlen):
r = create_string_buffer(rlen)
rlen = c_ulonglong(rlen)
self.c_randombytes(r,rlen)
return r.raw
randombytes = wrap_randombytes()
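# Illustrative sketch, not part of the wrapper: a typical KEM round trip using
# one of the kem.<primitive> objects attached further below.  Which primitive
# names exist depends on what was built, so the example takes the wrapped
# object as an argument instead of naming one.
def _example_kem_roundtrip(kem_impl):
    pk, sk = kem_impl.keypair()   # generate a key pair
    c, k_enc = kem_impl.enc(pk)   # encapsulate a shared key under the public key
    k_dec = kem_impl.dec(c, sk)   # decapsulate with the secret key
    assert k_enc == k_dec         # both sides now hold the same shared key
    return k_enc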
class struct:
pass
x = []
%crypto_hash: x += [('PRIMITIVE',CRYPTO_BYTES)]
hash = struct()
for p,hlen in x:
try:
setattr(hash,p,wrap_hash(p,hlen))
except:
pass
x = []
%crypto_onetimeauth: x += [('PRIMITIVE',CRYPTO_KEYBYTES,CRYPTO_BYTES)]
onetimeauth = struct()
for p,klen,alen in x:
try:
setattr(onetimeauth,p,wrap_onetimeauth(p,klen,alen))
except:
pass
x = []
%crypto_stream: x += [('PRIMITIVE',CRYPTO_KEYBYTES,CRYPTO_NONCEBYTES)]
stream = struct()
for p,klen,nlen in x:
try:
setattr(stream,p,wrap_stream(p,klen,nlen))
except:
pass
x = []
%crypto_scalarmult: x += [('PRIMITIVE',CRYPTO_BYTES,CRYPTO_SCALARBYTES)]
scalarmult = struct()
for p,pklen,sklen in x:
try:
setattr(scalarmult,p,wrap_scalarmult(p,pklen,sklen))
except:
pass
x = []
%crypto_sign: x += [('PRIMITIVE',CRYPTO_PUBLICKEYBYTES,CRYPTO_SECRETKEYBYTES,CRYPTO_BYTES)]
sign = struct()
for p,pklen,sklen,slen in x:
try:
setattr(sign,p,wrap_sign(p,pklen,sklen,slen))
except:
pass
x = []
%crypto_kem: x += [('PRIMITIVE',CRYPTO_PUBLICKEYBYTES,CRYPTO_SECRETKEYBYTES,CRYPTO_CIPHERTEXTBYTES,CRYPTO_BYTES)]
kem = struct()
for p,pklen,sklen,clen,klen in x:
try:
setattr(kem,p,wrap_kem(p,pklen,sklen,clen,klen))
except:
pass
|
|
# Copyright (c) 2011 Tencent Inc.
# All rights reserved.
#
# Author: Michaelpeng <michaelpeng@tencent.com>
# Date: October 20, 2011
"""
This is the target module, which provides the super class
of all of the scons targets.
"""
import os
import string
import configparse
import console
from blade_util import var_to_list
class Target(object):
"""Abstract target class.
This class should be derived by subclass like CcLibrary CcBinary
targets, etc.
"""
def __init__(self,
name,
target_type,
srcs,
deps,
visibility,
blade,
kwargs):
"""Init method.
Init the target.
"""
self.blade = blade
self.build_path = self.blade.get_build_path()
current_source_path = self.blade.get_current_source_path()
self.target_database = self.blade.get_target_database()
self.key = (current_source_path, name)
self.fullname = '%s:%s' % self.key
self.name = name
self.path = current_source_path
self.type = target_type
self.srcs = srcs
self.deps = []
self.expanded_deps = []
self.visibility = 'PUBLIC'
self.data = {}
config = configparse.blade_config.get_config('global_config')
self.data['test_timeout'] = config['test_timeout']
# Keep track of scons variables defined by scons rules
# generated by this target. Note that one blade target
# may correspond to several scons variables:
# proto_library: static lib/shared lib/jar variables
self.data['targets'] = {}
self.data['default_target'] = ''
self._check_name()
self._check_kwargs(kwargs)
self._check_srcs()
self._check_deps(deps)
self._init_target_deps(deps)
self._init_visibility(visibility)
self.scons_rule_buf = []
self.__cached_generate_header_files = None
def _clone_env(self):
"""Clone target's environment. """
self._write_rule('%s = top_env.Clone()' % self._env_name())
def _prepare_to_generate_rule(self):
"""Should be overridden. """
console.error_exit('_prepare_to_generate_rule should be overridden in subclasses')
def _check_name(self):
if '/' in self.name:
console.error_exit('//%s:%s: Invalid target name, should not contain dir part.' % (
self.path, self.name))
def _check_kwargs(self, kwargs):
if kwargs:
console.error_exit('//%s:%s: unrecognized options %s' % (
self.path, self.name, kwargs))
def _allow_duplicate_source(self):
"""Whether the target allows duplicate source file with other targets. """
return False
# Keep the relationship of all src -> target.
# Used by build rules to ensure that a source file occurs in
# exactly one target(only library target).
__src_target_map = {}
def _check_srcs(self):
"""Check source files.
"""
dups = []
srcset = set()
for s in self.srcs:
if s in srcset:
dups.append(s)
else:
srcset.add(s)
if dups:
console.error_exit('%s Duplicate source file paths: %s ' % (
self.fullname, dups))
# Check if one file belongs to two different targets.
config = configparse.blade_config.get_config('global_config')
action = config.get('duplicated_source_action')
for s in self.srcs:
if '..' in s or s.startswith('/'):
                console.error_exit('%s Invalid source file path: %s. '
                                   'It must be a relative path within the current '
                                   'directory or its subdirectories.' % (self.fullname, s))
src = os.path.normpath(os.path.join(self.path, s))
target = self.fullname, self._allow_duplicate_source()
if src not in Target.__src_target_map:
Target.__src_target_map[src] = target
else:
target_existed = Target.__src_target_map[src]
if target_existed != target:
# Always preserve the target which disallows
# duplicate source files in the map
if target_existed[1]:
Target.__src_target_map[src] = target
elif target[1]:
pass
else:
message = 'Source file %s belongs to {%s, %s}' % (
s, target_existed[0], target[0])
if action == 'error':
console.error_exit(message)
elif action == 'warning':
console.warning(message)
def _add_hardcode_library(self, hardcode_dep_list):
"""Add hardcode dep list to key's deps. """
for dep in hardcode_dep_list:
dkey = self._convert_string_to_target_helper(dep)
if dkey[0] == '#':
self._add_system_library(dkey, dep)
if dkey not in self.expanded_deps:
self.expanded_deps.append(dkey)
def _add_system_library(self, key, name):
"""Add system library entry to database. """
if key not in self.target_database:
lib = SystemLibrary(name, self.blade)
self.blade.register_target(lib)
def _add_location_reference_target(self, m):
"""
Parameters
-----------
m: A match object capturing the key and type of the referred target
Returns
-----------
(key, type): the key and type of the referred target
Description
-----------
Location reference makes it possible to refer to the build output of
another target in the code base.
General form:
$(location //path/to:target)
Some target types may produce more than one output according to the
build options. Then each output can be referenced by an additional
type tag:
$(location //path:name) # default target output
$(location //path:name jar) # jar output
$(location //path:name so) # so output
Note that this method accepts a match object instead of a simple str.
You could match/search/sub location references in a string with functions
or RegexObject in re module. For example:
m = {location regular expression}.search(s)
if m:
key, type = self._add_location_reference_target(m)
else:
# Not a location reference
"""
if m:
key, type = m.groups()
if not type:
type = ''
type = type.strip()
key = self._unify_dep(key)
if key not in self.expanded_deps:
self.expanded_deps.append(key)
if key not in self.deps:
self.deps.append(key)
return key, type
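    # Illustrative sketch of how a caller could drive the method above.  The
    # regular expression here is an assumption made for the example (the real
    # pattern lives with the callers); it only has to capture the //path:name
    # key and the optional type tag as the two groups that m.groups() unpacks:
    #
    #     location_re = re.compile(r'\$\(location\s+(\S+)(?:\s+(\w+))?\)')
    #     m = location_re.search('cp $(location //tools:gen jar) output/')
    #     if m:
    #         key, type = self._add_location_reference_target(m)
    #         # key == ('tools', 'gen'), type == 'jar'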
def _unify_dep(self, dep):
"""Unify dep to key"""
if dep[0] == ':':
# Depend on library in current directory
dkey = (os.path.normpath(self.path), dep[1:])
elif dep.startswith('//'):
# Depend on library in remote directory
if not ':' in dep:
raise Exception, 'Wrong format in %s:%s' % (
self.path, self.name)
(path, lib) = dep[2:].rsplit(':', 1)
dkey = (os.path.normpath(path), lib)
elif dep.startswith('#'):
# System library: it has no entry in any BUILD file, so we need
# to add the dep manually.
dkey = ('#', dep[1:])
self._add_system_library(dkey, dep)
else:
# Depend on library in relative subdirectory
if ':' not in dep:
raise Exception('Wrong format in %s:%s' % (
self.path, self.name))
(path, lib) = dep.rsplit(':', 1)
if '..' in path:
raise Exception("Don't use '..' in path")
dkey = (os.path.normpath('%s/%s' % (
self.path, path)), lib)
return dkey
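# A hedged illustration of the dep forms handled above (assuming this
# target lives in a BUILD file under the directory 'base'):
#   ':util'           -> ('base', 'util')              # same directory
#   '//common:string' -> ('common', 'string')          # path from workspace root
#   '#pthread'        -> ('#', 'pthread')              # system library
#   'sub/dir:helper'  -> ('base/sub/dir', 'helper')    # relative subdirectory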
def _init_target_deps(self, deps):
"""Init the target deps.
Parameters
-----------
deps: the deps list in BUILD file.
Description
-----------
Add target into target database and init the deps list.
"""
for d in deps:
dkey = self._unify_dep(d)
if dkey not in self.expanded_deps:
self.expanded_deps.append(dkey)
if dkey not in self.deps:
self.deps.append(dkey)
def _check_format(self, t):
"""
Parameters
-----------
t: could be a dep or visibility specified in BUILD file
Description
-----------
Do some basic format check.
"""
if not (t.startswith(':') or t.startswith('#') or
t.startswith('//') or t.startswith('./')):
console.error_exit('%s: Invalid %s.' % (self.fullname, t))
if t.count(':') > 1:
console.error_exit('%s: Invalid %s, missing \',\' between?' %
(self.fullname, t))
def _check_deps(self, deps):
"""_check_deps
Parameters
-----------
deps: the deps list in BUILD file
Description
-----------
Check whether deps are in valid format.
"""
for dep in deps:
self._check_format(dep)
def _init_visibility(self, visibility):
"""
Parameters
-----------
visibility: the visibility list in BUILD file
Description
-----------
Visibility determines whether another target is able to depend
on this target.
Visibility specifies a list of targets in the same form as deps,
i.e. //path/to:target. The default value of visibility is PUBLIC,
which means this target is visible globally within the code base.
Note that targets inside the same BUILD file are always visible
to each other.
"""
if visibility is None:
return
visibility = var_to_list(visibility)
if visibility == ['PUBLIC']:
return
self.visibility = []
for v in visibility:
self._check_format(v)
key = self._unify_dep(v)
if key not in self.visibility:
self.visibility.append(key)
def _check_deprecated_deps(self):
"""check that whether it depends upon deprecated target.
It should be overridden in subclass.
"""
pass
def _get_java_pack_deps(self):
"""_get_java_pack_deps
Returns
-----------
A tuple of (scons vars, jars)
Description
-----------
Return java package dependencies excluding provided dependencies
scons vars represent targets to be built later
jars represent prebuilt jars or maven artifacts within local repository
"""
return [], []
def _regular_variable_name(self, var):
"""_regular_variable_name.
Parameters
-----------
var: the variable to be modified
Returns
-----------
s: the variable modified
Description
-----------
Replace the chars that scons doesn't recognize.
"""
return var.translate(string.maketrans(',-/.+*', '______'))
def _generate_variable_name(self, path, name, suffix=''):
"""_generate_variable_name.
Parameters
-----------
path: the target's path
name: the target's name
suffix: the suffix to be appended to the variable
Returns
-----------
The variable that contains target path, target name and suffix
Description
-----------
Concatenates target path, target name and suffix and returns the result.
"""
suffix_str = ''
if suffix:
suffix_str = '_suFFix_%s' % suffix
return 'v_%s_mAgIc_%s%s' % (self._regular_variable_name(path),
self._regular_variable_name(name),
suffix_str)
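# Hedged example of the naming scheme above (',', '-', '/', '.', '+' and '*'
# are all mapped to '_' by _regular_variable_name):
#   _generate_variable_name('common/base', 'string-util', 'so')
#   -> 'v_common_base_mAgIc_string_util_suFFix_so'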
def _env_name(self):
"""_env_name.
Returns
-----------
The environment variable
Description
-----------
Concatenates target path and target name into an environment variable name and returns it.
"""
return 'env_%s' % self._generate_variable_name(self.path, self.name)
def _var_name(self, suffix=''):
"""_var_name.
Returns
-----------
The scons variable name
"""
return self._generate_variable_name(self.path, self.name, suffix)
def _var_name_of(self, name, suffix=''):
"""_var_name_of.
Returns
-----------
The scons variable name
"""
return self._generate_variable_name(self.path, name, suffix)
def _source_file_path(self, name):
"""_source_file_path.
Parameters
-----------
name: the source's name
Returns
-----------
The source's full path in full blade dir tree
"""
return os.path.normpath(os.path.join(self.path, name))
def _target_file_path(self, name=''):
"""_target_file_path.
Parameters
-----------
name: the target's name
Returns
-----------
The target's path below building path
Description
-----------
Concatenating building path, target path and target name to be full
file path.
"""
if not name:
name = self.name
return os.path.join(self.build_path, self.path, name)
def _add_target_var(self, target_type, scons_var):
"""
Parameters
-----------
target_type: type of the scons variable as key in the dictionary
scons_var: the scons variable name as value in the dictionary
Description
-----------
Keep track of the scons variable built by scons rules.
Set the default target if needed.
"""
self.data['targets'][target_type] = scons_var
if not self.data['default_target']:
self.data['default_target'] = scons_var
def _add_default_target_var(self, target_type, scons_var):
"""
Parameters
-----------
target_type: type of the scons variable as key in the dictionary
scons_var: the scons variable name as value in the dictionary
Description
-----------
Keep track of the default scons variable which could be referenced
later without specifying type
"""
self.data['default_target'] = scons_var
self._add_target_var(target_type, scons_var)
def _get_target_var(self, target_type = ''):
"""
Parameters
-----------
target_type: type of the scons variable
Returns
-----------
The scons variable name
Description
-----------
Return the scons variable corresponding to the type passed in,
return empty if type doesn't exist in the dictionary
"""
if target_type:
return self.data['targets'].get(target_type, '')
return self.data['default_target']
def _get_target_vars(self):
"""
Returns
-----------
All the scons variable names built by scons rules
"""
return self.data['targets'].values()
def __generate_header_files(self):
for dkey in self.deps:
dep = self.target_database[dkey]
if dep._generate_header_files():
return True
return False
def _generate_header_files(self):
"""Whether this target generates header files during building."""
if self.__cached_generate_header_files is None:
self.__cached_generate_header_files = self.__generate_header_files()
return self.__cached_generate_header_files
def _write_rule(self, rule):
"""_write_rule.
Parameters
-----------
rule: the rule generated by certain target
Description
-----------
Append the rule to the buffer at first.
"""
self.scons_rule_buf.append('%s\n' % rule)
def scons_rules(self):
"""scons_rules.
This method should be implemented in subclass.
"""
console.error_exit('%s: scons_rules() should be implemented in a subclass' % self.type)
def get_rules(self):
"""get_rules.
Returns
-----------
The scons rules buffer
Description
-----------
Returns the buffer.
"""
return self.scons_rule_buf
def _convert_string_to_target_helper(self, target_string):
"""
Converting a string like thirdparty/gtest:gtest to tuple
(target_path, target_name)
"""
if target_string:
if target_string.startswith('#'):
return ('#', target_string[1:])
elif target_string.find(':') != -1:
path, name = target_string.split(':')
path = path.strip()
if path.startswith('//'):
path = path[2:]
return (path, name.strip())
console.error_exit('invalid target lib format: %s, '
'should be #lib_name or lib_path:lib_name' %
target_string)
class SystemLibrary(Target):
def __init__(self, name, blade):
name = name[1:]
Target.__init__(self, name, 'system_library', [], [], None, blade, {})
self.key = ('#', name)
self.fullname = '%s:%s' % self.key
self.path = '#'
|
|
# -*- coding: UTF-8 -*-
"""
Title: Dumpscript management command
Project: Hardytools (queryset-refactor version)
Author: Will Hardy (http://willhardy.com.au)
Date: June 2008
Usage: python manage.py dumpscript appname > scripts/scriptname.py
$Revision: 217 $
Description:
Generates a Python script that will repopulate the database using objects.
The advantage of this approach is that it is easy to understand, and more
flexible than directly populating the database, or using XML.
* It also allows for new defaults to take effect and only transfers what is
needed.
* If a new database schema has a NEW ATTRIBUTE, it is simply not
populated (using a default value will make the transition smooth :)
* If a new database schema REMOVES AN ATTRIBUTE, it is simply ignored
and the data moves across safely (I'm assuming we don't want this
attribute anymore).
* Problems may only occur if there is a new model that is now a required
ForeignKey for an existing model. But this is easy to fix by editing the
populate script. Half of the job is already done as all ForeignKey
lookups occur through the locate_object() function in the generated script.
Improvements:
See TODOs and FIXMEs scattered throughout :-)
"""
import sys
import datetime
import six
from optparse import make_option
import django
from django.db.models import AutoField, BooleanField, FileField, ForeignKey, DateField, DateTimeField
from django.core.exceptions import ObjectDoesNotExist
from django.core.management.base import BaseCommand
# conditional import, force_unicode was renamed in Django 1.5
from django.contrib.contenttypes.models import ContentType
try:
from django.utils.encoding import smart_unicode, force_unicode # NOQA
except ImportError:
from django.utils.encoding import smart_text as smart_unicode, force_text as force_unicode # NOQA
def orm_item_locator(orm_obj):
"""
This function is called every time an object that will not be exported is required.
Where orm_obj is the referred object.
We postpone the lookup to locate_object() which will be run on the generated script
"""
the_class = orm_obj._meta.object_name
original_class = the_class
pk_name = orm_obj._meta.pk.name
original_pk_name = pk_name
pk_value = getattr(orm_obj, pk_name)
while hasattr(pk_value, "_meta") and hasattr(pk_value._meta, "pk") and hasattr(pk_value._meta.pk, "name"):
the_class = pk_value._meta.object_name
pk_name = pk_value._meta.pk.name
pk_value = getattr(pk_value, pk_name)
clean_dict = make_clean_dict(orm_obj.__dict__)
for key in clean_dict:
v = clean_dict[key]
if v is not None and not isinstance(v, (six.string_types, six.integer_types, float, datetime.datetime)):
clean_dict[key] = six.u("%s" % v)
output = """ importer.locate_object(%s, "%s", %s, "%s", %s, %s ) """ % (
original_class, original_pk_name,
the_class, pk_name, pk_value, clean_dict
)
return output
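# Hedged sketch of the string produced above: for a hypothetical Person
# instance whose primary key is id=7, the generated script would contain
# something like
#   importer.locate_object(Person, "id", Person, "id", 7, {'name': u'Alice'})
# which is why the referenced class also has to be importable in the
# generated script (see the "__extra_imports" bookkeeping below).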
class Command(BaseCommand):
option_list = BaseCommand.option_list + (
make_option('--autofield', action='store_false', dest='skip_autofield',
default=True, help='Include Autofields (like pk fields)'),
)
help = 'Dumps the data as a customised python script.'
args = '[appname ...]'
def handle(self, *app_labels, **options):
# Get the models we want to export
models = get_models(app_labels)
# A dictionary is created to keep track of all the processed objects,
# so that foreign key references can be made using python variable names.
# This variable "context" will be passed around like the town bicycle.
context = {}
# Create a dumpscript object and let it format itself as a string
script = Script(
models=models,
context=context,
stdout=self.stdout,
stderr=self.stderr,
options=options,
)
self.stdout.write(str(script))
self.stdout.write("\n")
def get_models(app_labels):
""" Gets a list of models for the given app labels, with some exceptions.
TODO: If a required model is referenced, it should also be included.
Or at least discovered with a get_or_create() call.
"""
from django.db.models import get_app, get_apps, get_model
from django.db.models import get_models as get_all_models
# These models are not to be output, e.g. because they can be generated automatically
# TODO: This should be "appname.modelname" string
EXCLUDED_MODELS = (ContentType, )
models = []
# If no app labels are given, return all
if not app_labels:
for app in get_apps():
models += [m for m in get_all_models(app) if m not in EXCLUDED_MODELS]
return models
# Get all relevant apps
for app_label in app_labels:
# If a specific model is mentioned, get only that model
if "." in app_label:
app_label, model_name = app_label.split(".", 1)
models.append(get_model(app_label, model_name))
# Get all models for a given app
else:
models += [m for m in get_all_models(get_app(app_label)) if m not in EXCLUDED_MODELS]
return models
class Code(object):
""" A snippet of python script.
This keeps track of import statements and can be output to a string.
In the future, other features such as custom indentation might be included
in this class.
"""
def __init__(self, indent=-1, stdout=None, stderr=None):
if not stdout:
stdout = sys.stdout
if not stderr:
stderr = sys.stderr
self.indent = indent
self.stdout = stdout
self.stderr = stderr
def __str__(self):
""" Returns a string representation of this script.
"""
if self.imports:
self.stderr.write(repr(self.import_lines))
return flatten_blocks([""] + self.import_lines + [""] + self.lines, num_indents=self.indent)
else:
return flatten_blocks(self.lines, num_indents=self.indent)
def get_import_lines(self):
""" Takes the stored imports and converts them to lines
"""
if self.imports:
return ["from %s import %s" % (value, key) for key, value in self.imports.items()]
else:
return []
import_lines = property(get_import_lines)
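# Hedged example: with self.imports == {'User': 'django.contrib.auth.models'},
# import_lines evaluates to ["from django.contrib.auth.models import User"].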
class ModelCode(Code):
" Produces a python script that can recreate data for a given model class. "
def __init__(self, model, context=None, stdout=None, stderr=None, options=None):
super(ModelCode, self).__init__(indent=0, stdout=stdout, stderr=stderr)
self.model = model
if context is None:
context = {}
self.context = context
self.options = options
self.instances = []
def get_imports(self):
""" Returns a dictionary of import statements, with the variable being
defined as the key.
"""
return {self.model.__name__: smart_unicode(self.model.__module__)}
imports = property(get_imports)
def get_lines(self):
""" Returns a list of lists or strings, representing the code body.
Each list is a block, each string is a statement.
"""
code = []
for counter, item in enumerate(self.model._default_manager.all()):
instance = InstanceCode(instance=item, id=counter + 1, context=self.context, stdout=self.stdout, stderr=self.stderr, options=self.options)
self.instances.append(instance)
if instance.waiting_list:
code += instance.lines
# After each instance has been processed, try again.
# This allows self referencing fields to work.
for instance in self.instances:
if instance.waiting_list:
code += instance.lines
return code
lines = property(get_lines)
class InstanceCode(Code):
" Produces a python script that can recreate data for a given model instance. "
def __init__(self, instance, id, context=None, stdout=None, stderr=None, options=None):
""" We need the instance in question and an id """
super(InstanceCode, self).__init__(indent=0, stdout=stdout, stderr=stderr)
self.imports = {}
self.options = options
self.instance = instance
self.model = self.instance.__class__
if context is None:
context = {}
self.context = context
self.variable_name = "%s_%s" % (self.instance._meta.db_table, id)
self.skip_me = None
self.instantiated = False
self.waiting_list = list(self.model._meta.fields)
self.many_to_many_waiting_list = {}
for field in self.model._meta.many_to_many:
self.many_to_many_waiting_list[field] = list(getattr(self.instance, field.name).all())
def get_lines(self, force=False):
""" Returns a list of lists or strings, representing the code body.
Each list is a block, each string is a statement.
force (True or False): if an attribute object cannot be included,
it is usually skipped to be processed later. With 'force' set, there
will be no waiting: a get_or_create() call is written instead.
"""
code_lines = []
# Don't return anything if this is an instance that should be skipped
if self.skip():
return []
# Initialise our new object
# e.g. model_name_35 = Model()
code_lines += self.instantiate()
# Add each field
# e.g. model_name_35.field_one = 1034.91
# model_name_35.field_two = "text"
code_lines += self.get_waiting_list()
if force:
# TODO: Check that M2M are not affected
code_lines += self.get_waiting_list(force=force)
# Print the save command for our new object
# e.g. model_name_35.save()
if code_lines:
code_lines.append("%s = importer.save_or_locate(%s)\n" % (self.variable_name, self.variable_name))
code_lines += self.get_many_to_many_lines(force=force)
return code_lines
lines = property(get_lines)
def skip(self):
""" Determine whether or not this object should be skipped.
If this model instance is a parent of a single subclassed
instance, skip it. The subclassed instance will create this
parent instance for us.
TODO: Allow the user to force its creation?
"""
if self.skip_me is not None:
return self.skip_me
def get_skip_version():
""" Return which version of the skip code should be run
Django's deletion code was refactored in r14507 which
was just two days before 1.3 alpha 1 (r14519)
"""
if not hasattr(self, '_SKIP_VERSION'):
version = django.VERSION
# no, it isn't lisp. I swear.
self._SKIP_VERSION = (
version[0] > 1 or ( # django 2k... someday :)
version[0] == 1 and ( # 1.x
version[1] >= 4 or # 1.4+
version[1] == 3 and not ( # 1.3.x
(version[3] == 'alpha' and version[1] == 0)
)
)
)
) and 2 or 1 # NOQA
return self._SKIP_VERSION
if get_skip_version() == 1:
try:
# Django trunk since r7722 uses CollectedObjects instead of dict
from django.db.models.query import CollectedObjects
sub_objects = CollectedObjects()
except ImportError:
# previous versions don't have CollectedObjects
sub_objects = {}
self.instance._collect_sub_objects(sub_objects)
sub_objects = sub_objects.keys()
elif get_skip_version() == 2:
from django.db.models.deletion import Collector
from django.db import router
cls = self.instance.__class__
using = router.db_for_write(cls, instance=self.instance)
collector = Collector(using=using)
collector.collect([self.instance], collect_related=False)
# collector stores its instances in two places. I *think* we
# only need collector.data, but using the batches is needed
# to perfectly emulate the old behaviour
# TODO: check if batches are really needed. If not, remove them.
sub_objects = sum([list(i) for i in collector.data.values()], [])
if hasattr(collector, 'batches'):
# Django 1.6 removed batches for being dead code
# https://github.com/django/django/commit/a170c3f755351beb35f8166ec3c7e9d524d9602
for batch in collector.batches.values():
# batch.values can be sets, which must be converted to lists
sub_objects += sum([list(i) for i in batch.values()], [])
sub_objects_parents = [so._meta.parents for so in sub_objects]
if [self.model in p for p in sub_objects_parents].count(True) == 1:
# since this instance isn't explicitly created, its variable name
# can't be referenced in the script, so record None in the context dict
pk_name = self.instance._meta.pk.name
key = '%s_%s' % (self.model.__name__, getattr(self.instance, pk_name))
self.context[key] = None
self.skip_me = True
else:
self.skip_me = False
return self.skip_me
def instantiate(self):
" Write lines for instantiation "
# e.g. model_name_35 = Model()
code_lines = []
if not self.instantiated:
code_lines.append("%s = %s()" % (self.variable_name, self.model.__name__))
self.instantiated = True
# Store our variable name for future foreign key references
pk_name = self.instance._meta.pk.name
key = '%s_%s' % (self.model.__name__, getattr(self.instance, pk_name))
self.context[key] = self.variable_name
return code_lines
def get_waiting_list(self, force=False):
" Add lines for any waiting fields that can be completed now. "
code_lines = []
skip_autofield = self.options.get('skip_autofield', True)
# Process normal fields
for field in list(self.waiting_list):
try:
# Find the value, add the line, remove from waiting list and move on
value = get_attribute_value(self.instance, field, self.context, force=force, skip_autofield=skip_autofield)
code_lines.append('%s.%s = %s' % (self.variable_name, field.name, value))
self.waiting_list.remove(field)
except SkipValue:
# Remove from the waiting list and move on
self.waiting_list.remove(field)
continue
except DoLater:
# Move on, maybe next time
continue
return code_lines
def get_many_to_many_lines(self, force=False):
""" Generates lines that define many to many relations for this instance. """
lines = []
for field, rel_items in self.many_to_many_waiting_list.items():
for rel_item in list(rel_items):
try:
pk_name = rel_item._meta.pk.name
key = '%s_%s' % (rel_item.__class__.__name__, getattr(rel_item, pk_name))
value = "%s" % self.context[key]
lines.append('%s.%s.add(%s)' % (self.variable_name, field.name, value))
self.many_to_many_waiting_list[field].remove(rel_item)
except KeyError:
if force:
item_locator = orm_item_locator(rel_item)
self.context["__extra_imports"][rel_item._meta.object_name] = rel_item.__module__
lines.append('%s.%s.add( %s )' % (self.variable_name, field.name, item_locator))
self.many_to_many_waiting_list[field].remove(rel_item)
if lines:
lines.append("")
return lines
class Script(Code):
" Produces a complete python script that can recreate data for the given apps. "
def __init__(self, models, context=None, stdout=None, stderr=None, options=None):
super(Script, self).__init__(stdout=stdout, stderr=stderr)
self.imports = {}
self.models = models
if context is None:
context = {}
self.context = context
self.context["__avaliable_models"] = set(models)
self.context["__extra_imports"] = {}
self.options = options
def _queue_models(self, models, context):
""" Works an an appropriate ordering for the models.
This isn't essential, but makes the script look nicer because
more instances can be defined on their first try.
"""
# Max number of cycles allowed before we call it an infinite loop.
MAX_CYCLES = 5
model_queue = []
number_remaining_models = len(models)
allowed_cycles = MAX_CYCLES
while number_remaining_models > 0:
previous_number_remaining_models = number_remaining_models
model = models.pop(0)
# If the model is ready to be processed, add it to the list
if check_dependencies(model, model_queue, context["__avaliable_models"]):
model_class = ModelCode(model=model, context=context, stdout=self.stdout, stderr=self.stderr, options=self.options)
model_queue.append(model_class)
# Otherwise put the model back at the end of the list
else:
models.append(model)
# Check for infinite loops.
# This means there is a cyclic foreign key structure
# That cannot be resolved by re-ordering
number_remaining_models = len(models)
if number_remaining_models == previous_number_remaining_models:
allowed_cycles -= 1
if allowed_cycles <= 0:
# Add the remaining models, but do not remove them from the model list
missing_models = [ModelCode(model=m, context=context, stdout=self.stdout, stderr=self.stderr, options=self.options) for m in models]
model_queue += missing_models
# Replace the models with the model class objects
# (sure, this is a little bit of hackery)
models[:] = missing_models
break
else:
allowed_cycles = MAX_CYCLES
return model_queue
def get_lines(self):
""" Returns a list of lists or strings, representing the code body.
Each list is a block, each string is a statement.
"""
code = [self.FILE_HEADER.strip()]
# Queue and process the required models
for model_class in self._queue_models(self.models, context=self.context):
msg = 'Processing model: %s\n' % model_class.model.__name__
self.stderr.write(msg)
code.append(" # " + msg)
code.append(model_class.import_lines)
code.append("")
code.append(model_class.lines)
# Process left over foreign keys from cyclic models
for model in self.models:
msg = 'Re-processing model: %s\n' % model.model.__name__
self.stderr.write(msg)
code.append(" # " + msg)
for instance in model.instances:
if instance.waiting_list or instance.many_to_many_waiting_list:
code.append(instance.get_lines(force=True))
code.insert(1, " # Initial Imports")
code.insert(2, "")
for key, value in self.context["__extra_imports"].items():
code.insert(2, " from %s import %s" % (value, key))
return code
lines = property(get_lines)
# A user-friendly file header
FILE_HEADER = """
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# This file has been automatically generated.
# Instead of changing it, create a file called import_helper.py
# and put there a class called ImportHelper(object) in it.
#
# This class will be specially cast so that instead of extending object,
# it will actually extend the class BasicImportHelper()
#
# That means you just have to overload the methods you want to
# change, leaving the other ones intact.
#
# Something that you might want to do is use transactions, for example.
#
# Also, don't forget to add the necessary Django imports.
#
# This file was generated with the following command:
# %s
#
# to restore it, run
# manage.py runscript module_name.this_script_name
#
# example: if manage.py is at ./manage.py
# and the script is at ./some_folder/some_script.py
# you must make sure ./some_folder/__init__.py exists
# and run ./manage.py runscript some_folder.some_script
from django.db import transaction
class BasicImportHelper(object):
def pre_import(self):
pass
# You probably want to uncomment one of these two lines
# @transaction.atomic # Django 1.6
# @transaction.commit_on_success # Django <1.6
def run_import(self, import_data):
import_data()
def post_import(self):
pass
def locate_similar(self, current_object, search_data):
# You will probably want to call this method from save_or_locate()
# Example:
# new_obj = self.locate_similar(the_obj, {"national_id": the_obj.national_id } )
the_obj = current_object.__class__.objects.get(**search_data)
return the_obj
def locate_object(self, original_class, original_pk_name, the_class, pk_name, pk_value, obj_content):
# You may change this function to do specific lookup for specific objects
#
# original_class class of the django orm's object that needs to be located
# original_pk_name the primary key of original_class
# the_class parent class of original_class which contains obj_content
# pk_name the primary key of original_class
# pk_value value of the primary_key
# obj_content content of the object which was not exported.
#
# You should use obj_content to locate the object on the target db
#
# An example where original_class and the_class are different is
# when original_class is Farmer and the_class is Person. The table
# may refer to a Farmer but you will actually need to locate Person
# in order to instantiate that Farmer
#
# Example:
# if the_class == SurveyResultFormat or the_class == SurveyType or the_class == SurveyState:
# pk_name="name"
# pk_value=obj_content[pk_name]
# if the_class == StaffGroup:
# pk_value=8
search_data = { pk_name: pk_value }
the_obj = the_class.objects.get(**search_data)
#print(the_obj)
return the_obj
def save_or_locate(self, the_obj):
# Change this if you want to locate the object in the database
try:
the_obj.save()
except:
print("---------------")
print("Error saving the following object:")
print(the_obj.__class__)
print(" ")
print(the_obj.__dict__)
print(" ")
print(the_obj)
print(" ")
print("---------------")
raise
return the_obj
importer = None
try:
import import_helper
# We need this so ImportHelper can extend BasicImportHelper, although import_helper.py
# has no knowledge of this class
importer = type("DynamicImportHelper", (import_helper.ImportHelper, BasicImportHelper ) , {} )()
except ImportError as e:
if str(e) == "No module named import_helper":
importer = BasicImportHelper()
else:
raise
import datetime
import os
import sys
from decimal import Decimal
from django.contrib.contenttypes.models import ContentType
try:
import dateutil.parser
except ImportError:
print("Please install python-dateutil")
sys.exit(os.EX_USAGE)
def run():
importer.pre_import()
importer.run_import(import_data)
importer.post_import()
def import_data():
""" % " ".join(sys.argv)
# HELPER FUNCTIONS
#-------------------------------------------------------------------------------
def flatten_blocks(lines, num_indents=-1):
""" Takes a list (block) or string (statement) and flattens it into a string
with indentation.
"""
# The standard indent is four spaces
INDENTATION = " " * 4
if not lines:
return ""
# If this is a string, add the indentation and finish here
if isinstance(lines, six.string_types):
return INDENTATION * num_indents + lines
# If this is not a string, join the lines and recurse
return "\n".join([flatten_blocks(line, num_indents + 1) for line in lines])
def get_attribute_value(item, field, context, force=False, skip_autofield=True):
""" Gets a string version of the given attribute's value, like repr() might. """
# Find the value of the field, catching any database issues
try:
value = getattr(item, field.name)
except ObjectDoesNotExist:
raise SkipValue('Could not find object for %s.%s, ignoring.\n' % (item.__class__.__name__, field.name))
# AutoField: We don't include the auto fields, they'll be automatically recreated
if skip_autofield and isinstance(field, AutoField):
raise SkipValue()
# Some databases (eg MySQL) might store boolean values as 0/1, this needs to be cast as a bool
elif isinstance(field, BooleanField) and value is not None:
return repr(bool(value))
# Post file-storage-refactor, repr() on File/ImageFields no longer returns the path
elif isinstance(field, FileField):
return repr(force_unicode(value))
# ForeignKey fields, link directly using our stored python variable name
elif isinstance(field, ForeignKey) and value is not None:
# Special case for contenttype foreign keys: no need to output any
# content types in this script, as they can be generated again
# automatically.
# NB: Not sure if "is" will always work
if field.rel.to is ContentType:
return 'ContentType.objects.get(app_label="%s", model="%s")' % (value.app_label, value.model)
# Generate an identifier (key) for this foreign object
pk_name = value._meta.pk.name
key = '%s_%s' % (value.__class__.__name__, getattr(value, pk_name))
if key in context:
variable_name = context[key]
# If the context value is set to None, this should be skipped.
# This identifies models that have been skipped (inheritance)
if variable_name is None:
raise SkipValue()
# Return the variable name listed in the context
return "%s" % variable_name
elif value.__class__ not in context["__avaliable_models"] or force:
context["__extra_imports"][value._meta.object_name] = value.__module__
item_locator = orm_item_locator(value)
return item_locator
else:
raise DoLater('(FK) %s.%s\n' % (item.__class__.__name__, field.name))
elif isinstance(field, (DateField, DateTimeField)) and value is not None:
return "dateutil.parser.parse(\"%s\")" % value.isoformat()
# A normal field (e.g. a python built-in)
else:
return repr(value)
def make_clean_dict(the_dict):
if "_state" in the_dict:
clean_dict = the_dict.copy()
del clean_dict["_state"]
return clean_dict
return the_dict
def check_dependencies(model, model_queue, avaliable_models):
" Check that all the depenedencies for this model are already in the queue. "
# A list of allowed links: existing fields, itself and the special case ContentType
allowed_links = [m.model.__name__ for m in model_queue] + [model.__name__, 'ContentType']
# For each ForeignKey or ManyToMany field, check that a link is possible
for field in model._meta.fields:
if field.rel and field.rel.to.__name__ not in allowed_links:
if field.rel.to not in avaliable_models:
continue
return False
for field in model._meta.many_to_many:
if field.rel and field.rel.to.__name__ not in allowed_links:
return False
return True
# EXCEPTIONS
#-------------------------------------------------------------------------------
class SkipValue(Exception):
""" Value could not be parsed or should simply be skipped. """
class DoLater(Exception):
""" Value could not be parsed or should simply be skipped. """
|
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains the definition for the PNASNet classification networks.
Paper: https://arxiv.org/abs/1712.00559
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import tensorflow as tf
from tensorflow.contrib import framework as contrib_framework
from tensorflow.contrib import slim as contrib_slim
from tensorflow.contrib import training as contrib_training
from nets.nasnet import nasnet
from nets.nasnet import nasnet_utils
arg_scope = contrib_framework.arg_scope
slim = contrib_slim
def large_imagenet_config():
"""Large ImageNet configuration based on PNASNet-5."""
return contrib_training.HParams(
stem_multiplier=3.0,
dense_dropout_keep_prob=0.5,
num_cells=12,
filter_scaling_rate=2.0,
num_conv_filters=216,
drop_path_keep_prob=0.6,
use_aux_head=1,
num_reduction_layers=2,
data_format='NHWC',
skip_reduction_layer_input=1,
total_training_steps=250000,
use_bounded_activation=False,
)
def mobile_imagenet_config():
"""Mobile ImageNet configuration based on PNASNet-5."""
return contrib_training.HParams(
stem_multiplier=1.0,
dense_dropout_keep_prob=0.5,
num_cells=9,
filter_scaling_rate=2.0,
num_conv_filters=54,
drop_path_keep_prob=1.0,
use_aux_head=1,
num_reduction_layers=2,
data_format='NHWC',
skip_reduction_layer_input=1,
total_training_steps=250000,
use_bounded_activation=False,
)
def pnasnet_large_arg_scope(weight_decay=4e-5, batch_norm_decay=0.9997,
batch_norm_epsilon=0.001):
"""Default arg scope for the PNASNet Large ImageNet model."""
return nasnet.nasnet_large_arg_scope(
weight_decay, batch_norm_decay, batch_norm_epsilon)
def pnasnet_mobile_arg_scope(weight_decay=4e-5,
batch_norm_decay=0.9997,
batch_norm_epsilon=0.001):
"""Default arg scope for the PNASNet Mobile ImageNet model."""
return nasnet.nasnet_mobile_arg_scope(weight_decay, batch_norm_decay,
batch_norm_epsilon)
def _build_pnasnet_base(images,
normal_cell,
num_classes,
hparams,
is_training,
final_endpoint=None):
"""Constructs a PNASNet image model."""
end_points = {}
def add_and_check_endpoint(endpoint_name, net):
end_points[endpoint_name] = net
return final_endpoint and (endpoint_name == final_endpoint)
# Find where to place the reduction cells or stride normal cells
reduction_indices = nasnet_utils.calc_reduction_layers(
hparams.num_cells, hparams.num_reduction_layers)
# pylint: disable=protected-access
stem = lambda: nasnet._imagenet_stem(images, hparams, normal_cell)
# pylint: enable=protected-access
net, cell_outputs = stem()
if add_and_check_endpoint('Stem', net):
return net, end_points
# Setup for building in the auxiliary head.
aux_head_cell_idxes = []
if len(reduction_indices) >= 2:
aux_head_cell_idxes.append(reduction_indices[1] - 1)
# Run the cells
filter_scaling = 1.0
# true_cell_num accounts for the stem cells
true_cell_num = 2
activation_fn = tf.nn.relu6 if hparams.use_bounded_activation else tf.nn.relu
for cell_num in range(hparams.num_cells):
is_reduction = cell_num in reduction_indices
stride = 2 if is_reduction else 1
if is_reduction: filter_scaling *= hparams.filter_scaling_rate
if hparams.skip_reduction_layer_input or not is_reduction:
prev_layer = cell_outputs[-2]
net = normal_cell(
net,
scope='cell_{}'.format(cell_num),
filter_scaling=filter_scaling,
stride=stride,
prev_layer=prev_layer,
cell_num=true_cell_num)
if add_and_check_endpoint('Cell_{}'.format(cell_num), net):
return net, end_points
true_cell_num += 1
cell_outputs.append(net)
if (hparams.use_aux_head and cell_num in aux_head_cell_idxes and
num_classes and is_training):
aux_net = activation_fn(net)
# pylint: disable=protected-access
nasnet._build_aux_head(aux_net, end_points, num_classes, hparams,
scope='aux_{}'.format(cell_num))
# pylint: enable=protected-access
# Final softmax layer
with tf.compat.v1.variable_scope('final_layer'):
net = activation_fn(net)
net = nasnet_utils.global_avg_pool(net)
if add_and_check_endpoint('global_pool', net) or not num_classes:
return net, end_points
net = slim.dropout(net, hparams.dense_dropout_keep_prob, scope='dropout')
logits = slim.fully_connected(net, num_classes)
if add_and_check_endpoint('Logits', logits):
return net, end_points
predictions = tf.nn.softmax(logits, name='predictions')
if add_and_check_endpoint('Predictions', predictions):
return net, end_points
return logits, end_points
def build_pnasnet_large(images,
num_classes,
is_training=True,
final_endpoint=None,
config=None):
"""Build PNASNet Large model for the ImageNet Dataset."""
hparams = copy.deepcopy(config) if config else large_imagenet_config()
# pylint: disable=protected-access
nasnet._update_hparams(hparams, is_training)
# pylint: enable=protected-access
if tf.test.is_gpu_available() and hparams.data_format == 'NHWC':
tf.compat.v1.logging.info(
'A GPU is available on the machine, consider using NCHW '
'data format for increased speed on GPU.')
if hparams.data_format == 'NCHW':
images = tf.transpose(a=images, perm=[0, 3, 1, 2])
# Calculate the total number of cells in the network.
# There is no distinction between reduction and normal cells in PNAS so the
# total number of cells is equal to the number of normal cells plus the number
# of stem cells (two by default).
total_num_cells = hparams.num_cells + 2
normal_cell = PNasNetNormalCell(hparams.num_conv_filters,
hparams.drop_path_keep_prob, total_num_cells,
hparams.total_training_steps,
hparams.use_bounded_activation)
with arg_scope(
[slim.dropout, nasnet_utils.drop_path, slim.batch_norm],
is_training=is_training):
with arg_scope([slim.avg_pool2d, slim.max_pool2d, slim.conv2d,
slim.batch_norm, slim.separable_conv2d,
nasnet_utils.factorized_reduction,
nasnet_utils.global_avg_pool,
nasnet_utils.get_channel_index,
nasnet_utils.get_channel_dim],
data_format=hparams.data_format):
return _build_pnasnet_base(
images,
normal_cell=normal_cell,
num_classes=num_classes,
hparams=hparams,
is_training=is_training,
final_endpoint=final_endpoint)
build_pnasnet_large.default_image_size = 331
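# Hedged usage sketch (TF1 graph mode; the 1001-class count assumes the usual
# ImageNet-with-background label set and is illustrative only):
#   images = tf.compat.v1.placeholder(tf.float32, [None, 331, 331, 3])
#   with arg_scope(pnasnet_large_arg_scope()):
#       logits, end_points = build_pnasnet_large(
#           images, num_classes=1001, is_training=False)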
def build_pnasnet_mobile(images,
num_classes,
is_training=True,
final_endpoint=None,
config=None):
"""Build PNASNet Mobile model for the ImageNet Dataset."""
hparams = copy.deepcopy(config) if config else mobile_imagenet_config()
# pylint: disable=protected-access
nasnet._update_hparams(hparams, is_training)
# pylint: enable=protected-access
if tf.test.is_gpu_available() and hparams.data_format == 'NHWC':
tf.compat.v1.logging.info(
'A GPU is available on the machine, consider using NCHW '
'data format for increased speed on GPU.')
if hparams.data_format == 'NCHW':
images = tf.transpose(a=images, perm=[0, 3, 1, 2])
# Calculate the total number of cells in the network.
# There is no distinction between reduction and normal cells in PNAS so the
# total number of cells is equal to the number of normal cells plus the number
# of stem cells (two by default).
total_num_cells = hparams.num_cells + 2
normal_cell = PNasNetNormalCell(hparams.num_conv_filters,
hparams.drop_path_keep_prob, total_num_cells,
hparams.total_training_steps,
hparams.use_bounded_activation)
with arg_scope(
[slim.dropout, nasnet_utils.drop_path, slim.batch_norm],
is_training=is_training):
with arg_scope(
[
slim.avg_pool2d, slim.max_pool2d, slim.conv2d, slim.batch_norm,
slim.separable_conv2d, nasnet_utils.factorized_reduction,
nasnet_utils.global_avg_pool, nasnet_utils.get_channel_index,
nasnet_utils.get_channel_dim
],
data_format=hparams.data_format):
return _build_pnasnet_base(
images,
normal_cell=normal_cell,
num_classes=num_classes,
hparams=hparams,
is_training=is_training,
final_endpoint=final_endpoint)
build_pnasnet_mobile.default_image_size = 224
class PNasNetNormalCell(nasnet_utils.NasNetABaseCell):
"""PNASNet Normal Cell."""
def __init__(self, num_conv_filters, drop_path_keep_prob, total_num_cells,
total_training_steps, use_bounded_activation=False):
# Configuration for the PNASNet-5 model.
operations = [
'separable_5x5_2', 'max_pool_3x3', 'separable_7x7_2', 'max_pool_3x3',
'separable_5x5_2', 'separable_3x3_2', 'separable_3x3_2', 'max_pool_3x3',
'separable_3x3_2', 'none'
]
used_hiddenstates = [1, 1, 0, 0, 0, 0, 0]
hiddenstate_indices = [1, 1, 0, 0, 0, 0, 4, 0, 1, 0]
super(PNasNetNormalCell, self).__init__(
num_conv_filters, operations, used_hiddenstates, hiddenstate_indices,
drop_path_keep_prob, total_num_cells, total_training_steps,
use_bounded_activation)
|
|
#!/usr/bin/env python
from __future__ import absolute_import
import datetime
import json
import os
import re
import shutil
import sqlite3
import warnings
from subprocess import Popen, PIPE
from textwrap import dedent
from unittest import TestCase, main
import scraperwiki
import six
import sys
# scraperwiki.sql._State.echo = True
DB_NAME = 'scraperwiki.sqlite'
class Setup(TestCase):
def test_setup(self):
try:
os.remove('scraperwiki.sqlite')
except OSError:
pass
# called TestAAAWarning so that it gets run first by nosetests,
# which we need, otherwise the warning has already happened.
class TestAAAWarning(TestCase):
def test_save_no_warn(self):
with warnings.catch_warnings():
warnings.simplefilter("error")
scraperwiki.sql.save(['id'], dict(id=4, tumble='weed'),
table_name="warning_test")
class TestSaveGetVar(TestCase):
def savegetvar(self, var):
scraperwiki.sql.save_var(u"weird\u1234", var)
self.assertEqual(scraperwiki.sql.get_var(u"weird\u1234"), var)
def test_string(self):
self.savegetvar(u"asdio\u1234")
def test_int(self):
self.savegetvar(1)
def test_float(self):
self.savegetvar(1.1)
def test_bool(self):
self.savegetvar(False)
def test_bool2(self):
self.savegetvar(True)
def test_bytes(self):
self.savegetvar(b"asodpa\x00\x22")
def test_date(self):
date1 = datetime.datetime.now()
date2 = datetime.date.today()
scraperwiki.sql.save_var(u"weird\u1234", date1)
self.assertEqual(scraperwiki.sql.get_var(u"weird\u1234"), six.text_type(date1))
scraperwiki.sql.save_var(u"weird\u1234", date2)
self.assertEqual(scraperwiki.sql.get_var(u"weird\u1234"), six.text_type(date2))
def test_save_multiple_values(self):
scraperwiki.sql.save_var(u'foo\xc3', u'hello')
scraperwiki.sql.save_var(u'bar', u'goodbye\u1234')
self.assertEqual(u'hello', scraperwiki.sql.get_var(u'foo\xc3'))
self.assertEqual(u'goodbye\u1234', scraperwiki.sql.get_var(u'bar'))
class TestGetNonexistantVar(TestCase):
def test_get(self):
self.assertIsNone(scraperwiki.sql.get_var(u'meatball\xff'))
class TestSaveVar(TestCase):
def setUp(self):
super(TestSaveVar, self).setUp()
scraperwiki.sql.save_var(u"birthday\xfe", u"\u1234November 30, 1888")
connection = sqlite3.connect(DB_NAME)
self.cursor = connection.cursor()
def test_insert(self):
self.cursor.execute(u"""
SELECT name, value_blob, type
FROM `swvariables`
WHERE name == "birthday\xfe"
""")
((colname, value, _type),) = self.cursor.fetchall()
expected = [(u"birthday\xfe", u"\u1234November 30, 1888", "text",)]
observed = [(colname, type(b'')(value).decode('utf-8'), _type)]
self.assertEqual(observed, expected)
class SaveAndCheck(TestCase):
def save_and_check(self, dataIn, tableIn, dataOut, tableOut=None, twice=True):
if tableOut is None:
tableOut = '[' + tableIn + ']'
# Insert
with scraperwiki.sql.Transaction():
scraperwiki.sql.save([], dataIn, tableIn)
# Observe with pysqlite
connection = sqlite3.connect(DB_NAME)
cursor = connection.cursor()
cursor.execute(u"SELECT * FROM %s" % tableOut)
observed1 = cursor.fetchall()
connection.close()
if twice:
# Observe using this module
observed2 = scraperwiki.sql.select(u'* FROM %s' % tableOut)
# Check
expected1 = dataOut
expected2 = [dataIn] if type(dataIn) == dict else dataIn
self.assertListEqual(observed1, expected1)
self.assertListEqual(observed2, expected2)
class SaveAndSelect(TestCase):
def save_and_select(self, d):
scraperwiki.sql.save([], {u"foo\xdd": d})
observed = scraperwiki.sql.select(u'* FROM swdata')[0][u'foo\xdd']
self.assertEqual(d, observed)
class TestUniqueKeys(SaveAndSelect):
def test_empty(self):
scraperwiki.sql.save([], {u"foo\xde": 3}, table_name=u"Chico\xcc")
observed = scraperwiki.sql.execute(u'PRAGMA index_list(Chico\xcc)')
self.assertEqual(observed, {u'data': [], u'keys': []})
def test_two(self):
scraperwiki.sql.save([u'foo\xdc', u'bar\xcd'], {u'foo\xdc': 3, u'bar\xcd': 9}, u'Harpo\xbb')
observed = scraperwiki.sql.execute(
u'PRAGMA index_info(Harpo_foo_bar_unique)')
# Indexness
self.assertIsNotNone(observed)
# Indexed columns
expected1 = {
u'keys': [u'seqno', u'cid', u'name'],
u'data': [
(0, 0, u'foo\xdc'),
(1, 1, u'bar\xcd'),
]
}
expected2 = {
u'keys': [u'seqno', u'cid', u'name'],
u'data': [
(0, 1, u'foo\xdc'),
(1, 0, u'bar\xcd'),
]
}
try:
self.assertDictEqual(observed, expected1)
except Exception:
self.assertDictEqual(observed, expected2)
# Uniqueness
indices = scraperwiki.sql.execute(u'PRAGMA index_list(Harpo\xbb)')
namecol = indices[u"keys"].index(u'name')
for index in indices[u"data"]:
if index[namecol] == u'Harpo_foo_bar_unique':
break
else:
index = {}
uniquecol = indices[u"keys"].index(u'unique')
self.assertEqual(index[uniquecol], 1)
class TestSaveColumn(TestCase):
def test_add_column(self):
# Indicative for
# https://github.com/scraperwiki/scraperwiki-python/issues/64
# The bug is that in the first .save() of a process, a
# new column cannot be added if the table already exists.
# Because it's only the first .save() of a process, we
# need to run a subprocess.
connection = sqlite3.connect(DB_NAME)
cursor = connection.cursor()
cursor.execute(u'CREATE TABLE frigled\xaa (a TEXT);')
cursor.execute(u'INSERT INTO frigled\xaa VALUES ("boo\xaa")')
connection.close()
script = dedent(u"""
import scraperwiki
scraperwiki.sql.save(['id'], dict(id=1, a=u"bar\xaa", b=u"foo\xaa"))
""")
with open("/dev/null") as null:
process = Popen([sys.executable, "-c", script],
stdout=PIPE, stderr=PIPE, stdin=null)
stdout, stderr = process.communicate()
assert process.returncode == 0
self.assertEqual(stdout, "".encode('utf-8'))
self.assertEqual(stderr, "".encode('utf-8'))
class TestSave(SaveAndCheck):
def test_save_int(self):
self.save_and_check(
{u"model-number\xaa": 293}, u"model-numbers\xaa", [(293,)]
)
def test_save_string(self):
self.save_and_check(
{u"lastname\xaa": u"LeTourneau\u1234"}, u"diesel-engineers\xaa", [
(u'LeTourneau\u1234',)]
)
# Ensure we can round-trip a string and then json encode it.
# https://github.com/scraperwiki/scraperwiki-python/pull/85
scraperwiki.sql.save([], {"test": "teststring"}, table_name="teststring")
data = scraperwiki.sql.select("* FROM teststring")
json.dumps(data)
def test_save_twice(self):
self.save_and_check(
{u"modelNumber\xaa": 293}, u"modelNumbers", [(293,)]
)
self.save_and_check(
{u"modelNumber\xaa": 293}, u"modelNumbers\xaa", [(293,), (293,)], twice=False
)
def test_save_true(self):
self.save_and_check(
{u"a": True}, u"true", [(1,)]
)
def test_save_false(self):
self.save_and_check(
{u"a": False}, u"false", [(0,)]
)
def test_save_table_name(self):
"""
Test that after we use table_name= in one .save() a
subsequent .save without table_name= uses the `swdata`
table again.
"""
scraperwiki.sql.save(['id'], dict(id=1, stuff=1),
table_name=u'sticky\u1234')
scraperwiki.sql.save(['id'], dict(id=2, stuff=2))
results = scraperwiki.sql.select(u'* FROM sticky\u1234')
self.assertEqual(1, len(results))
(row, ) = results
self.assertDictEqual(dict(id=1, stuff=1), row)
def test_lxml_string(self):
"""Save lxml string."""
import lxml.html
# See https://github.com/scraperwiki/scraperwiki-python/issues/65
# Careful, this looks like a string (eg, when printed or
# repr()d), but is actually an instance of some class
# internal to lxml.
s = lxml.html.fromstring(b'<b>Hello&#1234;</b>').xpath(b'//b')[0].text_content()
self.save_and_check(
{"text": s},
"lxml",
[(six.text_type(s),)]
)
def test_save_and_drop(self):
scraperwiki.sql.save([], dict(foo=7), table_name=u"dropper\xaa")
scraperwiki.sql.execute(u"DROP TABLE dropper\xaa")
scraperwiki.sql.save([], dict(foo=9), table_name=u"dropper\xaa")
class TestQuestionMark(TestCase):
def test_one_question_mark_with_nonlist(self):
scraperwiki.sql.execute(u'CREATE TABLE zhuozi\xaa (\xaa TEXT);')
scraperwiki.sql.execute(u'INSERT INTO zhuozi\xaa VALUES (?)', u'apple\xff')
observed = scraperwiki.sql.select(u'* FROM zhuozi\xaa')
self.assertListEqual(observed, [{u'\xaa': u'apple\xff'}])
scraperwiki.sql.execute(u'DROP TABLE zhuozi\xaa')
def test_one_question_mark_with_list(self):
scraperwiki.sql.execute(u'CREATE TABLE zhuozi\xaa (\xaa TEXT);')
scraperwiki.sql.execute(u'INSERT INTO zhuozi\xaa VALUES (?)', [u'apple\xff'])
observed = scraperwiki.sql.select(u'* FROM zhuozi\xaa')
self.assertListEqual(observed, [{u'\xaa': u'apple\xff'}])
scraperwiki.sql.execute(u'DROP TABLE zhuozi\xaa')
def test_multiple_question_marks(self):
scraperwiki.sql.execute('CREATE TABLE zhuozi (a TEXT, b TEXT);')
scraperwiki.sql.execute(
'INSERT INTO zhuozi VALUES (?, ?)', ['apple', 'banana'])
observed = scraperwiki.sql.select('* FROM zhuozi')
self.assertListEqual(observed, [{'a': 'apple', 'b': 'banana'}])
scraperwiki.sql.execute('DROP TABLE zhuozi')
class TestDateTime(TestCase):
def rawdate(self, table="swdata", column="datetime"):
connection = sqlite3.connect(DB_NAME)
cursor = connection.cursor()
cursor.execute(u"SELECT {} FROM {}".format(column, table))
rawdate = cursor.fetchall()[0][0]
connection.close()
return rawdate
def test_save_date(self):
d = datetime.datetime.strptime('1991-03-30', '%Y-%m-%d').date()
with scraperwiki.sql.Transaction():
scraperwiki.sql.save([], {u"birthday\xaa": d})
self.assertEqual(
[{u'birthday\xaa': str(d)}],
scraperwiki.sql.select("* FROM swdata"))
self.assertEqual(
{u'keys': [u'birthday\xaa'], u'data': [(six.text_type(d),)]},
scraperwiki.sql.execute("SELECT * FROM swdata"))
self.assertEqual(str(d), self.rawdate(column=u"birthday\xaa"))
def test_save_datetime(self):
d = datetime.datetime.strptime('1990-03-30', '%Y-%m-%d')
with scraperwiki.sql.Transaction():
scraperwiki.sql.save([], {"birthday": d},
table_name="datetimetest")
exemplar = six.text_type(d)
# SQLAlchemy appears to convert with extended precision.
exemplar += ".000000"
self.assertEqual(
[{u'birthday': exemplar}],
scraperwiki.sql.select("* FROM datetimetest"))
self.assertDictEqual(
{u'keys': [u'birthday'], u'data': [(exemplar,)]},
scraperwiki.sql.execute("SELECT * FROM datetimetest"))
self.assertEqual(exemplar, self.rawdate(table="datetimetest", column="birthday"))
class TestStatus(TestCase):
'Test that the status endpoint works.'
def test_does_nothing_if_called_outside_box(self):
scraperwiki.status('ok')
def test_raises_exception_with_invalid_type_field(self):
self.assertRaises(AssertionError, scraperwiki.status, 'hello')
# XXX need some mocking tests for the case of running inside a box
class TestImports(TestCase):
'Test that all module contents are imported.'
def setUp(self):
self.sw = __import__('scraperwiki')
def test_import_scraperwiki_root(self):
self.sw.scrape
def test_import_scraperwiki_sqlite(self):
self.sw.sqlite
def test_import_scraperwiki_sql(self):
self.sw.sql
def test_import_scraperwiki_status(self):
self.sw.status
def test_import_scraperwiki_utils(self):
self.sw.utils
def test_import_scraperwiki_special_utils(self):
self.sw.pdftoxml
if __name__ == '__main__':
main()
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
General image database
An image database creates a list of relative image paths called image_set_index and
transforms an index into an absolute image path. For training, ground
truth and proposals are mixed together.
roidb
basic format [image_index]
['image', 'height', 'width', 'flipped',
'boxes', 'gt_classes', 'gt_overlaps', 'max_classes', 'max_overlaps', 'bbox_targets']
"""
from ..logger import logger
import os
try:
import cPickle as pickle
except ImportError:
import pickle
import numpy as np
from ..processing.bbox_transform import bbox_overlaps
class IMDB(object):
def __init__(self, name, image_set, root_path, dataset_path):
"""
basic information about an image database
:param name: name of the image database; it will be used for any output
:param image_set: name of the image set
:param root_path: root path that stores cache and proposal data
:param dataset_path: dataset path that stores images and image lists
"""
self.name = name + '_' + image_set
self.image_set = image_set
self.root_path = root_path
self.data_path = dataset_path
# abstract attributes
self.classes = []
self.num_classes = 0
self.image_set_index = []
self.num_images = 0
self.config = {}
def image_path_from_index(self, index):
raise NotImplementedError
def gt_roidb(self):
raise NotImplementedError
def evaluate_detections(self, detections):
raise NotImplementedError
@property
def cache_path(self):
"""
make a directory to store all caches
:return: cache path
"""
cache_path = os.path.join(self.root_path, 'cache')
if not os.path.exists(cache_path):
os.mkdir(cache_path)
return cache_path
def image_path_at(self, index):
"""
access image at index in image database
:param index: image index in image database
:return: image path
"""
return self.image_path_from_index(self.image_set_index[index])
def load_rpn_data(self, full=False):
if full:
rpn_file = os.path.join(self.root_path, 'rpn_data', self.name + '_full_rpn.pkl')
else:
rpn_file = os.path.join(self.root_path, 'rpn_data', self.name + '_rpn.pkl')
assert os.path.exists(rpn_file), '%s rpn data not found at %s' % (self.name, rpn_file)
logger.info('%s loading rpn data from %s' % (self.name, rpn_file))
with open(rpn_file, 'rb') as f:
box_list = pickle.load(f)
return box_list
def load_rpn_roidb(self, gt_roidb):
"""
turn rpn detection boxes into roidb
:param gt_roidb: [image_index]['boxes', 'gt_classes', 'gt_overlaps', 'flipped']
:return: roidb: [image_index]['boxes', 'gt_classes', 'gt_overlaps', 'flipped']
"""
box_list = self.load_rpn_data()
return self.create_roidb_from_box_list(box_list, gt_roidb)
def rpn_roidb(self, gt_roidb, append_gt=False):
"""
get rpn roidb and ground truth roidb
:param gt_roidb: ground truth roidb
:param append_gt: append ground truth
:return: roidb of rpn
"""
if append_gt:
logger.info('%s appending ground truth annotations' % self.name)
rpn_roidb = self.load_rpn_roidb(gt_roidb)
roidb = IMDB.merge_roidbs(gt_roidb, rpn_roidb)
else:
roidb = self.load_rpn_roidb(gt_roidb)
return roidb
def create_roidb_from_box_list(self, box_list, gt_roidb):
"""
given ground truth, prepare roidb
:param box_list: [image_index] ndarray of [box_index][x1, y1, x2, y2]
:param gt_roidb: [image_index]['boxes', 'gt_classes', 'gt_overlaps', 'flipped']
:return: roidb: [image_index]['boxes', 'gt_classes', 'gt_overlaps', 'flipped']
"""
assert len(box_list) == self.num_images, 'number of boxes matrix must match number of images'
roidb = []
for i in range(self.num_images):
roi_rec = dict()
roi_rec['image'] = gt_roidb[i]['image']
roi_rec['height'] = gt_roidb[i]['height']
roi_rec['width'] = gt_roidb[i]['width']
boxes = box_list[i]
if boxes.shape[1] == 5:
boxes = boxes[:, :4]
num_boxes = boxes.shape[0]
overlaps = np.zeros((num_boxes, self.num_classes), dtype=np.float32)
if gt_roidb is not None and gt_roidb[i]['boxes'].size > 0:
gt_boxes = gt_roidb[i]['boxes']
gt_classes = gt_roidb[i]['gt_classes']
# n boxes and k gt_boxes => n * k overlap
gt_overlaps = bbox_overlaps(boxes.astype(np.float), gt_boxes.astype(np.float))
# for each box in n boxes, select only maximum overlap (must be greater than zero)
argmaxes = gt_overlaps.argmax(axis=1)
maxes = gt_overlaps.max(axis=1)
I = np.where(maxes > 0)[0]
overlaps[I, gt_classes[argmaxes[I]]] = maxes[I]
roi_rec.update({'boxes': boxes,
'gt_classes': np.zeros((num_boxes,), dtype=np.int32),
'gt_overlaps': overlaps,
'max_classes': overlaps.argmax(axis=1),
'max_overlaps': overlaps.max(axis=1),
'flipped': False})
# background roi => background class
zero_indexes = np.where(roi_rec['max_overlaps'] == 0)[0]
assert all(roi_rec['max_classes'][zero_indexes] == 0)
# foreground roi => foreground class
nonzero_indexes = np.where(roi_rec['max_overlaps'] > 0)[0]
assert all(roi_rec['max_classes'][nonzero_indexes] != 0)
roidb.append(roi_rec)
return roidb
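    # Illustrative example (hypothetical numbers, not part of the original code): with
    # num_classes == 3, a proposal whose best IoU against a class-2 ground-truth box is 0.7
    # gets the overlaps row [0.0, 0.0, 0.7], so its 'max_classes' entry is 2 and its
    # 'max_overlaps' entry is 0.7; a proposal with no ground-truth overlap keeps an all-zero
    # row and is assigned to background (class 0), which is what the assertions above check.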
def append_flipped_images(self, roidb):
"""
append flipped images to an roidb
        flip box coordinates; images are actually flipped when loaded into the network
:param roidb: [image_index]['boxes', 'gt_classes', 'gt_overlaps', 'flipped']
:return: roidb: [image_index]['boxes', 'gt_classes', 'gt_overlaps', 'flipped']
"""
logger.info('%s append flipped images to roidb' % self.name)
assert self.num_images == len(roidb)
for i in range(self.num_images):
roi_rec = roidb[i]
boxes = roi_rec['boxes'].copy()
oldx1 = boxes[:, 0].copy()
oldx2 = boxes[:, 2].copy()
boxes[:, 0] = roi_rec['width'] - oldx2 - 1
boxes[:, 2] = roi_rec['width'] - oldx1 - 1
assert (boxes[:, 2] >= boxes[:, 0]).all()
entry = {'image': roi_rec['image'],
'height': roi_rec['height'],
'width': roi_rec['width'],
'boxes': boxes,
'gt_classes': roidb[i]['gt_classes'],
'gt_overlaps': roidb[i]['gt_overlaps'],
'max_classes': roidb[i]['max_classes'],
'max_overlaps': roidb[i]['max_overlaps'],
'flipped': True}
roidb.append(entry)
self.image_set_index *= 2
return roidb
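    # Worked example of the flip arithmetic above (hypothetical numbers): in an image of
    # width 100, a box with x1 = 10, x2 = 20 becomes x1 = 100 - 20 - 1 = 79 and
    # x2 = 100 - 10 - 1 = 89, so x2 >= x1 still holds and the y coordinates are untouched.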
def evaluate_recall(self, roidb, candidate_boxes=None, thresholds=None):
"""
evaluate detection proposal recall metrics
record max overlap value for each gt box; return vector of overlap values
:param roidb: used to evaluate
:param candidate_boxes: if not given, use roidb's non-gt boxes
        :param thresholds: array-like of recall thresholds (defaults to 0.5:0.05:0.95)
        :return: None; logs proposal statistics and prints, for each area range, the
            average recall (ar) and the recall at each IoU overlap threshold
"""
area_names = ['all', '0-25', '25-50', '50-100',
'100-200', '200-300', '300-inf']
area_ranges = [[0**2, 1e5**2], [0**2, 25**2], [25**2, 50**2], [50**2, 100**2],
[100**2, 200**2], [200**2, 300**2], [300**2, 1e5**2]]
area_counts = []
for area_name, area_range in zip(area_names[1:], area_ranges[1:]):
area_count = 0
for i in range(self.num_images):
if candidate_boxes is None:
# default is use the non-gt boxes from roidb
non_gt_inds = np.where(roidb[i]['gt_classes'] == 0)[0]
boxes = roidb[i]['boxes'][non_gt_inds, :]
else:
boxes = candidate_boxes[i]
boxes_areas = (boxes[:, 2] - boxes[:, 0] + 1) * (boxes[:, 3] - boxes[:, 1] + 1)
valid_range_inds = np.where((boxes_areas >= area_range[0]) & (boxes_areas < area_range[1]))[0]
area_count += len(valid_range_inds)
area_counts.append(area_count)
total_counts = float(sum(area_counts))
for area_name, area_count in zip(area_names[1:], area_counts):
logger.info('percentage of %s is %f' % (area_name, area_count / total_counts))
logger.info('average number of proposal is %f' % (total_counts / self.num_images))
for area_name, area_range in zip(area_names, area_ranges):
gt_overlaps = np.zeros(0)
num_pos = 0
for i in range(self.num_images):
            # checking max_gt_overlaps == 1 avoids including crowd annotations
max_gt_overlaps = roidb[i]['gt_overlaps'].max(axis=1)
gt_inds = np.where((roidb[i]['gt_classes'] > 0) & (max_gt_overlaps == 1))[0]
gt_boxes = roidb[i]['boxes'][gt_inds, :]
gt_areas = (gt_boxes[:, 2] - gt_boxes[:, 0] + 1) * (gt_boxes[:, 3] - gt_boxes[:, 1] + 1)
valid_gt_inds = np.where((gt_areas >= area_range[0]) & (gt_areas < area_range[1]))[0]
gt_boxes = gt_boxes[valid_gt_inds, :]
num_pos += len(valid_gt_inds)
if candidate_boxes is None:
# default is use the non-gt boxes from roidb
non_gt_inds = np.where(roidb[i]['gt_classes'] == 0)[0]
boxes = roidb[i]['boxes'][non_gt_inds, :]
else:
boxes = candidate_boxes[i]
if boxes.shape[0] == 0:
continue
overlaps = bbox_overlaps(boxes.astype(np.float), gt_boxes.astype(np.float))
_gt_overlaps = np.zeros((gt_boxes.shape[0]))
# choose whatever is smaller to iterate
rounds = min(boxes.shape[0], gt_boxes.shape[0])
for j in range(rounds):
# find which proposal maximally covers each gt box
argmax_overlaps = overlaps.argmax(axis=0)
# get the IoU amount of coverage for each gt box
max_overlaps = overlaps.max(axis=0)
# find which gt box is covered by most IoU
gt_ind = max_overlaps.argmax()
gt_ovr = max_overlaps.max()
assert (gt_ovr >= 0), '%s\n%s\n%s' % (boxes, gt_boxes, overlaps)
# find the proposal box that covers the best covered gt box
box_ind = argmax_overlaps[gt_ind]
# record the IoU coverage of this gt box
_gt_overlaps[j] = overlaps[box_ind, gt_ind]
assert (_gt_overlaps[j] == gt_ovr)
# mark the proposal box and the gt box as used
overlaps[box_ind, :] = -1
overlaps[:, gt_ind] = -1
# append recorded IoU coverage level
gt_overlaps = np.hstack((gt_overlaps, _gt_overlaps))
gt_overlaps = np.sort(gt_overlaps)
if thresholds is None:
step = 0.05
thresholds = np.arange(0.5, 0.95 + 1e-5, step)
recalls = np.zeros_like(thresholds)
# compute recall for each IoU threshold
for i, t in enumerate(thresholds):
recalls[i] = (gt_overlaps >= t).sum() / float(num_pos)
ar = recalls.mean()
# print results
print('average recall for {}: {:.3f}'.format(area_name, ar))
for threshold, recall in zip(thresholds, recalls):
print('recall @{:.2f}: {:.3f}'.format(threshold, recall))
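    # Illustrative recall computation (hypothetical numbers): if an area range has
    # num_pos = 4 ground-truth boxes and the matched gt_overlaps are [0.3, 0.55, 0.72, 0.95],
    # then recall@0.5 = 3/4 = 0.75 and recall@0.7 = 2/4 = 0.50, and 'ar' is the mean of the
    # recalls over the thresholds 0.5, 0.55, ..., 0.95.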
@staticmethod
def merge_roidbs(a, b):
"""
merge roidbs into one
:param a: roidb to be merged into
:param b: roidb to be merged
        :return: merged roidb (contents of b appended into a)
"""
assert len(a) == len(b)
for i in range(len(a)):
a[i]['boxes'] = np.vstack((a[i]['boxes'], b[i]['boxes']))
a[i]['gt_classes'] = np.hstack((a[i]['gt_classes'], b[i]['gt_classes']))
a[i]['gt_overlaps'] = np.vstack((a[i]['gt_overlaps'], b[i]['gt_overlaps']))
a[i]['max_classes'] = np.hstack((a[i]['max_classes'], b[i]['max_classes']))
a[i]['max_overlaps'] = np.hstack((a[i]['max_overlaps'], b[i]['max_overlaps']))
return a
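# Usage sketch (hypothetical subclass and paths; not part of this module): a concrete dataset
# would subclass IMDB, implement gt_roidb() and image_path_from_index(), and a typical
# proposal pipeline then looks like:
#     imdb = MyDataset('train', 'data', 'data/mydataset')   # hypothetical subclass
#     gt_roidb = imdb.gt_roidb()
#     roidb = imdb.rpn_roidb(gt_roidb, append_gt=True)       # merge RPN proposals with GT
#     roidb = imdb.append_flipped_images(roidb)              # horizontal-flip augmentation
# rpn_roidb() expects a pickled proposal file under <root_path>/rpn_data/ (see load_rpn_data).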
|
|
"""Tests for the Hyperion integration."""
from __future__ import annotations
from datetime import timedelta
from unittest.mock import AsyncMock, Mock, call, patch
from hyperion import const
from homeassistant.components.hyperion import (
get_hyperion_device_id,
light as hyperion_light,
)
from homeassistant.components.hyperion.const import (
CONF_EFFECT_HIDE_LIST,
DEFAULT_ORIGIN,
DOMAIN,
HYPERION_MANUFACTURER_NAME,
HYPERION_MODEL_NAME,
TYPE_HYPERION_PRIORITY_LIGHT,
)
from homeassistant.components.light import (
ATTR_BRIGHTNESS,
ATTR_EFFECT,
ATTR_HS_COLOR,
DOMAIN as LIGHT_DOMAIN,
)
from homeassistant.config_entries import (
RELOAD_AFTER_UPDATE_DELAY,
SOURCE_REAUTH,
ConfigEntry,
ConfigEntryState,
)
from homeassistant.const import (
ATTR_ENTITY_ID,
CONF_HOST,
CONF_PORT,
CONF_SOURCE,
CONF_TOKEN,
SERVICE_TURN_OFF,
SERVICE_TURN_ON,
)
from homeassistant.core import HomeAssistant
from homeassistant.helpers import device_registry as dr, entity_registry as er
from homeassistant.util import dt
import homeassistant.util.color as color_util
from . import (
TEST_AUTH_NOT_REQUIRED_RESP,
TEST_AUTH_REQUIRED_RESP,
TEST_CONFIG_ENTRY_ID,
TEST_ENTITY_ID_1,
TEST_ENTITY_ID_2,
TEST_ENTITY_ID_3,
TEST_HOST,
TEST_ID,
TEST_INSTANCE,
TEST_INSTANCE_1,
TEST_INSTANCE_2,
TEST_INSTANCE_3,
TEST_PORT,
TEST_PRIORITY,
TEST_PRIORITY_LIGHT_ENTITY_ID_1,
TEST_SYSINFO_ID,
add_test_config_entry,
call_registered_callback,
create_mock_client,
register_test_entity,
setup_test_config_entry,
)
from tests.common import async_fire_time_changed
COLOR_BLACK = color_util.COLORS["black"]
def _get_config_entry_from_unique_id(
hass: HomeAssistant, unique_id: str
) -> ConfigEntry | None:
for entry in hass.config_entries.async_entries(domain=DOMAIN):
        if entry.unique_id == unique_id:
return entry
return None
async def test_setup_config_entry(hass: HomeAssistant) -> None:
"""Test setting up the component via config entries."""
await setup_test_config_entry(hass, hyperion_client=create_mock_client())
assert hass.states.get(TEST_ENTITY_ID_1) is not None
async def test_setup_config_entry_not_ready_connect_fail(
hass: HomeAssistant,
) -> None:
"""Test the component not being ready."""
client = create_mock_client()
client.async_client_connect = AsyncMock(return_value=False)
await setup_test_config_entry(hass, hyperion_client=client)
assert hass.states.get(TEST_ENTITY_ID_1) is None
async def test_setup_config_entry_not_ready_switch_instance_fail(
hass: HomeAssistant,
) -> None:
"""Test the component not being ready."""
client = create_mock_client()
client.async_client_switch_instance = AsyncMock(return_value=False)
await setup_test_config_entry(hass, hyperion_client=client)
assert client.async_client_disconnect.called
assert hass.states.get(TEST_ENTITY_ID_1) is None
async def test_setup_config_entry_not_ready_load_state_fail(
hass: HomeAssistant,
) -> None:
"""Test the component not being ready."""
client = create_mock_client()
client.async_get_serverinfo = AsyncMock(
return_value={
"command": "serverinfo",
"success": False,
}
)
await setup_test_config_entry(hass, hyperion_client=client)
assert client.async_client_disconnect.called
assert hass.states.get(TEST_ENTITY_ID_1) is None
async def test_setup_config_entry_dynamic_instances(hass: HomeAssistant) -> None:
"""Test dynamic changes in the instance configuration."""
registry = er.async_get(hass)
config_entry = add_test_config_entry(hass)
master_client = create_mock_client()
master_client.instances = [TEST_INSTANCE_1, TEST_INSTANCE_2]
entity_client = create_mock_client()
entity_client.instances = master_client.instances
with patch(
"homeassistant.components.hyperion.client.HyperionClient",
side_effect=[master_client, entity_client, entity_client],
):
await hass.config_entries.async_setup(config_entry.entry_id)
await hass.async_block_till_done()
assert hass.states.get(TEST_ENTITY_ID_1) is not None
assert hass.states.get(TEST_ENTITY_ID_2) is not None
assert master_client.set_callbacks.called
# == Inject a new instances update (stop instance 1, add instance 3)
instance_callback = master_client.set_callbacks.call_args[0][0][
f"{const.KEY_INSTANCE}-{const.KEY_UPDATE}"
]
with patch(
"homeassistant.components.hyperion.client.HyperionClient",
return_value=entity_client,
):
await instance_callback(
{
const.KEY_SUCCESS: True,
const.KEY_DATA: [
{**TEST_INSTANCE_1, "running": False},
TEST_INSTANCE_2,
TEST_INSTANCE_3,
],
}
)
await hass.async_block_till_done()
assert hass.states.get(TEST_ENTITY_ID_1) is None
assert hass.states.get(TEST_ENTITY_ID_2) is not None
assert hass.states.get(TEST_ENTITY_ID_3) is not None
# Instance 1 is stopped, it should still be registered.
assert registry.async_is_registered(TEST_ENTITY_ID_1)
# == Inject a new instances update (remove instance 1)
assert master_client.set_callbacks.called
instance_callback = master_client.set_callbacks.call_args[0][0][
f"{const.KEY_INSTANCE}-{const.KEY_UPDATE}"
]
with patch(
"homeassistant.components.hyperion.client.HyperionClient",
return_value=entity_client,
):
await instance_callback(
{
const.KEY_SUCCESS: True,
const.KEY_DATA: [TEST_INSTANCE_2, TEST_INSTANCE_3],
}
)
await hass.async_block_till_done()
assert hass.states.get(TEST_ENTITY_ID_1) is None
assert hass.states.get(TEST_ENTITY_ID_2) is not None
assert hass.states.get(TEST_ENTITY_ID_3) is not None
    # Instance 1 is removed, so it should no longer be registered.
assert not registry.async_is_registered(TEST_ENTITY_ID_1)
# == Inject a new instances update (re-add instance 1, but not running)
with patch(
"homeassistant.components.hyperion.client.HyperionClient",
return_value=entity_client,
):
await instance_callback(
{
const.KEY_SUCCESS: True,
const.KEY_DATA: [
{**TEST_INSTANCE_1, "running": False},
TEST_INSTANCE_2,
TEST_INSTANCE_3,
],
}
)
await hass.async_block_till_done()
assert hass.states.get(TEST_ENTITY_ID_1) is None
assert hass.states.get(TEST_ENTITY_ID_2) is not None
assert hass.states.get(TEST_ENTITY_ID_3) is not None
# == Inject a new instances update (re-add instance 1, running)
with patch(
"homeassistant.components.hyperion.client.HyperionClient",
return_value=entity_client,
):
await instance_callback(
{
const.KEY_SUCCESS: True,
const.KEY_DATA: [TEST_INSTANCE_1, TEST_INSTANCE_2, TEST_INSTANCE_3],
}
)
await hass.async_block_till_done()
assert hass.states.get(TEST_ENTITY_ID_1) is not None
assert hass.states.get(TEST_ENTITY_ID_2) is not None
assert hass.states.get(TEST_ENTITY_ID_3) is not None
async def test_light_basic_properties(hass: HomeAssistant) -> None:
"""Test the basic properties."""
client = create_mock_client()
await setup_test_config_entry(hass, hyperion_client=client)
entity_state = hass.states.get(TEST_ENTITY_ID_1)
assert entity_state
assert entity_state.state == "on"
assert entity_state.attributes["brightness"] == 255
assert entity_state.attributes["hs_color"] == (0.0, 0.0)
assert entity_state.attributes["icon"] == hyperion_light.ICON_LIGHTBULB
assert entity_state.attributes["effect"] == hyperion_light.KEY_EFFECT_SOLID
# By default the effect list is the 3 external sources + 'Solid'.
assert len(entity_state.attributes["effect_list"]) == 4
assert (
entity_state.attributes["supported_features"] == hyperion_light.SUPPORT_HYPERION
)
async def test_light_async_turn_on(hass: HomeAssistant) -> None:
"""Test turning the light on."""
client = create_mock_client()
await setup_test_config_entry(hass, hyperion_client=client)
# On (=), 100% (=), solid (=), [255,255,255] (=)
client.async_send_set_color = AsyncMock(return_value=True)
await hass.services.async_call(
LIGHT_DOMAIN, SERVICE_TURN_ON, {ATTR_ENTITY_ID: TEST_ENTITY_ID_1}, blocking=True
)
assert client.async_send_set_color.call_args == call(
**{
const.KEY_PRIORITY: TEST_PRIORITY,
const.KEY_COLOR: [255, 255, 255],
const.KEY_ORIGIN: DEFAULT_ORIGIN,
}
)
# On (=), 50% (!), solid (=), [255,255,255] (=)
# ===
brightness = 128
client.async_send_set_color = AsyncMock(return_value=True)
client.async_send_set_adjustment = AsyncMock(return_value=True)
client.adjustment = [{const.KEY_ID: TEST_ID}]
await hass.services.async_call(
LIGHT_DOMAIN,
SERVICE_TURN_ON,
{ATTR_ENTITY_ID: TEST_ENTITY_ID_1, ATTR_BRIGHTNESS: brightness},
blocking=True,
)
assert client.async_send_set_adjustment.call_args == call(
**{const.KEY_ADJUSTMENT: {const.KEY_BRIGHTNESS: 50, const.KEY_ID: TEST_ID}}
)
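    # Illustrative arithmetic (not part of the test): Home Assistant brightness 128 maps to
    # Hyperion's 0-100 scale as round(128 * 100 / 255) == 50, and the simulated adjustment
    # callback further down converts 50 back to round(255 * 50 / 100) == 128, the original
    # ATTR_BRIGHTNESS value asserted below.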
assert client.async_send_set_color.call_args == call(
**{
const.KEY_PRIORITY: TEST_PRIORITY,
const.KEY_COLOR: [255, 255, 255],
const.KEY_ORIGIN: DEFAULT_ORIGIN,
}
)
# Simulate a false return of async_send_set_adjustment
client.async_send_set_adjustment = AsyncMock(return_value=False)
client.adjustment = [{const.KEY_ID: TEST_ID}]
await hass.services.async_call(
LIGHT_DOMAIN,
SERVICE_TURN_ON,
{ATTR_ENTITY_ID: TEST_ENTITY_ID_1, ATTR_BRIGHTNESS: brightness},
blocking=True,
)
# Simulate a state callback from Hyperion.
client.adjustment = [{const.KEY_BRIGHTNESS: 50}]
call_registered_callback(client, "adjustment-update")
entity_state = hass.states.get(TEST_ENTITY_ID_1)
assert entity_state
assert entity_state.state == "on"
assert entity_state.attributes["brightness"] == brightness
# On (=), 50% (=), solid (=), [0,255,255] (!)
hs_color = (180.0, 100.0)
client.async_send_set_color = AsyncMock(return_value=True)
await hass.services.async_call(
LIGHT_DOMAIN,
SERVICE_TURN_ON,
{ATTR_ENTITY_ID: TEST_ENTITY_ID_1, ATTR_HS_COLOR: hs_color},
blocking=True,
)
assert client.async_send_set_color.call_args == call(
**{
const.KEY_PRIORITY: TEST_PRIORITY,
const.KEY_COLOR: (0, 255, 255),
const.KEY_ORIGIN: DEFAULT_ORIGIN,
}
)
# Simulate a state callback from Hyperion.
client.visible_priority = {
const.KEY_COMPONENTID: const.KEY_COMPONENTID_COLOR,
const.KEY_VALUE: {const.KEY_RGB: (0, 255, 255)},
}
call_registered_callback(client, "priorities-update")
entity_state = hass.states.get(TEST_ENTITY_ID_1)
assert entity_state
assert entity_state.attributes["hs_color"] == hs_color
assert entity_state.attributes["icon"] == hyperion_light.ICON_LIGHTBULB
    # On (=), 100% (!), solid (=), [0,255,255] (=)
brightness = 255
client.async_send_set_color = AsyncMock(return_value=True)
client.async_send_set_adjustment = AsyncMock(return_value=True)
client.adjustment = [{const.KEY_ID: TEST_ID}]
await hass.services.async_call(
LIGHT_DOMAIN,
SERVICE_TURN_ON,
{ATTR_ENTITY_ID: TEST_ENTITY_ID_1, ATTR_BRIGHTNESS: brightness},
blocking=True,
)
assert client.async_send_set_adjustment.call_args == call(
**{const.KEY_ADJUSTMENT: {const.KEY_BRIGHTNESS: 100, const.KEY_ID: TEST_ID}}
)
assert client.async_send_set_color.call_args == call(
**{
const.KEY_PRIORITY: TEST_PRIORITY,
const.KEY_COLOR: (0, 255, 255),
const.KEY_ORIGIN: DEFAULT_ORIGIN,
}
)
client.adjustment = [{const.KEY_BRIGHTNESS: 100}]
call_registered_callback(client, "adjustment-update")
entity_state = hass.states.get(TEST_ENTITY_ID_1)
assert entity_state
assert entity_state.attributes["brightness"] == brightness
# On (=), 100% (=), "USB Capture (!), [0,255,255] (=)
component = "V4L"
effect = const.KEY_COMPONENTID_TO_NAME[component]
client.async_send_clear = AsyncMock(return_value=True)
client.async_send_set_component = AsyncMock(return_value=True)
await hass.services.async_call(
LIGHT_DOMAIN,
SERVICE_TURN_ON,
{ATTR_ENTITY_ID: TEST_ENTITY_ID_1, ATTR_EFFECT: effect},
blocking=True,
)
assert client.async_send_clear.call_args == call(
**{const.KEY_PRIORITY: TEST_PRIORITY}
)
assert client.async_send_set_component.call_args_list == [
call(
**{
const.KEY_COMPONENTSTATE: {
const.KEY_COMPONENT: const.KEY_COMPONENTID_EXTERNAL_SOURCES[0],
const.KEY_STATE: False,
}
}
),
call(
**{
const.KEY_COMPONENTSTATE: {
const.KEY_COMPONENT: const.KEY_COMPONENTID_EXTERNAL_SOURCES[1],
const.KEY_STATE: False,
}
}
),
call(
**{
const.KEY_COMPONENTSTATE: {
const.KEY_COMPONENT: const.KEY_COMPONENTID_EXTERNAL_SOURCES[2],
const.KEY_STATE: True,
}
}
),
]
client.visible_priority = {const.KEY_COMPONENTID: component}
call_registered_callback(client, "priorities-update")
entity_state = hass.states.get(TEST_ENTITY_ID_1)
assert entity_state
assert entity_state.attributes["icon"] == hyperion_light.ICON_EXTERNAL_SOURCE
assert entity_state.attributes["effect"] == effect
# On (=), 100% (=), "Warm Blobs" (!), [0,255,255] (=)
effect = "Warm Blobs"
client.async_send_clear = AsyncMock(return_value=True)
client.async_send_set_effect = AsyncMock(return_value=True)
await hass.services.async_call(
LIGHT_DOMAIN,
SERVICE_TURN_ON,
{ATTR_ENTITY_ID: TEST_ENTITY_ID_1, ATTR_EFFECT: effect},
blocking=True,
)
assert client.async_send_clear.call_args == call(
**{const.KEY_PRIORITY: TEST_PRIORITY}
)
assert client.async_send_set_effect.call_args == call(
**{
const.KEY_PRIORITY: TEST_PRIORITY,
const.KEY_EFFECT: {const.KEY_NAME: effect},
const.KEY_ORIGIN: DEFAULT_ORIGIN,
}
)
client.visible_priority = {
const.KEY_COMPONENTID: const.KEY_COMPONENTID_EFFECT,
const.KEY_OWNER: effect,
}
call_registered_callback(client, "priorities-update")
entity_state = hass.states.get(TEST_ENTITY_ID_1)
assert entity_state
assert entity_state.attributes["icon"] == hyperion_light.ICON_EFFECT
assert entity_state.attributes["effect"] == effect
# On (=), 100% (=), [0,0,255] (!)
# Ensure changing the color will move the effect to 'Solid' automatically.
hs_color = (240.0, 100.0)
client.async_send_set_color = AsyncMock(return_value=True)
await hass.services.async_call(
LIGHT_DOMAIN,
SERVICE_TURN_ON,
{ATTR_ENTITY_ID: TEST_ENTITY_ID_1, ATTR_HS_COLOR: hs_color},
blocking=True,
)
assert client.async_send_set_color.call_args == call(
**{
const.KEY_PRIORITY: TEST_PRIORITY,
const.KEY_COLOR: (0, 0, 255),
const.KEY_ORIGIN: DEFAULT_ORIGIN,
}
)
# Simulate a state callback from Hyperion.
client.visible_priority = {
const.KEY_COMPONENTID: const.KEY_COMPONENTID_COLOR,
const.KEY_VALUE: {const.KEY_RGB: (0, 0, 255)},
}
call_registered_callback(client, "priorities-update")
entity_state = hass.states.get(TEST_ENTITY_ID_1)
assert entity_state
assert entity_state.attributes["hs_color"] == hs_color
assert entity_state.attributes["icon"] == hyperion_light.ICON_LIGHTBULB
assert entity_state.attributes["effect"] == hyperion_light.KEY_EFFECT_SOLID
# No calls if disconnected.
client.has_loaded_state = False
call_registered_callback(client, "client-update", {"loaded-state": False})
client.async_send_clear = AsyncMock(return_value=True)
client.async_send_set_effect = AsyncMock(return_value=True)
await hass.services.async_call(
LIGHT_DOMAIN, SERVICE_TURN_ON, {ATTR_ENTITY_ID: TEST_ENTITY_ID_1}, blocking=True
)
assert not client.async_send_clear.called
assert not client.async_send_set_effect.called
async def test_light_async_turn_on_fail_async_send_set_component(
hass: HomeAssistant,
) -> None:
"""Test set_component failure when turning the light on."""
client = create_mock_client()
client.async_send_set_component = AsyncMock(return_value=False)
client.is_on = Mock(return_value=False)
await setup_test_config_entry(hass, hyperion_client=client)
await hass.services.async_call(
LIGHT_DOMAIN, SERVICE_TURN_ON, {ATTR_ENTITY_ID: TEST_ENTITY_ID_1}, blocking=True
)
assert client.method_calls[-1] == call.async_send_set_component(
componentstate={"component": "ALL", "state": True}
)
async def test_light_async_turn_on_fail_async_send_set_component_source(
hass: HomeAssistant,
) -> None:
"""Test async_send_set_component failure when selecting the source."""
client = create_mock_client()
client.async_send_clear = AsyncMock(return_value=True)
client.async_send_set_component = AsyncMock(return_value=False)
client.is_on = Mock(return_value=True)
await setup_test_config_entry(hass, hyperion_client=client)
await hass.services.async_call(
LIGHT_DOMAIN,
SERVICE_TURN_ON,
{
ATTR_ENTITY_ID: TEST_ENTITY_ID_1,
ATTR_EFFECT: const.KEY_COMPONENTID_TO_NAME["V4L"],
},
blocking=True,
)
assert client.method_calls[-1] == call.async_send_set_component(
componentstate={"component": "BOBLIGHTSERVER", "state": False}
)
async def test_light_async_turn_on_fail_async_send_clear_source(
hass: HomeAssistant,
) -> None:
"""Test async_send_clear failure when turning the light on."""
client = create_mock_client()
client.is_on = Mock(return_value=True)
client.async_send_clear = AsyncMock(return_value=False)
await setup_test_config_entry(hass, hyperion_client=client)
await hass.services.async_call(
LIGHT_DOMAIN,
SERVICE_TURN_ON,
{
ATTR_ENTITY_ID: TEST_ENTITY_ID_1,
ATTR_EFFECT: const.KEY_COMPONENTID_TO_NAME["V4L"],
},
blocking=True,
)
assert client.method_calls[-1] == call.async_send_clear(priority=180)
async def test_light_async_turn_on_fail_async_send_clear_effect(
hass: HomeAssistant,
) -> None:
"""Test async_send_clear failure when turning on an effect."""
client = create_mock_client()
client.is_on = Mock(return_value=True)
client.async_send_clear = AsyncMock(return_value=False)
await setup_test_config_entry(hass, hyperion_client=client)
await hass.services.async_call(
LIGHT_DOMAIN,
SERVICE_TURN_ON,
{ATTR_ENTITY_ID: TEST_ENTITY_ID_1, ATTR_EFFECT: "Warm Mood Blobs"},
blocking=True,
)
assert client.method_calls[-1] == call.async_send_clear(priority=180)
async def test_light_async_turn_on_fail_async_send_set_effect(
hass: HomeAssistant,
) -> None:
"""Test async_send_set_effect failure when turning on the light."""
client = create_mock_client()
client.is_on = Mock(return_value=True)
client.async_send_clear = AsyncMock(return_value=True)
client.async_send_set_effect = AsyncMock(return_value=False)
await setup_test_config_entry(hass, hyperion_client=client)
await hass.services.async_call(
LIGHT_DOMAIN,
SERVICE_TURN_ON,
{ATTR_ENTITY_ID: TEST_ENTITY_ID_1, ATTR_EFFECT: "Warm Mood Blobs"},
blocking=True,
)
assert client.method_calls[-1] == call.async_send_set_effect(
priority=180, effect={"name": "Warm Mood Blobs"}, origin="Home Assistant"
)
async def test_light_async_turn_on_fail_async_send_set_color(
hass: HomeAssistant,
) -> None:
"""Test async_send_set_color failure when turning on the light."""
client = create_mock_client()
client.is_on = Mock(return_value=True)
client.async_send_clear = AsyncMock(return_value=True)
client.async_send_set_color = AsyncMock(return_value=False)
await setup_test_config_entry(hass, hyperion_client=client)
await hass.services.async_call(
LIGHT_DOMAIN,
SERVICE_TURN_ON,
{ATTR_ENTITY_ID: TEST_ENTITY_ID_1, ATTR_HS_COLOR: (240.0, 100.0)},
blocking=True,
)
assert client.method_calls[-1] == call.async_send_set_color(
priority=180, color=(0, 0, 255), origin="Home Assistant"
)
async def test_light_async_turn_off_fail_async_send_set_component(
hass: HomeAssistant,
) -> None:
"""Test async_send_set_component failure when turning off the light."""
client = create_mock_client()
client.async_send_set_component = AsyncMock(return_value=False)
await setup_test_config_entry(hass, hyperion_client=client)
await hass.services.async_call(
LIGHT_DOMAIN,
SERVICE_TURN_OFF,
{ATTR_ENTITY_ID: TEST_ENTITY_ID_1},
blocking=True,
)
assert client.method_calls[-1] == call.async_send_set_component(
componentstate={"component": "LEDDEVICE", "state": False}
)
async def test_priority_light_async_turn_off_fail_async_send_clear(
hass: HomeAssistant,
) -> None:
"""Test async_send_clear failure when turning off a priority light."""
client = create_mock_client()
client.async_send_clear = AsyncMock(return_value=False)
with patch(
"homeassistant.components.hyperion.light.HyperionPriorityLight.entity_registry_enabled_default"
) as enabled_by_default_mock:
enabled_by_default_mock.return_value = True
await setup_test_config_entry(hass, hyperion_client=client)
await hass.services.async_call(
LIGHT_DOMAIN,
SERVICE_TURN_OFF,
{ATTR_ENTITY_ID: TEST_PRIORITY_LIGHT_ENTITY_ID_1},
blocking=True,
)
assert client.method_calls[-1] == call.async_send_clear(priority=180)
async def test_light_async_turn_off(hass: HomeAssistant) -> None:
"""Test turning the light off."""
client = create_mock_client()
await setup_test_config_entry(hass, hyperion_client=client)
client.async_send_set_component = AsyncMock(return_value=True)
await hass.services.async_call(
LIGHT_DOMAIN,
SERVICE_TURN_OFF,
{ATTR_ENTITY_ID: TEST_ENTITY_ID_1},
blocking=True,
)
assert client.async_send_set_component.call_args == call(
**{
const.KEY_COMPONENTSTATE: {
const.KEY_COMPONENT: const.KEY_COMPONENTID_LEDDEVICE,
const.KEY_STATE: False,
}
}
)
call_registered_callback(client, "components-update")
entity_state = hass.states.get(TEST_ENTITY_ID_1)
assert entity_state
assert entity_state.attributes["icon"] == hyperion_light.ICON_LIGHTBULB
# No calls if no state loaded.
client.has_loaded_state = False
client.async_send_set_component = AsyncMock(return_value=True)
call_registered_callback(client, "client-update", {"loaded-state": False})
await hass.services.async_call(
LIGHT_DOMAIN,
SERVICE_TURN_OFF,
{ATTR_ENTITY_ID: TEST_ENTITY_ID_1},
blocking=True,
)
assert not client.async_send_set_component.called
async def test_light_async_updates_from_hyperion_client(
hass: HomeAssistant,
) -> None:
"""Test receiving a variety of Hyperion client callbacks."""
client = create_mock_client()
await setup_test_config_entry(hass, hyperion_client=client)
    # Brightness change gets accepted.
brightness = 10
client.adjustment = [{const.KEY_BRIGHTNESS: brightness}]
call_registered_callback(client, "adjustment-update")
entity_state = hass.states.get(TEST_ENTITY_ID_1)
assert entity_state
assert entity_state.attributes["brightness"] == round(255 * (brightness / 100.0))
# Broken brightness value is ignored.
bad_brightness = -200
client.adjustment = [{const.KEY_BRIGHTNESS: bad_brightness}]
call_registered_callback(client, "adjustment-update")
entity_state = hass.states.get(TEST_ENTITY_ID_1)
assert entity_state
assert entity_state.attributes["brightness"] == round(255 * (brightness / 100.0))
# Update components.
client.is_on.return_value = True
call_registered_callback(client, "components-update")
entity_state = hass.states.get(TEST_ENTITY_ID_1)
assert entity_state
assert entity_state.state == "on"
client.is_on.return_value = False
call_registered_callback(client, "components-update")
entity_state = hass.states.get(TEST_ENTITY_ID_1)
assert entity_state
assert entity_state.state == "off"
# Update priorities (V4L)
client.is_on.return_value = True
client.visible_priority = {const.KEY_COMPONENTID: const.KEY_COMPONENTID_V4L}
call_registered_callback(client, "priorities-update")
entity_state = hass.states.get(TEST_ENTITY_ID_1)
assert entity_state
assert entity_state.attributes["icon"] == hyperion_light.ICON_EXTERNAL_SOURCE
assert entity_state.attributes["hs_color"] == (0.0, 0.0)
assert (
entity_state.attributes["effect"]
== const.KEY_COMPONENTID_TO_NAME[const.KEY_COMPONENTID_V4L]
)
# Update priorities (Effect)
effect = "foo"
client.visible_priority = {
const.KEY_COMPONENTID: const.KEY_COMPONENTID_EFFECT,
const.KEY_OWNER: effect,
}
call_registered_callback(client, "priorities-update")
entity_state = hass.states.get(TEST_ENTITY_ID_1)
assert entity_state
assert entity_state.attributes["effect"] == effect
assert entity_state.attributes["icon"] == hyperion_light.ICON_EFFECT
assert entity_state.attributes["hs_color"] == (0.0, 0.0)
# Update priorities (Color)
rgb = (0, 100, 100)
client.visible_priority = {
const.KEY_COMPONENTID: const.KEY_COMPONENTID_COLOR,
const.KEY_VALUE: {const.KEY_RGB: rgb},
}
call_registered_callback(client, "priorities-update")
entity_state = hass.states.get(TEST_ENTITY_ID_1)
assert entity_state
assert entity_state.attributes["effect"] == hyperion_light.KEY_EFFECT_SOLID
assert entity_state.attributes["icon"] == hyperion_light.ICON_LIGHTBULB
assert entity_state.attributes["hs_color"] == (180.0, 100.0)
# Update priorities (None)
client.visible_priority = None
call_registered_callback(client, "priorities-update")
entity_state = hass.states.get(TEST_ENTITY_ID_1)
assert entity_state
assert entity_state.state == "off"
# Update effect list
effects = [{const.KEY_NAME: "One"}, {const.KEY_NAME: "Two"}]
client.effects = effects
call_registered_callback(client, "effects-update")
entity_state = hass.states.get(TEST_ENTITY_ID_1)
assert entity_state
assert entity_state.attributes["effect_list"] == [
hyperion_light.KEY_EFFECT_SOLID
] + [
const.KEY_COMPONENTID_TO_NAME[component]
for component in const.KEY_COMPONENTID_EXTERNAL_SOURCES
] + [
effect[const.KEY_NAME] for effect in effects
]
# Update connection status (e.g. disconnection).
# Turn on late, check state, disconnect, ensure it cannot be turned off.
client.has_loaded_state = False
call_registered_callback(client, "client-update", {"loaded-state": False})
entity_state = hass.states.get(TEST_ENTITY_ID_1)
assert entity_state
assert entity_state.state == "unavailable"
# Update connection status (e.g. re-connection)
client.has_loaded_state = True
client.visible_priority = {
const.KEY_COMPONENTID: const.KEY_COMPONENTID_COLOR,
const.KEY_VALUE: {const.KEY_RGB: rgb},
}
call_registered_callback(client, "client-update", {"loaded-state": True})
entity_state = hass.states.get(TEST_ENTITY_ID_1)
assert entity_state
assert entity_state.state == "on"
async def test_full_state_loaded_on_start(hass: HomeAssistant) -> None:
"""Test receiving a variety of Hyperion client callbacks."""
client = create_mock_client()
# Update full state (should call all update methods).
brightness = 25
client.adjustment = [{const.KEY_BRIGHTNESS: brightness}]
client.visible_priority = {
const.KEY_COMPONENTID: const.KEY_COMPONENTID_COLOR,
const.KEY_VALUE: {const.KEY_RGB: (0, 100, 100)},
}
client.effects = [{const.KEY_NAME: "One"}, {const.KEY_NAME: "Two"}]
await setup_test_config_entry(hass, hyperion_client=client)
entity_state = hass.states.get(TEST_ENTITY_ID_1)
assert entity_state
assert entity_state.attributes["brightness"] == round(255 * (brightness / 100.0))
assert entity_state.attributes["effect"] == hyperion_light.KEY_EFFECT_SOLID
assert entity_state.attributes["icon"] == hyperion_light.ICON_LIGHTBULB
assert entity_state.attributes["hs_color"] == (180.0, 100.0)
async def test_unload_entry(hass: HomeAssistant) -> None:
"""Test unload."""
client = create_mock_client()
await setup_test_config_entry(hass, hyperion_client=client)
assert hass.states.get(TEST_ENTITY_ID_1) is not None
assert client.async_client_connect.call_count == 2
assert not client.async_client_disconnect.called
entry = _get_config_entry_from_unique_id(hass, TEST_SYSINFO_ID)
assert entry
await hass.config_entries.async_unload(entry.entry_id)
assert client.async_client_disconnect.call_count == 2
async def test_version_log_warning(caplog, hass: HomeAssistant) -> None:
"""Test warning on old version."""
client = create_mock_client()
client.async_sysinfo_version = AsyncMock(return_value="2.0.0-alpha.7")
await setup_test_config_entry(hass, hyperion_client=client)
assert hass.states.get(TEST_ENTITY_ID_1) is not None
assert "Please consider upgrading" in caplog.text
async def test_version_no_log_warning(caplog, hass: HomeAssistant) -> None:
"""Test no warning on acceptable version."""
client = create_mock_client()
client.async_sysinfo_version = AsyncMock(return_value="2.0.0-alpha.9")
await setup_test_config_entry(hass, hyperion_client=client)
assert hass.states.get(TEST_ENTITY_ID_1) is not None
assert "Please consider upgrading" not in caplog.text
async def test_setup_entry_no_token_reauth(hass: HomeAssistant) -> None:
"""Verify a reauth flow when auth is required but no token provided."""
client = create_mock_client()
config_entry = add_test_config_entry(hass)
client.async_is_auth_required = AsyncMock(return_value=TEST_AUTH_REQUIRED_RESP)
with patch(
"homeassistant.components.hyperion.client.HyperionClient", return_value=client
), patch.object(hass.config_entries.flow, "async_init") as mock_flow_init:
assert not await hass.config_entries.async_setup(config_entry.entry_id)
assert client.async_client_disconnect.called
mock_flow_init.assert_called_once_with(
DOMAIN,
context={
CONF_SOURCE: SOURCE_REAUTH,
"entry_id": config_entry.entry_id,
"unique_id": config_entry.unique_id,
"title_placeholders": {"name": config_entry.title},
},
data=config_entry.data,
)
assert config_entry.state is ConfigEntryState.SETUP_ERROR
async def test_setup_entry_bad_token_reauth(hass: HomeAssistant) -> None:
"""Verify a reauth flow when a bad token is provided."""
client = create_mock_client()
config_entry = add_test_config_entry(
hass,
data={CONF_HOST: TEST_HOST, CONF_PORT: TEST_PORT, CONF_TOKEN: "expired_token"},
)
client.async_is_auth_required = AsyncMock(return_value=TEST_AUTH_NOT_REQUIRED_RESP)
# Fail to log in.
client.async_client_login = AsyncMock(return_value=False)
with patch(
"homeassistant.components.hyperion.client.HyperionClient", return_value=client
), patch.object(hass.config_entries.flow, "async_init") as mock_flow_init:
assert not await hass.config_entries.async_setup(config_entry.entry_id)
assert client.async_client_disconnect.called
mock_flow_init.assert_called_once_with(
DOMAIN,
context={
CONF_SOURCE: SOURCE_REAUTH,
"entry_id": config_entry.entry_id,
"unique_id": config_entry.unique_id,
"title_placeholders": {"name": config_entry.title},
},
data=config_entry.data,
)
assert config_entry.state is ConfigEntryState.SETUP_ERROR
async def test_priority_light_async_updates(
hass: HomeAssistant,
) -> None:
"""Test receiving a variety of Hyperion client callbacks to a HyperionPriorityLight."""
priority_template = {
const.KEY_ACTIVE: True,
const.KEY_VISIBLE: True,
const.KEY_PRIORITY: TEST_PRIORITY,
const.KEY_COMPONENTID: const.KEY_COMPONENTID_COLOR,
const.KEY_VALUE: {const.KEY_RGB: (100, 100, 100)},
}
client = create_mock_client()
client.priorities = [{**priority_template}]
register_test_entity(
hass,
LIGHT_DOMAIN,
TYPE_HYPERION_PRIORITY_LIGHT,
TEST_PRIORITY_LIGHT_ENTITY_ID_1,
)
await setup_test_config_entry(hass, hyperion_client=client)
# == Scenario: Color at HA priority will show light as on.
entity_state = hass.states.get(TEST_PRIORITY_LIGHT_ENTITY_ID_1)
assert entity_state
assert entity_state.state == "on"
assert entity_state.attributes["hs_color"] == (0.0, 0.0)
# == Scenario: Color going to black shows the light as off.
client.priorities = [
{
**priority_template,
const.KEY_VALUE: {const.KEY_RGB: COLOR_BLACK},
}
]
client.visible_priority = client.priorities[0]
call_registered_callback(client, "priorities-update")
entity_state = hass.states.get(TEST_PRIORITY_LIGHT_ENTITY_ID_1)
assert entity_state
assert entity_state.state == "off"
# == Scenario: Lower priority than HA priority should have no impact on what HA
# shows when the HA priority is present.
client.priorities = [
{**priority_template, const.KEY_PRIORITY: TEST_PRIORITY - 1},
{
**priority_template,
const.KEY_VALUE: {const.KEY_RGB: COLOR_BLACK},
},
]
client.visible_priority = client.priorities[0]
call_registered_callback(client, "priorities-update")
entity_state = hass.states.get(TEST_PRIORITY_LIGHT_ENTITY_ID_1)
assert entity_state
assert entity_state.state == "off"
# == Scenario: Fresh color at HA priority should turn HA entity on (even though
# there's a lower priority enabled/visible in Hyperion).
client.priorities = [
{**priority_template, const.KEY_PRIORITY: TEST_PRIORITY - 1},
{
**priority_template,
const.KEY_PRIORITY: TEST_PRIORITY,
const.KEY_VALUE: {const.KEY_RGB: (100, 100, 150)},
},
]
client.visible_priority = client.priorities[0]
call_registered_callback(client, "priorities-update")
entity_state = hass.states.get(TEST_PRIORITY_LIGHT_ENTITY_ID_1)
assert entity_state
assert entity_state.state == "on"
assert entity_state.attributes["hs_color"] == (240.0, 33.333)
# == Scenario: V4L at a higher priority, with no other HA priority at all, should
# have no effect.
# Emulate HA turning the light off with black at the HA priority.
client.priorities = []
client.visible_priority = None
call_registered_callback(client, "priorities-update")
entity_state = hass.states.get(TEST_PRIORITY_LIGHT_ENTITY_ID_1)
assert entity_state
assert entity_state.state == "off"
# Emulate V4L turning on.
client.priorities = [
{
**priority_template,
const.KEY_PRIORITY: 240,
const.KEY_COMPONENTID: const.KEY_COMPONENTID_V4L,
const.KEY_VALUE: {const.KEY_RGB: (100, 100, 150)},
},
]
client.visible_priority = client.priorities[0]
call_registered_callback(client, "priorities-update")
entity_state = hass.states.get(TEST_PRIORITY_LIGHT_ENTITY_ID_1)
assert entity_state
assert entity_state.state == "off"
# == Scenario: A lower priority input (lower priority than HA) should have no effect.
client.priorities = [
{
**priority_template,
const.KEY_VISIBLE: True,
const.KEY_PRIORITY: TEST_PRIORITY - 1,
const.KEY_COMPONENTID: const.KEY_COMPONENTID_COLOR,
const.KEY_VALUE: {const.KEY_RGB: (255, 0, 0)},
},
{
**priority_template,
const.KEY_PRIORITY: 240,
const.KEY_COMPONENTID: const.KEY_COMPONENTID_V4L,
const.KEY_VALUE: {const.KEY_RGB: (100, 100, 150)},
const.KEY_VISIBLE: False,
},
]
client.visible_priority = client.priorities[0]
call_registered_callback(client, "priorities-update")
entity_state = hass.states.get(TEST_PRIORITY_LIGHT_ENTITY_ID_1)
assert entity_state
assert entity_state.state == "off"
# == Scenario: A non-active priority is ignored.
client.priorities = [
{
const.KEY_ACTIVE: False,
const.KEY_VISIBLE: False,
const.KEY_PRIORITY: TEST_PRIORITY,
const.KEY_COMPONENTID: const.KEY_COMPONENTID_COLOR,
const.KEY_VALUE: {const.KEY_RGB: (100, 100, 100)},
}
]
client.visible_priority = None
call_registered_callback(client, "priorities-update")
entity_state = hass.states.get(TEST_PRIORITY_LIGHT_ENTITY_ID_1)
assert entity_state
assert entity_state.state == "off"
# == Scenario: A priority with no ... priority ... is ignored.
client.priorities = [
{
const.KEY_ACTIVE: True,
const.KEY_VISIBLE: True,
const.KEY_COMPONENTID: const.KEY_COMPONENTID_COLOR,
const.KEY_VALUE: {const.KEY_RGB: (100, 100, 100)},
}
]
client.visible_priority = None
call_registered_callback(client, "priorities-update")
entity_state = hass.states.get(TEST_PRIORITY_LIGHT_ENTITY_ID_1)
assert entity_state
assert entity_state.state == "off"
async def test_priority_light_async_updates_off_sets_black(
hass: HomeAssistant,
) -> None:
"""Test turning the HyperionPriorityLight off."""
client = create_mock_client()
client.priorities = [
{
const.KEY_ACTIVE: True,
const.KEY_VISIBLE: True,
const.KEY_PRIORITY: TEST_PRIORITY,
const.KEY_COMPONENTID: const.KEY_COMPONENTID_COLOR,
const.KEY_VALUE: {const.KEY_RGB: (100, 100, 100)},
}
]
register_test_entity(
hass,
LIGHT_DOMAIN,
TYPE_HYPERION_PRIORITY_LIGHT,
TEST_PRIORITY_LIGHT_ENTITY_ID_1,
)
await setup_test_config_entry(hass, hyperion_client=client)
client.async_send_clear = AsyncMock(return_value=True)
client.async_send_set_color = AsyncMock(return_value=True)
await hass.services.async_call(
LIGHT_DOMAIN,
SERVICE_TURN_OFF,
{ATTR_ENTITY_ID: TEST_PRIORITY_LIGHT_ENTITY_ID_1},
blocking=True,
)
assert client.async_send_clear.call_args == call(
**{
const.KEY_PRIORITY: TEST_PRIORITY,
}
)
assert client.async_send_set_color.call_args == call(
**{
const.KEY_PRIORITY: TEST_PRIORITY,
const.KEY_COLOR: COLOR_BLACK,
const.KEY_ORIGIN: DEFAULT_ORIGIN,
}
)
async def test_priority_light_prior_color_preserved_after_black(
hass: HomeAssistant,
) -> None:
"""Test that color is preserved in an on->off->on cycle for a HyperionPriorityLight.
For a HyperionPriorityLight the color black is used to indicate off. This test
ensures that a cycle through 'off' will preserve the original color.
"""
priority_template = {
const.KEY_ACTIVE: True,
const.KEY_VISIBLE: True,
const.KEY_PRIORITY: TEST_PRIORITY,
const.KEY_COMPONENTID: const.KEY_COMPONENTID_COLOR,
}
client = create_mock_client()
client.async_send_set_color = AsyncMock(return_value=True)
client.async_send_clear = AsyncMock(return_value=True)
client.priorities = []
client.visible_priority = None
register_test_entity(
hass,
LIGHT_DOMAIN,
TYPE_HYPERION_PRIORITY_LIGHT,
TEST_PRIORITY_LIGHT_ENTITY_ID_1,
)
await setup_test_config_entry(hass, hyperion_client=client)
    # Turn the light on full blue...
# On (=), 100% (=), solid (=), [0,0,255] (=)
hs_color = (240.0, 100.0)
rgb_color = (0, 0, 255)
await hass.services.async_call(
LIGHT_DOMAIN,
SERVICE_TURN_ON,
{ATTR_ENTITY_ID: TEST_PRIORITY_LIGHT_ENTITY_ID_1, ATTR_HS_COLOR: hs_color},
blocking=True,
)
assert client.async_send_set_color.call_args == call(
**{
const.KEY_PRIORITY: TEST_PRIORITY,
const.KEY_COLOR: rgb_color,
const.KEY_ORIGIN: DEFAULT_ORIGIN,
}
)
client.priorities = [
{
**priority_template,
const.KEY_VALUE: {const.KEY_RGB: rgb_color},
}
]
client.visible_priority = client.priorities[0]
call_registered_callback(client, "priorities-update")
entity_state = hass.states.get(TEST_PRIORITY_LIGHT_ENTITY_ID_1)
assert entity_state
assert entity_state.state == "on"
assert entity_state.attributes["hs_color"] == hs_color
# Then turn it off.
await hass.services.async_call(
LIGHT_DOMAIN,
SERVICE_TURN_OFF,
{ATTR_ENTITY_ID: TEST_PRIORITY_LIGHT_ENTITY_ID_1},
blocking=True,
)
assert client.async_send_set_color.call_args == call(
**{
const.KEY_PRIORITY: TEST_PRIORITY,
const.KEY_COLOR: COLOR_BLACK,
const.KEY_ORIGIN: DEFAULT_ORIGIN,
}
)
client.priorities = [
{
**priority_template,
const.KEY_VALUE: {const.KEY_RGB: COLOR_BLACK},
}
]
client.visible_priority = client.priorities[0]
call_registered_callback(client, "priorities-update")
entity_state = hass.states.get(TEST_PRIORITY_LIGHT_ENTITY_ID_1)
assert entity_state
assert entity_state.state == "off"
    # Then turn it back on and ensure it's still blue.
# On (=), 100% (=), solid (=), [0,0,255] (=)
await hass.services.async_call(
LIGHT_DOMAIN,
SERVICE_TURN_ON,
{ATTR_ENTITY_ID: TEST_PRIORITY_LIGHT_ENTITY_ID_1},
blocking=True,
)
assert client.async_send_set_color.call_args == call(
**{
const.KEY_PRIORITY: TEST_PRIORITY,
const.KEY_COLOR: rgb_color,
const.KEY_ORIGIN: DEFAULT_ORIGIN,
}
)
client.priorities = [
{
**priority_template,
const.KEY_VALUE: {const.KEY_RGB: rgb_color},
}
]
client.visible_priority = client.priorities[0]
call_registered_callback(client, "priorities-update")
entity_state = hass.states.get(TEST_PRIORITY_LIGHT_ENTITY_ID_1)
assert entity_state
assert entity_state.state == "on"
assert entity_state.attributes["hs_color"] == hs_color
async def test_priority_light_has_no_external_sources(hass: HomeAssistant) -> None:
"""Ensure a HyperionPriorityLight does not list external sources."""
client = create_mock_client()
client.priorities = []
register_test_entity(
hass,
LIGHT_DOMAIN,
TYPE_HYPERION_PRIORITY_LIGHT,
TEST_PRIORITY_LIGHT_ENTITY_ID_1,
)
await setup_test_config_entry(hass, hyperion_client=client)
entity_state = hass.states.get(TEST_PRIORITY_LIGHT_ENTITY_ID_1)
assert entity_state
assert entity_state.attributes["effect_list"] == [hyperion_light.KEY_EFFECT_SOLID]
async def test_light_option_effect_hide_list(hass: HomeAssistant) -> None:
"""Test the effect_hide_list option."""
client = create_mock_client()
client.effects = [{const.KEY_NAME: "One"}, {const.KEY_NAME: "Two"}]
await setup_test_config_entry(
hass,
hyperion_client=client,
options={CONF_EFFECT_HIDE_LIST: ["Two", "USB Capture"]},
)
entity_state = hass.states.get(TEST_ENTITY_ID_1)
assert entity_state
assert entity_state.attributes["effect_list"] == [
"Solid",
"Boblight Server",
"Platform Capture",
"One",
]
async def test_device_info(hass: HomeAssistant) -> None:
"""Verify device information includes expected details."""
client = create_mock_client()
register_test_entity(
hass,
LIGHT_DOMAIN,
TYPE_HYPERION_PRIORITY_LIGHT,
TEST_PRIORITY_LIGHT_ENTITY_ID_1,
)
await setup_test_config_entry(hass, hyperion_client=client)
device_id = get_hyperion_device_id(TEST_SYSINFO_ID, TEST_INSTANCE)
device_registry = dr.async_get(hass)
device = device_registry.async_get_device({(DOMAIN, device_id)})
assert device
assert device.config_entries == {TEST_CONFIG_ENTRY_ID}
assert device.identifiers == {(DOMAIN, device_id)}
assert device.manufacturer == HYPERION_MANUFACTURER_NAME
assert device.model == HYPERION_MODEL_NAME
assert device.name == TEST_INSTANCE_1["friendly_name"]
    entity_registry = er.async_get(hass)
entities_from_device = [
entry.entity_id
for entry in er.async_entries_for_device(entity_registry, device.id)
]
assert TEST_PRIORITY_LIGHT_ENTITY_ID_1 in entities_from_device
assert TEST_ENTITY_ID_1 in entities_from_device
async def test_lights_can_be_enabled(hass: HomeAssistant) -> None:
"""Verify lights can be enabled."""
client = create_mock_client()
await setup_test_config_entry(hass, hyperion_client=client)
entity_registry = er.async_get(hass)
entry = entity_registry.async_get(TEST_PRIORITY_LIGHT_ENTITY_ID_1)
assert entry
assert entry.disabled
assert entry.disabled_by is er.RegistryEntryDisabler.INTEGRATION
entity_state = hass.states.get(TEST_PRIORITY_LIGHT_ENTITY_ID_1)
assert not entity_state
with patch(
"homeassistant.components.hyperion.client.HyperionClient",
return_value=client,
):
updated_entry = entity_registry.async_update_entity(
TEST_PRIORITY_LIGHT_ENTITY_ID_1, disabled_by=None
)
assert not updated_entry.disabled
await hass.async_block_till_done()
async_fire_time_changed(
hass,
dt.utcnow() + timedelta(seconds=RELOAD_AFTER_UPDATE_DELAY + 1),
)
await hass.async_block_till_done()
entity_state = hass.states.get(TEST_PRIORITY_LIGHT_ENTITY_ID_1)
assert entity_state
async def test_deprecated_effect_names(caplog, hass: HomeAssistant) -> None:
"""Test deprecated effects function and issue a warning."""
client = create_mock_client()
client.async_send_clear = AsyncMock(return_value=True)
client.async_send_set_component = AsyncMock(return_value=True)
await setup_test_config_entry(hass, hyperion_client=client)
for component in const.KEY_COMPONENTID_EXTERNAL_SOURCES:
await hass.services.async_call(
LIGHT_DOMAIN,
SERVICE_TURN_ON,
{ATTR_ENTITY_ID: TEST_ENTITY_ID_1, ATTR_EFFECT: component},
blocking=True,
)
assert f"Use of Hyperion effect '{component}' is deprecated" in caplog.text
# Simulate a state callback from Hyperion.
client.visible_priority = {
const.KEY_COMPONENTID: component,
}
call_registered_callback(client, "priorities-update")
entity_state = hass.states.get(TEST_ENTITY_ID_1)
assert entity_state
assert (
entity_state.attributes["effect"]
== const.KEY_COMPONENTID_TO_NAME[component]
)
async def test_deprecated_effect_names_not_in_effect_list(
hass: HomeAssistant,
) -> None:
"""Test deprecated effects are not in shown effect list."""
await setup_test_config_entry(hass)
entity_state = hass.states.get(TEST_ENTITY_ID_1)
assert entity_state
for component in const.KEY_COMPONENTID_EXTERNAL_SOURCES:
assert component not in entity_state.attributes["effect_list"]
|
|
import ctypes
try:
import av
av.t = ctypes.c_int
except:
print 'using av_64'
import av_64 as av
av.t = ctypes.c_long
import numpy as np
import pygame
global g_av_is_initialised
g_av_is_initialised = False
def setVolume(md, level):
if level == 0:
alookup = None
else:
alookup = np.arange(65536,dtype=np.int16)
for it in range(level): alookup = np.int16(np.sin(np.pi*0.5*(alookup/65535.))*65535.)
md['alookup'] = alookup
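# What the lookup above does (descriptive note only, no behaviour change): each pass remaps a
# sample value s through int16(sin(0.5*pi*s/65535.)*65535.), a sine-shaped curve applied
# 'level' times that boosts lower-amplitude samples; level == 0 disables the lookup so
# resampleAudio passes samples through unchanged.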
#@profile
def resampleAudio(audio, md):
'''Convert input audio samples to 48000,s16,stereo.'''
fmt = md['asamplefmt']
fmt,planar = fmt%5,fmt/5
adtype = [np.uint8,np.int16,np.int32,np.float32,np.float64][fmt]
bytes_per_inchannel_sample = [1,2,4,4,8][fmt]
audio = [np.fromstring(a,dtype=adtype) for a in audio]
if planar: audio = [a.reshape(md['ainchannels'],-1).T.reshape(-1) for a in audio] # change planar to interlaced before concatenating
if fmt > av.AV_SAMPLE_FMT_S32: # convert FLT and DBL to S32
audio = [np.int32(a*0x7fffffff) for a in audio]
fmt,adtype,bytes_per_inchannel_sample = av.AV_SAMPLE_FMT_S32,np.int32,4
audio = np.concatenate(audio).tostring()
ai = ctypes.cast(audio,ctypes.POINTER(ctypes.c_short))
bytes_per_in_sample = md['ainchannels'] * bytes_per_inchannel_sample
bytes_per_out_sample = md['aoutchannels'] * 2
num_in_samples = len(audio)/bytes_per_in_sample
audio_out_size = bytes_per_out_sample*np.ceil(num_in_samples*48000./md['asamplerate'])
audio_out = ' '*int(audio_out_size) #(ctypes.c_short * (audio_out_size/2))()
ao = ctypes.cast(audio_out,ctypes.POINTER(ctypes.c_short))
num_out_samples = av.audio_resample(md['aresample'], ao, ai, num_in_samples)
assert(num_out_samples*bytes_per_out_sample <= audio_out_size),repr(num_out_samples*bytes_per_out_sample)+'<='+repr(audio_out_size)
audio_out = np.fromstring(audio_out,dtype=np.int16)
audio_out = audio_out[:num_out_samples*bytes_per_out_sample/2]
if md['alookup'] is not None: audio_out = md['alookup'][audio_out]
if md['aoutchannels'] != 2: # the resampler only outputs stereo if the input is stereo/mono; otherwise we take the first two channels
tmp = audio_out
audio_out = np.zeros(len(tmp)*2/md['ainchannels'],dtype=np.int16)
audio_out[::2] = tmp[::md['ainchannels']]
audio_out[1::2] = tmp[1::md['ainchannels']]
return audio_out
#@profile
def readFrame(md, seekFrame=None, playingAudio=True, assumedFps=30000./1001., debugging=False):
pVCodecCtx, pVFrame, videoStream = md['vcodec'], md['vframe'], md['vstream']
pACodecCtx, pAFrame, audioStream = md['acodec'], md['aframe'], md['astream']
pTcCodecCtx, pDFrame, tcStream = md['dcodec'], md['dframe'], md['dstream']
seekFrame = max(0,seekFrame)
if md['vcodec'] is not None and seekFrame > md['vmaxframe']: return False
if seekFrame == md['frameNumber']:
if videoStream is not None:
av.sws_scale(md['sws'], pVFrame.contents.data, pVFrame.contents.linesize, 0, md['vheight'],
md['vrgb'].contents.data, md['vrgb' ].contents.linesize)
return True # same frame; do nothing
if md['frameNumber'] is not None and (seekFrame <= md['frameNumber'] or seekFrame >= md['frameNumber']+10):
if audioStream is not None:
seekTarget = int(seekFrame * md['aduration'] + md['aoffset'])
tmp = av.av_seek_frame(md['aformat'], audioStream, min(max(seekTarget,md['aoffset']),md['aformat'].contents.duration), av.AVSEEK_FLAG_ANY)
md['aplayedtoframe'] = 0 # cancel the player
if videoStream is not None:
#print 'seekFrame',seekFrame,'last frame',md['frameNumber']
seekTarget = max(md['voffset'],int((seekFrame) * md['vduration'] + md['voffset'])) # 32 for old cara files
#print 'seek',seekTarget
av.avcodec_flush_buffers(pVCodecCtx)
            tmp = av.av_seek_frame(md['vformat'], videoStream, seekTarget, av.AVSEEK_FLAG_BACKWARD)
assert (tmp >= 0), 'ERROR:movie seek failed with code '+repr(tmp)
packet = av.AVPacket()
if audioStream is not None:
if md['frameNumber'] is not None and seekFrame > md['frameNumber'] and seekFrame < min(md['frameNumber']+10,md['aplayedtoframe']):
#print 'skip',seekFrame
md['frameNumber'] = seekFrame
else:
while (av.av_read_frame(md['aformat'], packet)>=0):
if packet.stream_index==audioStream: # From the audio stream?
if md['aduration'] == 0:
                        # packet.duration is unreliable here (e.g. for WAV), so derive the
                        # samples-per-frame value from the sample rate and the assumed fps
                        md['aduration'] = int(md['asamplerate']/float(assumedFps))
if debugging: print 'setting frame aduration',md['aduration']
if md['aduration'] != 0:
frameFinished = av.t(0)
decoded = av.avcodec_decode_audio4(pACodecCtx, pAFrame, ctypes.pointer(frameFinished), packet)
if decoded < 1 or not frameFinished:
print 'error decoding audio'; av.av_free_packet(packet); continue
data_size = av.av_samples_get_buffer_size(None, md['ainchannels'], pAFrame.contents.nb_samples, pACodecCtx.contents.sample_fmt, 1)
if (data_size < 0): break # no audio
# HACK HACK stereo data in planes is split over two buffers. the length is wrong. I don't know why.
if md['ainchannels'] == 2 and md['asamplefmt'] >= 5:
tmp = ctypes.string_at(pAFrame.contents.data[0],data_size/2) + ctypes.string_at(pAFrame.contents.data[1],data_size/2)
else:
tmp = ctypes.string_at(pAFrame.contents.data[0],data_size)
if playingAudio: md['adata'].append(tmp)
foundFrame = (pAFrame.contents.pkt_pts - md['aoffset']) / md['aduration']
if md['frameNumber'] is None: md['aoffset'] = pAFrame.contents.pkt_pts
if (foundFrame > seekFrame) or md['aplayedtoframe'] is None:
av.av_free_packet(packet)
md['aplayedtoframe'] = foundFrame
if videoStream is None: md['frameNumber'] = seekFrame
break
av.av_free_packet(packet)
if videoStream is not None:
while (av.av_read_frame(md['vformat'], packet)>=0):
''' # testing timecode
if packet.stream_index == tcStream: # Is this a packet from the timecode stream?
# not sure what to expect here. seems to be one packet. don't know how to decode it.. or what info it might contain.
print "tcstream packet :"
for x in dir(packet): print x,packet.__getattribute__(x)
'''
if packet.stream_index == videoStream: # Is this a packet from the video stream?
if md['vduration'] == 0:
md['vduration'] = int(packet.duration)
if debugging: print 'setting frame vduration',md['vduration']
if md['vduration'] != 0:
#print 'packet duration,pts',packet.duration,packet.pts
frameFinished = av.t(0)
if (av.avcodec_decode_video2(pVCodecCtx, pVFrame, ctypes.pointer(frameFinished), packet) < 1):
print 'error decoding video'; av.av_free_packet(packet); continue
if not frameFinished: continue
foundFrame = (pVFrame.contents.pkt_pts - md['voffset']) / md['vduration']
if pVFrame.contents.pkt_pts < 0: foundFrame = seekFrame
#print 'v',foundFrame,pVFrame.contents.pkt_pts,md['vduration']
if foundFrame >= seekFrame:
if md['frameNumber'] is None:
md['voffset'] = pVFrame.contents.pkt_pts
if debugging: print 'setting voffset',md['voffset']
av.sws_scale(md['sws'], pVFrame.contents.data, pVFrame.contents.linesize, 0, md['vheight'],
md['vrgb'].contents.data, md['vrgb' ].contents.linesize)
av.av_free_packet(packet)
md['frameNumber'] = foundFrame
break
av.av_free_packet(packet)
if md['frameNumber'] == seekFrame:
if playingAudio and len(md['adata']):
audio = resampleAudio(md['adata'],md)
amixer = pygame.mixer.Sound(buffer(audio))
channel = md.get('achannel',None)
if channel is None or channel.get_queue() is not None: channel = pygame.mixer.find_channel(True)
#pygame.mixer.stop()
channel.queue(amixer)
md['achannel'] = channel
md['amixer'].append(amixer)
# we keep a reference to these buffers for a while to prevent crashes
if len(md['amixer'])>=20: md['amixer'] = md['amixer'][10:]
md['adata'] = []
return True
return False
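# Frame-index arithmetic used in readFrame (illustrative numbers): both the audio and video
# loops map a packet timestamp to a frame index as (pkt_pts - offset) / duration.  For
# example, with voffset 0 and vduration 1001 (a 30000/1001 fps stream on a 1/30000 time
# base), a video packet with pkt_pts 6006 corresponds to frame 6006 / 1001 = 6.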
def open_file(filename, audio=True, frame_offset=0, debugging=False, volume_ups=0):
'''Setup a file decoding nightmare. We actually create two nightmares, one for the audio and one for the video.
This should allow us to synchronize without buffering (though it could be IO heavy, depending on how clever the file caching is).'''
global g_av_is_initialised
if not g_av_is_initialised:
g_av_is_initialised = True
av.av_register_all()
av.av_log_set_level(av.AV_LOG_VERBOSE if debugging else av.AV_LOG_QUIET)
aformat,aresample,ainchannels,aoutchannels,asamplerate,aduration = None,None,0,2,48000,0
if debugging: print "startAudio"
try:
if not audio: raise Exception('unwanted')
for it in range(10):
try:
aformat = av.avformat_alloc_context()
if av.avformat_open_input(aformat, filename, None, None):
raise Exception('error opening audio file %s' % filename)
tmp = av.avformat_find_stream_info(aformat, None)
if tmp >= 0: break # if we don't do this, some fields won't be filled in :-(
except Exception as e:
print '.',e
if it == 9: raise Exception('error retrieving audio stream info %s' % filename)
streams = [aformat.contents.streams[stream].contents.codec.contents.codec_type for stream in xrange(aformat.contents.nb_streams)]
pygame.mixer.init(frequency=48000, size=-16, channels=2, buffer=512)
audioStreams = np.where(np.array(streams) == av.AVMEDIA_TYPE_AUDIO)[0]
        audioStream = audioStreams[-1] # use the last audio stream
pACodecCtx = aformat.contents.streams[audioStream].contents.codec
audioCodec = av.avcodec_find_decoder(pACodecCtx.contents.codec_id)
av.avcodec_open2(pACodecCtx, audioCodec, None)
ainchannels = pACodecCtx.contents.channels
asamplerate = pACodecCtx.contents.sample_rate
asamplefmt = pACodecCtx.contents.sample_fmt
print 'ainfo',ainchannels,asamplerate, asamplefmt
aoutchannels = max(2,ainchannels)
fmt = min(asamplefmt%5,av.AV_SAMPLE_FMT_S32)
aresample = av.av_audio_resample_init(aoutchannels, ainchannels, 48000, asamplerate, av.AV_SAMPLE_FMT_S16, fmt, 16, 10, 0, 0.8)
#print aresample,aresample.contents # fails if NULL
pAFrame = av.av_frame_alloc() # holds the codec frame
#for x in dir(aformat.contents): print x,aformat.contents.__getattribute__(x)
#for x in dir(pACodecCtx.contents): print x,pACodecCtx.contents.__getattribute__(x)
except Exception, e:
if e.args[0] == 'unwanted' :
if debugging:
print "No Audio Requested"
else:
print 'no audio: ', e
audioStream,pACodecCtx,audioCodec,pAFrame,asamplefmt = None,None,None,None,None
if debugging: print 'endAudio\nstartVideo'
try:
for it in range(10):
print '.',
try:
vformat = av.avformat_alloc_context()
if av.avformat_open_input(vformat, filename, None, None):
raise Exception('error opening video file %s' % filename)
streams = [vformat.contents.streams[stream].contents.codec.contents.codec_type for stream in xrange(vformat.contents.nb_streams)]
tmp = av.avformat_find_stream_info(vformat, None)
if tmp >= 0: break # if we don't do this, some fields won't be filled in :-(
except:
pass
if it == 9: raise Exception('error retrieving video stream info %s' % filename)
videoStream = streams.index(av.AVMEDIA_TYPE_VIDEO) # the first video stream
av.av_dump_format(vformat, 0, filename, False)
pVCodecCtx = vformat.contents.streams[videoStream].contents.codec
#for x in dir(pVCodecCtx.contents): print x,pVCodecCtx.contents.__getattribute__(x)
videoCodec = av.avcodec_find_decoder(pVCodecCtx.contents.codec_id)
av.avcodec_open2(pVCodecCtx, videoCodec, None)
pVFrame = av.av_frame_alloc() # holds the codec frame
pVFrameRGB = av.av_frame_alloc() # holds the decoded frame
vwidth,vheight = pVCodecCtx.contents.width,pVCodecCtx.contents.height
vwidth,vheight = int((vwidth+7)/8)*8, int((vheight+7)/8)*8
numBytes = vwidth * vheight * 3
vbuffer=ctypes.ARRAY(ctypes.c_uint8, numBytes)() # make a buffer; TODO what about buffer alignment?
av.avpicture_fill( ctypes.cast(pVFrameRGB,ctypes.POINTER(av.AVPicture)), vbuffer, av.PIX_FMT_RGB24, vwidth, vheight)
vfps = vformat.contents.streams[videoStream].contents.avg_frame_rate.num / float(vformat.contents.streams[videoStream].contents.avg_frame_rate.den+1e-8)
if vfps == 0.0: vfps = 30.0
if debugging: print 'vfps', vfps
aduration = asamplerate/vfps # NOTE, we believe in this case the audio packet pts is SAMPLES, not TICKS. aduration is SAMPLES PER FRAME
vmaxframe = vformat.contents.streams[videoStream].contents.nb_frames
if vmaxframe <= 0: print 'vmax error',vmaxframe; vmaxframe = 80000
except Exception, e:
print 'no video: ',e
videoStream,pVCodecCtx,videoCodec,pVFrame,pVFrameRGB,vwidth,vheight = None,None,None,None,None,0,0
vmaxframe=0
vbuffer=''
vfps = None
# given we have a video stream, attempt to read timecode from data stream
try:
tcStream = streams.index(av.AVMEDIA_TYPE_DATA)
pTcCodecCtx = vformat.contents.streams[tcStream].contents.codec
pTcFrame = av.av_frame_alloc() # holds the codec frame
#for x in dir(dformat.contents.streams[timecodeStream].contents): print x,dformat.contents.streams[timecodeStream].contents.__getattribute__(x)
#timebase = dformat.contents.streams[timecodeStream].contents.time_base
#print 'tb',timebase, timebase.num, timebase.den
metadataPointer = vformat.contents.streams[tcStream].contents.metadata
tag = ctypes.POINTER(av.AVDictionaryEntry)()
e = av.av_dict_get(metadataPointer, "timecode", tag, av.AV_DICT_IGNORE_SUFFIX)
if debugging: print 'e',e, 'k',e.contents.key, 'v',e.contents.value
timecode = e.contents.value
except Exception, e:
print "no timecode:", e
pTcCodecCtx, tcStream, pTcFrame = None, None, None
timecode = None
if pVCodecCtx is not None:
sws = av.sws_getCachedContext(None,vwidth,vheight,pVCodecCtx.contents.pix_fmt,vwidth,vheight,av.PIX_FMT_RGB24,av.SWS_BICUBIC,None,None,None)
else:
sws = None
md = {'dstream':tcStream,'dframe':pTcFrame,'dcodec':pTcCodecCtx,
'adata':[], 'amixer':[],'aformat':aformat, 'acodec':pACodecCtx, 'aframe':pAFrame, 'astream':audioStream, 'aduration':aduration,\
'aresample':aresample, 'alookup':None, 'ainchannels':ainchannels, 'aoutchannels':aoutchannels, 'asamplerate':asamplerate, 'asamplefmt':asamplefmt, 'aplayedtoframe':None, 'aoffset':0, \
'vformat':vformat, 'vcodec':pVCodecCtx, 'vframe':pVFrame, 'vrgb':pVFrameRGB, 'vbuffer':vbuffer, 'vwidth':vwidth, 'vheight':vheight, 'vstream':videoStream, 'vduration':0, 'voffset':0, 'vmaxframe':vmaxframe, \
'sws':sws, 'frameNumber':None, 'timecode':timecode, 'fps':vfps}
setVolume(md, volume_ups)
readFrame(md, frame_offset, debugging=debugging)
#print md
return md
if __name__ == '__main__':
from UI import QGLViewer,QApp,GLMeshes
import sys
def set_frame_cb(frame):
global md
readFrame(md, seekFrame=frame)
img = np.frombuffer(md['vbuffer'],dtype=np.uint8).reshape(md['vheight'],md['vwidth'],3)
image_mesh = QApp.app.getLayer('image_mesh')
image_mesh.setImage(img)
view = QApp.view()
view.refreshImageData()
view.updateGL()
if len(sys.argv) > 1:
filename = sys.argv[1]
md = open_file(filename)
if len(sys.argv) > 2: setVolume(md,int(sys.argv[2]))
img = np.frombuffer(md['vbuffer'],dtype=np.uint8).reshape(md['vheight'],md['vwidth'],3)
h,w = md['vheight']/2,md['vwidth']/2
img_vs = [[-w,-h,0],[w,-h,0],[w,h,0],[-w,h,0]]
img_fs = [[0,1,2,3]]
img_ts = np.array([[1,0,0,0],[0,1,0,1000],[0,0,1,0]], dtype=np.float32)
img_vts = [[0,1],[1,1],[1,0],[0,0]]
image_mesh = GLMeshes(names=['image_mesh'],verts=[img_vs],faces=[img_fs],transforms=[img_ts],bones = [[]], vts=[img_vts])
image_mesh.setImage(img)
QGLViewer.makeViewer(timeRange=(0,md['vmaxframe'],1,md['fps']), callback=set_frame_cb, layers={'image_mesh':image_mesh})
exit()
#'Imaginarium Movie Playback Tool'
if len(sys.argv) > 1:
import cv2
appname = sys.argv.pop(0)
filename = sys.argv.pop(0)
rotate = False
if filename == '-r180': rotate = True; filename = sys.argv.pop(0)
md1 = open_file(filename, audio=False)
img1 = np.frombuffer(md1['vbuffer'],dtype=np.uint8).reshape(md1['vheight'],md1['vwidth'],3)
fn = filename.rpartition('.')[0]+'.%04d.png'
assert fn != filename and fn != ''
fi,fo = 0,None
if sys.argv: fi = int(sys.argv.pop(0))
if sys.argv: fo = int(sys.argv.pop(0))
while readFrame(md1, fi):
if rotate: img1[:] = img1.ravel()[::-1].reshape(img1.shape)
cv2.imwrite(fn % fi, img1)
fi += 1
print '\rwrote',fi,'frames',
sys.stdout.flush()
if fi == fo: break
exit()
|
|
'''Tests for UITable.'''
from mysite import settings
from scisheets.core.helpers.serialize_deserialize import serialize, \
deserialize
from scisheets.core.column import Column
from scisheets.core.table import NAME_COLUMN_STR, Table
from scisheets.helpers.command_dict import CommandDict
from Tree.named_tree import GLOBAL_SEPARATOR
from ui_table import UITable
from django.test import TestCase # Provides mocks
import json
import random
# Constants
COLUMN_NAMES = ['A', 'B', 'C']
DATA = [[1, 2, 3], [10, 20, 30], [100, 200, 300]]
DATA_STRING = ['AA', 'BB', 'CC']
LARGE_NUMBER = 1000
NCOL = 30
NROW = 3
TABLE_NAME = "MY_TABLE"
IGNORE_TEST = False
########################################################
# Utility Functions And Classes
########################################################
def _getNode(table, target, excludes=None):
"""
Gets a node of the specified class.
:param Table table: Table to search
:param str/Type target: Table or Column
:param list-of-Node excludes: nodes to exclude
:return NamedTree node:
"""
if excludes is None:
excludes = []
nodes = table.getAllNodes()
if isinstance(target, str):
if target == "Table":
cls = Table
else:
cls = Column
else:
cls = target
for _ in range(LARGE_NUMBER):
index = random.randint(0,len(nodes)-1)
node = nodes[index]
if table.isNameColumn(node):
continue
if isinstance(node, cls):
if not node in excludes:
return node
raise RuntimeError("Could not find a node.")
def _evaluateMockedResponse(table, cmd_dict,
success=True, is_save=True):
"""
Evaluates if the response from a mock is as expected.
:param Table table:
:param dict cmd_dict:
:param bool success: value of success
:param bool is_save: value of is_save
"""
response, returned_is_save = \
table.processCommand(cmd_dict)
return (response['success'] == success) \
and returned_is_save == is_save
########################################################
# Test Classes
########################################################
class TestUITableCommandsCell(TestCase):
def testCellUpdate(self):
if IGNORE_TEST:
return
table = UITable.createRandomTable(TABLE_NAME,
NROW, NCOL)
before_table = table.copy()
column_index = 3
column = table.getChildAtPosition(column_index)
column_name = column.getName(is_global_name=False)
ROW_INDEX = 2
NEW_VALUE = 9999
cmd_dict = CommandDict.createCommandDict({
'target': 'Cell',
'command': 'Update',
'table_name': None,
'column_name': column_name,
'row_index': ROW_INDEX,
'value': NEW_VALUE
})
table.processCommand(cmd_dict)
self.assertEqual(int(table.getCell(ROW_INDEX, column_name)),
NEW_VALUE)
for c in range(table.numColumns()):
self.assertEqual(before_table.getColumns()[c].getName(),
table.getColumns()[c].getName())
for r in range(table.numRows()):
if not (r == ROW_INDEX and c == column_index):
self.assertEqual(before_table.getCell(r,c),
table.getCell(r,c))
class TestUITableCommandsTableAndColumn(TestCase):
def setUp(self):
self.cmd_dict = CommandDict.createCommandDict({
'target': None,
'command': None,
'table_name': None,
'column_name': None,
'column_index': -1,
'row_index': None,
'value': None,
})
def _testAppendAndInsert(self, target, command):
"""
:param str target: Table or Column
:param str command: 'Append' or 'Insert'
"""
new_name = "NEW_COLUMN"
node = _getNode(self.table, target)
node_name = node.getName()
self.cmd_dict['target'] = target
self.cmd_dict['command'] = command
self.cmd_dict['column_name'] = node_name
self.cmd_dict['args'] = [new_name]
expected_columns = self.table.numColumns() + 1
if command == "Append":
expected_position = node.getPosition() + 1
else:
expected_position = node.getPosition()
self.table.processCommand(self.cmd_dict)
self.assertEqual(self.table.numColumns(), expected_columns)
new_node = self.table.childFromName(new_name, is_relative=False)
self.assertIsNotNone(new_node)
self.assertEqual(new_node.getPosition(), expected_position)
def testAppendAndInsert(self):
if IGNORE_TEST:
return
targets = ["Column", "Table"]
commands = ["Append", "Insert"]
for target in targets:
for command in commands:
self.table = UITable.createRandomHierarchicalTable(TABLE_NAME,
NROW, NCOL, 0.3, prob_detach=0.2)
self._testAppendAndInsert(target, command)
def _testDelete(self, target):
"""
:param str target: Table or Column
"""
node = _getNode(self.table, target)
node_name = node.getName()
old_num_nodes = len(self.table.getAllNodes())
before_table = self.table.copy()
self.cmd_dict['target'] = target
self.cmd_dict['command'] = 'Delete'
self.cmd_dict['column_name'] = node_name
expected_num_nodes = old_num_nodes - len(node.getAllNodes())
self.table.processCommand(self.cmd_dict)
self.assertEqual(len(self.table.getAllNodes()), expected_num_nodes)
for r in range(self.table.numRows()):
after_row = self.table.getRow(row_index=r)
before_row = before_table.getRow(row_index=r)
for k in after_row.keys():
self.assertEqual(after_row[k], before_row[k])
def testDelete(self):
if IGNORE_TEST:
return
self.table = UITable.createRandomHierarchicalTable(TABLE_NAME,
NROW, NCOL, 0.3, prob_detach=0.2)
self._testDelete("Table")
self._testDelete("Column")
def _testFormula(self, column):
"""
:param Column column: column to evaluate
"""
colnm = column.getName(is_global_name=False)
formula = \
'''
a = 5
%s = range(a)
''' % colnm
self.cmd_dict['target'] = 'Column'
self.cmd_dict['command'] = 'Formula'
self.cmd_dict['column_name'] = column.getName()
self.cmd_dict['args'] = [formula]
self.table.processCommand(self.cmd_dict)
self.assertEqual(column.getFormula(), formula)
self.assertEqual(column.getCells(), range(5))
def testFormula(self):
if IGNORE_TEST:
return
self.table = UITable.createRandomHierarchicalTable(TABLE_NAME,
NROW, NCOL, 0.3, prob_detach=0.2)
leaves = self.table.getDataColumns(is_recursive=True,
is_attached=False)
[self._testFormula(c) for c in leaves]
def _testHide(self, target):
"""
:param str target: Table or Column
"""
node = _getNode(self.table, target)
nodes = node.getAllNodes()
node_name = node.getName()
self.cmd_dict['target'] = target
self.cmd_dict['command'] = 'Hide'
self.cmd_dict['column_name'] = node_name
self.table.processCommand(self.cmd_dict)
[self.assertTrue(n in self.table.getHiddenNodes()) for n in nodes]
def testHide(self):
if IGNORE_TEST:
return
self.table = UITable.createRandomHierarchicalTable(TABLE_NAME,
NROW, NCOL, 0.3, prob_detach=0.2)
self._testHide("Column")
self._testHide("Table")
def _testMove(self, target):
"""
    :param str target: Table or Column
"""
source = _getNode(self.table, target)
destination = source
while destination == source:
destination = _getNode(self.table, target)
self.cmd_dict['target'] = target
self.cmd_dict['command'] = 'Move'
self.cmd_dict['column_name'] = source.getName()
self.cmd_dict['args'] = [destination.getName()]
expected_position = destination.getPosition()
expected_parent = destination.getParent()
self.table.processCommand(self.cmd_dict)
self.assertEqual(source.getPosition(), expected_position)
self.assertEqual(source.getParent(), expected_parent)
def testMove(self):
#if IGNORE_TEST:
# return
self.table = UITable.createRandomHierarchicalTable(TABLE_NAME,
NROW, NCOL, 0.3, prob_detach=0.2)
self._testMove("Table")
self._testMove("Column")
def _testRename(self, target):
node = _getNode(self.table, target)
current_name = node.getName(is_global_name=False)
new_name = "New_Name"
self.cmd_dict['target'] = target
self.cmd_dict['command'] = 'Rename'
self.cmd_dict['column_name'] = current_name
self.cmd_dict['args'] = [new_name]
num_columns = self.table.numColumns()
self.table.processCommand(self.cmd_dict)
self.assertEqual(self.table.numColumns(), num_columns)
new_node = self.table.childFromName(new_name, is_relative=False)
    self.assertIsNotNone(new_node)
def testRename(self):
if IGNORE_TEST:
return
self.table = UITable.createRandomHierarchicalTable(TABLE_NAME,
NROW, NCOL, 0.3, prob_detach=0.2)
self._testRename("Table")
self._testRename("Column")
def _testTablize(self, target):
node = _getNode(self.table, target)
table_name = "%s_%d" % (node.getName(is_global_name=False),
random.randint(1, 1000))
if node.getParent() != node.getRoot(is_attached=False):
full_table_name = "%s%s%s" % (node.getParent().getName(),
GLOBAL_SEPARATOR, table_name)
else:
full_table_name = table_name
self.cmd_dict['target'] = target
self.cmd_dict['command'] = 'Tablize'
self.cmd_dict['column_name'] = node.getName()
self.cmd_dict['args'] = [table_name]
expected = len(self.table.getAllNodes()) + 2
self.table.processCommand(self.cmd_dict)
self.assertEqual(len(self.table.getAllNodes()), expected)
new_table = self.table.childFromName(full_table_name,
is_relative=False)
self.assertIsNotNone(new_table)
new_node = new_table.childFromName(node.getName(is_global_name=False))
self.assertTrue(node.isEquivalent(new_node, is_exception=True))
def testTablize(self):
#if IGNORE_TEST:
# return
self.table = UITable.createRandomHierarchicalTable(TABLE_NAME,
NROW, NCOL, 0.3, prob_detach=0.2)
for _ in range(1):
self._testTablize("Table")
self._testTablize("Column")
class TestUITableFunctions(TestCase):
def setUp(self):
if IGNORE_TEST:
return
self.table = UITable.createRandomHierarchicalTable(TABLE_NAME,
NROW, NCOL, 0.3, prob_detach=0.2)
def testAddEscapesToQuotes(self):
if IGNORE_TEST:
return
list_of_str = ["xy", "x'y'"]
mod_list_of_str = UITable._addEscapesToQuotes(list_of_str)
self.assertEqual(mod_list_of_str[1].index("\\"), 1)
list_of_str = range(3)
mod_list_of_str = UITable._addEscapesToQuotes(list_of_str)
self.assertTrue(list_of_str == mod_list_of_str)
def testGetHiddenColumns(self):
if IGNORE_TEST:
return
columns = self.table.getColumns()
for column in columns:
self.table.hideChildren(column)
self.assertTrue(column in self.table._hidden_children)
self.table.hideChildren([column])
self.assertTrue(column in self.table._hidden_children)
self.assertEqual(len(self.table._hidden_children) , 1)
self.assertEqual(self.table.getHiddenNodes(), [column])
self.table.unhideChildren(column)
self.assertEqual(len(self.table._hidden_children) , 0)
def testIsEquivalentAndCopy(self):
if IGNORE_TEST:
return
table_copy = self.table.copy()
self.assertTrue(self.table.isEquivalent(table_copy))
node = _getNode(self.table, "Column")
node.removeTree()
self.assertFalse(self.table.isEquivalent(table_copy))
def testSerializeDeserialize(self):
if IGNORE_TEST:
return
json_str = serialize(self.table)
new_table = deserialize(json_str)
self.assertTrue(self.table.isEquivalent(new_table,
is_exception=True))
def createNestedTable(self):
"""
Table
A
B
Subtable
C
D
:return dict: name, object pairs
"""
if IGNORE_TEST:
return
table = UITable("Table")
result = {"Table": table}
result["A"] = Column("A")
table.addColumn(result["A"])
result["B"] = Column("B")
table.addColumn(result["B"])
subtable = UITable("Subtable")
result["Subtable"] = subtable
table.addChild(subtable)
result["C"] = Column("C")
subtable.addColumn(result["C"])
result["D"] = Column("D")
subtable.addColumn(result["D"])
return result
def _testGetVisibleColumns(self, hide_names, expected_names):
"""
Hides the list of names specified and then
tests that the result is the expected_names.
:param list-of-str hide_names:
:param list-of-str expected_names:
"""
if IGNORE_TEST:
return
node_dict = self.createNestedTable()
table = node_dict["Table"]
for name in hide_names:
table.hideChildren(node_dict[name])
visibles = table.getVisibleNodes()
# Add two to account for name columns
self.assertEqual(len(expected_names)+2, len(visibles))
for name in expected_names:
if name == NAME_COLUMN_STR:
continue
if not node_dict[name] in visibles:
import pdb; pdb.set_trace()
self.assertTrue(node_dict[name] in visibles)
def testGetVisibleColumns(self):
if IGNORE_TEST:
return
self._testGetVisibleColumns(["C"],
["A", "B", "Subtable", "D"])
class _PseudoVersionedGood(object):
"""
Used to mock VersionedFile
"""
def undo(self):
return
def redo(self):
return
def getFilepath(self):
return None
def getDirectory(self):
return "."
def getMaxVersions(self):
return 1
class _PseudoVersionedBad(_PseudoVersionedGood):
"""
Used to mock VersionedFile
"""
def undo(self):
raise RuntimeError()
def redo(self):
raise RuntimeError()
class TestUITableSheetCommands(TestCase):
def setUp(self):
self.table = UITable.createRandomHierarchicalTable(TABLE_NAME,
2, 10, 0.3, prob_detach=0.2)
self.mock_dict = {}
self.cmd_dict = CommandDict.createCommandDict({
'target': "Sheet",
'command': None,
'table_name': None,
'column_name': None,
'column_index': -1,
'row_index': None,
'args': [],
'value': None,
})
def _evaluateMockedResponse(self, success=True, is_save=True):
self.assertTrue(_evaluateMockedResponse(self.table,
self.cmd_dict, success=success, is_save=is_save))
def testExport(self):
if IGNORE_TEST:
return
def pseudoExportGood(function_name="x",
inputs=None, outputs=None, user_directory=None):
self.mock_dict["pseudoExport"] = True
return None
def pseudoExportBad(function_name="x",
inputs=None, outputs=None, user_directory=None):
self.mock_dict["pseudoExport"] = True
return "Error"
def _getNodes(count):
nodes = []
while len(nodes) < count:
excludes = list(nodes)
node = _getNode(self.table, "Column", excludes=excludes)
if not Table.isNameColumn(node):
nodes.append(node)
return nodes
nodes = [n.getName(is_global_name=False) for n in _getNodes(4)]
inputs = nodes[:2]
outputs = nodes[2:]
function_name = 'my_func'
self.cmd_dict["command"] = "Export"
args = [function_name, ', '.join(inputs), ', '.join(outputs)]
self.cmd_dict["args"] = args
# Successful export
self.table.export = pseudoExportGood # Mock the export function
self._evaluateMockedResponse(success=True, is_save=True)
# Unsuccessful export
self.table.export = pseudoExportBad # Mock the export function
self._evaluateMockedResponse(success=False, is_save=False)
def testRedo(self):
if IGNORE_TEST:
return
self.cmd_dict["command"] = "Redo"
self.table.setVersionedFile(_PseudoVersionedGood())
self._evaluateMockedResponse(success=True, is_save=False)
self.table.setVersionedFile(_PseudoVersionedBad())
self._evaluateMockedResponse(success=False, is_save=False)
def _testUnhide(self, target):
#if IGNORE_TEST:
# return
self.table.unhideAllChildren()
self.cmd_dict["command"] = "Unhide"
if target == "Column":
node = _getNode(self.table, target)
self.cmd_dict["target"] = "Column"
self.cmd_dict["column_name"] = node.getName()
elif target == "Table":
node = _getNode(self.table, target)
self.cmd_dict["target"] = target
self.cmd_dict["column_name"] = node.getName()
else:
# Sheet
self.cmd_dict["target"] = "Sheet"
node = _getNode(self.table, "Column")
self.cmd_dict["command"] = "UnhideAll"
self.table.hideChildren([node])
self.assertTrue(self.table.isHiddenChild(node))
self._evaluateMockedResponse(success=True, is_save=True)
self.assertFalse(self.table.isHiddenChild(node))
def testUnhide(self):
if IGNORE_TEST:
return
self._testUnhide("Column")
self._testUnhide("Table")
self._testUnhide("Sheet")
def testUndo(self):
if IGNORE_TEST:
return
self.cmd_dict["command"] = "Undo"
self.table.setVersionedFile(_PseudoVersionedGood())
self._evaluateMockedResponse(success=True, is_save=False)
self.table.setVersionedFile(_PseudoVersionedBad())
self._evaluateMockedResponse(success=False, is_save=False)
class TestUITableCommandsRow(TestCase):
def setUp(self):
self.table = UITable.createRandomHierarchicalTable(TABLE_NAME,
4, 10, 0.3, prob_detach=0.2)
self.row_index = 1
self.num_rows = self.table.numRows()
self.cmd_dict = CommandDict.createCommandDict({
'target': 'Row',
'command': None,
'table_name': None,
'column_name': NAME_COLUMN_STR,
'column_index': None,
'args': [self.row_index],
'row_index': self.row_index,
'value': None,
})
self.row_index_values = self.table.getRow(row_index=self.row_index)
def _evaluateMockedResponse(self, success=True, is_save=True):
self.assertTrue(_evaluateMockedResponse(self.table,
self.cmd_dict, success=success, is_save=is_save))
def testSimpleCommands(self):
if IGNORE_TEST:
return
def pseudoRenameRow(row_index, new_name):
return None
def pseudoDeleteRow(rows):
return None
def pseudoAddRow(row, row_index):
return None
mocks = {"Move": [pseudoRenameRow, 'renameRow'],
"Delete": [pseudoDeleteRow, 'deleteRow'],
"Insert": [pseudoAddRow, 'addRow'],
"Append": [pseudoAddRow, 'addRow'],
}
for command in mocks.keys():
self.cmd_dict["command"] = command
# Insert the mocks
setattr(self.table, mocks[command][1], mocks[command][0])
self._evaluateMockedResponse(success=True, is_save=True)
if __name__ == '__main__':
  import unittest
  unittest.main()
|
|
"""
Utilities and plugins for running tests with nose
Django-nose database context to run tests in two phases:
- Stage 1 runs all tests that don't require DB access (tests that don't inherit
  from TransactionTestCase)
- Stage 2 runs all DB tests (tests that do inherit from TransactionTestCase)
Adapted from testrunner.TwoStageTestRunner
Based on http://www.caktusgroup.com/blog/2013/10/02/skipping-test-db-creation/
"""
import logging
import os
import sys
import threading
from fnmatch import fnmatch
from django.conf import settings
from django.core.management import call_command
from django.db.backends.base.creation import TEST_DATABASE_PREFIX
from django.db.utils import OperationalError
from django.test.utils import get_unique_databases_and_mirrors
from couchdbkit import ResourceNotFound
from couchdbkit.ext.django import loading
from django_nose.plugin import DatabaseContext
from unittest.mock import Mock, patch
from nose.plugins import Plugin
from nose.tools import nottest
from requests.exceptions import HTTPError
from dimagi.utils.parsing import string_to_boolean
from corehq.tests.noseplugins.cmdline_params import CmdLineParametersPlugin
from corehq.util.couchdb_management import couch_config
from corehq.util.test_utils import timelimit, unit_testing_only
log = logging.getLogger(__name__)
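# A minimal sketch of the two-stage split described in the module docstring.
# This is illustrative only and is not the runner used by this project; it
# assumes each element of `test_cases` is an instantiated test case object.
def _split_by_db_dependence(test_cases):
    from django.test import TransactionTestCase
    non_db_tests, db_tests = [], []
    for case in test_cases:
        # TransactionTestCase (and its subclass TestCase) marks DB-dependent tests
        if isinstance(case, TransactionTestCase):
            db_tests.append(case)
        else:
            non_db_tests.append(case)
    return non_db_tests, db_tests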
class HqTestFinderPlugin(Plugin):
"""Find tests in all modules within "tests" packages"""
enabled = True
INCLUDE_DIRS = [
"corehq/ex-submodules/*",
"submodules/dimagi-utils-src",
"submodules/django-digest-src",
"submodules/toggle",
"extensions/*/*",
"custom",
]
def options(self, parser, env):
"""Avoid adding a ``--with`` option for this plugin."""
def configure(self, options, conf):
# do not call super (always enabled)
import corehq
abspath = os.path.abspath
dirname = os.path.dirname
self.hq_root = dirname(dirname(abspath(corehq.__file__)))
@staticmethod
def pathmatch(path, pattern):
"""Test if globbing pattern matches path
>>> join = os.path.join
>>> match = HqTestFinderPlugin.pathmatch
>>> match(join('a', 'b', 'c'), 'a/b/c')
True
>>> match(join('a', 'b', 'c'), 'a/b/*')
True
>>> match(join('a', 'b', 'c'), 'a/*/c')
True
>>> match(join('a'), 'a/*')
True
>>> match(join('a', 'b', 'c'), 'a/b')
>>> match(join('a', 'b', 'c'), 'a/*')
>>> match(join('a', 'b', 'c'), 'a/*/x')
False
>>> match(join('a', 'b', 'x'), 'a/b/c')
False
>>> match(join('a', 'x', 'c'), 'a/b')
False
:returns: `True` if the pattern matches. `False` if it does not
match. `None` if the match pattern could match, but
            has fewer elements than the path.
"""
parts = path.split(os.path.sep)
patterns = pattern.split("/")
result = all(fnmatch(part, pat) for part, pat in zip(parts, patterns))
if len(patterns) >= len(parts):
return result
return None if result else False
def wantDirectory(self, directory):
root = self.hq_root + os.path.sep
if directory.startswith(root):
relname = directory[len(root):]
results = [self.pathmatch(relname, p) for p in self.INCLUDE_DIRS]
log.debug("want directory? %s -> %s", relname, results)
if any(results):
return True
else:
log.debug("ignored directory: %s", directory)
return None
def wantFile(self, path):
"""Want all .py files in .../tests dir (and all sub-packages)"""
pysrc = os.path.splitext(path)[-1] == ".py"
if pysrc:
parent, base = os.path.split(path)
while base and len(parent) > len(self.hq_root):
if base == "tests":
return True
parent, base = os.path.split(parent)
def wantModule(self, module):
"""Want all modules in "tests" package"""
return "tests" in module.__name__.split(".")
class ErrorOnDbAccessContext(object):
"""Ensure that touching a database raises an error."""
def __init__(self, tests, runner):
pass
def setup(self):
"""Disable database access"""
self.original_db_enabled = settings.DB_ENABLED
settings.DB_ENABLED = False
self.db_patch = patch('django.db.backends.utils.CursorWrapper')
db_mock = self.db_patch.start()
error = RuntimeError(
"Attempt to access database in a 'no database' test suite. "
"It could be that you don't have 'BASE_ADDRESS' set in your "
"localsettings.py. If your test really needs database access "
"it should subclass 'django.test.testcases.TestCase' or a "
"similar test base class.")
db_mock.side_effect = error
class CouchSpec(object):
dbname = None
view = Mock(return_value=[])
def mock_couch(app):
dbname = dbs.get(app, main_db_url).rsplit("/", 1)[1]
return Mock(name=dbname, dbname=dbname, spec_set=CouchSpec)
# register our dbs with the extension document classes
main_db_url = settings.COUCH_DATABASE
dbs = dict(settings.COUCHDB_DATABASES)
self.db_classes = db_classes = []
for app, value in loading.couchdbkit_handler.app_schema.items():
for cls in value.values():
db_classes.append(cls)
cls.set_db(mock_couch(app))
def teardown(self):
"""Enable database access"""
settings.DB_ENABLED = self.original_db_enabled
for cls in self.db_classes:
del cls._db
self.db_patch.stop()
class HqdbContext(DatabaseContext):
"""Database setup/teardown
In addition to the normal django database setup/teardown, also
setup/teardown couch databases. Database setup/teardown may be
skipped, depending on the presence and value of an environment
variable (`REUSE_DB`). Typical usage is `REUSE_DB=1` which means
skip database setup and migrations if possible and do not teardown
databases after running tests. If connection fails for any test
database in `settings.DATABASES` all databases will be re-created
and migrated.
When using REUSE_DB=1, you may also want to provide a value for the
--reusedb option, either reset, flush, migrate, or teardown.
./manage.py test --help will give you a description of these.
"""
def __init__(self, tests, runner):
reuse_db = (CmdLineParametersPlugin.get('reusedb')
or string_to_boolean(os.environ.get("REUSE_DB") or "0"))
self.reuse_db = reuse_db
self.skip_setup_for_reuse_db = reuse_db and reuse_db != "reset"
self.skip_teardown_for_reuse_db = reuse_db and reuse_db != "teardown"
super(HqdbContext, self).__init__(tests, runner)
def should_skip_test_setup(self):
return CmdLineParametersPlugin.get('collect_only')
@timelimit(480)
def setup(self):
if self.should_skip_test_setup():
return
from corehq.blobs.tests.util import TemporaryFilesystemBlobDB
self.blob_db = TemporaryFilesystemBlobDB()
self.old_names = self._get_databases()
if self.skip_setup_for_reuse_db and self._databases_ok():
if self.reuse_db == "migrate":
call_command('migrate_multi', interactive=False)
if self.reuse_db == "flush":
flush_databases()
return # skip remaining setup
if self.reuse_db == "reset":
self.reset_databases()
print("", file=sys.__stdout__) # newline for creating database message
if self.reuse_db:
print("REUSE_DB={} ".format(self.reuse_db), file=sys.__stdout__, end="")
if self.skip_setup_for_reuse_db:
# pass this on to the Django runner to avoid creating databases
# that already exist
self.runner.keepdb = True
super(HqdbContext, self).setup()
temporary_db_setup()
def reset_databases(self):
self.delete_couch_databases()
# tear down all databases together to avoid dependency issues
teardown = []
for connection, db_name, is_first in self.old_names:
try:
connection.ensure_connection()
teardown.append((connection, db_name, is_first))
except OperationalError:
pass # ignore databases that don't exist
self.runner.teardown_databases(reversed(teardown))
def _databases_ok(self):
for connection, db_name, _ in self.old_names:
db = connection.settings_dict
assert db["NAME"].startswith(TEST_DATABASE_PREFIX), db["NAME"]
try:
connection.ensure_connection()
except OperationalError as e:
print(str(e), file=sys.__stderr__)
return False
return True
def _get_databases(self):
from django.db import connections
old_names = []
test_databases, mirrored_aliases = get_unique_databases_and_mirrors()
assert not mirrored_aliases, "DB mirrors not supported"
for signature, (db_name, aliases) in test_databases.items():
alias = list(aliases)[0]
connection = connections[alias]
old_names.append((connection, db_name, True))
return old_names
def delete_couch_databases(self):
for db in get_all_test_dbs():
try:
db.server.delete_db(db.dbname)
log.info("deleted database %s", db.dbname)
except ResourceNotFound:
log.info("database %s not found! it was probably already deleted.",
db.dbname)
def teardown(self):
if self.should_skip_test_setup():
return
self.blob_db.close()
if self.skip_teardown_for_reuse_db:
return
self.delete_couch_databases()
# HACK clean up leaked database connections
from corehq.sql_db.connections import connection_manager
connection_manager.dispose_all()
# in case this was set before we want to remove it now
self.runner.keepdb = False
# tear down in reverse order
self.old_names = reversed(self.old_names)
super(HqdbContext, self).teardown()
def temporary_db_setup():
"""Temporary setup while V1 ledger models are being removed
Can be removed when migrations are added to delete the tables.
"""
from django.db import connection
with connection.cursor() as cursor:
cursor.execute("""
/*
StockState table must be deleted so TransactionTestCase can flush
the db. See commit 07329e61fefaf1c563c998a164029d735d11a4fd
Prevents CommandError: Database test_commcarehq couldn't be flushed.
SQL error:
ERROR: cannot truncate a table referenced in a foreign key constraint
DETAIL: Table "commtrack_stockstate" references "products_sqlproduct".
*/
DROP TABLE IF EXISTS commtrack_stockstate;
DROP TABLE IF EXISTS stock_stocktransaction;
DROP TABLE IF EXISTS stock_stockreport;
DROP TABLE IF EXISTS stock_docdomainmapping;
""")
def print_imports_until_thread_change():
"""Print imports until the current thread changes
This is useful for troubleshooting premature test runner exit
(often caused by an import when running tests --with-doctest).
"""
main = threading.current_thread()
print("setting up import hook on %s" % main, file=sys.__stdout__)
class InfoImporter(object):
def find_module(self, name, path=None):
thread = threading.current_thread()
# add code here to check for other things happening on import
#if name == 'gevent':
# sys.exit()
print("%s %s" % (thread, name), file=sys.__stdout__)
if thread is not main:
sys.exit()
return None
# Register the import hook. See https://www.python.org/dev/peps/pep-0302/
sys.meta_path.append(InfoImporter())
@nottest
@unit_testing_only
def get_all_test_dbs():
all_dbs = list(couch_config.all_dbs_by_db_name.values())
for db in all_dbs:
if '/test_' not in db.uri:
raise ValueError("not a test db url: db=%s url=%r" % (db.dbname, db.uri))
return all_dbs
@unit_testing_only
def flush_databases():
"""
Best effort at emptying all documents from all databases.
Useful when you break a test and it doesn't clean up properly. This took
about 5 seconds to run when trying it out.
"""
print("Flushing test databases, check yourself before you wreck yourself!", file=sys.__stdout__)
for db in get_all_test_dbs():
try:
db.flush()
except (ResourceNotFound, HTTPError):
pass
call_command('flush', interactive=False)
if os.environ.get("HQ_TESTS_PRINT_IMPORTS"):
print_imports_until_thread_change()
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Exceptions raised by the Horizon code and the machinery for handling them.
"""
import logging
import os
import sys
from django.contrib.auth import logout
from django.http import HttpRequest
from django.utils import termcolors
from django.utils.translation import ugettext as _
from django.views.debug import SafeExceptionReporterFilter, CLEANSED_SUBSTITUTE
from horizon import messages
from horizon.conf import HORIZON_CONFIG
LOG = logging.getLogger(__name__)
PALETTE = termcolors.PALETTES[termcolors.DEFAULT_PALETTE]
class HorizonReporterFilter(SafeExceptionReporterFilter):
""" Error report filter that's always active, even in DEBUG mode. """
def is_active(self, request):
return True
# TODO(gabriel): This bugfix is cribbed from Django's code. When 1.4.1
# is available we can remove this code.
def get_traceback_frame_variables(self, request, tb_frame):
"""
Replaces the values of variables marked as sensitive with
stars (*********).
"""
# Loop through the frame's callers to see if the sensitive_variables
# decorator was used.
current_frame = tb_frame.f_back
sensitive_variables = None
while current_frame is not None:
if (current_frame.f_code.co_name == 'sensitive_variables_wrapper'
and 'sensitive_variables_wrapper'
in current_frame.f_locals):
# The sensitive_variables decorator was used, so we take note
# of the sensitive variables' names.
wrapper = current_frame.f_locals['sensitive_variables_wrapper']
sensitive_variables = getattr(wrapper,
'sensitive_variables',
None)
break
current_frame = current_frame.f_back
cleansed = []
if self.is_active(request) and sensitive_variables:
if sensitive_variables == '__ALL__':
# Cleanse all variables
for name, value in tb_frame.f_locals.items():
cleansed.append((name, CLEANSED_SUBSTITUTE))
return cleansed
else:
# Cleanse specified variables
for name, value in tb_frame.f_locals.items():
if name in sensitive_variables:
value = CLEANSED_SUBSTITUTE
elif isinstance(value, HttpRequest):
# Cleanse the request's POST parameters.
value = self.get_request_repr(value)
cleansed.append((name, value))
return cleansed
else:
# Potentially cleanse only the request if it's one of the
# frame variables.
for name, value in tb_frame.f_locals.items():
if isinstance(value, HttpRequest):
# Cleanse the request's POST parameters.
value = self.get_request_repr(value)
cleansed.append((name, value))
return cleansed
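# Illustrative only: how a Django view can mark locals for cleansing so that
# the filter above replaces them in error reports. The decorator is Django's
# own; the view itself is a hypothetical example, not part of Horizon.
from django.views.decorators.debug import sensitive_variables
@sensitive_variables('password', 'token')
def _example_sensitive_view(request):
    # Both locals below are replaced with ********* in error reports because
    # HorizonReporterFilter.is_active() always returns True, even with DEBUG on.
    password = request.POST.get('password', '')
    token = request.session.get('token', '')
    return password, token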
class HorizonException(Exception):
""" Base exception class for distinguishing our own exception classes. """
pass
class Http302(HorizonException):
"""
Error class which can be raised from within a handler to cause an
early bailout and redirect at the middleware level.
"""
status_code = 302
def __init__(self, location, message=None):
self.location = location
self.message = message
class NotAuthorized(HorizonException):
"""
Raised whenever a user attempts to access a resource which they do not
have permission-based access to (such as when failing the
:func:`~horizon.decorators.require_perms` decorator).
The included :class:`~horizon.middleware.HorizonMiddleware` catches
``NotAuthorized`` and handles it gracefully by displaying an error
message and redirecting the user to a login page.
"""
status_code = 401
class NotAuthenticated(HorizonException):
"""
Raised when a user is trying to make requests and they are not logged in.
The included :class:`~horizon.middleware.HorizonMiddleware` catches
``NotAuthenticated`` and handles it gracefully by displaying an error
message and redirecting the user to a login page.
"""
status_code = 403
class NotFound(HorizonException):
""" Generic error to replace all "Not Found"-type API errors. """
status_code = 404
class RecoverableError(HorizonException):
""" Generic error to replace any "Recoverable"-type API errors. """
status_code = 100 # HTTP status code "Continue"
class ServiceCatalogException(HorizonException):
"""
Raised when a requested service is not available in the ``ServiceCatalog``
returned by Keystone.
"""
def __init__(self, service_name):
message = 'Invalid service catalog service: %s' % service_name
super(ServiceCatalogException, self).__init__(message)
class AlreadyExists(HorizonException):
"""
Exception to be raised when trying to create an API resource which
already exists.
"""
def __init__(self, name, resource_type):
self.attrs = {"name": name, "resource": resource_type}
self.msg = 'A %(resource)s with the name "%(name)s" already exists.'
def __repr__(self):
return self.msg % self.attrs
def __str__(self):
return self.msg % self.attrs
def __unicode__(self):
return _(self.msg) % self.attrs
class WorkflowError(HorizonException):
""" Exception to be raised when something goes wrong in a workflow. """
pass
class WorkflowValidationError(HorizonException):
"""
Exception raised during workflow validation if required data is missing,
or existing data is not valid.
"""
pass
class HandledException(HorizonException):
"""
Used internally to track exceptions that have gone through
:func:`horizon.exceptions.handle` more than once.
"""
def __init__(self, wrapped):
self.wrapped = wrapped
UNAUTHORIZED = tuple(HORIZON_CONFIG['exceptions']['unauthorized'])
NOT_FOUND = tuple(HORIZON_CONFIG['exceptions']['not_found'])
RECOVERABLE = (AlreadyExists,)
RECOVERABLE += tuple(HORIZON_CONFIG['exceptions']['recoverable'])
def error_color(msg):
return termcolors.colorize(msg, **PALETTE['ERROR'])
def check_message(keywords, message):
"""
Checks an exception for given keywords and raises a new ``ActionError``
with the desired message if the keywords are found. This allows selective
control over API error messages.
"""
exc_type, exc_value, exc_traceback = sys.exc_info()
if set(str(exc_value).split(" ")).issuperset(set(keywords)):
exc_value._safe_message = message
raise
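# Illustrative only (the error text and keywords are hypothetical): attach a
# friendlier message to a lower-level error whose text contains all of the
# given keywords, then re-raise so handle() below can present it.
def _example_check_message_usage(project_id):
    try:
        raise RuntimeError("Cannot find project %s" % project_id)  # stand-in for an API error
    except Exception:
        check_message(("Cannot", "find", "project"),
                      _("The requested project could not be found."))
        raise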
def handle(request, message=None, redirect=None, ignore=False,
escalate=False, log_level=None, force_log=None):
""" Centralized error handling for Horizon.
Because Horizon consumes so many different APIs with completely
different ``Exception`` types, it's necessary to have a centralized
place for handling exceptions which may be raised.
Exceptions are roughly divided into 3 types:
#. ``UNAUTHORIZED``: Errors resulting from authentication or authorization
problems. These result in being logged out and sent to the login screen.
#. ``NOT_FOUND``: Errors resulting from objects which could not be
located via the API. These generally result in a user-facing error
message, but are otherwise returned to the normal code flow. Optionally
a redirect value may be passed to the error handler so users are
returned to a different view than the one requested in addition to the
error message.
    #. ``RECOVERABLE``: Generic API errors which generate a user-facing message
but drop directly back to the regular code flow.
All other exceptions bubble the stack as normal unless the ``ignore``
argument is passed in as ``True``, in which case only unrecognized
errors are bubbled.
If the exception is not re-raised, an appropriate wrapper exception
class indicating the type of exception that was encountered will be
returned.
"""
exc_type, exc_value, exc_traceback = sys.exc_info()
log_method = getattr(LOG, log_level or "exception")
force_log = force_log or os.environ.get("HORIZON_TEST_RUN", False)
force_silence = getattr(exc_value, "silence_logging", False)
# Because the same exception may travel through this method more than
# once (if it's re-raised) we may want to treat it differently
# the second time (e.g. no user messages/logging).
handled = issubclass(exc_type, HandledException)
wrap = False
# Restore our original exception information, but re-wrap it at the end
if handled:
exc_type, exc_value, exc_traceback = exc_value.wrapped
wrap = True
# We trust messages from our own exceptions
if issubclass(exc_type, HorizonException):
message = exc_value
# Check for an override message
elif getattr(exc_value, "_safe_message", None):
message = exc_value._safe_message
# If the message has a placeholder for the exception, fill it in
elif message and "%(exc)s" in message:
message = message % {"exc": exc_value}
if issubclass(exc_type, UNAUTHORIZED):
if ignore:
return NotAuthorized
if not force_silence and not handled:
log_method(error_color("Unauthorized: %s" % exc_value))
if not handled:
if message:
message = _("Unauthorized: %s") % message
# We get some pretty useless error messages back from
# some clients, so let's define our own fallback.
fallback = _("Unauthorized. Please try logging in again.")
messages.error(request, message or fallback)
# Escalation means logging the user out and raising NotAuthorized
# so the middleware will redirect them appropriately.
if escalate:
logout(request)
raise NotAuthorized
# Otherwise continue and present our "unauthorized" error message.
return NotAuthorized
if issubclass(exc_type, NOT_FOUND):
wrap = True
if not force_silence and not handled and (not ignore or force_log):
log_method(error_color("Not Found: %s" % exc_value))
if not ignore and not handled:
messages.error(request, message or exc_value)
if redirect:
raise Http302(redirect)
if not escalate:
return NotFound # return to normal code flow
if issubclass(exc_type, RECOVERABLE):
wrap = True
if not force_silence and not handled and (not ignore or force_log):
# Default recoverable error to WARN log level
log_method = getattr(LOG, log_level or "warning")
log_method(error_color("Recoverable error: %s" % exc_value))
if not ignore and not handled:
messages.error(request, message or exc_value)
if redirect:
raise Http302(redirect)
if not escalate:
return RecoverableError # return to normal code flow
# If we've gotten here, time to wrap and/or raise our exception.
if wrap:
raise HandledException([exc_type, exc_value, exc_traceback])
raise exc_type, exc_value, exc_traceback
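# Illustrative only: the typical call-site shape for handle(). The raised
# error is a stand-in for a failing API call; whether it is swallowed with a
# user-facing message or re-raised depends on the exception classes registered
# in HORIZON_CONFIG (UNAUTHORIZED / NOT_FOUND / RECOVERABLE above), and
# unrecognized exceptions always bubble up.
def _example_handle_usage(request):
    try:
        raise RuntimeError("stand-in for a failing API call")
    except Exception:
        handle(request, _("Unable to retrieve instances: %(exc)s"))
        return []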
|
|
# Copyright (c) Ralph Meijer.
# See LICENSE for details.
"""
Tests for L{wokkel.server}.
"""
from __future__ import division, absolute_import
from twisted.internet import defer
from twisted.python import failure
from twisted.test.proto_helpers import StringTransport
from twisted.trial import unittest
from twisted.words.protocols.jabber import error, jid, xmlstream
from twisted.words.xish import domish
from wokkel import component, server
NS_STREAMS = 'http://etherx.jabber.org/streams'
NS_DIALBACK = "jabber:server:dialback"
class GenerateKeyTest(unittest.TestCase):
"""
Tests for L{server.generateKey}.
"""
def testBasic(self):
originating = "example.org"
receiving = "xmpp.example.com"
sid = "D60000229F"
secret = "s3cr3tf0rd14lb4ck"
key = server.generateKey(secret, receiving, originating, sid)
self.assertEqual(key,
'37c69b1cf07a3f67c04a5ef5902fa5114f2c76fe4a2686482ba5b89323075643')
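# For reference, a standalone sketch of the XEP-0185 dialback key derivation
# that generateKey is expected to produce for the values above. This is an
# illustration, not wokkel's implementation; argument order mirrors the call
# in testBasic.
def _xep0185_key(secret, receivingServer, originatingServer, streamID):
    import hashlib
    import hmac
    # HMAC-SHA256 keyed with the hex digest of SHA256(secret), over
    # "receiving originating streamID".
    hashedSecret = hashlib.sha256(secret.encode('utf-8')).hexdigest()
    text = ' '.join([receivingServer, originatingServer, streamID])
    return hmac.new(hashedSecret.encode('utf-8'),
                    text.encode('utf-8'),
                    hashlib.sha256).hexdigest()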
class XMPPServerListenAuthenticatorTest(unittest.TestCase):
"""
Tests for L{server.XMPPServerListenAuthenticator}.
"""
secret = "s3cr3tf0rd14lb4ck"
originating = "example.org"
receiving = "xmpp.example.com"
sid = "D60000229F"
key = '37c69b1cf07a3f67c04a5ef5902fa5114f2c76fe4a2686482ba5b89323075643'
def setUp(self):
self.output = []
class MyService(object):
pass
self.service = MyService()
self.service.defaultDomain = self.receiving
self.service.domains = [self.receiving, 'pubsub.'+self.receiving]
self.service.secret = self.secret
self.authenticator = server.XMPPServerListenAuthenticator(self.service)
self.xmlstream = xmlstream.XmlStream(self.authenticator)
self.xmlstream.send = self.output.append
self.xmlstream.transport = StringTransport()
def test_attributes(self):
"""
Test attributes of authenticator and stream objects.
"""
self.assertEqual(self.service, self.authenticator.service)
self.assertEqual(self.xmlstream.initiating, False)
def test_streamStartedVersion0(self):
"""
The authenticator supports pre-XMPP 1.0 streams.
"""
self.xmlstream.connectionMade()
self.xmlstream.dataReceived(
"<stream:stream xmlns:stream='http://etherx.jabber.org/streams' "
"xmlns:db='jabber:server:dialback' "
"xmlns='jabber:server' "
"to='xmpp.example.com'>")
self.assertEqual((0, 0), self.xmlstream.version)
def test_streamStartedVersion1(self):
"""
The authenticator supports XMPP 1.0 streams.
"""
self.xmlstream.connectionMade()
self.xmlstream.dataReceived(
"<stream:stream xmlns:stream='http://etherx.jabber.org/streams' "
"xmlns:db='jabber:server:dialback' "
"xmlns='jabber:server' "
"to='xmpp.example.com' "
"version='1.0'>")
self.assertEqual((1, 0), self.xmlstream.version)
def test_streamStartedSID(self):
"""
The response stream will have a stream ID.
"""
self.xmlstream.connectionMade()
self.assertIdentical(None, self.xmlstream.sid)
self.xmlstream.dataReceived(
"<stream:stream xmlns:stream='http://etherx.jabber.org/streams' "
"xmlns:db='jabber:server:dialback' "
"xmlns='jabber:server' "
"to='xmpp.example.com' "
"version='1.0'>")
self.assertNotIdentical(None, self.xmlstream.sid)
def test_streamStartedSentResponseHeader(self):
"""
A stream header is sent in response to the incoming stream header.
"""
self.xmlstream.connectionMade()
self.assertFalse(self.xmlstream._headerSent)
self.xmlstream.dataReceived(
"<stream:stream xmlns:stream='http://etherx.jabber.org/streams' "
"xmlns:db='jabber:server:dialback' "
"xmlns='jabber:server' "
"to='xmpp.example.com'>")
self.assertTrue(self.xmlstream._headerSent)
def test_streamStartedNotSentFeatures(self):
"""
No features are sent in response to an XMPP < 1.0 stream header.
"""
self.xmlstream.connectionMade()
self.xmlstream.dataReceived(
"<stream:stream xmlns:stream='http://etherx.jabber.org/streams' "
"xmlns:db='jabber:server:dialback' "
"xmlns='jabber:server' "
"to='xmpp.example.com'>")
self.assertEqual(1, len(self.output))
def test_streamStartedSentFeatures(self):
"""
Features are sent in response to an XMPP >= 1.0 stream header.
"""
self.xmlstream.connectionMade()
self.xmlstream.dataReceived(
"<stream:stream xmlns:stream='http://etherx.jabber.org/streams' "
"xmlns:db='jabber:server:dialback' "
"xmlns='jabber:server' "
"to='xmpp.example.com' "
"version='1.0'>")
self.assertEqual(2, len(self.output))
features = self.output[-1]
self.assertEqual(NS_STREAMS, features.uri)
self.assertEqual('features', features.name)
def test_streamRootElement(self):
"""
Test stream error on wrong stream namespace.
"""
self.xmlstream.connectionMade()
self.xmlstream.dataReceived(
"<stream:stream xmlns:stream='badns' "
"xmlns:db='jabber:server:dialback' "
"xmlns='jabber:server' "
"to='xmpp.example.com'>")
self.assertEqual(3, len(self.output))
exc = error.exceptionFromStreamError(self.output[1])
self.assertEqual('invalid-namespace', exc.condition)
def test_streamDefaultNamespace(self):
"""
        Test stream error on a bad default namespace.
"""
self.xmlstream.connectionMade()
self.xmlstream.dataReceived(
"<stream:stream xmlns:stream='http://etherx.jabber.org/streams' "
"xmlns:db='jabber:server:dialback' "
"xmlns='badns' "
"to='xmpp.example.com'>")
self.assertEqual(3, len(self.output))
exc = error.exceptionFromStreamError(self.output[1])
self.assertEqual('invalid-namespace', exc.condition)
def test_streamNoDialbackNamespace(self):
"""
Test stream error on missing dialback namespace.
"""
self.xmlstream.connectionMade()
self.xmlstream.dataReceived(
"<stream:stream xmlns:stream='http://etherx.jabber.org/streams' "
"xmlns='jabber:server' "
"to='xmpp.example.com'>")
self.assertEqual(3, len(self.output))
exc = error.exceptionFromStreamError(self.output[1])
self.assertEqual('invalid-namespace', exc.condition)
def test_streamBadDialbackNamespace(self):
"""
        Test stream error on a bad dialback namespace.
"""
self.xmlstream.connectionMade()
self.xmlstream.dataReceived(
"<stream:stream xmlns:stream='http://etherx.jabber.org/streams' "
"xmlns:db='badns' "
"xmlns='jabber:server' "
"to='xmpp.example.com'>")
self.assertEqual(3, len(self.output))
exc = error.exceptionFromStreamError(self.output[1])
self.assertEqual('invalid-namespace', exc.condition)
def test_streamToUnknownHost(self):
"""
Test stream error on stream's to attribute having unknown host.
"""
self.xmlstream.connectionMade()
self.xmlstream.dataReceived(
"<stream:stream xmlns:stream='http://etherx.jabber.org/streams' "
"xmlns:db='jabber:server:dialback' "
"xmlns='jabber:server' "
"to='badhost'>")
self.assertEqual(3, len(self.output))
exc = error.exceptionFromStreamError(self.output[1])
self.assertEqual('host-unknown', exc.condition)
def test_streamToOtherLocalHost(self):
"""
        Streams addressed to another of the service's local domains are accepted.
"""
self.xmlstream.connectionMade()
self.xmlstream.dataReceived(
"<stream:stream xmlns:stream='http://etherx.jabber.org/streams' "
"xmlns:db='jabber:server:dialback' "
"xmlns='jabber:server' "
"to='pubsub.xmpp.example.com' "
"version='1.0'>")
self.assertEqual(2, len(self.output))
self.assertEqual(jid.JID('pubsub.xmpp.example.com'),
self.xmlstream.thisEntity)
def test_onResult(self):
def cb(result):
self.assertEqual(1, len(self.output))
reply = self.output[0]
self.assertEqual(self.originating, reply['to'])
self.assertEqual(self.receiving, reply['from'])
self.assertEqual('valid', reply['type'])
def validateConnection(thisHost, otherHost, sid, key):
self.assertEqual(thisHost, self.receiving)
self.assertEqual(otherHost, self.originating)
self.assertEqual(sid, self.sid)
self.assertEqual(key, self.key)
return defer.succeed(None)
self.xmlstream.sid = self.sid
self.service.validateConnection = validateConnection
result = domish.Element((NS_DIALBACK, 'result'))
result['to'] = self.receiving
result['from'] = self.originating
result.addContent(self.key)
d = self.authenticator.onResult(result)
d.addCallback(cb)
return d
def test_onResultFailure(self):
class TestError(Exception):
pass
def cb(result):
reply = self.output[0]
self.assertEqual('invalid', reply['type'])
self.assertEqual(1, len(self.flushLoggedErrors(TestError)))
def validateConnection(thisHost, otherHost, sid, key):
return defer.fail(TestError())
self.xmlstream.sid = self.sid
self.service.validateConnection = validateConnection
result = domish.Element((NS_DIALBACK, 'result'))
result['to'] = self.receiving
result['from'] = self.originating
result.addContent(self.key)
d = self.authenticator.onResult(result)
d.addCallback(cb)
return d
class FakeService(object):
domains = set(['example.org', 'pubsub.example.org'])
defaultDomain = 'example.org'
secret = 'mysecret'
def __init__(self):
self.dispatched = []
def dispatch(self, xs, element):
self.dispatched.append(element)
class XMPPS2SServerFactoryTest(unittest.TestCase):
"""
Tests for L{component.XMPPS2SServerFactory}.
"""
def setUp(self):
self.service = FakeService()
self.factory = server.XMPPS2SServerFactory(self.service)
self.xmlstream = self.factory.buildProtocol(None)
self.transport = StringTransport()
self.xmlstream.thisEntity = jid.JID('example.org')
self.xmlstream.otherEntity = jid.JID('example.com')
def test_makeConnection(self):
"""
        A new connection increases the stream serial count. No raw data loggers are set up by default.
"""
self.xmlstream.makeConnection(self.transport)
self.assertEqual(0, self.xmlstream.serial)
self.assertEqual(1, self.factory.serial)
self.assertIdentical(None, self.xmlstream.rawDataInFn)
self.assertIdentical(None, self.xmlstream.rawDataOutFn)
def test_makeConnectionLogTraffic(self):
"""
Setting logTraffic should set up raw data loggers.
"""
self.factory.logTraffic = True
self.xmlstream.makeConnection(self.transport)
self.assertNotIdentical(None, self.xmlstream.rawDataInFn)
self.assertNotIdentical(None, self.xmlstream.rawDataOutFn)
def test_onError(self):
"""
An observer for stream errors should trigger onError to log it.
"""
self.xmlstream.makeConnection(self.transport)
class TestError(Exception):
pass
reason = failure.Failure(TestError())
self.xmlstream.dispatch(reason, xmlstream.STREAM_ERROR_EVENT)
self.assertEqual(1, len(self.flushLoggedErrors(TestError)))
def test_connectionInitialized(self):
"""
"""
self.xmlstream.makeConnection(self.transport)
self.xmlstream.dispatch(self.xmlstream, xmlstream.STREAM_AUTHD_EVENT)
def test_connectionLost(self):
"""
"""
self.xmlstream.makeConnection(self.transport)
self.xmlstream.dispatch(self.xmlstream, xmlstream.STREAM_AUTHD_EVENT)
self.xmlstream.dispatch(None, xmlstream.STREAM_END_EVENT)
def test_Element(self):
self.xmlstream.makeConnection(self.transport)
self.xmlstream.dispatch(self.xmlstream, xmlstream.STREAM_AUTHD_EVENT)
stanza = domish.Element((None, "presence"))
self.xmlstream.dispatch(stanza)
self.assertEqual(1, len(self.service.dispatched))
self.assertIdentical(stanza, self.service.dispatched[-1])
def test_ElementNotAuthenticated(self):
self.xmlstream.makeConnection(self.transport)
stanza = domish.Element((None, "presence"))
self.xmlstream.dispatch(stanza)
self.assertEqual(0, len(self.service.dispatched))
class ServerServiceTest(unittest.TestCase):
def setUp(self):
self.output = []
self.xmlstream = xmlstream.XmlStream(xmlstream.Authenticator())
self.xmlstream.thisEntity = jid.JID('example.org')
self.xmlstream.otherEntity = jid.JID('example.com')
self.xmlstream.send = self.output.append
self.router = component.Router()
self.service = server.ServerService(self.router,
secret='mysecret',
domain='example.org')
self.service.xmlstream = self.xmlstream
def test_defaultDomainInDomains(self):
"""
The default domain is part of the domains considered local.
"""
self.assertIn(self.service.defaultDomain, self.service.domains)
def test_dispatch(self):
stanza = domish.Element((None, "presence"))
stanza['to'] = 'user@example.org'
stanza['from'] = 'other@example.com'
self.service.dispatch(self.xmlstream, stanza)
self.assertEqual(1, len(self.output))
self.assertIdentical(stanza, self.output[-1])
def test_dispatchNoTo(self):
errors = []
self.xmlstream.sendStreamError = errors.append
stanza = domish.Element((None, "presence"))
stanza['from'] = 'other@example.com'
self.service.dispatch(self.xmlstream, stanza)
self.assertEqual(1, len(errors))
def test_generatedSecret(self):
self.router = component.Router()
self.service = server.ServerService(self.router,
domain='example.org')
self.assertEqual(32, len(self.service.secret))
|
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for object_detection.matchers.argmax_matcher."""
import numpy as np
import tensorflow as tf
from object_detection.matchers import argmax_matcher
class ArgMaxMatcherTest(tf.test.TestCase):
def test_return_correct_matches_with_default_thresholds(self):
similarity = np.array([[1., 1, 1, 3, 1],
[2, -1, 2, 0, 4],
[3, 0, -1, 0, 0]])
matcher = argmax_matcher.ArgMaxMatcher(matched_threshold=None)
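# With matched_threshold=None every column is matched to its argmax row, so no column is left unmatched.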
expected_matched_rows = np.array([2, 0, 1, 0, 1])
sim = tf.constant(similarity)
match = matcher.match(sim)
matched_cols = match.matched_column_indices()
matched_rows = match.matched_row_indices()
unmatched_cols = match.unmatched_column_indices()
with self.test_session() as sess:
res_matched_cols = sess.run(matched_cols)
res_matched_rows = sess.run(matched_rows)
res_unmatched_cols = sess.run(unmatched_cols)
self.assertAllEqual(res_matched_rows, expected_matched_rows)
self.assertAllEqual(res_matched_cols, np.arange(similarity.shape[1]))
self.assertEmpty(res_unmatched_cols)
def test_return_correct_matches_with_empty_rows(self):
matcher = argmax_matcher.ArgMaxMatcher(matched_threshold=None)
sim = 0.2*tf.ones([0, 5])
match = matcher.match(sim)
unmatched_cols = match.unmatched_column_indices()
with self.test_session() as sess:
res_unmatched_cols = sess.run(unmatched_cols)
self.assertAllEqual(res_unmatched_cols, np.arange(5))
def test_return_correct_matches_with_matched_threshold(self):
similarity = np.array([[1, 1, 1, 3, 1],
[2, -1, 2, 0, 4],
[3, 0, -1, 0, 0]], dtype=np.int32)
matcher = argmax_matcher.ArgMaxMatcher(matched_threshold=3)
expected_matched_cols = np.array([0, 3, 4])
expected_matched_rows = np.array([2, 0, 1])
expected_unmatched_cols = np.array([1, 2])
sim = tf.constant(similarity)
match = matcher.match(sim)
matched_cols = match.matched_column_indices()
matched_rows = match.matched_row_indices()
unmatched_cols = match.unmatched_column_indices()
init_op = tf.global_variables_initializer()
with self.test_session() as sess:
sess.run(init_op)
res_matched_cols = sess.run(matched_cols)
res_matched_rows = sess.run(matched_rows)
res_unmatched_cols = sess.run(unmatched_cols)
self.assertAllEqual(res_matched_rows, expected_matched_rows)
self.assertAllEqual(res_matched_cols, expected_matched_cols)
self.assertAllEqual(res_unmatched_cols, expected_unmatched_cols)
def test_return_correct_matches_with_matched_and_unmatched_threshold(self):
similarity = np.array([[1, 1, 1, 3, 1],
[2, -1, 2, 0, 4],
[3, 0, -1, 0, 0]], dtype=np.int32)
matcher = argmax_matcher.ArgMaxMatcher(matched_threshold=3,
unmatched_threshold=2)
expected_matched_cols = np.array([0, 3, 4])
expected_matched_rows = np.array([2, 0, 1])
expected_unmatched_cols = np.array([1]) # col 2 has too high maximum val
sim = tf.constant(similarity)
match = matcher.match(sim)
matched_cols = match.matched_column_indices()
matched_rows = match.matched_row_indices()
unmatched_cols = match.unmatched_column_indices()
with self.test_session() as sess:
res_matched_cols = sess.run(matched_cols)
res_matched_rows = sess.run(matched_rows)
res_unmatched_cols = sess.run(unmatched_cols)
self.assertAllEqual(res_matched_rows, expected_matched_rows)
self.assertAllEqual(res_matched_cols, expected_matched_cols)
self.assertAllEqual(res_unmatched_cols, expected_unmatched_cols)
def test_return_correct_matches_negatives_lower_than_unmatched_false(self):
similarity = np.array([[1, 1, 1, 3, 1],
[2, -1, 2, 0, 4],
[3, 0, -1, 0, 0]], dtype=np.int32)
matcher = argmax_matcher.ArgMaxMatcher(matched_threshold=3,
unmatched_threshold=2,
negatives_lower_than_unmatched=False)
expected_matched_cols = np.array([0, 3, 4])
expected_matched_rows = np.array([2, 0, 1])
expected_unmatched_cols = np.array([2]) # col 1 has too low maximum val
sim = tf.constant(similarity)
match = matcher.match(sim)
matched_cols = match.matched_column_indices()
matched_rows = match.matched_row_indices()
unmatched_cols = match.unmatched_column_indices()
with self.test_session() as sess:
res_matched_cols = sess.run(matched_cols)
res_matched_rows = sess.run(matched_rows)
res_unmatched_cols = sess.run(unmatched_cols)
self.assertAllEqual(res_matched_rows, expected_matched_rows)
self.assertAllEqual(res_matched_cols, expected_matched_cols)
self.assertAllEqual(res_unmatched_cols, expected_unmatched_cols)
def test_return_correct_matches_unmatched_row_not_using_force_match(self):
similarity = np.array([[1, 1, 1, 3, 1],
[-1, 0, -2, -2, -1],
[3, 0, -1, 2, 0]], dtype=np.int32)
matcher = argmax_matcher.ArgMaxMatcher(matched_threshold=3,
unmatched_threshold=2)
expected_matched_cols = np.array([0, 3])
expected_matched_rows = np.array([2, 0])
expected_unmatched_cols = np.array([1, 2, 4])
sim = tf.constant(similarity)
match = matcher.match(sim)
matched_cols = match.matched_column_indices()
matched_rows = match.matched_row_indices()
unmatched_cols = match.unmatched_column_indices()
with self.test_session() as sess:
res_matched_cols = sess.run(matched_cols)
res_matched_rows = sess.run(matched_rows)
res_unmatched_cols = sess.run(unmatched_cols)
self.assertAllEqual(res_matched_rows, expected_matched_rows)
self.assertAllEqual(res_matched_cols, expected_matched_cols)
self.assertAllEqual(res_unmatched_cols, expected_unmatched_cols)
def test_return_correct_matches_unmatched_row_while_using_force_match(self):
similarity = np.array([[1, 1, 1, 3, 1],
[-1, 0, -2, -2, -1],
[3, 0, -1, 2, 0]], dtype=np.int32)
matcher = argmax_matcher.ArgMaxMatcher(matched_threshold=3,
unmatched_threshold=2,
force_match_for_each_row=True)
expected_matched_cols = np.array([0, 1, 3])
expected_matched_rows = np.array([2, 1, 0])
expected_unmatched_cols = np.array([2, 4]) # col 1 is force-matched to row 1; cols 2 and 4 remain unmatched
sim = tf.constant(similarity)
match = matcher.match(sim)
matched_cols = match.matched_column_indices()
matched_rows = match.matched_row_indices()
unmatched_cols = match.unmatched_column_indices()
with self.test_session() as sess:
res_matched_cols = sess.run(matched_cols)
res_matched_rows = sess.run(matched_rows)
res_unmatched_cols = sess.run(unmatched_cols)
self.assertAllEqual(res_matched_rows, expected_matched_rows)
self.assertAllEqual(res_matched_cols, expected_matched_cols)
self.assertAllEqual(res_unmatched_cols, expected_unmatched_cols)
def test_valid_arguments_corner_case(self):
argmax_matcher.ArgMaxMatcher(matched_threshold=1,
unmatched_threshold=1)
def test_invalid_arguments_corner_case_negatives_lower_than_thres_false(self):
with self.assertRaises(ValueError):
argmax_matcher.ArgMaxMatcher(matched_threshold=1,
unmatched_threshold=1,
negatives_lower_than_unmatched=False)
def test_invalid_arguments_no_matched_threshold(self):
with self.assertRaises(ValueError):
argmax_matcher.ArgMaxMatcher(matched_threshold=None,
unmatched_threshold=4)
def test_invalid_arguments_unmatched_thres_larger_than_matched_thres(self):
with self.assertRaises(ValueError):
argmax_matcher.ArgMaxMatcher(matched_threshold=1,
unmatched_threshold=2)
def test_set_values_using_indicator(self):
input_a = np.array([3, 4, 5, 1, 4, 3, 2])
expected_b = np.array([3, 0, 0, 1, 0, 3, 2]) # Set a>3 to 0
expected_c = np.array(
[3., 4., 5., -1., 4., 3., -1.]) # Set a<3 to -1. Float32
idxb_ = input_a > 3
idxc_ = input_a < 3
matcher = argmax_matcher.ArgMaxMatcher(matched_threshold=None)
a = tf.constant(input_a)
idxb = tf.constant(idxb_)
idxc = tf.constant(idxc_)
b = matcher._set_values_using_indicator(a, idxb, 0)
c = matcher._set_values_using_indicator(tf.cast(a, tf.float32), idxc, -1)
with self.test_session() as sess:
res_b = sess.run(b)
res_c = sess.run(c)
self.assertAllEqual(res_b, expected_b)
self.assertAllEqual(res_c, expected_c)
if __name__ == '__main__':
tf.test.main()
|
|
"""
Unit tests for the taxon API
"""
import logging
from unittest import skipUnless
from . import shared
from doekbase.data_api.taxonomy.taxon.api import TaxonAPI
from doekbase.data_api.taxonomy.taxon.api import _Prototype
from doekbase.data_api.taxonomy.taxon.api import _KBaseGenomes_Genome
_log = logging.getLogger(__name__)
taxon_new = "ReferenceTaxons/242159_taxon"
taxon_old = "OriginalReferenceGenomes/kb|g.166819"
t_new = None
t_new_e = None
t_old = None
t_old_e = None
def setup():
shared.setup()
global t_new, t_new_e, t_old, t_old_e
t_new = TaxonAPI(shared.services, shared.token, taxon_new)
t_new_e = _Prototype(shared.services, shared.token, taxon_new)
t_old = TaxonAPI(shared.services, shared.token, taxon_old)
t_old_e = _KBaseGenomes_Genome(shared.services, shared.token, taxon_old)
@skipUnless(shared.can_connect, 'Cannot connect to workspace')
def test_bogus_type():
inputs = ["Bogus",
"PrototypeReferenceGenomes/kb|g.166819",
"PrototypeReferenceGenomes/kb|g.166819_assembly",
"OriginalReferenceGenomes/kb|g.166819.contigset"]
_log.info("Input {}".format(inputs))
for x in inputs:
try:
t = TaxonAPI(shared.services, shared.token, x)
except Exception, e:
assert isinstance(e, TypeError)
####### New Taxon Type tests
@skipUnless(shared.can_connect, 'Cannot connect to workspace')
def test_get_parent_new():
_log.info("Input {}".format(taxon_new))
parent = t_new.get_parent()
_log.info("Output {}".format(parent))
assert isinstance(parent, TaxonAPI)
parent_e = t_new_e.get_parent()
assert isinstance(parent_e, TaxonAPI)
assert parent.get_taxonomic_id() == parent_e.get_taxonomic_id()
@skipUnless(shared.can_connect, 'Cannot connect to workspace')
def test_get_children_new():
_log.info("Input {}".format(taxon_new))
children = t_new.get_children()
_log.info("Output {}".format(children))
assert isinstance(children, list)
#and len(children) > 0
children_e = t_new_e.get_children()
assert isinstance(children_e, list)
assert children == children_e
@skipUnless(shared.can_connect, 'Cannot connect to workspace')
def test_get_genome_annotations_new():
_log.info("Input {}".format(taxon_new))
annotations = t_new.get_genome_annotations()
_log.info("Output {}".format(annotations))
assert isinstance(annotations, list)
#and len(annotations) > 0
annotations_e = t_new_e.get_genome_annotations()
assert isinstance(annotations_e, list)
assert annotations == annotations_e
@skipUnless(shared.can_connect, 'Cannot connect to workspace')
def test_get_scientific_lineage_new():
_log.info("Input {}".format(taxon_new))
scientific_lineage = t_new.get_scientific_lineage()
_log.info("Output {}".format(scientific_lineage))
assert isinstance(scientific_lineage, basestring) and len(scientific_lineage) > 0
scientific_lineage_e = t_new_e.get_scientific_lineage()
assert isinstance(scientific_lineage_e, basestring) and len(scientific_lineage_e) > 0
assert scientific_lineage == scientific_lineage_e
@skipUnless(shared.can_connect, 'Cannot connect to workspace')
def test_get_scientific_name_new():
_log.info("Input {}".format(taxon_new))
scientific_name = t_new.get_scientific_name()
_log.info("Output {}".format(scientific_name))
assert isinstance(scientific_name, basestring) and len(scientific_name) > 0
scientific_name_e = t_new_e.get_scientific_name()
assert isinstance(scientific_name_e, basestring) and len(scientific_name_e) > 0
assert scientific_name == scientific_name_e
@skipUnless(shared.can_connect, 'Cannot connect to workspace')
def test_get_taxonomic_id_new():
_log.info("Input {}".format(taxon_new))
taxonomic_id = t_new.get_taxonomic_id()
_log.info("Output {}".format(taxonomic_id))
assert isinstance(taxonomic_id, int) and taxonomic_id != -1
taxonomic_id_e = t_new_e.get_taxonomic_id()
assert isinstance(taxonomic_id_e, int) and taxonomic_id_e != -1
assert taxonomic_id == taxonomic_id_e
@skipUnless(shared.can_connect, 'Cannot connect to workspace')
def test_get_kingdom_new():
_log.info("Input {}".format(taxon_new))
kingdom = t_new.get_kingdom()
_log.info("Output {}".format(kingdom))
assert kingdom == "Viridiplantae"
kingdom_e = t_new_e.get_kingdom()
assert kingdom_e == "Viridiplantae"
@skipUnless(shared.can_connect, 'Cannot connect to workspace')
def test_get_domain_new():
_log.info("Input {}".format(taxon_new))
domain = t_new.get_domain()
_log.info("Output {}".format(domain))
assert isinstance(domain, basestring) and len(domain) > 0
domain_e = t_new_e.get_domain()
assert isinstance(domain_e, basestring) and len(domain_e) > 0
assert domain == domain_e
@skipUnless(shared.can_connect, 'Cannot connect to workspace')
def test_get_aliases_new():
_log.info("Input {}".format(taxon_new))
aliases = t_new.get_aliases()
_log.info("Output {}".format(aliases))
assert isinstance(aliases, list) and len(aliases) > 0
aliases_e = t_new_e.get_aliases()
assert isinstance(aliases_e, list) and len(aliases_e) > 0
assert aliases == aliases_e
@skipUnless(shared.can_connect, 'Cannot connect to workspace')
def test_get_genetic_code_new():
_log.info("Input {}".format(taxon_new))
genetic_code = t_new.get_genetic_code()
_log.info("Output {}".format(genetic_code))
assert isinstance(genetic_code, int) and genetic_code != -1
genetic_code_e = t_new_e.get_genetic_code()
assert isinstance(genetic_code_e, int) and genetic_code_e != -1
assert genetic_code == genetic_code_e
####### Old Taxon Type tests
@skipUnless(shared.can_connect, 'Cannot connect to workspace')
def test_get_parent_old():
_log.info("Input {}".format(taxon_old))
parent = t_old.get_parent()
_log.info("Output {}".format(parent))
assert parent is None
parent_e = t_old_e.get_parent()
assert parent_e is None
@skipUnless(shared.can_connect, 'Cannot connect to workspace')
def test_get_children_old():
_log.info("Input {}".format(taxon_old))
children = t_old.get_children()
_log.info("Output {}".format(children))
assert isinstance(children, list) and len(children) == 0
children_e = t_old_e.get_children()
assert isinstance(children_e, list) and len(children_e) == 0
assert children == children_e
@skipUnless(shared.can_connect, 'Cannot connect to workspace')
def test_get_genome_annotations_old():
_log.info("Input {}".format(taxon_old))
annotations = t_old.get_genome_annotations()
_log.info("Output {}".format(annotations))
assert isinstance(annotations, list) and len(annotations) == 0
annotations_e = t_old_e.get_genome_annotations()
assert isinstance(annotations_e, list) and len(annotations_e) == 0
assert annotations == annotations_e
@skipUnless(shared.can_connect, 'Cannot connect to workspace')
def test_get_scientific_lineage_old():
_log.info("Input {}".format(taxon_old))
scientific_lineage = t_old.get_scientific_lineage()
_log.info("Output {}".format(scientific_lineage))
assert isinstance(scientific_lineage, basestring) and len(scientific_lineage) > 0
scientific_lineage_e = t_old_e.get_scientific_lineage()
assert isinstance(scientific_lineage_e, basestring) and len(scientific_lineage_e) > 0
assert scientific_lineage == scientific_lineage_e
@skipUnless(shared.can_connect, 'Cannot connect to workspace')
def test_get_scientific_name_old():
_log.info("Input {}".format(taxon_old))
scientific_name = t_old.get_scientific_name()
_log.info("Output {}".format(scientific_name))
assert isinstance(scientific_name, basestring) and len(scientific_name) > 0
scientific_name_e = t_old_e.get_scientific_name()
assert isinstance(scientific_name_e, basestring) and len(scientific_name_e) > 0
assert scientific_name == scientific_name_e
@skipUnless(shared.can_connect, 'Cannot connect to workspace')
def test_get_taxonomic_id_old():
_log.info("Input {}".format(taxon_old))
taxonomic_id = t_old.get_taxonomic_id()
_log.info("Output {}".format(taxonomic_id))
assert isinstance(taxonomic_id, int) and taxonomic_id == -1
taxonomic_id_e = t_old_e.get_taxonomic_id()
assert isinstance(taxonomic_id_e, int) and taxonomic_id_e == -1
assert taxonomic_id == taxonomic_id_e
@skipUnless(shared.can_connect, 'Cannot connect to workspace')
def test_get_kingdom_old():
_log.info("Input {}".format(taxon_old))
kingdom = t_old.get_kingdom()
_log.info("Output {}".format(kingdom))
assert kingdom is None
kingdom_e = t_old_e.get_kingdom()
assert kingdom_e is None
@skipUnless(shared.can_connect, 'Cannot connect to workspace')
def test_get_domain_old():
_log.info("Input {}".format(taxon_old))
domain = t_old.get_domain()
_log.info("Output {}".format(domain))
assert isinstance(domain, basestring) and len(domain) > 0
domain_e = t_old_e.get_domain()
assert isinstance(domain_e, basestring) and len(domain_e) > 0
assert domain == domain_e
@skipUnless(shared.can_connect, 'Cannot connect to workspace')
def test_get_aliases_old():
_log.info("Input {}".format(taxon_old))
aliases = t_old.get_aliases()
_log.info("Output {}".format(aliases))
assert isinstance(aliases, list) and len(aliases) == 0
aliases_e = t_old_e.get_aliases()
assert isinstance(aliases_e, list) and len(aliases_e) == 0
assert aliases == aliases_e
@skipUnless(shared.can_connect, 'Cannot connect to workspace')
def test_get_genetic_code_old():
_log.info("Input {}".format(taxon_old))
genetic_code = t_old.get_genetic_code()
_log.info("Output {}".format(genetic_code))
assert isinstance(genetic_code, int) and genetic_code != -1
genetic_code_e = t_old_e.get_genetic_code()
assert isinstance(genetic_code_e, int) and genetic_code_e != -1
assert genetic_code == genetic_code_e
|
|
from collections import OrderedDict
import wx
from classes.om import ObjectManager
from classes.ui import UIManager
from classes.ui import FrameController
from classes.ui import Frame
from app import log
SLIDER_MARGIN = 5 # Default 6
class RangeSlider(wx.Slider):
def __init__(self, *args, **kwargs):
wx.Slider.__init__(self, *args, **kwargs)
self.SetPageSize(1)
self.calculate_rect()
self.Bind(wx.EVT_SLIDER, self.on_event)
def calculate_rect(self):
self.rect = self.GetClientRect()
self.rect.width -= 2 * SLIDER_MARGIN
self.rect.x += SLIDER_MARGIN
def on_event(self, event):
if event.GetEventType() == wx.EVT_LEFT_DOWN.typeId:
if self.is_selrange():
val = self.get_position(event)
if val < self.GetValue():
self.SetSelection(val, self.GetSelEnd())
elif val > self.GetValue():
self.SetSelection(self.GetSelStart(), val)
self.send_change()
else:
event.Skip()
elif event.GetEventType() == wx.EVT_SLIDER.typeId:
self.send_change()
def send_change(self):
if self.is_selrange():
# print('send_change:', self.GetSelStart(), self.GetSelEnd() + 1)
self.GetParent().GetParent().set_values(self.GetSelStart(), self.GetSelEnd() + 1)
else:
self.GetParent().GetParent().set_values(self.GetValue())
def is_selrange(self):
return (self.GetWindowStyle() & wx.SL_SELRANGE)
def set_selrange(self, sel=True, selmin=None, selmax=None):
if sel:
self.SetWindowStyle(wx.SL_SELRANGE)
if selmin is None or selmax is None:
raise Exception('selmin and selmax must be provided when enabling range selection')
else:
self.SetSelection(selmin, selmax)
self.Bind(wx.EVT_LEFT_DOWN, self.on_event)
else:
self.ClearSel()
if self.is_selrange():
self.Unbind(wx.EVT_LEFT_DOWN, handler=self.on_event)
self.SetWindowStyle(wx.SL_BOTTOM)
if selmin is None:
raise Exception('selmin must be provided when disabling range selection')
super(RangeSlider, self).SetValue(selmin)
def SetSelection(self, minsel, maxsel):
# print('here')
super(RangeSlider, self).SetSelection(minsel, maxsel)
# print 'not here'
value = minsel + ((maxsel - minsel) // 2)
super(RangeSlider, self).SetValue(value)
def SetValue(self, value):
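# Shift the whole selected range so its midpoint lands on the requested value; return False if the shifted range would fall outside the slider bounds.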
old_min_range = self.GetSelStart()
old_max_range = self.GetSelEnd()
old_med_range = old_min_range + ((old_max_range - old_min_range) // 2)
new_min_range = value - (old_med_range - old_min_range)
new_max_range = value + (old_max_range - old_med_range)
if new_min_range < self.GetRange()[0] or \
new_max_range > self.GetRange()[1]:
return False
super(RangeSlider, self).SetSelection(new_min_range, new_max_range)
super(RangeSlider, self).SetValue(value)
return True
def get_position(self, e):
click_min = self.rect.x + (self.GetThumbLength() / 2)
click_max = (self.rect.x + self.rect.width) - (self.GetThumbLength() / 2)
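# Map the click x-coordinate linearly onto the slider's value range, clamping to the min/max when the click falls outside the thumb travel.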
click_position = e.GetX()
result_min = self.GetMin()
result_max = self.GetMax()
if click_position > click_min and click_position < click_max:
result = self.linapp(click_min, click_max,
result_min, result_max,
click_position)
elif click_position <= click_min:
result = result_min
else:
result = result_max
return result
def linapp(self, x1, x2, y1, y2, x):
proportion = float(x - x1) / (x2 - x1)
length = y2 - y1
return round(proportion * length + y1)
class DimensionPanel(wx.Panel):
def __init__(self, data_index_uid, display, is_range, min_idx, max_idx, *args, **kwargs):
super(DimensionPanel, self).__init__(*args, **kwargs)
self.SetSize(300, 50)
#
self.data_index_uid = data_index_uid
OM = ObjectManager()
obj = OM.get(data_index_uid)
#
main_sizer = wx.StaticBoxSizer(wx.VERTICAL, self, obj.name)
#
self.top_panel = wx.Panel(self)
self.top_sizer = wx.BoxSizer(wx.HORIZONTAL)
#
self.check_display = wx.CheckBox(self.top_panel, -1, label='Display')
self.check_display.Bind(wx.EVT_CHECKBOX, self._on_check_display)
self.top_sizer.Add(self.check_display, 1, wx.ALIGN_CENTER | wx.LEFT, 30)
#
self.check_range = wx.CheckBox(self.top_panel, -1, label='Range')
self.check_range.Bind(wx.EVT_CHECKBOX, self._on_check_range)
self.top_sizer.Add(self.check_range, 1, wx.ALIGN_CENTER | wx.RIGHT, 30)
self.top_panel.SetSizer(self.top_sizer)
#
main_sizer.Add(self.top_panel, 0, wx.EXPAND | wx.TOP | wx.BOTTOM, 3)
#
self.label = obj.name
self.vec = obj.data
self.display = display
self.is_range = is_range
#
self.bottom_panel = wx.Panel(self)
self.bottom_sizer = wx.BoxSizer(wx.VERTICAL)
self.slider = RangeSlider(self.bottom_panel)
self.bottom_sizer.Add(self.slider, 0, wx.EXPAND)
self.text_value = wx.StaticText(self.bottom_panel, -1)
self.bottom_sizer.Add(self.text_value, 0, wx.ALIGN_CENTER)
self.bottom_panel.SetSizer(self.bottom_sizer)
#
main_sizer.Add(self.bottom_panel, 0, wx.EXPAND)
#
self.slider.SetRange(0, len(self.vec) - 1)
self.min_idx = min_idx
self.max_idx = max_idx
#
if self.display:
self.set_check_display(1)
else:
self.set_check_display(0)
#
self.Bind(wx.EVT_PAINT, self._on_paint)
self.SetSizer(main_sizer)
main_sizer.Layout()
def _on_paint(self, event):
self.slider.calculate_rect()
event.Skip()
def _on_check_display(self, event):
self.set_check_display(event.GetSelection())
def _on_check_range(self, event):
self.set_check_range(event.GetSelection())
def set_check_display(self, value=0):
self.check_display.SetValue(value)
if value:
self.display = True
self.set_check_range(1)
self.check_range.Enable()
else:
self.display = False
self.set_check_range(0)
self.check_range.Disable()
def set_check_range(self, value=0):
# print 'set_check_range:', value
if self.min_idx > self.max_idx:
temp = self.min_idx
self.min_idx = self.max_idx
self.max_idx = temp
if value:
self.is_range = True
# self.slider.set_selrange(True, self.min_idx, self.max_idx)
# self.set_values(self.min_idx, self.max_idx)
else:
self.is_range = False
# self.slider.set_selrange(False, self.min_idx, self.max_idx)
# self.set_values(self.min_idx, self.max_idx)
self.slider.set_selrange(self.is_range, self.min_idx, self.max_idx)
self.set_values(self.min_idx, self.max_idx)
self.check_range.SetValue(value)
def set_values(self, min_idx, max_idx=None):
self.min_idx = min_idx
if max_idx is not None:
self.max_idx = max_idx
from_str = 'From: {}'.format(self.vec[min_idx])
# TODO: max_idx-1 to max_idx !!!
to_str = ' To: {}'.format(self.vec[max_idx - 1])
self.text_value.SetLabel(from_str + to_str)
else:
val_str = 'Selected: {}'.format(self.vec[min_idx])
self.text_value.SetLabel(val_str)
# print 'set_values:', self.min_idx, self.max_idx
self.bottom_sizer.Layout()
def get_result(self):
ret = {}
ret['uid'] = self.data_index_uid
ret['display'] = self.display
ret['is_range'] = self.is_range
# if self.display:
ret['start'] = self.min_idx
ret['end'] = self.max_idx
# else:
# ret['start'] = self.slider.GetValue()
# ret['end'] = None
return ret
###############################################################################
###############################################################################
class NavigatorController(FrameController):
tid = 'navigator_controller'
_ATTRIBUTES = OrderedDict()
def __init__(self, **state):
state['title'] = 'Data navigator'
state['size'] = (350, 600)
super().__init__(**state)
def PostInit(self):
toc = self._get_track_object_controller()
OM = ObjectManager()
for (di_uid, display, first, last) in toc._data:
di = OM.get(di_uid)
print('\n', di.name, display, first, last)
self.view.add_panel(di_uid, display, first, last)
self.view.add_bottom_panel()
def _get_track_object_controller(self):
UIM = UIManager()
toc_uid = UIM._getparentuid(self.uid)
return UIM.get(toc_uid)
def Set(self, results):
print('NavigatorController.Set:', results)
toc = self._get_track_object_controller()
new_data = []
for result in results:
new_data.append((result['uid'], result['display'],
result['start'], result['end'])
)
# print result
toc._data = new_data
toc.redraw()
print('NavigatorController.Set ENDED')
class Navigator(Frame):
tid = 'navigator'
def __init__(self, controller_uid):
super().__init__(controller_uid)
self.basepanel = wx.Panel(self)
self.sizer = wx.BoxSizer(wx.VERTICAL)
self.basepanel.SetSizer(self.sizer)
self.panels = []
def add_panel(self, data_index_uid, display, start, end):
panel = DimensionPanel(data_index_uid, display, True, start, end,
self.basepanel)
self.panels.append(panel)
self.sizer.Add(panel, 0, wx.EXPAND | wx.TOP | wx.LEFT | wx.RIGHT, 5) # wx.ALIGN_CENTER
self.sizer.Layout()
def add_bottom_panel(self):
buttons_panel = wx.Panel(self.basepanel)
buttons_panel_sizer = wx.BoxSizer(wx.HORIZONTAL)
#
self.ok_button = wx.Button(buttons_panel, label='Ok')
self.ok_button.Bind(wx.EVT_BUTTON, self.on_ok)
self.apply_button = wx.Button(buttons_panel, label='Apply')
self.apply_button.Bind(wx.EVT_BUTTON, self.on_apply)
self.cancel_button = wx.Button(buttons_panel, label='Cancel')
self.cancel_button.Bind(wx.EVT_BUTTON, self.on_cancel)
#
buttons_panel_sizer.Add(self.ok_button, 0,
wx.ALIGN_CENTER | wx.LEFT | wx.TOP | wx.BOTTOM, 10
)
buttons_panel_sizer.Add(self.apply_button, 0,
wx.ALIGN_CENTER | wx.LEFT | wx.TOP | wx.BOTTOM, 10
)
buttons_panel_sizer.Add(self.cancel_button, 0,
wx.ALIGN_CENTER | wx.LEFT | wx.TOP | wx.BOTTOM, 10
)
buttons_panel.SetSizer(buttons_panel_sizer)
self.sizer.Add(buttons_panel, 0, wx.EXPAND | wx.TOP | wx.LEFT | wx.RIGHT, 5)
self.sizer.Layout()
def on_ok(self, event):
self._doApply()
self._doCancel()
def on_apply(self, event):
self._doApply()
def on_cancel(self, event):
self._doCancel()
def _doApply(self):
results = []
for panel in self.panels:
results.append(panel.get_result())
UIM = UIManager()
controller = UIM.get(self._controller_uid)
controller.Set(results)
def _doCancel(self):
self.Close()
|
|
import os.path
import threading, Queue
import shutil, tempfile
import flask
import mock
from libmproxy.proxy.config import ProxyConfig
from libmproxy.proxy.server import ProxyServer
from libmproxy.proxy.primitives import TransparentProxyMode
import libpathod.test, libpathod.pathoc
from libmproxy import flow, controller
from libmproxy.cmdline import APP_HOST, APP_PORT
import tutils
testapp = flask.Flask(__name__)
@testapp.route("/")
def hello():
return "testapp"
@testapp.route("/error")
def error():
raise ValueError("An exception...")
def errapp(environ, start_response):
raise ValueError("errapp")
class TestMaster(flow.FlowMaster):
def __init__(self, config):
config.port = 0
s = ProxyServer(config)
state = flow.State()
flow.FlowMaster.__init__(self, s, state)
self.apps.add(testapp, "testapp", 80)
self.apps.add(errapp, "errapp", 80)
self.clear_log()
def handle_request(self, f):
flow.FlowMaster.handle_request(self, f)
f.reply()
def handle_response(self, f):
flow.FlowMaster.handle_response(self, f)
f.reply()
def clear_log(self):
self.log = []
def handle_log(self, l):
self.log.append(l.msg)
l.reply()
class ProxyThread(threading.Thread):
def __init__(self, tmaster):
threading.Thread.__init__(self)
self.tmaster = tmaster
self.name = "ProxyThread (%s:%s)" % (tmaster.server.address.host, tmaster.server.address.port)
controller.should_exit = False
@property
def port(self):
return self.tmaster.server.address.port
@property
def log(self):
return self.tmaster.log
def run(self):
self.tmaster.run()
def shutdown(self):
self.tmaster.shutdown()
class ProxTestBase(object):
# Test Configuration
ssl = None
ssloptions = False
clientcerts = False
no_upstream_cert = False
authenticator = None
masterclass = TestMaster
certforward = False
@classmethod
def setupAll(cls):
cls.server = libpathod.test.Daemon(ssl=cls.ssl, ssloptions=cls.ssloptions)
cls.server2 = libpathod.test.Daemon(ssl=cls.ssl, ssloptions=cls.ssloptions)
cls.config = ProxyConfig(**cls.get_proxy_config())
tmaster = cls.masterclass(cls.config)
tmaster.start_app(APP_HOST, APP_PORT)
cls.proxy = ProxyThread(tmaster)
cls.proxy.start()
@classmethod
def teardownAll(cls):
shutil.rmtree(cls.cadir)
cls.proxy.shutdown()
cls.server.shutdown()
cls.server2.shutdown()
def setUp(self):
self.master.clear_log()
self.master.state.clear()
self.server.clear_log()
self.server2.clear_log()
@property
def master(self):
return self.proxy.tmaster
@classmethod
def get_proxy_config(cls):
cls.cadir = os.path.join(tempfile.gettempdir(), "mitmproxy")
return dict(
no_upstream_cert = cls.no_upstream_cert,
cadir = cls.cadir,
authenticator = cls.authenticator,
certforward = cls.certforward,
ssl_ports=([cls.server.port, cls.server2.port] if cls.ssl else []),
clientcerts = tutils.test_data.path("data/clientcert") if cls.clientcerts else None
)
class HTTPProxTest(ProxTestBase):
def pathoc_raw(self):
return libpathod.pathoc.Pathoc(("127.0.0.1", self.proxy.port))
def pathoc(self, sni=None):
"""
Returns a connected Pathoc instance.
"""
p = libpathod.pathoc.Pathoc(("localhost", self.proxy.port), ssl=self.ssl, sni=sni)
if self.ssl:
p.connect(("127.0.0.1", self.server.port))
else:
p.connect()
return p
def pathod(self, spec, sni=None):
"""
Constructs a pathod GET request, with the appropriate base and proxy.
"""
p = self.pathoc(sni=sni)
spec = spec.encode("string_escape")
if self.ssl:
q = "get:'/p/%s'"%spec
else:
q = "get:'%s/p/%s'"%(self.server.urlbase, spec)
return p.request(q)
def app(self, page):
if self.ssl:
p = libpathod.pathoc.Pathoc(("127.0.0.1", self.proxy.port), True)
p.connect((APP_HOST, APP_PORT))
return p.request("get:'%s'"%page)
else:
p = self.pathoc()
return p.request("get:'http://%s%s'"%(APP_HOST, page))
class TResolver:
def __init__(self, port):
self.port = port
def original_addr(self, sock):
return ("127.0.0.1", self.port)
class TransparentProxTest(ProxTestBase):
ssl = None
resolver = TResolver
@classmethod
@mock.patch("libmproxy.platform.resolver")
def setupAll(cls, _):
super(TransparentProxTest, cls).setupAll()
if cls.ssl:
ports = [cls.server.port, cls.server2.port]
else:
ports = []
cls.config.mode = TransparentProxyMode(cls.resolver(cls.server.port), ports)
@classmethod
def get_proxy_config(cls):
d = ProxTestBase.get_proxy_config()
d["mode"] = "transparent"
return d
def pathod(self, spec, sni=None):
"""
Constructs a pathod GET request, with the appropriate base and proxy.
"""
if self.ssl:
p = self.pathoc(sni=sni)
q = "get:'/p/%s'"%spec
else:
p = self.pathoc()
q = "get:'/p/%s'"%spec
return p.request(q)
def pathoc(self, sni=None):
"""
Returns a connected Pathoc instance.
"""
p = libpathod.pathoc.Pathoc(("localhost", self.proxy.port), ssl=self.ssl, sni=sni)
p.connect()
return p
class ReverseProxTest(ProxTestBase):
ssl = None
@classmethod
def get_proxy_config(cls):
d = ProxTestBase.get_proxy_config()
d["upstream_server"] = (
True if cls.ssl else False,
True if cls.ssl else False,
"127.0.0.1",
cls.server.port
)
d["mode"] = "reverse"
return d
def pathoc(self, sni=None):
"""
Returns a connected Pathoc instance.
"""
p = libpathod.pathoc.Pathoc(("localhost", self.proxy.port), ssl=self.ssl, sni=sni)
p.connect()
return p
def pathod(self, spec, sni=None):
"""
Constructs a pathod GET request, with the appropriate base and proxy.
"""
if self.ssl:
p = self.pathoc(sni=sni)
q = "get:'/p/%s'"%spec
else:
p = self.pathoc()
q = "get:'/p/%s'"%spec
return p.request(q)
class ChainProxTest(ProxTestBase):
"""
Chain three instances of mitmproxy in a row to test upstream mode.
Proxy order is cls.proxy -> cls.chain[0] -> cls.chain[1]
cls.proxy and cls.chain[0] are in upstream mode,
cls.chain[1] is in regular mode.
"""
chain = None
n = 2
@classmethod
def setupAll(cls):
cls.chain = []
super(ChainProxTest, cls).setupAll()
for _ in range(cls.n):
config = ProxyConfig(**cls.get_proxy_config())
tmaster = cls.masterclass(config)
proxy = ProxyThread(tmaster)
proxy.start()
cls.chain.insert(0, proxy)
# Patch the original proxy to upstream mode
cls.config = cls.proxy.tmaster.config = cls.proxy.tmaster.server.config = ProxyConfig(**cls.get_proxy_config())
@classmethod
def teardownAll(cls):
super(ChainProxTest, cls).teardownAll()
for proxy in cls.chain:
proxy.shutdown()
def setUp(self):
super(ChainProxTest, self).setUp()
for proxy in self.chain:
proxy.tmaster.clear_log()
proxy.tmaster.state.clear()
@classmethod
def get_proxy_config(cls):
d = super(ChainProxTest, cls).get_proxy_config()
if cls.chain: # First proxy is in normal mode.
d.update(
mode="upstream",
upstream_server=(False, False, "127.0.0.1", cls.chain[0].port)
)
return d
class HTTPUpstreamProxTest(ChainProxTest, HTTPProxTest):
pass
|
|
import os,sys,re,gzip,gc
import numpy as np
import scipy as sp
import scipy.io
from scipy.sparse import *
from scipy.sparse.linalg import *
import pickle
import math
def readData(param_file,AllComb):
"""
Read in tree, chromatin marks and chromosomes from the param_file.
If AllComb is true, consider all possible combinations of marks.
If AllComb is false, consider all combinations that exist in the genome.
"""
exec('from '+param_file[0:-3]+' import *')
chrs = chromosomes
Samples = tree.keys()
seq = {}
comb = {}
for S in Samples:
print S
i = Samples.index(S)
seq[i] = {}
nsegment = 0
lenChr = {}
for chr in chrs:
lenChr[chr] = 0
filename = inputdir+'/'+S+'_'+chr+'_binary.txt'
if os.path.isfile(filename):
f = open(filename)
else:
f = gzip.open(filename+'.gz')
line = f.readline()
line = f.readline()
marks0 = re.split("[\t\n]",line.rstrip())
for line in f:
nsegment += 1
lenChr[chr] += 1
sig = line.rstrip().replace('\t','')
sig2 = ''
for a in range(len(marks0)):
if (marks0[a] in marks):
sig2 += sig[a]
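# sig2 keeps one 0/1 character per selected mark; reading it as a binary number gives a single integer code for the mark combination in this bin.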
b = int(sig2,2)
comb[b] = 1
if (b>0):
seq[i][nsegment] = b
f.close()
(transObs,transObs2) = ({},{})
if AllComb:
n = 2**len(marks)
for i in range(n):
transObs[i] = i
transObs2[i] = i
seq2 = seq
else:
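# Keep only mark combinations that actually occur in the genome: transObs maps a dense index back to the original code, transObs2 the reverse, and the sequences are relabeled with the dense indices.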
n = len(comb)
for i in range(n):
transObs[i] = comb.keys()[i]
transObs2[comb.keys()[i]] = i
seq2 = {}
for i in seq.keys():
seq2[i] = {}
for s in seq[i].keys():
seq2[i][s] = transObs2[seq[i][s]]
return (seq2,tree,Samples,marks,chrs,n,nsegment,lenChr,transObs)
def get_range(seq,m,n,sample_size):
"""
Compute singular vectors for three views
U[i,j] : singular vectors for view j in Sample i
U[i,1] : U from Pairs12
U[i,2] : U from Pairs21
U[i,3] : U from Pairs31
"""
U = {}
D = seq.__len__()
for i in range(0,D):
print "Node " + str(i)
seq_i = {}
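# Each position s yields the triple of consecutive observations (x_s, x_{s+1}, x_{s+2}); these are the three views used by the spectral method.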
for s in range(0,sample_size):
triple = (seq[i].get(s,0),seq[i].get(s+1,0),seq[i].get(s+2,0))
seq_i[s] = triple
[iw_i,unique_pos_i] = iw_seq(seq_i,sample_size)
unique_size_i = unique_pos_i.__len__()
for views in range(1,4):
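# Build the empirical co-occurrence matrix between the two positions defining this view pair, weighting each unique triple by its occurrence count.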
cooccur = dok_matrix((n,n))
for N in unique_pos_i.keys():
pos = unique_pos_i[N]
if (views==1): (pos1,pos2) = (pos,pos+2)
elif (views==2): (pos1,pos2) = (pos+1,pos)
elif (views==3): (pos1,pos2) = (pos+2,pos)
cooccur[seq[i].get(pos1,0),seq[i].get(pos2,0)] += iw_i[N]
cooccur /= float(sample_size)
(U2,t1,t2) = svds(cooccur,m)
U[i,views] = np.matrix(sp.zeros((n,m)))
U2 = np.matrix(U2)
for k in range(0,m):
U[i,views][:,k] = np.sign(U2[0,m-k-1])*U2[:,m-k-1]
return U
def iw_seq(seq_i,sample_size):
"""
Count number of occurrences in the genome for each observation and store
the genomic position where it occurs for the first time.
This is used to efficiently go through the whole genome data.
"""
iw_i = {}
unique_pos_i = {}
for s in range(0,sample_size):
if ((seq_i[s]) not in unique_pos_i):
unique_pos_i[seq_i[s]] = s
iw_i[seq_i[s]] = iw_i.get(seq_i[s],0) + 1
return (iw_i,unique_pos_i)
def symmetrize(depth_i,path_i,seq,U,m,n,sample_size,Prod_Proj):
"""
Symmetrize the tensor
"""
# Count all combinations of triples from all samples; e.g. six-tuples for 2 samples;
# x^u_1,x^u_2,x^u_3, ..., x^r_1,x^r_2,x^r_3 where the tree path is r=root, ..., u.
seqext = {}
for s in range(0,sample_size):
comb = ()
for j in range(0,depth_i):
comb += (seq[path_i[j]].get(s,0),seq[path_i[j]].get(s+1,0),seq[path_i[j]].get(s+2,0))
seqext[s] = comb
(iw_i,unique_pos_i) = iw_seq(seqext,sample_size)
unique_size_i = unique_pos_i.__len__()
# P^{u,Hu}_{2,3}_proj = U[u,2]^T * P^{u,Hu}_{2,3} * U[Hu,3]
P_2i_3 = symmetrize_2i_3(depth_i,path_i,seq,U,m,n,sample_size,Prod_Proj)
# P^{u,Hu}_{2,1}_proj = U[u,2]^T * P^{u,Hu}_{2,1} * U[Hu,1]
P_2i_1 = symmetrize_2i_1(depth_i,path_i,seq,U,m,n,sample_size,Prod_Proj)
# P^{Hu,Hu}_{1,3}_proj = U[Hu,1]^T * P^{Hu,Hu}_{1,3} * U[Hu,3]
P_1_3 = symmetrize_1_3(depth_i,path_i,seq,U,m,n,sample_size,Prod_Proj)
# S1 = P^{u,Hu}_{2,3}_proj * (P^{Hu,Hu}_{1,3}_proj)^{-1}
# S3 = P^{u,Hu}_{2,1}_proj * (P^{Hu,Hu}_{3,1}_proj)^{-1}
# M^u_2 = P^{Hu,u}_{1,2}(U[Hu,1]*S1^T,U[u,2]) = S1 * U[Hu,1]^T * P^{Hu,u}_{1,2} * U[u,2] = P^{u,Hu}_{2,3}_proj * (P^{Hu,Hu}_{1,3}_proj)^{-1} * P^{u,Hu}_{1,2}_proj
M_2i = P_2i_3 * np.linalg.inv(P_1_3) * P_2i_1.transpose()
# Whitening step
S2 = sp.sparse.identity(m).todense()
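# S2 collects the inverse square roots of the leading singular values, so whiten = U2 * S2 whitens the symmetrized pair matrix.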
if (Prod_Proj):
(U1,s1,t2) = np.linalg.svd(np.array((M_2i+M_2i.transpose())*.5))
U2 = np.matrix(U1)
for k in range(0,m):
U2[:,k] = np.sign(U2[0,k])*U2[:,k]
S2[k,k] = 1/np.sqrt(s1[k])
else:
(U1,s1,t2) = svds(lil_matrix((M_2i+M_2i.transpose())*.5),m)
U1 = np.matrix(U1)
U2 = np.matrix(sp.zeros((n,m)))
for k in range(0,m):
U2[:,k] = np.sign(U1[0,m-k-1])*U1[:,m-k-1]
S2[k,k] = 1/np.sqrt(s1[m-k-1])
whiten = U2 * S2
S1 = P_2i_3 * np.linalg.inv(P_1_3)
S3 = P_2i_1 * np.linalg.inv(P_1_3.transpose())
A_1 = S1.transpose() * whiten
A_3 = S3.transpose() * whiten
return (A_1,A_3,whiten,iw_i,unique_pos_i)
def symmetrize_2i_3(depth_i,path_i,seq,U,m,n,sample_size,Prod_Proj):
"""
Project view 3
"""
# Count all combinations of (x^u_2, x^u_3, ..., x^r_3) where the tree path for u, Hu is (r=root, ..., u).
seq_2i_3 = {}
for s in range(0,sample_size):
comb_2i_3 = (seq[path_i[0]].get(s+1,0),)
for j in range(depth_i):
comb_2i_3 += (seq[path_i[j]].get(s+2,0),)
seq_2i_3[s] = comb_2i_3
(iw_2i_3,unique_pos_2i_3) = iw_seq(seq_2i_3,sample_size)
#print unique_pos_2i_3.__len__()
# P^{u,Hu}_{2,3}_proj = U[u,2]^T * P^{u,Hu}_{2,3} * U[Hu,3]
if (Prod_Proj): P_2i_3 = np.matrix(sp.zeros((m,m**depth_i)))
else: P_2i_3 = np.matrix(sp.zeros((n,m**depth_i)))
for N in unique_pos_2i_3.keys():
pos = unique_pos_2i_3[N]
if (Prod_Proj): y_2i = U[path_i[0],2][seq[path_i[0]].get(pos+1,0),:].transpose()
else: y_2i = seq[path_i[0]].get(pos+1,0)
y_3 = np.matrix([[1]])
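# The projected view-3 vector for the whole root path is assembled as a Kronecker product of the per-node projections.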
for j in range(depth_i):
y_3 = np.kron(y_3,U[path_i[j],3][seq[path_i[j]].get(pos+2,0),:].transpose())
if (Prod_Proj): P_2i_3 = P_2i_3 + y_2i * y_3.transpose() * iw_2i_3[N]
else: P_2i_3[y_2i,:] = P_2i_3[y_2i,:] + y_3.transpose() * iw_2i_3[N]
P_2i_3 /= float(sample_size)
return P_2i_3
def symmetrize_2i_1(depth_i,path_i,seq,U,m,n,sample_size,Prod_Proj):
"""
Project view 1
"""
# Count all combinations of (x^u_2, x^u_1, ..., x^r_1) where the tree path for u, Hu is (r=root, ..., u).
seq_2i_1 = {}
for s in range(0,sample_size):
comb_2i_1 = (seq[path_i[0]].get(s+1,0),)
for j in range(depth_i):
comb_2i_1 += (seq[path_i[j]].get(s,0),)
seq_2i_1[s] = comb_2i_1
(iw_2i_1,unique_pos_2i_1) = iw_seq(seq_2i_1,sample_size)
#print unique_pos_2i_1.__len__()
# P^{u,Hu}_{2,1}_proj = U[u,2]^T * P^{u,Hu}_{2,1} * U[Hu,1]
if (Prod_Proj):
P_2i_1 = np.matrix(sp.zeros((m,m**depth_i)))
else:
P_2i_1 = np.matrix(sp.zeros((n,m**depth_i)))
for N in unique_pos_2i_1.keys():
pos = unique_pos_2i_1[N]
if (Prod_Proj): y_2i = U[path_i[0],2][seq[path_i[0]].get(pos+1,0),:].transpose()
else: y_2i = seq[path_i[0]].get(pos+1,0)
y_1 = np.matrix([[1]])
for j in range(depth_i):
y_1 = np.kron(y_1,U[path_i[j],1][seq[path_i[j]].get(pos,0),:].transpose())
if (Prod_Proj): P_2i_1 = P_2i_1 + y_2i * y_1.transpose() * iw_2i_1[N]
else: P_2i_1[y_2i,:] = P_2i_1[y_2i,:] + y_1.transpose() * iw_2i_1[N]
P_2i_1 /= float(sample_size)
return P_2i_1
def symmetrize_1_3(depth_i,path_i,seq,U,m,n,sample_size,Prod_Proj):
"""
Project views 1 and 3
"""
# Count all combinations of (x^u_1, x^u_3, ..., x^r_1, x^r_3) where the tree path for u, Hu is (r=root, ..., u).
seq_1_3 = {}
for s in range(0,sample_size):
comb_1_3 = ()
for j in range(depth_i):
comb_1_3 += (seq[path_i[j]].get(s,0),seq[path_i[j]].get(s+2,0))
seq_1_3[s] = comb_1_3
(iw_1_3,unique_pos_1_3) = iw_seq(seq_1_3,sample_size)
# P^{Hu,Hu}_{1,3}_proj = U[Hu,1]^T * P^{Hu,Hu}_{1,3} * U[Hu,3]
P_1_3s = {}
for j in range(depth_i):
P_1_3s[j] = np.matrix(sp.zeros((m,m)))
for N in unique_pos_1_3.keys():
pos = unique_pos_1_3[N]
for j in range(depth_i):
y_1 = U[path_i[j],1][seq[path_i[j]].get(pos,0),:].transpose()
y_3 = U[path_i[j],3][seq[path_i[j]].get(pos+2,0),:].transpose()
P_1_3s[j] = P_1_3s[j] + y_1 * y_3.transpose() * iw_1_3[N]
for j in range(depth_i):
P_1_3s[j] /= float(sample_size)
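# The projected pair matrix for the full path is assembled as the Kronecker product of the per-node pair matrices.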
P_1_3 = np.matrix([[1]])
for j in range(depth_i):
P_1_3 = np.kron(P_1_3,P_1_3s[j])
return P_1_3
def precomputeTensorSlices(seq,U,A_1,A_3,i,path_i,m,sample_size,iw_i,unique_pos_i):
"""
Precomputation step so that the tensor power iterations in the next step can be performed efficiently.
M^u_3 = P^{Hu,u,Hu}_{1,2,3}(U[Hu,1]*S1^T,U[u,2],U[Hu,3]*S3^T)
A_1 = S1^T * W
A_3 = S3^T * W
i-th slice of M_3: [M_3]_i = A_1^T * U[Hu,1]^T * [P^{Hu,u,Hu}_{1,2,3}]_i * U[Hu,3] * A_3
= W^T * (U[Hu,1]*S1^T)^T * [P^{Hu,u,Hu}_{1,2,3}]_i * (U[Hu,3]*S3^T) * W
"""
M_3_i = {}
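# One m-by-m slice per observed middle-view symbol of node i; each slice accumulates the projected outer products of the path-level view-1 and view-3 vectors.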
for N in unique_pos_i.keys():
pos = unique_pos_i[N]
s_2i = seq[i].get(pos+1,0)
M_3_i[s_2i] = M_3_i.get(s_2i,sp.zeros((m,m)))
y_1 = np.matrix([[1]])
y_3 = np.matrix([[1]])
for j in range(path_i.__len__()):
y_1 = kron(y_1,U[path_i[j],1][seq[path_i[j]].get(pos,0),:].transpose())
y_3 = kron(y_3,U[path_i[j],3][seq[path_i[j]].get(pos+2,0),:].transpose())
M_3_i[s_2i] = M_3_i[s_2i] + (A_1.transpose()*y_1) * (A_3.transpose()*y_3).transpose() * iw_i[N]
for s_2i in M_3_i.keys():
M_3_i[s_2i] /= float(sample_size)
return M_3_i
def simult_power_iter(seq,U,M_3_i,whiten,i,path_i,m,n,sample_size,iw_i,unique_pos_i,Prod_Proj,cutoff):
"""
Perform the simultaneous tensor power method to decompose the tensor.
"""
m2 = m
nIter = 20 #max number of iterations
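# Start from a random orthonormal basis (via QR) and estimate all m eigenvectors of the whitened tensor simultaneously.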
[V,r1] = np.linalg.qr(np.random.randn(m,m))
V_n = sp.zeros((m,m))
Lambda = sp.zeros((m,1))
for t in range(nIter):
for j in range(0,m2):
# compute: V_n_j = M^u_3(W*V_j,W,W*V_j) / ||M^u_3(W*V_j,W,W*V_j)||
V_n[:,j] = single_power_iter(seq,U,M_3_i,whiten,i,path_i,m,n,V[:,j],sample_size,iw_i,unique_pos_i,Prod_Proj)
# compute: Lambda_j = M^u_3(W*V_j,W*V_j,W*V_j)
Lambda[j,0] = np.dot(V[:,j].T,V_n[:,j])
V_n[:,j] /= np.linalg.norm(V_n[:,j])
[V_n,r1] = np.linalg.qr(V_n) #orthogonalization step
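# Convergence check: the smallest |cosine| between corresponding old and new basis vectors; stop once every vector is essentially unchanged.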
similarity = min(abs(np.diagonal(np.dot(V_n[:,0:m2].T,V[:,0:m2]))))
print similarity
if (similarity > cutoff):
break
V = np.array(V_n)
return (Lambda,V)
def single_power_iter(seq,U,M_3_i,whiten,i,path_i,m,n,v,sample_size,iw_i,unique_pos_i,Prod_Proj):
"""
Perform the power iteration for a single vector using the precomputed tensor slices M_3_i
"""
# i-th slice of M_3: [M_3]_i = W^T * (U[Hu,1]*S1^T)^T * [P^{Hu,u,Hu}_{1,2,3}]_i * (U[Hu,3]*S3^T) * W
# M^u_3 = P^{Hu,u,Hu}_{1,2,3}(U[Hu,1]*S1^T,U[u,2],U[Hu,3]*S3^T)
# v_n = M^u_3(Wv,W,Wv) = sum_i{W^T*(U[u,2]_i)^T * (Wv)^T * (U[Hu,1]*S1^T)^T * [P^{Hu,u,Hu}_{1,2,3}]_i * (U[Hu,3]*S3^T) * Wv}
v = np.matrix(v).transpose()
v_n = sp.zeros((m,1))
I = identity(n).todense()
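# Summing over the precomputed slices evaluates the multilinear form M^u_3(Wv, ., Wv) without materializing the full third-order tensor.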
for s_2i in M_3_i.keys():
if (Prod_Proj):
y_2i = U[path_i[0],2][s_2i,:].transpose()
else:
y_2i = I[s_2i,:].transpose()
v_n += np.array((whiten.transpose()*y_2i) * (v.transpose() * M_3_i[s_2i] * v))
return v_n[:,0]
def recover_W(O_d,path,seq,m,sample_size):
"""
Recover initial state distributions
If u is root r: W^r = (O^r)^+ * P^r_1
else: W^u = (O^u)^+ * P^{u,pi(u)}_{1,1} * ((O^u)^+)^T
"""
D = seq.__len__()
W = {}
for i in range(D):
if (path[i].__len__()==1):
dims = (m,)
else:
dims = (m,m)
seq_i = {}
for s in range(0,sample_size):
comb = ()
comb += (seq[path[i][0]].get(s,0),)
if (path[i].__len__()>=2):
comb += (seq[path[i][1]].get(s,0),)
seq_i[s] = comb
[iw_i,unique_pos_i] = iw_seq(seq_i,sample_size)
unique_size_i = unique_pos_i.__len__()
P_1i = sp.zeros(dims)
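# Accumulate the position-1 marginal (joint with the parent node when u is not the root), mapped through the pseudo-inverse emission matrices.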
for N in unique_pos_i.keys():
pos = unique_pos_i[N]
y_1_i = np.array(O_d[path[i][0]][:,seq[path[i][0]].get(pos,0)])[:,0]
if (path[i].__len__()>=2): # u is not root
y_2_i = np.array(O_d[path[i][1]][:,seq[path[i][1]].get(pos,0)])[:,0]
y_1_i = np.outer(y_1_i,y_2_i)
P_1i = P_1i + y_1_i * iw_i[N]
P_1i /= float(sample_size)
W[i] = {}
W[i][0] = P_1i
W[i][1] = sp.zeros(dims)
if (path[i].__len__()==1):
W[i][1] = abs(W[i][0])/sum(abs(W[i][0]))
else:
for k in range(m):
W[i][1][:,k] = abs(W[i][0][:,k])/sum(abs(W[i][0][:,k]))
print unique_size_i
return W
def recover_T(O_d,path,seq,m,sample_size):
"""
Recover Transition Matrices
If u is root: Q^r = (O^u)^+ * P^{r,r}_{2,1} * ((O^r)^+)^T
else: Q^u = P^{u,pi(u),u}_{2,2,1}(((O^r)^+)^T,((O^r)^+)^T,((O^r)^+)^T)
Normalize over the z^u_2 coordinate to get T^u.
"""
D = seq.__len__()
T = {}
for i in range(D):
T[i] = {}
if (path[i].__len__()==1): # u is root
P_2i_1i = sp.zeros((m,m))
seq_i = {}
for s in range(0,sample_size):
comb = (seq[path[i][0]].get(s,0),seq[path[i][0]].get(s+1,0))
seq_i[s] = comb
[iw_i,unique_pos_i] = iw_seq(seq_i,sample_size)
unique_size_i = unique_pos_i.__len__()
for N in unique_pos_i.keys():
pos = unique_pos_i[N]
y_1_i = np.array(O_d[path[i][0]][:,seq[path[i][0]].get(pos,0)])[:,0]
y_2_i = np.array(O_d[path[i][0]][:,seq[path[i][0]].get(pos+1,0)])[:,0]
P_2i_1i = P_2i_1i + np.outer(y_2_i,y_1_i) * iw_i[N]
P_2i_1i /= float(sample_size)
T[i][0] = abs(P_2i_1i)
T[i][1] = np.zeros((m,m))
for k in range(m):
T[i][1][:,k] = T[i][0][:,k]/sum(T[i][0][:,k])
else: # u is not root
P_2i_1i_2pi = sp.zeros((m,m,m))
seq_i = {}
for s in range(0,sample_size):
comb = (seq[path[i][0]].get(s,0),seq[path[i][0]].get(s+1,0),seq[path[i][1]].get(s+1,0))
seq_i[s] = comb
[iw_i,unique_pos_i] = iw_seq(seq_i,sample_size)
unique_size_i = unique_pos_i.__len__()
for N in unique_pos_i.keys():
pos = unique_pos_i[N]
y_1_i = np.array(O_d[path[i][0]][:,seq[path[i][0]].get(pos,0)])[:,0]
y_2_i = np.array(O_d[path[i][0]][:,seq[path[i][0]].get(pos+1,0)])[:,0]
y_2_pi = np.array(O_d[path[i][1]][:,seq[path[i][1]].get(pos+1,0)])[:,0]
P_2i_1i_2pi = P_2i_1i_2pi + np.reshape(np.kron(y_2_i,np.kron(y_1_i,y_2_pi)),(m,m,m)) * iw_i[N]
P_2i_1i_2pi /= float(sample_size)
T[i][0] = abs(P_2i_1i_2pi)
T[i][1] = np.zeros((m,m,m))
for j in range(m):
for k in range(m):
T[i][1][:,j,k] = T[i][0][:,j,k]/sum(T[i][0][:,j,k])
return T
def flatten(P):
"""
Flatten parameters for one sample by averaging out other samples.
"""
dims = sp.shape(P)
if (dims.__len__()==1): # vector
P_new = P
elif (dims.__len__()==2): # matrix
P_new = sp.zeros((dims[0],))
for i in range(dims[0]):
P_new[i] = sum(P[i,:])/float(dims[1])
elif (dims.__len__()==3): # tensor
P_new = sp.zeros((dims[1],dims[2]))
for i in range(dims[0]):
for j in range(dims[1]):
P_new[i,j] = sum(P[i,j,:])/float(dims[1])
return P_new
def writeModel(O,T,Pi,marks,transObs,nobs,model_file,TreeStructured):
""" Write model parameters for a single HMM. """
(M2,K) = np.shape(O)
with open(model_file,'w') as f:
f.write(str(K)+'\t'+str(nobs)+'\tU\t0\t0\n')
if TreeStructured:
for j in range(0,K):
for i in range(0,K):
f.write('probinit\t'+str(j+1)+'\t'+str(i+1)+'\t'+str(Pi[i,j])+'\n')
for k in range(0,K):
for j in range(0,K):
for i in range(0,K):
f.write('transitionprobs\t'+str(k+1)+'\t'+str(j+1)+'\t'+str(i+1)+'\t'+str(T[i,j,k])+'\n')
else:
for i in range(0,K):
f.write('probinit\t'+str(i+1)+'\t'+str(Pi[i])+'\n')
for j in range(0,K):
for i in range(0,K):
f.write('transitionprobs\t'+str(j+1)+'\t'+str(i+1)+'\t'+str(T[i,j])+'\n')
for j in range(0,K):
for i in sorted(transObs.keys()):
f.write('emissionprobs\t'+str(j+1)+'\t'+str(transObs[i])+'\t'+str(O[i,j])+'\n')
def writeEmissionMatrix(O,marks,transObs,filename):
""" Write emission matrix to the output file. """
(M2,K) = np.shape(O)
with open(filename,'w') as f:
PRINT = [str(K),str(len(transObs)),'(Emission order)']
for mark in marks:
PRINT.append(mark)
f.write('\t'.join(PRINT)+'\n')
for j in range(0,K):
for d in sorted(transObs.keys()):
f.write(str(j+1)+'\t'+str(transObs[d])+'\t'+str(O[d,j])+'\n')
def getEmission(O2,transObs,marks):
n = len(marks)
Emission = {}
for (i,j) in O2.iterkeys():
Emission[j+1] = Emission.get(j+1,{})
v = O2[i,j]
i2 = transObs[i] % (2**n)
for a in range(0,n):
mark = marks[n-1-a]
if (int(i2/2)*2!=i2): Emission[j+1][mark] = Emission[j+1].get(mark,0) + v
i2 /= 2
if (i2==0): break
return Emission
def printEmission(Emission,marks0):
"""
Print the emission matrix to stdout
"""
PRINT = ''
for mark in marks0:
PRINT += '\t'+mark
print PRINT
for i in sorted(Emission.keys()):
PRINT = str(i)
for mark in marks0:
PRINT += '\t'+str(round(Emission[i].get(mark,0),4))
print PRINT
# main function of Spectacle-Tree.py
if __name__=="__main__":
"""
[EXAMPLE] python Spectacle-Tree.py 6 Param_Spectacle_Tree.py Output imp_tree
"""
try:
m = int(sys.argv[1]) # Number of chromatin states
param_file = sys.argv[2]
outdir = sys.argv[3] # Directory must already exist
outfile = sys.argv[4]
except (ValueError, IndexError):
print 'usage: python Spectacle-Tree.py 6 Param_Spectacle_Tree.py Output imp_tree'
quit(0)
(seq,tree,Samples,marks,chrs,n,nsegment,lenChr,transObs) = readData(param_file,False)
sample_size = nsegment-2
D = seq.__len__() # Number of samples
O = {}
O_d = {}
Prod_Proj = True # Boolean: use product projections technique or not
cutoff = 1.-1e-6 # Cutoff for simultaneous tensor power method
# Step 1: Compute all the range matrices U's via SVD on the co-occurrence matrices
print 'Computing the range matrices'
U = get_range(seq,m,n,sample_size)
# Step 2: For each tree node, compute the unique path to the root in the tree
print 'Computing the path to the root for each tree node'
depth = {}
path = {}
for i in range(0,D):
path[i] = []
temp = Samples[i]
while (temp!='root'):
path[i].append(Samples.index(temp))
depth[i] = depth.get(i,0) + 1
temp = tree[temp]
for i in range(0,D): #iterate over all tree nodes
# Step 3: Symmetrization of the matrices
print 'Performing symmetrization for node %d' % i
print "Sample: " + tree.keys()[i]
[A_1,A_3,whiten,iw_i,unique_pos_i] = symmetrize(depth[i],path[i],seq,U,m,n,sample_size,Prod_Proj)
# Step 4: Run simultaneous tensor power method
print 'Performing tensor decomposition'
# T^u = M^u_3(W^u,W^u,W^u)
# M^u_3 = P^{Hu,u,Hu}_{1,2,3}(U[Hu,1]*S1^T,U[u,2],U[Hu,3]*S3^T)
# i-th slice of M_3: [M_3]_i = W^T * (U[Hu,1]*S1^T)^T * [P^{Hu,u,Hu}_{1,2,3}]_i * (U[Hu,3]*S3^T) * W
M_3_i = precomputeTensorSlices(seq,U,A_1,A_3,i,path[i],m,sample_size,iw_i,unique_pos_i)
[Lambda,V] = simult_power_iter(seq,U,M_3_i,whiten,i,path[i],m,n,sample_size,iw_i,unique_pos_i,Prod_Proj,cutoff)
# Step 5: Recover the observation matrix of the HMM
LambdaD = identity(m).todense()
for k in range(m):
LambdaD[k,k] = Lambda[k,0]
# Recover Emission matrix: O^u = U[u,2]*(W^T)^{-1} * V * Diag(Lambda)
if (Prod_Proj):
O_i = U[i,2] * (np.linalg.inv(whiten.transpose()) * np.matrix(V) * np.matrix(LambdaD))
else:
O_i = (np.linalg.pinv(whiten.transpose()) * np.matrix(V) * np.matrix(LambdaD))
B_i = np.matrix(sp.zeros((n,m)))
for k in range(m):
B_i[:,k] = O_i[:,k] / sum(O_i[:,k])
O[i] = {}
O[i][0] = B_i
O[i][1] = np.matrix(sp.zeros((n,m)))
for k in range(m):
O[i][1][:,k] = abs(O_i[:,k]) / sum(abs(O_i[:,k]))
for k in range(m):
O[i][1][:,k] /= sum(O[i][1][:,k])
Emission = getEmission(dok_matrix(O[i][1]),transObs,marks)
printEmission(Emission,marks)
# Step 6: Recover the initial state distribution and transition probabilities of the HMM
print 'Recovering initial state distribution and transition probabilities of the HMM'
for i in range(D):
O_d[i] = np.linalg.pinv(O[i][0])
W = recover_W(O_d,path,seq,m,sample_size)
T = recover_T(O_d,path,seq,m,sample_size)
# Step 7: Output the HMM parameters to the output file
for i in range(D):
Sample = tree.keys()[i]
if len(path[i])==1:
writeModel(O[i][1],T[i][1],W[i][1],marks,transObs,len(marks),outdir+'/model_comb_'+str(m)+'_'+Sample+'_'+outfile+'.txt',False)
writeEmissionMatrix(O[i][1],marks,transObs,outdir+'/emissions_comb_'+str(m)+'_'+Sample+'_'+outfile+'.txt')
else:
W2 = flatten(W[i][1])
T2 = flatten(T[i][1])
writeModel(O[i][1],T2,W2,marks,transObs,len(marks),outdir+'/model_comb_'+str(m)+'_'+Sample+'_'+outfile+'_flat.txt',False)
|
|
"""The tests for the DirecTV Media player platform."""
from unittest.mock import call, patch
from datetime import datetime, timedelta
import requests
import pytest
from homeassistant.components.media_player.const import (
ATTR_MEDIA_CONTENT_ID,
ATTR_MEDIA_CONTENT_TYPE,
MEDIA_TYPE_TVSHOW,
ATTR_MEDIA_ENQUEUE,
ATTR_MEDIA_DURATION,
ATTR_MEDIA_TITLE,
ATTR_MEDIA_POSITION,
ATTR_MEDIA_SERIES_TITLE,
ATTR_MEDIA_CHANNEL,
ATTR_INPUT_SOURCE,
ATTR_MEDIA_POSITION_UPDATED_AT,
DOMAIN,
SERVICE_PLAY_MEDIA,
SUPPORT_PAUSE,
SUPPORT_TURN_ON,
SUPPORT_TURN_OFF,
SUPPORT_PLAY_MEDIA,
SUPPORT_STOP,
SUPPORT_NEXT_TRACK,
SUPPORT_PREVIOUS_TRACK,
SUPPORT_PLAY,
)
from homeassistant.components.directv.media_player import (
ATTR_MEDIA_CURRENTLY_RECORDING,
ATTR_MEDIA_RATING,
ATTR_MEDIA_RECORDED,
ATTR_MEDIA_START_TIME,
DEFAULT_DEVICE,
DEFAULT_PORT,
)
from homeassistant.const import (
ATTR_ENTITY_ID,
CONF_DEVICE,
CONF_HOST,
CONF_NAME,
CONF_PORT,
SERVICE_MEDIA_NEXT_TRACK,
SERVICE_MEDIA_PAUSE,
SERVICE_MEDIA_PLAY,
SERVICE_MEDIA_PREVIOUS_TRACK,
SERVICE_MEDIA_STOP,
SERVICE_TURN_OFF,
SERVICE_TURN_ON,
STATE_OFF,
STATE_PAUSED,
STATE_PLAYING,
STATE_UNAVAILABLE,
)
from homeassistant.helpers.discovery import async_load_platform
from homeassistant.setup import async_setup_component
import homeassistant.util.dt as dt_util
from tests.common import MockDependency, async_fire_time_changed
CLIENT_ENTITY_ID = "media_player.client_dvr"
MAIN_ENTITY_ID = "media_player.main_dvr"
IP_ADDRESS = "127.0.0.1"
DISCOVERY_INFO = {"host": IP_ADDRESS, "serial": 1234}
LIVE = {
"callsign": "HASSTV",
"date": "20181110",
"duration": 3600,
"isOffAir": False,
"isPclocked": 1,
"isPpv": False,
"isRecording": False,
"isVod": False,
"major": 202,
"minor": 65535,
"offset": 1,
"programId": "102454523",
"rating": "No Rating",
"startTime": 1541876400,
"stationId": 3900947,
"title": "Using Home Assistant to automate your home",
}
LOCATIONS = [{"locationName": "Main DVR", "clientAddr": DEFAULT_DEVICE}]
RECORDING = {
"callsign": "HASSTV",
"date": "20181110",
"duration": 3600,
"isOffAir": False,
"isPclocked": 1,
"isPpv": False,
"isRecording": True,
"isVod": False,
"major": 202,
"minor": 65535,
"offset": 1,
"programId": "102454523",
"rating": "No Rating",
"startTime": 1541876400,
"stationId": 3900947,
"title": "Using Home Assistant to automate your home",
"uniqueId": "12345",
"episodeTitle": "Configure DirecTV platform.",
}
WORKING_CONFIG = {
"media_player": {
"platform": "directv",
CONF_HOST: IP_ADDRESS,
CONF_NAME: "Main DVR",
CONF_PORT: DEFAULT_PORT,
CONF_DEVICE: DEFAULT_DEVICE,
}
}
@pytest.fixture
def client_dtv():
"""Fixture for a client device."""
mocked_dtv = MockDirectvClass("mock_ip")
mocked_dtv.attributes = RECORDING
mocked_dtv._standby = False
return mocked_dtv
@pytest.fixture
def main_dtv():
"""Fixture for main DVR."""
return MockDirectvClass("mock_ip")
@pytest.fixture
def dtv_side_effect(client_dtv, main_dtv):
"""Fixture to create DIRECTV instance for main and client."""
def mock_dtv(ip, port, client_addr):
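# A non-zero client address selects the client device mock; address "0" selects the main DVR.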
if client_addr != "0":
mocked_dtv = client_dtv
else:
mocked_dtv = main_dtv
mocked_dtv._host = ip
mocked_dtv._port = port
mocked_dtv._device = client_addr
return mocked_dtv
return mock_dtv
@pytest.fixture
def mock_now():
"""Fixture for dtutil.now."""
return dt_util.utcnow()
@pytest.fixture
def platforms(hass, dtv_side_effect, mock_now):
"""Fixture for setting up test platforms."""
config = {
"media_player": [
{
"platform": "directv",
"name": "Main DVR",
"host": IP_ADDRESS,
"port": DEFAULT_PORT,
"device": DEFAULT_DEVICE,
},
{
"platform": "directv",
"name": "Client DVR",
"host": IP_ADDRESS,
"port": DEFAULT_PORT,
"device": "1",
},
]
}
with MockDependency("DirectPy"), patch(
"DirectPy.DIRECTV", side_effect=dtv_side_effect
), patch("homeassistant.util.dt.utcnow", return_value=mock_now):
hass.loop.run_until_complete(async_setup_component(hass, DOMAIN, config))
hass.loop.run_until_complete(hass.async_block_till_done())
yield
async def async_turn_on(hass, entity_id=None):
"""Turn on specified media player or all."""
data = {ATTR_ENTITY_ID: entity_id} if entity_id else {}
await hass.services.async_call(DOMAIN, SERVICE_TURN_ON, data)
async def async_turn_off(hass, entity_id=None):
"""Turn off specified media player or all."""
data = {ATTR_ENTITY_ID: entity_id} if entity_id else {}
await hass.services.async_call(DOMAIN, SERVICE_TURN_OFF, data)
async def async_media_pause(hass, entity_id=None):
"""Send the media player the command for pause."""
data = {ATTR_ENTITY_ID: entity_id} if entity_id else {}
await hass.services.async_call(DOMAIN, SERVICE_MEDIA_PAUSE, data)
async def async_media_play(hass, entity_id=None):
"""Send the media player the command for play/pause."""
data = {ATTR_ENTITY_ID: entity_id} if entity_id else {}
await hass.services.async_call(DOMAIN, SERVICE_MEDIA_PLAY, data)
async def async_media_stop(hass, entity_id=None):
"""Send the media player the command for stop."""
data = {ATTR_ENTITY_ID: entity_id} if entity_id else {}
await hass.services.async_call(DOMAIN, SERVICE_MEDIA_STOP, data)
async def async_media_next_track(hass, entity_id=None):
"""Send the media player the command for next track."""
data = {ATTR_ENTITY_ID: entity_id} if entity_id else {}
await hass.services.async_call(DOMAIN, SERVICE_MEDIA_NEXT_TRACK, data)
async def async_media_previous_track(hass, entity_id=None):
"""Send the media player the command for prev track."""
data = {ATTR_ENTITY_ID: entity_id} if entity_id else {}
await hass.services.async_call(DOMAIN, SERVICE_MEDIA_PREVIOUS_TRACK, data)
async def async_play_media(hass, media_type, media_id, entity_id=None, enqueue=None):
"""Send the media player the command for playing media."""
data = {ATTR_MEDIA_CONTENT_TYPE: media_type, ATTR_MEDIA_CONTENT_ID: media_id}
if entity_id:
data[ATTR_ENTITY_ID] = entity_id
if enqueue:
data[ATTR_MEDIA_ENQUEUE] = enqueue
await hass.services.async_call(DOMAIN, SERVICE_PLAY_MEDIA, data)
class MockDirectvClass:
"""A fake DirecTV DVR device."""
def __init__(self, ip, port=8080, clientAddr="0"):
"""Initialize the fake DirecTV device."""
self._host = ip
self._port = port
self._device = clientAddr
self._standby = True
self._play = False
self._locations = LOCATIONS
self.attributes = LIVE
def get_locations(self):
"""Mock for get_locations method."""
test_locations = {
"locations": self._locations,
"status": {
"code": 200,
"commandResult": 0,
"msg": "OK.",
"query": "/info/getLocations",
},
}
return test_locations
def get_standby(self):
"""Mock for get_standby method."""
return self._standby
def get_tuned(self):
"""Mock for get_tuned method."""
if self._play:
self.attributes["offset"] = self.attributes["offset"] + 1
test_attributes = self.attributes
test_attributes["status"] = {
"code": 200,
"commandResult": 0,
"msg": "OK.",
"query": "/tv/getTuned",
}
return test_attributes
def key_press(self, keypress):
"""Mock for key_press method."""
if keypress == "poweron":
self._standby = False
self._play = True
elif keypress == "poweroff":
self._standby = True
self._play = False
elif keypress == "play":
self._play = True
elif keypress == "pause" or keypress == "stop":
self._play = False
def tune_channel(self, source):
"""Mock for tune_channel method."""
self.attributes["major"] = int(source)
async def test_setup_platform_config(hass):
"""Test setting up the platform from configuration."""
with MockDependency("DirectPy"), patch("DirectPy.DIRECTV", new=MockDirectvClass):
await async_setup_component(hass, DOMAIN, WORKING_CONFIG)
await hass.async_block_till_done()
state = hass.states.get(MAIN_ENTITY_ID)
assert state
assert len(hass.states.async_entity_ids("media_player")) == 1
async def test_setup_platform_discover(hass):
"""Test setting up the platform from discovery."""
with MockDependency("DirectPy"), patch("DirectPy.DIRECTV", new=MockDirectvClass):
hass.async_create_task(
async_load_platform(
hass, DOMAIN, "directv", DISCOVERY_INFO, {"media_player": {}}
)
)
await hass.async_block_till_done()
state = hass.states.get(MAIN_ENTITY_ID)
assert state
assert len(hass.states.async_entity_ids("media_player")) == 1
async def test_setup_platform_discover_duplicate(hass):
"""Test setting up the platform from discovery."""
with MockDependency("DirectPy"), patch("DirectPy.DIRECTV", new=MockDirectvClass):
await async_setup_component(hass, DOMAIN, WORKING_CONFIG)
await hass.async_block_till_done()
hass.async_create_task(
async_load_platform(
hass, DOMAIN, "directv", DISCOVERY_INFO, {"media_player": {}}
)
)
await hass.async_block_till_done()
state = hass.states.get(MAIN_ENTITY_ID)
assert state
assert len(hass.states.async_entity_ids("media_player")) == 1
async def test_setup_platform_discover_client(hass):
"""Test setting up the platform from discovery."""
LOCATIONS.append({"locationName": "Client 1", "clientAddr": "1"})
LOCATIONS.append({"locationName": "Client 2", "clientAddr": "2"})
with MockDependency("DirectPy"), patch("DirectPy.DIRECTV", new=MockDirectvClass):
await async_setup_component(hass, DOMAIN, WORKING_CONFIG)
await hass.async_block_till_done()
hass.async_create_task(
async_load_platform(
hass, DOMAIN, "directv", DISCOVERY_INFO, {"media_player": {}}
)
)
await hass.async_block_till_done()
del LOCATIONS[-1]
del LOCATIONS[-1]
state = hass.states.get(MAIN_ENTITY_ID)
assert state
state = hass.states.get("media_player.client_1")
assert state
state = hass.states.get("media_player.client_2")
assert state
assert len(hass.states.async_entity_ids("media_player")) == 3
async def test_supported_features(hass, platforms):
"""Test supported features."""
# Features supported for main DVR
state = hass.states.get(MAIN_ENTITY_ID)
assert (
SUPPORT_PAUSE
| SUPPORT_TURN_ON
| SUPPORT_TURN_OFF
| SUPPORT_PLAY_MEDIA
| SUPPORT_STOP
| SUPPORT_NEXT_TRACK
| SUPPORT_PREVIOUS_TRACK
| SUPPORT_PLAY
== state.attributes.get("supported_features")
)
# Feature supported for clients.
state = hass.states.get(CLIENT_ENTITY_ID)
assert (
SUPPORT_PAUSE
| SUPPORT_PLAY_MEDIA
| SUPPORT_STOP
| SUPPORT_NEXT_TRACK
| SUPPORT_PREVIOUS_TRACK
| SUPPORT_PLAY
== state.attributes.get("supported_features")
)
async def test_check_attributes(hass, platforms, mock_now):
"""Test attributes."""
next_update = mock_now + timedelta(minutes=5)
with patch("homeassistant.util.dt.utcnow", return_value=next_update):
async_fire_time_changed(hass, next_update)
await hass.async_block_till_done()
# Start playing TV
with patch("homeassistant.util.dt.utcnow", return_value=next_update):
await async_media_play(hass, CLIENT_ENTITY_ID)
await hass.async_block_till_done()
state = hass.states.get(CLIENT_ENTITY_ID)
assert state.state == STATE_PLAYING
assert state.attributes.get(ATTR_MEDIA_CONTENT_ID) == RECORDING["programId"]
assert state.attributes.get(ATTR_MEDIA_CONTENT_TYPE) == MEDIA_TYPE_TVSHOW
assert state.attributes.get(ATTR_MEDIA_DURATION) == RECORDING["duration"]
assert state.attributes.get(ATTR_MEDIA_POSITION) == 2
assert state.attributes.get(ATTR_MEDIA_POSITION_UPDATED_AT) == next_update
assert state.attributes.get(ATTR_MEDIA_TITLE) == RECORDING["title"]
assert state.attributes.get(ATTR_MEDIA_SERIES_TITLE) == RECORDING["episodeTitle"]
assert state.attributes.get(ATTR_MEDIA_CHANNEL) == "{} ({})".format(
RECORDING["callsign"], RECORDING["major"]
)
assert state.attributes.get(ATTR_INPUT_SOURCE) == RECORDING["major"]
assert (
state.attributes.get(ATTR_MEDIA_CURRENTLY_RECORDING) == RECORDING["isRecording"]
)
assert state.attributes.get(ATTR_MEDIA_RATING) == RECORDING["rating"]
assert state.attributes.get(ATTR_MEDIA_RECORDED)
assert state.attributes.get(ATTR_MEDIA_START_TIME) == datetime(
2018, 11, 10, 19, 0, tzinfo=dt_util.UTC
)
# Test to make sure that ATTR_MEDIA_POSITION_UPDATED_AT is not
# updated if TV is paused.
with patch(
"homeassistant.util.dt.utcnow", return_value=next_update + timedelta(minutes=5)
):
await async_media_pause(hass, CLIENT_ENTITY_ID)
await hass.async_block_till_done()
state = hass.states.get(CLIENT_ENTITY_ID)
assert state.state == STATE_PAUSED
assert state.attributes.get(ATTR_MEDIA_POSITION_UPDATED_AT) == next_update
async def test_main_services(hass, platforms, main_dtv, mock_now):
"""Test the different services."""
next_update = mock_now + timedelta(minutes=5)
with patch("homeassistant.util.dt.utcnow", return_value=next_update):
async_fire_time_changed(hass, next_update)
await hass.async_block_till_done()
# DVR starts in off state.
state = hass.states.get(MAIN_ENTITY_ID)
assert state.state == STATE_OFF
# All these should call key_press in our class.
with patch.object(
main_dtv, "key_press", wraps=main_dtv.key_press
) as mock_key_press, patch.object(
main_dtv, "tune_channel", wraps=main_dtv.tune_channel
) as mock_tune_channel, patch.object(
main_dtv, "get_tuned", wraps=main_dtv.get_tuned
) as mock_get_tuned, patch.object(
main_dtv, "get_standby", wraps=main_dtv.get_standby
) as mock_get_standby:
        # Turn main DVR on. When turned on, the DVR is playing.
await async_turn_on(hass, MAIN_ENTITY_ID)
await hass.async_block_till_done()
assert mock_key_press.called
assert mock_key_press.call_args == call("poweron")
state = hass.states.get(MAIN_ENTITY_ID)
assert state.state == STATE_PLAYING
# Pause live TV.
await async_media_pause(hass, MAIN_ENTITY_ID)
await hass.async_block_till_done()
assert mock_key_press.called
assert mock_key_press.call_args == call("pause")
state = hass.states.get(MAIN_ENTITY_ID)
assert state.state == STATE_PAUSED
# Start play again for live TV.
await async_media_play(hass, MAIN_ENTITY_ID)
await hass.async_block_till_done()
assert mock_key_press.called
assert mock_key_press.call_args == call("play")
state = hass.states.get(MAIN_ENTITY_ID)
assert state.state == STATE_PLAYING
# Change channel, currently it should be 202
assert state.attributes.get("source") == 202
await async_play_media(hass, "channel", 7, MAIN_ENTITY_ID)
await hass.async_block_till_done()
assert mock_tune_channel.called
assert mock_tune_channel.call_args == call("7")
state = hass.states.get(MAIN_ENTITY_ID)
assert state.attributes.get("source") == 7
# Stop live TV.
await async_media_stop(hass, MAIN_ENTITY_ID)
await hass.async_block_till_done()
assert mock_key_press.called
assert mock_key_press.call_args == call("stop")
state = hass.states.get(MAIN_ENTITY_ID)
assert state.state == STATE_PAUSED
# Turn main DVR off.
await async_turn_off(hass, MAIN_ENTITY_ID)
await hass.async_block_till_done()
assert mock_key_press.called
assert mock_key_press.call_args == call("poweroff")
state = hass.states.get(MAIN_ENTITY_ID)
assert state.state == STATE_OFF
# There should have been 6 calls to check if DVR is in standby
assert main_dtv.get_standby.call_count == 6
assert mock_get_standby.call_count == 6
        # There should be 5 calls to get current info (it is skipped once
        # while the DVR is in standby).
assert main_dtv.get_tuned.call_count == 5
assert mock_get_tuned.call_count == 5
async def test_available(hass, platforms, main_dtv, mock_now):
"""Test available status."""
next_update = mock_now + timedelta(minutes=5)
with patch("homeassistant.util.dt.utcnow", return_value=next_update):
async_fire_time_changed(hass, next_update)
await hass.async_block_till_done()
# Confirm service is currently set to available.
state = hass.states.get(MAIN_ENTITY_ID)
assert state.state != STATE_UNAVAILABLE
# Make update fail 1st time
next_update = next_update + timedelta(minutes=5)
with patch.object(
main_dtv, "get_standby", side_effect=requests.RequestException
), patch("homeassistant.util.dt.utcnow", return_value=next_update):
async_fire_time_changed(hass, next_update)
await hass.async_block_till_done()
state = hass.states.get(MAIN_ENTITY_ID)
assert state.state != STATE_UNAVAILABLE
# Make update fail 2nd time within 1 minute
next_update = next_update + timedelta(seconds=30)
with patch.object(
main_dtv, "get_standby", side_effect=requests.RequestException
), patch("homeassistant.util.dt.utcnow", return_value=next_update):
async_fire_time_changed(hass, next_update)
await hass.async_block_till_done()
state = hass.states.get(MAIN_ENTITY_ID)
assert state.state != STATE_UNAVAILABLE
    # Make update fail 3rd time more than a minute after 1st failure
next_update = next_update + timedelta(minutes=1)
with patch.object(
main_dtv, "get_standby", side_effect=requests.RequestException
), patch("homeassistant.util.dt.utcnow", return_value=next_update):
async_fire_time_changed(hass, next_update)
await hass.async_block_till_done()
state = hass.states.get(MAIN_ENTITY_ID)
assert state.state == STATE_UNAVAILABLE
# Recheck state, update should work again.
next_update = next_update + timedelta(minutes=5)
with patch("homeassistant.util.dt.utcnow", return_value=next_update):
async_fire_time_changed(hass, next_update)
await hass.async_block_till_done()
state = hass.states.get(MAIN_ENTITY_ID)
assert state.state != STATE_UNAVAILABLE
|
|
import functools
import unittest
import itertools
import numpy
import six
import chainer
from chainer.backends import cuda
from chainer import functions
from chainer import gradient_check
from chainer import testing
from chainer.testing import attr
def _skip_if(cond, reason):
"""Skip test if cond(self) is True"""
def decorator(impl):
@functools.wraps(impl)
def wrapper(self, *args, **kwargs):
if cond(self):
raise unittest.SkipTest(reason)
else:
impl(self, *args, **kwargs)
return wrapper
return decorator
def _is_good_param(param):
    # Check if the 'nonzeros' param is valid and meaningful. On the latter
    # point, x should contain at least one zero if 'nonzeros' is given.
return param['nonzeros'] is None \
or param['nonzeros'] < numpy.prod(param['shape'])
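# For instance (illustrative): a combination with shape (4, 15) and
# nonzeros=2 is kept because 2 < 4 * 15 = 60, whereas shape (0, 2) with
# nonzeros=0 is dropped, since numpy.prod((0, 2)) is 0 and 0 < 0 is False.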
@testing.parameterize(*filter(_is_good_param, testing.product([
[
{'dtype': numpy.float16},
{'dtype': numpy.float32},
{'dtype': numpy.float64},
],
[
{'shape': (4, 15), 'axis': 1},
{'shape': (4,), 'axis': 0},
{'shape': (4, 3, 2, 5), 'axis': 0},
{'shape': (4, 3, 2, 5), 'axis': 1},
{'shape': (4, 3, 2, 5), 'axis': 2},
{'shape': (4, 3, 2, 5), 'axis': 3},
{'shape': (4, 3, 2), 'axis': (0, 1)},
{'shape': (4, 3, 2, 4, 3, 2, 2), 'axis': (1, 4, 3, 6)},
{'shape': (0, 2), 'axis': 1},
{'shape': (), 'axis': ()},
],
[
# nonzeros (optional int): number of nonzero elems in input
# truezero (bool): flag whether zero elems are exactly zero. If false,
# randomly-chosen small values are used.
{'eps': 1e-5, 'nonzeros': None},
{'eps': 1e-1, 'nonzeros': None},
{'eps': 1e-1, 'nonzeros': 0, 'truezero': True},
{'eps': 1e-1, 'nonzeros': 0, 'truezero': False},
{'eps': 1e-1, 'nonzeros': 2, 'truezero': True},
{'eps': 1e-1, 'nonzeros': 2, 'truezero': False},
],
])))
class TestL2Normalization(unittest.TestCase):
def setUp(self):
min_abs = 0.1
if self.dtype == numpy.float16:
tuple_axis = self.axis
if not isinstance(tuple_axis, tuple):
tuple_axis = (tuple_axis,)
aggr_size = numpy.prod(
[self.shape[i] for i in tuple_axis], dtype=int)
min_abs = max(min_abs, 0.5 / aggr_size)
self.x = chainer.utils.force_array(
numpy.random.uniform(min_abs, 1, self.shape)
* (1 - 2 * numpy.random.randint(2, size=self.shape)),
self.dtype)
if self.nonzeros is not None:
            # Make self.x have a limited number of large values.
            # Get a mask of the indices to modify.
zeros = self.x.size - self.nonzeros
while True:
rand = numpy.random.uniform(0, 1, self.shape)
mask = rand <= numpy.sort(rand.ravel())[zeros - 1]
if self.x[mask].shape == (zeros,):
break
# set zeros or small values to a part of the input
if self.truezero:
self.x[mask] = 0
else:
zero_scale = 10. ** numpy.random.randint(-40, -3)
self.x[mask] = numpy.random.uniform(
-zero_scale, zero_scale, zeros)
self.gy = numpy.random.uniform(-1, 1, self.shape).astype(self.dtype)
self.ggx = numpy.random.uniform(
-1, 1, self.shape).astype(self.dtype)
if self.dtype == numpy.float16:
self.check_forward_options = {'atol': 1e-3, 'rtol': 1e-3}
else:
self.check_forward_options = {}
if self.nonzeros is None:
if self.dtype == numpy.float16:
self.check_backward_options = {
'dtype': numpy.float64, 'atol': 5e-3, 'rtol': 5e-3}
self.check_double_backward_options = {
'dtype': numpy.float64, 'atol': 1e-2, 'rtol': 1e-2}
else:
self.check_backward_options = {
'dtype': numpy.float64, 'atol': 1e-4, 'rtol': 1e-4}
self.check_double_backward_options = {
'dtype': numpy.float64, 'atol': 1e-4, 'rtol': 1e-4}
else:
self.check_backward_options = {
'dtype': numpy.float64, 'atol': 1e-2, 'rtol': 1e-2,
'eps': 1e-4}
def check_forward(self, x_data, axis):
eps = self.eps
x = chainer.Variable(x_data)
y = functions.normalize(x, eps=eps, axis=axis)
self.assertEqual(y.data.dtype, self.dtype)
y_data = cuda.to_cpu(y.data)
y_expect = numpy.empty_like(self.x)
shape = self.x.shape
indices = []
axis_tuple = axis if isinstance(axis, tuple) else (axis,)
for i in six.moves.range(len(shape)):
if i not in axis_tuple:
indices.append(six.moves.range(shape[i]))
else:
indices.append([slice(None)])
indices_tuple = list(itertools.product(*indices))
for index in indices_tuple:
# Note: Casting back the result of `numpy.linalg.norm` to `x.dtype`
# because old NumPy casts it to float32 when a float16 value is
# given.
numerator = numpy.linalg.norm(self.x[index]).astype(x.dtype) + eps
y_expect[index] = self.x[index] / numerator
testing.assert_allclose(y_expect, y_data, **self.check_forward_options)
def test_forward_cpu(self):
self.check_forward(self.x, self.axis)
@attr.gpu
def test_forward_gpu(self):
self.check_forward(cuda.to_gpu(self.x), self.axis)
def check_backward(self, x_data, axis, y_grad):
def f(x):
return functions.normalize(x, eps=self.eps, axis=axis)
gradient_check.check_backward(
f, x_data, y_grad, **self.check_backward_options)
def test_backward_cpu(self):
self.check_backward(self.x, self.axis, self.gy)
@attr.gpu
def test_backward_gpu(self):
self.check_backward(
cuda.to_gpu(self.x), self.axis, cuda.to_gpu(self.gy))
@_skip_if(
lambda self: self.nonzeros is not None,
'backward of L2Normalize is non-differentiable at zero vector')
def check_double_backward(self, x_data, axis, y_grad, x_grad_grad):
def f(x):
return functions.normalize(x, eps=self.eps, axis=axis)
gradient_check.check_double_backward(
f, x_data, y_grad, x_grad_grad,
**self.check_double_backward_options)
def test_double_backward_cpu(self):
self.check_double_backward(self.x, self.axis, self.gy, self.ggx)
@attr.gpu
def test_double_backward_gpu(self):
self.check_double_backward(
cuda.to_gpu(self.x), self.axis, cuda.to_gpu(self.gy),
cuda.to_gpu(self.ggx))
def check_eps(self, x_data):
x = chainer.Variable(x_data)
y = functions.normalize(x, axis=self.axis)
self.assertEqual(y.data.dtype, self.dtype)
y_data = cuda.to_cpu(y.data)
y_expect = numpy.zeros_like(self.x)
testing.assert_allclose(y_expect, y_data)
def test_eps_cpu(self):
self.check_eps(numpy.zeros_like(self.x))
@attr.gpu
def test_eps_gpu(self):
self.check_eps(cuda.to_gpu(numpy.zeros_like(self.x)))
testing.run_module(__name__, __file__)
|
|
# -*- coding: utf-8 -*-
# This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Fetches album art.
"""
from __future__ import division, absolute_import, print_function
from contextlib import closing
import os
import re
from tempfile import NamedTemporaryFile
import requests
from beets import plugins
from beets import importer
from beets import ui
from beets import util
from beets import config
from beets.mediafile import image_mime_type
from beets.util.artresizer import ArtResizer
from beets.util import confit
from beets.util import syspath, bytestring_path, py3_path
import six
try:
import itunes
HAVE_ITUNES = True
except ImportError:
HAVE_ITUNES = False
CONTENT_TYPES = {
'image/jpeg': [b'jpg', b'jpeg'],
'image/png': [b'png']
}
IMAGE_EXTENSIONS = [ext for exts in CONTENT_TYPES.values() for ext in exts]
class Candidate(object):
"""Holds information about a matching artwork, deals with validation of
dimension restrictions and resizing.
"""
CANDIDATE_BAD = 0
CANDIDATE_EXACT = 1
CANDIDATE_DOWNSCALE = 2
MATCH_EXACT = 0
MATCH_FALLBACK = 1
def __init__(self, log, path=None, url=None, source=u'',
match=None, size=None):
self._log = log
self.path = path
self.url = url
self.source = source
self.check = None
self.match = match
self.size = size
def _validate(self, extra):
"""Determine whether the candidate artwork is valid based on
its dimensions (width and ratio).
Return `CANDIDATE_BAD` if the file is unusable.
Return `CANDIDATE_EXACT` if the file is usable as-is.
Return `CANDIDATE_DOWNSCALE` if the file must be resized.
"""
if not self.path:
return self.CANDIDATE_BAD
if not (extra['enforce_ratio'] or
extra['minwidth'] or
extra['maxwidth']):
return self.CANDIDATE_EXACT
# get_size returns None if no local imaging backend is available
if not self.size:
self.size = ArtResizer.shared.get_size(self.path)
self._log.debug(u'image size: {}', self.size)
if not self.size:
self._log.warning(u'Could not get size of image (please see '
u'documentation for dependencies). '
u'The configuration options `minwidth` and '
u'`enforce_ratio` may be violated.')
return self.CANDIDATE_EXACT
short_edge = min(self.size)
long_edge = max(self.size)
# Check minimum size.
if extra['minwidth'] and self.size[0] < extra['minwidth']:
self._log.debug(u'image too small ({} < {})',
self.size[0], extra['minwidth'])
return self.CANDIDATE_BAD
# Check aspect ratio.
edge_diff = long_edge - short_edge
if extra['enforce_ratio']:
if extra['margin_px']:
if edge_diff > extra['margin_px']:
self._log.debug(u'image is not close enough to being '
u'square, ({} - {} > {})',
long_edge, short_edge, extra['margin_px'])
return self.CANDIDATE_BAD
elif extra['margin_percent']:
margin_px = extra['margin_percent'] * long_edge
if edge_diff > margin_px:
self._log.debug(u'image is not close enough to being '
u'square, ({} - {} > {})',
long_edge, short_edge, margin_px)
return self.CANDIDATE_BAD
elif edge_diff:
# also reached for margin_px == 0 and margin_percent == 0.0
self._log.debug(u'image is not square ({} != {})',
self.size[0], self.size[1])
return self.CANDIDATE_BAD
# Check maximum size.
if extra['maxwidth'] and self.size[0] > extra['maxwidth']:
self._log.debug(u'image needs resizing ({} > {})',
self.size[0], extra['maxwidth'])
return self.CANDIDATE_DOWNSCALE
return self.CANDIDATE_EXACT
def validate(self, extra):
self.check = self._validate(extra)
return self.check
def resize(self, extra):
if extra['maxwidth'] and self.check == self.CANDIDATE_DOWNSCALE:
self.path = ArtResizer.shared.resize(extra['maxwidth'], self.path)
def _logged_get(log, *args, **kwargs):
"""Like `requests.get`, but logs the effective URL to the specified
`log` at the `DEBUG` level.
Use the optional `message` parameter to specify what to log before
the URL. By default, the string is "getting URL".
Also sets the User-Agent header to indicate beets.
"""
# Use some arguments with the `send` call but most with the
# `Request` construction. This is a cheap, magic-filled way to
# emulate `requests.get` or, more pertinently,
# `requests.Session.request`.
req_kwargs = kwargs
send_kwargs = {}
for arg in ('stream', 'verify', 'proxies', 'cert', 'timeout'):
if arg in kwargs:
send_kwargs[arg] = req_kwargs.pop(arg)
# Our special logging message parameter.
if 'message' in kwargs:
message = kwargs.pop('message')
else:
message = 'getting URL'
req = requests.Request('GET', *args, **req_kwargs)
with requests.Session() as s:
s.headers = {'User-Agent': 'beets'}
prepped = s.prepare_request(req)
log.debug('{}: {}', message, prepped.url)
return s.send(prepped, **send_kwargs)
class RequestMixin(object):
"""Adds a Requests wrapper to the class that uses the logger, which
must be named `self._log`.
"""
def request(self, *args, **kwargs):
"""Like `requests.get`, but uses the logger `self._log`.
See also `_logged_get`.
"""
return _logged_get(self._log, *args, **kwargs)
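    # For example (as done by RemoteArtSource.fetch_image further below), a
    # source can stream a download while logging a custom message:
    #
    #     resp = self.request(candidate.url, stream=True,
    #                         message=u'downloading image')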
# ART SOURCES ################################################################
class ArtSource(RequestMixin):
def __init__(self, log, config):
self._log = log
self._config = config
def get(self, album, extra):
raise NotImplementedError()
def _candidate(self, **kwargs):
return Candidate(source=self, log=self._log, **kwargs)
def fetch_image(self, candidate, extra):
raise NotImplementedError()
class LocalArtSource(ArtSource):
IS_LOCAL = True
LOC_STR = u'local'
def fetch_image(self, candidate, extra):
pass
class RemoteArtSource(ArtSource):
IS_LOCAL = False
LOC_STR = u'remote'
def fetch_image(self, candidate, extra):
"""Downloads an image from a URL and checks whether it seems to
actually be an image. If so, returns a path to the downloaded image.
Otherwise, returns None.
"""
if extra['maxwidth']:
candidate.url = ArtResizer.shared.proxy_url(extra['maxwidth'],
candidate.url)
try:
with closing(self.request(candidate.url, stream=True,
message=u'downloading image')) as resp:
ct = resp.headers.get('Content-Type', None)
# Download the image to a temporary file. As some servers
# (notably fanart.tv) have proven to return wrong Content-Types
# when images were uploaded with a bad file extension, do not
# rely on it. Instead validate the type using the file magic
# and only then determine the extension.
data = resp.iter_content(chunk_size=1024)
header = b''
for chunk in data:
header += chunk
if len(header) >= 32:
# The imghdr module will only read 32 bytes, and our
# own additions in mediafile even less.
break
else:
# server didn't return enough data, i.e. corrupt image
return
real_ct = image_mime_type(header)
if real_ct is None:
# detection by file magic failed, fall back to the
# server-supplied Content-Type
# Is our type detection failsafe enough to drop this?
real_ct = ct
if real_ct not in CONTENT_TYPES:
self._log.debug(u'not a supported image: {}',
real_ct or u'unknown content type')
return
ext = b'.' + CONTENT_TYPES[real_ct][0]
if real_ct != ct:
self._log.warning(u'Server specified {}, but returned a '
u'{} image. Correcting the extension '
u'to {}',
ct, real_ct, ext)
suffix = py3_path(ext)
with NamedTemporaryFile(suffix=suffix, delete=False) as fh:
# write the first already loaded part of the image
fh.write(header)
# download the remaining part of the image
for chunk in data:
fh.write(chunk)
self._log.debug(u'downloaded art to: {0}',
util.displayable_path(fh.name))
candidate.path = util.bytestring_path(fh.name)
return
except (IOError, requests.RequestException, TypeError) as exc:
# Handling TypeError works around a urllib3 bug:
# https://github.com/shazow/urllib3/issues/556
self._log.debug(u'error fetching art: {}', exc)
return
class CoverArtArchive(RemoteArtSource):
NAME = u"Cover Art Archive"
URL = 'http://coverartarchive.org/release/{mbid}/front'
GROUP_URL = 'http://coverartarchive.org/release-group/{mbid}/front'
def get(self, album, extra):
"""Return the Cover Art Archive and Cover Art Archive release group URLs
using album MusicBrainz release ID and release group ID.
"""
if album.mb_albumid:
yield self._candidate(url=self.URL.format(mbid=album.mb_albumid),
match=Candidate.MATCH_EXACT)
if album.mb_releasegroupid:
yield self._candidate(
url=self.GROUP_URL.format(mbid=album.mb_releasegroupid),
match=Candidate.MATCH_FALLBACK)
class Amazon(RemoteArtSource):
NAME = u"Amazon"
URL = 'http://images.amazon.com/images/P/%s.%02i.LZZZZZZZ.jpg'
INDICES = (1, 2)
def get(self, album, extra):
"""Generate URLs using Amazon ID (ASIN) string.
"""
if album.asin:
for index in self.INDICES:
yield self._candidate(url=self.URL % (album.asin, index),
match=Candidate.MATCH_EXACT)
class AlbumArtOrg(RemoteArtSource):
NAME = u"AlbumArt.org scraper"
URL = 'http://www.albumart.org/index_detail.php'
PAT = r'href\s*=\s*"([^>"]*)"[^>]*title\s*=\s*"View larger image"'
def get(self, album, extra):
"""Return art URL from AlbumArt.org using album ASIN.
"""
if not album.asin:
return
# Get the page from albumart.org.
try:
resp = self.request(self.URL, params={'asin': album.asin})
self._log.debug(u'scraped art URL: {0}', resp.url)
except requests.RequestException:
self._log.debug(u'error scraping art page')
return
# Search the page for the image URL.
m = re.search(self.PAT, resp.text)
if m:
image_url = m.group(1)
yield self._candidate(url=image_url, match=Candidate.MATCH_EXACT)
else:
self._log.debug(u'no image found on page')
class GoogleImages(RemoteArtSource):
NAME = u"Google Images"
URL = u'https://www.googleapis.com/customsearch/v1'
def __init__(self, *args, **kwargs):
super(GoogleImages, self).__init__(*args, **kwargs)
        self.key = self._config['google_key'].get()
        self.cx = self._config['google_engine'].get()
def get(self, album, extra):
"""Return art URL from google custom search engine
given an album title and interpreter.
"""
if not (album.albumartist and album.album):
return
search_string = (album.albumartist + ',' + album.album).encode('utf-8')
response = self.request(self.URL, params={
'key': self.key,
'cx': self.cx,
'q': search_string,
'searchType': 'image'
})
# Get results using JSON.
try:
data = response.json()
except ValueError:
self._log.debug(u'google: error loading response: {}'
.format(response.text))
return
if 'error' in data:
reason = data['error']['errors'][0]['reason']
self._log.debug(u'google fetchart error: {0}', reason)
return
if 'items' in data.keys():
for item in data['items']:
yield self._candidate(url=item['link'],
match=Candidate.MATCH_EXACT)
class FanartTV(RemoteArtSource):
"""Art from fanart.tv requested using their API"""
NAME = u"fanart.tv"
API_URL = 'http://webservice.fanart.tv/v3/'
API_ALBUMS = API_URL + 'music/albums/'
PROJECT_KEY = '61a7d0ab4e67162b7a0c7c35915cd48e'
def __init__(self, *args, **kwargs):
super(FanartTV, self).__init__(*args, **kwargs)
self.client_key = self._config['fanarttv_key'].get()
def get(self, album, extra):
if not album.mb_releasegroupid:
return
response = self.request(
self.API_ALBUMS + album.mb_releasegroupid,
headers={'api-key': self.PROJECT_KEY,
'client-key': self.client_key})
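        # A sketch of the response fields this method actually reads below
        # (illustrative; not the complete fanart.tv schema):
        #
        #     {"albums": {"<release-group-mbid>": {
        #         "albumcover": [{"url": "http://...", "likes": "3"}, ...]}}}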
try:
data = response.json()
except ValueError:
self._log.debug(u'fanart.tv: error loading response: {}',
response.text)
return
if u'status' in data and data[u'status'] == u'error':
if u'not found' in data[u'error message'].lower():
self._log.debug(u'fanart.tv: no image found')
elif u'api key' in data[u'error message'].lower():
self._log.warning(u'fanart.tv: Invalid API key given, please '
u'enter a valid one in your config file.')
else:
self._log.debug(u'fanart.tv: error on request: {}',
data[u'error message'])
return
matches = []
# can there be more than one releasegroupid per response?
for mbid, art in data.get(u'albums', dict()).items():
# there might be more art referenced, e.g. cdart, and an albumcover
            # might not be present, even if the request was successful
if album.mb_releasegroupid == mbid and u'albumcover' in art:
matches.extend(art[u'albumcover'])
# can this actually occur?
else:
self._log.debug(u'fanart.tv: unexpected mb_releasegroupid in '
u'response!')
matches.sort(key=lambda x: x[u'likes'], reverse=True)
for item in matches:
# fanart.tv has a strict size requirement for album art to be
# uploaded
yield self._candidate(url=item[u'url'],
match=Candidate.MATCH_EXACT,
size=(1000, 1000))
class ITunesStore(RemoteArtSource):
NAME = u"iTunes Store"
def get(self, album, extra):
"""Return art URL from iTunes Store given an album title.
"""
if not (album.albumartist and album.album):
return
search_string = (album.albumartist + ' ' + album.album).encode('utf-8')
try:
# Isolate bugs in the iTunes library while searching.
try:
results = itunes.search_album(search_string)
except Exception as exc:
self._log.debug(u'iTunes search failed: {0}', exc)
return
# Get the first match.
if results:
itunes_album = results[0]
else:
                self._log.debug(u'iTunes search for {!r} got no results',
search_string)
return
if itunes_album.get_artwork()['100']:
small_url = itunes_album.get_artwork()['100']
big_url = small_url.replace('100x100', '1200x1200')
yield self._candidate(url=big_url, match=Candidate.MATCH_EXACT)
else:
self._log.debug(u'album has no artwork in iTunes Store')
except IndexError:
self._log.debug(u'album not found in iTunes Store')
class Wikipedia(RemoteArtSource):
NAME = u"Wikipedia (queried through DBpedia)"
DBPEDIA_URL = 'http://dbpedia.org/sparql'
WIKIPEDIA_URL = 'http://en.wikipedia.org/w/api.php'
SPARQL_QUERY = u'''PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
PREFIX dbpprop: <http://dbpedia.org/property/>
PREFIX owl: <http://dbpedia.org/ontology/>
PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
PREFIX foaf: <http://xmlns.com/foaf/0.1/>
SELECT DISTINCT ?pageId ?coverFilename WHERE {{
?subject owl:wikiPageID ?pageId .
?subject dbpprop:name ?name .
?subject rdfs:label ?label .
{{ ?subject dbpprop:artist ?artist }}
UNION
{{ ?subject owl:artist ?artist }}
{{ ?artist foaf:name "{artist}"@en }}
UNION
{{ ?artist dbpprop:name "{artist}"@en }}
?subject rdf:type <http://dbpedia.org/ontology/Album> .
?subject dbpprop:cover ?coverFilename .
FILTER ( regex(?name, "{album}", "i") )
}}
Limit 1'''
def get(self, album, extra):
if not (album.albumartist and album.album):
return
# Find the name of the cover art filename on DBpedia
cover_filename, page_id = None, None
dbpedia_response = self.request(
self.DBPEDIA_URL,
params={
'format': 'application/sparql-results+json',
'timeout': 2500,
'query': self.SPARQL_QUERY.format(
artist=album.albumartist.title(), album=album.album)
},
headers={'content-type': 'application/json'},
)
try:
data = dbpedia_response.json()
results = data['results']['bindings']
if results:
cover_filename = 'File:' + results[0]['coverFilename']['value']
page_id = results[0]['pageId']['value']
else:
self._log.debug(u'wikipedia: album not found on dbpedia')
except (ValueError, KeyError, IndexError):
self._log.debug(u'wikipedia: error scraping dbpedia response: {}',
dbpedia_response.text)
# Ensure we have a filename before attempting to query wikipedia
if not (cover_filename and page_id):
return
# DBPedia sometimes provides an incomplete cover_filename, indicated
# by the filename having a space before the extension, e.g., 'foo .bar'
# An additional Wikipedia call can help to find the real filename.
# This may be removed once the DBPedia issue is resolved, see:
# https://github.com/dbpedia/extraction-framework/issues/396
if ' .' in cover_filename and \
'.' not in cover_filename.split(' .')[-1]:
self._log.debug(
u'wikipedia: dbpedia provided incomplete cover_filename'
)
lpart, rpart = cover_filename.rsplit(' .', 1)
# Query all the images in the page
wikipedia_response = self.request(
self.WIKIPEDIA_URL,
params={
'format': 'json',
'action': 'query',
'continue': '',
'prop': 'images',
'pageids': page_id,
},
headers={'content-type': 'application/json'},
)
# Try to see if one of the images on the pages matches our
            # incomplete cover_filename
try:
data = wikipedia_response.json()
results = data['query']['pages'][page_id]['images']
for result in results:
if re.match(re.escape(lpart) + r'.*?\.' + re.escape(rpart),
result['title']):
cover_filename = result['title']
break
except (ValueError, KeyError):
self._log.debug(
u'wikipedia: failed to retrieve a cover_filename'
)
return
# Find the absolute url of the cover art on Wikipedia
wikipedia_response = self.request(
self.WIKIPEDIA_URL,
params={
'format': 'json',
'action': 'query',
'continue': '',
'prop': 'imageinfo',
'iiprop': 'url',
'titles': cover_filename.encode('utf-8'),
},
headers={'content-type': 'application/json'},
)
try:
data = wikipedia_response.json()
results = data['query']['pages']
for _, result in results.items():
image_url = result['imageinfo'][0]['url']
yield self._candidate(url=image_url,
match=Candidate.MATCH_EXACT)
except (ValueError, KeyError, IndexError):
self._log.debug(u'wikipedia: error scraping imageinfo')
return
class FileSystem(LocalArtSource):
NAME = u"Filesystem"
@staticmethod
def filename_priority(filename, cover_names):
"""Sort order for image names.
        Return the indexes of the cover names found in the image filename.
        Filenames matching lower-numbered (and more) cover names sort first
        and therefore have higher priority.
"""
return [idx for (idx, x) in enumerate(cover_names) if x in filename]
def get(self, album, extra):
"""Look for album art files in the specified directories.
"""
paths = extra['paths']
if not paths:
return
cover_names = list(map(util.bytestring_path, extra['cover_names']))
cover_names_str = b'|'.join(cover_names)
cover_pat = br''.join([br"(\b|_)(", cover_names_str, br")(\b|_)"])
cautious = extra['cautious']
for path in paths:
if not os.path.isdir(syspath(path)):
continue
# Find all files that look like images in the directory.
images = []
for fn in os.listdir(syspath(path)):
fn = bytestring_path(fn)
for ext in IMAGE_EXTENSIONS:
if fn.lower().endswith(b'.' + ext) and \
os.path.isfile(syspath(os.path.join(path, fn))):
images.append(fn)
# Look for "preferred" filenames.
images = sorted(images,
key=lambda x:
self.filename_priority(x, cover_names))
remaining = []
for fn in images:
if re.search(cover_pat, os.path.splitext(fn)[0], re.I):
self._log.debug(u'using well-named art file {0}',
util.displayable_path(fn))
yield self._candidate(path=os.path.join(path, fn),
match=Candidate.MATCH_EXACT)
else:
remaining.append(fn)
# Fall back to any image in the folder.
if remaining and not cautious:
self._log.debug(u'using fallback art file {0}',
util.displayable_path(remaining[0]))
yield self._candidate(path=os.path.join(path, remaining[0]),
match=Candidate.MATCH_FALLBACK)
# Try each source in turn.
SOURCES_ALL = [u'filesystem',
u'coverart', u'itunes', u'amazon', u'albumart',
u'wikipedia', u'google', u'fanarttv']
ART_SOURCES = {
u'filesystem': FileSystem,
u'coverart': CoverArtArchive,
u'itunes': ITunesStore,
u'albumart': AlbumArtOrg,
u'amazon': Amazon,
u'wikipedia': Wikipedia,
u'google': GoogleImages,
u'fanarttv': FanartTV,
}
SOURCE_NAMES = {v: k for k, v in ART_SOURCES.items()}
# PLUGIN LOGIC ###############################################################
class FetchArtPlugin(plugins.BeetsPlugin, RequestMixin):
PAT_PX = r"(0|[1-9][0-9]*)px"
PAT_PERCENT = r"(100(\.00?)?|[1-9]?[0-9](\.[0-9]{1,2})?)%"
def __init__(self):
super(FetchArtPlugin, self).__init__()
# Holds candidates corresponding to downloaded images between
# fetching them and placing them in the filesystem.
self.art_candidates = {}
self.config.add({
'auto': True,
'minwidth': 0,
'maxwidth': 0,
'enforce_ratio': False,
'cautious': False,
'cover_names': ['cover', 'front', 'art', 'album', 'folder'],
'sources': ['filesystem',
'coverart', 'itunes', 'amazon', 'albumart'],
'google_key': None,
'google_engine': u'001442825323518660753:hrh5ch1gjzm',
'fanarttv_key': None,
'store_source': False,
})
self.config['google_key'].redact = True
self.config['fanarttv_key'].redact = True
self.minwidth = self.config['minwidth'].get(int)
self.maxwidth = self.config['maxwidth'].get(int)
# allow both pixel and percentage-based margin specifications
self.enforce_ratio = self.config['enforce_ratio'].get(
confit.OneOf([bool,
confit.String(pattern=self.PAT_PX),
confit.String(pattern=self.PAT_PERCENT)]))
self.margin_px = None
self.margin_percent = None
if type(self.enforce_ratio) is six.text_type:
if self.enforce_ratio[-1] == u'%':
self.margin_percent = float(self.enforce_ratio[:-1]) / 100
elif self.enforce_ratio[-2:] == u'px':
self.margin_px = int(self.enforce_ratio[:-2])
else:
# shouldn't happen
raise confit.ConfigValueError()
self.enforce_ratio = True
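        # A sketch of how the accepted `enforce_ratio` forms map (the values
        # here are illustrative, not defaults):
        #
        #     enforce_ratio: yes    ->  enforce_ratio = True, no margin
        #     enforce_ratio: 0.5%   ->  margin_percent == 0.005
        #     enforce_ratio: 10px   ->  margin_px == 10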
cover_names = self.config['cover_names'].as_str_seq()
self.cover_names = list(map(util.bytestring_path, cover_names))
self.cautious = self.config['cautious'].get(bool)
self.store_source = self.config['store_source'].get(bool)
self.src_removed = (config['import']['delete'].get(bool) or
config['import']['move'].get(bool))
if self.config['auto']:
# Enable two import hooks when fetching is enabled.
self.import_stages = [self.fetch_art]
self.register_listener('import_task_files', self.assign_art)
available_sources = list(SOURCES_ALL)
if not HAVE_ITUNES and u'itunes' in available_sources:
available_sources.remove(u'itunes')
if not self.config['google_key'].get() and \
u'google' in available_sources:
available_sources.remove(u'google')
sources_name = plugins.sanitize_choices(
self.config['sources'].as_str_seq(), available_sources)
if 'remote_priority' in self.config:
self._log.warning(
u'The `fetch_art.remote_priority` configuration option has '
u'been deprecated. Instead, place `filesystem` at the end of '
u'your `sources` list.')
if self.config['remote_priority'].get(bool):
try:
sources_name.remove(u'filesystem')
sources_name.append(u'filesystem')
except ValueError:
pass
self.sources = [ART_SOURCES[s](self._log, self.config)
for s in sources_name]
# Asynchronous; after music is added to the library.
def fetch_art(self, session, task):
"""Find art for the album being imported."""
if task.is_album: # Only fetch art for full albums.
if task.album.artpath and os.path.isfile(task.album.artpath):
# Album already has art (probably a re-import); skip it.
return
if task.choice_flag == importer.action.ASIS:
# For as-is imports, don't search Web sources for art.
local = True
elif task.choice_flag == importer.action.APPLY:
# Search everywhere for art.
local = False
else:
# For any other choices (e.g., TRACKS), do nothing.
return
candidate = self.art_for_album(task.album, task.paths, local)
if candidate:
self.art_candidates[task] = candidate
def _set_art(self, album, candidate, delete=False):
album.set_art(candidate.path, delete)
if self.store_source:
# store the source of the chosen artwork in a flexible field
self._log.debug(
u"Storing art_source for {0.albumartist} - {0.album}",
album)
album.art_source = SOURCE_NAMES[type(candidate.source)]
album.store()
# Synchronous; after music files are put in place.
def assign_art(self, session, task):
"""Place the discovered art in the filesystem."""
if task in self.art_candidates:
candidate = self.art_candidates.pop(task)
self._set_art(task.album, candidate, not self.src_removed)
if self.src_removed:
task.prune(candidate.path)
# Manual album art fetching.
def commands(self):
cmd = ui.Subcommand('fetchart', help='download album art')
cmd.parser.add_option(
u'-f', u'--force', dest='force',
action='store_true', default=False,
help=u're-download art when already present'
)
def func(lib, opts, args):
self.batch_fetch_art(lib, lib.albums(ui.decargs(args)), opts.force)
cmd.func = func
return [cmd]
# Utilities converted from functions to methods on logging overhaul
def art_for_album(self, album, paths, local_only=False):
"""Given an Album object, returns a path to downloaded art for the
album (or None if no art is found). If `maxwidth`, then images are
resized to this maximum pixel size. If `local_only`, then only local
image files from the filesystem are returned; no network requests
are made.
"""
out = None
# all the information any of the sources might need
extra = {'paths': paths,
'cover_names': self.cover_names,
'cautious': self.cautious,
'enforce_ratio': self.enforce_ratio,
'margin_px': self.margin_px,
'margin_percent': self.margin_percent,
'minwidth': self.minwidth,
'maxwidth': self.maxwidth}
for source in self.sources:
if source.IS_LOCAL or not local_only:
self._log.debug(
u'trying source {0} for album {1.albumartist} - {1.album}',
SOURCE_NAMES[type(source)],
album,
)
# URLs might be invalid at this point, or the image may not
# fulfill the requirements
for candidate in source.get(album, extra):
source.fetch_image(candidate, extra)
if candidate.validate(extra):
out = candidate
self._log.debug(
u'using {0.LOC_STR} image {1}'.format(
source, util.displayable_path(out.path)))
break
if out:
break
if out:
out.resize(extra)
return out
def batch_fetch_art(self, lib, albums, force):
"""Fetch album art for each of the albums. This implements the manual
fetchart CLI command.
"""
for album in albums:
if album.artpath and not force and os.path.isfile(album.artpath):
message = ui.colorize('text_highlight_minor', u'has album art')
else:
# In ordinary invocations, look for images on the
# filesystem. When forcing, however, always go to the Web
# sources.
local_paths = None if force else [album.path]
candidate = self.art_for_album(album, local_paths)
if candidate:
self._set_art(album, candidate)
message = ui.colorize('text_success', u'found album art')
else:
message = ui.colorize('text_error', u'no art found')
self._log.info(u'{0}: {1}', album, message)
|
|
#!/usr/bin/env python
"""xml2json.py Convert XML to JSON
Relies on ElementTree for the XML parsing. This is based on
pesterfish.py but uses a different XML->JSON mapping.
The XML->JSON mapping is described at
http://www.xml.com/pub/a/2006/05/31/converting-between-xml-and-json.html
Rewritten to a command line utility by Hay Kranen < github.com/hay > with
contributions from George Hamilton (gmh04) and Dan Brown (jdanbrown)
XML JSON
<e/> "e": null
<e>text</e> "e": "text"
<e name="value" /> "e": { "@name": "value" }
<e name="value">text</e> "e": { "@name": "value", "#text": "text" }
<e> <a>text</a ><b>text</b> </e> "e": { "a": "text", "b": "text" }
<e> <a>text</a> <a>text</a> </e> "e": { "a": ["text", "text"] }
<e> text <a>text</a> </e> "e": { "#text": "text", "a": "text" }
This is very similar to the mapping used for Yahoo Web Services
(http://developer.yahoo.com/common/json.html#xml).
This is a mess in that it is so unpredictable -- it requires lots of testing
(e.g. to see if values are lists or strings or dictionaries). For use
in Python this could be vastly cleaner. Think about whether the internal
form can be more self-consistent while maintaining good external
characteristics for the JSON.
Look at the Yahoo version closely to see how it works. Maybe can adopt
that completely if it makes more sense...
R. White, 2006 November 6
S. Diaz: added path management
"""
import json
import optparse
import sys
import os
import xml.etree.cElementTree as ET
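# A minimal usage sketch of the helpers defined below (run e.g. in a REPL
# after importing them; the sample XML and option values are illustrative):
#
#     import optparse
#     opts = optparse.Values({'pretty': False})
#     print(xml2json('<e><a>text</a><a>text</a></e>', opts))
#     # -> {"e": {"a": ["text", "text"]}}
#     print(json2xml('{"e": {"a": ["text", "text"]}}'))
#     # -> <e><a>text</a><a>text</a></e> (bytes under Python 3)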
def strip_tag(tag):
strip_ns_tag = tag
split_array = tag.split('}')
if len(split_array) > 1:
strip_ns_tag = split_array[1]
tag = strip_ns_tag
return tag
def elem_to_internal(elem, strip_ns=1, strip=1):
"""Convert an Element into an internal dictionary (not JSON!)."""
d = {}
elem_tag = elem.tag
if strip_ns:
elem_tag = strip_tag(elem.tag)
else:
for key, value in list(elem.attrib.items()):
d['@' + key] = value
# loop over subelements to merge them
for subelem in elem:
v = elem_to_internal(subelem, strip_ns=strip_ns, strip=strip)
tag = subelem.tag
if strip_ns:
tag = strip_tag(subelem.tag)
value = v[tag]
try:
# add to existing list for this tag
d[tag].append(value)
except AttributeError:
# turn existing entry into a list
d[tag] = [d[tag], value]
except KeyError:
# add a new non-list entry
d[tag] = value
text = elem.text
tail = elem.tail
if strip:
# ignore leading and trailing whitespace
if text:
text = text.strip()
if tail:
tail = tail.strip()
if tail:
d['#tail'] = tail
if d:
# use #text element if other attributes exist
if text:
d["#text"] = text
else:
# text is the value if no attributes
d = text or None
return {elem_tag: d}
def internal_to_elem(pfsh, factory=ET.Element):
"""Convert an internal dictionary (not JSON!) into an Element.
Whatever Element implementation we could import will be
used by default; if you want to use something else, pass the
Element class as the factory parameter.
"""
attribs = {}
text = None
tail = None
sublist = []
tag = list(pfsh.keys())
if len(tag) != 1:
raise ValueError("Illegal structure with multiple tags: %s" % tag)
tag = tag[0]
value = pfsh[tag]
if isinstance(value, dict):
for k, v in list(value.items()):
if k[:1] == "@":
attribs[k[1:]] = v
elif k == "#text":
text = v
elif k == "#tail":
tail = v
elif isinstance(v, list):
for v2 in v:
sublist.append(internal_to_elem({k: v2}, factory=factory))
else:
sublist.append(internal_to_elem({k: v}, factory=factory))
else:
text = value
e = factory(tag, attribs)
for sub in sublist:
e.append(sub)
e.text = text
e.tail = tail
return e
def elem2json(elem, options, strip_ns=1, strip=1):
"""Convert an ElementTree or Element into a JSON string."""
if hasattr(elem, 'getroot'):
elem = elem.getroot()
if options.pretty:
return json.dumps(elem_to_internal(elem, strip_ns=strip_ns, strip=strip), sort_keys=True, indent=4, separators=(',', ': '))
else:
return json.dumps(elem_to_internal(elem, strip_ns=strip_ns, strip=strip))
def json2elem(json_data, factory=ET.Element):
"""Convert a JSON string into an Element.
Whatever Element implementation we could import will be used by
default; if you want to use something else, pass the Element class
as the factory parameter.
"""
return internal_to_elem(json.loads(json_data), factory)
def xml2json(xmlstring, options, strip_ns=1, strip=1):
"""Convert an XML string into a JSON string."""
elem = ET.fromstring(xmlstring)
return elem2json(elem, options, strip_ns=strip_ns, strip=strip)
def json2xml(json_data, factory=ET.Element):
"""Convert a JSON string into an XML string.
Whatever Element implementation we could import will be used by
default; if you want to use something else, pass the Element class
as the factory parameter.
"""
if not isinstance(json_data, dict):
json_data = json.loads(json_data)
elem = internal_to_elem(json_data, factory)
return ET.tostring(elem)
def main():
p = optparse.OptionParser(
description='Converts XML to JSON or the other way around. Reads from standard input by default, or from file if given.',
prog='xml2json',
usage='%prog -t xml2json -o file.json [file]'
)
p.add_option('--path', '-p', help="Load path instead of one file")
p.add_option('--type', '-t', help="'xml2json' or 'json2xml'", default="xml2json")
p.add_option('--out', '-o', help="Write to OUT instead of stdout")
p.add_option(
'--strip_text', action="store_true",
dest="strip_text", help="Strip text for xml2json")
p.add_option(
'--pretty', action="store_true",
dest="pretty", help="Format JSON output so it is easier to read")
p.add_option(
'--strip_namespace', action="store_true",
dest="strip_ns", help="Strip namespace for xml2json")
p.add_option(
'--strip_newlines', action="store_true",
dest="strip_nl", help="Strip newlines for xml2json")
options, arguments = p.parse_args()
inputstream = sys.stdin
if len(arguments) == 1:
try:
inputstream = open(arguments[0])
        except IOError:
sys.stderr.write("Problem reading '{0}'\n".format(arguments[0]))
p.print_help()
sys.exit(-1)
    list_values = []
    name_values = []
    if options.path:
        name_values = os.listdir(options.path)
        list_values = [None] * len(name_values)
        for elem in range(0, len(name_values)):
            list_values[elem] = open(os.path.join(options.path, name_values[elem]))
    else:
        list_values = [inputstream]
        if len(arguments) > 0:
            name_values = [arguments[0]]
    for elem in range(0, len(list_values)):
        input = list_values[elem].read()
        strip = 0
        strip_ns = 0
        if options.strip_text:
            strip = 1
        if options.strip_ns:
            strip_ns = 1
        if options.strip_nl:
            input = input.replace('\n', '').replace('\r', '')
        # Decide the output extension up front so it is always defined, even
        # if conversion of this input fails.
        outName = ".xml" if options.type == "json2xml" else ".json"
        try:
            if options.type == "xml2json":
                out = xml2json(input, options, strip_ns, strip)
            else:
                out = json2xml(input)
            if len(list_values) > 1:
                print("Write " + name_values[elem] + outName)
                file = open(os.path.join(options.path, name_values[elem] + outName), 'w')
                file.write(out)
                file.close()
        except Exception:
            bad_name = name_values[elem] if elem < len(name_values) else "<stdin>"
            print("Not a good file " + bad_name + outName)
    if options.out:
        file = open(options.out, 'w')
        file.write(out)
        file.close()
    elif len(list_values) == 1:
        print(out)
if __name__ == "__main__":
main()
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2010 British Broadcasting Corporation and Kamaelia Contributors(1)
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
# http://www.kamaelia.org/AUTHORS - please extend this file,
# not this notice.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -------------------------------------------------------------------------
"""\
==========================================
Reading and writing simple WAV audio files
==========================================
Read and write WAV file format audio data using the WAVParser and WAVWriter
components, respectively.
Example Usage
-------------
Playing a WAV file, where we don't know the format until we play it::
from Kamaelia.Audio.PyMedia.Output import Output
from Kamaelia.File.Reading import RateControlledFileReader
from Kamaelia.Chassis.Graphline import Graphline
from Kamaelia.Chassis.Carousel import Carousel
def makeAudioOutput(format_info):
return Output( sample_rate = format_info['sample_rate'],
format = format_info['sample_format'],
channels = format_info['channels']
)
Graphline(
SRC = RateControlledFileReader("test.wav",readmode="bytes",rate=44100*4),
WAV = WAVParser(),
DST = Carousel(makeAudioOutput),
linkages = {
("SRC","outbox") : ("WAV","inbox"),
("SRC","signal") : ("WAV","control"),
("WAV","outbox") : ("DST","inbox"),
("WAV","signal") : ("DST","control"),
("WAV","all_meta") : ("DST","next"),
}
).run()
Capturing audio and writing it to a WAV file::
from Kamaelia.Audio.PyMedia.Input import Input
from Kamaelia.File.Writing import SimpleFileWriter
from Kamaelia.Chassis.Pipeline import Pipeline
Pipeline( Input(sample_rate=44100, channels=2, format="S16_LE"),
WAVWriter(sample_rate=44100, channels=2, format="S16_LE"),
SimpleFileWriter("captured_audio.wav"),
).run()
WAVParser behaviour
-------------------
Send binary data as strings containing a WAV file to the "inbox" inbox.
As soon as the format of the audio data is determined (from the headers) it is
sent out the "all_meta" outbox as a dictionary, for example::
{ "sample_format" : "S16_LE",
"channels" : 2,
"sample_rate" : 44100,
}
The individual components are also sent out the "sample_format", "channels" and
"sample_rate" outboxes.
The raw audio data from the incoming WAV data is sent out of the "outbox"
outbox, until the end of the WAV file is reached. If the WAV headers specify an
audio size of zero, then it is assumed to be of indefinite length, otherwise the
value is assumed to be the actual size, and this component will terminate and
send out a producerFinished() message when it thinks it has reached the end.
This component supports sending the raw audio data to a size limited inbox.
If the size limited inbox is full, this component will pause until it is able
to send out the data.
If a producerFinished message is received on the "control" inbox, this component
will complete parsing any data pending in its inbox, and finish sending any
resulting data to its outbox. It will then send the producerFinished message on
out of its "signal" outbox and terminate.
If a shutdownMicroprocess message is received on the "control" inbox, this
component will immediately send it on out of its "signal" outbox and immediately
terminate. It will not complete processing, or sending on any pending data.
WAVWriter behaviour
-------------------
Initialise this component, specifying the format the audio data will be in.
Send raw audio data (in the format you specified!) as binary strings to the
"inbox" inbox, and this component will write it out as WAV file format data out
of the "outbox" outbox.
The WAV format headers will immediately be sent out of the "outbox" outbox as
soon as this component is initialised and activated (ie. before you even start
sending it audio data to write out). The size of the audio data is set to zero
as the component has no way of knowing the duration of the audio.
This component supports sending data out of its outbox to a size limited inbox.
If the size limited inbox is full, this component will pause until it is able
to send out the data.
If a producerFinished message is received on the "control" inbox, this component
will complete parsing any data pending in its inbox, and finish sending any
resulting data to its outbox. It will then send the producerFinished message on
out of its "signal" outbox and terminate.
If a shutdownMicroprocess message is received on the "control" inbox, this
component will immediately send it on out of its "signal" outbox and immediately
terminate. It will not complete processing, or sending on any pending data.
Development history
-------------------
WAVWriter is based on code by Ryn Lothian developed during summer 2006.
"""
from Axon.Component import component
from Axon.Ipc import shutdownMicroprocess, producerFinished
from Axon.AxonExceptions import noSpaceInBox
import struct
import string
class WAVParser(component):
"""\
WAVParser() -> new WAVParser component.
Send WAV format audio file data to its "inbox" inbox, and the raw audio
data will be sent out of the "outbox" outbox as binary strings. The format
of the audio data is also sent out of other outboxes as soon as it is
determined (before the data starts to flow).
"""
Inboxes = { "inbox" : "Raw WAV file data",
"control" : "Shutdown signalling",
}
Outboxes = { "outbox" : "Binary audio data strings",
"signal" : "Shutdown signalling",
"sample_format" : "Sample format of the audio (eg. 'S16_LE')",
"channels" : "Number of channels in the audio",
"sample_rate" : "The sample rate of the audio",
"all_meta" : "Dict of 'sample_format', 'sample_rate', and 'channels'",
}
def __init__(self):
"""x.__init__(...) initializes x; see x.__class__.__doc__ for signature"""
super(WAVParser,self).__init__()
self.remainder = ""
self.shutdownMsg = None
def checkShutdown(self):
"""\
Collects any new shutdown messages arriving at the "control" inbox, and
returns "NOW" if immediate shutdown is required, or "WHENEVER" if the
component can shutdown when it has finished processing pending data.
"""
while self.dataReady("control"):
newMsg = self.recv("control")
if isinstance(newMsg, shutdownMicroprocess):
self.shutdownMsg = newMsg
elif self.shutdownMsg is None and isinstance(newMsg, producerFinished):
self.shutdownMsg = newMsg
if isinstance(self.shutdownMsg, shutdownMicroprocess):
return "NOW"
elif self.shutdownMsg is not None:
return "WHENEVER"
else:
return None
def readline(self):
"""\
Generator.
Read up to the next newline char from the stream of chunks of binary
string data arriving at the "inbox" inbox.
Any excess data is placed into self.remainder ready for the next call
to self.readline or self.readbytes.
Data is only read from the inbox when required. It is not preemptively
fetched.
The read data is placed into self.bytesread
If a shutdown is detected, self.bytesread is set to "" and this
generator immediately returns.
"""
bytes = []
newdata = self.remainder
index = newdata.find("\x0a")
while index==-1:
bytes.append(newdata)
while not self.dataReady("inbox"):
if self.checkShutdown():
self.bytesread=""
return
self.pause()
yield 1
newdata = self.recv("inbox")
index = newdata.find("\x0a")
tail = newdata[:index+1]
self.remainder = newdata[index+1:]
bytes.append(tail)
self.bytesread = "".join(bytes)
return
def readbytes(self,size):
"""\
Generator.
Read the specified number of bytes from the stream of chunks of binary
string data arriving at the "inbox" inbox.
Any excess data is placed into self.remainder ready for the next call
to self.readline or self.readbytes.
Data is only read from the inbox when required. It is not preemptively
fetched.
The read data is placed into self.bytesread
If a shutdown is detected, self.bytesread is set to "" and this
generator immediately returns.
"""
buf = [self.remainder]
bufsize = len(self.remainder)
while bufsize < size:
if self.dataReady("inbox"):
newdata = self.recv("inbox")
buf.append(newdata)
bufsize += len(newdata)
shutdown = self.checkShutdown()
if shutdown == "NOW" or (shutdown and not self.dataReady("inbox") and bufsize<size):
self.bytesread=""
return
if bufsize<size and not self.anyReady():
self.pause()
yield 1
excess = bufsize-size
if excess:
wanted = buf[:-1]
tail, self.remainder = buf[-1][:-excess], buf[-1][-excess:]
wanted.append(tail)
else:
wanted = buf
self.remainder = ""
self.bytesread = "".join(wanted)
return
def safesend(self, data, boxname):
"""\
Generator.
Sends data out of the named outbox. If the destination is full
(noSpaceInBox exception) then it waits until there is space and retries
until it succeeds.
If a shutdownMicroprocess message is received, returns early.
"""
while 1:
try:
self.send(data, boxname)
return
except noSpaceInBox:
if self.checkShutdown() == "NOW":
return
self.pause()
yield 1
def readuptobytes(self,size):
"""\
Generator.
Reads up to the specified number of bytes from any remainder, or (if
there is no remainder) the next string that arrives at the "inbox" inbox
Any excess data is placed into self.remainder ready for the next call
to self.readline or self.readbytes.
Data is only read from the inbox when required. It is not preemptively
fetched.
The read data is placed into self.bytesread
If a shutdown is detected, self.bytesread is set to "" and this
generator immediately returns.
"""
while self.remainder == "":
if self.dataReady("inbox"):
self.remainder = self.recv("inbox")
else:
shutdown = self.checkShutdown()
if shutdown == "NOW" or (shutdown and not self.dataReady("inbox")):
break
if self.remainder == "":
self.pause()
yield 1
self.bytesread = self.remainder[:size]
self.remainder = self.remainder[size:]
def main(self):
# parse header
for _ in self.readbytes(16): yield _
if self.checkShutdown() == "NOW" or (self.checkShutdown() and self.bytesread==""):
self.send(self.shutdownMsg,"signal")
return
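        # The first 16 bytes are the RIFF header: a 4 byte "RIFF" id, a 4 byte
        # little-endian size field, then "WAVE" plus the start of the format
        # sub-chunk id (read together here as the 8 byte string "WAVEfmt ").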
riff,filesize,wavfmt = struct.unpack("<4sl8s",self.bytesread)
assert(riff=="RIFF" and wavfmt=="WAVEfmt ")
for _ in self.readbytes(20): yield _
if self.checkShutdown() == "NOW" or (self.checkShutdown() and self.bytesread==""):
self.send(self.shutdownMsg,"signal")
return
filesize -= 24
chunksize, format, channels, sample_rate, bytesPerSec, blockAlign, bitsPerSample = struct.unpack("<lhHLLHH", self.bytesread)
        headerBytesLeft = chunksize - 16   # any extra bytes in the 'fmt ' chunk beyond the 16 just parsed
if format == 1: # uncompressed audio
if bitsPerSample <= 8:
audioformat = "S8"
blocksize=1*channels
elif bitsPerSample <= 16:
audioformat = "S16_LE"
blocksize=2*channels
            else:
                raise ValueError("Can't handle WAV file with "+str(bitsPerSample)+" bits per sample")
            if blocksize != blockAlign:
                raise ValueError("Can't handle WAV files with awkward block alignment padding between *every* sample!")
self.send(channels,"channels")
self.send(audioformat,"sample_format")
self.send(sample_rate,"sample_rate")
self.send( {"channels" : channels,
"sample_format" : audioformat,
"sample_rate" : sample_rate,
}, "all_meta")
        else:
            raise ValueError("Can't handle WAV file in anything other than uncompressed format. Format tag found = "+str(format))
# skip any excess header bytes
if headerBytesLeft > 0:
for _ in self.readbytes(headerBytesLeft): yield _
if self.checkShutdown() == "NOW" or (self.checkShutdown() and self.bytesread==""):
self.send(self.shutdownMsg,"signal")
return
filesize-=headerBytesLeft
# hunt for the DATA chunk
while 1:
for _ in self.readbytes(8): yield _
if self.checkShutdown() == "NOW" or (self.checkShutdown() and self.bytesread==""):
self.send(self.shutdownMsg,"signal")
return
chunk, size = struct.unpack("<4sl",self.bytesread)
if chunk=="data":
break
            # skip over this chunk; if the size is odd, then take into account a postfixed padding byte
            if (size % 2):
                size+=1
for _ in self.readbytes(size): yield _
if self.checkShutdown() == "NOW" or (self.checkShutdown() and self.bytesread==""):
self.send(self.shutdownMsg,"signal")
return
filesize-=size+8
# we're now in a data chunk
# we can read to our hearts content, until we reach the end
if size<=0:
size=-1
while size!=0:
if size>0:
for _ in self.readuptobytes(size): yield _
else:
for _ in self.readuptobytes(32768): yield _
for _ in self.safesend(self.bytesread,"outbox"): yield _
size-=len(self.bytesread)
if self.checkShutdown() == "NOW" or (self.checkShutdown() and self.bytesread==""):
self.send(self.shutdownMsg,"signal")
return
if self.shutdownMsg:
self.send(self.shutdownMsg, "signal")
else:
self.send(producerFinished(), "signal")
class StopWAVWriter(Exception):
    # Internal control-flow exception; replaces the original Python 2 string
    # exception (raise "STOP"), which is rejected by Python 2.6 and later.
    pass
class WAVWriter(component):
"""\
WAVWriter(channels, sample_format, sample_rate) -> new WAVWriter component.
Send raw audio data as binary strings to the "inbox" inbox and WAV format
audio data will be sent out of the "outbox" outbox as binary strings.
"""
def __init__(self, channels, sample_format, sample_rate):
"""x.__init__(...) initializes x; see x.__class__.__doc__ for signature"""
super(WAVWriter, self).__init__()
if sample_format == "S8":
self.bitsPerSample = 8
self.bytespersample = 1
elif sample_format == "S16_LE":
self.bitsPerSample = 16
self.bytespersample = 2
        else:
            raise ValueError("WAVWriter can't handle sample format "+str(sample_format)+" at the moment")
self.samplingfrequency = sample_rate
self.channels = channels
def handleControl(self):
while self.dataReady("control"):
msg = self.recv("control")
if isinstance(msg, producerFinished) and not isinstance(self.shutdownMsg, shutdownMicroprocess):
self.shutdownMsg = msg
elif isinstance(msg, shutdownMicroprocess):
self.shutdownMsg = msg
def canStop(self):
self.handleControl()
return isinstance(self.shutdownMsg, (producerFinished,shutdownMicroprocess))
def mustStop(self):
self.handleControl()
return isinstance(self.shutdownMsg, shutdownMicroprocess)
def waitSend(self,data,boxname):
while 1:
try:
self.send(data,boxname)
return
except noSpaceInBox:
if self.mustStop():
raise "STOP"
self.pause()
yield 1
if self.mustStop():
raise "STOP"
def main(self):
self.shutdownMsg=None
try:
#we don't know the length yet, so we say the file lasts an arbitrary (long) time
riffchunk = "RIFF" + struct.pack("<L", 0x0) + "WAVE"
bytespersecond = self.bytespersample * self.channels * self.samplingfrequency
formatchunk = "fmt "
formatchunk += struct.pack("<L", self.bitsPerSample)
formatchunk += struct.pack("<H", 0x01) #PCM/Linear quantization
formatchunk += struct.pack("<H", self.channels)
formatchunk += struct.pack("<L", self.samplingfrequency)
formatchunk += struct.pack("<L", bytespersecond)
formatchunk += struct.pack("<H", self.bytespersample * self.channels)
formatchunk += struct.pack("<H", self.bitsPerSample)
datachunkheader = "data" + struct.pack("<L", 0x0) #again, an arbitrary (large) value
for _ in self.waitSend(riffchunk + formatchunk + datachunkheader, "outbox"):
yield 1
running = True
while running:
yield 1
while self.dataReady("inbox"): # we accept binary sample data in strings
sampledata = self.recv("inbox")
for _ in self.waitSend(sampledata, "outbox"):
yield 1
if self.canStop():
raise "STOP"
self.pause()
except "STOP":
self.send(self.shutdownMsg,"signal")
__kamaelia_components__ = ( WAVParser, WAVWriter, )
if __name__ == "__main__":
    from Kamaelia.Chassis.Pipeline import Pipeline
    from Kamaelia.File.Reading import RateControlledFileReader
    from Kamaelia.UI.Pygame.VideoOverlay import VideoOverlay
    from Kamaelia.Audio.PyMedia.Output import Output
    from Kamaelia.Chassis.Carousel import Carousel
    from Kamaelia.Chassis.Graphline import Graphline
    from Kamaelia.File.Writing import SimpleFileWriter
print "Reading in WAV file, parsing it, then writing it out as test.wav ..."
Graphline(
READ = RateControlledFileReader("/usr/share/sounds/alsa/Front_Center.wav",readmode="bytes",rate=1000000),
PARSE = WAVParser(),
ENC = Carousel(lambda meta : WAVWriter(**meta)),
WRITE = SimpleFileWriter("test.wav"),
linkages = {
("READ", "outbox") : ("PARSE", "inbox"),
("PARSE", "outbox") : ("ENC", "inbox"),
("PARSE", "all_meta") : ("ENC", "next"),
("ENC", "outbox") : ("WRITE", "inbox"),
("READ", "signal") : ("PARSE", "control"),
("PARSE", "signal") : ("ENC", "control"),
("ENC", "signal") : ("WRITE", "control"),
},
).run()
print "Reading in test.wav and playing it back ..."
Graphline(
SRC = RateControlledFileReader("test.wav",readmode="bytes",rate=44100*4),
WAV = WAVParser(),
DST = Carousel(lambda meta:
Output(sample_rate=meta['sample_rate'],format=meta['sample_format'],channels=meta['channels'])
),
linkages = {
("SRC","outbox") : ("WAV","inbox"),
("SRC","signal") : ("WAV","control"),
("WAV","outbox") : ("DST","inbox"),
("WAV","signal") : ("DST","control"),
("WAV","all_meta") : ("DST","next"),
}
).run()
|
|
#!/usr/bin/python
# Crystals (working title)
#
# Copyright (c) 2010 Matt Windsor, Michael Walker and Alexander
# Preisinger.
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# *Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# * The names of contributors may not be used to endorse or promote
# products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# AFOREMENTIONED COPYRIGHT HOLDERS BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
# OF THE POSSIBILITY OF SUCH DAMAGE.
import sys
import wx
import webbrowser
import xml.etree.ElementTree as etree
from wx.lib.stattext import GenStaticText
from wx.lib.dialogs import textEntryDialog
# locale support
_ = wx.GetTranslation
class Creator(wx.Frame):
# custom IDs | I prefer them to be here :-)
ID_TREE = wx.NewId()
def __init__(self):
wx.Frame.__init__(self, None, -1, "Crystals Dialog Creator")
# Attributes
self.files = []
self.font_bold = self.GetFont()
self.font_bold.SetWeight(wx.BOLD)
self.dlg_ctrl_drag_item = None
# Style
self.SetMinSize((750, 400))
self.SetSize((800, 600))
# Layout & Controls
hbox1 = wx.BoxSizer(wx.HORIZONTAL)
vbox1 = wx.BoxSizer(wx.VERTICAL)
dlg_header = wx.StaticText(self, -1, _("Dialog:"))
dlg_header.SetFont(self.font_bold)
# The Dialog Control will be used by the XMLFileCtrls
self.dlg_ctrl = wx.TreeCtrl(self, self.ID_TREE, size=(200, 0),
style=wx.SUNKEN_BORDER|wx.TR_HAS_BUTTONS|wx.TR_HIDE_ROOT|
wx.TR_FULL_ROW_HIGHLIGHT|wx.TR_MULTIPLE)
add_label_button = wx.Button(self, wx.ID_ADD, _("Add Label"))
remove_label_button = wx.Button(self, wx.ID_REMOVE, _("Remove Label"))
self.tab_ctrl = wx.Notebook(self)
self._new_file()
vbox1.Add(dlg_header, 0, wx.ALIGN_LEFT | wx.LEFT | wx.RIGHT | wx.TOP,
5)
vbox1.Add(self.dlg_ctrl, 1, wx.ALL | wx.EXPAND | wx.HORIZONTAL, 5)
vbox1.Add(add_label_button, 0, wx.LEFT | wx.RIGHT | wx.EXPAND
| wx.VERTICAL, 5)
vbox1.Add(remove_label_button, 0, wx.ALL | wx.EXPAND | wx.VERTICAL, 5)
hbox1.Add(vbox1, 0, wx.EXPAND)
hbox1.Add(self.tab_ctrl, 1, wx.ALL | wx.EXPAND, 5)
# Menu
mb = wx.MenuBar()
menu_file = wx.Menu()
menu_file.Append(wx.ID_NEW)# _("&New"))
menu_file.Append(wx.ID_OPEN)#, _("&Open file(s)..."))
menu_file.Append(wx.ID_SAVE)#, _("&Save"))
menu_file.Append(wx.ID_SAVEAS)#, _("Save &As..."))
menu_file.Append(wx.ID_CLOSE)#, _("&Close file"))
menu_file.AppendSeparator()
menu_file.Append(wx.ID_EXIT)#, _("&Quit"))
#menu_dialog = wx.Menu()
menu_help = wx.Menu()
menu_help.Append(wx.ID_ABOUT, _("About"))
mb.Append(menu_file, _("&File"))
mb.Append(menu_help, _("&Help"))
self.SetMenuBar(mb)
# Events
self.Bind(wx.EVT_BUTTON, self.on_add_label, id=wx.ID_ADD)
self.Bind(wx.EVT_BUTTON, self.on_remove_label, id=wx.ID_REMOVE)
self.Bind(wx.EVT_MENU, self.on_new, id=wx.ID_NEW)
self.Bind(wx.EVT_MENU, self.on_open, id=wx.ID_OPEN)
self.Bind(wx.EVT_MENU, self.on_save, id=wx.ID_SAVE)
self.Bind(wx.EVT_MENU, self.on_save_as, id=wx.ID_SAVEAS)
self.Bind(wx.EVT_MENU, self.on_close, id=wx.ID_CLOSE)
self.Bind(wx.EVT_MENU, self.on_about, id=wx.ID_ABOUT)
self.Bind(wx.EVT_MENU, self.on_exit, id=wx.ID_EXIT)
self.Bind(wx.EVT_CLOSE, self.on_exit)
# Final touch
self.SetSizer(hbox1)
self.Show(True)
    def on_add_label(self, event):
        """add a new label to the TreeCtrl; calls through to the child"""
        self.tab_ctrl.GetCurrentPage().on_add_label(event)
    def on_remove_label(self, event):
        """removes a label from the TreeCtrl; calls through to the child"""
        self.tab_ctrl.GetCurrentPage().on_remove_label(event)
def on_new(self, event):
"""create a new file"""
pass
def _new_file(self):
self.tab_ctrl.AddPage(XMLFileCtrl(self.tab_ctrl), _("New"))
self.tab_ctrl.GetCurrentPage().new()
def on_open(self, event):
"""load xml files"""
pass
def on_save(self, event):
"""save xml files"""
pass
def on_save_as(self, event):
"""save xml file with a different name"""
pass
    def on_close(self, event):
        """close file"""
        # check if there are changes which should be saved
        if getattr(self, "changes", False):
            pass
def _close_file(self, file_id):
pass
    def on_about(self, event):
        """show application information"""
        AboutDialog(self)
    def on_exit(self, event):
        """close the application"""
        # close every file before exiting
        for _file in self.files:
            self._close_file(_file)
self.Destroy()
sys.exit(0)
class XMLFileCtrl(wx.Panel):
"""class to handle the XML files and manage the controls at once"""
ID_TEXT = wx.NewId()
ID_CHOICE_ADD = wx.NewId()
ID_CHOICE_REMOVE = wx.NewId()
DATA = {}
def __init__(self, parent):
wx.Panel.__init__(self, parent)
self.data = {}
self.labels = {}
self.font_bold = parent.GetParent().font_bold
self.ID_TREE = parent.GetParent().ID_TREE
self.font_italic = self.GetFont()
self.font_italic.SetStyle(wx.FONTSTYLE_ITALIC)
self.dlg_ctrl = parent.GetParent().dlg_ctrl
self.current_tag_type = "text"
#Layout & Controls
next_button = wx.Button(self, wx.ID_FORWARD, label="->")
prev_button = wx.Button(self, wx.ID_BACKWARD, label="<-")
rm_button = wx.Button(self, wx.ID_REMOVE, label=_("Remove Tag"))
self.addupdate_button = wx.Button(self)
actor_id_header = wx.StaticText(self, -1, _("Speakers Actor-ID:"))
actor_id_header.SetFont(self.font_bold)
self.actor_id_ctrl = wx.ComboBox(self)
self.actor_id_ctrl.SetFocus()
type_box = wx.StaticBox(self, -1, _("Content Type:"))
type_box.SetFont(self.font_bold)
self.text_rb = wx.RadioButton(self, label="text"
, style=wx.RB_GROUP)
self.choice_rb = wx.RadioButton(self, label="choice")
self.set_rb = wx.RadioButton(self, label="set")
self.goto_rb = wx.RadioButton(self, label="goto")
self.requ_rb = wx.RadioButton(self, label="requirement")
# Just a dummy panel
self.controls = wx.Panel(self)
self.controls.Hide()
# Controls for the text option
self.text_ctrls = wx.Panel(self)
text_header = wx.StaticText(self.text_ctrls, label=_("Text:"))
text_header.SetFont(self.font_bold)
self.text_text = wx.TextCtrl(self.text_ctrls, self.ID_TEXT,
style=wx.TE_MULTILINE|wx.SUNKEN_BORDER)
tbox = wx.BoxSizer(wx.VERTICAL)
tbox.Add(text_header, 0, wx.LEFT, 5)
tbox.Add(self.text_text, 0, wx.ALL | wx.EXPAND, 5)
self.text_ctrls.SetSizer(tbox)
self.text_ctrls.Hide()
# Controls for the choice option
self.choice_ctrls = wx.Panel(self)
choices_header = wx.StaticText(self.choice_ctrls, label=_("Choices:"))
choices_header.SetFont(self.font_bold)
self.choice_choices = wx.ListCtrl(self.choice_ctrls, size=(225, 200),
style=wx.LC_REPORT|wx.SUNKEN_BORDER|wx.LC_AUTOARRANGE)
self.choice_choices.InsertColumn(0, _("Label"), width=75)
self.choice_choices.InsertColumn(1, _("Summary"), width=150)
summary_header = wx.StaticText(self.choice_ctrls, label=_("Summary:"))
summary_header.SetFont(self.font_bold)
self.choice_summary = wx.TextCtrl(self.choice_ctrls)
label_header = wx.StaticText(self.choice_ctrls, label=_("Label:"))
label_header.SetFont(self.font_bold)
self.choice_label = wx.ComboBox(self.choice_ctrls)
add_button = wx.Button(self.choice_ctrls, self.ID_CHOICE_ADD,
_("Add Choice"))
remove_button = wx.Button(self.choice_ctrls, self.ID_CHOICE_REMOVE,
_("Remove Choice"))
chbox = wx.BoxSizer(wx.HORIZONTAL)
cvbox1 = wx.BoxSizer(wx.VERTICAL)
cvbox1.Add(choices_header, 0, wx.LEFT, 5)
cvbox1.Add(self.choice_choices, 0, wx.EXPAND | wx.ALL, 5)
chbox.Add(cvbox1, 1, wx.ALL | wx.EXPAND)
cvbox2 = wx.BoxSizer(wx.VERTICAL)
cvbox2.Add(summary_header, 0, wx.LEFT | wx.BOTTOM, 5)
cvbox2.Add(self.choice_summary, 0, wx.EXPAND)
cvbox2.Add((0, 5))
cvbox2.Add(label_header, 0, wx.ALL, 5)
cvbox2.Add(self.choice_label, 0, wx.EXPAND)
bubox = wx.BoxSizer(wx.HORIZONTAL)
bubox.Add(add_button)
bubox.Add(remove_button)
cvbox2.Add(bubox, 0, wx.TOP, 5)
chbox.Add(cvbox2, 1, wx.EXPAND | wx.ALL)
self.choice_ctrls.SetSizer(chbox)
self.choice_ctrls.Hide()
self.set_ctrls = wx.Panel(self)
self.set_ctrls.Hide()
self.goto_ctrls = wx.Panel(self)
self.goto_ctrls.Hide()
self.requirement_ctrls = wx.Panel(self)
self.requirement_ctrls.Hide()
        self.sizer = wx.BoxSizer(wx.VERTICAL) # lowest for everything
        hbox1 = wx.BoxSizer(wx.HORIZONTAL) # separates radio buttons
        stbox = wx.StaticBoxSizer(type_box, wx.VERTICAL) # radio buttons
        vbox2 = wx.BoxSizer(wx.VERTICAL) # separate buttons and actor id
        hbox2 = wx.BoxSizer(wx.HORIZONTAL) # buttons
        hbox3 = wx.BoxSizer(wx.HORIZONTAL) # buttons
hbox2.Add(prev_button)
hbox2.Add(self.addupdate_button)
hbox2.Add(rm_button)
hbox2.Add(next_button)
vbox2.AddSizer(hbox2, 0, wx.EXPAND)
vbox2.AddSizer(hbox3)
vbox2.AddSpacer((30, 30))
vbox2.Add(actor_id_header, 0, wx.ALL, 5)
vbox2.Add(self.actor_id_ctrl, 0, wx.LEFT | wx.EXPAND, 5)
# small hack to make StaticBoxSizer look nice
stbox.Add((160, 0))
stbox.Add(self.text_rb)
stbox.Add(self.choice_rb)
stbox.Add(self.set_rb)
stbox.Add(self.goto_rb)
stbox.Add(self.requ_rb)
hbox1.AddSizer(vbox2, 0, wx.EXPAND)
hbox1.Add((20, 20), 1, wx.EXPAND)
hbox1.Add(stbox, 0, wx.ALL, 5)
self.sizer.AddSizer(hbox1, 0, wx.ALL | wx.EXPAND, 5)
self.sizer.Add(self.controls, 0, wx.ALL | wx.EXPAND, 5)
self.SetSizer(self.sizer)
# Events
self.Bind(wx.EVT_BUTTON, self.on_next, id=wx.ID_FORWARD)
self.Bind(wx.EVT_BUTTON, self.on_prev, id=wx.ID_BACKWARD)
self.Bind(wx.EVT_BUTTON, self.on_remove, id=wx.ID_REMOVE)
self.Bind(wx.EVT_BUTTON, self.on_add, id=wx.ID_ADD)
self.Bind(wx.EVT_BUTTON, self.on_update, id=wx.ID_REFRESH)
self.Bind(wx.EVT_RADIOBUTTON, self.on_change_type,
id=self.text_rb.GetId())
self.Bind(wx.EVT_RADIOBUTTON, self.on_change_type,
id=self.choice_rb.GetId())
self.Bind(wx.EVT_RADIOBUTTON, self.on_change_type,
id=self.set_rb.GetId())
self.Bind(wx.EVT_RADIOBUTTON, self.on_change_type,
id=self.goto_rb.GetId())
self.Bind(wx.EVT_RADIOBUTTON, self.on_change_type,
id=self.requ_rb.GetId())
self.dlg_ctrl.Bind(wx.EVT_TREE_BEGIN_DRAG, self.on_drag,
id=self.ID_TREE)
self.dlg_ctrl.Bind(wx.EVT_TREE_END_DRAG, self.on_drop,
id=self.ID_TREE)
self.dlg_ctrl.Bind(wx.EVT_TREE_SEL_CHANGED, self.on_change_selection,
id=self.ID_TREE)
def _swap_addupdate_buttons(self, id):
if id == wx.ID_ADD:
self.addupdate_button.SetId(wx.ID_ADD)
self.addupdate_button.SetLabel(_("Add Tag"))
else:
self.addupdate_button.SetId(wx.ID_REFRESH)
self.addupdate_button.SetLabel(_("Update Tag"))
def focus(self):
"""clear and fill the TreeCtrl for the new focus"""
self.dlg_ctrl.DeleteAllItems()
id = self.dlg_ctrl.AddRoot("dialog")
self.labels['root'] = id
self._add_label("requirements")
content = self._add_label("content")
self.dlg_ctrl.SelectItem(self.labels["content"]["children"][-1])
def new(self):
"""set the default options for a new tag or file"""
self._swap_addupdate_buttons(wx.ID_ADD)
self._change_controls(self.text_ctrls)
self.focus()
    def load(self, tag_name):
        """load the information from a tag"""
    def save(self, tag_name):
        """save the information in a tag"""
def _add_label(self, label_name):
"""internal function to unify adding labels"""
root = self.labels['root']
lb_id = self.dlg_ctrl.AppendItem(root, label_name)
self.dlg_ctrl.SetItemFont(lb_id, self.font_bold)
add_new_tag = self.dlg_ctrl.AppendItem(lb_id, "New Tag ...")
self.dlg_ctrl.SetItemFont(add_new_tag, self.font_italic)
self.dlg_ctrl.Expand(lb_id)
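        # Bookkeeping note: each label keeps a "children" list and the italic
        # "New Tag ..." placeholder stays as its last entry, so real tags are
        # always inserted in front of it (see on_add).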
self.labels.update({label_name : {"id" : lb_id,
"children" : [add_new_tag]},
})
return lb_id
def on_drag(self, event):
"""allow dragging items"""
item = event.GetItem()
if self.dlg_ctrl.GetChildrenCount(item) == 0 and self.labels \
[self.dlg_ctrl.GetItemText(self.dlg_ctrl.GetItemParent(item))]\
["children"][-1] != item:
event.Allow()
self.dlg_ctrl_drag_item = event.GetItem()
else:
return
def on_drop(self, event):
# If we dropped somewhere that isn't on top of an item,
# ignore the event
if not event.GetItem().IsOk():
return
        # Make sure this member exists.
try:
old = self.dlg_ctrl_drag_item
except:
return
# Get the other IDs that are involved
new = event.GetItem()
new_parent = self.dlg_ctrl.GetItemParent(new)
new_parent_text = self.dlg_ctrl.GetItemText(new_parent)
if self.labels["root"] == new_parent or self.labels["requirements"] \
["id"] == new_parent:
return
# Move 'em
text = self.dlg_ctrl.GetItemText(old)
old_parent = self.dlg_ctrl.GetItemParent(old)
old_parent_text = self.dlg_ctrl.GetItemText(old_parent)
self.dlg_ctrl.Delete(old)
self.labels[old_parent_text]["children"].remove(old)
prev_idx = self.labels[new_parent_text]["children"].index(new) + 1
#prevent copying the NewTag tag
if len(self.labels[new_parent_text]["children"]) == 1:
prev_idx -= 1
new = new_parent
elif len(self.labels[new_parent_text]["children"]) == prev_idx:
prev_idx -= 1
new = self.labels[new_parent_text]["children"][prev_idx-1]
id = self.dlg_ctrl.InsertItem(new_parent, new, text)
self.labels[new_parent_text]["children"].insert(prev_idx, id)
def __debug_labels(self):
for i in self.labels:
if i != "root":
print "---"+i
for a in self.labels[i]["children"]:
print self.dlg_ctrl.GetItemText(a)
print ""
def on_add_label(self, event):
"""add a new label to the TreeCtrl, call from the parent"""
entry = textEntryDialog(self, _('Label Name:'),
_('Enter a name for the new label.'),
style=wx.BORDER_NONE|wx.OK|wx.CANCEL)
        text = entry.text.strip()
        if text != "":
            self._add_label(text)
def on_remove_label(self, event):
"""removes a label from the TreeCtrl, call from the parent"""
item = self.dlg_ctrl.GetSelection()
name = self.dlg_ctrl.GetItemText(item)
if name == "content" or name == "requirements":
wx.MessageBox(_("You can't remove the {0} label.".format(name)),
_("Can't remove label."), style=wx.OK | wx.ICON_ERROR)
else:
if name not in self.labels:
wx.MessageBox(_("Please select a label ant NOT a tag."),
_("No label selected."))
else:
self.dlg_ctrl.Delete(item)
self.labels.pop(name)
def _change_controls(self, new_controls):
self.sizer.Detach(self.controls)
self.controls.Hide()
self.controls = new_controls
self.controls.Show()
self.sizer.Add(self.controls, 0, wx.ALL | wx.EXPAND, 5)
self.controls.Layout()
self.sizer.Layout()
self.Layout()
def on_change_type(self, event):
"""changes the controls if the type is changed"""
label = event.GetEventObject().GetLabel().strip()
self._change_controls(eval("self.{0}_ctrls".format(label)))
self.current_tag_type = label
if label == "text" or label == "choice":
self.actor_id_ctrl.Enable()
else:
self.actor_id_ctrl.Disable()
    def on_next(self, event):
        """switch to the next tag in occurrence order"""
        pass
    def on_prev(self, event):
        """switch to the previous tag in occurrence order"""
        pass
def on_remove(self, event):
"""remove tag"""
self._swap_addupdate_buttons(wx.ID_REFRESH)
def on_add(self, event):
"""add new tag"""
sel_id = self.dlg_ctrl.GetSelection()
if self.dlg_ctrl.GetItemText(sel_id) not in self.labels:
label_id = self.dlg_ctrl.GetItemParent(sel_id)
else:
label_id = sel_id
label_name = self.dlg_ctrl.GetItemText(label_id)
if self.current_tag_type == "text":
if len(self.labels[label_name]["children"]) > 1:
prev_id = self.labels[label_name]["children"][-2]
else:
prev_id = label_id
string = "{0}->{1}".format(self.actor_id_ctrl.GetValue(),
self.text_text.GetValue())
id = self.dlg_ctrl.InsertItem(label_id, prev_id, string.strip())
self.labels[label_name]["children"].insert(-1, id)
def on_update(self, event):
"""update tag"""
pass
def on_change_selection(self, event):
"""switch radio buttons on changed selection of the dlg_ctrl"""
id = self.dlg_ctrl.GetSelection()
if id == self.labels["requirements"]["id"] or id == self.labels \
["requirements"]["children"][-1]:
event.SetEventObject(self.requ_rb)
self.on_change_type(event)
self.requ_rb.SetValue(True)
#wx.RadioButton.SetValue()
def close(self, event):
"""closes file, called by the parent"""
pass
class AboutDialog(wx.Dialog):
"""about dialog to display informations"""
def __init__(self, parent):
wx.Dialog.__init__(self, parent, -1, title=_("About"))
self.parent = parent
self.SetSize((400, 400))
vsizer = wx.BoxSizer(wx.VERTICAL)
notebook = wx.Notebook(self, -1, style=wx.NB_BOTTOM)
self.add_page_version(notebook)
self.add_page_credits(notebook)
self.add_page_licence(notebook)
vsizer.Add(notebook, 1, wx.EXPAND | wx.ALL, 5)
closebutton = wx.Button(self, wx.ID_CLOSE)
vsizer.Add(closebutton, 0, wx.ALIGN_RIGHT | wx.ALL, 5)
self.SetSizer(vsizer)
self.Bind(wx.EVT_BUTTON, self.on_close, id=wx.ID_CLOSE)
self.Bind(wx.EVT_CLOSE, self.on_close)
self.ShowModal()
def add_page_version(self, notebook):
panel = wx.Panel(notebook, -1)
vsizer = wx.BoxSizer(wx.VERTICAL)
h_font = wx.Font(18, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL,
wx.FONTWEIGHT_BOLD)
header = wx.StaticText(panel, -1, _("Crystals Dialog Creator"))
header.SetFont(h_font)
vsizer.Add(header, 0, wx.ALIGN_CENTER | wx.ALL, 10)
link = Link(panel, -1, 'http://github.com/CaptainHayashi/crystals', \
URL='http://github.com/CaptainHayashi/crystals')
vsizer.Add(link, 0, wx.ALIGN_CENTER | wx.ALL, 10)
panel.SetSizer(vsizer)
notebook.AddPage(panel, _("Version"), True)
def add_page_credits(self, notebook):
listbox = wx.ListBox(notebook, -1)
listbox.Append("Alexander Preisinger")
notebook.AddPage(listbox, _("Credits"))
def add_page_licence(self, notebook):
font = self.GetFont()
font.SetFaceName('Monospace')
licence = """Copyright (c) 2010, Alexander Preisinger
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* The names of its contributors may not be used to endorse or promote
products derived from this software without specific prior written
permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
txtcrtl = wx.TextCtrl(notebook, -1, size=(100, 100),
style=wx.TE_MULTILINE|wx.TE_READONLY)
txtcrtl.SetFont(font)
txtcrtl.SetValue(licence)
notebook.AddPage(txtcrtl, _("Licence"))
def on_close(self, event):
self.Destroy()
class Link(GenStaticText):
def __init__(self, parent, id=-1, label='', pos=(-1, -1),
size=(-1, -1), style=0, name='Link', URL=''):
GenStaticText.__init__(self, parent, id, label, pos, size, style, name)
self.url = URL
self.font1 = wx.Font(9, wx.SWISS, wx.NORMAL, wx.BOLD, True, 'Verdana')
self.font2 = wx.Font(9, wx.SWISS, wx.NORMAL, wx.BOLD, False, 'Verdana')
self.SetFont(self.font2)
self.SetForegroundColour('#0000ff')
self.Bind(wx.EVT_MOUSE_EVENTS, self.on_mouse_event)
self.Bind(wx.EVT_MOTION, self.on_mouse_event)
def on_mouse_event(self, event):
if event.Moving():
self.SetCursor(wx.StockCursor(wx.CURSOR_HAND))
self.SetFont(self.font1)
elif event.LeftUp():
webbrowser.open_new(self.url)
else:
self.SetCursor(wx.NullCursor)
self.SetFont(self.font2)
event.Skip()
if __name__ == "__main__":
app = wx.App()
Creator()
app.MainLoop()
# vim: et ai ts=4 sw=4 softtabstop=4:
|
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
##################################################
# GNU Radio Python Flow Graph
# Title: Top Block
# Generated: Sat Oct 7 01:35:22 2017
##################################################
if __name__ == '__main__':
import ctypes
import sys
if sys.platform.startswith('linux'):
try:
x11 = ctypes.cdll.LoadLibrary('libX11.so')
x11.XInitThreads()
except:
print "Warning: failed to XInitThreads()"
from gnuradio import analog
from gnuradio import audio
from gnuradio import eng_notation
from gnuradio import filter
from gnuradio import gr
from gnuradio.eng_option import eng_option
from gnuradio.filter import firdes
from gnuradio.wxgui import forms
from grc_gnuradio import wxgui as grc_wxgui
from optparse import OptionParser
import osmosdr
import time
import wx
class top_block(grc_wxgui.top_block_gui):
def __init__(self):
grc_wxgui.top_block_gui.__init__(self, title="Top Block")
_icon_path = "/usr/share/icons/hicolor/32x32/apps/gnuradio-grc.png"
self.SetIcon(wx.Icon(_icon_path, wx.BITMAP_TYPE_ANY))
##################################################
# Variables
##################################################
self.tr_width = tr_width = 10e3
self.samp_rate = samp_rate = 1.2e6
self.deci_2 = deci_2 = 3
self.deci_1 = deci_1 = 9
self.cutoff = cutoff = 100e3
self.Frequency = Frequency = 104.3e6
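        # Rate plan implied by the variables above: 1.2 Msps from the RTL-SDR is
        # decimated by deci_1=9 in the low-pass filter (about 133.3 kHz quadrature
        # rate into the WBFM receiver), then by deci_2=3 in the demodulator,
        # leaving roughly 44.4 kHz audio, which is played via a 44100 Hz sink.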
##################################################
# Blocks
##################################################
_tr_width_sizer = wx.BoxSizer(wx.VERTICAL)
self._tr_width_text_box = forms.text_box(
parent=self.GetWin(),
sizer=_tr_width_sizer,
value=self.tr_width,
callback=self.set_tr_width,
label='tr_width',
converter=forms.float_converter(),
proportion=0,
)
self._tr_width_slider = forms.slider(
parent=self.GetWin(),
sizer=_tr_width_sizer,
value=self.tr_width,
callback=self.set_tr_width,
minimum=1e3,
maximum=100e3,
num_steps=100,
style=wx.SL_HORIZONTAL,
cast=float,
proportion=1,
)
self.Add(_tr_width_sizer)
_cutoff_sizer = wx.BoxSizer(wx.VERTICAL)
self._cutoff_text_box = forms.text_box(
parent=self.GetWin(),
sizer=_cutoff_sizer,
value=self.cutoff,
callback=self.set_cutoff,
label='cutoff',
converter=forms.float_converter(),
proportion=0,
)
self._cutoff_slider = forms.slider(
parent=self.GetWin(),
sizer=_cutoff_sizer,
value=self.cutoff,
callback=self.set_cutoff,
minimum=50e3,
maximum=200e3,
num_steps=100,
style=wx.SL_HORIZONTAL,
cast=float,
proportion=1,
)
self.Add(_cutoff_sizer)
_Frequency_sizer = wx.BoxSizer(wx.VERTICAL)
self._Frequency_text_box = forms.text_box(
parent=self.GetWin(),
sizer=_Frequency_sizer,
value=self.Frequency,
callback=self.set_Frequency,
label='Frequency',
converter=forms.float_converter(),
proportion=0,
)
self._Frequency_slider = forms.slider(
parent=self.GetWin(),
sizer=_Frequency_sizer,
value=self.Frequency,
callback=self.set_Frequency,
minimum=88e6,
maximum=108e6,
num_steps=(108-88)*10,
style=wx.SL_HORIZONTAL,
cast=float,
proportion=1,
)
self.Add(_Frequency_sizer)
self.rtlsdr_source_0 = osmosdr.source( args="numchan=" + str(1) + " " + '' )
self.rtlsdr_source_0.set_sample_rate(samp_rate)
self.rtlsdr_source_0.set_center_freq(Frequency, 0)
self.rtlsdr_source_0.set_freq_corr(0, 0)
self.rtlsdr_source_0.set_dc_offset_mode(0, 0)
self.rtlsdr_source_0.set_iq_balance_mode(0, 0)
self.rtlsdr_source_0.set_gain_mode(False, 0)
self.rtlsdr_source_0.set_gain(10, 0)
self.rtlsdr_source_0.set_if_gain(20, 0)
self.rtlsdr_source_0.set_bb_gain(20, 0)
self.rtlsdr_source_0.set_antenna('', 0)
self.rtlsdr_source_0.set_bandwidth(0, 0)
self.low_pass_filter_0 = filter.fir_filter_ccf(deci_1, firdes.low_pass(
1, samp_rate, cutoff, tr_width, firdes.WIN_HAMMING, 6.76))
self.audio_sink_0 = audio.sink(44100, '', True)
self.analog_wfm_rcv_0 = analog.wfm_rcv(
quad_rate=samp_rate/deci_1,
audio_decimation=deci_2,
)
##################################################
# Connections
##################################################
self.connect((self.analog_wfm_rcv_0, 0), (self.audio_sink_0, 0))
self.connect((self.low_pass_filter_0, 0), (self.analog_wfm_rcv_0, 0))
self.connect((self.rtlsdr_source_0, 0), (self.low_pass_filter_0, 0))
def get_tr_width(self):
return self.tr_width
def set_tr_width(self, tr_width):
self.tr_width = tr_width
self._tr_width_slider.set_value(self.tr_width)
self._tr_width_text_box.set_value(self.tr_width)
self.low_pass_filter_0.set_taps(firdes.low_pass(1, self.samp_rate, self.cutoff, self.tr_width, firdes.WIN_HAMMING, 6.76))
def get_samp_rate(self):
return self.samp_rate
def set_samp_rate(self, samp_rate):
self.samp_rate = samp_rate
self.rtlsdr_source_0.set_sample_rate(self.samp_rate)
self.low_pass_filter_0.set_taps(firdes.low_pass(1, self.samp_rate, self.cutoff, self.tr_width, firdes.WIN_HAMMING, 6.76))
def get_deci_2(self):
return self.deci_2
def set_deci_2(self, deci_2):
self.deci_2 = deci_2
def get_deci_1(self):
return self.deci_1
def set_deci_1(self, deci_1):
self.deci_1 = deci_1
def get_cutoff(self):
return self.cutoff
def set_cutoff(self, cutoff):
self.cutoff = cutoff
self._cutoff_slider.set_value(self.cutoff)
self._cutoff_text_box.set_value(self.cutoff)
self.low_pass_filter_0.set_taps(firdes.low_pass(1, self.samp_rate, self.cutoff, self.tr_width, firdes.WIN_HAMMING, 6.76))
def get_Frequency(self):
return self.Frequency
def set_Frequency(self, Frequency):
self.Frequency = Frequency
self._Frequency_slider.set_value(self.Frequency)
self._Frequency_text_box.set_value(self.Frequency)
self.rtlsdr_source_0.set_center_freq(self.Frequency, 0)
def main(top_block_cls=top_block, options=None):
tb = top_block_cls()
tb.Start(True)
tb.Wait()
if __name__ == '__main__':
main()
|
|
# Copyright 2022 The Magenta Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A module for implementing interaction between MIDI and SequenceGenerators."""
import abc
import threading
import time
import note_seq
from note_seq.protobuf import generator_pb2
from note_seq.protobuf import music_pb2
import tensorflow.compat.v1 as tf
def adjust_sequence_times(sequence, delta_time):
"""Adjusts note and total NoteSequence times by `delta_time`."""
retimed_sequence = music_pb2.NoteSequence()
retimed_sequence.CopyFrom(sequence)
for note in retimed_sequence.notes:
note.start_time += delta_time
note.end_time += delta_time
retimed_sequence.total_time += delta_time
return retimed_sequence
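# For example, adjust_sequence_times(sequence, 0.5) returns a copy of `sequence`
# with every note's start/end time, and the total time, shifted 0.5 seconds later.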
class MidiInteraction(threading.Thread):
"""Base class for handling interaction between MIDI and SequenceGenerator.
  Child classes will provide the "main loop" of an interactive session between
a MidiHub used for MIDI I/O and sequences generated by a SequenceGenerator in
their `run` methods.
Should be started by calling `start` to launch in a separate thread.
Args:
midi_hub: The MidiHub to use for MIDI I/O.
sequence_generators: A collection of SequenceGenerator objects.
    qpm: The quarters per minute to use for this interaction. May be overridden
      by control changes sent to `tempo_control_number`.
    generator_select_control_number: An optional MIDI control number whose
      value to use for selecting a sequence generator from the collection.
      Must be provided if `sequence_generators` contains multiple
      SequenceGenerators.
tempo_control_number: An optional MIDI control number whose value to use to
determine the qpm for this interaction. On receipt of a control change,
the qpm will be set to 60 more than the control change value.
temperature_control_number: The optional control change number to use for
controlling generation softmax temperature.
Raises:
ValueError: If `generator_select_control_number` is None and
`sequence_generators` contains multiple SequenceGenerators.
"""
  __metaclass__ = abc.ABCMeta
# Base QPM when set by a tempo control change.
_BASE_QPM = 60
def __init__(self,
midi_hub,
sequence_generators,
qpm,
generator_select_control_number=None,
tempo_control_number=None,
temperature_control_number=None):
if generator_select_control_number is None and len(sequence_generators) > 1:
raise ValueError(
'`generator_select_control_number` cannot be None if there are '
'multiple SequenceGenerators.')
self._midi_hub = midi_hub
self._sequence_generators = sequence_generators
self._default_qpm = qpm
self._generator_select_control_number = generator_select_control_number
self._tempo_control_number = tempo_control_number
self._temperature_control_number = temperature_control_number
# A signal to tell the main loop when to stop.
self._stop_signal = threading.Event()
super(MidiInteraction, self).__init__()
@property
def _sequence_generator(self):
"""Returns the SequenceGenerator selected by the current control value."""
if len(self._sequence_generators) == 1:
return self._sequence_generators[0]
val = self._midi_hub.control_value(self._generator_select_control_number)
val = 0 if val is None else val
return self._sequence_generators[val % len(self._sequence_generators)]
@property
def _qpm(self):
"""Returns the qpm based on the current tempo control value."""
val = self._midi_hub.control_value(self._tempo_control_number)
return self._default_qpm if val is None else val + self._BASE_QPM
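    # For example, a tempo control change value of 60 yields 60 + 60 = 120 qpm.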
def _temperature(self, min_temp=0.1, max_temp=2.0, default=1.0):
"""Returns the temperature based on the current control value.
Linearly interpolates between `min_temp` and `max_temp`.
Args:
min_temp: The minimum temperature, which will be returned when value is 0.
max_temp: The maximum temperature, which will be returned when value is
127.
default: The temperature to return if control value is None.
Returns:
      A float temperature value based on the 7-bit MIDI control value.
"""
val = self._midi_hub.control_value(self._temperature_control_number)
if val is None:
return default
return min_temp + (val / 127.) * (max_temp - min_temp)
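    # With the default bounds, a control value of 0 maps to 0.1, 127 maps to 2.0,
    # and e.g. 64 maps to 0.1 + (64/127.) * (2.0 - 0.1), which is roughly 1.06.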
@abc.abstractmethod
def run(self):
"""The main loop for the interaction.
Must exit shortly after `self._stop_signal` is set.
"""
pass
def stop(self):
"""Stops the main loop, and blocks until the interaction is stopped."""
self._stop_signal.set()
self.join()
class CallAndResponseMidiInteraction(MidiInteraction):
"""Implementation of a MidiInteraction for interactive "call and response".
Alternates between receiving input from the MidiHub ("call") and playing
generated sequences ("response"). During the call stage, the input is captured
and used to generate the response, which is then played back during the
response stage.
The call phrase is started when notes are received and ended by an external
signal (`end_call_signal`) or after receiving no note events for a full tick.
The response phrase is immediately generated and played. Its length is
optionally determined by a control value set for
`response_ticks_control_number` or by the length of the call.
Args:
midi_hub: The MidiHub to use for MIDI I/O.
sequence_generators: A collection of SequenceGenerator objects.
    qpm: The quarters per minute to use for this interaction. May be overridden
      by control changes sent to `tempo_control_number`.
    generator_select_control_number: An optional MIDI control number whose
      value to use for selecting a sequence generator from the collection.
      Must be provided if `sequence_generators` contains multiple
      SequenceGenerators.
    clock_signal: An optional midi_hub.MidiSignal to use as a clock. Each tick
      period should have the same duration. No other assumptions are made
      about the duration, but it is typically equivalent to a bar length. Either
      this or `tick_duration` must be specified.
    tick_duration: An optional float specifying the duration of a tick period in
      seconds. No assumptions are made about the duration, but it is typically
      equivalent to a bar length. Either this or `clock_signal` must be
      specified.
end_call_signal: The optional midi_hub.MidiSignal to use as a signal to stop
the call phrase at the end of the current tick.
panic_signal: The optional midi_hub.MidiSignal to use as a signal to end
all open notes and clear the playback sequence.
mutate_signal: The optional midi_hub.MidiSignal to use as a signal to
generate a new response sequence using the current response as the
input.
allow_overlap: A boolean specifying whether to allow the call to overlap
with the response.
metronome_channel: The optional 0-based MIDI channel to output metronome on.
Ignored if `clock_signal` is provided.
min_listen_ticks_control_number: The optional control change number to use
for controlling the minimum call phrase length in clock ticks.
max_listen_ticks_control_number: The optional control change number to use
for controlling the maximum call phrase length in clock ticks. Call
phrases will automatically be ended and responses generated when this
length is reached.
response_ticks_control_number: The optional control change number to use for
controlling the length of the response in clock ticks.
tempo_control_number: An optional MIDI control number whose value to use to
determine the qpm for this interaction. On receipt of a control change,
the qpm will be set to 60 more than the control change value.
temperature_control_number: The optional control change number to use for
controlling generation softmax temperature.
loop_control_number: The optional control change number to use for
determining whether the response should be looped. Looping is enabled
when the value is 127 and disabled otherwise.
    state_control_number: The optional control change number to use for sending
state update control changes. The values are 0 for `IDLE`, 1 for
`LISTENING`, and 2 for `RESPONDING`.
Raises:
ValueError: If exactly one of `clock_signal` or `tick_duration` is not
specified.
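  A rough usage sketch (illustrative only; `hub` is an already constructed
  midi_hub.MidiHub and `generator` a single SequenceGenerator, both assumed
  here rather than defined in this module):
    interaction = CallAndResponseMidiInteraction(
        hub, [generator], qpm=120,
        generator_select_control_number=None,
        tick_duration=2.0)
    interaction.start()
    ...
    interaction.stop()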
"""
class State(object):
"""Class holding state value representations."""
IDLE = 0
LISTENING = 1
RESPONDING = 2
_STATE_NAMES = {
IDLE: 'Idle', LISTENING: 'Listening', RESPONDING: 'Responding'}
@classmethod
def to_string(cls, state):
return cls._STATE_NAMES[state]
def __init__(self,
midi_hub,
sequence_generators,
qpm,
generator_select_control_number,
clock_signal=None,
tick_duration=None,
end_call_signal=None,
panic_signal=None,
mutate_signal=None,
allow_overlap=False,
metronome_channel=None,
min_listen_ticks_control_number=None,
max_listen_ticks_control_number=None,
response_ticks_control_number=None,
tempo_control_number=None,
temperature_control_number=None,
loop_control_number=None,
state_control_number=None):
super(CallAndResponseMidiInteraction, self).__init__(
midi_hub, sequence_generators, qpm, generator_select_control_number,
tempo_control_number, temperature_control_number)
if [clock_signal, tick_duration].count(None) != 1:
raise ValueError(
'Exactly one of `clock_signal` or `tick_duration` must be specified.')
self._clock_signal = clock_signal
self._tick_duration = tick_duration
self._end_call_signal = end_call_signal
self._panic_signal = panic_signal
self._mutate_signal = mutate_signal
self._allow_overlap = allow_overlap
self._metronome_channel = metronome_channel
self._min_listen_ticks_control_number = min_listen_ticks_control_number
self._max_listen_ticks_control_number = max_listen_ticks_control_number
self._response_ticks_control_number = response_ticks_control_number
self._loop_control_number = loop_control_number
self._state_control_number = state_control_number
# Event for signalling when to end a call.
self._end_call = threading.Event()
# Event for signalling when to flush playback sequence.
self._panic = threading.Event()
    # Event for signalling when to mutate response.
self._mutate = threading.Event()
def _update_state(self, state):
"""Logs and sends a control change with the state."""
if self._state_control_number is not None:
self._midi_hub.send_control_change(self._state_control_number, state)
tf.logging.info('State: %s', self.State.to_string(state))
def _end_call_callback(self, unused_captured_seq):
"""Method to use as a callback for setting the end call signal."""
self._end_call.set()
tf.logging.info('End call signal received.')
def _panic_callback(self, unused_captured_seq):
"""Method to use as a callback for setting the panic signal."""
self._panic.set()
tf.logging.info('Panic signal received.')
def _mutate_callback(self, unused_captured_seq):
"""Method to use as a callback for setting the mutate signal."""
self._mutate.set()
tf.logging.info('Mutate signal received.')
@property
def _min_listen_ticks(self):
"""Returns the min listen ticks based on the current control value."""
val = self._midi_hub.control_value(
self._min_listen_ticks_control_number)
return 0 if val is None else val
@property
def _max_listen_ticks(self):
"""Returns the max listen ticks based on the current control value."""
val = self._midi_hub.control_value(
self._max_listen_ticks_control_number)
return float('inf') if not val else val
@property
def _should_loop(self):
return (self._loop_control_number and
self._midi_hub.control_value(self._loop_control_number) == 127)
def _generate(self, input_sequence, zero_time, response_start_time,
response_end_time):
"""Generates a response sequence with the currently-selected generator.
Args:
input_sequence: The NoteSequence to use as a generation seed.
zero_time: The float time in seconds to treat as the start of the input.
response_start_time: The float time in seconds for the start of
generation.
response_end_time: The float time in seconds for the end of generation.
Returns:
The generated NoteSequence.
"""
# Generation is simplified if we always start at 0 time.
response_start_time -= zero_time
response_end_time -= zero_time
generator_options = generator_pb2.GeneratorOptions()
generator_options.input_sections.add(
start_time=0,
end_time=response_start_time)
generator_options.generate_sections.add(
start_time=response_start_time,
end_time=response_end_time)
# Get current temperature setting.
generator_options.args['temperature'].float_value = self._temperature()
# Generate response.
tf.logging.info(
"Generating sequence using '%s' generator.",
self._sequence_generator.details.id)
tf.logging.debug('Generator Details: %s',
self._sequence_generator.details)
tf.logging.debug('Bundle Details: %s',
self._sequence_generator.bundle_details)
tf.logging.debug('Generator Options: %s', generator_options)
response_sequence = self._sequence_generator.generate(
adjust_sequence_times(input_sequence, -zero_time), generator_options)
response_sequence = note_seq.trim_note_sequence(response_sequence,
response_start_time,
response_end_time)
return adjust_sequence_times(response_sequence, zero_time)
def run(self):
"""The main loop for a real-time call and response interaction."""
start_time = time.time()
self._captor = self._midi_hub.start_capture(self._qpm, start_time)
if not self._clock_signal and self._metronome_channel is not None:
self._midi_hub.start_metronome(
self._qpm, start_time, channel=self._metronome_channel)
# Set callback for end call signal.
if self._end_call_signal is not None:
self._captor.register_callback(self._end_call_callback,
signal=self._end_call_signal)
if self._panic_signal is not None:
self._captor.register_callback(self._panic_callback,
signal=self._panic_signal)
if self._mutate_signal is not None:
self._captor.register_callback(self._mutate_callback,
signal=self._mutate_signal)
# Keep track of the end of the previous tick time.
last_tick_time = time.time()
# Keep track of the duration of a listen state.
listen_ticks = 0
# Start with an empty response sequence.
response_sequence = music_pb2.NoteSequence()
response_start_time = 0
response_duration = 0
player = self._midi_hub.start_playback(
response_sequence, allow_updates=True)
# Enter loop at each clock tick.
for captured_sequence in self._captor.iterate(signal=self._clock_signal,
period=self._tick_duration):
if self._stop_signal.is_set():
break
if self._panic.is_set():
response_sequence = music_pb2.NoteSequence()
player.update_sequence(response_sequence)
self._panic.clear()
tick_time = captured_sequence.total_time
# Set to current QPM, since it might have changed.
if not self._clock_signal and self._metronome_channel is not None:
self._midi_hub.start_metronome(
self._qpm, tick_time, channel=self._metronome_channel)
captured_sequence.tempos[0].qpm = self._qpm
tick_duration = tick_time - last_tick_time
if captured_sequence.notes:
last_end_time = max(note.end_time for note in captured_sequence.notes)
else:
last_end_time = 0.0
# True iff there was no input captured during the last tick.
silent_tick = last_end_time <= last_tick_time
if not silent_tick:
listen_ticks += 1
if not captured_sequence.notes:
# Reset captured sequence since we are still idling.
if response_sequence.total_time <= tick_time:
self._update_state(self.State.IDLE)
if self._captor.start_time < tick_time:
self._captor.start_time = tick_time
self._end_call.clear()
listen_ticks = 0
elif (self._end_call.is_set() or
silent_tick or
listen_ticks >= self._max_listen_ticks):
if listen_ticks < self._min_listen_ticks:
tf.logging.info(
'Input too short (%d vs %d). Skipping.',
listen_ticks,
self._min_listen_ticks)
self._captor.start_time = tick_time
else:
# Create response and start playback.
self._update_state(self.State.RESPONDING)
capture_start_time = self._captor.start_time
if silent_tick:
# Move the sequence forward one tick in time.
captured_sequence = adjust_sequence_times(
captured_sequence, tick_duration)
captured_sequence.total_time = tick_time
capture_start_time += tick_duration
# Compute duration of response.
num_ticks = self._midi_hub.control_value(
self._response_ticks_control_number)
if num_ticks:
response_duration = num_ticks * tick_duration
else:
# Use capture duration.
response_duration = tick_time - capture_start_time
response_start_time = tick_time
response_sequence = self._generate(
captured_sequence,
capture_start_time,
response_start_time,
response_start_time + response_duration)
# If it took too long to generate, push response to next tick.
if (time.time() - response_start_time) >= tick_duration / 4:
push_ticks = (
(time.time() - response_start_time) // tick_duration + 1)
response_start_time += push_ticks * tick_duration
response_sequence = adjust_sequence_times(
response_sequence, push_ticks * tick_duration)
tf.logging.warn(
'Response too late. Pushing back %d ticks.', push_ticks)
# Start response playback. Specify the start_time to avoid stripping
# initial events due to generation lag.
player.update_sequence(
response_sequence, start_time=response_start_time)
# Optionally capture during playback.
if self._allow_overlap:
self._captor.start_time = response_start_time
else:
self._captor.start_time = response_start_time + response_duration
# Clear end signal and reset listen_ticks.
self._end_call.clear()
listen_ticks = 0
else:
# Continue listening.
self._update_state(self.State.LISTENING)
# Potentially loop or mutate previous response.
if self._mutate.is_set() and not response_sequence.notes:
self._mutate.clear()
tf.logging.warn('Ignoring mutate request with nothing to mutate.')
if (response_sequence.total_time <= tick_time and
(self._should_loop or self._mutate.is_set())):
if self._mutate.is_set():
new_start_time = response_start_time + response_duration
new_end_time = new_start_time + response_duration
response_sequence = self._generate(
response_sequence,
response_start_time,
new_start_time,
new_end_time)
response_start_time = new_start_time
self._mutate.clear()
response_sequence = adjust_sequence_times(
response_sequence, tick_time - response_start_time)
response_start_time = tick_time
player.update_sequence(
response_sequence, start_time=tick_time)
last_tick_time = tick_time
player.stop()
def stop(self):
self._stop_signal.set()
self._captor.stop()
self._midi_hub.stop_metronome()
super(CallAndResponseMidiInteraction, self).stop()
|
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import distutils.spawn
import glob
import heapq
import logging
import os
import os.path
import shutil
import subprocess
import sys
import tempfile
import time
from telemetry.core import exceptions
from telemetry.core import util
from telemetry.core.backends import browser_backend
from telemetry.core.backends.chrome import chrome_browser_backend
from telemetry.util import support_binaries
class DesktopBrowserBackend(chrome_browser_backend.ChromeBrowserBackend):
"""The backend for controlling a locally-executed browser instance, on Linux,
Mac or Windows.
"""
def __init__(self, browser_options, executable, flash_path, is_content_shell,
browser_directory, output_profile_path, extensions_to_load):
super(DesktopBrowserBackend, self).__init__(
is_content_shell=is_content_shell,
supports_extensions=not is_content_shell,
browser_options=browser_options,
output_profile_path=output_profile_path,
extensions_to_load=extensions_to_load)
# Initialize fields so that an explosion during init doesn't break in Close.
self._proc = None
self._tmp_profile_dir = None
self._tmp_output_file = None
self._executable = executable
if not self._executable:
raise Exception('Cannot create browser, no executable found!')
assert not flash_path or os.path.exists(flash_path)
self._flash_path = flash_path
if len(extensions_to_load) > 0 and is_content_shell:
raise browser_backend.ExtensionsNotSupportedException(
'Content shell does not support extensions.')
self._browser_directory = browser_directory
self._port = None
self._profile_dir = None
self._tmp_minidump_dir = tempfile.mkdtemp()
self._crash_service = None
self._SetupProfile()
def _SetupProfile(self):
if not self.browser_options.dont_override_profile:
if self._output_profile_path:
# If both |_output_profile_path| and |profile_dir| are specified then
# the calling code will throw an exception, so we don't need to worry
# about that case here.
self._tmp_profile_dir = self._output_profile_path
else:
self._tmp_profile_dir = tempfile.mkdtemp()
profile_dir = self._profile_dir or self.browser_options.profile_dir
if profile_dir:
if self.is_content_shell:
logging.critical('Profiles cannot be used with content shell')
sys.exit(1)
logging.info("Using profile directory:'%s'." % profile_dir)
shutil.rmtree(self._tmp_profile_dir)
shutil.copytree(profile_dir, self._tmp_profile_dir)
if self.browser_options.use_devtools_active_port:
# No matter whether we're using an existing profile directory or
# creating a new one, always delete the well-known file containing
# the active DevTools port number.
port_file = self._GetDevToolsActivePortPath()
if os.path.isfile(port_file):
try:
os.remove(port_file)
except Exception as e:
logging.critical('Unable to remove DevToolsActivePort file: %s' % e)
sys.exit(1)
def _GetDevToolsActivePortPath(self):
return os.path.join(self.profile_directory, 'DevToolsActivePort')
def _GetCrashServicePipeName(self):
# Ensure a unique pipe name by using the name of the temp dir.
return r'\\.\pipe\%s_service' % os.path.basename(self._tmp_minidump_dir)
def _StartCrashService(self):
os_name = self._browser.platform.GetOSName()
if os_name != 'win':
return None
return subprocess.Popen([
support_binaries.FindPath('crash_service', os_name),
'--no-window',
'--dumps-dir=%s' % self._tmp_minidump_dir,
'--pipe-name=%s' % self._GetCrashServicePipeName()])
def _GetCdbPath(self):
search_paths = [os.getenv('PROGRAMFILES(X86)', ''),
os.getenv('PROGRAMFILES', ''),
os.getenv('LOCALAPPDATA', ''),
os.getenv('PATH', '')]
possible_paths = [
'Debugging Tools For Windows',
'Debugging Tools For Windows (x86)',
'Debugging Tools For Windows (x64)',
os.path.join('Windows Kits', '8.0', 'Debuggers', 'x86'),
os.path.join('Windows Kits', '8.0', 'Debuggers', 'x64'),
os.path.join('win_toolchain', 'vs2013_files', 'win8sdk', 'Debuggers',
'x86'),
os.path.join('win_toolchain', 'vs2013_files', 'win8sdk', 'Debuggers',
'x64'),
]
for possible_path in possible_paths:
path = distutils.spawn.find_executable(
os.path.join(possible_path, 'cdb'),
path=os.pathsep.join(search_paths))
if path:
return path
return None
def HasBrowserFinishedLaunching(self):
# In addition to the functional check performed by the base class, quickly
# check if the browser process is still alive.
if not self.IsBrowserRunning():
raise exceptions.ProcessGoneException(
"Return code: %d" % self._proc.returncode)
if self.browser_options.use_devtools_active_port:
# The Telemetry user selected the new code path to start DevTools on
# an ephemeral port. Wait for the well-known file containing the port
# number to exist.
port_file = self._GetDevToolsActivePortPath()
if not os.path.isfile(port_file):
# File isn't ready yet. Return false. Will retry.
return False
# Attempt to avoid reading the file until it's populated.
got_port = False
try:
if os.stat(port_file).st_size > 0:
with open(port_file) as f:
port_string = f.read()
self._port = int(port_string)
logging.info('Discovered ephemeral port %s' % self._port)
got_port = True
except Exception:
# Both stat and open can throw exceptions.
pass
if not got_port:
# File isn't ready yet. Return false. Will retry.
return False
return super(DesktopBrowserBackend, self).HasBrowserFinishedLaunching()
def GetBrowserStartupArgs(self):
args = super(DesktopBrowserBackend, self).GetBrowserStartupArgs()
if self.browser_options.use_devtools_active_port:
self._port = 0
else:
self._port = util.GetUnreservedAvailableLocalPort()
logging.info('Requested remote debugging port: %d' % self._port)
args.append('--remote-debugging-port=%i' % self._port)
args.append('--enable-crash-reporter-for-testing')
args.append('--use-mock-keychain')
if not self.is_content_shell:
args.append('--window-size=1280,1024')
if self._flash_path:
args.append('--ppapi-flash-path=%s' % self._flash_path)
if not self.browser_options.dont_override_profile:
args.append('--user-data-dir=%s' % self._tmp_profile_dir)
return args
def SetProfileDirectory(self, profile_dir):
# Make sure _profile_dir hasn't already been set.
assert self._profile_dir is None
if self.is_content_shell:
logging.critical('Profile creation cannot be used with content shell')
sys.exit(1)
self._profile_dir = profile_dir
def Start(self):
assert not self._proc, 'Must call Close() before Start()'
args = [self._executable]
args.extend(self.GetBrowserStartupArgs())
if self.browser_options.startup_url:
args.append(self.browser_options.startup_url)
env = os.environ.copy()
env['CHROME_HEADLESS'] = '1' # Don't upload minidumps.
env['BREAKPAD_DUMP_LOCATION'] = self._tmp_minidump_dir
env['CHROME_BREAKPAD_PIPE_NAME'] = self._GetCrashServicePipeName()
self._crash_service = self._StartCrashService()
logging.debug('Starting Chrome %s', args)
if not self.browser_options.show_stdout:
self._tmp_output_file = tempfile.NamedTemporaryFile('w', 0)
self._proc = subprocess.Popen(
args, stdout=self._tmp_output_file, stderr=subprocess.STDOUT, env=env)
else:
self._proc = subprocess.Popen(args, env=env)
try:
self._WaitForBrowserToComeUp()
self._PostBrowserStartupInitialization()
except:
self.Close()
raise
@property
def pid(self):
if self._proc:
return self._proc.pid
return None
@property
def browser_directory(self):
return self._browser_directory
@property
def profile_directory(self):
return self._tmp_profile_dir
def IsBrowserRunning(self):
return self._proc and self._proc.poll() is None
def GetStandardOutput(self):
if not self._tmp_output_file:
if self.browser_options.show_stdout:
# This can happen in the case that loading the Chrome binary fails.
# We print rather than using logging here, because that makes a
# recursive call to this function.
print >> sys.stderr, "Can't get standard output with --show-stdout"
return ''
self._tmp_output_file.flush()
try:
with open(self._tmp_output_file.name) as f:
return f.read()
except IOError:
return ''
def _GetMostRecentMinidump(self):
dumps = glob.glob(os.path.join(self._tmp_minidump_dir, '*.dmp'))
if not dumps:
return None
most_recent_dump = heapq.nlargest(1, dumps, os.path.getmtime)[0]
if os.path.getmtime(most_recent_dump) < (time.time() - (5 * 60)):
logging.warning('Crash dump is older than 5 minutes. May not be correct.')
return most_recent_dump
def _GetStackFromMinidump(self, minidump):
os_name = self._browser.platform.GetOSName()
if os_name == 'win':
cdb = self._GetCdbPath()
if not cdb:
logging.warning('cdb.exe not found.')
return None
output = subprocess.check_output([cdb, '-y', self._browser_directory,
'-c', '.ecxr;k30;q', '-z', minidump])
stack_start = output.find('ChildEBP')
stack_end = output.find('quit:')
return output[stack_start:stack_end]
stackwalk = support_binaries.FindPath('minidump_stackwalk', os_name)
if not stackwalk:
logging.warning('minidump_stackwalk binary not found.')
return None
symbols = glob.glob(os.path.join(self._browser_directory, '*.breakpad*'))
if not symbols:
logging.warning('No breakpad symbols found.')
return None
with open(minidump, 'rb') as infile:
minidump += '.stripped'
with open(minidump, 'wb') as outfile:
outfile.write(''.join(infile.read().partition('MDMP')[1:]))
symbols_path = os.path.join(self._tmp_minidump_dir, 'symbols')
for symbol in sorted(symbols, key=os.path.getmtime, reverse=True):
if not os.path.isfile(symbol):
continue
with open(symbol, 'r') as f:
fields = f.readline().split()
if not fields:
continue
sha = fields[3]
binary = ' '.join(fields[4:])
symbol_path = os.path.join(symbols_path, binary, sha)
if os.path.exists(symbol_path):
continue
os.makedirs(symbol_path)
shutil.copyfile(symbol, os.path.join(symbol_path, binary + '.sym'))
return subprocess.check_output([stackwalk, minidump, symbols_path],
stderr=open(os.devnull, 'w'))
def GetStackTrace(self):
most_recent_dump = self._GetMostRecentMinidump()
if not most_recent_dump:
logging.warning('No crash dump found. Returning browser stdout.')
return self.GetStandardOutput()
stack = self._GetStackFromMinidump(most_recent_dump)
if not stack:
logging.warning('Failed to symbolize minidump. Returning browser stdout.')
return self.GetStandardOutput()
return stack
def __del__(self):
self.Close()
def Close(self):
super(DesktopBrowserBackend, self).Close()
# First, try to politely shutdown.
if self.IsBrowserRunning():
self._proc.terminate()
try:
util.WaitFor(lambda: not self.IsBrowserRunning(), timeout=5)
self._proc = None
except util.TimeoutException:
logging.warning('Failed to gracefully shutdown. Proceeding to kill.')
# If it didn't comply, get more aggressive.
if self.IsBrowserRunning():
self._proc.kill()
try:
util.WaitFor(lambda: not self.IsBrowserRunning(), timeout=10)
except util.TimeoutException:
raise Exception('Could not shutdown the browser.')
finally:
self._proc = None
if self._crash_service:
self._crash_service.kill()
self._crash_service = None
if self._output_profile_path:
# If we need the output then double check that it exists.
if not (self._tmp_profile_dir and os.path.exists(self._tmp_profile_dir)):
raise Exception("No profile directory generated by Chrome: '%s'." %
self._tmp_profile_dir)
else:
# If we don't need the profile after the run then cleanup.
if self._tmp_profile_dir and os.path.exists(self._tmp_profile_dir):
shutil.rmtree(self._tmp_profile_dir, ignore_errors=True)
self._tmp_profile_dir = None
if self._tmp_output_file:
self._tmp_output_file.close()
self._tmp_output_file = None
|
|
from django.conf import settings
from django.contrib.auth.models import AbstractBaseUser, BaseUserManager
from django.core.exceptions import ValidationError
from django.db import models
from django.utils import timezone
from django.utils.translation import gettext_lazy
from devilry.devilry_account.exceptions import IllegalOperationError
class UserQuerySet(models.QuerySet):
def prefetch_related_notification_emails(self):
"""
Use this if you need efficient access to the notification :class:`.UserEmail` objects.
This will add the ``notification_useremail_objects`` attribute to each returned
:class:`.User`. ``notification_useremail_objects`` is a list that you can use
if you need access to the :class:`.UserEmail` objects.
Use :meth:`.User.notification_emails` to access the emails as a list of
strings.
"""
return self.prefetch_related(
models.Prefetch('useremail_set',
queryset=UserEmail.objects.filter(use_for_notifications=True),
to_attr='notification_useremail_objects'))
def prefetch_related_primary_email(self):
"""
Use this if you need efficient access to the primary :class:`.UserEmail`.
This will add the ``primary_useremail_objects`` attribute to each returned
:class:`.User`. ``primary_useremail_objects`` is a list, and you should not
use it directly. Use :meth:`.User.primary_useremail_object` or
:meth:`.User.primary_email` to access the primary email.
"""
return self.prefetch_related(
models.Prefetch('useremail_set',
queryset=UserEmail.objects.filter(is_primary=True),
to_attr='primary_useremail_objects'))
def prefetch_related_primary_username(self):
"""
Use this if you need efficient access to the primary :class:`.UserName`.
This will add the ``primary_username_objects`` attribute to each returned
:class:`.User`. ``primary_username_objects`` is a list, and you should not
use it directly. Use :meth:`.User.primary_username_object` or
:meth:`.User.primary_username` to access the primary username.
"""
return self.prefetch_related(
models.Prefetch('username_set',
queryset=UserName.objects.filter(is_primary=True),
to_attr='primary_username_objects'))
def filter_by_emails(self, emails):
"""
Filter the queryset to only include users with email address
in the ``emails`` iterable.
"""
return self.filter(useremail__email__in=emails).distinct()
def filter_by_usernames(self, usernames):
"""
Filter the queryset to only include users with username
in the ``usernames`` iterable.
"""
return self.filter(username__username__in=usernames).distinct()
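# Illustrative usage sketch for the queryset helpers above: prefetching the
# primary email lets ``User.primary_email`` be read without extra queries.
# The email addresses below are hypothetical.
def _example_userqueryset_usage():
    users = (User.objects
             .prefetch_related_primary_email()
             .filter_by_emails(['student@example.com', 'examiner@example.com']))
    return [(user.shortname, user.primary_email) for user in users]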
class UserManager(BaseUserManager):
"""
Manager for :class:`User`.
"""
use_for_related_fields = True
def get_queryset(self):
return UserQuerySet(self.model, using=self._db)
#
#
# From the QuerySet
#
#
def prefetch_related_notification_emails(self):
"""
See :meth:`.UserQuerySet.prefetch_related_notification_emails`.
"""
return self.get_queryset().prefetch_related_notification_emails()
def prefetch_related_primary_email(self):
"""
See :meth:`.UserQuerySet.prefetch_related_primary_email`.
"""
return self.get_queryset().prefetch_related_primary_email()
def prefetch_related_primary_username(self):
"""
See :meth:`.UserQuerySet.prefetch_related_primary_username`.
"""
return self.get_queryset().prefetch_related_primary_username()
def filter_by_emails(self, emails):
"""
See :meth:`.UserQuerySet.filter_by_emails`.
"""
return self.get_queryset().filter_by_emails(emails)
def filter_by_usernames(self, usernames):
"""
See :meth:`.UserQuerySet.filter_by_usernames`.
"""
return self.get_queryset().filter_by_usernames(usernames)
#
#
# Manager-specific methods
#
#
def user_is_basenodeadmin(self, user, *basenode_modelsclasses):
"""
Check if the given user is admin on any of the given
``basenode_modelsclasses``.
:param basenode_modelsclasses:
Model classes. They must have an ``admins`` one-to-many relationship
with User.
"""
for cls in basenode_modelsclasses:
if cls.objects.filter(admins__id=user.id).exists():
return True
return False
def user_is_admin(self, user):
"""
Check if the given user is admin on any subject or period.
"""
return PermissionGroupUser.objects.filter(user=user).exists()
def user_is_admin_or_superuser(self, user):
"""
Return ``True`` if ``user.is_superuser``, and fall back to calling
:func:`.user_is_admin` if not.
"""
if user.is_superuser:
return True
else:
return self.user_is_admin(user)
def user_is_examiner(self, user):
"""
Returns ``True`` if the given ``user`` is examiner on any AssignmentGroup.
"""
from devilry.apps.core.models.assignment_group import AssignmentGroup
return AssignmentGroup.objects.filter_examiner_has_access(user).exists()
def user_is_student(self, user):
"""
Returns ``True`` if the given ``user`` is candidate on any AssignmentGroup.
"""
from devilry.apps.core.models.assignment_group import AssignmentGroup
return AssignmentGroup.objects.filter_student_has_access(user).exists()
def create_user(self, username='', email='', password=None, **kwargs):
"""
Create a new user.
Requires ``username`` or ``email``, and both can be supplied.
If ``username`` is supplied, we create a UserName object with ``is_primary=True``,
and if ``email`` is supplied, we create a UserEmail object with
``use_for_notifications=True``.
If ``password`` is supplied, we set the password, otherwise we
set an unusable password.
Other than that, you can provide any :class:`.User` fields except
``shortname``. ``shortname`` is created from username or email (in that order).
"""
if settings.CRADMIN_LEGACY_USE_EMAIL_AUTH_BACKEND and username:
raise IllegalOperationError('Can not create user with username when the '
'CRADMIN_LEGACY_USE_EMAIL_AUTH_BACKEND-setting is True')
shortname = username or email
user = self.model(shortname=shortname, **kwargs)
if password:
user.set_password(password)
else:
user.set_unusable_password()
user.full_clean()
user.save(using=self._db)
if username:
user.username_set.create(username=username, is_primary=True)
if username and not email:
email_username_suffix = getattr(settings, 'DEVILRY_DEFAULT_EMAIL_USERNAME_SUFFIX', None)
if email_username_suffix:
if '@' not in email_username_suffix:
email_username_suffix = '@{}'.format(email_username_suffix)
email = '{}{}'.format(username, email_username_suffix)
if email:
user.useremail_set.create(email=email, is_primary=True,
use_for_notifications=True)
return user
def get_user(self, username='', email=''):
if not username and not email:
raise ValueError('username or email must be supplied')
if username:
return self.get_by_username(username=username)
else:
return self.get_by_email(email=email)
def get_or_create_user(self, username='', email='', password=None, **kwargs):
if not username and not email:
raise ValueError('username or email must be supplied')
try:
user = self.get_user(username=username, email=email)
except self.model.DoesNotExist:
user = None
if user:
return user, False
return self.create_user(username=username, email=email, password=password, **kwargs), True
def create_superuser(self, password=None, **kwargs):
"""
Create a new superuser.
"""
email = kwargs.pop(User.USERNAME_FIELD)
user = self.create_user(email=email, password=password, is_superuser=True, **kwargs)
return user
def get_by_email(self, email):
"""
Get a user by any of their emails.
Raises:
User.DoesNotExist: If no :class:`.UserEmail` with the given email is found.
Returns:
User: The user object.
"""
return self.get_queryset().filter(useremail__email=email).get()
def get_by_username(self, username):
"""
Get a user by any of their usernames.
Raises:
User.DoesNotExist: If no :class:`.UserName` with the given username is found.
Returns:
User: The user object.
"""
return self.get_queryset()\
.filter(
models.Q(username__username=username) |
models.Q(shortname=username))\
.get()
def __create_primary_useremail_objects_from_users(self, users):
"""
Create :class:`.UserEmail` objects for the given iterable of
:class:`.User` objects.
Uses the ``shortname`` as the email. All the UserEmail objects
are marked as primary emails and as notification emails.
"""
new_useremail_objects = []
for user in users:
new_username_object = UserEmail(user=user,
email=user.shortname,
is_primary=True,
use_for_notifications=True)
new_useremail_objects.append(new_username_object)
UserEmail.objects.bulk_create(new_useremail_objects)
def bulk_create_from_emails(self, emails):
"""
Bulk create users for all the emails
in the given ``emails`` iterable.
All users are created with an unusable password.
We create a :class:`.UserEmail` object for each of the created
users. This UserEmail object has ``is_primary`` set to ``True``.
Raises:
devilry_account.exceptions.IllegalOperationError: If the
``CRADMIN_LEGACY_USE_EMAIL_AUTH_BACKEND``-setting is ``False``.
Returns:
A ``(created_users, excluded_emails)``-tuple.
``created_users`` is a queryset with the created users.
``excluded_emails`` is a set of the emails that already existed.
"""
if not settings.CRADMIN_LEGACY_USE_EMAIL_AUTH_BACKEND:
raise IllegalOperationError('You can not use bulk_create_from_emails() when '
'CRADMIN_LEGACY_USE_EMAIL_AUTH_BACKEND is False.')
existing_emails = set(UserEmail.objects.filter(email__in=emails).values_list('email', flat=True))
existing_shortnames = set(User.objects.filter(shortname__in=emails).values_list('shortname', flat=True))
existing_emails = existing_emails.union(existing_shortnames)
all_emails_set = set(emails)
new_emails_set = all_emails_set.difference(existing_emails)
new_user_objects = []
for email in new_emails_set:
new_user = User(shortname=email)
new_user.set_unusable_password()
new_user_objects.append(new_user)
User.objects.bulk_create(new_user_objects)
created_users = User.objects.filter(shortname__in=new_emails_set)
self.__create_primary_useremail_objects_from_users(created_users)
return created_users, existing_emails
def __create_primary_username_objects_from_users(self, users):
"""
Create :class:`.UserName` objects for the given iterable of
:class:`.User` objects.
Uses the ``shortname`` as the username. All the UserName objects
are marked as primary usernames.
"""
new_username_objects = []
for user in users:
new_username_object = UserName(username=user.shortname, is_primary=True, user=user)
new_username_objects.append(new_username_object)
UserName.objects.bulk_create(new_username_objects)
def __create_primary_useremail_objects_from_users_via_suffix(self, users):
"""
Create :class:`.UserEmail` objects for the given iterable of
:class:`.User` objects.
Uses ``<shortname><settings.DEVILRY_DEFAULT_EMAIL_SUFFIX>`` as the email.
All the UserEmail objects are marked as primary emails and as
notification emails.
"""
new_useremail_objects = []
for user in users:
new_username_object = UserEmail(
user=user,
email='{}{}'.format(user.shortname, settings.DEVILRY_DEFAULT_EMAIL_SUFFIX),
is_primary=True,
use_for_notifications=True)
new_useremail_objects.append(new_username_object)
UserEmail.objects.bulk_create(new_useremail_objects)
def bulk_create_from_usernames(self, usernames):
"""
Bulk create users for all the usernames
in the given ``usernames`` iterable.
All users are created with an unusable password.
We create a :class:`.UserName` object for each of the created
users. This UserName object has ``is_primary`` set to ``True``.
Raises:
devilry_account.exceptions.IllegalOperationError: If the
``CRADMIN_LEGACY_USE_EMAIL_AUTH_BACKEND``-setting is ``True``.
Returns:
A ``(created_users, excluded_usernames)``-tuple.
``created_users`` is a queryset with the created users.
``excluded_usernames`` is a set of the usernames that already existed.
"""
if settings.CRADMIN_LEGACY_USE_EMAIL_AUTH_BACKEND:
raise IllegalOperationError('You can not use bulk_create_from_usernames() when '
'CRADMIN_LEGACY_USE_EMAIL_AUTH_BACKEND is True.')
existing_usernames = set(UserName.objects.filter(username__in=usernames).values_list('username', flat=True))
existing_shortnames = set(User.objects.filter(shortname__in=usernames).values_list('shortname', flat=True))
existing_usernames = existing_usernames.union(existing_shortnames)
all_usernames_set = set(usernames)
new_usernames_set = all_usernames_set.difference(existing_usernames)
new_user_objects = []
for username in new_usernames_set:
new_user = User(shortname=username)
new_user.set_unusable_password()
new_user_objects.append(new_user)
User.objects.bulk_create(new_user_objects)
created_users = User.objects.filter(shortname__in=new_usernames_set)
self.__create_primary_username_objects_from_users(created_users)
if settings.DEVILRY_DEFAULT_EMAIL_SUFFIX:
self.__create_primary_useremail_objects_from_users_via_suffix(created_users)
return created_users, existing_usernames
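# Illustrative usage sketch for the manager methods above. It assumes the
# CRADMIN_LEGACY_USE_EMAIL_AUTH_BACKEND setting matches each call (False for
# username-based creation, True for bulk creation from emails); the names and
# addresses are hypothetical.
def _example_usermanager_usage():
    user, created = User.objects.get_or_create_user(
        username='exampleuser', fullname='Example User')
    created_users, existing_emails = User.objects.bulk_create_from_emails(
        ['a@example.com', 'b@example.com'])
    return user, created, created_users.count(), existing_emails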
class User(AbstractBaseUser):
"""
User model for Devilry.
"""
objects = UserManager()
#: Is this user a superuser?
is_superuser = models.BooleanField(
verbose_name=gettext_lazy('superuser status'),
default=False,
help_text=gettext_lazy('Designates that this user has all permissions without '
'explicitly assigning them.'))
#: Short name for the user.
#: This will be set to the primary email address or to the primary username
#: depending on the auth backend.
#: Must be unique.
shortname = models.CharField(
max_length=255,
blank=False, null=False,
editable=True,
unique=True,
help_text=gettext_lazy('The short name for the user. This is set automatically to the '
'email or username depending on the method used for authentication.')
)
#: Full name of the user. Optional.
fullname = models.TextField(
verbose_name=gettext_lazy('Full name'),
blank=True, default="", null=False)
#: The last name of the user. Optional.
#: Used to sort by last name.
lastname = models.TextField(
verbose_name=gettext_lazy('Last name'),
blank=True, default="", null=False)
#: The datetime the user was created.
datetime_joined = models.DateTimeField(
verbose_name=gettext_lazy('date joined'),
default=timezone.now)
#: Datetime when this account was suspended.
suspended_datetime = models.DateTimeField(
null=True, blank=True,
verbose_name=gettext_lazy('Suspension time'),
help_text=gettext_lazy('Time when the account was suspended'))
#: Reason why the account is suspended.
suspended_reason = models.TextField(
blank=True, default='',
verbose_name=gettext_lazy('Reason for suspension'))
#: The language code for the preferred language for the user.
languagecode = models.CharField(
max_length=10, blank=True, null=False,
default='',
verbose_name=gettext_lazy('Preferred language')
)
USERNAME_FIELD = 'shortname'
REQUIRED_FIELDS = []
class Meta:
verbose_name = gettext_lazy('User')
verbose_name_plural = gettext_lazy('Users')
@property
def is_staff(self):
return self.is_superuser
@property
def is_active(self):
return self.suspended_datetime is None
def get_full_name(self):
"""
Get the :obj:`~.User.fullname`, falling back to :obj:`~.User.shortname`
if fullname is not set.
"""
return self.fullname or self.shortname
def get_short_name(self):
"""
Get the short name for the user.
"""
return self.shortname
def get_displayname(self):
"""
Get as much of the name as possible. If we have only
shortname, return that, if we have both shortname and
fullname, return ``<fullname> (<shortname>)``.
"""
if self.fullname:
return '{} ({})'.format(self.fullname, self.shortname)
else:
return self.shortname
def get_initials(self):
"""
Get the initials of the users name.
"""
if self.fullname:
return ''.join(word[0].upper() for word in self.fullname.split())
else:
return self.shortname[0].upper()
def has_module_perms(self, *args, **kwargs):
return self.is_superuser
def has_perm(self, *args, **kwargs):
return self.is_superuser
def get_all_permissions(self, *args, **kwargs):
return set()
def get_group_permissions(self, *args, **kwargs):
return set()
def get_user_permissions(self, *args, **kwargs):
return set()
def __str__(self):
return self.shortname
def clean(self):
if self.suspended_datetime is None and self.suspended_reason != '':
raise ValidationError(gettext_lazy('Can not provide a reason for suspension when suspension time is blank.'))
if not self.shortname:
raise ValidationError(gettext_lazy('Short name is required.'))
if self.fullname:
self.lastname = self.fullname.split()[-1]
@property
def notification_emails(self):
"""
Get the notification emails as a list of strings.
Only works if you have used :meth:`.UserQuerySet.prefetch_related_notification_emails`
on the queryset.
"""
return [useremail.email for useremail in self.notification_useremail_objects]
@property
def primary_useremail_object(self):
"""
Get the primary email address of the user as a :class:`.UserEmail` object.
Only works if you have used :meth:`.UserQuerySet.prefetch_related_primary_email`
on the queryset.
Returns ``None`` if we have no primary email.
"""
try:
return self.primary_useremail_objects[0]
except IndexError:
return None
@property
def primary_email(self):
"""
Get the primary email address of the user as a string.
Only works if you have used :meth:`.UserQuerySet.prefetch_related_primary_email`
on the queryset.
Returns ``None`` if we have no primary email.
"""
primary_useremail_object = self.primary_useremail_object
if primary_useremail_object:
return primary_useremail_object.email
else:
return None
@property
def primary_username_object(self):
"""
Get the primary username of the user as a :class:`.UserName` object.
Only works if you have used :meth:`.UserQuerySet.prefetch_related_primary_username`
on the queryset.
Returns ``None`` if we have no primary username.
"""
try:
return self.primary_username_objects[0]
except IndexError:
return None
@property
def primary_username(self):
"""
Get the primary username of the user as a string.
Only works if you have used :meth:`.UserQuerySet.prefetch_related_primary_username`
on the queryset.
Returns ``None`` if we have no primary username.
"""
primary_username_object = self.primary_username_object
if primary_username_object:
return primary_username_object.username
else:
return None
class AbstractUserIdentity(models.Model):
"""
Base class for :class:`.UserEmail` and :class:`.UserName`.
"""
class Meta:
abstract = True
#: Foreign key to the user owning this email address.
user = models.ForeignKey(User, on_delete=models.CASCADE)
#: The datetime when this was created.
created_datetime = models.DateTimeField(
default=timezone.now,
editable=False,
null=False, blank=False)
#: The datetime when this was last updated.
last_updated_datetime = models.DateTimeField(
default=timezone.now,
editable=False,
null=False, blank=False)
def clean(self):
self.last_updated_datetime = timezone.now()
class UserEmail(AbstractUserIdentity):
"""
Stores a single email address for a :class:`.User`.
"""
class Meta:
verbose_name = gettext_lazy('Email address')
verbose_name_plural = gettext_lazy('Email addresses')
unique_together = [
('user', 'is_primary')
]
#: The email address of the user.
#: Must be unique.
email = models.EmailField(
verbose_name=gettext_lazy('Email'),
unique=True,
max_length=255)
#: Is this a notification email for the user?
#: A user can have multiple notification emails.
use_for_notifications = models.BooleanField(
default=True,
verbose_name=gettext_lazy('Send notifications to this email address?'))
#: Is this the primary email for the user?
#: Valid values are: ``None`` and ``True``, and only
#: one UserEmail per user can have ``is_primary=True``.
is_primary = models.BooleanField(
null=True, blank=True,
verbose_name=gettext_lazy('Is this your primary email?'),
choices=[
(None, gettext_lazy('No')),
(True, gettext_lazy('Yes'))
],
help_text=gettext_lazy('Your primary email is the email address used when we '
'need to display a single email address.')
)
def clean(self):
if self.is_primary is False:
raise ValidationError('is_primary can not be False. Valid values are: True, None.')
if self.is_primary:
other_useremails = UserEmail.objects.filter(user=self.user)
if self.id is not None:
other_useremails = other_useremails.exclude(id=self.id)
other_useremails.update(is_primary=None)
# if settings.CRADMIN_LEGACY_USE_EMAIL_AUTH_BACKEND:
# user = self.user
# if user.shortname != self.email:
# user.shortname = self.email
# user.save()
def __str__(self):
return gettext_lazy('%(email)s - User%(userid)s#%(usershortname)s') % {
'email': self.email,
'userid': self.user_id,
'usershortname': self.user.shortname
}
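# Illustrative sketch of the ``is_primary`` convention documented above: valid
# values are ``True`` and ``None`` (never ``False``), and ``UserEmail.clean()``
# demotes any other primary email for the same user. The user object and the
# email address are hypothetical.
def _example_set_primary_email(user):
    useremail = UserEmail(user=user, email='new-primary@example.com', is_primary=True)
    useremail.full_clean()  # clean() sets is_primary=None on the user's other emails
    useremail.save()
    return useremail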
class UserName(AbstractUserIdentity):
"""
Stores a single username for a :class:`.User`.
The username is used for login, and the primary username
is synced into :obj:`.User.shortname`.
"""
class Meta:
verbose_name = gettext_lazy('Username')
verbose_name_plural = gettext_lazy('Usernames')
unique_together = [
('user', 'is_primary')
]
#: The username of the user.
#: Must be unique.
username = models.CharField(
verbose_name=gettext_lazy('Username'),
unique=True,
max_length=255)
#: Is this the primary username for the user?
#: Valid values are: ``None`` and ``True``, and only
#: one UserName per user can have ``is_primary=True``.
is_primary = models.BooleanField(
null=True, blank=True,
verbose_name=gettext_lazy('Is this your primary username?'),
choices=[
(None, gettext_lazy('No')),
(True, gettext_lazy('Yes'))
],
help_text=gettext_lazy('Your primary username is shown alongside your full '
'name to identify you to teachers, examiners and '
'other students.')
)
def clean(self):
if settings.CRADMIN_LEGACY_USE_EMAIL_AUTH_BACKEND:
raise ValidationError('Can not create UserName objects when the '
'CRADMIN_LEGACY_USE_EMAIL_AUTH_BACKEND is True.')
if self.is_primary is False:
raise ValidationError('is_primary can not be False. Valid values are: True, None.')
if self.is_primary:
other_usernames = UserName.objects.filter(user=self.user)
if self.id is not None:
other_usernames = other_usernames.exclude(id=self.id)
other_usernames.update(is_primary=None)
# user = self.user
# if user.shortname != self.username:
# user.shortname = self.username
# user.save()
def __str__(self):
return gettext_lazy('%(username)s - User%(userid)s') % {
'username': self.username,
'userid': self.user_id
}
class PermissionGroupUser(models.Model):
"""
Defines the many-to-many relationship between :class:`.User`
and :class:`.PermissionGroup`.
"""
class Meta:
verbose_name = gettext_lazy('Permission group user')
verbose_name_plural = gettext_lazy('Permission group users')
unique_together = (
('permissiongroup', 'user'),
)
#: The group.
permissiongroup = models.ForeignKey('devilry_account.PermissionGroup', on_delete=models.CASCADE)
#: The user.
user = models.ForeignKey(User, on_delete=models.CASCADE)
def __str__(self):
return gettext_lazy('%(user)s in group %(permissiongroup)s') % {
'user': self.user.shortname,
'permissiongroup': self.permissiongroup.name,
}
class PermissionGroupQuerySet(models.QuerySet):
def get_name_prefix_from_syncsystem(self, grouptype, basenode):
return '{prefix}-{grouptype}-#{id}-'.format(
prefix=settings.DEVILRY_SYNCSYSTEM_SHORTNAME,
grouptype=grouptype,
id=basenode.id)
def get_name_from_syncsystem(self, grouptype, basenode):
return '{}({})'.format(
self.get_name_prefix_from_syncsystem(grouptype=grouptype,
basenode=basenode),
basenode.get_path())
def create_permissiongroup(self, grouptype, basenode,
name, is_custom_manageable=False):
permissiongroup = self.model(
name=name,
grouptype=grouptype,
is_custom_manageable=is_custom_manageable)
permissiongroup.full_clean()
permissiongroup.save()
if grouptype == PermissionGroup.GROUPTYPE_PERIODADMIN:
period_permissiongroup = PeriodPermissionGroup(
period=basenode,
permissiongroup=permissiongroup)
period_permissiongroup.full_clean()
period_permissiongroup.save()
else:
subject_permissiongroup = SubjectPermissionGroup(
subject=basenode,
permissiongroup=permissiongroup)
subject_permissiongroup.full_clean()
subject_permissiongroup.save()
return permissiongroup
def get_syncsystem_permissiongroup(self, grouptype, basenode):
name_prefix = self.get_name_prefix_from_syncsystem(
grouptype=grouptype,
basenode=basenode)
return PermissionGroup.objects \
.filter(name__startswith=name_prefix,
grouptype=grouptype) \
.get()
def create_or_update_syncsystem_permissiongroup(
self, grouptype, basenode):
try:
permissiongroup = self.get_syncsystem_permissiongroup(
grouptype=grouptype,
basenode=basenode)
except PermissionGroup.DoesNotExist:
name = self.get_name_from_syncsystem(
grouptype=grouptype,
basenode=basenode)
permissiongroup = self.create_permissiongroup(
basenode=basenode,
grouptype=grouptype,
name=name,
is_custom_manageable=False)
return permissiongroup, True
else:
return permissiongroup, False
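# Illustrative usage sketch for the syncsystem helpers above:
# ``create_or_update_syncsystem_permissiongroup`` returns a
# ``(permissiongroup, created)`` tuple, creating the group (and its
# PeriodPermissionGroup relation) only when it does not already exist.
# ``period`` is a hypothetical devilry.apps.core.models.Period instance.
def _example_syncsystem_permissiongroup(period):
    return PermissionGroup.objects.create_or_update_syncsystem_permissiongroup(
        grouptype=PermissionGroup.GROUPTYPE_PERIODADMIN,
        basenode=period)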
class PermissionGroup(models.Model):
"""
Permission group data model.
Each group has a :obj:`~.PermissionGroup.grouptype` which determines
the type of objects it can be added to.
"""
objects = PermissionGroupQuerySet.as_manager()
#: The value for :obj:`~.PermissionGroup.grouptype` that identifies the group as
#: a departmentadmin permission group.
GROUPTYPE_DEPARTMENTADMIN = 'departmentadmin'
#: The value for :obj:`~.PermissionGroup.grouptype` that identifies the group as
#: a subjectadmin permission group.
GROUPTYPE_SUBJECTADMIN = 'subjectadmin'
#: The value for :obj:`~.PermissionGroup.grouptype` that identifies the group as
#: a periodadmin permission group.
GROUPTYPE_PERIODADMIN = 'periodadmin'
#: Choices for :obj:`~.PermissionGroup.grouptype`.
GROUPTYPE_CHOICES = (
(GROUPTYPE_DEPARTMENTADMIN, gettext_lazy('Department administrator group')),
(GROUPTYPE_SUBJECTADMIN, gettext_lazy('Course administrator group')),
(GROUPTYPE_PERIODADMIN, gettext_lazy('Semester administrator group')),
)
class Meta:
verbose_name = gettext_lazy('Permission group')
verbose_name_plural = gettext_lazy('Permission groups')
#: The name of the group. Must be unique.
name = models.CharField(
max_length=255,
null=False, blank=False, unique=True,
verbose_name=gettext_lazy('Name'),
help_text=gettext_lazy('A unique name for this group.'))
#: The time this group was created.
created_datetime = models.DateTimeField(
null=False, auto_now_add=True,
editable=False,
verbose_name=gettext_lazy('Created time'),
help_text=gettext_lazy('The time when this group was created.'))
#: Last time this group was updated.
updated_datetime = models.DateTimeField(
null=False, auto_now=True,
editable=False,
verbose_name=gettext_lazy('Last updated time'),
help_text=gettext_lazy('The time when this group was last updated.'))
#: Last time this group was updated from a third party sync system.
syncsystem_update_datetime = models.DateTimeField(
null=True,
editable=False,
verbose_name=gettext_lazy('Last updated from syncsystem time'),
help_text=gettext_lazy('The time when this group was last updated from a third party sync system.'))
#: The grouptype determines what kind of object a group can be added to:
#:
#: - ``subjectadmin``: Can be assigned to **a single** :class:`devilry.apps.core.models.Subject`.
#: - ``periodadmin``: Can be assigned to **a single** :class:`devilry.apps.core.models.Period`.
#: - ``departmentadmin``: Can be assigned to multiple :class:`devilry.apps.core.models.Subject`.
grouptype = models.CharField(
max_length=30,
choices=GROUPTYPE_CHOICES,
null=False, blank=False,
verbose_name=gettext_lazy('Permission group type'),
help_text=gettext_lazy('Course and semester administrator groups can only be assigned to a single '
'course or semester. Department administrator groups can be assigned to multiple '
'courses. You can not change this for existing permission groups.')
)
#: Is this group manageable by normal admins?
#:
#: Only superusers can edit the group
#: if this is ``False``. Use cases for setting this to ``False``:
#:
#: - Superusers want to create groups that they have full control over.
#: - Groups imported from a third party sync system.
#:
#: If grouptype is ``departmentadmin``, this can not be ``True``.
is_custom_manageable = models.BooleanField(
default=False,
verbose_name=gettext_lazy('Custom manageable?'),
help_text=gettext_lazy('Is this group manageable by non-superusers? Can not be enabled for '
'department administrator groups.')
)
#: Users belonging to this group.
users = models.ManyToManyField(
through=PermissionGroupUser,
to=User,
verbose_name=gettext_lazy('Users in this group'),
blank=True)
def __str__(self):
return '{name} ({grouptype})'.format(
name=self.name,
grouptype=self.get_grouptype_display()
)
def clean(self):
if self.grouptype == self.GROUPTYPE_DEPARTMENTADMIN and self.is_custom_manageable:
raise ValidationError(gettext_lazy('Department administrator groups can not be '
'custom manageable.'))
if self.id is not None:
currently_stored_group = PermissionGroup.objects.get(id=self.id)
if currently_stored_group.grouptype != self.grouptype:
raise ValidationError(gettext_lazy('Permission group type can not be changed.'))
class PeriodPermissionGroupQuerySet(models.QuerySet):
"""
QuerySet for :class:`.PeriodPermissionGroup`.
"""
def user_is_periodadmin_for_period(self, user, period):
"""
Find out if the given ``user`` is ``"periodadmin"`` on the given ``period``.
You normally do not use this directly, but use :meth:`.get_devilryrole_for_user_on_period`.
Args:
period: A :class:`devilry.apps.core.models.Period` object.
user: A User object.
Returns:
bool: ``True`` if the given user is periodadmin on the given period,
otherwise it returns ``False``.
"""
return PeriodPermissionGroup.objects \
.filter(permissiongroup__users=user,
period=period)\
.exists()
def get_devilryrole_for_user_on_period(self, user, period):
"""
Get the devilryrole for the given ``user`` on the given ``period``.
.. seealso:: If you need to find the same information for a subject,
use :meth:`.SubjectPermissionGroupQuerySet.get_devilryrole_for_user_on_subject`.
Args:
period: A :class:`devilry.apps.core.models.Period` object.
user: A User object.
Returns:
str: One of the following looked up in the listed order:
- ``"departmentadmin"``: If the user is superuser or in a
:class:`.SubjectPermissionGroup` with :obj:`.PermissionGroup.GROUPTYPE_DEPARTMENTADMIN`
for the subject owning the period.
- ``"subjectadmin"``: If the user is in a
:class:`.SubjectPermissionGroup` with :obj:`.PermissionGroup.GROUPTYPE_SUBJECTADMIN`
for the subject owning the period.
- ``"periodadmin"``: If the user is in a :class:`.PeriodPermissionGroup` for the period.
- ``None``: If none of the conditions listed above are met.
"""
devilryrole = SubjectPermissionGroup.objects.get_devilryrole_for_user_on_subject(
user=user, subject=period.subject)
if devilryrole:
return devilryrole
elif self.user_is_periodadmin_for_period(user=user, period=period):
return 'periodadmin'
else:
return None
def get_custom_managable_periodpermissiongroup_for_period(self, period):
"""
Get the PeriodPermissionGroup where :obj:`.PermissionGroup.is_custom_manageable` is ``True``
for the given ``period``.
Each :class:`devilry.apps.core.models.Period` has exactly one custom manageable
:class:`.PermissionGroup`, which is the permission group where admins added
by non-superusers via the admin UI are added.
.. note:: The queryset used does ``select_related('permissiongroup')``, so looking
up permissiongroup for the returned PeriodPermissionGroup does not require
an extra database query.
Args:
period: A :class:`devilry.apps.core.models.Period` object.
Raises:
PeriodPermissionGroup.DoesNotExist: If no custom manageable PermissionGroup exists for
the given period.
"""
return self.filter(period=period, permissiongroup__is_custom_manageable=True)\
.select_related('permissiongroup')\
.get()
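# Illustrative sketch of the role lookup described in the docstrings above:
# the devilryrole for a user on a period is one of 'departmentadmin',
# 'subjectadmin', 'periodadmin' or ``None``. ``user`` and ``period`` are
# hypothetical objects.
def _example_period_role_lookup(user, period):
    devilryrole = PeriodPermissionGroup.objects.get_devilryrole_for_user_on_period(
        user=user, period=period)
    return devilryrole or 'no admin role on this semester'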
class PeriodPermissionGroup(models.Model):
"""
Defines the many-to-many relationship between
:class:`devilry.apps.core.Period` and :class:`.PermissionGroup`.
"""
objects = PeriodPermissionGroupQuerySet.as_manager()
class Meta:
verbose_name = gettext_lazy('Semester permission group')
verbose_name_plural = gettext_lazy('Semester permission groups')
unique_together = (
('permissiongroup', 'period'),
)
#: The group.
permissiongroup = models.ForeignKey('devilry_account.PermissionGroup', on_delete=models.CASCADE)
#: The :class:`devilry.apps.core.Period`.
period = models.ForeignKey('core.Period', on_delete=models.CASCADE)
def __str__(self):
if self.permissiongroup.is_custom_manageable:
return gettext_lazy('Semester administrators for %(period)s') % {
'period': self.period.get_path(),
}
else:
return self.permissiongroup.name
def clean(self):
if self.permissiongroup.grouptype != PermissionGroup.GROUPTYPE_PERIODADMIN:
raise ValidationError(gettext_lazy(
'Only semesters can be added to semester administrator permission groups.'))
if self.permissiongroup.is_custom_manageable:
queryset = PeriodPermissionGroup.objects \
.filter(permissiongroup=self.permissiongroup)
if self.id is not None:
queryset = queryset.exclude(id=self.id)
if queryset.exists():
raise ValidationError(gettext_lazy('Only a single editable permission group '
'is allowed for a semester.'))
class SubjectPermissionGroupQuerySet(models.QuerySet):
"""
QuerySet for :class:`.SubjectPermissionGroup`.
"""
def __user_is_admin_on_subjectpermissiongroup(self, user, subject, grouptype):
return SubjectPermissionGroup.objects \
.filter(permissiongroup__users=user,
permissiongroup__grouptype=grouptype,
subject=subject)\
.exists()
def user_is_departmentadmin_for_subject(self, user, subject):
"""
Find out if the given ``user`` is ``"departmentadmin"`` on the given ``subject``.
You normally do not use this directly, but use :meth:`.get_devilryrole_for_user_on_subject`.
Args:
subject: A :class:`devilry.apps.core.models.Subject` object.
user: A User object.
Returns:
bool: ``True`` if the given user is departmentadmin on the given subject,
otherwise it returns ``False``.
"""
return self.__user_is_admin_on_subjectpermissiongroup(
user=user,
subject=subject,
grouptype=PermissionGroup.GROUPTYPE_DEPARTMENTADMIN
)
def user_is_subjectadmin_for_subject(self, user, subject):
"""
Find out if the given ``user`` is ``"subjectadmin"`` on the given ``subject``.
This does not take higher permissions into consideration, so you
should check if the user is departmentadmin with :meth:`.user_is_departmentadmin_for_subject`
if you want to find the highest permission for the user.
You normally do not use this directly, but use :meth:`.get_devilryrole_for_user_on_subject`.
Args:
subject: A :class:`devilry.apps.core.models.Subject` object.
user: A User object.
Returns:
bool: ``True`` if the given user is subjectadmin on the given subject,
otherwise it returns ``False``.
"""
return self.__user_is_admin_on_subjectpermissiongroup(
user=user,
subject=subject,
grouptype=PermissionGroup.GROUPTYPE_SUBJECTADMIN
)
def get_devilryrole_for_user_on_subject(self, user, subject):
"""
Get the devilryrole for the given ``user`` on the given ``subject``.
.. seealso:: If you need to find the same information for a period,
use :meth:`.PeriodPermissionGroupQuerySet.get_devilryrole_for_user_on_period`.
Args:
subject: A :class:`devilry.apps.core.models.Subject` object.
user: A User object.
Returns:
str: One of the following looked up in the listed order:
- ``"departmentadmin"``: If the user is superuser or in a
:class:`.SubjectPermissionGroup` with :obj:`.PermissionGroup.GROUPTYPE_DEPARTMENTADMIN`
for the subject.
- ``"subjectadmin"``: If the user is in a
:class:`.SubjectPermissionGroup` with :obj:`.PermissionGroup.GROUPTYPE_SUBJECTADMIN`
for the subject.
- ``None``: If none of the conditions listed above are met.
"""
if user.is_superuser or self.user_is_departmentadmin_for_subject(user=user, subject=subject):
return 'departmentadmin'
elif self.user_is_subjectadmin_for_subject(user=user, subject=subject):
return 'subjectadmin'
else:
return None
def get_custom_managable_subjectpermissiongroup_for_subject(self, subject):
"""
Get the SubjectPermissionGroup where :obj:`.PermissionGroup.is_custom_manageable` is ``True``
for the given ``subject``.
Each :class:`devilry.apps.core.models.Subject` has exactly one custom manageable
:class:`.PermissionGroup`, which is the permission group where admins added
by non-superusers via the admin UI are added.
.. note:: The queryset used does ``select_related('permissiongroup')``, so looking
up permissiongroup for the returned SubjectPermissionGroup does not require
an extra database query.
Args:
subject: A :class:`devilry.apps.core.models.Subject` object.
Raises:
SubjectPermissionGroup.DoesNotExist: If no custom manageable PermissionGroup exists for
the given subject.
"""
return self.filter(subject=subject, permissiongroup__is_custom_manageable=True)\
.select_related('permissiongroup')\
.get()
class SubjectPermissionGroup(models.Model):
"""
Defines the many-to-many relationship between
:class:`devilry.apps.core.Subject` and :class:`.PermissionGroup`.
"""
objects = SubjectPermissionGroupQuerySet.as_manager()
class Meta:
verbose_name = gettext_lazy('Course permission group')
verbose_name_plural = gettext_lazy('Course permission groups')
unique_together = (
('permissiongroup', 'subject'),
)
#: The permissiongroup.
permissiongroup = models.ForeignKey('devilry_account.PermissionGroup', on_delete=models.CASCADE)
#: The :class:`devilry.apps.core.Subject`.
subject = models.ForeignKey('core.Subject', on_delete=models.CASCADE)
def __str__(self):
if self.permissiongroup.is_custom_manageable:
return gettext_lazy('Course administrators for %(subject)s') % {
'subject': self.subject.short_name,
}
else:
return self.permissiongroup.name
def clean(self):
if self.permissiongroup.grouptype not in [PermissionGroup.GROUPTYPE_SUBJECTADMIN,
PermissionGroup.GROUPTYPE_DEPARTMENTADMIN]:
raise ValidationError(gettext_lazy(
'Courses can only be added to course and department administrator permission groups.'))
if self.permissiongroup.is_custom_manageable:
queryset = SubjectPermissionGroup.objects \
.filter(permissiongroup=self.permissiongroup)
if self.id is not None:
queryset = queryset.exclude(id=self.id)
if queryset.exists():
raise ValidationError(gettext_lazy('Only a single editable permission group '
'is allowed for a course.'))
|
|
# -*- coding: utf-8 -*-
# Copyright (C) 2014 Yahoo! Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import threading
import fasteners
import six
from taskflow import exceptions as exc
from taskflow import flow
from taskflow import logging
from taskflow import task
from taskflow.types import graph as gr
from taskflow.types import tree as tr
from taskflow.utils import iter_utils
from taskflow.utils import misc
from taskflow.flow import (LINK_INVARIANT, LINK_RETRY) # noqa
LOG = logging.getLogger(__name__)
# Constants attached to node attributes in the execution graph (and tree
# node metadata). They are exposed both on the compilation class (so that
# users will not have to import this file to access them) and as module
# constants (so that internal code can access them more easily)...
TASK = 'task'
RETRY = 'retry'
FLOW = 'flow'
# Quite often used together, so make a tuple everyone can share...
ATOMS = (TASK, RETRY)
class Compilation(object):
"""The result of a compilers compile() is this *immutable* object."""
#: Task nodes will have a ``kind`` attribute/metadata key with this value.
TASK = TASK
#: Retry nodes will have a ``kind`` attribute/metadata key with this value.
RETRY = RETRY
#: Flow nodes will have a ``kind`` attribute/metadata key with this value.
FLOW = FLOW
def __init__(self, execution_graph, hierarchy):
self._execution_graph = execution_graph
self._hierarchy = hierarchy
@property
def execution_graph(self):
"""The execution ordering of atoms (as a graph structure)."""
return self._execution_graph
@property
def hierarchy(self):
"""The hierachy of patterns (as a tree structure)."""
return self._hierarchy
def _overlap_occurence_detector(to_graph, from_graph):
"""Returns how many nodes in 'from' graph are in 'to' graph (if any)."""
return iter_utils.count(node for node in from_graph.nodes_iter()
if node in to_graph)
def _add_update_edges(graph, nodes_from, nodes_to, attr_dict=None):
"""Adds/updates edges from nodes to other nodes in the specified graph.
It will connect the 'nodes_from' to the 'nodes_to' if an edge currently
does *not* exist (if it already exists then the edge's attributes
are just updated instead). When an edge is created the provided edge
attributes dictionary will be applied to the new edge between these two
nodes.
"""
# NOTE(harlowja): give each edge its own copy of the attributes so that
# a later modification of one edge's attributes does not affect the others...
for u in nodes_from:
for v in nodes_to:
if not graph.has_edge(u, v):
if attr_dict:
graph.add_edge(u, v, attr_dict=attr_dict.copy())
else:
graph.add_edge(u, v)
else:
# Just update the attr_dict (if any).
if attr_dict:
graph.add_edge(u, v, attr_dict=attr_dict.copy())
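# Illustrative sketch of the add-or-update behaviour documented above, on a
# tiny graph. It assumes ``gr.DiGraph`` accepts the same ``add_node``/``add_edge``
# calls used elsewhere in this module; the node names are hypothetical.
def _example_add_update_edges():
    g = gr.DiGraph(name='example')
    g.add_node('a')
    g.add_node('b')
    # The first call creates the edge with a copy of the attributes...
    _add_update_edges(g, ['a'], ['b'], attr_dict={LINK_INVARIANT: True})
    # ...a second call updates the attributes of the existing edge instead.
    _add_update_edges(g, ['a'], ['b'], attr_dict={LINK_INVARIANT: True, LINK_RETRY: True})
    return g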
class TaskCompiler(object):
"""Non-recursive compiler of tasks."""
@staticmethod
def handles(obj):
return isinstance(obj, task.BaseTask)
def compile(self, task, parent=None):
graph = gr.DiGraph(name=task.name)
graph.add_node(task, kind=TASK)
node = tr.Node(task, kind=TASK)
if parent is not None:
parent.add(node)
return graph, node
class FlowCompiler(object):
"""Recursive compiler of flows."""
@staticmethod
def handles(obj):
return isinstance(obj, flow.Flow)
def __init__(self, deep_compiler_func):
self._deep_compiler_func = deep_compiler_func
def compile(self, flow, parent=None):
"""Decomposes a flow into a graph and scope tree hierarchy."""
graph = gr.DiGraph(name=flow.name)
graph.add_node(flow, kind=FLOW, noop=True)
tree_node = tr.Node(flow, kind=FLOW, noop=True)
if parent is not None:
parent.add(tree_node)
if flow.retry is not None:
tree_node.add(tr.Node(flow.retry, kind=RETRY))
decomposed = dict(
(child, self._deep_compiler_func(child, parent=tree_node)[0])
for child in flow)
decomposed_graphs = list(six.itervalues(decomposed))
graph = gr.merge_graphs(graph, *decomposed_graphs,
overlap_detector=_overlap_occurence_detector)
for u, v, attr_dict in flow.iter_links():
u_graph = decomposed[u]
v_graph = decomposed[v]
_add_update_edges(graph, u_graph.no_successors_iter(),
list(v_graph.no_predecessors_iter()),
attr_dict=attr_dict)
if flow.retry is not None:
graph.add_node(flow.retry, kind=RETRY)
_add_update_edges(graph, [flow], [flow.retry],
attr_dict={LINK_INVARIANT: True})
for node in graph.nodes_iter():
if node is not flow.retry and node is not flow:
graph.node[node].setdefault(RETRY, flow.retry)
from_nodes = [flow.retry]
connected_attr_dict = {LINK_INVARIANT: True, LINK_RETRY: True}
else:
from_nodes = [flow]
connected_attr_dict = {LINK_INVARIANT: True}
connected_to = [
node for node in graph.no_predecessors_iter() if node is not flow
]
if connected_to:
# Ensure all nodes in this graph(s) that have no
# predecessors depend on this flow (or this flow's retry) so that
# we can depend on the flow being traversed before its
# children (even though at the current time it will be skipped).
_add_update_edges(graph, from_nodes, connected_to,
attr_dict=connected_attr_dict)
return graph, tree_node
class PatternCompiler(object):
"""Compiles a flow pattern (or task) into a compilation unit.
Let's dive into the basic idea for how this works:
The compiler here is provided a 'root' object via its __init__ method,
this object could be a task, or a flow (one of the supported patterns),
the end-goal is to produce a :py:class:`.Compilation` object as the result
with the needed components. If this is not possible a
:py:class:`~.taskflow.exceptions.CompilationFailure` will be raised.
In the case where an **unknown** type is requested to be compiled,
a ``TypeError`` will be raised, and when a duplicate object (one that
has **already** been compiled) is encountered, a ``ValueError`` is raised.
The complexity of this comes into play when the 'root' is a flow that
itself contains other nested flows (and so on); to compile this object and
its contained objects into a graph that *preserves* the constraints the
pattern mandates, we have to go through a recursive algorithm that creates
subgraphs for each nesting level, and then on the way back up through
the recursion (now with a decomposed mapping from contained patterns or
atoms to their corresponding subgraphs) we have to connect the
subgraphs (and the atom(s) therein) that were decomposed for a pattern
correctly into a new graph and then ensure the pattern-mandated
constraints are retained. Finally we return to the
caller (and they will do the same thing up until the root node, at which
point one graph has been created with all contained atoms in the
ordering mandated by the pattern/nested patterns).
Also maintained in the :py:class:`.Compilation` object is a hierarchy of
the nesting of items (which is also built up during the above mentioned
recursion, via a much simpler algorithm); this is typically used later to
determine the prior atoms of a given atom when looking up values that can
be provided to that atom for execution (see the scopes.py file for how this
works). Note that although you *could* think that the graph itself could be
used for this, which in some ways it can (for limited usage) the hierarchy
retains the nested structure (which is useful for scoping analysis/lookup)
to be able to provide back an iterator that gives back the scopes visible
at each level (the graph does not have this information once flattened).
Let's take an example:
Given the pattern ``f(a(b, c), d)`` where ``f`` is a
:py:class:`~taskflow.patterns.linear_flow.Flow` with items ``a(b, c)``
where ``a`` is a :py:class:`~taskflow.patterns.linear_flow.Flow` composed
of tasks ``(b, c)`` and task ``d``.
The algorithm that will be performed (mirroring the above described logic)
will go through the following steps (the tree hierarchy building is left
out as that is more obvious)::
Compiling f
- Decomposing flow f with no parent (must be the root)
- Compiling a
- Decomposing flow a with parent f
- Compiling b
- Decomposing task b with parent a
- Decomposed b into:
Name: b
Nodes: 1
- b
Edges: 0
- Compiling c
- Decomposing task c with parent a
- Decomposed c into:
Name: c
Nodes: 1
- c
Edges: 0
- Relinking decomposed b -> decomposed c
- Decomposed a into:
Name: a
Nodes: 2
- b
- c
Edges: 1
b -> c ({'invariant': True})
- Compiling d
- Decomposing task d with parent f
- Decomposed d into:
Name: d
Nodes: 1
- d
Edges: 0
- Relinking decomposed a -> decomposed d
- Decomposed f into:
Name: f
Nodes: 3
- c
- b
- d
Edges: 2
c -> d ({'invariant': True})
b -> c ({'invariant': True})
"""
def __init__(self, root, freeze=True):
self._root = root
self._history = set()
self._freeze = freeze
self._lock = threading.Lock()
self._compilation = None
self._matchers = (FlowCompiler(self._compile), TaskCompiler())
self._level = 0
def _compile(self, item, parent=None):
"""Compiles a item (pattern, task) into a graph + tree node."""
for m in self._matchers:
if m.handles(item):
self._pre_item_compile(item)
graph, node = m.compile(item, parent=parent)
self._post_item_compile(item, graph, node)
return graph, node
else:
raise TypeError("Unknown object '%s' (%s) requested to compile"
% (item, type(item)))
def _pre_item_compile(self, item):
"""Called before a item is compiled; any pre-compilation actions."""
if item in self._history:
raise ValueError("Already compiled item '%s' (%s), duplicate"
" and/or recursive compiling is not"
" supported" % (item, type(item)))
self._history.add(item)
if LOG.isEnabledFor(logging.BLATHER):
LOG.blather("%sCompiling '%s'", " " * self._level, item)
self._level += 1
def _post_item_compile(self, item, graph, node):
"""Called after a item is compiled; doing post-compilation actions."""
self._level -= 1
if LOG.isEnabledFor(logging.BLATHER):
prefix = ' ' * self._level
LOG.blather("%sDecomposed '%s' into:", prefix, item)
prefix = ' ' * (self._level + 1)
LOG.blather("%sGraph:", prefix)
for line in graph.pformat().splitlines():
LOG.blather("%s %s", prefix, line)
LOG.blather("%sHierarchy:", prefix)
for line in node.pformat().splitlines():
LOG.blather("%s %s", prefix, line)
def _pre_compile(self):
"""Called before the compilation of the root starts."""
self._history.clear()
self._level = 0
def _post_compile(self, graph, node):
"""Called after the compilation of the root finishes successfully."""
dup_names = misc.get_duplicate_keys(
(node for node, node_attrs in graph.nodes_iter(data=True)
if node_attrs['kind'] in ATOMS),
key=lambda node: node.name)
if dup_names:
raise exc.Duplicate(
"Atoms with duplicate names found: %s" % (sorted(dup_names)))
atoms = iter_utils.count(
node for node, node_attrs in graph.nodes_iter(data=True)
if node_attrs['kind'] in ATOMS)
if atoms == 0:
raise exc.Empty("Root container '%s' (%s) is empty"
% (self._root, type(self._root)))
self._history.clear()
@fasteners.locked
def compile(self):
"""Compiles the contained item into a compiled equivalent."""
if self._compilation is None:
self._pre_compile()
graph, node = self._compile(self._root, parent=None)
self._post_compile(graph, node)
if self._freeze:
graph.freeze()
node.freeze()
self._compilation = Compilation(graph, node)
return self._compilation
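# A minimal usage sketch of the compiler above (a sketch only, with
# assumptions not guaranteed by this file: the class defined above is exposed
# as ``PatternCompiler``, mirroring taskflow's compiler module, and taskflow's
# linear_flow/task modules are importable as shown). It rebuilds the
# ``f(a(b, c), d)`` example from the docstring and compiles it once.
if __name__ == '__main__':
    from taskflow import task
    from taskflow.patterns import linear_flow

    class _Noop(task.Task):
        def execute(self):
            pass

    a = linear_flow.Flow('a').add(_Noop('b'), _Noop('c'))
    f = linear_flow.Flow('f').add(a, _Noop('d'))

    # compile() returns a (frozen) Compilation holding the flattened graph of
    # atoms (b -> c -> d here) plus the nesting hierarchy used for scoping.
    compilation = PatternCompiler(f).compile()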
|
|
from django.core.management.base import BaseCommand, CommandError
from django.utils import timezone
from austria.models import (
    PollingStation, Election, Party, List, Municipality, District,
    RegionalElectoralDistrict, State, PollingStationResult, ListResult)
import json
import xml.etree.ElementTree as ET
import pprint
import requests
import datetime
import hashlib
class Command(BaseCommand):
help = 'Imports the election results'
def add_arguments(self, parser):
parser.add_argument(
'result_file',
nargs='?'
)
parser.add_argument(
'config_file',
nargs='?'
)
def handle(self, *args, **options):
# open config file (an illustrative config sketch follows this method)
with open(options['config_file']) as config_file:
config = json.loads(config_file.read())
config['file_path'] = options['result_file']
config['election_queryset'] = Election.objects.get(short_name=config['election_short'])
config['ts_import'] = timezone.now()
config['log_detail'] = 'middle'
# import election results from file
data = self.open_results_file(config['file_path'])
# mapping of input data keys to database property names
data = self.map_keys(data, config)
# get lists queryset
config['lists_queryset'] = self.get_lists_queryset(data, config)
# write election results to database
self.import_results(data, config)
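# An illustrative config file for this command (a sketch only: the key names
# are inferred from how the command reads its config, and every value below
# is made up):
#
#   {
#     "election_short": "nrw17",
#     "spatial_id": "municipality_code",
#     "eligible_voters": true,
#     "no_list": ["municipality_code", "eligible_voters", "votes",
#                 "valid", "invalid", "timestamp"],
#     "mappings": {
#       "GKZ": "municipality_code",
#       "WBER": "eligible_voters",
#       "ABG": "votes",
#       "GUEL": "valid",
#       "UNGUEL": "invalid",
#       "SPOE": "SPÖ"
#     }
#   }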
def open_results_file(self, local_path):
"""
Get the data from a local directory.
"""
print('Importing data from: {}'.format(local_path))
with open(local_path) as data_file:
data = json.loads(data_file.read())
return data
def map_keys(self, data, config):
"""
Maps keys of input data to database.
"""
new_data = []
# map the keys
mappings = config['mappings']
for mun in data:
tmp = {}
for key in mun.keys():
tmp[mappings[key]] = mun[key]
new_data.append(tmp)
return new_data
def get_lists_queryset(self, data, config):
# get party querysets
mappings = config['mappings']
lists_queryset = {}
for key, val in mappings.items():
if val not in config['no_list']:
try:
lists_queryset[val] = List.objects.get(short_name=val)
except List.DoesNotExist:
print('Error: Electoral List "{}" does not exist.'.format(val))
return lists_queryset
def import_results(self, data, config):
"""
Imports results to database.
"""
ps_not_found = []
psr_num_entries_created = 0
psr_num_entries_updated = 0
lr_num_entries_created = 0
lr_num_entries_updated = 0
for mun in data:
mun_code = mun[config['spatial_id']]
elec_short = config['election_short']
# get timestamp of election result and convert it to server timezone
if 'timestamp' in config:
ts = datetime.datetime.strptime(config['timestamp'], '%Y-%m-%d %H:%M:%SZ')
else:
ts = datetime.datetime.strptime(mun['timestamp'], '%Y-%m-%d %H:%M:%SZ')
ts = timezone.make_aware(ts, timezone.get_current_timezone())
# Get eligible voters. Set to none if not in results
if config['eligible_voters']:
eligible_voters = mun['eligible_voters']
else:
eligible_voters = None
# check type of polling station: municipality, regional_electoral_district, state, district, country
if len(mun_code) == 6:
mun_code = mun_code[1:6]
if config['log_detail'] == 'high':
print("Municipality code shortened from 6 to 5 digits.")
not_country = not mun_code[:1] == '0'
not_state = not mun_code[1:5] == '0000'
not_red = mun_code[1:2].isdigit()
not_district = not mun_code[3:5] == '00'
not_absentee_ballot = not mun_code[3:5] == '99'
if not_country and not_state and not_red and not_district and not_absentee_ballot:
try:
if config['spatial_id'] == 'municipality_code':
ps = PollingStation.objects.get(municipality__code=mun['municipality_code'])
elif config['spatial_id'] == 'municipality_kennzahl':
ps = PollingStation.objects.get(municipality__kennzahl=mun['municipality_kennzahl'])
psr = PollingStationResult.objects.update_or_create(
polling_station = ps,
election = config['election_queryset'],
eligible_voters = eligible_voters,
votes = mun['votes'],
valid = mun['valid'],
invalid = mun['invalid'],
ts_result = ts
)
if psr[1]:
if config['log_detail'] == 'high':
print('New PollingStationResult "' + str(psr[0]) + '" created.')
psr_num_entries_created += 1
else:
if config['log_detail'] == 'high':
print('PollingStationResult "' + str(psr[0]) + '" updated.')
psr_num_entries_updated += 1
for key, value in config['lists_queryset'].items():
if mun[key] == 'None':
votes = None
else:
votes = mun[key]
lr = ListResult.objects.update_or_create(
polling_station_result = psr[0],
election_list = value,
votes = votes
)
if lr[1]:
if config['log_detail'] == 'high':
print('New ListResult "' + str(lr[0]) + '" created.')
lr_num_entries_created += 1
else:
if config['log_detail'] == 'high':
print('ListResult "' + str(lr[0]) + '" updated.')
lr_num_entries_updated += 1
except Exception as e:
if config['log_detail'] in ('middle', 'high'):
print('Warning: PollingStation {} not found.'.format(mun['municipality_code']))
ps_not_found.append(mun['municipality_code'])
else:
print('Code ' + mun_code + ' skipped because it does not refer to a municipality.')
config['psr-entries_created'] = psr_num_entries_created
config['psr-entries_updated'] = psr_num_entries_updated
config['lr-entries_created'] = lr_num_entries_created
config['lr-entries_updated'] = lr_num_entries_updated
print('PollingStationResult table imported: '+ 'new entries: '+str(psr_num_entries_created)+', updated entries: '+str(psr_num_entries_updated))
print('ListResult table imported: '+ 'new entries: '+str(lr_num_entries_created)+', updated entries: '+str(lr_num_entries_updated))
print('These polling stations were not found:', ps_not_found)
# def compute_aggregates(self, config):
# """
# Compute aggregates from results
# """
#
# ele = config['election_queryset']
# # next: von ListResult auf PollingStationResult gehen und dann auch partei stimmen raus holen und aufsummieren
# data = PollingStationResult.objects.select_related('polling_station__municipality__district__state', 'polling_station__municipality__regional_electoral_district').all().filter(election=ele)
# municipalities = []
# for mun in data:
# tmp = {}
# tmp['votes'] = int(mun.votes)
# tmp['valid'] = int(mun.valid)
# tmp['invalid'] = int(mun.invalid)
# tmp['mun_code'] = str(mun.polling_station.municipality)
# tmp['dis_code'] = str(mun.polling_station.municipality.district)
# tmp['red_code'] = str(mun.polling_station.municipality.regional_electoral_district)
# tmp['state_code'] = str(mun.polling_station.municipality.district.state)
# municipalities.append(tmp)
# df = pd.DataFrame(municipalities)
# df['election'] = config['election_queryset']
# print(df)
# dis = df.groupby('dis_code').sum()
# red = df.groupby('red_code').sum()
# state = df.groupby('state_code').sum()
# #print(dis)
# #print(red)
# #print(state)
# red_entries = red.T.to_dict()
# dis_entries = dis.T.to_dict()
# state_entries = state.T.to_dict()
# #print(dis_entries)
# #print(red_entries)
# #print(state_entries)
#
# #DistrictResult.objects.bulk_create(dis_entries)
# #REDResult.objects.bulk_create(red_entries)
# #StateResult.objects.bulk_create(state_entries)
#
# #ListDistrictResult
# #ListREDResult
# #ListStateResult
|
|
#!/usr/bin/env python
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import hashlib
import os
from resource_management.core.resources import Directory
from resource_management.core.resources import File
from resource_management.core.resources.system import Execute
from resource_management.core.source import DownloadSource
from resource_management.core.source import InlineTemplate
from resource_management.core.source import Template
from resource_management.libraries.functions.lzo_utils import install_lzo_if_needed
from resource_management.libraries.functions import format
from resource_management.libraries.functions import compare_versions
from resource_management.libraries.resources.xml_config import XmlConfig
from resource_management.core.resources.packaging import Package
from resource_management.core.shell import as_user
# TODO: see if we can remove this
def oozie(is_server=False):
import params
if is_server:
params.HdfsResource(params.oozie_hdfs_user_dir,
type="directory",
action="create_on_execute",
owner=params.oozie_user,
mode=params.oozie_hdfs_user_mode
)
params.HdfsResource(None, action="execute")
Directory(params.conf_dir,
create_parents = True,
owner = params.oozie_user,
group = params.user_group
)
XmlConfig("oozie-site.xml",
conf_dir = params.conf_dir,
configurations = params.oozie_site,
configuration_attributes=params.config['configuration_attributes']['oozie-site'],
owner = params.oozie_user,
group = params.user_group,
mode = 0660
)
File(format("{conf_dir}/oozie-env.sh"),
owner=params.oozie_user,
content=InlineTemplate(params.oozie_env_sh_template),
group=params.user_group,
)
if params.log4j_props is not None:
File(format("{params.conf_dir}/oozie-log4j.properties"),
mode=0644,
group=params.user_group,
owner=params.oozie_user,
content=params.log4j_props
)
elif (os.path.exists(format("{params.conf_dir}/oozie-log4j.properties"))):
File(format("{params.conf_dir}/oozie-log4j.properties"),
mode=0644,
group=params.user_group,
owner=params.oozie_user
)
File(format("{params.conf_dir}/adminusers.txt"),
mode=0644,
group=params.user_group,
owner=params.oozie_user,
content=Template('adminusers.txt.j2', oozie_user=params.oozie_user)
)
environment = {
"no_proxy": format("{ambari_server_hostname}")
}
if params.jdbc_driver_name == "com.mysql.jdbc.Driver" or \
params.jdbc_driver_name == "org.postgresql.Driver" or \
params.jdbc_driver_name == "oracle.jdbc.driver.OracleDriver":
File(format("/usr/lib/ambari-agent/{check_db_connection_jar_name}"),
content = DownloadSource(format("{jdk_location}/{check_db_connection_jar_name}")),
)
pass
oozie_ownership()
if params.lzo_enabled:
install_lzo_if_needed()
# no_op_test mirrors the definition used in oozie_server_specific(); without
# it this Execute would fail with a NameError when lzo is enabled.
no_op_test = as_user(format("ls {pid_file} >/dev/null 2>&1 && ps -p `cat {pid_file}` >/dev/null 2>&1"), user=params.oozie_user)
Execute(format('{sudo} cp {hadoop_lib_home}/hadoop-lzo*.jar {oozie_lib_dir}'),
not_if = no_op_test,
)
if is_server:
oozie_server_specific()
def download_database_library_if_needed():
"""
Downloads the library used when connecting to the Oozie database, if
necessary. The library is downloaded to 'params.target'.
:return:
"""
import params
# check to see if the JDBC driver name is in the list of ones that need to
# be downloaded
if params.jdbc_driver_name=="com.mysql.jdbc.Driver" or \
params.jdbc_driver_name=="oracle.jdbc.driver.OracleDriver":
File(params.downloaded_custom_connector,
content = DownloadSource(params.driver_curl_source),
)
Execute(('cp', '--remove-destination', params.downloaded_custom_connector, params.target),
#creates=params.target, TODO: uncomment after ranger_hive_plugin will not provide jdbc
path=["/bin", "/usr/bin/"],
sudo = True)
File ( params.target,
owner = params.oozie_user,
group = params.user_group
)
def oozie_ownership():
import params
File ( format("{conf_dir}/hadoop-config.xml"),
owner = params.oozie_user,
group = params.user_group
)
File ( format("{conf_dir}/oozie-default.xml"),
owner = params.oozie_user,
group = params.user_group,
mode = 0644
)
Directory ( format("{conf_dir}/action-conf"),
owner = params.oozie_user,
group = params.user_group
)
File ( format("{conf_dir}/action-conf/hive.xml"),
owner = params.oozie_user,
group = params.user_group
)
def oozie_server_specific():
import params
no_op_test = as_user(format("ls {pid_file} >/dev/null 2>&1 && ps -p `cat {pid_file}` >/dev/null 2>&1"), user=params.oozie_user)
File(params.pid_file,
action="delete",
not_if=no_op_test
)
oozie_server_directories = [format("{oozie_home}/{oozie_tmp_dir}"), params.oozie_pid_dir, params.oozie_log_dir, params.oozie_tmp_dir, params.oozie_data_dir, params.oozie_lib_dir, params.oozie_webapps_dir, params.oozie_webapps_conf_dir, params.oozie_server_dir]
Directory( oozie_server_directories,
owner = params.oozie_user,
group = params.user_group,
mode = 0755,
create_parents = True,
cd_access="a",
)
Directory(params.oozie_libext_dir,
create_parents = True,
)
hashcode_file = format("{oozie_home}/.hashcode")
hashcode = hashlib.md5(format('{oozie_home}/oozie-sharelib.tar.gz')).hexdigest()
skip_recreate_sharelib = format("test -f {hashcode_file} && test -d {oozie_home}/share && [[ `cat {hashcode_file}` == '{hashcode}' ]]")
untar_sharelib = ('tar','-xvf',format('{oozie_home}/oozie-sharelib.tar.gz'),'-C',params.oozie_home)
Execute( untar_sharelib, # time-expensive
not_if = format("{no_op_test} || {skip_recreate_sharelib}"),
sudo = True,
)
configure_cmds = []
#configure_cmds.append(('tar','-xvf',format('{oozie_home}/oozie-sharelib.tar.gz'),'-C',params.oozie_home))
#configure_cmds.append(('cp', params.ext_js_path, params.oozie_libext_dir))
#configure_cmds.append(('chown', format('{oozie_user}:{user_group}'), format('{oozie_libext_dir}/{ext_js_file}')))
configure_cmds.append(('chown', '-RL', format('{oozie_user}:{user_group}'), params.oozie_webapps_conf_dir))
Execute( configure_cmds,
not_if = no_op_test,
sudo = True,
)
if params.jdbc_driver_name=="com.mysql.jdbc.Driver" or \
params.jdbc_driver_name=="oracle.jdbc.driver.OracleDriver":
File(params.downloaded_custom_connector,
content = DownloadSource(params.driver_curl_source),
)
Execute(('cp', '--remove-destination', params.downloaded_custom_connector, params.target),
#creates=params.target, TODO: uncomment after ranger_hive_plugin will not provide jdbc
path=["/bin", "/usr/bin/"],
sudo = True)
File ( params.target,
owner = params.oozie_user,
group = params.user_group
)
#falcon el extension
if params.has_falcon_host:
Execute(format('rm -rf {oozie_libext_dir}/falcon-oozie-el-extension.jar'),)
if params.security_enabled:
Execute(format('/usr/bin/kinit -kt /etc/security/keytabs/hdfs.headless.keytab {hdfs_principal_name}'))
Execute(format('hadoop fs -get /user/falcon/temp/falcon-oozie-el-extension.jar {oozie_libext_dir}'),
not_if = no_op_test,
)
Execute(format('{sudo} chown {oozie_user}:{user_group} {oozie_libext_dir}/falcon-oozie-el-extension.jar'),
not_if = no_op_test,
)
prepare_war_cmd_file = format("{oozie_home}/.prepare_war_cmd")
prepare_war_cmd = format("cd {oozie_tmp_dir} && {oozie_setup_sh} prepare-war {oozie_secure}")
skip_prepare_war_cmd = format("test -f {prepare_war_cmd_file} && [[ `cat {prepare_war_cmd_file}` == '{prepare_war_cmd}' ]]")
Execute(prepare_war_cmd, # time-expensive
user = params.oozie_user,
not_if = format("{no_op_test} || {skip_recreate_sharelib} && {skip_prepare_war_cmd}")
)
File(hashcode_file,
content = hashcode,
mode = 0644,
)
File(prepare_war_cmd_file,
content = prepare_war_cmd,
mode = 0644,
)
# Create hive-site and tez-site configs for oozie
Directory(params.hive_conf_dir,
create_parents = True,
owner = params.oozie_user,
group = params.user_group
)
if 'hive-site' in params.config['configurations']:
XmlConfig("hive-site.xml",
conf_dir=params.hive_conf_dir,
configurations=params.config['configurations']['hive-site'],
configuration_attributes=params.config['configuration_attributes']['hive-site'],
owner=params.oozie_user,
group=params.user_group,
mode=0640
)
'''if 'tez-site' in params.config['configurations']:
XmlConfig( "tez-site.xml",
conf_dir = params.hive_conf_dir,
configurations = params.config['configurations']['tez-site'],
configuration_attributes=params.config['configuration_attributes']['tez-site'],
owner = params.oozie_user,
group = params.user_group,
mode = 0664
)'''
Execute(('chown', '-R', format("{oozie_user}:{user_group}"), params.oozie_server_dir),
sudo=True
)
|
|
""" Tests from Michael Wester's 1999 paper "Review of CAS mathematical
capabilities".
http://www.math.unm.edu/~wester/cas/book/Wester.pdf
See also http://math.unm.edu/~wester/cas_review.html for detailed output of each
tested system.
"""
from sympy import (Rational, symbols, factorial, sqrt, log, exp, oo, product,
binomial, rf, pi, gamma, igcd, factorint, radsimp, combsimp,
npartitions, totient, primerange, factor, simplify, gcd, resultant, expand,
I, trigsimp, tan, sin, cos, diff, nan, limit, EulerGamma, polygamma,
bernoulli, hyper, hyperexpand, besselj, asin, assoc_legendre, Function, re,
im, DiracDelta, chebyshevt, atan, sinh, cosh, floor, ceiling, solve, asinh,
LambertW, N, apart, sqrtdenest, factorial2, powdenest, Mul, S, mpmath, ZZ,
Poly, expand_func, E, Q, And, Or, Le, Lt, Ge, Gt, QQ, ask, refine, AlgebraicNumber,
elliptic_e, elliptic_f, powsimp)
from sympy.functions.combinatorial.numbers import stirling
from sympy.integrals.deltafunctions import deltaintegrate
from sympy.utilities.pytest import XFAIL, slow
from sympy.utilities.iterables import partitions
from sympy.mpmath import mpi, mpc
from sympy.physics.quantum import Commutator
from sympy.assumptions import assuming
from sympy.polys.rings import vring
from sympy.polys.fields import vfield
from sympy.polys.solvers import solve_lin_sys
R = Rational
x, y, z = symbols('x y z')
i, j, k, l, m, n = symbols('i j k l m n', integer=True)
f = Function('f')
g = Function('g')
# A. Boolean Logic and Quantifier Elimination
# Not implemented.
# B. Set Theory
# Not implemented.
# C. Numbers
def test_C1():
assert (factorial(50) ==
30414093201713378043612608166064768844377641568960512000000000000)
def test_C2():
assert (factorint(factorial(50)) == {2: 47, 3: 22, 5: 12, 7: 8,
11: 4, 13: 3, 17: 2, 19: 2, 23: 2, 29: 1, 31: 1, 37: 1,
41: 1, 43: 1, 47: 1})
def test_C3():
assert (factorial2(10), factorial2(9)) == (3840, 945)
# Base conversions; not really implemented by sympy
# Whatever. Take credit!
def test_C4():
assert 0xABC == 2748
def test_C5():
assert 123 == int('234', 7)
def test_C6():
assert int('677', 8) == int('1BF', 16) == 447
def test_C7():
assert log(32768, 8) == 5
def test_C8():
# Modular multiplicative inverse. Would be nice if divmod could do this.
assert ZZ.invert(5, 7) == 3
assert ZZ.invert(5, 6) == 5
def test_C9():
assert igcd(igcd(1776, 1554), 5698) == 74
def test_C10():
x = 0
for n in range(2, 11):
x += R(1, n)
assert x == R(4861, 2520)
def test_C11():
assert R(1, 7) == S('0.[142857]')
def test_C12():
assert R(7, 11) * R(22, 7) == 2
def test_C13():
test = R(10, 7) * (1 + R(29, 1000)) ** R(1, 3)
good = 3 ** R(1, 3)
assert test == good
def test_C14():
assert sqrtdenest(sqrt(2*sqrt(3) + 4)) == 1 + sqrt(3)
def test_C15():
test = sqrtdenest(sqrt(14 + 3*sqrt(3 + 2*sqrt(5 - 12*sqrt(3 - 2*sqrt(2))))))
good = sqrt(2) + 3
assert test == good
def test_C16():
test = sqrtdenest(sqrt(10 + 2*sqrt(6) + 2*sqrt(10) + 2*sqrt(15)))
good = sqrt(2) + sqrt(3) + sqrt(5)
assert test == good
def test_C17():
test = radsimp((sqrt(3) + sqrt(2)) / (sqrt(3) - sqrt(2)))
good = 5 + 2*sqrt(6)
assert test == good
def test_C18():
assert simplify((sqrt(-2 + sqrt(-5)) * sqrt(-2 - sqrt(-5))).expand(complex=True)) == 3
@XFAIL
def test_C19():
assert radsimp(simplify((90 + 35*sqrt(7)) ** R(1, 3))) == 3 + sqrt(7)
@XFAIL
def test_C20():
inside = (135 + 78*sqrt(3))
test = simplify((inside**R(2, 3) + 3) * sqrt(3) / inside**R(1, 3))
assert test == 12
@XFAIL
def test_C21():
assert simplify((41 + 29*sqrt(2)) ** R(1, 5)) == 1 + sqrt(2)
@XFAIL
def test_C22():
test = simplify(((6 - 4*sqrt(2))*log(3 - 2*sqrt(2)) + (3 - 2*sqrt(2))*log(17
- 12*sqrt(2)) + 32 - 24*sqrt(2)) / (48*sqrt(2) - 72))
good = sqrt(2)/3 - log(sqrt(2) - 1)/3
assert test == good
def test_C23():
assert 2 * oo - 3 == oo
@XFAIL
def test_C24():
raise NotImplementedError("2**aleph_null == aleph_1")
# D. Numerical Analysis
def test_D1():
assert 0.0 / sqrt(2) == 0.0
def test_D2():
assert str(exp(-1000000).evalf()) == '3.29683147808856e-434295'
def test_D3():
assert exp(pi*sqrt(163)).evalf(50).num.ae(262537412640768744)
def test_D4():
assert floor(R(-5, 3)) == -2
assert ceiling(R(-5, 3)) == -1
@XFAIL
def test_D5():
raise NotImplementedError("cubic_spline([1, 2, 4, 5], [1, 4, 2, 3], x)(3) == 27/8")
@XFAIL
def test_D6():
raise NotImplementedError("translate sum(a[i]*x**i, (i,1,n)) to FORTRAN")
@XFAIL
def test_D7():
raise NotImplementedError("translate sum(a[i]*x**i, (i,1,n)) to C")
@XFAIL
def test_D8():
# One way is to cheat by converting the sum to a string,
# and replacing the '[' and ']' with ''.
# E.g., horner(S(str(_).replace('[','').replace(']','')))
raise NotImplementedError("apply Horner's rule to sum(a[i]*x**i, (i,1,5))")
@XFAIL
def test_D9():
raise NotImplementedError("translate D8 to FORTRAN")
@XFAIL
def test_D10():
raise NotImplementedError("translate D8 to C")
@XFAIL
def test_D11():
#Is there a way to use count_ops?
raise NotImplementedError("flops(sum(product(f[i][k], (i,1,k)), (k,1,n)))")
@XFAIL
def test_D12():
assert (mpi(-4, 2) * x + mpi(1, 3)) ** 2 == mpi(-8, 16)*x**2 + mpi(-24, 12)*x + mpi(1, 9)
@XFAIL
def test_D13():
raise NotImplementedError("discretize a PDE: diff(f(x,t),t) == diff(diff(f(x,t),x),x)")
# E. Statistics
# See scipy; all of this is numerical.
# F. Combinatorial Theory.
def test_F1():
assert rf(x, 3) == x*(1 + x)*(2 + x)
def test_F2():
assert expand_func(binomial(n, 3)) == n*(n - 1)*(n - 2)/6
@XFAIL
def test_F3():
assert combsimp(2**n * factorial(n) * factorial2(2*n - 1)) == factorial(2*n)
@XFAIL
def test_F4():
assert combsimp((2**n * factorial(n) * product(2*k - 1, (k, 1, n)))) == factorial(2*n)
@XFAIL
def test_F5():
assert gamma(n + R(1, 2)) / sqrt(pi) / factorial(n) == factorial(2*n)/2**(2*n)/factorial(n)**2
def test_F6():
partTest = [p.copy() for p in partitions(4)]
partDesired = [{4: 1}, {1: 1, 3: 1}, {2: 2}, {1: 2, 2:1}, {1: 4}]
assert partTest == partDesired
def test_F7():
assert npartitions(4) == 5
def test_F8():
assert stirling(5, 2, signed=True) == -50 # if signed, then kind=1
def test_F9():
assert totient(1776) == 576
# G. Number Theory
def test_G1():
assert list(primerange(999983, 1000004)) == [999983, 1000003]
@XFAIL
def test_G2():
raise NotImplementedError("find the primitive root of 191 == 19")
@XFAIL
def test_G3():
raise NotImplementedError("(a+b)**p mod p == a**p + b**p mod p; p prime")
# ... G20 Modular equations and continued fractions are not implemented.
# H. Algebra
def test_H1():
assert simplify(2*2**n) == simplify(2**(n + 1))
assert powdenest(2*2**n) == simplify(2**(n + 1))
def test_H2():
assert powsimp(4 * 2**n) == 2**(n + 2)
def test_H3():
assert (-1)**(n*(n + 1)) == 1
def test_H4():
expr = factor(6*x - 10)
assert type(expr) is Mul
assert expr.args[0] == 2
assert expr.args[1] == 3*x - 5
p1 = 64*x**34 - 21*x**47 - 126*x**8 - 46*x**5 - 16*x**60 - 81
p2 = 72*x**60 - 25*x**25 - 19*x**23 - 22*x**39 - 83*x**52 + 54*x**10 + 81
q = 34*x**19 - 25*x**16 + 70*x**7 + 20*x**3 - 91*x - 86
def test_H5():
assert gcd(p1, p2, x) == 1
def test_H6():
assert gcd(expand(p1 * q), expand(p2 * q)) == q
def test_H7():
p1 = 24*x*y**19*z**8 - 47*x**17*y**5*z**8 + 6*x**15*y**9*z**2 - 3*x**22 + 5
p2 = 34*x**5*y**8*z**13 + 20*x**7*y**7*z**7 + 12*x**9*y**16*z**4 + 80*y**14*z
assert gcd(p1, p2, x, y, z) == 1
def test_H8():
p1 = 24*x*y**19*z**8 - 47*x**17*y**5*z**8 + 6*x**15*y**9*z**2 - 3*x**22 + 5
p2 = 34*x**5*y**8*z**13 + 20*x**7*y**7*z**7 + 12*x**9*y**16*z**4 + 80*y**14*z
q = 11*x**12*y**7*z**13 - 23*x**2*y**8*z**10 + 47*x**17*y**5*z**8
assert gcd(p1 * q, p2 * q, x, y, z) == q
def test_H9():
p1 = 2*x**(n + 4) - x**(n + 2)
p2 = 4*x**(n + 1) + 3*x**n
assert gcd(p1, p2) == x**n
def test_H10():
p1 = 3*x**4 + 3*x**3 + x**2 - x - 2
p2 = x**3 - 3*x**2 + x + 5
assert resultant(p1, p2, x) == 0
def test_H11():
assert resultant(p1 * q, p2 * q, x) == 0
def test_H12():
num = x**2 - 4
den = x**2 + 4*x + 4
assert simplify(num/den) == (x - 2)/(x + 2)
@XFAIL
def test_H13():
assert simplify((exp(x) - 1) / (exp(x/2) + 1)) == exp(x/2) - 1
def test_H14():
p = (x + 1) ** 20
ep = expand(p)
assert ep == (1 + 20*x + 190*x**2 + 1140*x**3 + 4845*x**4 + 15504*x**5
+ 38760*x**6 + 77520*x**7 + 125970*x**8 + 167960*x**9 + 184756*x**10
+ 167960*x**11 + 125970*x**12 + 77520*x**13 + 38760*x**14 + 15504*x**15
+ 4845*x**16 + 1140*x**17 + 190*x**18 + 20*x**19 + x**20)
dep = diff(ep, x)
assert dep == (20 + 380*x + 3420*x**2 + 19380*x**3 + 77520*x**4
+ 232560*x**5 + 542640*x**6 + 1007760*x**7 + 1511640*x**8 + 1847560*x**9
+ 1847560*x**10 + 1511640*x**11 + 1007760*x**12 + 542640*x**13
+ 232560*x**14 + 77520*x**15 + 19380*x**16 + 3420*x**17 + 380*x**18
+ 20*x**19)
assert factor(dep) == 20*(1 + x)**19
def test_H15():
assert simplify((Mul(*[x - r for r in solve(x**3 + x**2 - 7)]))) == x**3 + x**2 - 7
def test_H16():
assert factor(x**100 - 1) == ((x - 1)*(x + 1)*(x**2 + 1)*(x**4 - x**3
+ x**2 - x + 1)*(x**4 + x**3 + x**2 + x + 1)*(x**8 - x**6 + x**4
- x**2 + 1)*(x**20 - x**15 + x**10 - x**5 + 1)*(x**20 + x**15 + x**10
+ x**5 + 1)*(x**40 - x**30 + x**20 - x**10 + 1))
@slow
def test_H17():
assert simplify(factor(expand(p1 * p2)) - p1*p2) == 0
@XFAIL
def test_H18():
# Factor over complex rationals.
test = factor(4*x**4 + 8*x**3 + 77*x**2 + 18*x + 53)
good = (2*x + 3*I)*(2*x - 3*I)*(x + 1 - 4*I)*(x + 1 + 4*I)
assert test == good
def test_H19():
a = symbols('a')
# The idea is to let a**2 == 2, then solve 1/(a-1). Answer is a+1")
assert Poly(a - 1).invert(Poly(a**2 - 2)) == a + 1
@XFAIL
def test_H20():
raise NotImplementedError("let a**2==2; (x**3 + (a-2)*x**2 - "
+ "(2*a+3)*x - 3*a) / (x**2-2) = (x**2 - 2*x - 3) / (x-a)")
@XFAIL
def test_H21():
raise NotImplementedError("evaluate (b+c)**4 assuming b**3==2, c**2==3. \
Answer is 2*b + 8*c + 18*b**2 + 12*b*c + 9")
def test_H22():
assert factor(x**4 - 3*x**2 + 1, modulus=5) == (x - 2)**2 * (x + 2)**2
def test_H23():
f = x**11 + x + 1
g = (x**2 + x + 1) * (x**9 - x**8 + x**6 - x**5 + x**3 - x**2 + 1)
assert factor(f, modulus=65537) == g
def test_H24():
phi = AlgebraicNumber(S.GoldenRatio.expand(func=True), alias='phi')
assert factor(x**4 - 3*x**2 + 1, extension=phi) == \
(x - phi)*(x + 1 - phi)*(x - 1 + phi)*(x + phi)
@slow
def test_H25():
e = (x - 2*y**2 + 3*z**3) ** 20
assert factor(expand(e)) == e
@slow
def test_H26():
g = expand((sin(x) - 2*cos(y)**2 + 3*tan(z)**3)**20)
assert factor(g, expand=False) == (-sin(x) + 2*cos(y)**2 - 3*tan(z)**3)**20
@slow
def test_H27():
f = 24*x*y**19*z**8 - 47*x**17*y**5*z**8 + 6*x**15*y**9*z**2 - 3*x**22 + 5
g = 34*x**5*y**8*z**13 + 20*x**7*y**7*z**7 + 12*x**9*y**16*z**4 + 80*y**14*z
h = -2*z*y**7 \
*(6*x**9*y**9*z**3 + 10*x**7*z**6 + 17*y*x**5*z**12 + 40*y**7) \
*(3*x**22 + 47*x**17*y**5*z**8 - 6*x**15*y**9*z**2 - 24*x*y**19*z**8 - 5)
assert factor(expand(f*g)) == h
@XFAIL
def test_H28():
raise NotImplementedError("expand ((1 - c**2)**5 * (1 - s**2)**5 * "
+ "(c**2 + s**2)**10) with c**2 + s**2 = 1. Answer is c**10*s**10.")
@XFAIL
def test_H29():
assert factor(4*x**2 - 21*x*y + 20*y**2, modulus=3) == (x + y)*(x - y)
def test_H30():
test = factor(x**3 + y**3, extension=sqrt(-3))
answer = (x + y)*(x + y*(-R(1, 2) - sqrt(3)/2*I))*(x + y*(-R(1, 2) + sqrt(3)/2*I))
assert answer == test
def test_H31():
f = (x**2 + 2*x + 3)/(x**3 + 4*x**2 + 5*x + 2)
g = 2 / (x + 1)**2 - 2 / (x + 1) + 3 / (x + 2)
assert apart(f) == g
@XFAIL
def test_H32(): # issue 3459
raise NotImplementedError("[A*B*C - (A*B*C)**(-1)]*A*C*B (product \
of a non-commuting product and its inverse)")
def test_H33():
A, B, C = symbols('A, B, C', commutative=False)
assert (Commutator(A, Commutator(B, C))
+ Commutator(B, Commutator(C, A))
+ Commutator(C, Commutator(A, B))).doit().expand() == 0
# I. Trigonometry
@XFAIL
def test_I1():
assert tan(7*pi/10) == -sqrt(1 + 2/sqrt(5))
@XFAIL
def test_I2():
assert sqrt((1 + cos(6))/2) == -cos(3)
def test_I3():
assert cos(n*pi) + sin((4*n - 1)*pi/2) == (-1)**n - 1
def test_I4():
assert refine(cos(pi*cos(n*pi)) + sin(pi/2*cos(n*pi)), Q.integer(n)) == (-1)**n - 1
@XFAIL
def test_I5():
assert sin((n**5/5 + n**4/2 + n**3/3 - n/30) * pi) == 0
@XFAIL
def test_I6():
raise NotImplementedError("assuming -3*pi<x<-5*pi/2, abs(cos(x)) == -cos(x), abs(sin(x)) == -sin(x)")
@XFAIL
def test_I7():
assert cos(3*x)/cos(x) == cos(x)**2 - 3*sin(x)**2
@XFAIL
def test_I8():
assert cos(3*x)/cos(x) == 2*cos(2*x) - 1
@XFAIL
def test_I9():
# Supposed to do this with rewrite rules.
assert cos(3*x)/cos(x) == cos(x)**2 - 3*sin(x)**2
def test_I10():
assert trigsimp((tan(x)**2 + 1 - cos(x)**-2) / (sin(x)**2 + cos(x)**2 - 1)) == nan
#@XFAIL
#def test_I11():
# assert limit((tan(x)**2 + 1 - cos(x)**-2) / (sin(x)**2 + cos(x)**2 - 1), x, 0) != 0
@XFAIL
def test_I12():
try:
# This should fail or return nan or something.
diff((tan(x)**2 + 1 - cos(x)**-2) / (sin(x)**2 + cos(x)**2 - 1), x)
except:
assert True
else:
assert False, "taking the derivative with a fraction equivalent to 0/0 should fail"
# J. Special functions.
def test_J1():
assert bernoulli(16) == R(-3617, 510)
def test_J2():
assert diff(elliptic_e(x, y**2), y) == (elliptic_e(x, y**2) - elliptic_f(x, y**2))/y
@XFAIL
def test_J3():
raise NotImplementedError("Jacobi elliptic functions: diff(dn(u,k), u) == -k**2*sn(u,k)*cn(u,k)")
def test_J4():
assert gamma(R(-1, 2)) == -2*sqrt(pi)
def test_J5():
assert polygamma(0, R(1, 3)) == -EulerGamma - pi/2*sqrt(R(1, 3)) - R(3, 2)*log(3)
def test_J6():
assert mpmath.besselj(2, 1 + 1j).ae(mpc('0.04157988694396212', '0.24739764151330632'))
def test_J7():
assert simplify(besselj(R(-5,2), pi/2)) == 12/(pi**2)
def test_J8():
p = besselj(R(3,2), z)
q = (sin(z)/z - cos(z))/sqrt(pi*z/2)
assert simplify(expand_func(p) -q) == 0
def test_J9():
assert besselj(0, z).diff(z) == - besselj(1, z)
def test_J10():
mu, nu = symbols('mu, nu', integer=True)
assert assoc_legendre(nu, mu, 0) == 2**mu*sqrt(pi)/gamma((nu - mu)/2 + 1)/gamma((-nu - mu + 1)/2)
def test_J11():
assert simplify(assoc_legendre(3, 1, x)) == simplify(-R(3, 2)*sqrt(1 - x**2)*(5*x**2 - 1))
@slow
def test_J12():
assert simplify(chebyshevt(1008, x) - 2*x*chebyshevt(1007, x) + chebyshevt(1006, x)) == 0
def test_J13():
a = symbols('a', integer=True, negative=False)
assert chebyshevt(a, -1) == (-1)**a
def test_J14():
p = hyper([S(1)/2, S(1)/2], [S(3)/2], z**2)
assert hyperexpand(p) == asin(z)/z
@XFAIL
def test_J15():
raise NotImplementedError("F((n+2)/2,-(n-2)/2,R(3,2),sin(z)**2) == sin(n*z)/(n*sin(z)*cos(z)); F(.) is hypergeometric function")
@XFAIL
def test_J16():
raise NotImplementedError("diff(zeta(x), x) @ x=0 == -log(2*pi)/2")
@XFAIL
def test_J17():
assert deltaintegrate(f((x + 2)/5)*DiracDelta((x - 2)/3) - g(x)*diff(DiracDelta(x - 1), x), (x, 0, 3))
@XFAIL
def test_J18():
raise NotImplementedError("define an antisymmetric function")
# K. The Complex Domain
def test_K1():
z1, z2 = symbols('z1, z2', complex=True)
assert re(z1 + I*z2) == -im(z2) + re(z1)
assert im(z1 + I*z2) == im(z1) + re(z2)
@XFAIL # abs(...).n() does evaluate to 1.00000...
def test_K2():
assert abs(3 - sqrt(7) + I*sqrt(6*sqrt(7) - 15)) == 1
@XFAIL
def test_K3():
a, b = symbols('a, b', real=True)
assert simplify(abs(1/(a + I/a + I*b))) == 1/sqrt(a**2 + (I/a + b)**2)
def test_K4():
assert log(3 + 4*I).expand(complex=True) == log(5) + I*atan(R(4, 3))
def test_K5():
x, y = symbols('x, y', real=True)
assert tan(x + I*y).expand(complex=True) == sin(x)*cos(x) / (cos(x)**2 +
sinh(y)**2) + I*sinh(y)*cosh(y) / (cos(x)**2 + sinh(y)**2)
def test_K6():
assert sqrt(x*y*abs(z)**2)/(sqrt(x)*abs(z)) == sqrt(x*y)/sqrt(x)
assert sqrt(x*y*abs(z)**2)/(sqrt(x)*abs(z)) != sqrt(y)
def test_K7():
y = symbols('y', real=True, negative=False)
expr = sqrt(x*y*abs(z)**2)/(sqrt(x)*abs(z))
sexpr = simplify(expr)
assert sexpr == sqrt(y)
@XFAIL
def test_K8():
z = symbols('z', complex=True)
assert simplify(sqrt(1/z) - 1/sqrt(z)) != 0 # Passes
z = symbols('z', complex=True, negative=False)
assert simplify(sqrt(1/z) - 1/sqrt(z)) == 0 # Fails
def test_K9():
z = symbols('z', real=True, positive=True)
assert simplify(sqrt(1/z) - 1/sqrt(z)) == 0
def test_K10():
z = symbols('z', real=True, negative=True)
assert simplify(sqrt(1/z) + 1/sqrt(z)) == 0
# This goes up to K25
# L. Determining Zero Equivalence
def test_L1():
assert sqrt(997) - (997**3)**R(1, 6) == 0
def test_L2():
assert sqrt(999983) - (999983**3)**R(1, 6) == 0
def test_L3():
assert simplify((2**R(1, 3) + 4**R(1, 3))**3 - 6*(2**R(1, 3) + 4**R(1, 3)) - 6) == 0
def test_L4():
assert trigsimp(cos(x)**3 + cos(x)*sin(x)**2 - cos(x)) == 0
@XFAIL
def test_L5():
assert log(tan(R(1, 2)*x + pi/4)) - asinh(tan(x)) == 0
def test_L6():
assert (log(tan(x/2 + pi/4)) - asinh(tan(x))).diff(x).subs({x: 0}) == 0
@XFAIL
def test_L7():
assert simplify(log((2*sqrt(x) + 1)/(sqrt(4*x + 4*sqrt(x) + 1)))) == 0
@XFAIL
def test_L8():
assert simplify((4*x + 4*sqrt(x) + 1)**(sqrt(x)/(2*sqrt(x) + 1)) \
*(2*sqrt(x) + 1)**(1/(2*sqrt(x) + 1)) - 2*sqrt(x) - 1) == 0
@XFAIL
def test_L9():
z = symbols('z', complex=True)
assert simplify(2**(1 - z)*gamma(z)*zeta(z)*cos(z*pi/2) - pi**2*zeta(1 - z)) == 0
# M. Equations
@XFAIL
def test_M1():
assert Equality(x, 2)/2 + Equality(1, 1) == Equality(x/2 + 1, 2)
def test_M2():
# The roots of this equation should all be real. Note that this doesn't test
# that they are correct.
sol = solve(3*x**3 - 18*x**2 + 33*x - 19, x)
assert all(expand(x, complex=True).is_real for x in sol)
@XFAIL
def test_M5():
assert solve(x**6 - 9*x**4 - 4*x**3 + 27*x**2 - 36*x - 23, x) == [
    2**(1/3) + sqrt(3), 2**(1/3) - sqrt(3),
    +sqrt(3) - 1/2**(2/3) + I*sqrt(3)/2**(2/3),
    +sqrt(3) - 1/2**(2/3) - I*sqrt(3)/2**(2/3),
    -sqrt(3) - 1/2**(2/3) + I*sqrt(3)/2**(2/3),
    -sqrt(3) - 1/2**(2/3) - I*sqrt(3)/2**(2/3)]
def test_M6():
assert set(solve(x**7 - 1, x)) == set([cos(n*2*pi/7) + I*sin(n*2*pi/7) for n in range(0, 7)])
# The paper asks for exp terms, but sin's and cos's may be acceptable
def test_M7():
assert set(solve(x**8 - 8*x**7 + 34*x**6 - 92*x**5 + 175*x**4 - 236*x**3 +
226*x**2 - 140*x + 46, x)) == set([
1 + sqrt(2)*I*sqrt(sqrt(-3 + 4*sqrt(3)) + 3)/2,
1 + sqrt(2)*sqrt(-3 + sqrt(-3 + 4*sqrt(3)))/2,
1 - sqrt(2)*sqrt(-3 + I*sqrt(3 + 4*sqrt(3)))/2,
1 - sqrt(2)*I*sqrt(sqrt(-3 + 4*sqrt(3)) + 3)/2,
1 + sqrt(2)*sqrt(-3 - I*sqrt(3 + 4*sqrt(3)))/2,
1 + sqrt(2)*sqrt(-3 + I*sqrt(3 + 4*sqrt(3)))/2,
1 - sqrt(2)*sqrt(-3 - I*sqrt(3 + 4*sqrt(3)))/2,
1 - sqrt(2)*sqrt(-3 + sqrt(-3 + 4*sqrt(3)))/2,
])
@XFAIL # There are an infinite number of solutions.
def test_M8():
z = symbols('z', complex=True)
assert set(solve(exp(2*x) + 2*exp(x) + 1 - z, x)) == \
set([log(1 + z - 2*sqrt(z))/2, log(1 + z + 2*sqrt(z))/2])
# This one could be simplified better (the 1/2 could be pulled into the log
# as a sqrt, and the function inside the log can be factored as a square,
# giving [log(sqrt(z) - 1), log(sqrt(z) + 1)]). Also, there should be an
# infinite number of solutions.
# x = {log(sqrt(z) - 1), log(sqrt(z) + 1) + i pi} [+ n 2 pi i, + n 2 pi i]
# where n is an arbitrary integer. See url of detailed output above.
@XFAIL
def test_M9():
x = symbols('x', complex=True)
raise NotImplementedError("solve(exp(2-x**2)-exp(-x),x) has complex solutions.")
def test_M10():
assert solve(exp(x) - x, x) == [-LambertW(-1)]
@XFAIL
def test_M11():
assert solve(x**x - x, x) == [-1, 1]
def test_M12():
# TODO: x = [-1, 2*(+/-asinh(1)*I + n*pi}, 3*(pi/6 + n*pi/3)]
assert solve((x + 1)*(sin(x)**2 + 1)**2*cos(3*x)**3, x) == [
-1, pi/6, pi/2,
- I*log(1 + sqrt(2)), I*log(1 + sqrt(2)),
pi - I*log(1 + sqrt(2)), pi + I*log(1 + sqrt(2)),
]
def test_M13():
assert solve(sin(x) - cos(x), x) == [-3*pi/4, pi/4]
def test_M14():
assert solve(tan(x) - 1, x) == [pi/4]
def test_M15():
assert solve(sin(x) - S.Half) == [pi/6, 5*pi/6]
def test_M16():
assert solve(sin(x) - tan(x), x) == [0, 2*pi]
@XFAIL
def test_M17():
assert solve(asin(x) - atan(x),x) == [0]
@XFAIL
def test_M18():
assert solve(acos(x) - atan(x), x) == [sqrt((sqrt(5) - 1)/2)]
def test_M19():
assert solve((x - 2)/x**R(1, 3), x) == [2]
def test_M20():
assert solve(sqrt(x**2 + 1) - x + 2, x) == []
def test_M21():
assert solve(x + sqrt(x) - 2) == [1]
def test_M22():
assert solve(2*sqrt(x) + 3*x**R(1, 4) - 2) == [R(1, 16)]
def test_M23():
x = symbols('x', complex=True)
assert solve(x - 1/sqrt(1 + x**2)) == [
simplify(-I*sqrt((sqrt(5) + 1)/2)),
simplify( sqrt((sqrt(5) - 1)/2)),
]
def test_M24():
solution = solve(1 - binomial(m, 2)*2**k, k)
answer = log(2/(m*(m - 1)), 2)
assert solution[0].expand() == answer.expand()
def test_M25():
a, b, c, d = symbols(':d', positive=True)
x = symbols('x')
assert solve(a*b**x - c*d**x, x)[0].expand() == (log(c/a)/log(b/d)).expand()
def test_M26():
assert solve(sqrt(log(x)) - log(sqrt(x))) == [1, exp(4)]
@XFAIL
def test_M27():
x = symbols('x', real=True)
b = symbols('b', real=True)
with assuming(Q.is_true(sin(cos(1/E**2) + 1) + b > 0)):
assert solve(log(acos(asin(x**R(2, 3) - b) - 1)) + 2, x) == [-b - sin(1 + cos(1/E**2))**R(3, 2), b + sin(1 + cos(1/E**2))**R(3, 2)]
@XFAIL
def test_M28():
assert solve(5*x + exp((x - 5)/2) - 8*x**3, x, assume=Q.real(x)) == [-0.784966, -0.016291, 0.802557]
def test_M29():
assert solve(abs(x - 1) - 2) == [-1, 3]
@XFAIL
def test_M30():
assert solve(abs(2*x + 5) - abs(x - 2),x, assume=Q.real(x)) == [-1, -7]
@XFAIL
def test_M31():
assert solve(1 - abs(x) - max(-x - 2, x - 2),x, assume=Q.real(x)) == [-3/2, 3/2]
@XFAIL
def test_M32():
assert solve(max(2 - x**2, x)- max(-x, (x**3)/9), assume=Q.real(x)) == [-1, 3]
@XFAIL
def test_M33():
# Second answer can be written in another form. The second answer is the root of x**3 + 9*x**2 - 18 = 0 in the interval (-2, -1).
assert solve(max(2 - x**2, x) - x**3/9, assume=Q.real(x)) == [-3, -1.554894, 3]
@XFAIL
def test_M34():
z = symbols('z', complex=True)
assert solve((1 + I) * z + (2 - I) * conjugate(z) + 3*I, z) == [2 + 3*I]
def test_M35():
x, y = symbols('x y', real=True)
assert solve((3*x - 2*y - I*y + 3*I).as_real_imag()) == {y: 3, x: 2}
@XFAIL
def test_M36():
assert solve(f**2 + f - 2, x) == [Eq(f(x), 1), Eq(f(x), -2)]
def test_M37():
assert solve([x + y + z - 6, 2*x + y + 2*z - 10, x + 3*y + z - 10 ]) == {x: -z + 4, y: 2}
@slow
def test_M38():
variables = vring("k1:50", vfield("a,b,c", ZZ).to_domain())
system = [
-b*k8/a + c*k8/a, -b*k11/a + c*k11/a, -b*k10/a + c*k10/a + k2, -k3 - b*k9/a + c*k9/a,
-b*k14/a + c*k14/a, -b*k15/a + c*k15/a, -b*k18/a + c*k18/a - k2, -b*k17/a + c*k17/a,
-b*k16/a + c*k16/a + k4, -b*k13/a + c*k13/a - b*k21/a + c*k21/a + b*k5/a - c*k5/a,
b*k44/a - c*k44/a, -b*k45/a + c*k45/a, -b*k20/a + c*k20/a, -b*k44/a + c*k44/a,
b*k46/a - c*k46/a, b**2*k47/a**2 - 2*b*c*k47/a**2 + c**2*k47/a**2, k3, -k4,
-b*k12/a + c*k12/a - a*k6/b + c*k6/b, -b*k19/a + c*k19/a + a*k7/c - b*k7/c,
b*k45/a - c*k45/a, -b*k46/a + c*k46/a, -k48 + c*k48/a + c*k48/b - c**2*k48/(a*b),
-k49 + b*k49/a + b*k49/c - b**2*k49/(a*c), a*k1/b - c*k1/b, a*k4/b - c*k4/b,
a*k3/b - c*k3/b + k9, -k10 + a*k2/b - c*k2/b, a*k7/b - c*k7/b, -k9, k11,
b*k12/a - c*k12/a + a*k6/b - c*k6/b, a*k15/b - c*k15/b, k10 + a*k18/b - c*k18/b,
-k11 + a*k17/b - c*k17/b, a*k16/b - c*k16/b, -a*k13/b + c*k13/b + a*k21/b - c*k21/b + a*k5/b - c*k5/b,
-a*k44/b + c*k44/b, a*k45/b - c*k45/b, a*k14/c - b*k14/c + a*k20/b - c*k20/b,
a*k44/b - c*k44/b, -a*k46/b + c*k46/b, -k47 + c*k47/a + c*k47/b - c**2*k47/(a*b),
a*k19/b - c*k19/b, -a*k45/b + c*k45/b, a*k46/b - c*k46/b, a**2*k48/b**2 - 2*a*c*k48/b**2 + c**2*k48/b**2,
-k49 + a*k49/b + a*k49/c - a**2*k49/(b*c), k16, -k17, -a*k1/c + b*k1/c,
-k16 - a*k4/c + b*k4/c, -a*k3/c + b*k3/c, k18 - a*k2/c + b*k2/c, b*k19/a - c*k19/a - a*k7/c + b*k7/c,
-a*k6/c + b*k6/c, -a*k8/c + b*k8/c, -a*k11/c + b*k11/c + k17, -a*k10/c + b*k10/c - k18,
-a*k9/c + b*k9/c, -a*k14/c + b*k14/c - a*k20/b + c*k20/b, -a*k13/c + b*k13/c + a*k21/c - b*k21/c - a*k5/c + b*k5/c,
a*k44/c - b*k44/c, -a*k45/c + b*k45/c, -a*k44/c + b*k44/c, a*k46/c - b*k46/c,
-k47 + b*k47/a + b*k47/c - b**2*k47/(a*c), -a*k12/c + b*k12/c, a*k45/c - b*k45/c,
-a*k46/c + b*k46/c, -k48 + a*k48/b + a*k48/c - a**2*k48/(b*c),
a**2*k49/c**2 - 2*a*b*k49/c**2 + b**2*k49/c**2, k8, k11, -k15, k10 - k18,
-k17, k9, -k16, -k29, k14 - k32, -k21 + k23 - k31, -k24 - k30, -k35, k44,
-k45, k36, k13 - k23 + k39, -k20 + k38, k25 + k37, b*k26/a - c*k26/a - k34 + k42,
-2*k44, k45, k46, b*k47/a - c*k47/a, k41, k44, -k46, -b*k47/a + c*k47/a,
k12 + k24, -k19 - k25, -a*k27/b + c*k27/b - k33, k45, -k46, -a*k48/b + c*k48/b,
a*k28/c - b*k28/c + k40, -k45, k46, a*k48/b - c*k48/b, a*k49/c - b*k49/c,
-a*k49/c + b*k49/c, -k1, -k4, -k3, k15, k18 - k2, k17, k16, k22, k25 - k7,
k24 + k30, k21 + k23 - k31, k28, -k44, k45, -k30 - k6, k20 + k32, k27 + b*k33/a - c*k33/a,
k44, -k46, -b*k47/a + c*k47/a, -k36, k31 - k39 - k5, -k32 - k38, k19 - k37,
k26 - a*k34/b + c*k34/b - k42, k44, -2*k45, k46, a*k48/b - c*k48/b,
a*k35/c - b*k35/c - k41, -k44, k46, b*k47/a - c*k47/a, -a*k49/c + b*k49/c,
-k40, k45, -k46, -a*k48/b + c*k48/b, a*k49/c - b*k49/c, k1, k4, k3, -k8,
-k11, -k10 + k2, -k9, k37 + k7, -k14 - k38, -k22, -k25 - k37, -k24 + k6,
-k13 - k23 + k39, -k28 + b*k40/a - c*k40/a, k44, -k45, -k27, -k44, k46,
b*k47/a - c*k47/a, k29, k32 + k38, k31 - k39 + k5, -k12 + k30, k35 - a*k41/b + c*k41/b,
-k44, k45, -k26 + k34 + a*k42/c - b*k42/c, k44, k45, -2*k46, -b*k47/a + c*k47/a,
-a*k48/b + c*k48/b, a*k49/c - b*k49/c, k33, -k45, k46, a*k48/b - c*k48/b,
-a*k49/c + b*k49/c
]
solution = {
k49: 0, k48: 0, k47: 0, k46: 0, k45: 0, k44: 0, k41: 0, k40: 0,
k38: 0, k37: 0, k36: 0, k35: 0, k33: 0, k32: 0, k30: 0, k29: 0,
k28: 0, k27: 0, k25: 0, k24: 0, k22: 0, k21: 0, k20: 0, k19: 0,
k18: 0, k17: 0, k16: 0, k15: 0, k14: 0, k13: 0, k12: 0, k11: 0,
k10: 0, k9: 0, k8: 0, k7: 0, k6: 0, k5: 0, k4: 0, k3: 0,
k2: 0, k1: 0,
k34: b/c*k42, k31: k39, k26: a/c*k42, k23: k39
}
assert solve_lin_sys(system, variables) == solution
def test_M39():
x, y, z = symbols('x y z', complex=True)
assert solve([x**2*y + 3*y*z - 4, -3*x**2*z + 2*y**2 + 1, 2*y*z**2 - z**2 - 1 ]) ==\
[{y: 1, z: 1, x: -1}, {y: 1, z: 1, x: 1},\
{y: sqrt(2)*I, z: R(1,3) - sqrt(2)*I/3, x: -sqrt(-1 - sqrt(2)*I)},\
{y: sqrt(2)*I, z: R(1,3) - sqrt(2)*I/3, x: sqrt(-1 - sqrt(2)*I)},\
{y: -sqrt(2)*I, z: R(1,3) + sqrt(2)*I/3, x: -sqrt(-1 + sqrt(2)*I)},\
{y: -sqrt(2)*I, z: R(1,3) + sqrt(2)*I/3, x: sqrt(-1 + sqrt(2)*I)}]
# N. Inequalities
def test_N1():
assert ask(Q.is_true(E**pi > pi**E))
@XFAIL
def test_N2():
x = symbols('x', real=True)
assert ask(Q.is_true(x**4 - x + 1 > 0))
assert ask(Q.is_true(x**4 - x + 1 > 1)) == False
@XFAIL
def test_N3():
x = symbols('x', real=True)
assert ask(Q.is_true(And(Lt(-1, x), Lt(x, 1))), Q.is_true(abs(x) < 1 ))
@XFAIL
def test_N4():
x, y = symbols('x y', real=True)
assert ask(Q.is_true(2*x**2 > 2*y**2), Q.is_true((x > y) & (y > 0)))
@XFAIL
def test_N5():
x, y, k = symbols('x y k', real=True)
assert ask(Q.is_true(k*x**2 > k*y**2), Q.is_true((x > y) & (y > 0) & (k > 0)))
@XFAIL
def test_N6():
x, y, k, n = symbols('x y k n', real=True)
assert ask(Q.is_true(k*x**n > k*y**n), Q.is_true((x > y) & (y > 0) & (k > 0) & (n > 0)))
@XFAIL
def test_N7():
x, y = symbols('x y', real=True)
assert ask(Q.is_true(y > 0), Q.is_true((x > 1) & (y >= x - 1)))
@XFAIL
def test_N8():
x, y, z = symbols('x y z', real=True)
assert ask(Q.is_true((x == y) & (y == z)), Q.is_true((x >= y) & (y >= z) & (z >= x)))
def test_N9():
with assuming(Q.real(x)):
assert solve(abs(x-1) > 2) == Or(x < -1, x > 3)
def test_N10():
p = (x - 1)*(x - 2)*(x - 3)*(x - 4)*(x - 5)
assert solve(expand(p) < 0, assume=Q.real(x)) == Or( And(Lt(2, x), Lt(x, 3)), And(Lt(4, x), Lt(x, 5)), Lt(x, 1))
def test_N11():
assert solve(6/(x - 3) <= 3, assume=Q.real(x)) == Or(5 <= x, x < 3)
|
|
"""ThreatConnect TI Adversary"""
from ..group import Group
class Adversary(Group):
"""Unique API calls for Adversary API Endpoints
Args:
tcex (TcEx): An instantiated instance of TcEx object.
name (str, kwargs): [Required for Create] The name for this Group.
owner (str, kwargs): The owner for this Group. Defaults to the default Org when not provided.
"""
def __init__(self, tcex, **kwargs):
"""Initialize Class properties"""
super().__init__(
tcex, sub_type='Adversary', api_entity='adversary', api_branch='adversaries', **kwargs
)
def add_asset(self, asset_type, asset_value):
"""Add an asset to the Adversary
Args:
asset_type: (str) Either phone, handle, or url.
asset_value: (str) the value for the asset
Returns:
requests.Response: The response from the API call.
"""
if not self.can_update():
self._tcex.handle_error(910, [self.type])
asset_methods = {
'handle': self.tc_requests.add_adversary_handle_asset,
'phone': self.tc_requests.add_adversary_phone_asset,
'url': self.tc_requests.add_adversary_url_asset,
}
# handle invalid input
if asset_methods.get(asset_type.lower()) is None:
self._tcex.handle_error(
925, ['asset_type', 'assets', 'asset_type', 'asset_type', asset_type]
)
return asset_methods.get(asset_type.lower())(self.unique_id, asset_value)
def add_handle_asset(self, value):
"""Add a Handle asset to the adversary.
Args:
value: The value of the asset
Returns:
requests.Response: The response from the API call.
"""
return self.add_asset('HANDLE', value)
def add_phone_asset(self, value):
"""Add a phone asset to the adversary.
Args:
value: The value of the asset
Returns:
requests.Response: The response from the API call.
"""
return self.add_asset('PHONE', value)
def add_url_asset(self, value):
"""Add a URL asset to the adversary.
Args:
value: The value of the asset
Returns:
requests.Response: The response from the API call.
"""
return self.add_asset('URL', value)
def asset(self, asset_id, asset_type, action='GET'):
"""Get specific Adversary asset type from API
Args:
asset_id: (str) The ID of the asset.
asset_type: (str) Either phone, handle, or url.
action: (str): The HTTP method (e.g., DELETE or GET)
Returns:
requests.Response: The response from the API call.
"""
if not self.can_update():
self._tcex.handle_error(910, [self.type])
asset_methods = {
'handle': self.tc_requests.adversary_handle_asset,
'phone': self.tc_requests.adversary_phone_asset,
'url': self.tc_requests.adversary_url_asset,
}
# handle invalid input
if asset_methods.get(asset_type.lower()) is None:
self._tcex.handle_error(
925, ['asset_type', 'assets', 'asset_type', 'asset_type', asset_type]
)
return asset_methods.get(asset_type.lower())(self.unique_id, asset_id, action=action)
def assets(self, asset_type=None):
"""Retrieve all of the assets of a given asset_type
Args:
asset_type: (str) Either None, PHONE, HANDLE, or URL
Returns:
requests.Response: The response from the API call.
"""
if not self.can_update():
self._tcex.handle_error(910, [self.type])
asset_methods = {
'handle': self.tc_requests.adversary_handle_assets,
'phone': self.tc_requests.adversary_phone_assets,
'url': self.tc_requests.adversary_url_assets,
}
if asset_type is None:
return self.tc_requests.adversary_assets(self.unique_id)
# handle invalid input
if asset_methods.get(asset_type.lower()) is None:
self._tcex.handle_error(
925, ['asset_type', 'assets', 'asset_type', 'asset_type', asset_type]
)
return asset_methods.get(asset_type.lower())(self.unique_id)
def delete_asset(self, asset_id, asset_type):
"""Delete the asset with the provided asset_id.
Args:
asset_id: The id of the asset.
asset_type: The asset type.
Returns:
requests.Response: The response from the API call.
"""
return self.asset(asset_id, asset_type=asset_type, action='DELETE')
def delete_handle_asset(self, asset_id):
"""Delete the handle asset with the passed in id
Args:
asset_id: The id of the asset to be deleted
Returns:
requests.Response: The response from the API call.
"""
return self.delete_asset(asset_id, 'HANDLE')
def delete_phone_asset(self, asset_id):
"""Delete the phone asset with the passed in id
Args:
asset_id: The id of the asset to be deleted
Returns:
requests.Response: The response from the API call.
"""
return self.delete_asset(asset_id, 'PHONE')
def delete_url_asset(self, asset_id):
"""Delete the url asset with the passed in id
Args:
asset_id: The id of the asset to be deleted
Returns:
requests.Response: The response from the API call.
"""
return self.delete_asset(asset_id, 'URL')
def get_asset(self, asset_id, asset_type):
"""Get the asset with the provided asset_id & asset_type.
Args:
asset_id: (str) The ID of the asset.
asset_type: (str) Either None, PHONE, HANDLE, or URL
Returns:
requests.Response: The response from the API call.
"""
return self.asset(asset_id, asset_type=asset_type)
def get_handle_asset(self, asset_id):
"""Get the handle asset with the passed in id
Args:
asset_id: The id of the asset.
Returns:
requests.Response: The response from the API call.
"""
return self.get_asset(asset_id, 'HANDLE')
def get_phone_asset(self, asset_id):
"""Get the phone asset with the passed in id
Args:
asset_id: The id of the asset.
Returns:
requests.Response: The response from the API call.
"""
return self.get_asset(asset_id, 'PHONE')
def get_url_asset(self, asset_id):
"""Get the url asset with the passed in id
Args:
asset_id: The id of the asset.
Returns:
requests.Response: The response from the API call.
"""
return self.get_asset(asset_id, 'URL')
# def handle_asset(self, asset_id, action='GET'):
# """Get the handle asset with the passed in id.
# Args:
# asset_id: The id of the asset.
# action: (str): The HTTP method (e.g., DELETE or GET)
# Returns:
# requests.Response: The response from the API call.
# """
# return self.asset(asset_id, 'HANDLE', action=action)
def handle_assets(self):
"""Return all of the handle assets"""
return self.assets(asset_type='HANDLE')
# def phone_asset(self, asset_id, action='GET'):
# """Get the phone asset with the passed in id.
# Args:
# asset_id: The id of the asset.
# action: (str): The HTTP method (e.g., DELETE or GET)
# Returns:
# requests.Response: The response from the API call.
# """
# return self.asset(asset_id, 'PHONE', action=action)
def phone_assets(self):
"""Return all of the phone assets"""
return self.assets(asset_type='PHONE')
# def url_asset(self, asset_id, action='GET'):
# """Get the url asset with the passed in id.
# Args:
# asset_id: The id of the asset.
# action: (str): The HTTP method (e.g., DELETE or GET)
# Returns:
# requests.Response: The response from the API call.
# """
# return self.asset(asset_id, 'URL', action=action)
def url_assets(self):
"""Return all of the url assets"""
return self.assets(asset_type='URL')
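# A minimal usage sketch (illustrative only: ``tcex`` must be an already
# configured TcEx instance, the adversary group must already exist so that
# can_update() passes, and the name/owner/handle values below are made up):
#
#   adversary = Adversary(tcex, name='example-adversary', owner='Example Org')
#   response = adversary.add_handle_asset('@example-handle')  # add one asset
#   handles = adversary.handle_assets()                       # list them all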
|
|
# The MIT License (MIT)
#
# Copyright (c) 2015 Lee Clemens Computing Services, LLC
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""Test Cases related to reading configuration files
"""
import argparse
import os
import random
import unittest
import imageroller.main
import imageroller.test
from imageroller import ConfigError
AUTH_NO_SECTION = """
[DEFAULT]
ApiUser = {ApiUser}
ApiKey = {ApiKey}
"""
AUTH_NO_USER = """
[AUTH]
ApiUserABSENT = {ApiUser}
ApiKey = {ApiKey}
"""
AUTH_BLANK_USER = """
[AUTH]
ApiUser =
ApiKey = {ApiKey}
"""
AUTH_NO_KEY = """
[AUTH]
ApiUser = {ApiUser}
ApiKeyABSENT = {ApiKey}
"""
AUTH_BLANK_KEY = """
[AUTH]
ApiUser = {ApiUser}
ApiKey =
"""
AUTH_VALID = """
[AUTH]
ApiUser = {ApiUser}
ApiKey = {ApiKey}
"""
CONFIG_NO_DEFAULT = """
[DEFAULT_ABSENT]
foo = bar
"""
CONFIG_NO_WORKERS = """
[DEFAULT]
ConcurrentWorkers_ABSENT = {ConcurrentWorkers}
"""
CONFIG_ZERO_WORKERS = """
[DEFAULT]
ConcurrentWorkers = 0
"""
CONFIG_NO_SERVER = """
[DEFAULT]
ConcurrentWorkers = {ConcurrentWorkers}
"""
CONFIG_SERVER_NO_SAVE_TIMEOUT = """
[DEFAULT]
ConcurrentWorkers = {ConcurrentWorkers}
[{TestServerFQDN}]
RetainImageMinutes = {RetainImageMinutes}
Region = {Region}
"""
CONFIG_SERVER_NO_RETAIN_IMAGE = """
[DEFAULT]
ConcurrentWorkers = {ConcurrentWorkers}
[{TestServerFQDN}]
SaveTimeoutMinutes = {SaveTimeoutMinutes}
Region = {Region}
"""
CONFIG_SERVER_NO_REGION = """
[DEFAULT]
ConcurrentWorkers = {ConcurrentWorkers}
[{TestServerFQDN}]
SaveTimeoutMinutes = {SaveTimeoutMinutes}
RetainImageMinutes = {RetainImageMinutes}
"""
CONFIG_SERVER_VALID_MINIMAL = """
[DEFAULT]
ConcurrentWorkers = {ConcurrentWorkers}
SaveTimeoutMinutes = {SaveTimeoutMinutes}
RetainImageMinutes = {RetainImageMinutes}
[{TestServerFQDN}]
Enabled = True
Region = {Region}
"""
CONFIG_SERVER_VALID_OVERRIDE = """
[DEFAULT]
ConcurrentWorkers = {ConcurrentWorkers}
SaveTimeoutMinutes = {SaveTimeoutMinutes}
RetainImageMinutes = {RetainImageMinutes}
Region = {Region}
[{OverrideNotExistFQDN}]
ConcurrentWorkers = {OverrideConcurrentWorkers}
[{OverrideWorkersFQDN}]
Enabled = True
ConcurrentWorkers = {OverrideConcurrentWorkers}
[{OverrideSaveTimeoutFQDN}]
Enabled = True
SaveTimeoutMinutes = {OverrideSaveTimeoutMinutes}
[{OverrideRetainImageFQDN}]
Enabled = True
RetainImageMinutes = {OverrideRetainImageMinutes}
[{OverrideRegionFQDN}]
Enabled = True
Region = {OverrideRegion}
"""
AUTH_DATA = {
"ApiUser": "TestRAXUsername",
"ApiKey": imageroller.test.generate_api_key()}
CONFIG_DATA = {
"ConcurrentWorkers": random.randint(4, 32),
"TestServerFQDN": "test.example.com",
"SaveTimeoutMinutes": 60,
"RetainImageMinutes": 120,
"Region": "DFW",
# Overridden values for test_server_override()
"OverrideConcurrentWorkers": 3,
"OverrideSaveTimeoutMinutes": 20,
"OverrideRetainImageMinutes": 45,
"OverrideRegion": "IAD",
"OverrideNotExistFQDN": "not.exist.example.com",
"OverrideWorkersFQDN": "workers.example.com",
"OverrideSaveTimeoutFQDN": "save.example.com",
"OverrideRetainImageFQDN": "retain.example.time",
"OverrideRegionFQDN": "region.example.time",
}
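# For illustration, CONFIG_SERVER_VALID_MINIMAL rendered with CONFIG_DATA
# (the write_config() helper is assumed to apply str.format with this data)
# produces a config roughly like:
#
#   [DEFAULT]
#   ConcurrentWorkers = 17   <- random value between 4 and 32
#   SaveTimeoutMinutes = 60
#   RetainImageMinutes = 120
#   [test.example.com]
#   Enabled = True
#   Region = DFW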
class ReadConfigsTestCase(unittest.TestCase):
"""Test Case calling parent function for reading configs
Specific test cases are handled by more specific test cases
"""
@classmethod
def setUpClass(cls):
"""Gets temp file paths for our config files
"""
cls._config = imageroller.test.write_config(
"config", CONFIG_SERVER_VALID_MINIMAL, CONFIG_DATA)
cls._auth = imageroller.test.write_config(
"auth", AUTH_VALID, AUTH_DATA)
@classmethod
def tearDownClass(cls):
"""Cleans up our test config files
"""
os.remove(cls._config)
os.remove(cls._auth)
def test_read_valid_configs(self):
"""Test main call to read_configs() which returns both config and auth
"""
args = argparse.Namespace(server=None, force=False)
with open(self._config) as config_f:
with open(self._auth) as auth_config_f:
(config_data, auth_tuple) = imageroller.main.read_configs(
args,
config_f,
auth_config_f)
self.assertEqual(config_data.concurrent_workers,
CONFIG_DATA["ConcurrentWorkers"])
self.assertEqual(len(config_data.server_data), 1)
self.assertTupleEqual(auth_tuple, (AUTH_DATA["ApiUser"],
AUTH_DATA["ApiKey"]))
class ServerConfigTestCase(unittest.TestCase):
"""Test Case related to reading and parsing the Auth Config
"""
@classmethod
def setUpClass(cls):
"""Gets temp file paths for our config files
"""
cls._no_default = imageroller.test.write_config(
"config", CONFIG_NO_DEFAULT, CONFIG_DATA)
cls._no_workers = imageroller.test.write_config(
"config", CONFIG_NO_WORKERS, CONFIG_DATA)
cls._zero_workers = imageroller.test.write_config(
"config", CONFIG_ZERO_WORKERS, CONFIG_DATA)
cls._no_server = imageroller.test.write_config(
"config", CONFIG_NO_SERVER, CONFIG_DATA)
cls._server_no_save_timeout = imageroller.test.write_config(
"config", CONFIG_SERVER_NO_SAVE_TIMEOUT, CONFIG_DATA)
cls._server_no_retain_image = imageroller.test.write_config(
"config", CONFIG_SERVER_NO_RETAIN_IMAGE, CONFIG_DATA)
cls._server_no_region = imageroller.test.write_config(
"config", CONFIG_SERVER_NO_REGION, CONFIG_DATA)
cls._server_valid_minimal = imageroller.test.write_config(
"config", CONFIG_SERVER_VALID_MINIMAL, CONFIG_DATA)
cls._server_valid_override = imageroller.test.write_config(
"config", CONFIG_SERVER_VALID_OVERRIDE, CONFIG_DATA)
@classmethod
def tearDownClass(cls):
"""Cleans up our test config files
"""
os.remove(cls._no_default)
os.remove(cls._no_workers)
os.remove(cls._zero_workers)
os.remove(cls._no_server)
os.remove(cls._server_no_save_timeout)
os.remove(cls._server_no_retain_image)
os.remove(cls._server_no_region)
os.remove(cls._server_valid_minimal)
os.remove(cls._server_valid_override)
def setUp(self):
# Our test command-line args (functions may safely alter values)
self._cmd_args = argparse.Namespace(server=None, force=False)
def test_no_default(self):
"""Test config with no [DEFAULT] section
        Consequently, the ConcurrentWorkers will not be defined
"""
with self.assertRaises(ConfigError) as cm:
imageroller.main.read_config(
self._cmd_args,
imageroller.test.get_config_parser(self._no_default))
# ConcurrentWorkers is the first value that is checked
self.assertEqual(str(cm.exception),
"Config must contain ConcurrentWorkers")
def test_no_workers(self):
"""Test config with no ConcurrentWorkers key
"""
with self.assertRaises(ConfigError) as cm:
imageroller.main.read_config(
self._cmd_args,
imageroller.test.get_config_parser(self._no_workers))
self.assertEqual(str(cm.exception),
"Config must contain ConcurrentWorkers")
def test_zero_workers(self):
"""Test config with ConcurrentWorkers = 0
"""
with self.assertRaises(ValueError) as cm:
imageroller.main.read_config(
self._cmd_args,
imageroller.test.get_config_parser(self._zero_workers))
self.assertEqual(str(cm.exception),
"Concurrent workers must be greater than 0")
def test_no_server(self):
"""Test config with no server sections
"""
with self.assertRaises(ConfigError) as cm:
imageroller.main.read_config(
self._cmd_args,
imageroller.test.get_config_parser(self._no_server))
self.assertEqual(str(cm.exception),
"You must configure at least one server")
def test_no_server_cmdline(self):
"""Test config with no server sections - cmdline
        A server that is not configured is specified on the command line
"""
invalid_server = "invalid.example.com"
self._cmd_args.server = invalid_server
with self.assertRaises(ConfigError) as cm:
imageroller.main.read_config(
self._cmd_args,
imageroller.test.get_config_parser(self._no_server))
self.assertEqual(
str(cm.exception),
"The specified server is not configured: %s" % invalid_server)
def test_server_no_save_timeout(self):
"""Test server config with no SaveTimeoutMinutes
"""
with self.assertRaises(ConfigError) as cm:
imageroller.main.read_config(self._cmd_args,
imageroller.test.get_config_parser(
self._server_no_save_timeout))
self.assertEqual(
str(cm.exception),
"Server Config for %s is missing SaveTimeoutMinutes" %
CONFIG_DATA["TestServerFQDN"])
def test_server_no_retain_image(self):
"""Test server config with no RetainImageMinutes
"""
with self.assertRaises(ConfigError) as cm:
imageroller.main.read_config(self._cmd_args,
imageroller.test.get_config_parser(
self._server_no_retain_image))
self.assertEqual(
str(cm.exception),
"Server Config for %s is missing RetainImageMinutes" %
CONFIG_DATA["TestServerFQDN"])
def test_server_no_region(self):
"""Test server config with no Region
"""
with self.assertRaises(ConfigError) as cm:
imageroller.main.read_config(self._cmd_args,
imageroller.test.get_config_parser(
self._server_no_region))
self.assertEqual(
str(cm.exception),
"Server Config for %s is missing Region" %
CONFIG_DATA["TestServerFQDN"])
def test_server_valid_minimal(self):
"""Test config with minimal configs
Also tests auto_enable property being set properly
"""
config_data = imageroller.main.read_config(
self._cmd_args,
imageroller.test.get_config_parser(self._server_valid_minimal))
self.assertEqual(config_data.concurrent_workers,
CONFIG_DATA["ConcurrentWorkers"])
self.assertEqual(len(config_data.server_data), 1)
# Test minutes -> seconds in property getters
# pylint: disable=not-an-iterable
for server_data in config_data.server_data:
self.assertEqual(server_data.save_timeout_seconds,
int(CONFIG_DATA["SaveTimeoutMinutes"]) * 60)
self.assertEqual(server_data.retain_image_seconds,
int(CONFIG_DATA["RetainImageMinutes"]) * 60)
self.assertFalse(server_data.auto_enable)
def test_server_valid_cmdline(self):
"""Test config with minimal configs, but specified on command-line
Also tests auto_enable property being set properly
"""
self._cmd_args.server = CONFIG_DATA["TestServerFQDN"]
config_data = imageroller.main.read_config(
self._cmd_args,
imageroller.test.get_config_parser(self._server_valid_minimal))
# pylint: disable=not-an-iterable
for server_data in config_data.server_data:
self.assertEqual(server_data.save_timeout_seconds,
int(CONFIG_DATA["SaveTimeoutMinutes"]) * 60)
self.assertEqual(server_data.retain_image_seconds,
int(CONFIG_DATA["RetainImageMinutes"]) * 60)
self.assertTrue(server_data.auto_enable)
# pylint: disable=not-an-iterable
def test_server_override_general(self):
"""Test that config values are overridden properly
"""
# Sanity check our override values do not overlap
self.assertNotEqual(CONFIG_DATA["ConcurrentWorkers"],
CONFIG_DATA["OverrideConcurrentWorkers"])
self.assertNotEqual(CONFIG_DATA["SaveTimeoutMinutes"],
CONFIG_DATA["OverrideSaveTimeoutMinutes"])
self.assertNotEqual(CONFIG_DATA["RetainImageMinutes"],
CONFIG_DATA["OverrideRetainImageMinutes"])
self.assertNotEqual(CONFIG_DATA["Region"],
CONFIG_DATA["OverrideRegion"])
config_data = imageroller.main.read_config(
self._cmd_args,
imageroller.test.get_config_parser(
self._server_valid_override))
# Verify default disabled server is not included
self.assertNotIn(
CONFIG_DATA["OverrideNotExistFQDN"],
[server_data.name for server_data in
config_data.server_data])
# Sanity check we have every server's config we expect to have
self.assertSetEqual(
set([server_data.name for server_data in
config_data.server_data]),
{CONFIG_DATA["OverrideWorkersFQDN"],
CONFIG_DATA["OverrideSaveTimeoutFQDN"],
CONFIG_DATA["OverrideRetainImageFQDN"],
CONFIG_DATA["OverrideRegionFQDN"]},
)
# Smoke test they are all enabled
self.assertTrue(all([server_data.enabled
for server_data in
config_data.server_data]))
# pylint: disable=not-an-iterable
def test_server_override_workers(self):
"""Test that config values are overridden properly
"""
# Sanity check our override values do not overlap
self.assertNotEqual(CONFIG_DATA["ConcurrentWorkers"],
CONFIG_DATA["OverrideConcurrentWorkers"])
config_data = imageroller.main.read_config(
self._cmd_args,
imageroller.test.get_config_parser(self._server_valid_override))
# ConcurrentWorkers is required to be set in [DEFAULT] (globally)
# Smoke test we can't override it in the first enabled server config
self.assertEqual(config_data.concurrent_workers,
CONFIG_DATA["ConcurrentWorkers"])
# pylint: disable=not-an-iterable, invalid-name
def test_server_override_save_timeout(self):
"""Test that config values are overridden properly
"""
# Sanity check our override values do not overlap
self.assertNotEqual(CONFIG_DATA["SaveTimeoutMinutes"],
CONFIG_DATA["OverrideSaveTimeoutMinutes"])
config_data = imageroller.main.read_config(
self._cmd_args,
imageroller.test.get_config_parser(self._server_valid_override))
# Test Save Timeout Minutes was overridden
self.assertEqual(
CONFIG_DATA["OverrideSaveTimeoutMinutes"],
[server_data.save_timeout_minutes
for server_data in config_data.server_data
if server_data.name ==
CONFIG_DATA["OverrideSaveTimeoutFQDN"]]
[0])
# pylint: disable=not-an-iterable
def test_server_override_retain(self):
"""Test that config values are overridden properly
"""
# Sanity check our override values do not overlap
self.assertNotEqual(CONFIG_DATA["RetainImageMinutes"],
CONFIG_DATA["OverrideRetainImageMinutes"])
config_data = imageroller.main.read_config(
self._cmd_args,
imageroller.test.get_config_parser(
self._server_valid_override))
# Test Retain Image Minutes was overridden
self.assertEqual(
CONFIG_DATA["OverrideRetainImageMinutes"],
[server_data.retain_image_minutes
for server_data in config_data.server_data
if server_data.name ==
CONFIG_DATA["OverrideRetainImageFQDN"]]
[0])
# pylint: disable=not-an-iterable
def test_server_override_region(self):
"""Test that config values are overridden properly
"""
# Sanity check our override values do not overlap
self.assertNotEqual(CONFIG_DATA["Region"],
CONFIG_DATA["OverrideRegion"])
config_data = imageroller.main.read_config(
self._cmd_args,
imageroller.test.get_config_parser(
self._server_valid_override))
# Test Region was overridden
self.assertEqual(
CONFIG_DATA["OverrideRegion"],
[server_data.region
for server_data in config_data.server_data
if server_data.name ==
CONFIG_DATA["OverrideRegionFQDN"]]
[0])
class AuthConfigTestCase(unittest.TestCase):
"""Test Case related to reading and parsing the Auth Config
"""
@classmethod
def setUpClass(cls):
"""Gets temp file paths for our config files
"""
cls._no_section = imageroller.test.write_config(
"auth", AUTH_NO_SECTION, AUTH_DATA)
cls._no_user = imageroller.test.write_config(
"auth", AUTH_NO_USER, AUTH_DATA)
cls._blank_user = imageroller.test.write_config(
"auth", AUTH_BLANK_USER, AUTH_DATA)
cls._no_key = imageroller.test.write_config(
"auth", AUTH_NO_KEY, AUTH_DATA)
cls._blank_key = imageroller.test.write_config(
"auth", AUTH_BLANK_KEY, AUTH_DATA)
cls._valid = imageroller.test.write_config(
"auth", AUTH_VALID, AUTH_DATA)
@classmethod
def tearDownClass(cls):
"""Cleans up our test config files
"""
os.remove(cls._no_section)
os.remove(cls._no_user)
os.remove(cls._blank_user)
os.remove(cls._no_key)
os.remove(cls._blank_key)
os.remove(cls._valid)
def test_no_section(self):
"""Test auth config with no [AUTH] section
"""
with self.assertRaises(ConfigError) as cm:
imageroller.main.read_authconfig(
imageroller.test.get_config_parser(self._no_section))
self.assertEqual(str(cm.exception), "AuthConfig must contain [AUTH]")
def test_no_user(self):
"""Test auth config with no ApiUser key
"""
with self.assertRaises(ConfigError) as cm:
imageroller.main.read_authconfig(
imageroller.test.get_config_parser(self._no_user))
self.assertEqual(str(cm.exception), "AuthConfig must contain ApiUser")
def test_blank_user(self):
"""Test auth config with a blank user
"""
with self.assertRaises(ConfigError) as cm:
imageroller.main.read_authconfig(
imageroller.test.get_config_parser(self._blank_user))
self.assertEqual(str(cm.exception), "AuthConfig must contain ApiUser")
def test_no_key(self):
"""Test auth config with no ApiKey key
"""
with self.assertRaises(ConfigError) as cm:
imageroller.main.read_authconfig(
imageroller.test.get_config_parser(self._no_key))
self.assertEqual(str(cm.exception), "AuthConfig must contain ApiKey")
def test_blank_key(self):
"""Test auth config with no a blank key
"""
with self.assertRaises(ConfigError) as cm:
imageroller.main.read_authconfig(
imageroller.test.get_config_parser(self._blank_key))
self.assertEqual(str(cm.exception), "AuthConfig must contain ApiKey")
def test_valid(self):
"""Test reading the correct values from a valid auth config
"""
auth_tuple = imageroller.main.read_authconfig(
imageroller.test.get_config_parser(self._valid))
self.assertTupleEqual(auth_tuple, (AUTH_DATA["ApiUser"],
AUTH_DATA["ApiKey"]))
import unittest
from craft_ai import Client, errors as craft_err
from . import settings
from .utils import generate_entity_id
from .data import valid_data, invalid_data
NB_AGENTS_TO_CREATE = 5
class TestCreateAgentsBulkSuccess(unittest.TestCase):
"""Checks that the client succeeds when creating
an/multiple agent(s) with OK input"""
@classmethod
def setUpClass(cls):
cls.client = Client(settings.CRAFT_CFG)
cls.agent_id1 = generate_entity_id("test_create_agents_bulk")
cls.agent_id2 = generate_entity_id("test_create_agents_bulk")
cls.agent_name = generate_entity_id("test_create_agents_bulk")
@classmethod
def tearDownClass(cls):
for agent_id in cls.client.list_agents():
try:
cls.client.delete_agent(agent_id)
except craft_err.CraftAiError:
continue
def setUp(self):
# Makes sure that no agent with the same ID already exists
resp1 = self.client.delete_agent(self.agent_id1)
resp2 = self.client.delete_agent(self.agent_id2)
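        # delete_agent appears to return a dict even when the agent does not
        # exist, so the assertions below double as a sanity check on the shape
        # of the client response (inferred from these tests, not documented here)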
self.assertIsInstance(resp1, dict)
self.assertIsInstance(resp2, dict)
def clean_up_agent(self, aid):
# Makes sure that no agent with the standard ID remains
try:
self.client.delete_agent(aid)
except craft_err.CraftAiError:
return
def clean_up_agents(self, aids):
# Makes sure that no agent with the standard ID remains
for aid in aids:
self.clean_up_agent(aid)
def test_create_one_agent_generated_agent_id(self):
"""create_agents_bulk should succeed when given an empty `id` field.
It should give a proper JSON response with a list containing a dict with `id` and
`configuration` fields being strings.
"""
payload = [{"configuration": valid_data.VALID_CONFIGURATION}]
resp = self.client.create_agents_bulk(payload)
self.assertIsInstance(resp[0].get("id"), str)
self.addCleanup(self.clean_up_agent, resp[0].get("id"))
def test_create_multiple_agents_generated_agent_id(self):
"""create_agents_bulk should succeed when given agents to create with empty `id` field.
It should give a proper JSON response with a list containing dicts with `id` and
`configuration` fields being strings.
"""
payload = [
{"configuration": valid_data.VALID_CONFIGURATION},
{"configuration": valid_data.VALID_CONFIGURATION},
]
resp = self.client.create_agents_bulk(payload)
self.assertIsInstance(resp[0].get("id"), str)
self.assertIsInstance(resp[1].get("id"), str)
self.addCleanup(self.clean_up_agents, [resp[0].get("id"), resp[1].get("id")])
def test_create_one_agent_given_agent_id(self):
"""create_agents_bulk should succeed when given a valid string in the `id` field.
It should give a proper JSON response with a list containing a dict with `id` and
`configuration` fields being strings and `id` being the same as the one given as
a parameter.
"""
payload = [
{"id": self.agent_id1, "configuration": valid_data.VALID_CONFIGURATION}
]
resp = self.client.create_agents_bulk(payload)
self.assertEqual(resp[0].get("id"), self.agent_id1)
self.addCleanup(self.clean_up_agent, self.agent_id1)
def test_create_multiple_agents_given_agent_id(self):
"""create_agents_bulk should succeed when given valid strings in the `id` field.
It should give a proper JSON response with a list containing dicts with `id` and
`configuration` fields being strings and the `id`s being the same as the ones given
as parameters.
"""
payload = [
{"id": self.agent_id1, "configuration": valid_data.VALID_CONFIGURATION},
{"id": self.agent_id2, "configuration": valid_data.VALID_CONFIGURATION},
]
resp = self.client.create_agents_bulk(payload)
self.assertEqual(resp[0].get("id"), self.agent_id1)
self.assertEqual(resp[1].get("id"), self.agent_id2)
self.addCleanup(self.clean_up_agents, [resp[0].get("id"), resp[1].get("id")])
def test_create_agents_bulk_id_given_and_generated(self):
"""create_agents_bulk should succeed when given some agents with string `id` and some
with empty `id` field.
It should give a proper JSON response with a list containing dicts with `id` and
`configuration` fields being strings and if the `id` was given as a parameter, `id`
should be the same as the one given as a parameter.
"""
payload = [
{"id": self.agent_id1, "configuration": valid_data.VALID_CONFIGURATION},
{"configuration": valid_data.VALID_CONFIGURATION},
]
resp = self.client.create_agents_bulk(payload)
self.assertEqual(resp[0].get("id"), self.agent_id1)
self.assertIsInstance(resp[1].get("id"), str)
self.addCleanup(self.clean_up_agents, [resp[0].get("id"), resp[1].get("id")])
def test_create_lot_of_agents_bulk(self):
"""create_agents_bulk should succeed when given a lot of agents to create.
It should give a proper JSON response with a list containing dicts
with `id` and `configuration` fields being strings and the first `id` being the
same as the one given as a parameter.
"""
payload = []
agents_lst = []
for i in range(NB_AGENTS_TO_CREATE):
new_agent_id = generate_entity_id("test_create_lot_of_agents_bulk")
self.client.delete_agent(new_agent_id)
payload.append(
{"id": new_agent_id, "configuration": valid_data.VALID_CONFIGURATION}
)
agents_lst.append(new_agent_id)
response = self.client.create_agents_bulk(payload)
for i, resp in enumerate(response):
self.assertEqual(resp.get("id"), agents_lst[i])
self.assertFalse("error" in resp)
self.addCleanup(self.clean_up_agents, agents_lst)
class TestCreateAgentsBulkFailure(unittest.TestCase):
"""Checks that the client fails when creating
an/multiple agent(s) with bad input"""
@classmethod
def setUpClass(cls):
cls.client = Client(settings.CRAFT_CFG)
def setUp(self):
self.agent_id1 = generate_entity_id("test_create_agents_bulk_failure")
self.agent_id2 = generate_entity_id("test_create_agents_bulk_failure")
self.agent_name = generate_entity_id("test_create_agents_bulk_failure")
# Makes sure that no agent with the same ID already exists
resp1 = self.client.delete_agent(self.agent_id1)
resp2 = self.client.delete_agent(self.agent_id2)
self.assertIsInstance(resp1, dict)
self.assertIsInstance(resp2, dict)
def tearDown(self):
# This ensures that agents are properly deleted every time
self.client.delete_agent(self.agent_id1)
self.client.delete_agent(self.agent_id2)
self.client.delete_agent(self.agent_name)
def clean_up_agent(self, aid):
# Makes sure that no agent with the standard ID remains
self.client.delete_agent(aid)
def clean_up_agents(self, aids):
# Makes sure that no agent with the standard ID remains
for aid in aids:
self.clean_up_agent(aid)
def test_create_agents_bulk_with_existing_agent_id(self):
"""create_agents_bulk should fail when given only IDs that already exist.
It should raise an error upon request for creation of a bulk of agents with IDs
that already exist, since agent IDs should always be unique.
"""
# Calling create_agents_bulk a first time
payload = [
{"id": self.agent_id1, "configuration": valid_data.VALID_CONFIGURATION},
{"id": self.agent_id2, "configuration": valid_data.VALID_CONFIGURATION},
]
self.client.create_agents_bulk(payload)
self.assertRaises(
craft_err.CraftAiBadRequestError, self.client.create_agents_bulk, payload
)
self.addCleanup(self.clean_up_agents, [self.agent_id1, self.agent_id2])
def test_create_agents_bulk_with_invalid_agent_id(self):
"""create_agents_bulk should fail when all agent IDs are invalid.
It should raise an error upon request for creation of all agents with invalid id.
"""
payload = [
{"id": "toto/tutu", "configuration": valid_data.VALID_CONFIGURATION},
{"id": "toto@tutu", "configuration": valid_data.VALID_CONFIGURATION},
]
self.assertRaises(
craft_err.CraftAiBadRequestError, self.client.create_agents_bulk, payload
)
def test_create_agents_with_an_empty_payload(self):
"""create_agents_bulk should fail when given payload is empty.
It should raise an error of invalid given payload.
"""
payload = []
self.assertRaises(
craft_err.CraftAiBadRequestError, self.client.create_agents_bulk, payload
)
def test_create_agents_bulk_with_invalid_context(self):
"""create_agents_bulk should fail when all agent contexts are invalid or the `context`
field doesn't exist.
It should raise an error upon request for creation of all agents with invalid context.
"""
payload = []
agents_lst = []
# Add all the invalid context to check
for i, invalid_context in enumerate(invalid_data.INVALID_CONTEXTS):
new_agent_id = generate_entity_id(
"test_create_agents_bulk_with_invalid_context" + str(i)
)
invalid_configuration = {
"context": invalid_data.INVALID_CONTEXTS[invalid_context],
"output": ["lightbulbColor"],
"time_quantum": 100,
}
self.client.delete_agent(new_agent_id)
payload.append({"id": new_agent_id, "configuration": invalid_configuration})
agents_lst.append(new_agent_id)
# Add an agent with no context field
new_agent_id = self.agent_name.format(len(agents_lst))
self.client.delete_agent(new_agent_id)
invalid_configuration = {"output": ["lightbulbColor"], "time_quantum": 100}
payload.append({"id": new_agent_id, "configuration": invalid_configuration})
agents_lst.append(new_agent_id)
self.assertRaises(
craft_err.CraftAiBadRequestError, self.client.create_agents_bulk, payload
)
self.addCleanup(self.clean_up_agents, agents_lst)
def test_create_agents_bulk_undefined_config(self):
"""create_agents_bulk should fail when the configuration is undefined or the
`configuration` field doesn't exist.
It should raise an error upon request for creation of all agents with no
configuration key in the request body, since it is a mandatory field to
create an agent.
"""
payload = []
agents_lst = []
# Add all the invalid context to check
for i, empty_configuration in enumerate(invalid_data.UNDEFINED_KEY):
new_agent_id = generate_entity_id(
"test_create_agents_bulk_undef_conf_" + str(i)
)
self.client.delete_agent(new_agent_id)
payload.append(
{
"id": new_agent_id,
"configuration": invalid_data.UNDEFINED_KEY[empty_configuration],
}
)
agents_lst.append(new_agent_id)
# Add agent with no configuration
new_agent_id = self.agent_name.format(len(agents_lst))
self.client.delete_agent(new_agent_id)
payload.append({"id": new_agent_id})
agents_lst.append(new_agent_id)
self.assertRaises(
craft_err.CraftAiBadRequestError, self.client.create_agents_bulk, payload
)
self.addCleanup(self.clean_up_agents, agents_lst)
def test_create_agents_bulk_invalid_time_quantum(self):
"""create_agents_bulk should fail when given invalid time quantums.
It should raise an error upon request for creation of all agent with incorrect time
quantum in the configuration, since it is essential to perform any action with craft
ai.
"""
payload = []
agents_lst = []
# Add all the invalid time quantum to check
for i, inv_tq in enumerate(invalid_data.INVALID_TIME_QUANTA):
new_agent_id = generate_entity_id(
"test_create_agents_bulk_invalid_time_quantum"
)
invalid_configuration = {
"context": valid_data.VALID_CONTEXT,
"output": valid_data.VALID_OUTPUT,
"time_quantum": invalid_data.INVALID_TIME_QUANTA[inv_tq],
}
self.client.delete_agent(new_agent_id)
payload.append({"id": new_agent_id, "configuration": invalid_configuration})
agents_lst.append(new_agent_id)
self.assertRaises(
craft_err.CraftAiBadRequestError, self.client.create_agents_bulk, payload
)
self.addCleanup(self.clean_up_agents, agents_lst)
class TestCreateAgentsBulkSomeFailure(unittest.TestCase):
"""Checks that the client succeed when creating an/multiple agent(s)
with bad input and an/multiple agent(s) with valid input"""
@classmethod
def setUpClass(cls):
cls.client = Client(settings.CRAFT_CFG)
@classmethod
def tearDownClass(cls):
for agent_id in cls.client.list_agents():
try:
cls.client.delete_agent(agent_id)
except craft_err.CraftAiError:
continue
def setUp(self):
self.agent_id = generate_entity_id("test_create_agents_bulk_SomeFail")
self.agent_name = generate_entity_id("test_create_agents_bulk_SomeFail")
# Makes sure that no agent with the same ID already exists
resp = self.client.delete_agent(self.agent_id)
self.assertIsInstance(resp, dict)
def clean_up_agent(self, aid):
# Makes sure that no agent with the standard ID remains
self.client.delete_agent(aid)
def clean_up_agents(self, aids):
# Makes sure that no agent with the standard ID remains
for aid in aids:
try:
self.clean_up_agent(aid)
except craft_err.CraftAiError:
continue
def test_create_some_agents_with_existing_agent_id(self):
"""create_agents_bulk should succeed when some of the ID given already exist
and the others doesn't.
It should give a proper JSON response with a list containing dicts.
The ones having existing IDs have the `error` field being a CraftAiBadRequestError.
The ones having valid IDs have `configuration` field being strings.
In either case they should have 'id' being the same as the one given as a parameter.
"""
payload = [
{"id": self.agent_id, "configuration": valid_data.VALID_CONFIGURATION},
{"configuration": valid_data.VALID_CONFIGURATION},
]
resp1 = self.client.create_agents_bulk(payload)
resp2 = self.client.create_agents_bulk(payload)
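        # resp1 is the initial bulk creation (both agents are new); resp2 replays
        # the same payload, so the fixed-ID agent should now fail while the agent
        # with a generated ID is simply created again under a fresh ID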
self.assertEqual(resp2[0].get("id"), self.agent_id)
self.assertIsInstance(resp2[0].get("error"), craft_err.CraftAiBadRequestError)
self.assertFalse("configuration" in resp2[0])
self.assertIsInstance(resp1[1].get("id"), str)
self.assertTrue("configuration" in resp1[1])
self.assertIsInstance(resp2[1].get("id"), str)
self.assertTrue("configuration" in resp2[1])
self.addCleanup(
self.clean_up_agents,
[self.agent_id, resp1[1].get("id"), resp2[1].get("id")],
)
def test_create_some_agents_with_invalid_agent_id(self):
"""create_agents_bulk should succeed when some of the ID given are invalid
and the others are valid.
It should give a proper JSON response with a list containing dicts.
The ones having invalid IDs have the `error` field being a CraftAiBadRequestError.
The ones having valid IDs have `configuration` field being strings.
In either case they should have 'id' being the same as the one given as a parameter.
"""
payload = [
{"id": "toto/tutu", "configuration": valid_data.VALID_CONFIGURATION},
{"id": self.agent_id, "configuration": valid_data.VALID_CONFIGURATION},
]
resp = self.client.create_agents_bulk(payload)
self.assertEqual(resp[0].get("id"), "toto/tutu")
self.assertIsInstance(resp[0].get("error"), craft_err.CraftAiBadRequestError)
self.assertFalse("configuration" in resp[0])
self.assertEqual(resp[1].get("id"), self.agent_id)
self.assertTrue("configuration" in resp[1])
self.addCleanup(self.clean_up_agent, self.agent_id)
def test_create_same_agents_in_bulk(self):
"""create_agents_bulk should succeed when agents in a bulk have the same ID given.
It should give a proper JSON response with a list containing two dicts.
The first one should have 'id' being the same as the one given as a parameter,
and the `configuration` field being strings.
        The second one should have `id` being the same as the one given as a parameter
        and the 'error' field being a CraftAiBadRequestError.
"""
# Calling create_agents_bulk a first time
payload = [
{"id": self.agent_id, "configuration": valid_data.VALID_CONFIGURATION},
{"id": self.agent_id, "configuration": valid_data.VALID_CONFIGURATION},
]
resp = self.client.create_agents_bulk(payload)
self.assertEqual(resp[0].get("id"), self.agent_id)
self.assertEqual(resp[1].get("id"), self.agent_id)
self.assertTrue("configuration" in resp[0] or "configuration" in resp[1])
if "configuration" in resp[0]:
self.assertIsInstance(
resp[1].get("error"), craft_err.CraftAiBadRequestError
)
elif "configuration" in resp[1]:
self.assertIsInstance(
resp[0].get("error"), craft_err.CraftAiBadRequestError
)
self.addCleanup(self.clean_up_agent, self.agent_id)
def test_create_some_agents_bulk_invalid_context(self):
"""create_agents_bulk should succeed with some agents with invalid context
and some with valid context.
It should give a proper JSON response with a list containing dicts.
The ones having invalid context have the `error` field being a CraftAiBadRequestError.
The ones having valid ids have the `id` field being string and 'configuration' field
being a dict.
"""
# Add valid agent with a valid configuration
payload = [
{"id": self.agent_id, "configuration": valid_data.VALID_CONFIGURATION}
]
agents_lst = [self.agent_id]
# Add all the invalid context to check
for i, invalid_context in enumerate(invalid_data.INVALID_CONTEXTS):
new_agent_id = generate_entity_id(
"test_create_some_agents_bulk_invalid_context"
)
invalid_configuration = {
"context": invalid_data.INVALID_CONTEXTS[invalid_context],
"output": ["lightbulbColor"],
"time_quantum": 100,
}
self.client.delete_agent(new_agent_id)
payload.append({"id": new_agent_id, "configuration": invalid_configuration})
agents_lst.append(new_agent_id)
        # Add an agent with no context field
        new_agent_id = self.agent_name.format(len(agents_lst))
        self.client.delete_agent(new_agent_id)
        invalid_configuration = {"output": ["lightbulbColor"], "time_quantum": 100}
        payload.append({"id": new_agent_id, "configuration": invalid_configuration})
        agents_lst.append(new_agent_id)
resp = self.client.create_agents_bulk(payload)
self.assertEqual(resp[0].get("id"), self.agent_id)
self.assertTrue("configuration" in resp[0])
self.assertFalse("error" in resp[0])
for i in range(1, len(resp)):
self.assertEqual(resp[i].get("id"), agents_lst[i])
self.assertTrue("error" in resp[i])
self.assertFalse("configuration" in resp[i])
self.addCleanup(self.clean_up_agents, agents_lst)
def test_create_some_agents_undef_config(self):
"""create_agents_bulk should succeed with some agents with undefined configuration
and some with valid configuration.
It should give a proper JSON response with a list containing dicts.
The ones having invalid configuration have the `error` field being a CraftAiBadRequestError.
The ones having valid ids have the `id` field being string and 'configuration' field
being a dict.
The valid ones should have `id` and `configuration` fields being strings.
The invalid ones should have 'id' and 'error' fields.
"""
# Add valid agent with a valid configuration
payload = [
{"id": self.agent_id, "configuration": valid_data.VALID_CONFIGURATION}
]
agents_lst = [self.agent_id]
# Add all the invalid configuration to check
for i, empty_configuration in enumerate(invalid_data.UNDEFINED_KEY):
new_agent_id = generate_entity_id(
"test_create_some_agents_undef_config" + str(i)
)
self.client.delete_agent(new_agent_id)
payload.append(
{
"id": new_agent_id,
"configuration": invalid_data.UNDEFINED_KEY[empty_configuration],
}
)
agents_lst.append(new_agent_id)
# Add agent with no configuration
new_agent_id = self.agent_name.format(len(agents_lst))
self.client.delete_agent(new_agent_id)
payload.append({"id": new_agent_id})
agents_lst.append(new_agent_id)
self.assertRaises(
craft_err.CraftAiBadRequestError, self.client.create_agents_bulk, payload,
)
def test_create_some_agents_inval_time_quant(self):
"""create_agents_bulk should succeed with some agents with invalid time quantum
in the configuration and some with valid configuration.
It should give a proper JSON response with a list containing dicts.
The ones having invalid time quantum have the `error` field being a CraftAiBadRequestError.
The ones having valid ids have the `id` field being string and 'configuration' field
being a dict.
"""
# Add invalid configuration with invalid time quantum
new_agent_id = generate_entity_id("test_create_some_agents_inval_time_quant")
invalid_configuration = {
"context": valid_data.VALID_CONTEXT,
"output": valid_data.VALID_OUTPUT,
"time_quantum": invalid_data.INVALID_TIME_QUANTA["negative_tq"],
}
payload = [{"id": new_agent_id, "configuration": invalid_configuration}]
self.assertRaises(
craft_err.CraftAiBadRequestError, self.client.create_agents_bulk, payload,
)
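# Allow running this test module directly; a minimal sketch assuming the suite
# is not always driven by an external runner such as pytest.
if __name__ == "__main__":
    unittest.main()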