# -*- coding: utf-8 -*-
"""
eve.utils
~~~~~~~~~
Utility functions and classes.
:copyright: (c) 2013 by Nicola Iarocci.
:license: BSD, see LICENSE for more details.
"""
import eve
import hashlib
from flask import request
from flask import current_app as app
from datetime import datetime, timedelta
from bson.json_util import dumps
import werkzeug.exceptions
class Config(object):
""" Helper class used trorough the code to access configuration settings.
If the main flaskapp object is not instantiated yet, returns the default
setting in the eve __init__.py module, otherwise returns the flaskapp
config value (which value might override the static defaults).
"""
def __getattr__(self, name):
try:
# raises a 'working outside of application context' RuntimeError if the
# current_app is not available yet
return app.config.get(name)
except:
# fallback to the module-level default value
return getattr(eve, name)
# makes an instance of the Config helper class available to all the modules
# importing eve.utils.
config = Config()
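# Illustrative sketch (not part of eve): settings are read through the shared
# `config` proxy; outside of an application context the module-level default
# from eve/__init__.py is returned, inside one the Flask app's config wins.
def _config_example():
    return config.PAGINATION_DEFAULT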
class ParsedRequest(object):
""" This class, by means of its attributes, describes a client request.
.. versionchanged:: 0.1.0
'embedded' keyword.
.. versionchanged:: 0.0.6
Projection queries ('?projection={"name": 1}')
"""
# `where` value of the query string (?where). Defaults to None.
where = None
# `projection` value of the query string (?projection). Defaults to None.
projection = None
# `sort` value of the query string (?sort). Defaults to None.
sort = None
# `page` value of the query string (?page). Defaults to 1.
page = 1
# `max_results` value of the query string (?max_results). Defaults to
# `PAGINATION_DEFAULT` unless pagination is disabled.
max_results = 0
# `If-Modified-Since` request header value. Defaults to None.
if_modified_since = None
# `If-None-Match` request header value. Defaults to None.
if_none_match = None
# `If-Match` request header value. Defaults to None.
if_match = None
# `embedded` value of the query string (?embedded). Defaults to None.
embedded = None
def parse_request(resource):
""" Parses a client request, returning instance of :class:`ParsedRequest`
containing relevant request data.
:param resource: the resource currently being accessed by the client.
.. versionchanged:: 0.1.0
Support for embedded documents.
.. versionchanged:: 0.0.6
projection queries ('?projection={"name": 1}')
.. versionchanged:: 0.0.5
Support for optional filters, sorting and pagination.
"""
args = request.args
headers = request.headers
r = ParsedRequest()
if config.DOMAIN[resource]['allowed_filters']:
r.where = args.get('where')
if config.DOMAIN[resource]['projection']:
r.projection = args.get('projection')
if config.DOMAIN[resource]['sorting']:
r.sort = args.get('sort')
if config.DOMAIN[resource]['embedding']:
r.embedded = args.get('embedded')
max_results_default = config.PAGINATION_DEFAULT if \
config.DOMAIN[resource]['pagination'] else 0
try:
r.max_results = int(float(args['max_results']))
assert r.max_results > 0
except (ValueError, werkzeug.exceptions.BadRequestKeyError,
AssertionError):
r.max_results = max_results_default
if config.DOMAIN[resource]['pagination']:
# TODO should probably return a 400 if 'page' is < 1 or non-numeric
if 'page' in args:
try:
r.page = abs(int(args.get('page'))) or 1
except ValueError:
pass
# TODO should probably return a 400 if 'max_results' < 1 or
# non-numeric
if r.max_results > config.PAGINATION_LIMIT:
r.max_results = config.PAGINATION_LIMIT
if headers:
r.if_modified_since = weak_date(headers.get('If-Modified-Since'))
# TODO if_none_match and if_match should probably be validated as
# valid etags, returning 400 on fail. Not sure however since
# we're just going to use these for string-type comparison
r.if_none_match = headers.get('If-None-Match')
r.if_match = headers.get('If-Match')
return r
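# Illustrative sketch (not part of eve): inside a Flask request context for a
# hypothetical 'people' resource, a URL such as
#   /people?where={"name": "john"}&sort=[("age", -1)]&page=2&max_results=20
# yields a ParsedRequest with where, sort, page and max_results populated,
# subject to the resource's allowed_filters/sorting/pagination settings.
def _parse_request_example():
    r = parse_request('people')
    return r.where, r.sort, r.page, r.max_results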
def weak_date(date):
""" Returns a RFC-1123 string corresponding to a datetime value plus
a 1 second timedelta. This is needed because when saved, documents
LAST_UPDATED values have higher resolution than If-Modified-Since's, which
is limited to seconds.
:param date: the date to be adjusted.
"""
return str_to_date(date) + timedelta(seconds=1) if date else None
def str_to_date(string):
""" Converts a RFC-1123 string to the corresponding datetime value.
:param string: the RFC-1123 string to convert to datetime value.
"""
return datetime.strptime(string, config.DATE_FORMAT) if string else None
def date_to_str(date):
""" Converts a datetime value to the corresponding RFC-1123 string.
:param date: the datetime value to convert.
"""
return datetime.strftime(date, config.DATE_FORMAT) if date else None
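# Illustrative sketch (not part of eve), assuming the default DATE_FORMAT
# ('%a, %d %b %Y %H:%M:%S GMT'): the helpers round-trip RFC-1123 strings, and
# weak_date() pads by one second to match If-Modified-Since's resolution.
def _date_helpers_example():
    rfc1123 = 'Sun, 06 Nov 1994 08:49:37 GMT'
    assert date_to_str(str_to_date(rfc1123)) == rfc1123
    return weak_date(rfc1123)  # datetime for 08:49:38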
def collection_link(resource):
""" Returns a link to a resource endpoint.
:param resource: the resource name.
.. versionchanged:: 0.0.3
Now returning a JSON link
"""
return {'title': '%s' % config.URLS[resource],
'href': '%s' % resource_uri(resource)}
def document_link(resource, document_id):
""" Returns a link to a document endpoint.
:param resource: the resource name.
:param document_id: the document unique identifier.
.. versionchanged:: 0.1.0
No more trailing slashes in links.
.. versionchanged:: 0.0.3
Now returning a JSON link
"""
return {'title': '%s' % config.DOMAIN[resource]['item_title'],
'href': '%s/%s' % (resource_uri(resource), document_id)}
def home_link():
""" Returns a link to the API entry point/home page.
.. versionchanged:: 0.1.1
Handle the case of SERVER_NAME being None.
.. versionchanged:: 0.0.3
Now returning a JSON link.
"""
server_name = config.SERVER_NAME if config.SERVER_NAME else ''
return {'title': 'home',
'href': '%s%s' % (server_name, api_prefix())}
def resource_uri(resource):
""" Returns the absolute URI to a resource.
.. versionchanged:: 0.1.1
URL prefixes are now included in config.URLS items, no more need to
explicitly add them to resource links.
Handle the case of SERVER_NAME being None.
.. versionchanged:: 0.1.0
No more trailing slashes in links.
:param resource: the resource name.
"""
server_name = config.SERVER_NAME if config.SERVER_NAME else ''
return '%s/%s' % (server_name, config.URLS[resource])
def api_prefix(url_prefix=None, api_version=None):
""" Returns the prefix to API endpoints, according to the URL_PREFIX and
API_VERSION configuration settings.
:param url_prefix: the prefix string. If `None`, defaults to the current
:class:`~eve.flaskapp` configuration setting.
The class itself will call this function while
initializing. In that case, it will pass its settings
as arguments (as they are not externally available yet)
:param api_version: the api version string. If `None`, defaults to the
current :class:`~eve.flaskapp` configuration setting.
The class itself will call this function while
initializing. In that case, it will pass its settings
as arguments (as they are not externally available yet)
.. versionadded:: 0.0.3
"""
if url_prefix is None:
url_prefix = config.URL_PREFIX
if api_version is None:
api_version = config.API_VERSION
prefix = '/%s' % url_prefix if url_prefix else ''
version = '/%s' % api_version if api_version else ''
return prefix + version
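# Illustrative sketch (not part of eve): the prefix is assembled only from the
# parts that are actually configured.
def _api_prefix_example():
    assert api_prefix('api', 'v1') == '/api/v1'
    assert api_prefix('api', '') == '/api'
    assert api_prefix('', '') == ''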
def querydef(max_results=config.PAGINATION_DEFAULT, where=None, sort=None,
page=None):
""" Returns a valid query string.
:param max_results: `max_result` part of the query string. Defaults to
`PAGINATION_DEFAULT`
:param where: `where` part of the query string. Defaults to None.
:param sort: `sort` part of the query string. Defaults to None.
:param page: `page` part of the query string. Defaults to None.
"""
where_part = '&where=%s' % where if where else ''
sort_part = '&sort=%s' % sort if sort else ''
page_part = '&page=%s' % page if page and page > 1 else ''
max_results_part = 'max_results=%s' % max_results \
if max_results != config.PAGINATION_DEFAULT else ''
return ('?' + ''.join([max_results_part, where_part, sort_part,
page_part]).lstrip('&')).rstrip('?')
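# Illustrative sketch (not part of eve): only the parts that differ from their
# defaults end up in the generated query string.
def _querydef_example():
    qs = querydef(max_results=config.PAGINATION_DEFAULT,
                  where='{"name": "john"}', page=3)
    assert qs == '?where={"name": "john"}&page=3'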
def document_etag(value):
""" Computes and returns a valid ETag for the input value.
:param value: the value to compute the ETag with.
.. versionchanged:: 0.0.4
Using bson.json_util.dumps over str(value) to make etag computation
consistent between different runs and/or server instances (#16).
"""
h = hashlib.sha1()
h.update(dumps(value, sort_keys=True).encode('utf-8'))
return h.hexdigest()
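# Illustrative sketch (not part of eve): because dumps() sorts keys, logically
# equal documents always hash to the same ETag, regardless of key order.
def _document_etag_example():
    assert document_etag({'name': 'john', 'age': 32}) == \
        document_etag({'age': 32, 'name': 'john'})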
def extract_key_values(key, d):
""" Extracts all values that match a key, even in nested dicts.
:param key: the lookup key.
:param d: the dict to scan.
.. versionadded:: 0.0.7
"""
if key in d:
yield d[key]
for k in d:
if isinstance(d[k], dict):
for j in extract_key_values(key, d[k]):
yield j
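# Illustrative sketch (not part of eve): matches are yielded from the top
# level and from any nested dictionary.
def _extract_key_values_example():
    d = {'_id': 1, 'child': {'_id': 2, 'grandchild': {'_id': 3}}}
    assert sorted(extract_key_values('_id', d)) == [1, 2, 3]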
def request_method():
""" Returns the proper request method, also taking into account the
possible override requested by the client (via 'X-HTTP-Method-Override'
header).
.. versionchanged:: 0.1.0
Supports overriding of any HTTP Method (#95).
.. versionadded:: 0.0.7
"""
return request.headers.get('X-HTTP-Method-Override', request.method)
def debug_error_message(msg):
""" Returns the error message `msg` if config.DEBUG is True
otherwise returns `None` which will cause Werkzeug to provide
a generic error message
:param msg: The error message to return if config.DEBUG is True
.. versionadded: 0.0.9
"""
if getattr(config, 'DEBUG', False):
return msg
return None
def validate_filters(where, resource):
""" Report any filter which is not allowed by `allowed_filters`
:param where: the where clause, as a dict.
:param resource: the resource being inspected.
.. versionadded:: 0.0.9
"""
allowed = config.DOMAIN[resource]['allowed_filters']
if '*' not in allowed:
for filt, cond in where.items():
if filt not in allowed:
return "filter on '%s' not allowed" % filt
return None
# -*- coding: utf-8 -*-
import bpy
from bpy.types import PropertyGroup
from bpy.props import StringProperty
from bpy.props import IntProperty
from bpy.props import FloatVectorProperty
from bpy.props import FloatProperty
from bpy.props import CollectionProperty
from bpy.props import EnumProperty
from mmd_tools.core.model import Model as FnModel
from mmd_tools.core.bone import FnBone
from mmd_tools.core.material import FnMaterial
def _get_bone(prop):
bone_id = prop.get('bone_id', -1)
if bone_id < 0:
return ''
root = prop.id_data
fnModel = FnModel(root)
arm = fnModel.armature()
fnBone = FnBone.from_bone_id(arm, bone_id)
if not fnBone:
return ''
return fnBone.pose_bone.name
def _set_bone(prop, value):
root = prop.id_data
fnModel = FnModel(root)
arm = fnModel.armature()
if value not in arm.pose.bones.keys():
prop['bone_id'] = -1
return
pose_bone = arm.pose.bones[value]
fnBone = FnBone(pose_bone)
prop['bone_id'] = fnBone.bone_id
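# Illustrative sketch (assumption, not part of mmd_tools): the string property
# below stores a stable bone_id via _set_bone and resolves it back to the
# current pose-bone name via _get_bone, so renaming the bone in Blender does
# not break the morph reference:
#
#     morph_data.bone = 'arm_L'   # stores arm_L's bone_id
#     morph_data.bone             # -> 'arm_L' (looked up from the id)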
class BoneMorphData(PropertyGroup):
"""
"""
bone = StringProperty(
name='Bone',
set=_set_bone,
get=_get_bone,
)
bone_id = IntProperty(
name='Bone ID',
)
location = FloatVectorProperty(
name='Location',
subtype='TRANSLATION',
size=3,
)
rotation = FloatVectorProperty(
name='Rotation',
subtype='QUATERNION',
size=4,
)
class BoneMorph(PropertyGroup):
"""Bone Morph
"""
name_e = StringProperty(
name='Name(Eng)',
description='English Name',
default='',
)
category = EnumProperty(
name='Category',
items = [
('SYSTEM', 'System', '', 0),
('EYEBROW', 'Eye Brow', '', 1),
('EYE', 'Eye', '', 2),
('MOUTH', 'Mouth', '', 3),
('OTHER', 'Other', '', 4),
],
default='OTHER',
)
data = CollectionProperty(
name='Morph Data',
type=BoneMorphData,
)
active_bone_data = IntProperty(
name='Active Bone Data',
default=0,
)
def _get_material(prop):
mat_id = prop.get('material_id', -1)  # default needed so the check below works when unset
if mat_id < 0:
return ''
fnMat = FnMaterial.from_material_id(mat_id)
if not fnMat:
return ''
return fnMat.material.name
def _set_material(prop, value):
if value not in bpy.data.materials.keys():
prop['material_id'] = -1
return
mat = bpy.data.materials[value]
fnMat = FnMaterial(mat)
prop['material_id'] = fnMat.material_id
class MaterialMorphData(PropertyGroup):
"""
"""
offset_type = EnumProperty(
name='Offset Type',
items=[
('MULT', 'Multiply', '', 0),
('ADD', 'Add', '', 1)
],
default='ADD'
)
material = StringProperty(
name='Material',
get=_get_material,
set=_set_material,
)
material_id = IntProperty(
name='Material ID',
)
diffuse_color = FloatVectorProperty(
name='Diffuse Color',
subtype='COLOR',
size=4,
min=0,
max=1,
precision=3,
step=0.1,
default=[0, 0, 0, 1],
)
specular_color = FloatVectorProperty(
name='Specular Color',
subtype='COLOR',
size=4,
min=0,
max=1,
precision=3,
step=0.1,
default=[0, 0, 0, 1],
)
ambient_color = FloatVectorProperty(
name='Ambient',
subtype='COLOR',
size=3,
min=0,
max=1,
precision=3,
step=0.1,
default=[0, 0, 0],
)
edge_color = FloatVectorProperty(
name='Edge Color',
subtype='COLOR',
size=4,
min=0,
max=1,
precision=3,
step=0.1,
default=[0, 0, 0, 1],
)
edge_weight = FloatProperty(
name='Edge Weight',
min=0,
max=100,
step=0.1,
default=0,
)
texture_factor = FloatVectorProperty(
name='Texture factor',
subtype='COLOR',
size=4,
min=0,
max=1,
precision=3,
step=0.1,
default=[0, 0, 0, 1],
)
sphere_texture_factor = FloatVectorProperty(
name='Sphere Texture factor',
subtype='COLOR',
size=4,
min=0,
max=1,
precision=3,
step=0.1,
default=[0, 0, 0, 1],
)
toon_texture_factor = FloatVectorProperty(
name='Toon Texture factor',
subtype='COLOR',
size=4,
min=0,
max=1,
precision=3,
step=0.1,
default=[0, 0, 0, 1],
)
class MaterialMorph(PropertyGroup):
""" Material Morph
"""
name_e = StringProperty(
name='Name(Eng)',
description='English Name',
default='',
)
category = EnumProperty(
name='Category',
items = [
('SYSTEM', 'System', '', 0),
('EYEBROW', 'Eye Brow', '', 1),
('EYE', 'Eye', '', 2),
('MOUTH', 'Mouth', '', 3),
('OTHER', 'Other', '', 4),
],
default='OTHER',
)
data = CollectionProperty(
name='Morph Data',
type=MaterialMorphData,
)
active_material_data = IntProperty(
name='Active Material Data',
default=0,
)
class VertexMorph(PropertyGroup):
"""Vertex Morph
"""
name_e = StringProperty(
name='Name(Eng)',
description='English Name',
default=''
)
category = EnumProperty(
name='Category',
items = [
('SYSTEM', 'System', '', 0),
('EYEBROW', 'Eye Brow', '', 1),
('EYE', 'Eye', '', 2),
('MOUTH', 'Mouth', '', 3),
('OTHER', 'Other', '', 4),
],
default='OTHER',
)
#!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os.path
import linecache
import distutils.core
import fileinput
import socket
import shutil
import sys
from weather_config import *
from weather_data_files import *
# Weather data files are created to manage the conversion process,
# allowing partitioning and resuming where you left off.
#
# benchmark_name/
# data/
# queries/
# logs/
class WeatherBenchmark:
DATA_LINKS_FOLDER = "data_links/"
LARGE_FILE_ROOT_TAG = WeatherDataFiles.LARGE_FILE_ROOT_TAG
QUERY_REPLACEMENT_KEY = "/tmp/1.0_partition_ghcnd_all_xml/"
QUERY_MASTER_FOLDER = "../queries/"
QUERY_FILE_LIST = [
"q00.xq",
"q01.xq",
"q02.xq",
"q03.xq",
"q04.xq",
"q05.xq",
"q06.xq",
"q07.xq"
]
QUERY_UTILITY_LIST = [
"no_result.xq",
"count_sensor.xq",
"count_station.xq",
"q04_count_join.xq",
"q04_count_sensor.xq",
"q04_count_station.xq",
"q05_count_join.xq",
"q05_count_sensor.xq",
"q05_count_station.xq",
"q06_count_join.xq",
"q06_count_sensor.xq",
"q06_count_station.xq",
"q07_count_1940.xq",
"q07_count_1960.xq",
"q07_count_1980.xq",
"q07_count_2000.xq",
"q07_count_join.xq",
"q07_count_left.xq",
"q07_count_tmin.xq",
"q07_count_tmax.xq",
"q07_data_tmin.xq",
"q07_data_tmax.xq",
"q07_filter_1940.xq",
"q07_filter_1960.xq",
"q07_filter_1980.xq",
"q07_filter_2000.xq",
]
BENCHMARK_LOCAL_TESTS = ["local_speed_up", "local_batch_scale_out"]
BENCHMARK_CLUSTER_TESTS = ["speed_up", "batch_scale_out"]
QUERY_COLLECTIONS = ["sensors", "stations"]
SEPERATOR = "|"
def __init__(self, base_paths, partitions, dataset, nodes):
self.base_paths = base_paths
self.partitions = partitions
self.dataset = dataset
self.nodes = nodes
def print_partition_scheme(self):
if (len(self.base_paths) == 0):
return
for test in self.dataset.get_tests():
if test in self.BENCHMARK_LOCAL_TESTS:
self.print_local_partition_schemes(test)
elif test in self.BENCHMARK_CLUSTER_TESTS:
self.print_cluster_partition_schemes(test)
else:
print "Unknown test."
exit()
def print_local_partition_schemes(self, test):
node_index = 0
virtual_disk_partitions = get_local_virtual_disk_partitions(self.partitions)
for p in self.partitions:
scheme = self.get_local_partition_scheme(test, p)
self.print_partition_schemes(virtual_disk_partitions, scheme, test, p, node_index)
def print_cluster_partition_schemes(self, test):
node_index = self.get_current_node_index()
virtual_disk_partitions = get_cluster_virtual_disk_partitions(self.nodes, self.partitions)
for p in self.partitions:
scheme = self.get_cluster_partition_scheme(test, p)
self.print_partition_schemes(virtual_disk_partitions, scheme, test, p, node_index)
def print_partition_schemes(self, virtual_partitions, scheme, test, partitions, node_id):
print
print "---------------- Partition Scheme --------------------"
print " Test: " + test
print " Virtual Partitions: " + str(virtual_partitions)
print " Disks: " + str(len(self.base_paths))
print " Partitions: " + str(partitions)
print " Node Id: " + str(node_id)
if isinstance(scheme, (tuple, list, dict, set)) and len(scheme) > 0:
folder_length = len(scheme[0][3]) + 5
row_format = "{:>5} {:>5} {:>5} {:<" + str(folder_length) + "} {:<" + str(folder_length) + "}"
HEADER = ("Disk", "Index", "Link", "Data Path", "Link Path")
print row_format.format(*HEADER)
for row in scheme:
print row_format.format(*row)
print
else:
print " Scheme is EMPTY."
def get_local_partition_scheme(self, test, partition):
scheme = []
virtual_disk_partitions = get_local_virtual_disk_partitions(self.partitions)
data_schemes = get_disk_partition_scheme(0, virtual_disk_partitions, self.base_paths)
link_base_schemes = get_disk_partition_scheme(0, partition, self.base_paths, self.DATA_LINKS_FOLDER + test)
# Match link paths to real data paths.
group_size = len(data_schemes) / len(link_base_schemes)
for d in range(len(self.base_paths)):
offset = 0
for link_node, link_disk, link_virtual, link_index, link_path in link_base_schemes:
if d == link_disk:
# Only consider a single disk at a time.
for data_node, data_disk, data_virtual, data_index, data_path in data_schemes:
if test == "local_speed_up" and data_disk == link_disk \
and offset <= data_index and data_index < offset + group_size:
scheme.append([data_disk, data_index, link_index, data_path, link_path])
elif test == "local_batch_scale_out" and data_disk == link_disk \
and data_index == link_index:
scheme.append([data_disk, data_index, link_index, data_path, link_path])
offset += group_size
return scheme
def get_cluster_partition_scheme(self, test, partition):
node_index = self.get_current_node_index()
if node_index == -1:
print "Unknown host."
return
scheme = []
virtual_disk_partitions = get_cluster_virtual_disk_partitions(self.nodes, self.partitions)
data_schemes = get_disk_partition_scheme(node_index, virtual_disk_partitions, self.base_paths)
link_base_schemes = get_cluster_link_scheme(len(self.nodes), partition, self.base_paths, self.DATA_LINKS_FOLDER + test)
# Match link paths to real data paths.
for link_node, link_disk, link_virtual, link_index, link_path in link_base_schemes:
# Prep
if test == "speed_up":
group_size = virtual_disk_partitions / (link_node + 1) / partition
elif test == "batch_scale_out":
group_size = virtual_disk_partitions / len(self.nodes) / partition
else:
print "Unknown test."
return
node_offset = group_size * node_index * partition
node_offset += group_size * link_index
has_data = True
if link_node < node_index:
has_data = False
# Make links
for date_node, data_disk, data_virtual, data_index, data_path in data_schemes:
if has_data and data_disk == link_disk \
and node_offset <= data_index and data_index < node_offset + group_size:
scheme.append([link_disk, data_index, link_index, data_path, link_path])
scheme.append([link_disk, -1, link_index, "", link_path])
return scheme
def build_data_links(self, reset):
if (len(self.base_paths) == 0):
return
if reset:
shutil.rmtree(self.base_paths[0] + self.DATA_LINKS_FOLDER)
for test in self.dataset.get_tests():
if test in self.BENCHMARK_LOCAL_TESTS:
for i in self.partitions:
scheme = self.get_local_partition_scheme(test, i)
self.build_data_links_scheme(scheme)
if 1 in self.partitions and len(self.base_paths) > 1:
scheme = self.build_data_links_local_zero_partition(test)
self.build_data_links_scheme(scheme)
elif test in self.BENCHMARK_CLUSTER_TESTS:
for i in self.partitions:
scheme = self.get_cluster_partition_scheme(test, i)
self.build_data_links_scheme(scheme)
if 1 in self.partitions and len(self.base_paths) > 1:
scheme = self.build_data_links_cluster_zero_partition(test)
self.build_data_links_scheme(scheme)
else:
print "Unknown test."
exit()
def build_data_links_scheme(self, scheme):
'''Build all the data links based on the scheme information.'''
for (data_disk, data_index, partition, data_path, link_path) in scheme:
self.add_collection_links_for(data_path, link_path, data_index)
def build_data_links_cluster_zero_partition(self, test):
'''Build a scheme for all data in one symbolically linked folder. (0 partition)'''
scheme = []
link_base_schemes = get_cluster_link_scheme(len(self.nodes), 1, self.base_paths, self.DATA_LINKS_FOLDER + test)
for link_node, link_disk, link_virtual, link_index, link_path in link_base_schemes:
new_link_path = self.get_zero_partition_path(link_node, self.DATA_LINKS_FOLDER + test + "/" + str(link_node) + "nodes")
scheme.append([0, link_disk, 0, link_path, new_link_path])
return scheme
def build_data_links_local_zero_partition(self, test):
'''Build a scheme for all data in one symbolically linked folder. (0 partition)'''
scheme = []
index = 0
link_base_schemes = get_disk_partition_scheme(0, 1, self.base_paths, self.DATA_LINKS_FOLDER + test)
for link_node, link_disk, link_virtual, link_index, link_path in link_base_schemes:
if test == "local_batch_scale_out" and index > 0:
continue
new_link_path = self.get_zero_partition_path(link_node, self.DATA_LINKS_FOLDER + test)
scheme.append([0, index, 0, link_path, new_link_path])
index += 1
return scheme
def get_zero_partition_path(self, node, key):
'''Return a partition path for the zero partition.'''
base_path = self.base_paths[0]
new_link_path = get_disk_partition_scheme(node, 1, [base_path], key)[0][PARTITION_INDEX_PATH]
return new_link_path.replace("p1", "p0")
def get_current_node_index(self):
found = False
node_index = 0
for machine in self.nodes:
if socket.gethostname().startswith(machine.get_node_name()):
found = True
break
node_index += 1
if found:
return node_index
else:
return -1
def add_collection_links_for(self, real_path, link_path, index):
for collection in self.QUERY_COLLECTIONS:
collection_path = link_path + collection + "/"
collection_index = collection_path + "index" + str(index)
if not os.path.isdir(collection_path):
os.makedirs(collection_path)
if index >= 0:
if os.path.islink(collection_index):
os.unlink(collection_index)
os.symlink(real_path + collection + "/", collection_index)
def copy_query_files(self, reset):
for test in self.dataset.get_tests():
if test in self.BENCHMARK_LOCAL_TESTS:
self.copy_local_query_files(test, reset)
elif test in self.BENCHMARK_CLUSTER_TESTS:
self.copy_cluster_query_files(test, reset)
else:
print "Unknown test."
exit()
def copy_cluster_query_files(self, test, reset):
'''Determine the data_link path for cluster query files and copy with
new location for collection.'''
if 1 in self.partitions and len(self.base_paths) > 1:
for n in range(len(self.nodes)):
query_path = get_cluster_query_path(self.base_paths, test, 0, n)
prepare_path(query_path, reset)
# Copy query files.
new_link_path = self.get_zero_partition_path(n, self.DATA_LINKS_FOLDER + test + "/" + str(n) + "nodes")
self.copy_and_replace_query(query_path, [new_link_path])
for n in range(len(self.nodes)):
for p in self.partitions:
query_path = get_cluster_query_path(self.base_paths, test, p, n)
prepare_path(query_path, reset)
# Copy query files.
partition_paths = get_disk_partition_paths(n, p, self.base_paths, self.DATA_LINKS_FOLDER + test + "/" + str(n) + "nodes")
self.copy_and_replace_query(query_path, partition_paths)
def copy_local_query_files(self, test, reset):
'''Determine the data_link path for local query files and copy with
new location for collection.'''
if 1 in self.partitions and len(self.base_paths) > 1:
query_path = get_local_query_path(self.base_paths, test, 0)
prepare_path(query_path, reset)
# Copy query files.
new_link_path = self.get_zero_partition_path(0, self.DATA_LINKS_FOLDER + test)
self.copy_and_replace_query(query_path, [new_link_path])
for p in self.partitions:
query_path = get_local_query_path(self.base_paths, test, p)
prepare_path(query_path, reset)
# Copy query files.
partition_paths = get_disk_partition_paths(0, p, self.base_paths, self.DATA_LINKS_FOLDER + test)
self.copy_and_replace_query(query_path, partition_paths)
def copy_and_replace_query(self, query_path, replacement_list):
'''Copy the query files over to the query_path and replace the path
for the where the collection data is located.'''
for query_file in self.QUERY_FILE_LIST + self.QUERY_UTILITY_LIST:
shutil.copyfile(self.QUERY_MASTER_FOLDER + query_file, query_path + query_file)
# Make a search replace for each collection.
for collection in self.QUERY_COLLECTIONS:
replacement_list_with_type = []
for replace in replacement_list:
replacement_list_with_type.append(replace + collection)
replace_string = self.SEPERATOR.join(replacement_list_with_type)
for line in fileinput.input(query_path + query_file, True):
sys.stdout.write(line.replace(self.QUERY_REPLACEMENT_KEY + collection, replace_string))
# Make a search replace for partition type.
if self.dataset.get_partition_type() == "large_files":
for line in fileinput.input(query_path + query_file, True):
sys.stdout.write(line.replace("/stationCollection", "/" + self.LARGE_FILE_ROOT_TAG + "/stationCollection"))
for line in fileinput.input(query_path + query_file, True):
sys.stdout.write(line.replace("/dataCollection", "/" + self.LARGE_FILE_ROOT_TAG + "/dataCollection"))
def get_number_of_slices_per_disk(self):
if len(self.dataset.get_tests()) == 0:
print "No test has been defined in config file."
else:
for test in self.dataset.get_tests():
if test in self.BENCHMARK_LOCAL_TESTS:
return get_local_virtual_disk_partitions(self.partitions)
elif test in self.BENCHMARK_CLUSTER_TESTS:
return get_cluster_virtual_disk_partitions(self.nodes, self.partitions)
else:
print "Unknown test."
exit()
def get_cluster_link_scheme(nodes, partition, base_paths, key="partitions"):
link_paths = []
for n in range(0, nodes):
new_link_path = get_disk_partition_scheme(n, partition, base_paths, key + "/" + str(n) + "nodes")
link_paths.extend(new_link_path)
return link_paths
def get_local_query_path(base_paths, test, partition):
return base_paths[0] + "queries/" + test + "/" + get_local_query_folder(len(base_paths), partition) + "/"
def get_local_query_folder(disks, partitions):
return "d" + str(disks) + "_p" + str(partitions)
def get_cluster_query_path(base_paths, test, partition, nodes):
return base_paths[0] + "queries/" + test + "/" + str(nodes) + "nodes/" + get_local_query_folder(len(base_paths), partition) + "/"
def get_cluster_virtual_disk_partitions(nodes, partitions):
vp = get_local_virtual_disk_partitions(partitions)
vn = calculate_partitions(range(1, len(nodes) + 1, 1))
return vp * vn
def get_local_virtual_disk_partitions(partitions):
return calculate_partitions(partitions)
def calculate_partitions(list):
x = 1
for i in list:
if x % i != 0:
if i % x == 0:
x = i
else:
x *= i
return x
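# Illustrative sketch (not part of the benchmark): calculate_partitions keeps
# a common multiple of every listed partition count, so each scheme tiles the
# virtual slices evenly.
def _calculate_partitions_example():
    assert calculate_partitions([1, 2, 4]) == 4
    assert calculate_partitions([2, 3]) == 6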
import time
import RPi.GPIO as GPIO
GND_PIN = 6 # not used by program but needs to be connected!
LED_PIN = 12
SWITCH_PIN = 18
def test_output():
print('OUTPUT test')
for i in range(10):
time.sleep(0.1)
GPIO.output(LED_PIN, GPIO.HIGH)
if GPIO.input(LED_PIN) != GPIO.HIGH:
print('Read back of output failed.')
time.sleep(0.1)
GPIO.output(LED_PIN, GPIO.LOW)
if GPIO.input(LED_PIN) != GPIO.LOW:
print('Read back of output failed.')
def test_input():
print('INPUT test (Ctrl-C to stop)')
try:
while 1:
GPIO.output(LED_PIN, GPIO.input(SWITCH_PIN))
time.sleep(0.02) # 20 ms
except KeyboardInterrupt:
return
def test_rising():
def cb(chan):
xprint('Callback 1 - this should produce an exception')
def cb2(chan):
print('Callback 2 called - channel %s'%chan)
print('Rising edge test')
print('5 second sample for event_detected function')
try:
GPIO.add_event_detect(LED_PIN, GPIO.RISING)
print('Fail - added event to an output without producing RuntimeError')
except RuntimeError:
pass
try:
GPIO.add_event_detect(SWITCH_PIN, GPIO.HIGH)
print('Fail - managed to set HIGH as an event')
except ValueError:
pass
GPIO.add_event_detect(SWITCH_PIN, GPIO.RISING)
time.sleep(5)
if GPIO.event_detected(SWITCH_PIN):
print('Event detected')
else:
print('Event not detected')
print('5 seconds for callback function (which should produce exceptions)')
input('Press return to start: ')
GPIO.add_event_callback(SWITCH_PIN, cb)
GPIO.add_event_callback(SWITCH_PIN, cb2)
time.sleep(5)
GPIO.remove_event_detect(SWITCH_PIN)
print('Blocking wait for rising edge...')
GPIO.wait_for_edge(SWITCH_PIN, GPIO.RISING)
def test_falling():
def cb(chan):
print('Callback called - channel %s'%chan)
print('Falling edge test')
try:
GPIO.add_event_detect(SWITCH_PIN, GPIO.LOW)
print('Fail - managed to set LOW as an event')
except ValueError:
pass
print('5 second sample for event_detected function')
GPIO.add_event_detect(SWITCH_PIN, GPIO.FALLING)
time.sleep(5)
if GPIO.event_detected(SWITCH_PIN):
print('Event detected')
else:
print('Event not detected')
print('5 seconds for callback function')
input('Press return to start: ')
GPIO.remove_event_detect(SWITCH_PIN)
GPIO.add_event_detect(SWITCH_PIN, GPIO.FALLING, callback=cb)
time.sleep(5)
GPIO.remove_event_detect(SWITCH_PIN)
try:
GPIO.wait_for_edge(SWITCH_PIN, GPIO.LOW)
print('Fail - managed to wait for a LOW as an event')
except ValueError:
pass
print('Blocking wait for falling edge...')
GPIO.wait_for_edge(SWITCH_PIN, GPIO.FALLING)
def test_switchbounce():
global count
count = 0
def cb(chan):
global count
count += 1
print('Switch on channel %s pressed %s!'%(chan,count))
print('Switch bounce test - Ctrl-C to stop...')
GPIO.add_event_detect(SWITCH_PIN, GPIO.FALLING, callback=cb, bouncetime=200)
try:
while 1:
time.sleep(3600)
except KeyboardInterrupt:
pass
GPIO.remove_event_detect(SWITCH_PIN)
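# Note (based on the RPi.GPIO documentation): bouncetime=200 above suppresses
# further callbacks for 200 ms after an edge, so contact bounce from the
# mechanical switch registers as a single press.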
def test_gpio_function():
for chan in range(54):
f = GPIO.gpio_function(chan)
if f == GPIO.IN:
func = 'INPUT'
elif f == GPIO.OUT:
func = 'OUTPUT'
elif f == GPIO.ALT0:
func = 'ALT0'
else:
func = 'UNKNOWN'
print('chan=%s func=%s'%(chan,func))
def test_warnings():
GPIO.setwarnings(False)
print('No warning should be produced vvv')
GPIO.setup(8, GPIO.OUT) # is ALT0 serial TXD by default
print('Done!')
GPIO.setwarnings(True)
print('Warning should be produced vvv')
GPIO.setup(10, GPIO.OUT) # is ALT0 serial RXD by default
print('Done!')
def test_setup():
print('Running setup tests...')
print('GPIO.cleanup() - warning should be produced here vvv')
GPIO.cleanup()
print('^^^')
GPIO.setmode(GPIO.BOARD)
GPIO.setup(26, GPIO.OUT, initial=GPIO.HIGH) # or True
if not GPIO.input(26):
print('Initial state test failed')
GPIO.setup(LED_PIN, GPIO.OUT, initial=GPIO.LOW) # or False
if GPIO.input(LED_PIN):
print('Initial state test failed')
GPIO.setup(LED_PIN, GPIO.OUT)
try:
GPIO.setup(SWITCH_PIN, GPIO.IN, pull_up_down=GPIO.HIGH)
print('Fail - Managed to set pull_up_down of HIGH')
except ValueError:
pass
GPIO.setup(SWITCH_PIN, GPIO.IN, pull_up_down=GPIO.PUD_UP)
def test_soft_pwm():
print('Running software PWM tests (ctrl-c to stop)...')
pwm = GPIO.PWM(LED_PIN, 50) # frequency - 50Hz
pwm.start(0) # duty cycle - 0 (off)
try:
while 1: # make the LED fade in and out
for x in range(0,101,5):
pwm.ChangeDutyCycle(x)
time.sleep(0.1)
for x in range(100,-1,-5):
pwm.ChangeDutyCycle(x)
time.sleep(0.1)
except KeyboardInterrupt:
pass
pwm.stop()
def test_hard_pwm():
print('Hardware PWM - not yet implemented')
# main program starts here
while 1:
print('1 - Setup')
print('O - Output')
print('I - Input')
print('R - Rising edge')
print('F - Falling edge')
print('P - Software PWM')
print('H - Hardware PWM')
print('S - Switchbounce')
print('G - gpio_function')
print('B - Board revision')
print('W - Test warnings')
print('V - Version')
print('X - eXit')
command = input('Enter your choice: ').upper()
if command.startswith('1'):
test_setup()
elif command.startswith('O'):
test_output()
elif command.startswith('I'):
test_input()
elif command.startswith('R'):
test_rising()
elif command.startswith('F'):
test_falling()
elif command.startswith('P'):
test_soft_pwm()
elif command.startswith('H'):
test_hard_pwm()
elif command.startswith('S'):
test_switchbounce()
elif command.startswith('G'):
test_gpio_function()
elif command.startswith('W'):
test_warnings()
elif command.startswith('B'):
print('Board revision -', GPIO.RPI_REVISION)
elif command.startswith('V'):
print('RPi.GPIO Version',GPIO.VERSION)
elif command.startswith('X'):
break
GPIO.cleanup()
#!/usr/bin/env python
'''USAGE: bigsmall.py KO_expressionfile --name organism_name --iters 1000
The function of this script is to convert lists of genes to unweighted,
directed graphs and compute the importance of each compound to metabolism
based on the expression of adjacent enzyme nodes.
'''
# Written by Matthew Jenior, University of Michigan, Schloss Laboratory, 2016-2017
# Dependencies:
# The script itself needs to be run from a directory containing the /support/ sub-directory
# The only argument is a 2-column text file containing KO codes with their corresponding expression values
# Example:
# K00045 0
# K03454 4492
# K10021 183
# ...
# Knnnnn n
# Generated files: a new directory in ./ ending in ".bipartite.files" that contains all output, including:
# A 2 column directed, bipartite network file of compounds and enzymes
# A text file containing reference errors thrown during the translation of KOs to chemical equations
# A text file containing user defined parameters
# List of unique compound nodes
# List of unique enzymes nodes
# Table containing network topology
# Table containing importance values and significance (when applicable)
#---------------------------------------------------------------------------------------#
# Import python modules
import sys
import os
import pickle
import math
import argparse
import random
import numpy
import time
import datetime
import scipy
import scipy.stats
#---------------------------------------------------------------------------------------#
# Start timer
start = time.time()
#---------------------------------------------------------------------------------------#
# User defined arguments
parser = argparse.ArgumentParser(description='Generates bipartite metabolic models and calculates the importance of substrate nodes based on gene expression.')
parser.add_argument('input_file')
parser.add_argument('--name', default='default', help='Organism or other name for the KO+expression file (defaults to a timestamp-based name)')
parser.add_argument('--iters', default='1000', help='Number of iterations of probability distribution for score comparison')
args = parser.parse_args()
# Assign variables
KO_input_file = str(args.input_file)
file_name = str(args.name)
iterations = int(args.iters)
#---------------------------------------------------------------------------------------#
# Check for input errors
if KO_input_file == 'input_file':
print('No KO+expression file provided. Aborting.')
sys.exit()
elif os.stat(KO_input_file).st_size == 0:
print('Empty input file provided. Aborting.')
sys.exit()
elif file_name == '':
print('Invalid names argument provided. Aborting.')
sys.exit()
elif iterations < 0:
print('Invalid iterations value. Aborting.')
sys.exit()
# Make sure no spaces are in the name argument
file_name = file_name.replace(' ', '_')
#---------------------------------------------------------------------------------------#
# Define the functions
# Calculates mean and confidence intervals at the desired level for a given distribution
def calc_confidence(raw_distribution, confidence):
raw_array = 1.0 * numpy.array(raw_distribution)
array_length = len(raw_array)
array_mean = numpy.mean(raw_array)  # no trailing comma: a tuple here would break the arithmetic below
array_standarderror = scipy.stats.sem(raw_array)
array_confidence = array_standarderror * scipy.stats.t._ppf((1 + confidence) / 2., array_length - 1)
return array_mean, array_mean - array_confidence, array_mean + array_confidence
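# Illustrative sketch (not part of bigSMALL): mean plus symmetric 95%
# confidence bounds for a small sample, using the t distribution above.
def _calc_confidence_example():
    mean, lower, upper = calc_confidence([1.0, 2.0, 3.0, 4.0, 5.0], 0.95)
    return mean, lower, upper   # mean == 3.0, bounds roughly 1.04 and 4.96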
# Create a dictionary for transcript value associated with its KO
def transcription_dictionary(KO_file):
seq_total = 0 # Total number of reads
seq_max = 0 # Highest single number of reads
transcript_dict = {} # Dictionary for transcription
for line in KO_file:
entry = line.split()
ko = str(entry[0]).strip('ko:')
expression = float(entry[1])
seq_total += expression
if not ko in transcript_dict.keys():
transcript_dict[ko] = expression
else:
transcript_dict[ko] = transcript_dict[ko] + expression
if transcript_dict[ko] > seq_max: seq_max = transcript_dict[ko]
return transcript_dict, seq_total, seq_max
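# Illustrative sketch (not part of bigSMALL): repeated KOs are summed while
# the running total and the per-KO maximum are tracked.
def _transcription_dictionary_example():
    lines = ['K00045\t0', 'K03454\t4492', 'K03454\t8']
    transcript_dict, total, peak = transcription_dictionary(lines)
    # transcript_dict == {'K00045': 0.0, 'K03454': 4500.0}
    # total == 4500.0, peak == 4500.0
    return transcript_dict, total, peak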
# Translates a list of KOs to the bipartite graph
def network_dictionaries(KOs, ko_dict, reaction_dict):
# Set some starting points
triedCountKO = 0
excludedCountKO = 0
triedCountReact = 0
excludedCountReact = 0
totalIncludedReact = 0
network_list = []
compound_lst = []
KO_lst = []
ko_input_dict = {}
ko_output_dict = {}
# Nested loops to convert the KO list to a directed graph of input and output compounds
# Outside loop finds the biochemical reactions corresponding to the given KO
print('Translating KEGG orthologs to bipartite enzyme-to-compound graph...\n')
with open('key_error.log', 'w') as errorfile:
for current_ko in KOs:
triedCountKO += 1
if not current_ko in ko_input_dict:
ko_input_dict[current_ko] = []
ko_output_dict[current_ko] = []
try:
reaction_number = ko_dict[current_ko]
except KeyError:
errorString = 'WARNING: ' + str(current_ko) + ' not found in KO-to-Reaction dictionary. Omitting.\n'
errorfile.write(errorString)
excludedCountKO += 1
continue
# Inner loop translates the reaction codes to collections of input and output compounds
for index in reaction_number:
triedCountReact += 1
try:
reaction_collection = reaction_dict[index]
except KeyError:
errorString = 'WARNING: ' + str(index) + ' not found in Reaction-to-Compound dictionary. Omitting.\n'
errorfile.write(errorString)
excludedCountReact += 1
continue
# The innermost loop creates two columns of input and output compounds, incorporating reversibility information
KO_lst.append(current_ko)
for x in reaction_collection:
totalIncludedReact += 1
# Split reaction input and output as well as the list of compounds with each
reaction_info = x.split(':')
input_compounds = reaction_info[0].split('|')
output_compounds = reaction_info[2].split('|')
rev = reaction_info[1].split('|')
for input_index in input_compounds:
network_list.append([str(input_index), str(current_ko)])
ko_input_dict[current_ko].append(str(input_index))
if 'R' in rev:  # reaction is reversible
network_list.append([str(current_ko), str(input_index)])
ko_output_dict[current_ko].append(str(input_index))
compound_lst.append(str(input_index))
for output_index in output_compounds:
network_list.append([str(current_ko), str(output_index)])
ko_output_dict[current_ko].append(str(output_index))
if 'R' in rev:  # reaction is reversible
network_list.append([str(output_index), str(current_ko)])
ko_input_dict[current_ko].append(str(output_index))
compound_lst.append(str(output_index))
error_string = '''KOs successfully translated to Reactions: {KO_success}
KOs unsuccessfully translated to Reactions: {KO_failed}
Reactions successfully translated to Compounds: {Reaction_success}
Reactions unsuccessfully translated to Compounds: {Reaction_failed}
'''.format(KO_success = str(triedCountKO - excludedCountKO), KO_failed = str(excludedCountKO), Reaction_success = str(triedCountReact - excludedCountReact), Reaction_failed = str(excludedCountReact))
errorfile.write(error_string)
network_list = [list(x) for x in set(tuple(x) for x in network_list)] # List of unique edges (KOs and compounds)
compound_lst = list(set(compound_lst))
KO_lst = list(set(KO_lst))
errorfile.close()
print('Done.\n')
return network_list, ko_input_dict, ko_output_dict, compound_lst, KO_lst
# Compile surrounding input and output node transcripts into a dictionary, same for degree information
def compile_transcripts(transcript_dictionary, ko_input_dict, ko_output_dict, compound_lst, KO_lst):
compound_transcript_dict = {}
compound_degree_dict = {}
for compound in compound_lst:
if compound[0] != 'C': continue
compound_transcript_dict[compound] = [0, 0] # [input, output]
compound_degree_dict[compound] = [0, 0] # [indegree, outdegree]
for ko in KO_lst:
transcription = transcript_dictionary[ko]
input_compounds = ko_input_dict[ko]
output_compounds = ko_output_dict[ko]
# Add transcription
for compound in input_compounds:
if compound[0] != 'C': continue
compound_transcript_dict[compound][0] = compound_transcript_dict[compound][0] + transcription
compound_degree_dict[compound][1] = compound_degree_dict[compound][1] + 1
for compound in output_compounds:
if compound[0] != 'C': continue
compound_transcript_dict[compound][1] = compound_transcript_dict[compound][1] + transcription
compound_degree_dict[compound][0] = compound_degree_dict[compound][0] + 1
return compound_transcript_dict, compound_degree_dict
# Calculate input and output scores and well as degree of each compound node
def calculate_score(compound_transcript_dict, compound_degree_dict, compound_name_dict, compound_lst):
score_dict = {}
degree_dict = {}
# Calculate metabolite scores integrating input and output reactions weightings
for compound in compound_lst:
if compound[0] != 'C': continue
score_dict[compound] = []
degree_dict[compound] = []
compound_name = compound_name_dict[compound]
indegree = compound_degree_dict[compound][0]
outdegree = compound_degree_dict[compound][1]
input_transcription = compound_transcript_dict[compound][0]
output_transcription = compound_transcript_dict[compound][1]
if outdegree == 0.0:
input_score = 0.0
else:
input_score = input_transcription / outdegree
if indegree == 0.0:
output_score = 0.0
else:
output_score = output_transcription / indegree
score_difference = input_score - output_score
# Log transform final scores
if score_difference == 0:
final_score = 0.0
elif score_difference < 0:
final_score = math.log(abs(score_difference - 1), 2) * -1
else:
final_score = math.log((score_difference + 1), 2)
final_score = float("%.3f" % final_score)
score_dict[compound].extend((compound_name, final_score))
degree_dict[compound].extend((compound_name, indegree, outdegree))
return score_dict, degree_dict
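# Illustrative worked example (not part of bigSMALL): a compound consumed by
# 3 reactions carrying 120 total transcripts and produced by 2 reactions
# carrying 40 total transcripts scores
#   input_score  = 120 / 3 = 40
#   output_score = 40 / 2  = 20
#   difference   = 20  ->  final_score = log2(20 + 1) ~= 4.392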
# Perform iterative simulation to create confidence interval for compound importance values
def probability_distribution(ko_input_dict, ko_output_dict, degree_dict, kos, compound_name_dict, seq_total, seq_max, compound_lst, transcription_dict, iterations):
# Screen transcript distribution for those KOs included in the metabolic network
transcript_distribution = []
for index in kos:
transcript_distribution.append(int(transcription_dict[index]))
print 'Permuting transcript distributions...\n'
increment = 100.0 / float(iterations)
progress = 0.0
sys.stdout.write('\rProgress: ' + str(progress) + '%')
sys.stdout.flush()
all_distributions = []
for index in range(iterations):
# Generate bootstrapped transcript distributions
transcript_distribution = random.sample(transcript_distribution, len(kos))
if not tuple(transcript_distribution) in all_distributions:
all_distributions.append(tuple(transcript_distribution))
progress += increment
progress = float("%.3f" % progress)
sys.stdout.write('\rProgress: ' + str(progress) + '%')
sys.stdout.flush()
sys.stdout.write('\rDone. \n\n')
distribution_dict = {}
for compound in compound_lst:
if compound[0] != 'C': continue
distribution_dict[compound] = []
# Memory intensive
print 'Calculating importance scores for ' + str(iterations) + ' probability distributions...\n'
progress = 0.0
sys.stdout.write('\rProgress: ' + str(progress) + '%')
sys.stdout.flush()
for index in all_distributions:
current_distribution = list(index)
sim_transcript_dict = {}
for index in range(len(kos)):
sim_transcript_dict[kos[index]] = current_distribution[index]
substrate_dict, degree_dict = compile_transcripts(sim_transcript_dict, ko_input_dict, ko_output_dict, compound_lst, kos)
score_dict, degree_dict = calculate_score(substrate_dict, degree_dict, compound_name_dict, compound_lst)
# Make dictionaries of scores for each compound for each direction
for compound in compound_lst:
if compound[0] != 'C': continue
distribution_dict[compound].append(score_dict[compound][1])
progress += increment
progress = float("%.3f" % progress)
sys.stdout.write('\rProgress: ' + str(progress) + '%')
sys.stdout.flush()
sys.stdout.write('\rDone. \n\n')
print 'Calculating summary statistics of each importance score distribution...\n'
# Compile the scores for each compound and find the median and standard deviation
interval_lst = []
m = len(compound_lst) * 0.033 # Calculate factor to expand the confidence interval by
# Needed to make a much more strict cutoff due to the random nature of the distributions
for compound in compound_lst:
if compound[0] != 'C': continue
# Get the distribution
current_dist = sorted(list(distribution_dict[compound]))
current_median = numpy.median(current_dist)
# Bonett DG & Price RM. (2002). Statistical inference for a linear function of medians: confidence intervals,
# hypothesis testing, and sample size requirements. Psychol Methods. 7(3):370-83.
n = len(current_dist)
q = 0.5
nq = n * q
current_range = m * math.sqrt(n * q * (1 - q))
j = int(math.ceil(nq - current_range) - 1)
k = int(math.ceil(nq + current_range) - 1)
lower_95 = current_dist[j]
upper_95 = current_dist[k]
interval_lst.append([compound, lower_95, current_median, upper_95])
print 'Done.\n'
return interval_lst
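# Illustrative arithmetic (not part of bigSMALL): with n = 1000 permuted
# scores, q = 0.5 gives nq = 500 and sqrt(n*q*(1-q)) ~= 15.8, so the interval
# spans the order statistics at indices ceil(500 -/+ m*15.8) - 1 around the
# median, widened by the factor m computed above.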
# Compare randomized confidence intervals and format final data structures
def confidence_interval(score_dict, interval_lst, degree_dict):
labeled_confidence = []
sig_count = 0
for index in interval_lst:
current_compound = index[0]
current_name = score_dict[current_compound][0]
current_indegree = degree_dict[current_compound][1]
current_outdegree = degree_dict[current_compound][2]
current_score = float(score_dict[current_compound][1])
current_median = float(index[2])
current_simlower_95conf = float(index[1])
current_simupper_95conf = float(index[3])
if current_score > current_median:
current_relationship = 'above'
elif current_score < current_median:
current_relationship = 'below'
else:
current_relationship = 'none'
if current_score > current_simupper_95conf:
current_sig = '<0.05'
sig_count += 1
elif current_score < current_simlower_95conf:
current_sig = '<0.05'
sig_count += 1
else:
current_sig = 'n.s.'
labeled_confidence.append([current_compound, current_name, current_score, current_sig])
print('Detected significance for ' + str(sig_count) + ' of ' + str(len(interval_lst)) + ' total metabolites.\n')
return labeled_confidence
# Function to write lists to files
def write_list(header, out_lst, file_name):
with open(file_name, 'w') as out_file:
if not header == 'none': out_file.write(header)
for index in out_lst:
index = [str(x) for x in index]
index[-1] = str(index[-1]) + '\n'
out_file.write('\t'.join(index))
out_file.close()
# Specialized version of previous function
def write_list_short(header, out_lst, file_name):
with open(file_name, 'w') as out_file:
if not header == 'none': out_file.write(header)
for index in out_lst:
index = [str(x) for x in index]
index[-1] = str(index[-1]) + '\n'
out_file.write(''.join(index))
out_file.close()
# Function to write dictionaries to files (next 2 functions are similar)
def write_dictionary(header, out_dict, file_name):
all_keys = list(set(out_dict.keys()))
with open(file_name, 'w') as out_file:
if not header == 'none': out_file.write(header)
for index in all_keys:
elements = out_dict[index]
elements.insert(0, index)
elements = [str(x) for x in elements]
elements[-1] = elements[-1] + '\n'
out_file.write('\t'.join(elements))
out_file.close()
def write_dictionary_short(header, out_dict, file_name):
all_keys = list(set(out_dict.keys()))
with open(file_name, 'w') as out_file:
if not header == 'none': out_file.write(header)
for index in all_keys:
entry = index + '\t' + str(out_dict[index]) + '\n'
out_file.write(entry)
out_file.close()
def write_dictionary_list(header, out_dict, file_name):
all_keys = list(set(out_dict.keys()))
with open(file_name, 'w') as out_file:
if not header == 'none': out_file.write(header)
for index in all_keys:
entry = index + '\t' + ','.join(out_dict[index]) + '\n'
out_file.write(entry)
out_file.close()
##########################################################################################
# #
# Do The Analysis! #
# #
##########################################################################################
# Citation text
print '''\nbigSMALL v1.4
Released: 12/1/2016
Updated: 5/17/2017
by
Matthew L. Jenior
Department of Microbiology & Immunology
University of Michigan
mljenior@umich.edu
When using, please cite:
Jenior ML, Leslie JL, Young VB, & Schloss PD. (2017). Clostridium difficile colonizes alternative
nutrient niches during infection across distinct murine gut microbiomes. mSystems. 2 (4); e00063-17.
Distributed under the GNU General Public License\n\n'''
#---------------------------------------------------------------------------------------#
# Print organism name to screen to track progress in case of loop
if file_name != 'default':
print '\nImputing metabolism for ' + file_name + '\n'
else:
# Fall back to a timestamp-based name
current_time = datetime.datetime.now()
file_name = current_time.strftime('%H%M%S_%d_%m_%Y')
# Read in and create dictionary for expression
with open(KO_input_file, 'r') as KO_file:
transcript_dict, total, seq_max = transcription_dictionary(KO_file)
all_KO_lst = transcript_dict.keys()
#---------------------------------------------------------------------------------------#
# Determine starting directory
starting_directory = str(os.getcwd())
script_path = str(os.path.dirname(os.path.realpath(__file__)))
# Create and navigate to new output directory
directory = str(os.getcwd()) + '/' + file_name + '.bipartite.files'
if not os.path.exists(directory):
os.makedirs(directory)
os.chdir(directory)
#---------------------------------------------------------------------------------------#
# Create a dictionary of KO expression scores and load KEGG dictionaries
print('\nReading in KEGG dictionaries...\n')
# Read in pickled KO to reaction dictionary
ko_reactionpkl_path = script_path + '/support/ko_reaction.pkl'
ko_dictionary = pickle.load(open(ko_reactionpkl_path, 'rb'))
# Read in pickled reaction to reaction_mapformula dictionary
#reaction_mapformulapkl_path = script_path + '/support/reaction_mapformula.pkl'
reaction_mapformulapkl_path = script_path + '/support/reaction_mapformula_nonrev.pkl'
reaction_dictionary = pickle.load(open(reaction_mapformulapkl_path, 'rb'))
# Read in pickled compound name dictionary
compoundpkl_path = script_path + '/support/compound.pkl'
compound_name_dictionary = pickle.load(open(compoundpkl_path, 'rb'))
print('Done.\n')
#---------------------------------------------------------------------------------------#
# Call translate function and separate output lists
reaction_graph, ko_input_dict, ko_output_dict, compound_lst, KO_lst = network_dictionaries(all_KO_lst, ko_dictionary, reaction_dictionary)
#---------------------------------------------------------------------------------------#
# Write compounds and enzymes to files
write_list_short('none', compound_lst, 'metabolite.lst')
write_list_short('none', KO_lst, 'enzyme.lst')
# Write network to a two column matrix for use in Neo4j or R
write_list('none', reaction_graph, 'graph.tsv')
#---------------------------------------------------------------------------------------#
# Calculate actual importance scores for each compound in the network
print 'Calculating metabolite connectedness and importance scores...\n'
compound_transcript_dict, compound_degree_dict = compile_transcripts(transcript_dict, ko_input_dict, ko_output_dict, compound_lst, KO_lst)
score_dict, degree_dict = calculate_score(compound_transcript_dict, compound_degree_dict, compound_name_dictionary, compound_lst)
print 'Done.\n'
#---------------------------------------------------------------------------------------#
# Calculate simulated importance values if specified
if iterations >= 1:
interval_lst = probability_distribution(ko_input_dict, ko_output_dict, degree_dict, KO_lst, compound_name_dictionary, total, seq_max, compound_lst, transcript_dict, iterations)
final_data = confidence_interval(score_dict, interval_lst, degree_dict)
# Write all the calculated data to files
print 'Writing importance scores and significance to output file...\n'
outname = 'importances.tsv'
write_list('Compound_code\tMetabolite_name\tImportance_score\tp_value\n', final_data, outname)
outname = 'confidence_intervals.tsv'
write_list('Compound_code\tLower_95_CI\tSim_Median\tUpper_95_CI\n', interval_lst, outname)
print 'Done.\n'
# If simulation not performed, write only scores calculated from measured expression to files
else:
print 'Writing importance scores to output file...\n'
outname = 'importances.tsv'
write_dictionary('Compound_code\tMetabolite_name\tImportance_score\n', score_dict, outname)
print 'Done.\n'
print 'Writing network topology and transcript counts to files...\n'
outname = 'topology.tsv'
write_dictionary('Compound_code\tMetabolite_name\tIndegree\tOutdegree\n', degree_dict, outname)
outname = 'KO_mapping.tsv'
write_dictionary_short('KO_code\tTranscripts\n', transcript_dict, outname)
outname = 'input_metabolites.tsv'
write_dictionary_list('KO_code\tCompound_codes\n', ko_input_dict, outname)
outname = 'output_metabolites.tsv'
write_dictionary_list('KO_code\tCompound_codes\n', ko_output_dict, outname)
print 'Done.\n'
#---------------------------------------------------------------------------------------#
# Wrap everything up
# Report the elapsed time
end = time.time()
duration = str(int(end - start))
print '\nCompleted in ' + duration + ' seconds.\n'
print 'Output files located in: ' + directory + '\n\n'
# Define calculation selection with a string
if iterations >= 1:
iter_str = 'yes'
else:
iter_str = 'no'
time_unit = 'seconds'
if int(duration) >= 120:
duration = int(duration) / 60
time_unit = 'minutes'
if int(duration) >= 120:
duration = int(duration) / 60
time_unit = 'hours'
# Write parameters to a file
with open('parameters.txt', 'w') as parameter_file:
outputString = '''User Defined Parameters
KO expression file: {ko}
Graph name: {name}
KEGG ortholog nodes: {kos}
Substrate nodes: {substrate}
Probability distribution generated: {iter}
Permutations: {perms}
Duration: {time} {tunit}
'''.format(ko=str(KO_input_file), name=str(file_name), iter=iter_str, kos=str(len(KO_lst)), substrate=str(len(compound_lst)), perms=str(iterations), time=str(duration), tunit=time_unit)
parameter_file.write(outputString)
# Return to the directory the script was called to
os.chdir(starting_directory)
# Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from collections import OrderedDict
from distutils import versionpredicate
import netaddr
from oslo_utils import strutils
from oslo_versionedobjects import fields
import six
# TODO(berrange) Temporary import for Arch class
from nova.compute import arch
# TODO(berrange) Temporary import for CPU* classes
from nova.compute import cpumodel
# TODO(berrange) Temporary import for HVType class
from nova.compute import hv_type
# TODO(berrange) Temporary import for VMMode class
from nova.compute import vm_mode
from nova import exception
from nova.i18n import _
from nova.network import model as network_model
# Import field errors from oslo.versionedobjects
KeyTypeError = fields.KeyTypeError
ElementTypeError = fields.ElementTypeError
# Import fields from oslo.versionedobjects
BooleanField = fields.BooleanField
UnspecifiedDefault = fields.UnspecifiedDefault
IntegerField = fields.IntegerField
UUIDField = fields.UUIDField
FloatField = fields.FloatField
StringField = fields.StringField
EnumField = fields.EnumField
DateTimeField = fields.DateTimeField
DictOfStringsField = fields.DictOfStringsField
DictOfNullableStringsField = fields.DictOfNullableStringsField
DictOfIntegersField = fields.DictOfIntegersField
ListOfStringsField = fields.ListOfStringsField
SetOfIntegersField = fields.SetOfIntegersField
ListOfSetsOfIntegersField = fields.ListOfSetsOfIntegersField
ListOfDictOfNullableStringsField = fields.ListOfDictOfNullableStringsField
DictProxyField = fields.DictProxyField
# NOTE(danms): These are things we need to import for some of our
# own implementations below, our tests, or other transitional
# bits of code. These should be removable after we finish our
# conversion
Enum = fields.Enum
Field = fields.Field
FieldType = fields.FieldType
Set = fields.Set
Dict = fields.Dict
List = fields.List
class Architecture(Enum):
# TODO(berrange): move all constants out of 'nova.compute.arch'
# into fields on this class
def __init__(self, **kwargs):
super(Architecture, self).__init__(
valid_values=arch.ALL, **kwargs)
def coerce(self, obj, attr, value):
try:
value = arch.canonicalize(value)
except exception.InvalidArchitectureName:
msg = _("Architecture name '%s' is not valid") % value
raise ValueError(msg)
return super(Architecture, self).coerce(obj, attr, value)
class BlockDeviceDestinationType(Enum):
"""Represents possible destination_type values for a BlockDeviceMapping."""
LOCAL = 'local'
VOLUME = 'volume'
ALL = (LOCAL, VOLUME)
def __init__(self):
super(BlockDeviceDestinationType, self).__init__(
valid_values=BlockDeviceDestinationType.ALL)
class BlockDeviceSourceType(Enum):
"""Represents the possible source_type values for a BlockDeviceMapping."""
BLANK = 'blank'
IMAGE = 'image'
SNAPSHOT = 'snapshot'
VOLUME = 'volume'
ALL = (BLANK, IMAGE, SNAPSHOT, VOLUME)
def __init__(self):
super(BlockDeviceSourceType, self).__init__(
valid_values=BlockDeviceSourceType.ALL)
class BlockDeviceType(Enum):
"""Represents possible device_type values for a BlockDeviceMapping."""
CDROM = 'cdrom'
DISK = 'disk'
FLOPPY = 'floppy'
FS = 'fs'
LUN = 'lun'
ALL = (CDROM, DISK, FLOPPY, FS, LUN)
def __init__(self):
super(BlockDeviceType, self).__init__(
valid_values=BlockDeviceType.ALL)
class CPUAllocationPolicy(Enum):
DEDICATED = "dedicated"
SHARED = "shared"
ALL = (DEDICATED, SHARED)
def __init__(self):
super(CPUAllocationPolicy, self).__init__(
valid_values=CPUAllocationPolicy.ALL)
class CPUMode(Enum):
# TODO(berrange): move all constants out of 'nova.compute.cpumodel'
# into fields on this class
def __init__(self, **kwargs):
super(CPUMode, self).__init__(
valid_values=cpumodel.ALL_CPUMODES, **kwargs)
class CPUMatch(Enum):
# TODO(berrange): move all constants out of 'nova.compute.cpumodel'
# into fields on this class
def __init__(self, **kwargs):
super(CPUMatch, self).__init__(
valid_values=cpumodel.ALL_MATCHES, **kwargs)
class CPUFeaturePolicy(Enum):
# TODO(berrange): move all constants out of 'nova.compute.cpumodel'
# into fields on this class
def __init__(self, **kwargs):
super(CPUFeaturePolicy, self).__init__(
valid_values=cpumodel.ALL_POLICIES, **kwargs)
class DiskBus(Enum):
FDC = "fdc"
IDE = "ide"
SATA = "sata"
SCSI = "scsi"
USB = "usb"
VIRTIO = "virtio"
XEN = "xen"
ALL = (FDC, IDE, SATA, SCSI, USB, VIRTIO, XEN)
def __init__(self):
super(DiskBus, self).__init__(
valid_values=DiskBus.ALL)
class HVType(Enum):
# TODO(berrange): move all constants out of 'nova.compute.hv_type'
# into fields on this class
def __init__(self):
super(HVType, self).__init__(
valid_values=hv_type.ALL)
def coerce(self, obj, attr, value):
try:
value = hv_type.canonicalize(value)
except exception.InvalidHypervisorVirtType:
msg = _("Hypervisor virt type '%s' is not valid") % value
raise ValueError(msg)
return super(HVType, self).coerce(obj, attr, value)
class OSType(Enum):
LINUX = "linux"
WINDOWS = "windows"
ALL = (LINUX, WINDOWS)
def __init__(self):
super(OSType, self).__init__(
valid_values=OSType.ALL)
def coerce(self, obj, attr, value):
# Some code/docs use upper case or initial caps
# so canonicalize to all lower case
value = value.lower()
return super(OSType, self).coerce(obj, attr, value)
class RNGModel(Enum):
VIRTIO = "virtio"
ALL = (VIRTIO,)
def __init__(self):
super(RNGModel, self).__init__(
valid_values=RNGModel.ALL)
class SCSIModel(Enum):
BUSLOGIC = "buslogic"
IBMVSCSI = "ibmvscsi"
LSILOGIC = "lsilogic"
LSISAS1068 = "lsisas1068"
LSISAS1078 = "lsisas1078"
VIRTIO_SCSI = "virtio-scsi"
VMPVSCSI = "vmpvscsi"
ALL = (BUSLOGIC, IBMVSCSI, LSILOGIC, LSISAS1068,
LSISAS1078, VIRTIO_SCSI, VMPVSCSI)
def __init__(self):
super(SCSIModel, self).__init__(
valid_values=SCSIModel.ALL)
def coerce(self, obj, attr, value):
# Some compat for strings we'd see in the legacy
# vmware_adaptertype image property
value = value.lower()
if value == "lsilogicsas":
value = SCSIModel.LSISAS1068
elif value == "paravirtual":
value = SCSIModel.VMPVSCSI
return super(SCSIModel, self).coerce(obj, attr, value)
class VideoModel(Enum):
CIRRUS = "cirrus"
QXL = "qxl"
VGA = "vga"
VMVGA = "vmvga"
XEN = "xen"
ALL = (CIRRUS, QXL, VGA, VMVGA, XEN)
def __init__(self):
super(VideoModel, self).__init__(
valid_values=VideoModel.ALL)
class VIFModel(Enum):
LEGACY_VALUES = {"virtuale1000":
network_model.VIF_MODEL_E1000,
"virtuale1000e":
network_model.VIF_MODEL_E1000E,
"virtualpcnet32":
network_model.VIF_MODEL_PCNET,
"virtualsriovethernetcard":
network_model.VIF_MODEL_SRIOV,
"virtualvmxnet":
network_model.VIF_MODEL_VMXNET,
"virtualvmxnet3":
network_model.VIF_MODEL_VMXNET3,
}
def __init__(self):
super(VIFModel, self).__init__(
valid_values=network_model.VIF_MODEL_ALL)
def _get_legacy(self, value):
return value
def coerce(self, obj, attr, value):
# Some compat for strings we'd see in the legacy
# hw_vif_model image property
value = value.lower()
value = VIFModel.LEGACY_VALUES.get(value, value)
return super(VIFModel, self).coerce(obj, attr, value)
class VMMode(Enum):
# TODO(berrange): move all constants out of 'nova.compute.vm_mode'
# into fields on this class
def __init__(self):
super(VMMode, self).__init__(
valid_values=vm_mode.ALL)
def coerce(self, obj, attr, value):
try:
value = vm_mode.canonicalize(value)
except exception.InvalidVirtualMachineMode:
msg = _("Virtual machine mode '%s' is not valid") % value
raise ValueError(msg)
return super(VMMode, self).coerce(obj, attr, value)
class WatchdogAction(Enum):
NONE = "none"
PAUSE = "pause"
POWEROFF = "poweroff"
RESET = "reset"
ALL = (NONE, PAUSE, POWEROFF, RESET)
def __init__(self):
super(WatchdogAction, self).__init__(
valid_values=WatchdogAction.ALL)
class MonitorMetricType(Enum):
CPU_FREQUENCY = "cpu.frequency"
CPU_USER_TIME = "cpu.user.time"
CPU_KERNEL_TIME = "cpu.kernel.time"
CPU_IDLE_TIME = "cpu.idle.time"
CPU_IOWAIT_TIME = "cpu.iowait.time"
CPU_USER_PERCENT = "cpu.user.percent"
CPU_KERNEL_PERCENT = "cpu.kernel.percent"
CPU_IDLE_PERCENT = "cpu.idle.percent"
CPU_IOWAIT_PERCENT = "cpu.iowait.percent"
CPU_PERCENT = "cpu.percent"
NUMA_MEM_BW_MAX = "numa.membw.max"
NUMA_MEM_BW_CURRENT = "numa.membw.current"
ALL = (
CPU_FREQUENCY,
CPU_USER_TIME,
CPU_KERNEL_TIME,
CPU_IDLE_TIME,
CPU_IOWAIT_TIME,
CPU_USER_PERCENT,
CPU_KERNEL_PERCENT,
CPU_IDLE_PERCENT,
CPU_IOWAIT_PERCENT,
CPU_PERCENT,
NUMA_MEM_BW_MAX,
NUMA_MEM_BW_CURRENT,
)
def __init__(self):
super(MonitorMetricType, self).__init__(
valid_values=MonitorMetricType.ALL)
# NOTE(sbauza): Remove this on next release of oslo.versionedobjects
class VersionPredicate(fields.String):
@staticmethod
def coerce(obj, attr, value):
try:
versionpredicate.VersionPredicate('check (%s)' % value)
except ValueError:
raise ValueError(_('Version %(val)s is not a valid predicate in '
'field %(attr)s') %
{'val': value, 'attr': attr})
return value
class PciDeviceStatus(Enum):
AVAILABLE = "available"
CLAIMED = "claimed"
ALLOCATED = "allocated"
REMOVED = "removed" # The device has been hot-removed and not yet deleted
DELETED = "deleted" # The device is marked not available/deleted.
ALL = (AVAILABLE, CLAIMED, ALLOCATED, REMOVED, DELETED)
def __init__(self):
super(PciDeviceStatus, self).__init__(
valid_values=PciDeviceStatus.ALL)
class PciDeviceType(Enum):
# NOTE(jaypipes): It's silly that the word "type-" is in these constants,
# but alas, these were the original constant strings used...
STANDARD = "type-PCI"
SRIOV_PF = "type-PF"
SRIOV_VF = "type-VF"
ALL = (STANDARD, SRIOV_PF, SRIOV_VF)
def __init__(self):
super(PciDeviceType, self).__init__(
valid_values=PciDeviceType.ALL)
# NOTE(danms): Remove this on next release of oslo.versionedobjects
class FlexibleBoolean(fields.Boolean):
@staticmethod
def coerce(obj, attr, value):
return strutils.bool_from_string(value)
class IPAddress(FieldType):
@staticmethod
def coerce(obj, attr, value):
try:
return netaddr.IPAddress(value)
except netaddr.AddrFormatError as e:
raise ValueError(six.text_type(e))
def from_primitive(self, obj, attr, value):
return self.coerce(obj, attr, value)
@staticmethod
def to_primitive(obj, attr, value):
return str(value)
class IPV4Address(IPAddress):
@staticmethod
def coerce(obj, attr, value):
result = IPAddress.coerce(obj, attr, value)
if result.version != 4:
raise ValueError(_('Network "%(val)s" is not valid '
'in field %(attr)s') %
{'val': value, 'attr': attr})
return result
class IPV6Address(IPAddress):
@staticmethod
def coerce(obj, attr, value):
result = IPAddress.coerce(obj, attr, value)
if result.version != 6:
raise ValueError(_('Network "%(val)s" is not valid '
'in field %(attr)s') %
{'val': value, 'attr': attr})
return result
class IPV4AndV6Address(IPAddress):
@staticmethod
def coerce(obj, attr, value):
result = IPAddress.coerce(obj, attr, value)
if result.version != 4 and result.version != 6:
raise ValueError(_('Network "%(val)s" is not valid '
'in field %(attr)s') %
{'val': value, 'attr': attr})
return result
class IPNetwork(IPAddress):
@staticmethod
def coerce(obj, attr, value):
try:
return netaddr.IPNetwork(value)
except netaddr.AddrFormatError as e:
raise ValueError(six.text_type(e))
class IPV4Network(IPNetwork):
@staticmethod
def coerce(obj, attr, value):
try:
return netaddr.IPNetwork(value, version=4)
except netaddr.AddrFormatError as e:
raise ValueError(six.text_type(e))
class IPV6Network(IPNetwork):
@staticmethod
def coerce(obj, attr, value):
try:
return netaddr.IPNetwork(value, version=6)
except netaddr.AddrFormatError as e:
raise ValueError(six.text_type(e))
# FIXME(danms): Remove this after we convert to oslo.versionedobjects' registry
class Object(FieldType):
def __init__(self, obj_name, **kwargs):
self._obj_name = obj_name
super(Object, self).__init__(**kwargs)
def coerce(self, obj, attr, value):
try:
obj_name = value.obj_name()
except AttributeError:
obj_name = ""
if obj_name != self._obj_name:
raise ValueError(_('An object of type %(type)s is required '
'in field %(attr)s') %
{'type': self._obj_name, 'attr': attr})
return value
@staticmethod
def to_primitive(obj, attr, value):
return value.obj_to_primitive()
@staticmethod
def from_primitive(obj, attr, value):
# FIXME(danms): Avoid circular import from base.py
from nova.objects import base as obj_base
# NOTE (ndipanov): If they already got hydrated by the serializer, just
# pass them back unchanged
if isinstance(value, obj_base.NovaObject):
return value
return obj_base.NovaObject.obj_from_primitive(value, obj._context)
def describe(self):
return "Object<%s>" % self._obj_name
def stringify(self, value):
if 'uuid' in value.fields:
ident = '(%s)' % (value.obj_attr_is_set('uuid') and value.uuid or
'UNKNOWN')
elif 'id' in value.fields:
ident = '(%s)' % (value.obj_attr_is_set('id') and value.id or
'UNKNOWN')
else:
ident = ''
return '%s%s' % (self._obj_name, ident)
class NetworkModel(FieldType):
@staticmethod
def coerce(obj, attr, value):
if isinstance(value, network_model.NetworkInfo):
return value
elif isinstance(value, six.string_types):
# Hmm, do we need this?
return network_model.NetworkInfo.hydrate(value)
else:
raise ValueError(_('A NetworkModel is required in field %s') %
attr)
@staticmethod
def to_primitive(obj, attr, value):
return value.json()
@staticmethod
def from_primitive(obj, attr, value):
return network_model.NetworkInfo.hydrate(value)
def stringify(self, value):
return 'NetworkModel(%s)' % (
','.join([str(vif['id']) for vif in value]))
class NonNegativeFloat(FieldType):
@staticmethod
def coerce(obj, attr, value):
v = float(value)
if v < 0:
raise ValueError(_('Value must be >= 0 for field %s') % attr)
return v
class NonNegativeInteger(FieldType):
@staticmethod
def coerce(obj, attr, value):
v = int(value)
if v < 0:
raise ValueError(_('Value must be >= 0 for field %s') % attr)
return v
class AutoTypedField(fields.Field):
AUTO_TYPE = None
def __init__(self, **kwargs):
super(AutoTypedField, self).__init__(self.AUTO_TYPE, **kwargs)
# FIXME(danms): Remove this after oslo.versionedobjects gets it
class BaseEnumField(AutoTypedField):
'''This class should not be directly instantiated. Instead
subclass it and set AUTO_TYPE to be a SomeEnum()
where SomeEnum is a subclass of Enum.
'''
def __init__(self, **kwargs):
if self.AUTO_TYPE is None:
raise exception.EnumFieldUnset(
fieldname=self.__class__.__name__)
if not isinstance(self.AUTO_TYPE, Enum):
raise exception.EnumFieldInvalid(
                typename=self.AUTO_TYPE.__class__.__name__,
fieldname=self.__class__.__name__)
super(BaseEnumField, self).__init__(**kwargs)
def __repr__(self):
valid_values = self._type._valid_values
args = {
'nullable': self._nullable,
'default': self._default,
}
if valid_values:
args.update({'valid_values': valid_values})
args = OrderedDict(sorted(args.items()))
return '%s(%s)' % (self._type.__class__.__name__,
','.join(['%s=%s' % (k, v)
for k, v in args.items()]))
class ArchitectureField(BaseEnumField):
AUTO_TYPE = Architecture()
class BlockDeviceDestinationTypeField(BaseEnumField):
AUTO_TYPE = BlockDeviceDestinationType()
class BlockDeviceSourceTypeField(BaseEnumField):
AUTO_TYPE = BlockDeviceSourceType()
class BlockDeviceTypeField(BaseEnumField):
AUTO_TYPE = BlockDeviceType()
class CPUAllocationPolicyField(BaseEnumField):
AUTO_TYPE = CPUAllocationPolicy()
class CPUModeField(BaseEnumField):
AUTO_TYPE = CPUMode()
class CPUMatchField(BaseEnumField):
AUTO_TYPE = CPUMatch()
class CPUFeaturePolicyField(BaseEnumField):
AUTO_TYPE = CPUFeaturePolicy()
class DiskBusField(BaseEnumField):
AUTO_TYPE = DiskBus()
class HVTypeField(BaseEnumField):
AUTO_TYPE = HVType()
class OSTypeField(BaseEnumField):
AUTO_TYPE = OSType()
class RNGModelField(BaseEnumField):
AUTO_TYPE = RNGModel()
class SCSIModelField(BaseEnumField):
AUTO_TYPE = SCSIModel()
class VideoModelField(BaseEnumField):
AUTO_TYPE = VideoModel()
class VIFModelField(BaseEnumField):
AUTO_TYPE = VIFModel()
class VMModeField(BaseEnumField):
AUTO_TYPE = VMMode()
class WatchdogActionField(BaseEnumField):
AUTO_TYPE = WatchdogAction()
class MonitorMetricTypeField(BaseEnumField):
AUTO_TYPE = MonitorMetricType()
# FIXME(sbauza): Remove this after oslo.versionedobjects gets it
class VersionPredicateField(AutoTypedField):
AUTO_TYPE = VersionPredicate()
class PciDeviceStatusField(BaseEnumField):
AUTO_TYPE = PciDeviceStatus()
class PciDeviceTypeField(BaseEnumField):
AUTO_TYPE = PciDeviceType()
# FIXME(danms): Remove this after oslo.versionedobjects gets it
# This is a flexible interpretation of boolean
# values using common user friendly semantics for
# truth/falsehood. ie strings like 'yes', 'no',
# 'on', 'off', 't', 'f' get mapped to values you
# would expect.
class FlexibleBooleanField(AutoTypedField):
AUTO_TYPE = FlexibleBoolean()
class IPAddressField(AutoTypedField):
AUTO_TYPE = IPAddress()
class IPV4AddressField(AutoTypedField):
AUTO_TYPE = IPV4Address()
class IPV6AddressField(AutoTypedField):
AUTO_TYPE = IPV6Address()
class IPV4AndV6AddressField(AutoTypedField):
AUTO_TYPE = IPV4AndV6Address()
class IPNetworkField(AutoTypedField):
AUTO_TYPE = IPNetwork()
class IPV4NetworkField(AutoTypedField):
AUTO_TYPE = IPV4Network()
class IPV6NetworkField(AutoTypedField):
AUTO_TYPE = IPV6Network()
class ListOfIntegersField(AutoTypedField):
AUTO_TYPE = List(fields.Integer())
# FIXME(sbauza): Remove this after oslo.versionedobjects releases it
class DictOfListOfStringsField(AutoTypedField):
AUTO_TYPE = Dict(List(fields.String()))
class NonNegativeFloatField(AutoTypedField):
AUTO_TYPE = NonNegativeFloat()
class NonNegativeIntegerField(AutoTypedField):
AUTO_TYPE = NonNegativeInteger()
# FIXME(danms): Remove this after we convert to oslo.versionedobjects' registry
class ObjectField(AutoTypedField):
def __init__(self, objtype, **kwargs):
self.AUTO_TYPE = Object(objtype)
super(ObjectField, self).__init__(**kwargs)
# FIXME(danms): Remove this after we convert to oslo.versionedobjects' registry
class ListOfObjectsField(AutoTypedField):
def __init__(self, objtype, **kwargs):
self.AUTO_TYPE = List(Object(objtype))
super(ListOfObjectsField, self).__init__(**kwargs)
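# Illustrative sketch (hypothetical object, not part of the original module):
# these field helpers are normally consumed by versioned object classes that
# declare a 'fields' mapping of attribute name to field instance, e.g.:
#
#     class MyThing(base.NovaObject):
#         fields = {
#             'id': IntegerField(),
#             'hostname': StringField(nullable=True),
#             'arch': ArchitectureField(),
#             'tags': ListOfStringsField(default=[]),
#         }
#
# Assignment to a declared attribute then goes through the field's coerce()
# method, e.g. setting arch = 'x86_64' on such an object would be
# canonicalized and validated by Architecture.coerce() above.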
|
|
"""
LICENCE
-------
Copyright 2013 by Kitware, Inc. All Rights Reserved. Please refer to
KITWARE_LICENSE.TXT for licensing information, or contact General Counsel,
Kitware, Inc., 28 Corporate Drive, Clifton Park, NY 12065.
"""
__author__ = 'paul.tunison'
import abc
import cPickle
import logging
import numpy as np
import pymongo
import time
from .utils import SafeConfigCommentParser
from .VCDStore import VCDStoreElement, VCDStore
# NOTE: DistanceKernelInterface_Mongo is commented out below, so only the
# classes actually defined in this module are exported.
__all__ = [
    'DistanceKernelInterface',
    'DistanceKernel_File_IQR',
    'DistanceKernel_File_Archive',
]
class _timer (object):
def __init__(self, msg):
self._log = logging.getLogger('.'.join([self.__module__,
self.__class__.__name__]))
self._msg = msg
self._s = 0.0
def __enter__(self):
self._log.info(self._msg)
self._s = time.time()
def __exit__(self, exc_type, exc_val, exc_tb):
self._log.info("-> %f s", time.time() - self._s)
class DistanceKernelInterface (object):
"""
Abstract interface for SVM distance kernel matrices. Normally these are
square, but some implementations may contain rectangular matrices.
    Rectangular kernels are testing-only and do not support the square
    sub-matrix method.
These objects must be picklable.
"""
__metaclass__ = abc.ABCMeta
@classmethod
def generate_config(cls, config=None):
if config is None:
config = SafeConfigCommentParser()
return config
@abc.abstractmethod
def __init__(self, config):
if config is None:
config = self.generate_config()
self._config = config
self._log = logging.getLogger('.'.join([self.__module__,
self.__class__.__name__]))
def __getstate__(self):
#noinspection PyRedundantParentheses
return (self._config,)
def __setstate__(self, state):
self._log = logging.getLogger('.'.join([self.__module__,
self.__class__.__name__]))
self._config = state[0]
@abc.abstractproperty
def edge_clip_ids(self):
"""
:return: The full matrix index-to-clipID tuple maps for the matrix.
Returns 2 tuples of M (row) and N (column) length, respectively,
when the matrix contained is of shape MxN.
:rtype: (tuple of int, tuple of int)
"""
return
@abc.abstractmethod
def get_sub_matrix(self, *clip_ids):
"""
Return a symmetric sub NxN matrix of the total distance kernel based on
the clip IDs provided. The background clips will always be included in
the matrix.
Clip IDs provided will be assumed non-background, or positive
event examples. If the clip ID of a background video is provided as an
argument, we will reconsider it as a non-background video for the
returned data.
:param clip_ids: Integer clip IDs to include in the returned matrix. The
returned matrix will always contain all the Background videos.
:type clip_ids: Iterable of int
:return: Returns the index to clip ID map (tuple), the index to isBG map
(tuple) and the symmetric NxN sub-matrix, where N is the number of
clip IDs provided as arguments, plus the number of background
videos.
:rtype: (tuple of int, tuple of bool, numpy.ndarray)
"""
return
@abc.abstractmethod
def extract_rows(self, *clipID_or_IDs):
"""
Find and return the v-stacked distance vectors, in kernel row order
(i.e. not the order given as arguments), of the kernel rows matching the
given clip IDs.
:param clipID_or_IDs: The integer clip ID or IDs of which to get the
distances for.
:type clipID_or_IDs: int or Iterable of int
:return: The row index to clip ID map (tuple), the column index to clip
ID map (tuple), and the KxL shape matrix, where K is the number of
            clip IDs given to this method, and L is the total width (columns) of
the distance kernel.
:rtype: (tuple of int, tuple of int, numpy.ndarray)
"""
return
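# Illustrative usage sketch (file names here are hypothetical, not part of
# the original module): a concrete implementation such as the
# DistanceKernel_File_IQR class defined below would typically be driven as:
#
#     dk = DistanceKernel_File_IQR('row_index_to_clip_id.txt',
#                                  'row_index_is_bg.txt',
#                                  'distance_kernel.npy')
#     idx2cid, idx2isBG, sub_mat = dk.get_sub_matrix(101, 102, 103)
#     row_cids, col_cids, rows = dk.extract_rows(101, 102)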
#class DistanceKernelInterface_Mongo (DistanceKernelInterface):
# """
# Interface class to the Database stored distance matrix, providing access as
# if the matrix were stored locally.
#
# Individual distances can be gotten using matrix access notation,
# i.e. self[x, y] format.
#
# """
#
# DMI_COLLECTION = "DistanceMapData"
#
# # _id of matrix row entries will be the integer index key of that row in the
# # constructed matrix
# # A metadata construct will have the string ID with 3 elements with
# # following labels.
# # - metadata document format
# DMI_MD_KEY = "DMI_METADATA"
# DMI_DTYPE = "DMI_MATRIX_DTYPE" # string
# DMI_ORDERED_CID_TUP = "DMI_ORDERED_CID_TUP" # picked tuple
# DMI_ORDERED_ISBG_TUP = "DMI_ORDERED_BG_TUP" # pickled tuple
# # MongoDB Doc formats:
# # -> Metadata:
# # { _id: <DMI_MD_KEY>,
# # <DMI_DTYPE>: <str>,
# # <DMI_IDX_CID_MAP>: <str>,
# # <DMI_IDX_ISBG_MAP>: <str> }
# # -> Row Data:
# # { _id: <int_idx>,
# # row: <str> }
#
# def __init__(self, db_info):
# """
# Construct the distance map interface.
#
# :param db_info: Database connection information. The collection
# attribute will be ignored as we specify our own.
# :type db_info: DatabaseInfo
#
# """
# self._log = logging.getLogger('.'.join([self.__module__,
# self.__class__.__name__]))
#
# self._mdb_client = pymongo.MongoClient(db_info.host, db_info.port)
# self._mdb_name = db_info.name
#
# self._is_initialized = False
# self._matrix_dtype = None # stored, only needed when storing buffer
# self._ordered_cid_tup = None # stored
# self._ordered_isBG_tup = None # stored
# self._cid_idx_map = None # constructed
# self._bg_cid_set = None # constructed
#
# # Attempt to load index maps. If either isn't there
# coll = self._get_db_collection()
# md_doc = coll.find_one({'_id': self.DMI_MD_KEY})
# if md_doc:
# self._log.info("Found existing data. Caching metadata.")
# # If both are there we assume that there is existing matrix data
# # loaded (initialize inputs matrix data before the metadata
# # structures).s
# self._is_initialized = True
#
# # Retrieve stored values
# self._matrix_dtype = md_doc[self.DMI_DTYPE]
# self._ordered_cid_tup = cPickle.loads(str(md_doc[self.DMI_ORDERED_CID_TUP]))
# self._ordered_isBG_tup = cPickle.loads(str(md_doc[self.DMI_ORDERED_ISBG_TUP]))
#
# # Construct constructed values
# self._cid_idx_map = dict((int(cid), idx)
# for idx, cid
# in enumerate(self._ordered_cid_tup))
# self._bg_cid_set = frozenset(self._ordered_cid_tup[idx]
# for idx, isBG
# in enumerate(self._ordered_isBG_tup)
# if isBG)
#
# def _get_db_collection(self):
# return self._mdb_client[self._mdb_name][self.DMI_COLLECTION]
#
# def initialize_data(self, id_index_map, bg_data_map, npy_data_file):
# """
# Initialize the data stored in the database. If this structure was
# already initialized, or if there is pre-existing data in the database,
# we delete it and replace it with the provided data.
#
# :param id_index_map: Path to the file listing the matrix index to clip
# ID relationships.
# :type id_index_map: str
# :param bg_data_map: Path to the file listing flags for matrix indices
# of whether the video for that index is considered a "background"
# video.
# :type bg_data_map: str
# :param npy_data_file: Path to the numpy binary file containing the
# matrix data.
# :type npy_data_file: str
#
# """
# self._log.info("Initializing and loading IQR distance matrix")
# coll = self._get_db_collection()
#
# self._log.info("Dropping any existing data in DB")
# coll.drop()
#
# self._log.info("Loading ID to index map")
# with open(id_index_map) as ifile:
# self._ordered_cid_tup = tuple(int(cid) for cid in ifile.readlines())
#
# self._cid_idx_map = dict((int(cid), idx)
# for idx, cid in enumerate(self._ordered_cid_tup))
#
# self._log.info("Loading index to isBD map")
# with open(bg_data_map) as ifile:
# self._ordered_isBG_tup = tuple(bool(int(isBG)) for isBG
# in ifile.readlines())
#
# self._bg_cid_set = frozenset(self._ordered_cid_tup[idx]
# for idx, isBG
# in enumerate(self._ordered_isBG_tup)
# if isBG)
#
# self._log.info("Loading IQR distance matrix")
# s = time.time()
# mat = np.load(npy_data_file)
# self._matrix_dtype = str(mat.dtype)
# self._log.info("-> Time to load: %f s", time.time() - s)
#
# self._log.info("creating matrix database documents")
# to_insert = []
# push_trigger = 1024 * 8
# for idx, row in enumerate(mat):
# if idx % 100 == 0:
# self._log.info("Num packaged: %d", idx)
#
# doc = {
# '_id': idx,
# 'row': cPickle.dumps(row)
# # Raises some invalid string error in mongo
# #'row': str(buffer(row)),
# }
# to_insert.append(doc)
#
# if len(to_insert) >= push_trigger:
# self._log.info("Inserting batch into DB (n: %d)", push_trigger)
# s = time.time()
# coll.insert(to_insert)
# self._log.info("-> Total time for matrix insertion: %f s",
# time.time() - s)
# to_insert = []
#
# self._log.info("Inserting FINAL batch into DB (n: %d)", len(to_insert))
# s = time.time()
# coll.insert(to_insert)
# self._log.info("-> Total time for matrix insertion: %f s",
# time.time() - s)
#
# self._log.info('Inserting and caching metadata')
# s = time.time()
# md_doc = {
# '_id': self.DMI_MD_KEY,
# self.DMI_DTYPE: self._matrix_dtype,
# self.DMI_ORDERED_CID_TUP: cPickle.dumps(self._ordered_cid_tup),
# self.DMI_ORDERED_ISBG_TUP: cPickle.dumps(self._ordered_isBG_tup)
# }
# coll.insert(md_doc)
# self._log.info("Metadata insertion time: %f s", time.time() - s)
#
# # NOT adding an index on _id, as it exists by default.
#
# # queue matrix clean-up from RAM
# del mat
#
# self._is_initialized = True
#
# @property
# def is_initialized(self):
# return self._is_initialized
#
# def get_sub_matrix(self, *clip_ids):
# """
# Return a sub NxN matrix of the total distance map based on the video IDs
# provided. The background clips will always be included in the matrix.
#
# :param clip_ids: Integer clip IDs to include in the returned matrix. The
# returned matrix will always contain all the Background videos.
# :type clip_ids: Iterable of int
# :return: Returns the clip ID to index map (dict), the index to isBG map
# (tuple) and the NxN sub-matrix, where N is the number of clip IDs
# provided as arguments, plus the number of background videos.
# :rtype: (dict of (int, int), tuple of bool, numpy.ndarray)
#
# """
# self._log.info("Starting sub-matrix retrieval and extraction")
# t_s = time.time()
# if not self._is_initialized:
# raise RuntimeError("No data initialized yet!")
#
# assert all((isinstance(e, int) for e in clip_ids)), \
# "Not all clip IDs given were integers! This is required."
#
# # Matrix to return should be the distance matrix of all background clips
# # as well as clips provided
# # - Determine what clips IDs are background clips
# # - create set of clip IDs that are the union of background clips and
# # those provided
# # - iteratively fetch rows from DB for clip ID set, extracting pertinent
# # columns, v-stacking row-ified columns to produce results symmetric
# # matrix.
#
# all_cids = self._bg_cid_set.union(clip_ids)
#
# # Create a list of clip IDs that are in the same relative order as the
# # total set
# self._log.info("Creating focus index sequence from master sequence.")
# s = time.time()
# focus_indices = []
# for idx, cid in enumerate(self._ordered_cid_tup):
# if cid in all_cids:
# focus_indices.append(idx)
# self._log.info("-> %f s", time.time() - s)
#
# N = len(focus_indices)
# focus_cid2idx = {}
# focus_id2isBG = []
# coll = self._get_db_collection()
#
# self._log.info("Creating metadata structures")
# s = time.time()
# for new_idx, idx in enumerate(focus_indices):
# focus_cid2idx[self._ordered_cid_tup[idx]] = new_idx
# focus_id2isBG.append(self._ordered_isBG_tup[idx])
# self._log.info("-> %f s", time.time() - s)
#
# #######################################################################
# ### single-element construction method
# self._log.info("Creating sub-matrix")
# s = time.time()
# ret_mat = np.zeros((N, N), dtype=self._matrix_dtype)
# for i, idx in enumerate(focus_indices):
# doc = coll.find_one({'_id': idx})
# assert doc, "Missing matrix row entry for index %d (cid:%d)" \
# % (idx, self._ordered_cid_tup[idx])
# row = cPickle.loads(str(doc['row']))
# for j, _idx in enumerate(focus_indices):
# ret_mat[i, j] = row[_idx]
# self._log.info("-> %f s", time.time() - s)
#
# #######################################################################
# ### row->column extraction method
# #self._log.info("Creating metadata structures")
# #s = time.time()
# #for new_idx, idx in enumerate(focus_indices):
# # focus_cid2idx[self._ordered_cid_tup[idx]] = new_idx
# # focus_id2isBG.append(self._ordered_isBG_tup[idx])
# #self._log.info("-> %f s", time.time() - s)
# #
# #self._log.info("Collecting rows")
# #s = time.time()
# #pickled_rows = []
# #for idx in focus_indices:
# # pickled_rows.append(coll.find_one({'_id': idx})['row'])
# ##ret = coll.find({'_id': {"$in": focus_indices}})
# #self._log.info('-> %f s', time.time() - s)
# #
# #self._log.info("Un-pickling data")
# #s = time.time()
# #rows = []
# #p_r = None
# #for p_r in pickled_rows:
# # rows.append(cPickle.loads(str(p_r)))
# #del pickled_rows, p_r
# #self._log.info("-> %f s", time.time() - s)
# #
# #self._log.info("Consolidating into wide matrix")
# #s = time.time()
# #wide_mat = np.mat(rows)
# #self._log.info("-> %f s", time.time() - s)
# #
# #self._log.info("Extracting columns from wide matrix")
# #s = time.time()
# #cols = []
# #for idx in focus_indices:
# # cols.append(wide_mat[:, idx])
# #self._log.info("-> %f s", time.time() - s)
# #
# #self._log.info("Constructing final matrix")
# #s = time.time()
# ## Because of the symmetric nature of the extracted sub-matrix, and since
# ## numpy row-ifies the columns when extracted
# #ret_mat = np.hstack(cols)
# #self._log.info("-> %f s", time.time() - s)
#
# self._log.info("==> Total: %f s", time.time() - t_s)
# return focus_cid2idx, focus_id2isBG, ret_mat
#
# def get_clip_distances(self, clip_id):
# """
# Find and return the vector of distance of this clip to all other clips.
#
# :param clip_id: The integer clip ID of which to get the distances for.
# :type clip_id: int
# :return: The clip ID to index map (dict), the index to isBG map
# (tuple), and the N length vector, where N is the total number of
# clip IDs in the distance map.
# :rtype: (dict of (int, int), tuple of bool, numpy.ndarray)
#
# """
# if not self._is_initialized:
# raise RuntimeError("No data initialized yet!")
#
# coll = self._get_db_collection()
# doc = coll.find_one({'_id': self._cid_idx_map[clip_id]})
# row = cPickle.loads(str(doc['row']))
# return self._cid_idx_map, self._ordered_isBG_tup, row
#
# def __getitem__(self, id_1, id_2):
# """
# Access a single distance from the matrix.
#
# :raises KeyError: If one or both IDs are not included in the distance
# map.
#
# :param id_1: An integer clip id
# :type id_1: int
# :param id_2: An integer clip id
# :type id_2: int
# :return: Distance between the specified videos.
# :rtype: float
#
# """
# if not self._is_initialized:
# raise RuntimeError("No data initialized yet!")
#
# assert isinstance(id_1, int) and isinstance(id_2, int), \
# "require integer clip IDs!"
#
# row_idx = self._cid_idx_map[id_1]
# col_idx = self._cid_idx_map[id_2]
#
# raise NotImplementedError()
#
#
#class DistanceKernelInterface_SQLite3 (DistanceKernelInterface):
# """
# Distance map shared matrix implementation using SQLite3
#
#
# Table Specification (when we get to using our own sql impl)
# ===========================================================
#
# MAT_DATA
# --------
# idx, INTEGER NOT NULL
# row, BLOB NOT NULL
#
# METADATA
# --------
# label, TEXT
# value, TEXT
#
# """
#
# DMI_COLLECTION = "DistanceMapDataSQL"
#
# # A metadata construct will have the string ID with 3 elements with
# # following labels.
# # - metadata document format
# DMI_MD_KEY = "DMI_METADATA"
# DMI_DTYPE = "DMI_MATRIX_DTYPE" # string
# DMI_ORDERED_CID_TUP = "DMI_ORDERED_CID_TUP" # picked tuple
# DMI_ORDERED_ISBG_TUP = "DMI_ORDERED_BG_TUP" # pickled tuple
# # MongoDB Doc format:
# # -> Metadata:
# # { _id: <DMI_MD_KEY>,
# # <DMI_DTYPE>: <str>,
# # <DMI_IDX_CID_MAP>: <str>,
# # <DMI_IDX_ISBG_MAP>: <str> }
#
# @classmethod
# def generate_config(cls, config=None):
# if config is None:
# config = SafeConfigCommentParser()
#
# #sect = "distance_map_interface"
# #config.add_section(sect,
# # "This interface inherits database connection "
# # "information from the instantiating agent.")
#
# return config
#
# def __init__(self, db_path, mongo_dbinfo):
# """
# Initialize DMI to use specific database location
#
# :param db_path: Path to the database file
# :type db_path: str
# :param mongo_dbinfo: Database information to mongo DB to connect to for
# metadata storage.
# :type mongo_dbinfo: DatabaseInfo
#
# """
# self._log = logging.getLogger('.'.join([self.__module__,
# self.__class__.__name__]))
#
# self._mat_vcds = VCDStore(fs_db_path=db_path)
# self._mdb_client = pymongo.MongoClient(mongo_dbinfo.host,
# mongo_dbinfo.port)
# self._mdb_name = mongo_dbinfo.name
#
# self._is_initialized = False
# self._matrix_dtype = None # stored, only needed when storing buffer
# self._ordered_cid_tup = None # stored
# self._ordered_isBG_tup = None # stored
# self._cid_idx_map = None # constructed
# self._bg_cid_set = None # constructed
#
# # Check if database is initialize by looking for table and metadata
# # existence.
# coll = self._get_mdb_collection()
# md_doc = coll.find_one({'_id': self.DMI_MD_KEY})
# if md_doc:
# self._log.info("Found existing data. Caching metadata.")
# s = time.time()
# # If both are there we assume that there is existing matrix data
# # loaded (initialize inputs matrix data before the metadata
# # structures).s
# self._is_initialized = True
#
# # Retrieve stored values
# self._matrix_dtype = md_doc[self.DMI_DTYPE]
# self._ordered_cid_tup = cPickle.loads(str(md_doc[self.DMI_ORDERED_CID_TUP]))
# self._ordered_isBG_tup = cPickle.loads(str(md_doc[self.DMI_ORDERED_ISBG_TUP]))
#
# # Construct constructed values
# self._cid_idx_map = dict((int(cid), idx)
# for idx, cid
# in enumerate(self._ordered_cid_tup))
# self._bg_cid_set = frozenset(self._ordered_cid_tup[idx]
# for idx, isBG
# in enumerate(self._ordered_isBG_tup)
# if isBG)
# self._log.info("-> %f s", time.time() - s)
#
# def _get_mdb_collection(self):
# return self._mdb_client[self._mdb_name][self.DMI_COLLECTION]
#
# @property
# def is_initialized(self):
# return self._is_initialized
#
# def initialize_data(self, id_index_map, bg_data_map, npy_data_file):
# """
# Initialize the data stored in the database. If this structure was
# already initialized, or if there is pre-existing data in the database,
# we delete it and replace it with the provided data.
#
# :param id_index_map: Path to the file listing the matrix index to clip
# ID relationships.
# :type id_index_map: str
# :param bg_data_map: Path to the file listing flags for matrix indices
# of whether the video for that index is considered a "background"
# video.
# :type bg_data_map: str
# :param npy_data_file: Path to the numpy binary file containing the
# matrix data.
# :type npy_data_file: str
#
# """
# self._log.info("Initializing and loading IQR distance matrix")
#
# self._log.info("Dropping any existing data in DB")
#
# self._log.info("Loading ID to index map")
# with open(id_index_map) as ifile:
# self._ordered_cid_tup = tuple(int(cid) for cid in ifile.readlines())
#
# self._cid_idx_map = dict((int(cid), idx)
# for idx, cid
# in enumerate(self._ordered_cid_tup))
#
# self._log.info("Loading index to isBD map")
# with open(bg_data_map) as ifile:
# self._ordered_isBG_tup = tuple(bool(int(isBG))
# for isBG in ifile.readlines())
#
# self._bg_cid_set = frozenset(self._ordered_cid_tup[idx]
# for idx, isBG
# in enumerate(self._ordered_isBG_tup)
# if isBG)
#
# self._log.info("Loading IQR distance matrix")
# s = time.time()
# mat = np.load(npy_data_file)
# self._matrix_dtype = str(mat.dtype)
# self._log.info("-> %f s", time.time() - s)
#
# self._log.info("Inserting matrix data into SQLite3 database")
#
# self._log.info("Creating store elements")
# elements = []
# for idx, row in enumerate(mat):
# e = VCDStoreElement(self.DMI_COLLECTION, idx, row)
# elements.append(e)
#
# self._log.info("Inserting elements into database")
# s = time.time()
# self._mat_vcds.store_feature(elements)
# self._log.info("Time to insert: %f s", time.time() - s)
#
# #def get_sub_matrix(self, *clip_ids):
class DistanceKernel_File_IQR (DistanceKernelInterface):
"""
Load and perform functions on a symmetric distance matrix from file intended
for IQR learning and searching.
"""
def __init__(self, id_index_map, bg_data_map, npy_data_file):
"""
:param id_index_map: Path to the file listing the matrix index to clip
ID relationships.
:type id_index_map: str
:param bg_data_map: Path to the file listing flags for matrix indices
of whether the video for that index is considered a "background"
video.
:type bg_data_map: str
:param npy_data_file: Path to the numpy binary file containing the
matrix data.
:type npy_data_file: str
"""
super(DistanceKernel_File_IQR, self).__init__(None)
self._npy_data_file = npy_data_file
with open(id_index_map) as ifile:
self._ordered_cid_tup = tuple(int(cid) for cid in ifile.readlines())
with open(bg_data_map) as ifile:
self._ordered_isBG_tup = tuple(bool(int(isBG))
for isBG in ifile.readlines())
self._bg_cid_set = frozenset(self._ordered_cid_tup[idx]
for idx, isBG
in enumerate(self._ordered_isBG_tup)
if isBG)
def __getstate__(self):
return (
super(DistanceKernel_File_IQR, self).__getstate__(),
self._npy_data_file,
self._ordered_cid_tup,
self._ordered_isBG_tup,
self._bg_cid_set
)
def __setstate__(self, state):
super(DistanceKernel_File_IQR, self).__setstate__(state[0])
self._npy_data_file = state[1]
self._ordered_cid_tup = state[2]
self._ordered_isBG_tup = state[3]
self._bg_cid_set = state[4]
@property
def edge_clip_ids(self):
return self._ordered_cid_tup, self._ordered_cid_tup
def get_sub_matrix(self, *clip_ids):
"""
Return a symmetric sub NxN matrix of the total distance kernel based on
the clip IDs provided. The background clips will always be included in
the matrix.
Clip IDs provided will be assumed non-background, or positive
event examples. If the clip ID of a background video is provided as an
argument, we will reconsider it as a non-background video for the
returned data.
:param clip_ids: Integer clip IDs to include in the returned matrix. The
returned matrix will always contain all the Background videos.
:type clip_ids: Iterable of int
:return: Returns the index to clip ID map (tuple), the index to isBG map
(tuple) and the symmetric NxN sub-matrix, where N is the number of
clip IDs provided as arguments, plus the number of background
videos.
:rtype: (tuple of int, tuple of bool, numpy.ndarray)
"""
self._log.info("Starting symmetric sub-matrix retrieval and extraction")
s_t = time.time()
assert all((isinstance(e, int) for e in clip_ids)), \
"Not all clip IDs given were integers! This is required."
assert not set(clip_ids).difference(self._ordered_cid_tup), \
"Not all clip IDs provided are represented in the distance " \
"kernel matrix row map! (difference: %s)" \
% set(clip_ids).difference(self._ordered_cid_tup)
all_cids = self._bg_cid_set.union(clip_ids)
# Create a list of clip IDs that are in the same relative order as the
# total set. If there are duplicates, we only pick the first one.
with _timer("Creating focus index sequence from master sequence"):
focus_indices = []
focus_clipIDs = []
for idx, cid in enumerate(self._ordered_cid_tup):
# Filtering out IDs that we have already seen
if cid in all_cids and (cid not in focus_clipIDs):
focus_indices.append(idx)
focus_clipIDs.append(cid)
focus_id2isBG = []
with _timer("Creating metadata structures"):
for idx in focus_indices:
cid = self._ordered_cid_tup[idx]
# IDs provided as arguments are considered non-background, while
# all other are considered background (those added with the set
# union above)
focus_id2isBG.append(False if cid in clip_ids else True)
with _timer("Loading full kernel matrix"):
full_kernel_mat = np.load(self._npy_data_file)
with _timer("Creating sub-matrix"):
            # NumPy fancy indexing: selecting the focus rows and then the
            # focus columns yields the symmetric sub-matrix in one step.
ret_mat = full_kernel_mat[focus_indices, :][:, focus_indices]
self._log.info("==> Total: %f s", time.time() - s_t)
return focus_clipIDs, focus_id2isBG, ret_mat
def extract_rows(self, *clipID_or_IDs):
"""
Find and return the v-stacked distance vectors, in kernel row order
(i.e. not the order given as arguments), of the kernel rows matching the
given clip IDs.
:param clipID_or_IDs: The integer clip ID or IDs of which to get the
distances for.
:type clipID_or_IDs: int or Iterable of int
:return: The row index to clip ID map (tuple), the column index to clip
ID map (tuple), and the KxL shape matrix, where K is the number of
            clip IDs given to this method, and L is the total width (columns) of
the distance kernel.
:rtype: (tuple of int, tuple of int, numpy.ndarray)
"""
self._log.info("Starting kernel row retrieval and extraction")
s_t = time.time()
assert all((isinstance(e, int) for e in clipID_or_IDs)), \
"Not all clip IDs given were integers! This is required."
assert not set(clipID_or_IDs).difference(self._ordered_cid_tup), \
"Not all clip IDs provided are represented in the distance " \
"kernel matrix row map! (difference: %s)" \
% set(clipID_or_IDs).difference(self._ordered_cid_tup)
with _timer("Loading full kernel matrix"):
full_kernel_mat = np.load(self._npy_data_file)
# Create ordered tuple of clip IDs that are in the same relative order
# as the kernel matrix's edge order.
with _timer("Creating focus index/cid sequence"):
focus_indices = []
focus_clipIDs = []
for idx, cid in enumerate(self._ordered_cid_tup):
# Filtering out IDs that we have already seen
if cid in clipID_or_IDs and (cid not in focus_clipIDs):
focus_indices.append(idx)
focus_clipIDs.append(cid)
with _timer("Cropping kernel to focus indices"):
wide_mat = full_kernel_mat[focus_indices, :]
self._log.info("==> Total: %f s", time.time() - s_t)
return focus_clipIDs, self._ordered_cid_tup, wide_mat
class DistanceKernel_File_Archive (DistanceKernelInterface):
"""
Load and perform functions on a distance matrix from file intended for IQR
learning and searching. There is no guarantee that this matrix will be
symmetric or that clip IDs will be at all shared along axes.
"""
def __init__(self, id_index_map_rows, id_index_map_cols, npy_data_file):
"""
:param id_index_map_rows: Path to the file detailing the index-to-clipID
relationship of the rows of the matrix.
:type id_index_map_rows: str
:param id_index_map_cols: Path to the file detailing the index-to-clipID
relationship of the columns of the matrix.
:type id_index_map_cols: str
:param npy_data_file: Path to the numpy binary file containing the
matrix data.
:type npy_data_file: str
"""
super(DistanceKernel_File_Archive, self).__init__(None)
self._npy_data_file = npy_data_file
with open(id_index_map_rows) as ifile:
self._ordered_row_cid_tup = tuple(int(cid) for cid
in ifile.readlines())
with open(id_index_map_cols) as ifile:
self._ordered_col_cid_tup = tuple(int(cid) for cid
in ifile.readlines())
def __getstate__(self):
return (
super(DistanceKernel_File_Archive, self).__getstate__(),
self._npy_data_file,
self._ordered_row_cid_tup,
self._ordered_col_cid_tup
)
def __setstate__(self, state):
super(DistanceKernel_File_Archive, self).__setstate__(state[0])
self._npy_data_file = state[1]
self._ordered_row_cid_tup = state[2]
self._ordered_col_cid_tup = state[3]
@property
def edge_clip_ids(self):
return self._ordered_row_cid_tup, self._ordered_col_cid_tup
def get_sub_matrix(self, *clip_ids):
"""
Undefined functionality for non-symmetric matrix
"""
        raise NotImplementedError("Symmetric matrix sub-division undefined "
"for non-symmetric matrices")
def extract_rows(self, *clipID_or_IDs):
"""
Find and return the v-stacked distance vectors, in kernel row order
(i.e. not the order given as arguments), of the kernel rows matching the
given clip IDs.
:param clipID_or_IDs: The integer clip ID or IDs of which to get the
distances for.
:type clipID_or_IDs: int or Iterable of int
:return: The row index to clip ID map (tuple), the column index to clip
ID map (tuple), and the KxL shape matrix, where K is the number of
            clip IDs given to this method, and L is the total width (columns) of
the distance kernel.
:rtype: (tuple of int, tuple of int, numpy.ndarray)
"""
self._log.info("Starting kernel row retrieval and extraction")
s_t = time.time()
assert all((isinstance(e, int) for e in clipID_or_IDs)), \
"Not all clip IDs given were integers! This is required."
assert not set(clipID_or_IDs).difference(self._ordered_row_cid_tup), \
"Not all clip IDs provided are represented in the distance " \
"kernel matrix row map! (difference: %s)" \
% set(clipID_or_IDs).difference(self._ordered_row_cid_tup)
with _timer("Loading full kernel matrix"):
full_kernel_mat = np.load(self._npy_data_file)
# Create ordered tuple of clip IDs that are in the same relative order
# as the kernel matrix's edge order.
with _timer("Creating focus index/cid sequences"):
focus_row_indices = []
focus_row_clipIDs = []
for idx, cid in enumerate(self._ordered_row_cid_tup):
# Filtering out IDs that we have already seen
if cid in clipID_or_IDs and (cid not in focus_row_clipIDs):
focus_row_indices.append(idx)
focus_row_clipIDs.append(cid)
with _timer("Cropping kernel to focus indices"):
wide_mat = full_kernel_mat[focus_row_indices, :]
self._log.info("==> Total: %f s", time.time() - s_t)
return focus_row_clipIDs, self._ordered_col_cid_tup, wide_mat
|
|
# -*- coding: utf8 -*-
import time
from nose.tools import eq_
import amo
import amo.tests
from editors.models import RereviewQueue, ReviewerScore
from users.models import UserProfile
class TestReviewerScore(amo.tests.TestCase):
fixtures = ['base/users']
def setUp(self):
self.addon = amo.tests.addon_factory(status=amo.STATUS_NOMINATED)
self.app = amo.tests.app_factory(status=amo.STATUS_NOMINATED)
self.user = UserProfile.objects.get(email='editor@mozilla.com')
def _give_points(self, user=None, addon=None, status=None):
user = user or self.user
addon = addon or self.addon
ReviewerScore.award_points(user, addon, status or addon.status)
def check_event(self, type, status, event, **kwargs):
self.addon.type = type
eq_(ReviewerScore.get_event(self.addon, status, **kwargs), event, (
'Score event for type:%s and status:%s was not %s' % (
type, status, event)))
def test_events_addons(self):
types = {
amo.ADDON_ANY: None,
amo.ADDON_EXTENSION: 'ADDON',
amo.ADDON_THEME: 'THEME',
amo.ADDON_DICT: 'DICT',
amo.ADDON_SEARCH: 'SEARCH',
amo.ADDON_LPAPP: 'LP',
amo.ADDON_LPADDON: 'LP',
amo.ADDON_PLUGIN: 'ADDON',
amo.ADDON_API: 'ADDON',
amo.ADDON_PERSONA: 'PERSONA',
# WEBAPP is special cased below.
}
statuses = {
amo.STATUS_NULL: None,
amo.STATUS_UNREVIEWED: 'PRELIM',
amo.STATUS_PENDING: None,
amo.STATUS_NOMINATED: 'FULL',
amo.STATUS_PUBLIC: 'UPDATE',
amo.STATUS_DISABLED: None,
amo.STATUS_BETA: None,
amo.STATUS_LITE: 'PRELIM',
amo.STATUS_LITE_AND_NOMINATED: 'FULL',
amo.STATUS_PURGATORY: None,
amo.STATUS_DELETED: None,
amo.STATUS_REJECTED: None,
amo.STATUS_PUBLIC_WAITING: None,
amo.STATUS_REVIEW_PENDING: None,
amo.STATUS_BLOCKED: None,
}
for tk, tv in types.items():
for sk, sv in statuses.items():
try:
event = getattr(amo, 'REVIEWED_%s_%s' % (tv, sv))
except AttributeError:
try:
event = getattr(amo, 'REVIEWED_%s' % tv)
except AttributeError:
event = None
self.check_event(tk, sk, event)
def test_events_webapps(self):
self.addon = amo.tests.app_factory()
self.check_event(self.addon.type, amo.STATUS_PENDING,
amo.REVIEWED_WEBAPP_HOSTED)
RereviewQueue.objects.create(addon=self.addon)
self.check_event(self.addon.type, amo.STATUS_PUBLIC,
amo.REVIEWED_WEBAPP_REREVIEW, in_rereview=True)
RereviewQueue.objects.all().delete()
self.addon.is_packaged = True
self.check_event(self.addon.type, amo.STATUS_PENDING,
amo.REVIEWED_WEBAPP_PACKAGED)
self.check_event(self.addon.type, amo.STATUS_PUBLIC,
amo.REVIEWED_WEBAPP_UPDATE)
def test_award_points(self):
self._give_points()
eq_(ReviewerScore.objects.all()[0].score,
amo.REVIEWED_SCORES[amo.REVIEWED_ADDON_FULL])
def test_award_moderation_points(self):
ReviewerScore.award_moderation_points(self.user, self.addon, 1)
score = ReviewerScore.objects.all()[0]
eq_(score.score, amo.REVIEWED_SCORES.get(amo.REVIEWED_ADDON_REVIEW))
eq_(score.note_key, amo.REVIEWED_ADDON_REVIEW)
def test_get_total(self):
user2 = UserProfile.objects.get(email='admin@mozilla.com')
self._give_points()
self._give_points(status=amo.STATUS_LITE)
self._give_points(user=user2, status=amo.STATUS_NOMINATED)
eq_(ReviewerScore.get_total(self.user),
amo.REVIEWED_SCORES[amo.REVIEWED_ADDON_FULL] +
amo.REVIEWED_SCORES[amo.REVIEWED_ADDON_PRELIM])
eq_(ReviewerScore.get_total(user2),
amo.REVIEWED_SCORES[amo.REVIEWED_ADDON_FULL])
def test_get_recent(self):
user2 = UserProfile.objects.get(email='admin@mozilla.com')
self._give_points()
time.sleep(1) # Wait 1 sec so ordering by created is checked.
self._give_points(status=amo.STATUS_LITE)
self._give_points(user=user2)
scores = ReviewerScore.get_recent(self.user)
eq_(len(scores), 2)
eq_(scores[0].score, amo.REVIEWED_SCORES[amo.REVIEWED_ADDON_PRELIM])
eq_(scores[1].score, amo.REVIEWED_SCORES[amo.REVIEWED_ADDON_FULL])
def test_get_leaderboards(self):
user2 = UserProfile.objects.get(email='regular@mozilla.com')
self._give_points()
self._give_points(status=amo.STATUS_LITE)
self._give_points(user=user2, status=amo.STATUS_NOMINATED)
leaders = ReviewerScore.get_leaderboards(self.user)
eq_(leaders['user_rank'], 1)
eq_(leaders['leader_near'], [])
eq_(leaders['leader_top'][0]['rank'], 1)
eq_(leaders['leader_top'][0]['user_id'], self.user.id)
eq_(leaders['leader_top'][0]['total'],
amo.REVIEWED_SCORES[amo.REVIEWED_ADDON_FULL] +
amo.REVIEWED_SCORES[amo.REVIEWED_ADDON_PRELIM])
eq_(leaders['leader_top'][1]['rank'], 2)
eq_(leaders['leader_top'][1]['user_id'], user2.id)
eq_(leaders['leader_top'][1]['total'],
amo.REVIEWED_SCORES[amo.REVIEWED_ADDON_FULL])
self._give_points(
user=user2, addon=amo.tests.addon_factory(type=amo.ADDON_PERSONA))
leaders = ReviewerScore.get_leaderboards(
self.user, addon_type=amo.ADDON_PERSONA)
eq_(len(leaders['leader_top']), 1)
eq_(leaders['leader_top'][0]['user_id'], user2.id)
def test_no_admins_or_staff_in_leaderboards(self):
user2 = UserProfile.objects.get(email='admin@mozilla.com')
self._give_points()
self._give_points(status=amo.STATUS_LITE)
self._give_points(user=user2, status=amo.STATUS_NOMINATED)
leaders = ReviewerScore.get_leaderboards(self.user)
eq_(leaders['user_rank'], 1)
eq_(leaders['leader_near'], [])
eq_(leaders['leader_top'][0]['user_id'], self.user.id)
eq_(len(leaders['leader_top']), 1) # Only the editor is here.
assert user2.id not in [l['user_id'] for l in leaders['leader_top']], (
'Unexpected admin user found in leaderboards.')
def test_no_marketplace_points_in_amo_leaderboards(self):
self._give_points()
self._give_points(status=amo.STATUS_LITE)
self._give_points(addon=self.app, status=amo.STATUS_NOMINATED)
leaders = ReviewerScore.get_leaderboards(self.user,
types=amo.REVIEWED_AMO)
eq_(leaders['leader_top'][0]['total'],
amo.REVIEWED_SCORES[amo.REVIEWED_ADDON_FULL] +
amo.REVIEWED_SCORES[amo.REVIEWED_ADDON_PRELIM])
def test_no_amo_points_in_marketplace_leaderboards(self):
self._give_points()
self._give_points(status=amo.STATUS_LITE)
self._give_points(addon=self.app, status=amo.STATUS_NOMINATED)
leaders = ReviewerScore.get_leaderboards(
self.user, types=amo.REVIEWED_MARKETPLACE)
eq_(leaders['leader_top'][0]['total'],
amo.REVIEWED_SCORES[amo.REVIEWED_WEBAPP_HOSTED])
def test_get_breakdown(self):
self._give_points()
self._give_points(addon=amo.tests.app_factory())
breakdown = ReviewerScore.get_breakdown(self.user)
eq_(len(breakdown), 2)
eq_(set([b.atype for b in breakdown]),
set([amo.ADDON_EXTENSION, amo.ADDON_WEBAPP]))
def test_get_breakdown_since(self):
self._give_points()
self._give_points(addon=amo.tests.app_factory())
rs = list(ReviewerScore.objects.all())
rs[0].update(created=self.days_ago(50))
breakdown = ReviewerScore.get_breakdown_since(self.user,
self.days_ago(30))
eq_(len(breakdown), 1)
eq_([b.atype for b in breakdown], [rs[1].addon.type])
def test_get_leaderboards_last(self):
users = []
for i in range(6):
users.append(UserProfile.objects.create(username='user-%s' % i))
last_user = users.pop(len(users) - 1)
for u in users:
self._give_points(user=u)
# Last user gets lower points by reviewing a persona.
addon = self.addon
addon.type = amo.ADDON_PERSONA
self._give_points(user=last_user, addon=addon)
leaders = ReviewerScore.get_leaderboards(last_user)
eq_(leaders['user_rank'], 6)
eq_(len(leaders['leader_top']), 3)
eq_(len(leaders['leader_near']), 2)
def test_all_users_by_score(self):
user2 = UserProfile.objects.get(email='regular@mozilla.com')
amo.REVIEWED_LEVELS[0]['points'] = 180
self._give_points()
self._give_points(status=amo.STATUS_LITE)
self._give_points(user=user2, status=amo.STATUS_NOMINATED)
users = ReviewerScore.all_users_by_score()
eq_(len(users), 2)
# First user.
eq_(users[0]['total'], 180)
eq_(users[0]['user_id'], self.user.id)
eq_(users[0]['level'], amo.REVIEWED_LEVELS[0]['name'])
# Second user.
eq_(users[1]['total'], 120)
eq_(users[1]['user_id'], user2.id)
eq_(users[1]['level'], '')
def test_caching(self):
self._give_points()
with self.assertNumQueries(1):
ReviewerScore.get_total(self.user)
with self.assertNumQueries(0):
ReviewerScore.get_total(self.user)
with self.assertNumQueries(1):
ReviewerScore.get_recent(self.user)
with self.assertNumQueries(0):
ReviewerScore.get_recent(self.user)
with self.assertNumQueries(1):
ReviewerScore.get_leaderboards(self.user)
with self.assertNumQueries(0):
ReviewerScore.get_leaderboards(self.user)
with self.assertNumQueries(1):
ReviewerScore.get_breakdown(self.user)
with self.assertNumQueries(0):
ReviewerScore.get_breakdown(self.user)
# New points invalidates all caches.
self._give_points()
with self.assertNumQueries(1):
ReviewerScore.get_total(self.user)
with self.assertNumQueries(1):
ReviewerScore.get_recent(self.user)
with self.assertNumQueries(1):
ReviewerScore.get_leaderboards(self.user)
with self.assertNumQueries(1):
ReviewerScore.get_breakdown(self.user)
|
|
from __future__ import unicode_literals
import logging
import pykka
import pyconnman
import dbus
import dbus.mainloop.glib
import gobject
from mopidy import service
from mopidy.utils.jsonrpc import private_method
logger = logging.getLogger(__name__)
CONNMAN_SERVICE_NAME = 'connman'
dbus.mainloop.glib.DBusGMainLoop(set_as_default=True)
gobject.threads_init()
def api_protect(f):
"""
This decorator protects API calls by ensuring a valid
pyconnman manager instance is running beforehand. It
    will raise an exception if the pyconnman manager is not
ready.
"""
def wrapper(*args, **kwargs):
if (args[0].manager is not None):
return f(*args, **kwargs)
else:
raise Exception('Service not ready')
return wrapper
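# Illustrative sketch (hypothetical method, not part of the original module):
# API methods that touch self.manager are wrapped with api_protect so a clear
# error is raised when the service has not been started yet, e.g.:
#
#     @api_protect
#     def count_connections(self):
#         # Only reached once self.manager is a live pyconnman.ConnManager
#         return len(self.manager.get_services())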
def make_string(value):
    return dbus.String(value, variant_level=1)
def convert_dbus(obj):
if (type(obj) is dbus.Byte):
return int(obj)
return obj
class ConnectionManager(pykka.ThreadingActor, service.Service):
"""
ConnectionManager is a network connection manager service.
    The main use-case of this service is to allow network settings to be
    configured through mopidy, so that new or existing WiFi networks can
    be joined or the device can switch over to a LAN connection.
"""
name = CONNMAN_SERVICE_NAME
public = True
agent_path = '/mopidy/wifi_agent'
# Refer to https://pythonhosted.org/pyconnman for a description of
# the configuration properties
readonly_properties = ['State', 'Error', 'Name', 'Type', 'Security',
'Strength', 'Nameservers', 'Timeservers',
'Domains', 'IPv4', 'IPv6', 'Ethernet']
readwrite_properties = ['Autoconnect', 'Nameservers.Configuration',
'Timeservers.Configuration', 'Domains.Configuration',
'IPv4.Configuration', 'IPv6.Configuration']
def __init__(self, config, core):
super(ConnectionManager, self).__init__()
self.config = dict(config['connman'])
self.manager = None
self.agent = None
def _services_changed_handler(self, signal, user_arg, changed, removed):
"""
        Helper to notify listeners when the available connman connections
        have changed
"""
if (changed):
for i in changed:
(_, props) = i
if 'Name' in props:
ret_props = {}
for i in props.keys():
if (i in self.readonly_properties + self.readwrite_properties):
ret_props[i] = convert_dbus(props[i])
service.ServiceListener.send('connman_connection_changed', service=self.name,
connection=props.get('Name'), properties=ret_props)
def _property_changed_handler(self, signal, user_arg, prop_name, prop_value):
"""
Helper to notify when a connman property has changed
"""
props = { prop_name: prop_value }
service.ServiceListener.send('connman_property_changed', service=self.name,
properties=props)
def _get_service_by_name(self, name):
"""
Helper to find a service (aka connection) by its name
and return its ConnService object
"""
for (path, params) in self.manager.get_services():
if (params.get('Name') == name):
return pyconnman.ConnService(path)
def _unregister_wifi_agent(self):
"""
        Helper to unregister a wifi agent if one is registered
"""
try:
if (self.agent):
self.agent.remove_from_connection()
self.manager.unregister_agent(self.agent_path)
self.agent = None
        except Exception:
            # The agent may already be gone; ignore errors during cleanup
            pass
@private_method
def on_start(self):
"""
Activate the service
"""
if (self.manager):
return
# Create connman manager
manager = pyconnman.ConnManager()
manager.add_signal_receiver(self._services_changed_handler,
pyconnman.ConnManager.SIGNAL_SERVICES_CHANGED,
None)
manager.add_signal_receiver(self._property_changed_handler,
pyconnman.ConnManager.SIGNAL_PROPERTY_CHANGED,
None)
self.manager = manager
# Create agent for authenticating WiFi connections
self._unregister_wifi_agent()
self.agent = pyconnman.SimpleWifiAgent(self.agent_path)
self.manager.register_agent(self.agent_path)
        # Power on the technologies listed in the 'powered' configuration
        # setting; technologies that are not listed are left untouched.
for (path,_) in self.manager.get_technologies():
tech = pyconnman.ConnTechnology(path)
if (tech.Type in self.config['powered'] and not tech.Powered):
tech.Powered = True
        # Try APIPA if it is enabled and the connection is idle
if (self.get_connection_state() == 'idle' and self.config['apipa_enabled']):
config = {'Method': make_string('manual'),
'Address': make_string(self.config['apipa_ipaddr']),
'Netmask': make_string(self.config['apipa_netmask'])}
s = self._get_service_by_name(self.config['apipa_interface'])
if (s is not None):
s.set_property('IPv4.Configuration', config)
s.connect()
# Notify listeners
self.state = service.ServiceState.SERVICE_STATE_STARTED
service.ServiceListener.send('service_started', service=self.name)
logger.info('ConnectionManager started')
@private_method
def on_stop(self):
"""
Deactivate the service
"""
if (self.manager is None):
return
# Remove previously installed wifi agent and signal handlers
self._unregister_wifi_agent()
self.manager.remove_signal_receiver(pyconnman.ConnManager.SIGNAL_SERVICES_CHANGED)
self.manager.remove_signal_receiver(pyconnman.ConnManager.SIGNAL_PROPERTY_CHANGED)
self.manager = None
# Notify listeners
self.state = service.ServiceState.SERVICE_STATE_STOPPED
service.ServiceListener.send('service_stopped', service=self.name)
logger.info('ConnectionManager stopped')
@private_method
def on_failure(self, *args):
pass
@private_method
def stop(self, *args, **kwargs):
return pykka.ThreadingActor.stop(self, *args, **kwargs)
@api_protect
def get_connections(self):
"""
Obtain a list of existing network connections
:return: list of network connections
:rtype: list of 'Name' strings of each connection
"""
return [params.get('Name') for (_, params) in self.manager.get_services()]
@api_protect
def scan(self):
"""
Scan and refresh the list of available network connections
(all compatible technologies are scanned).
This will result in the SIGNAL_SERVICES_CHANGED signal
being posted for each different technology scanned
"""
for (path,_) in self.manager.get_technologies():
tech = pyconnman.ConnTechnology(path)
if tech.Type in self.config['scannable']:
tech.scan()
@api_protect
def get_connection_state(self):
"""
Get the global connection state of a system.
:return: Possible values are "offline", "idle", "ready" and "online".
:rtype: string
"""
return self.manager.State
@api_protect
def connect(self, conn):
"""
Connect an available connection
:param conn: Connection name as returned by :class:`get_connections`
"""
s = self._get_service_by_name(conn)
if (s is not None):
s.connect()
@api_protect
def disconnect(self, conn):
"""
Disconnect an available connection
:param conn: Connection name as returned by :class:`get_connections`
"""
s = self._get_service_by_name(conn)
if (s is not None):
s.disconnect()
@api_protect
def get_connection_properties(self, conn):
"""
Get available connection properties
:param conn: Connection name as returned by :class:`get_connections`
:return: dictionary of properties
"""
s = self._get_service_by_name(conn)
if (s is not None):
ret_props = {}
props = s.get_property()
for i in props.keys():
if (i in self.readonly_properties + self.readwrite_properties):
ret_props[i] = convert_dbus(props[i])
return ret_props
@api_protect
def set_connection_properties(self, conn, set_props):
"""
Set connection properties
:param conn: Connection name as returned by :class:`get_connections`
:param set_props: dictionary of readwrite properties
"""
s = self._get_service_by_name(conn)
if (s is not None):
props = s.get_property()
for i in props.keys():
if (i in self.readwrite_properties):
s.set_property(i, set_props[i])
@api_protect
def set_wifi_config(self, conn, config):
"""
Set WiFi configuration properties dictionary:
* name: AP name (string)
* ssid: SSID (string)
* passphrase: WPS/WEP passphrase (string)
* wpspin: None or PIN (string)
Note that 'wpspin' and 'passphrase' are mutually exclusive, i.e.,
if you are using WPS then you don't need a passphrase.
        For hidden WiFi connections, either a name or SSID
must be supplied. If the SSID is not hidden then this may
be omitted.
:param conn: Connection name as returned by :class:`get_connections`
or '*' to denote a wild card connection.
:param config: configuration properties dictionary
"""
allowed_config = ['name', 'ssid', 'passphrase', 'wpspin']
set_config = {}
for i in config.keys():
if i in allowed_config:
set_config[i] = config[i]
if (conn is not None):
s = self._get_service_by_name(conn)
if (s is not None):
path = s._object.__dbus_object_path__
else:
path = None
else:
path = '*'
if (path):
self.agent.set_service_params(path, **set_config)
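    # Illustrative example of the call described in the docstring above
    # (the connection name and passphrase are placeholder values):
    #
    #     manager.set_wifi_config('MyHomeAP', {'passphrase': 'secret'})
    #     manager.connect('MyHomeAP')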
def set_property(self, name, value):
"""
Set a config property of the plugin/service
"""
if (name in self.config):
self.config[name] = value
service.ServiceListener.send('service_property_changed',
service=self.name,
props={ name: value })
            # Restart the service so the new setting takes effect
            self.disable()
            self.enable()
def get_property(self, name):
"""
Get a config property of the plugin/service
"""
if (name is None):
return self.config
else:
try:
value = self.config[name]
return { name: value }
except:
return None
def enable(self):
"""
Enable the service
"""
self.on_start()
def disable(self):
"""
Disable the service
"""
self.on_stop()
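# Illustrative usage sketch (not part of the service): driving this actor
# from another mopidy extension through a pykka proxy. `config` is assumed
# to be the mopidy configuration mapping containing a 'connman' section with
# the keys read in on_start() ('powered', 'scannable', 'apipa_*').
def _example_connman_usage(config, core):
    proxy = ConnectionManager.start(config, core).proxy()
    proxy.scan().get()                     # refresh the list of networks
    names = proxy.get_connections().get()  # pykka futures resolve with .get()
    if names:
        proxy.connect(names[0]).get()
    return proxy.get_connection_state().get()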
|
|
# -*- coding: utf-8 -*-
"""Views tests for the Box addon."""
import unittest
from django.utils import timezone
from nose.tools import * # noqa (PEP8 asserts)
import mock
import httplib
from datetime import datetime
from framework.auth import Auth
from website.util import api_url_for
from urllib3.exceptions import MaxRetryError
from box.client import BoxClientException
from tests.factories import AuthUserFactory
from website.addons.box.model import BoxNodeSettings
from website.addons.box.serializer import BoxSerializer
from website.addons.base import testing
from website.addons.box.tests.utils import (
BoxAddonTestCase,
MockBox,
patch_client
)
mock_client = MockBox()
class TestAuthViews(BoxAddonTestCase, testing.views.OAuthAddonAuthViewsTestCaseMixin):
def setUp(self):
        self.mock_refresh = mock.patch("website.addons.box.model.Box.refresh_oauth_key")
        mock_refresh = self.mock_refresh.start()
        mock_refresh.return_value = True
super(TestAuthViews, self).setUp()
def tearDown(self):
self.mock_refresh.stop()
super(TestAuthViews, self).tearDown()
@mock.patch(
'website.addons.box.model.BoxUserSettings.revoke_remote_oauth_access',
mock.PropertyMock()
)
def test_delete_external_account(self):
super(TestAuthViews, self).test_delete_external_account()
class TestConfigViews(BoxAddonTestCase, testing.views.OAuthAddonConfigViewsTestCaseMixin):
folder = {
'path': '/Foo',
'id': '12234'
}
Serializer = BoxSerializer
client = mock_client
def setUp(self):
self.mock_data = mock.patch.object(
BoxNodeSettings,
'_folder_data',
return_value=(self.folder['id'], self.folder['path'])
)
self.mock_data.start()
super(TestConfigViews, self).setUp()
def tearDown(self):
self.mock_data.stop()
super(TestConfigViews, self).tearDown()
@mock.patch.object(BoxSerializer, 'credentials_are_valid', return_value=True)
def test_import_auth(self, *args):
super(TestConfigViews, self).test_import_auth()
class TestFilebrowserViews(BoxAddonTestCase):
def setUp(self):
super(TestFilebrowserViews, self).setUp()
self.user.add_addon('box')
self.node_settings.external_account = self.user_settings.external_accounts[0]
self.node_settings.save()
        self.patcher_refresh = mock.patch('website.addons.box.model.Box.refresh_oauth_key')
        mock_refresh = self.patcher_refresh.start()
        mock_refresh.return_value = True
def tearDown(self):
self.patcher_refresh.stop()
def test_box_list_folders(self):
with patch_client('website.addons.box.model.BoxClient'):
url = self.project.api_url_for('box_folder_list', folder_id='foo')
res = self.app.get(url, auth=self.user.auth)
contents = mock_client.get_folder('', list=True)['item_collection']['entries']
expected = [each for each in contents if each['type']=='folder']
assert_equal(len(res.json), len(expected))
first = res.json[0]
assert_in('kind', first)
assert_equal(first['name'], contents[0]['name'])
@mock.patch('website.addons.box.model.BoxNodeSettings.folder_id')
def test_box_list_folders_if_folder_is_none(self, mock_folder):
# If folder is set to none, no data are returned
mock_folder.__get__ = mock.Mock(return_value=None)
url = self.project.api_url_for('box_folder_list')
res = self.app.get(url, auth=self.user.auth)
assert_equal(len(res.json), 1)
def test_box_list_folders_if_folder_is_none_and_folders_only(self):
with patch_client('website.addons.box.model.BoxClient'):
self.node_settings.folder_name = None
self.node_settings.save()
url = api_url_for('box_folder_list',
pid=self.project._primary_key, foldersOnly=True)
res = self.app.get(url, auth=self.user.auth)
contents = mock_client.get_folder('', list=True)['item_collection']['entries']
expected = [each for each in contents if each['type']=='folder']
assert_equal(len(res.json), len(expected))
def test_box_list_folders_folders_only(self):
with patch_client('website.addons.box.model.BoxClient'):
url = self.project.api_url_for('box_folder_list', foldersOnly=True)
res = self.app.get(url, auth=self.user.auth)
contents = mock_client.get_folder('', list=True)['item_collection']['entries']
expected = [each for each in contents if each['type']=='folder']
assert_equal(len(res.json), len(expected))
def test_box_list_folders_doesnt_include_root(self):
with patch_client('website.addons.box.model.BoxClient'):
url = self.project.api_url_for('box_folder_list', folder_id=0)
res = self.app.get(url, auth=self.user.auth)
contents = mock_client.get_folder('', list=True)['item_collection']['entries']
expected = [each for each in contents if each['type'] == 'folder']
assert_equal(len(res.json), len(expected))
@mock.patch('website.addons.box.model.BoxClient.get_folder')
def test_box_list_folders_deleted(self, mock_metadata):
# Example metadata for a deleted folder
mock_metadata.return_value = {
u'bytes': 0,
u'contents': [],
u'hash': u'e3c62eb85bc50dfa1107b4ca8047812b',
u'icon': u'folder_gray',
u'is_deleted': True,
u'is_dir': True,
u'modified': u'Sat, 29 Mar 2014 20:11:49 +0000',
u'path': u'/tests',
u'rev': u'3fed844002c12fc',
u'revision': 67033156,
u'root': u'box',
u'size': u'0 bytes',
u'thumb_exists': False
}
url = self.project.api_url_for('box_folder_list', folder_id='foo')
res = self.app.get(url, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, httplib.NOT_FOUND)
@mock.patch('website.addons.box.model.BoxClient.get_folder')
def test_box_list_folders_returns_error_if_invalid_path(self, mock_metadata):
mock_metadata.side_effect = BoxClientException(status_code=404, message='File not found')
url = self.project.api_url_for('box_folder_list', folder_id='lolwut')
res = self.app.get(url, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, httplib.NOT_FOUND)
@mock.patch('website.addons.box.model.BoxClient.get_folder')
def test_box_list_folders_handles_max_retry_error(self, mock_metadata):
mock_response = mock.Mock()
url = self.project.api_url_for('box_folder_list', folder_id='fo')
mock_metadata.side_effect = MaxRetryError(mock_response, url)
res = self.app.get(url, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, httplib.BAD_REQUEST)
class TestRestrictions(BoxAddonTestCase):
def setUp(self):
super(BoxAddonTestCase, self).setUp()
# Nasty contributor who will try to access folders that he shouldn't have
# access to
self.contrib = AuthUserFactory()
self.project.add_contributor(self.contrib, auth=Auth(self.user))
self.project.save()
self.user.add_addon('box')
settings = self.user.get_addon('box')
settings.access_token = '12345abc'
settings.last_refreshed = timezone.now()
settings.save()
        self.patcher = mock.patch('website.addons.box.model.BoxNodeSettings.fetch_folder_name')
        mock_fetch = self.patcher.start()
        mock_fetch.return_value = 'foo bar/baz'
@mock.patch('website.addons.box.model.BoxNodeSettings.has_auth')
def test_restricted_hgrid_data_contents(self, mock_auth):
mock_auth.__get__ = mock.Mock(return_value=False)
# tries to access a parent folder
url = self.project.api_url_for('box_folder_list',
path='foo bar')
res = self.app.get(url, auth=self.contrib.auth, expect_errors=True)
assert_equal(res.status_code, httplib.FORBIDDEN)
def test_restricted_config_contrib_no_addon(self):
url = api_url_for('box_set_config', pid=self.project._primary_key)
res = self.app.put_json(url, {'selected': {'path': 'foo'}},
auth=self.contrib.auth, expect_errors=True)
assert_equal(res.status_code, httplib.BAD_REQUEST)
def test_restricted_config_contrib_not_owner(self):
# Contributor has box auth, but is not the node authorizer
self.contrib.add_addon('box')
self.contrib.save()
url = api_url_for('box_set_config', pid=self.project._primary_key)
res = self.app.put_json(url, {'selected': {'path': 'foo'}},
auth=self.contrib.auth, expect_errors=True)
assert_equal(res.status_code, httplib.FORBIDDEN)
|
|
# file openpyxl/tests/test_write.py
# Copyright (c) 2010-2011 openpyxl
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# @license: http://www.opensource.org/licenses/mit-license.php
# @author: see AUTHORS file
# stdlib imports
import decimal
import os.path
# compatibility imports
from openpyxl.shared.compat import BytesIO, StringIO
# 3rd party imports
from nose.tools import eq_, with_setup, raises
# package imports
from openpyxl.tests.helper import (
TMPDIR,
DATADIR,
clean_tmpdir,
make_tmpdir,
compare_xml,
)
from openpyxl.workbook import Workbook
from openpyxl.reader.excel import load_workbook
from openpyxl.writer.excel import save_workbook, save_virtual_workbook, \
ExcelWriter
from openpyxl.writer.workbook import write_workbook, write_workbook_rels
from openpyxl.writer.worksheet import write_worksheet, write_worksheet_rels
from openpyxl.writer.strings import write_string_table
from openpyxl.writer.styles import StyleWriter
@with_setup(setup = make_tmpdir, teardown = clean_tmpdir)
def test_write_empty_workbook():
wb = Workbook()
dest_filename = os.path.join(TMPDIR, 'empty_book.xlsx')
save_workbook(wb, dest_filename)
assert os.path.isfile(dest_filename)
def test_write_virtual_workbook():
old_wb = Workbook()
saved_wb = save_virtual_workbook(old_wb)
new_wb = load_workbook(BytesIO(saved_wb))
assert new_wb
def test_write_workbook_rels():
wb = Workbook()
content = write_workbook_rels(wb)
reference_file = os.path.join(DATADIR, 'writer', 'expected', 'workbook.xml.rels')
with open(reference_file) as expected:
diff = compare_xml(content, expected.read())
assert diff is None, diff
def test_write_workbook():
wb = Workbook()
content = write_workbook(wb)
reference_file = os.path.join(DATADIR, 'writer', 'expected', 'workbook.xml')
with open(reference_file) as expected:
diff = compare_xml(content, expected.read())
assert diff is None, diff
def test_write_string_table():
table = {'hello': 1, 'world': 2, 'nice': 3}
content = write_string_table(table)
reference_file = os.path.join(DATADIR, 'writer', 'expected', 'sharedStrings.xml')
with open(reference_file) as expected:
diff = compare_xml(content, expected.read())
assert diff is None, diff
def test_write_worksheet():
wb = Workbook()
ws = wb.create_sheet()
ws.cell('F42').value = 'hello'
content = write_worksheet(ws, {'hello': 0}, {})
reference_file = os.path.join(DATADIR, 'writer', 'expected', 'sheet1.xml')
with open(reference_file) as expected:
diff = compare_xml(content, expected.read())
assert diff is None, diff
def test_write_hidden_worksheet():
wb = Workbook()
ws = wb.create_sheet()
ws.sheet_state = ws.SHEETSTATE_HIDDEN
ws.cell('F42').value = 'hello'
content = write_worksheet(ws, {'hello': 0}, {})
reference_file = os.path.join(DATADIR, 'writer', 'expected', 'sheet1.xml')
with open(reference_file) as expected:
diff = compare_xml(content, expected.read())
assert diff is None, diff
def test_write_bool():
wb = Workbook()
ws = wb.create_sheet()
ws.cell('F42').value = False
ws.cell('F43').value = True
content = write_worksheet(ws, {}, {})
reference_file = os.path.join(DATADIR, 'writer', 'expected', 'sheet1_bool.xml')
with open(reference_file) as expected:
diff = compare_xml(content, expected.read())
assert diff is None, diff
def test_write_formula():
wb = Workbook()
ws = wb.create_sheet()
ws.cell('F1').value = 10
ws.cell('F2').value = 32
ws.cell('F3').value = '=F1+F2'
ws.cell('A4').value = '=A1+A2+A3'
ws.formula_attributes['A4'] = {'t': 'shared', 'ref': 'A4:C4', 'si': '0'}
ws.cell('B4').value = '='
ws.formula_attributes['B4'] = {'t': 'shared', 'si': '0'}
ws.cell('C4').value = '='
ws.formula_attributes['C4'] = {'t': 'shared', 'si': '0'}
content = write_worksheet(ws, {}, {})
reference_file = os.path.join(DATADIR, 'writer', 'expected', 'sheet1_formula.xml')
with open(reference_file) as expected:
diff = compare_xml(content, expected.read())
assert diff is None, diff
def test_write_style():
wb = Workbook()
ws = wb.create_sheet()
ws.cell('F1').value = '13%'
ws.column_dimensions['F'].style_index = ws._styles['F1']
style_id_by_hash = StyleWriter(wb).get_style_by_hash()
content = write_worksheet(ws, {}, style_id_by_hash)
reference_file = os.path.join(DATADIR, 'writer', 'expected', 'sheet1_style.xml')
with open(reference_file) as expected:
diff = compare_xml(content, expected.read())
assert diff is None, diff
def test_write_height():
wb = Workbook()
ws = wb.create_sheet()
ws.cell('F1').value = 10
ws.row_dimensions[ws.cell('F1').row].height = 30
content = write_worksheet(ws, {}, {})
reference_file = os.path.join(DATADIR, 'writer', 'expected', 'sheet1_height.xml')
with open(reference_file) as expected:
diff = compare_xml(content, expected.read())
assert diff is None, diff
def test_write_hyperlink():
wb = Workbook()
ws = wb.create_sheet()
ws.cell('A1').value = "test"
ws.cell('A1').hyperlink = "http://test.com"
content = write_worksheet(ws, {'test': 0}, {})
reference_file = os.path.join(DATADIR, 'writer', 'expected', 'sheet1_hyperlink.xml')
with open(reference_file) as expected:
diff = compare_xml(content, expected.read())
assert diff is None, diff
def test_write_hyperlink_rels():
wb = Workbook()
ws = wb.create_sheet()
eq_(0, len(ws.relationships))
ws.cell('A1').value = "test"
ws.cell('A1').hyperlink = "http://test.com/"
eq_(1, len(ws.relationships))
ws.cell('A2').value = "test"
ws.cell('A2').hyperlink = "http://test2.com/"
eq_(2, len(ws.relationships))
content = write_worksheet_rels(ws, 1)
reference_file = os.path.join(DATADIR, 'writer', 'expected', 'sheet1_hyperlink.xml.rels')
with open(reference_file) as expected:
diff = compare_xml(content, expected.read())
assert diff is None, diff
def test_hyperlink_value():
wb = Workbook()
ws = wb.create_sheet()
ws.cell('A1').hyperlink = "http://test.com"
eq_("http://test.com", ws.cell('A1').value)
ws.cell('A1').value = "test"
eq_("test", ws.cell('A1').value)
def test_write_auto_filter():
wb = Workbook()
ws = wb.worksheets[0]
ws.cell('F42').value = 'hello'
ws.auto_filter = 'A1:F1'
content = write_worksheet(ws, {'hello': 0}, {})
reference_file = os.path.join(DATADIR, 'writer', 'expected', 'sheet1_auto_filter.xml')
with open(reference_file) as expected:
diff = compare_xml(content, expected.read())
        assert diff is None, diff
content = write_workbook(wb)
reference_file = os.path.join(DATADIR, 'writer', 'expected', 'workbook_auto_filter.xml')
with open(reference_file) as expected:
diff = compare_xml(content, expected.read())
assert diff is None, diff
def test_freeze_panes_horiz():
wb = Workbook()
ws = wb.create_sheet()
ws.cell('F42').value = 'hello'
ws.freeze_panes = 'A4'
content = write_worksheet(ws, {'hello': 0}, {})
reference_file = os.path.join(DATADIR, 'writer', 'expected', 'sheet1_freeze_panes_horiz.xml')
with open(reference_file) as expected:
diff = compare_xml(content, expected.read())
assert diff is None, diff
def test_freeze_panes_vert():
wb = Workbook()
ws = wb.create_sheet()
ws.cell('F42').value = 'hello'
ws.freeze_panes = 'D1'
content = write_worksheet(ws, {'hello': 0}, {})
reference_file = os.path.join(DATADIR, 'writer', 'expected', 'sheet1_freeze_panes_vert.xml')
with open(reference_file) as expected:
diff = compare_xml(content, expected.read())
assert diff is None, diff
def test_freeze_panes_both():
wb = Workbook()
ws = wb.create_sheet()
ws.cell('F42').value = 'hello'
ws.freeze_panes = 'D4'
content = write_worksheet(ws, {'hello': 0}, {})
reference_file = os.path.join(DATADIR, 'writer', 'expected', 'sheet1_freeze_panes_both.xml')
with open(reference_file) as expected:
diff = compare_xml(content, expected.read())
assert diff is None, diff
def test_long_number():
wb = Workbook()
ws = wb.create_sheet()
ws.cell('A1').value = 9781231231230
content = write_worksheet(ws, {}, {})
reference_file = os.path.join(DATADIR, 'writer', 'expected', 'long_number.xml')
with open(reference_file) as expected:
diff = compare_xml(content, expected.read())
assert diff is None, diff
def test_decimal():
wb = Workbook()
ws = wb.create_sheet()
ws.cell('A1').value = decimal.Decimal('3.14')
content = write_worksheet(ws, {}, {})
reference_file = os.path.join(DATADIR, 'writer', 'expected', 'decimal.xml')
with open(reference_file) as expected:
diff = compare_xml(content, expected.read())
assert diff is None, diff
def test_short_number():
wb = Workbook()
ws = wb.create_sheet()
ws.cell('A1').value = 1234567890
content = write_worksheet(ws, {}, {})
reference_file = os.path.join(DATADIR, 'writer', 'expected', 'short_number.xml')
with open(reference_file) as expected:
diff = compare_xml(content, expected.read())
assert diff is None, diff
|
|
# Bundles for JS/CSS Minification
PIPELINE_CSS = {
'common': {
'source_filenames': (
'normalize-css/normalize.css',
'sumo/less/main.less',
'sumo/less/search.less',
),
'output_filename': 'build/common-min.css'
},
'community': {
'source_filenames': (
'sumo/less/wiki-content.less',
'community/less/community.less',
'community/less/select.less',
),
'output_filename': 'build/community-min.css'
},
'community-new': {
'source_filenames': (
'fontawesome/css/font-awesome.css',
'pikaday/css/pikaday.css',
'sumo/less/wiki-content.less',
'community/less/community-new.less',
),
'output_filename': 'build/community-new-min.css'
},
'mobile-common': {
'source_filenames': (
'normalize-css/normalize.css',
'sumo/less/mobile/main.less',
),
'output_filename': 'build/mobile-common-min.css'
},
'print': {
'source_filenames': (
'sumo/css/print.css',
),
'output_filename': 'build/print-min.css',
'extra_context': {
'media': 'print',
}
},
# TODO: remove dependency on jquery ui CSS and use our own
'jqueryui': {
'source_filenames': (
'sumo/css/jqueryui/jqueryui.css',
),
'output_filename': 'build/jqueryui-min.css'
},
'forums': {
'source_filenames': (
'sumo/less/forums.less',
'sumo/less/reportabuse.less',
),
'output_filename': 'build/forums-min.css'
},
'questions': {
'source_filenames': (
'sumo/less/questions.less',
'sumo/css/cannedresponses.css',
'sumo/less/reportabuse.less',
),
'output_filename': 'build/questions-min.css'
},
'questions.metrics': {
'source_filenames': (
'sumo/less/questions.metrics.less',
),
'output_filename': 'build/questions.metrics-min.css'
},
'questions.aaq.react': {
'source_filenames': (
'questions/less/questions.aaq.react.less',
),
'output_filename': 'build/questions.aaq.react-min.css'
},
'mobile-questions': {
'source_filenames': (
'sumo/less/mobile/questions.less',
),
'output_filename': 'build/mobile-questions-min.css'
},
'mobile-aaq': {
'source_filenames': (
'sumo/less/mobile/aaq.less',
),
'output_filename': 'build/mobile-aaq-min.css'
},
'rickshaw': {
'source_filenames': (
'sumo/css/jqueryui/jqueryui.css',
'sumo/css/rickshaw.css',
'sumo/less/rickshaw.sumo.less',
),
'output_filename': 'build/rickshaw-min.css'
},
'mobile-search': {
'source_filenames': (
'sumo/less/mobile/search.less',
),
'output_filename': 'build/mobile-search-min.css'
},
'coolsearch': {
'source_filenames': (
'react-swipe-views/lib/react-swipe-views.css',
'coolsearch/less/search.less',
),
'output_filename': 'build/coolsearch-min.css'
},
'wiki': {
'source_filenames': (
'sumo/css/users.autocomplete.css',
'sumo/css/users.list.css',
'sumo/less/wiki.less',
'sumo/less/wiki-content.less',
'sumo/css/screencast.css',
),
'output_filename': 'build/wiki-min.css'
},
'wiki-editor': {
'source_filenames': (
'codemirror/lib/codemirror.css',
'codemirror/addon/hint/show-hint.css',
),
        'output_filename': 'build/wiki-editor-min.css'
},
'mobile-wiki': {
'source_filenames': (
'sumo/less/mobile/wiki.less',
'sumo/less/wiki-content.less',
),
'output_filename': 'build/mobile-wiki-min.css'
},
'mobile-wiki-minimal': {
'source_filenames': (
'normalize-css/normalize.css',
'sumo/less/mobile/main.less',
'sumo/less/mobile/wiki.less',
'sumo/less/wiki-content.less',
),
'output_filename': 'build/mobile-wiki-minimal-min.css'
},
'home': {
'source_filenames': (
'sumo/less/home.less',
),
'output_filename': 'build/home-min.css'
},
'gallery': {
'source_filenames': (
'sumo/less/gallery.less',
),
'output_filename': 'build/gallery-min.css'
},
'ie': {
'source_filenames': (
'sumo/css/ie.css',
'sumo/css/ie8.css',
),
'output_filename': 'build/ie-min.css'
},
'ie8': {
'source_filenames': ( # IE 8 needs some specific help.
'sumo/css/ie8.css',
),
'output_filename': 'build/ie8-min.css'
},
'customercare': {
'source_filenames': (
'sumo/less/customercare.less',
),
'output_filename': 'build/customercare-min.css'
},
'users': {
'source_filenames': (
'sumo/less/users.less',
'sumo/less/reportabuse.less',
),
'output_filename': 'build/users-min.css'
},
'mobile-users': {
'source_filenames': (
'sumo/less/mobile/users.less',
),
'output_filename': 'build/mobile-users-min.css'
},
'monitor': {
'source_filenames': (
'sumo/css/monitor.css',
),
'output_filename': 'build/monitor-min.css'
},
'messages': {
'source_filenames': (
'sumo/css/users.autocomplete.css',
'sumo/less/messages.less',
),
'output_filename': 'build/messages-min.css'
},
'mobile-messages': {
'source_filenames': (
'sumo/less/mobile/messages.less',
),
'output_filename': 'build/mobile-messages-min.css'
},
'products': {
'source_filenames': (
'sumo/less/products.less',
),
'output_filename': 'build/products-min.css'
},
'mobile-products': {
'source_filenames': (
'sumo/less/mobile/products.less',
),
'output_filename': 'build/mobile-products-min.css'
},
'groups': {
'source_filenames': (
'sumo/css/users.autocomplete.css',
'sumo/css/users.list.css',
'sumo/css/groups.css',
'sumo/css/wiki_syntax.css',
),
'output_filename': 'build/groups-min.css'
},
'kpi.dashboard': {
'source_filenames': (
'sumo/less/kpi.dashboard.less',
),
'output_filename': 'build/kpi.dashboard-min.css'
},
'locale-switcher': {
'source_filenames': (
'sumo/less/locale-switcher.less',
),
'output_filename': 'build/locale-switcher-min.css'
},
'mobile-locale-switcher': {
'source_filenames': (
'sumo/less/mobile/locales.less',
),
'output_filename': 'build/mobile-locale-switcher-min.css'
},
'kbdashboards': {
'source_filenames': (
'sumo/less/kbdashboards.less',
),
'output_filename': 'build/kbdashboards-min.css'
},
'landings-get-involved': {
'source_filenames': (
'sumo/less/landings/get-involved.less',
),
'output_filename': 'build/landings-get-involved-min.css'
},
'mobile-landings-get-involved': {
'source_filenames': (
'sumo/less/mobile/landings/get-involved.less',
),
'output_filename': 'build/mobile-landings-get-involved-min.css'
},
'badges': {
'source_filenames': (
'sumo/less/badges.less',
),
'output_filename': 'build/badges-min.css'
}
}
PIPELINE_JS = {
'common': {
'source_filenames': (
'sumo/js/i18n.js',
'underscore/underscore.js',
'moment/moment.js',
'jquery/jquery.min.js',
'jquery/jquery-migrate.js',
'sumo/js/libs/jquery.cookie.js',
'sumo/js/libs/jquery.placeholder.js',
'sumo/js/templates/macros.js',
'sumo/js/templates/search-results-list.js',
'sumo/js/templates/search-results.js',
'nunjucks/browser/nunjucks-slim.js',
'sumo/js/nunjucks.js',
'sumo/js/cached_xhr.js',
'sumo/js/search_utils.js',
'sumo/js/browserdetect.js',
'sumo/js/libs/uitour.js',
'sumo/js/kbox.js',
'sumo/js/main.js',
'sumo/js/format.js',
'modernizr/modernizr.js',
'sumo/js/geoip-locale.js',
'mailcheck/src/mailcheck.js',
'sumo/js/ui.js',
'sumo/js/analytics.js',
'sumo/js/surveygizmo.js',
'sumo/js/instant_search.js',
),
'output_filename': 'build/common-min.js'
},
'community': {
'source_filenames': (
'jquery/jquery.min.js',
'jquery/jquery-migrate.js',
'community/js/community.js',
'community/js/select.js',
),
'output_filename': 'build/community-min.js'
},
'community-new-questions': {
'source_filenames': (
# This uses the minified version because it is optimized to leave
# out lots of debug stuff, so it is significantly smaller than
# just minifying react.js.
# TODO: Figure out how to include the full sized version in dev,
# because it produces much nicer error messages.
'react/react.min.js',
# 'react/react.js',
'pikaday/pikaday.js',
'community/js/community-questions.browserify.js',
),
'output_filename': 'build/community-questions-min.js'
},
'community-new-l10n': {
'source_filenames': (
# This uses the minified version because it is optimized to leave
# out lots of debug stuff, so it is significantly smaller than
# just minifying react.js.
# TODO: Figure out how to include the full sized version in dev,
# because it produces much nicer error messages.
'react/react.min.js',
# 'react/react.js',
'pikaday/pikaday.js',
'community/js/community-l10n.browserify.js',
),
'output_filename': 'build/community-l10n-min.js'
},
'mobile-common': {
'source_filenames': (
'sumo/js/i18n.js',
'underscore/underscore.js',
'jquery/jquery.min.js',
'jquery/jquery-migrate.js',
'modernizr/modernizr.js',
'sumo/js/browserdetect.js',
'sumo/js/aaq.js',
'sumo/js/mobile/ui.js',
'sumo/js/analytics.js',
),
'output_filename': 'build/mobile-common-min.js'
},
'ie6-8': {
'source_filenames': (
'nwmatcher/src/nwmatcher.js',
'sumo/js/libs/selectivizr-1.0.2.js',
),
'output_filename': 'build/ie6-8-min.js'
},
'jqueryui': {
'source_filenames': (
'jquery-ui/ui/jquery.ui.core.js',
'jquery-ui/ui/jquery.ui.widget.js',
'jquery-ui/ui/jquery.ui.mouse.js',
'jquery-ui/ui/jquery.ui.position.js',
'jquery-ui/ui/jquery.ui.sortable.js',
'jquery-ui/ui/jquery.ui.accordion.js',
'jquery-ui/ui/jquery.ui.autocomplete.js',
'jquery-ui/ui/jquery.ui.datepicker.js',
'jquery-ui/ui/jquery.ui.menu.js',
'jquery-ui/ui/jquery.ui.slider.js',
'jquery-ui/ui/jquery.ui.tabs.js',
),
'output_filename': 'build/jqueryui-min.js'
},
'questions': {
'source_filenames': (
'sumo/js/markup.js',
'sumo/js/ajaxvote.js',
'sumo/js/ajaxpreview.js',
'sumo/js/remote.js',
'sumo/js/aaq.js',
'sumo/js/questions.js',
'sumo/js/libs/jquery.tokeninput.js',
'sumo/js/tags.filter.js',
'sumo/js/tags.js',
'sumo/js/reportabuse.js',
'sumo/js/questions.metrics.js',
'sumo/js/libs/jquery.ajaxupload.js',
'sumo/js/upload.js',
),
'output_filename': 'build/questions-min.js'
},
'questions.metrics': {
'source_filenames': (
'sumo/js/questions.metrics-dashboard.js',
),
'output_filename': 'build/questions.metrics-min.js'
},
'questions.aaq.react': {
'source_filenames': (
# This uses the minified version because it is optimized to leave
# out lots of debug stuff, so it is significantly smaller than
# just minifying react.js.
# TODO: Figure out how to include the full sized version in dev,
# because it produces much nicer error messages.
'react/react.min.js',
# 'react/react.js',
'flux/dist/Flux.js',
'underscore/underscore.js',
'questions/js/aaq.browserify.js',
),
'output_filename': 'build/questions.aaq.react-min.js',
},
'mobile-questions': {
'source_filenames': (
'sumo/js/mobile/questions.js',
'sumo/js/questions.metrics.js',
),
'output_filename': 'build/mobile-questions-min.js'
},
'mobile-aaq': {
'source_filenames': (
'sumo/js/aaq.js',
'sumo/js/mobile/aaq.js',
),
'output_filename': 'build/mobile-aaq-min.js'
},
'products': {
'source_filenames': (
'sumo/js/compare_versions.js',
'sumo/js/products.js',
),
'output_filename': 'build/products-min.js'
},
'mobile-products': {
'source_filenames': (
'sumo/js/templates/mobile-product-search-results.js',
'nunjucks/browser/nunjucks-slim.js',
'sumo/js/nunjucks.js',
'moment/moment.js',
'sumo/js/cached_xhr.js',
'sumo/js/search_utils.js',
'sumo/js/instant_search.js',
'sumo/js/mobile/products.js',
),
'output_filename': 'build/mobile-products-min.js'
},
'search': {
'source_filenames': (
'sumo/js/search.js',
),
'output_filename': 'build/search-min.js'
},
'coolsearch': {
'source_filenames': (
# This uses the minified version because it is optimized to leave
# out lots of debug stuff, so it is significantly smaller than
# just minifying react.js.
# TODO: Figure out how to include the full sized version in dev,
# because it produces much nicer error messages.
# 'react/react.min.js',
'react/react.js',
'flux/dist/Flux.js',
'underscore/underscore.js',
'coolsearch/js/search.browserify.js',
),
'output_filename': 'build/search.react-min.js'
},
'forums': {
'source_filenames': (
'sumo/js/markup.js',
'sumo/js/ajaxpreview.js',
'sumo/js/forums.js',
'sumo/js/reportabuse.js',
),
'output_filename': 'build/forums-min.js'
},
'gallery': {
'source_filenames': (
'sumo/js/libs/jquery.ajaxupload.js',
'sumo/js/gallery.js',
),
'output_filename': 'build/gallery-min.js'
},
'wiki': {
'source_filenames': (
'sumo/js/markup.js',
'sumo/js/libs/django/urlify.js',
'sumo/js/libs/django/prepopulate.js',
'sumo/js/libs/jquery.lazyload.js',
'sumo/js/libs/jquery.tokeninput.js',
'sumo/js/users.autocomplete.js',
'sumo/js/screencast.js',
'sumo/js/showfor.js',
'sumo/js/ajaxvote.js',
'sumo/js/ajaxpreview.js',
'sumo/js/wiki.js',
'sumo/js/tags.js',
'sumo/js/dashboards.js',
'sumo/js/editable.js',
'sumo/js/wiki.metrics.js',
'sumo/js/templates/wiki-related-doc.js',
'sumo/js/templates/wiki-search-results.js',
'sumo/js/wiki_search.js',
),
'output_filename': 'build/wiki-min.js'
},
'rickshaw': {
'source_filenames': (
'd3/d3.js',
'sumo/js/libs/d3.layout.min.js',
'sumo/js/libs/rickshaw.js',
'sumo/js/rickshaw_utils.js',
),
'output_filename': 'build/rickshaw-min.js'
},
'mobile-wiki': {
'source_filenames': (
'underscore/underscore.js',
'sumo/js/libs/jquery.cookie.js',
'sumo/js/libs/jquery.lazyload.js',
'sumo/js/browserdetect.js',
'sumo/js/showfor.js',
'sumo/js/ajaxform.js',
'sumo/js/mobile/wiki.js',
'sumo/js/wiki.metrics.js',
),
'output_filename': 'build/mobile-wiki-min.js'
},
'mobile-wiki-minimal': {
'source_filenames': (
'sumo/js/i18n.js',
'underscore/underscore.js',
'jquery/jquery.min.js',
'jquery/jquery-migrate.js',
'modernizr/modernizr.js',
'sumo/js/browserdetect.js',
'sumo/js/mobile/ui.js',
'sumo/js/analytics.js',
'sumo/js/libs/jquery.cookie.js',
'sumo/js/libs/jquery.lazyload.js',
'sumo/js/showfor.js',
'sumo/js/ajaxform.js',
'sumo/js/mobile/wiki.js',
'sumo/js/wiki.metrics.js',
),
'output_filename': 'build/mobile-wiki-minimal-min.js'
},
'wiki.history': {
'source_filenames': (
'sumo/js/historycharts.js',
),
'output_filename': 'build/wiki.history-min.js'
},
'wiki.diff': {
'source_filenames': (
'sumo/js/libs/diff_match_patch_uncompressed.js',
'sumo/js/diff.js',
),
'output_filename': 'build/wiki.diff-min.js'
},
'wiki.editor': {
'source_filenames': (
'codemirror/lib/codemirror.js',
'codemirror/addon/mode/simple.js',
'codemirror/addon/hint/show-hint.js',
'sumo/js/codemirror.sumo-hint.js',
'sumo/js/codemirror.sumo-mode.js',
),
'output_filename': 'build/wiki.editor-min.js'
},
'wiki.dashboard': {
'source_filenames': (
'sumo/js/wiki.dashboard.js',
),
'output_filename': 'build/wiki.dashboard-min.js'
},
'customercare': {
'source_filenames': (
'sumo/js/libs/jquery.cookie.js',
'sumo/js/libs/jquery.bullseye-1.0.min.js',
'sumo/js/libs/twitter-text.js',
'sumo/js/customercare.js',
'sumo/js/users.js',
),
'output_filename': 'build/customercare-min.js'
},
'users': {
'source_filenames': (
'sumo/js/users.js',
'sumo/js/reportabuse.js',
),
'output_filename': 'build/users-min.js'
},
'messages': {
'source_filenames': (
'sumo/js/markup.js',
'sumo/js/libs/jquery.autoresize.js',
'sumo/js/libs/jquery.tokeninput.js',
'sumo/js/users.autocomplete.js',
'sumo/js/ajaxpreview.js',
'sumo/js/messages.js',
),
'output_filename': 'build/messages-min.js'
},
'mobile-messages': {
'source_filenames': (
'sumo/js/libs/jquery.tokeninput.js',
'sumo/js/users.autocomplete.js',
),
'output_filename': 'build/mobile-messages-min.js'
},
'groups': {
'source_filenames': (
'sumo/js/libs/jquery.tokeninput.js',
'sumo/js/users.autocomplete.js',
'sumo/js/markup.js',
'sumo/js/groups.js',
'sumo/js/editable.js',
),
'output_filename': 'build/groups-min.js'
},
'kpi.dashboard': {
'source_filenames': (
'sumo/js/kpi.dashboard.js',
),
'output_filename': 'build/kpi.dashboard-min.js'
}
}
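# These bundles are consumed from templates through django-pipeline's template
# tags (assuming the standard django-pipeline setup), for example:
#   {% load pipeline %}
#   {% stylesheet 'common' %}
#   {% javascript 'common' %}
# With the pipeline enabled each name resolves to its minified
# 'output_filename'; in debug mode the individual 'source_filenames' are
# served instead.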
|
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains policies used in MAML."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import numpy as np
import tensorflow.compat.v1 as tf
import tensorflow_probability as tfp
class Policy(object):
r"""Abstract class for different policies \Pi: S -> A.
Class is responsible for creating different policies and provides an interface
for computing actions recommended by policies in different input states.
In particular, this class provides an interface that accepts compressed
vectorized form of the policy and decompresses it.
Standard procedure for improving the parameters of the policy with an
interface given by the class:
policy = policies.ParticularClassThatInheritsFromBaseClass(...)
vectorized_network = policy.get_initial()
while(...):
new_vectorized_network = SomeTransformationOf(vectorized_network)
policy.update(new_vectorized_network)
and SomeTransformationOf is a single step of some optimization procedure such
as gradient descent that sees the policy in the vectorized form.
"""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def update(self, vectorized_parameters):
"""Updates the policy using new parameters from <vectorized_parameters>.
Updates the parameters of the policy using new parameters encoded by
<vectorized_parameters>. The size of the vector <vectorized_parameters>
should be the number of all biases and weights of the neural network.
We use the convention where parameters encoding matrices of connections of
the neural network come in <vectorized_parameters> before parameters
encoding biases and furthermore the order in <vectorized_parameters> of
parameters encoding weights for different matrices/biases-vectors is
inherited from the order of these matrices/biases-vectors in the
decompressed neural network. Details regarding compression depend on
different neural network architectures used (such as: structured and
unstructured) and are given in the implementations of that abstract method
in specific classes that inherit from Policy.
Args:
vectorized_parameters: parameters of the neural network in the vectorized
form.
Returns:
"""
raise NotImplementedError('Abstract method')
@abc.abstractmethod
def get_action(self, state):
"""Returns the action proposed by a policy in a given state.
Returns an action proposed by the policy in <state>.
Args:
state: input state
Returns:
Action proposed by the policy represented by an object of the class in a
given state.
"""
raise NotImplementedError('Abstract method')
@abc.abstractmethod
def get_initial(self):
"""Returns the default parameters of the policy in the vectorized form.
Initial parameters of the policy are output in the vectorized form.
Args:
Returns:
Numpy array encoding in the vectorized form initial parameters of the
policy.
"""
raise NotImplementedError('Abstract method')
@abc.abstractmethod
def get_total_num_parameters(self):
"""Outputs total number of parameters of the policy.
Args:
Returns:
Total number of parameters used by the policy.
"""
raise NotImplementedError('Abstract method')
class BasicTFPolicy(Policy):
"""Basic Policy implemented in Tensorflow."""
def __init__(self, state_dimensionality, action_dimensionality, hidden_layers,
scope):
self.state_dimensionality = state_dimensionality
self.action_dimensionality = action_dimensionality
self.input_ph = tf.placeholder(
dtype=tf.float32, shape=[None, self.state_dimensionality])
self.output_ph = tf.placeholder(
dtype=tf.float32, shape=[None, self.action_dimensionality])
with tf.variable_scope(scope, reuse=tf.AUTO_REUSE):
self.out = self.input_ph
for i, layer_size in enumerate(hidden_layers):
self.out = tf.layers.dense(
self.out, layer_size, activation=tf.nn.relu, name='h' + str(i))
self.main_out = tf.layers.dense(
self.out, self.action_dimensionality, name='main_out')
self.secondary_out = tf.layers.dense(
self.out, self.action_dimensionality, name='secondary_out')
self.action = tfp.distributions.Normal(
loc=self.main_out, scale=self.secondary_out).sample()
self.loss = tf.losses.mean_squared_error(self.main_out, self.output_ph)
self.obj_tensor = -1.0 * self.loss
self.tf_params = tf.trainable_variables(scope)
self.shapes = [v.shape.as_list() for v in self.tf_params]
self.sizes = [int(np.prod(s)) for s in self.shapes]
self.total_nb_parameters = sum(self.sizes)
self.assign_ph_dict = {
v: tf.placeholder(dtype=tf.float32, shape=v.shape.as_list())
for v in self.tf_params
}
self.assign_ops = []
for v in self.tf_params:
self.assign_ops.append(v.assign(self.assign_ph_dict[v]))
with tf.control_dependencies(self.assign_ops):
# This is needed to input Numpy Params into network temporarily
self.action = tf.identity(self.action)
self.sess = tf.Session()
self.sess.run(tf.global_variables_initializer())
self.np_params = np.concatenate([
self.sess.run(tf.reshape(tf_param, [-1])) for tf_param in self.tf_params
])
def update(self, flattened_weights):
self.np_params = flattened_weights
def get_action(self, state):
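    # Feed each variable's slice of the flat numpy parameter vector through
    # the assign placeholders created in __init__, so that running
    # self.action (which carries a control dependency on the assign ops)
    # evaluates the policy with these temporarily injected weights.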
ph_dict = {}
for ind, v in enumerate(self.tf_params):
numpy_flat_val = self.np_params[sum(self.sizes[:ind]
):sum(self.sizes[:ind + 1])]
numpy_reshaped = np.reshape(numpy_flat_val, self.shapes[ind])
v_ph = self.assign_ph_dict[v]
ph_dict[v_ph] = numpy_reshaped
ph_dict[self.input_ph] = state.reshape(-1, self.state_dimensionality)
action_numpy = self.sess.run(self.action, feed_dict=ph_dict)
return action_numpy.flatten()
def get_initial(self):
return self.np_params
def get_total_num_parameters(self):
return self.total_nb_parameters
class DeterministicNumpyPolicy(Policy):
"""Deterministic Policy implemented in Numpy."""
def __init__(self,
state_dimensionality,
action_dimensionality,
hidden_layers,
init_sd=None):
self.state_dimensionality = state_dimensionality
self.action_dimensionality = action_dimensionality
self.layers = hidden_layers + [action_dimensionality]
self.layers.insert(0, state_dimensionality)
self.weights = []
self.biases = []
self.weight_positions = []
self.bias_positions = []
self.init_params = []
flat_pos = 0
for dims in zip(self.layers[:-1], self.layers[1:]):
in_size = dims[0]
out_size = dims[1]
if init_sd is None:
init_sd = np.sqrt(2.0 / (in_size))
init_weights = init_sd * np.random.normal(0, 1, size=(out_size * in_size))
self.init_params.extend(init_weights.tolist())
self.weights.append(np.reshape(init_weights, (out_size, in_size)))
self.weight_positions.append(flat_pos)
flat_pos += out_size * in_size
init_biases = np.zeros(out_size)
self.init_params.extend(init_biases.tolist())
self.biases.append(init_biases)
self.bias_positions.append(flat_pos)
flat_pos += out_size
self.weight_positions.append(flat_pos)
def update(self, flat_weights):
for i, dims in enumerate(zip(self.layers[:-1], self.layers[1:])):
in_size = dims[0]
out_size = dims[1]
start_pos = self.weight_positions[i]
end_pos = start_pos + (out_size * in_size)
self.weights[i] = np.reshape(
np.array(flat_weights[start_pos:end_pos]), (out_size, in_size))
start_pos = self.bias_positions[i]
end_pos = start_pos + out_size
self.biases[i] = np.reshape(
np.array(flat_weights[start_pos:end_pos]), (out_size))
def get_action(self, state):
neuron_values = np.reshape(np.array(state), (self.state_dimensionality))
for i in range(len(self.weights)):
neuron_values = np.matmul(self.weights[i], neuron_values)
neuron_values += self.biases[i]
if i < len(self.weights) - 1:
np.maximum(neuron_values, 0, neuron_values)
np.tanh(neuron_values, neuron_values) # this is sometimes not needed
return neuron_values
def get_initial(self):
return np.array(self.init_params)
def get_total_num_parameters(self):
return self.weight_positions[-1]
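# Minimal sketch of the optimization loop described in the Policy docstring,
# using DeterministicNumpyPolicy with a toy random-search perturbation. The
# step size and the unconditional acceptance rule are illustrative
# assumptions; obtaining rewards from an environment is out of scope here.
def _example_policy_loop(num_steps=10):
  policy = DeterministicNumpyPolicy(
      state_dimensionality=4, action_dimensionality=2, hidden_layers=[8])
  params = policy.get_initial()
  action = None
  for _ in range(num_steps):
    candidate = params + 0.01 * np.random.normal(size=params.shape)
    policy.update(candidate)  # decompress the flat vector into weights/biases
    action = policy.get_action(np.zeros(4))
    params = candidate  # a real optimizer would accept based on reward
  return params, action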
|
|
# -*- coding: utf-8 -*-
import json
import logging
import vobject
from datetime import datetime
from contextlib import contextmanager
from radicale import ical
from yats.shortcuts import get_ticket_model, build_ticket_search_ext, touch_ticket, remember_changes, mail_ticket, jabber_ticket, check_references, add_history, mail_comment, jabber_comment
from yats.models import tickets_reports, UserProfile, get_flow_end, tickets_comments, ticket_resolution, get_default_resolution, convertPrio
from yats.forms import SimpleTickets
from django.contrib.auth.models import AnonymousUser, User
from django.http import QueryDict
from django.conf import settings
from django.utils import timezone
from django.utils.translation import ugettext as _
from djradicale.models import DBProperties
logger = logging.getLogger('djradicale')
ICAL_TYPES = (
ical.Event,
ical.Todo,
ical.Journal,
# ical.Card,
ical.Timezone,
)
class FakeRequest:
def __init__(self):
self.GET = {}
self.POST = {}
self.session = {}
self.user = AnonymousUser()
class Collection(ical.Collection):
@property
def headers(self):
return (
ical.Header('PRODID:-//YATS//NONSGML Radicale Server//EN'),
ical.Header('VERSION:%s' % self.version))
def delete(self):
repid = self._getReportFromUrl(self.path)
tickets_reports.objects.get(pk=repid).delete()
def append(self, name, text):
        # Remote-debugging hook left over from development; kept disabled so
        # this method does not require a pydevd debugger to be listening.
        # import pydevd
        # pydevd.settrace('192.168.33.1', 5678)
new_items = self._parse(text, ICAL_TYPES, name)
timezones = list(filter(
lambda x: x.tag == ical.Timezone.tag, new_items.values()))
request = self._getRequestFromUrl(self.path)
for new_item in new_items.values():
if new_item.tag == ical.Timezone.tag:
continue
if new_item.name not in self.items:
self.items[new_item.name] = new_item
text = ical.serialize(self.tag, self.headers, [new_item] + timezones)
cal = vobject.readOne(text)
# close ticket
if hasattr(cal.vtodo, 'status') and cal.vtodo.status.value == 'COMPLETED':
ticket = get_ticket_model()
try:
flow_end = get_flow_end()
resolution = get_default_resolution()
close_comment = _('closed via CalDAV')
tic = ticket.objects.get(uuid=cal.vtodo.uid.value)
tic.resolution = resolution
tic.closed = True
tic.close_date = timezone.now()
tic.state = flow_end
tic.save(user=request.user)
com = tickets_comments()
com.comment = _('ticket closed - resolution: %(resolution)s\n\n%(comment)s') % {'resolution': resolution.name, 'comment': close_comment}
com.ticket = tic
com.action = 1
com.save(user=request.user)
check_references(request, com)
touch_ticket(request.user, tic.id)
add_history(request, tic, 1, close_comment)
mail_comment(request, com.pk)
jabber_comment(request, com.pk)
except Exception:
pass
# change or new
else:
params = {
'caption': cal.vtodo.summary.value,
'description': cal.vtodo.description.value if hasattr(cal.vtodo, 'description') else None,
'uuid': cal.vtodo.uid.value,
'show_start': cal.vtodo.due.value if hasattr(cal.vtodo, 'due') else None,
'priority': convertPrio(cal.vtodo.priority.value) if hasattr(cal.vtodo, 'priority') else None
}
fakePOST = QueryDict(mutable=True)
fakePOST.update(params)
form = SimpleTickets(fakePOST)
if form.is_valid():
cd = form.cleaned_data
ticket = get_ticket_model()
# change ticket
try:
tic = ticket.objects.get(uuid=cal.vtodo.uid.value)
tic.caption = cd['caption']
tic.description = cd['description']
tic.priority = cd['priority']
# tic.assigned = cd['assigned']
tic.show_start = cd['show_start']
tic.save(user=request.user)
# new ticket
except ticket.DoesNotExist:
tic = ticket()
tic.caption = cd['caption']
tic.description = cd['description']
if 'priority' not in cd or not cd['priority']:
if hasattr(settings, 'KEEP_IT_SIMPLE_DEFAULT_PRIORITY') and settings.KEEP_IT_SIMPLE_DEFAULT_PRIORITY:
tic.priority_id = settings.KEEP_IT_SIMPLE_DEFAULT_PRIORITY
else:
tic.priority = cd['priority']
tic.assigned = request.user
if hasattr(settings, 'KEEP_IT_SIMPLE_DEFAULT_CUSTOMER') and settings.KEEP_IT_SIMPLE_DEFAULT_CUSTOMER:
if settings.KEEP_IT_SIMPLE_DEFAULT_CUSTOMER == -1:
tic.customer = request.organisation
else:
                                    tic.customer_id = settings.KEEP_IT_SIMPLE_DEFAULT_CUSTOMER
if hasattr(settings, 'KEEP_IT_SIMPLE_DEFAULT_COMPONENT') and settings.KEEP_IT_SIMPLE_DEFAULT_COMPONENT:
tic.component_id = settings.KEEP_IT_SIMPLE_DEFAULT_COMPONENT
tic.show_start = cd['show_start']
tic.uuid = cal.vtodo.uid.value
tic.save(user=request.user)
if tic.assigned:
touch_ticket(tic.assigned, tic.pk)
for ele in form.changed_data:
form.initial[ele] = ''
remember_changes(request, form, tic)
touch_ticket(request.user, tic.pk)
mail_ticket(request, tic.pk, form, rcpt=settings.TICKET_NEW_MAIL_RCPT, is_api=True)
jabber_ticket(request, tic.pk, form, rcpt=settings.TICKET_NEW_JABBER_RCPT, is_api=True)
else:
raise Exception(form.errors)
def remove(self, name):
pass
def replace(self, name, text):
self.append(name, text)
@property
def text(self):
return ical.serialize(self.tag, self.headers, self.items.values())
@classmethod
def children(cls, path):
"""Yield the children of the collection at local ``path``."""
request = cls._getRequestFromUrl(path)
children = list(tickets_reports.objects.filter(active_record=True, c_user=request.user).values_list('slug', flat=True))
children = ['%s/%s.ics' % (request.user.username, itm) for itm in children]
return map(cls, children)
@classmethod
def is_node(cls, path):
"""Return ``True`` if relative ``path`` is a node.
A node is a WebDAV collection whose members are other collections.
"""
request = cls._getRequestFromUrl(path)
if path == request.user.username:
return True
else:
return False
@classmethod
def is_leaf(cls, path):
"""Return ``True`` if relative ``path`` is a leaf.
A leaf is a WebDAV collection whose members are not collections.
"""
result = False
if '.ics' in path:
try:
request = cls._getRequestFromUrl(path)
rep = tickets_reports.objects.get(active_record=True, pk=cls._getReportFromUrl(path))
tic = get_ticket_model().objects.select_related('type', 'state', 'assigned', 'priority', 'customer').all()
search_params, tic = build_ticket_search_ext(request, tic, json.loads(rep.search))
                result = tic.exists()
            except Exception:
                logger.exception('could not resolve report for path %s', path)
return result
@property
def last_modified(self):
try:
request = self._getRequestFromUrl(self.path)
rep = tickets_reports.objects.get(active_record=True, pk=self._getReportFromUrl(self.path))
tic = get_ticket_model().objects.select_related('type', 'state', 'assigned', 'priority', 'customer').all()
search_params, tic = build_ticket_search_ext(request, tic, json.loads(rep.search))
date = tic.latest('u_date')
return datetime.strftime(
date.last_action_date, '%a, %d %b %Y %H:%M:%S %z')
        except Exception:
            logger.exception('could not determine last modification for %s', self.path)
@property
def tag(self):
with self.props as props:
if 'tag' not in props:
props['tag'] = 'VCALENDAR'
return props['tag']
@property
@contextmanager
def props(self):
# On enter
properties = {}
try:
props = DBProperties.objects.get(path=self.path)
except DBProperties.DoesNotExist:
pass
else:
properties.update(json.loads(props.text))
old_properties = properties.copy()
yield properties
# On exit
if old_properties != properties:
props, created = DBProperties.objects.get_or_create(path=self.path)
props.text = json.dumps(properties)
props.save()
@property
def items(self):
itms = {}
try:
request = self._getRequestFromUrl(self.path)
if self.path == request.user.username:
return itms
rep = tickets_reports.objects.get(active_record=True, pk=self._getReportFromUrl(self.path))
tic = get_ticket_model().objects.select_related('type', 'state', 'assigned', 'priority', 'customer').all()
search_params, tic = build_ticket_search_ext(request, tic, json.loads(rep.search))
for item in tic:
text = self._itemToICal(item)
itms.update(self._parse(text, ICAL_TYPES))
        except Exception:
            logger.exception('could not build item list for %s', self.path)
return itms
@classmethod
def _getRequestFromUrl(cls, path):
user = path.split('/')[0]
request = FakeRequest()
request.user = User.objects.get(username=user)
request.organisation = UserProfile.objects.get(user=request.user).organisation
return request
@classmethod
def _getReportFromUrl(cls, path):
if '.ics' in path:
file = path.split('/')[-1]
file = file.replace('.ics', '')
repid = tickets_reports.objects.get(active_record=True, slug=file).pk
return repid
return 0
@classmethod
def _itemToICal(cls, item):
cal = vobject.iCalendar()
cal.add('vtodo')
cal.vtodo.add('summary').value = item.caption
cal.vtodo.add('uid').value = str(item.uuid)
cal.vtodo.add('created').value = item.c_date
if item.closed:
cal.vtodo.add('status').value = 'COMPLETED'
if item.priority:
cal.vtodo.add('priority').value = str(item.priority.caldav)
else:
cal.vtodo.add('priority').value = '0'
if item.description:
cal.vtodo.add('description').value = item.description
if item.show_start:
# cal.vtodo.add('dstart').value = item.show_start
cal.vtodo.add('due').value = item.show_start
cal.vtodo.add('valarm')
cal.vtodo.valarm.add('uuid').value = '%s-%s' % (str(item.uuid), item.pk)
cal.vtodo.valarm.add('x-wr-alarmuid').value = '%s-%s' % (str(item.uuid), item.pk)
cal.vtodo.valarm.add('action').value = 'DISPLAY'
# cal.vtodo.valarm.add('x-apple-proximity').value = 'DEPART'
            cal.vtodo.valarm.add('description').value = 'Reminder for an event'
# cal.vtodo.valarm.add('trigger').value =
# TRIGGER;VALUE=DATE-TIME:20180821T200000Z
cal.vtodo.add('x-radicale-name').value = '%s.ics' % str(item.uuid)
return cal.serialize()
|
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# thumbor imaging service
# https://github.com/thumbor/thumbor/wiki
# Licensed under the MIT license:
# http://www.opensource.org/licenses/mit-license
# Copyright (c) 2011 globo.com thumbor@googlegroups.com
from preggy import expect
from tornado.testing import gen_test
from tests.fixtures.watermark_fixtures import (
POSITIONS,
RATIOS,
SOURCE_IMAGE_SIZES,
WATERMARK_IMAGE_SIZES,
)
from thumbor.filters import watermark
from thumbor.testing import FilterTestCase
class WatermarkFilterTestCase(FilterTestCase):
@gen_test
async def test_watermark_filter_centered(self):
image = await self.get_filtered(
"source.jpg",
"thumbor.filters.watermark",
"watermark(watermark.png,center,center,60)",
)
expected = self.get_fixture("watermarkCenter.jpg")
ssim = self.get_ssim(image, expected)
expect(ssim).to_be_greater_than(0.98)
@gen_test
async def test_watermark_filter_centered_x(self):
image = await self.get_filtered(
"source.jpg",
"thumbor.filters.watermark",
"watermark(watermark.png,center,40,20)",
)
expected = self.get_fixture("watermarkCenterX.jpg")
ssim = self.get_ssim(image, expected)
expect(ssim).to_be_greater_than(0.98)
@gen_test
async def test_watermark_filter_centered_y(self):
image = await self.get_filtered(
"source.jpg",
"thumbor.filters.watermark",
"watermark(watermark.png,80,center,50)",
)
expected = self.get_fixture("watermarkCenterY.jpg")
ssim = self.get_ssim(image, expected)
expect(ssim).to_be_greater_than(0.98)
@gen_test
async def test_watermark_filter_repeated(self):
image = await self.get_filtered(
"source.jpg",
"thumbor.filters.watermark",
"watermark(watermark.png,repeat,repeat,70)",
)
expected = self.get_fixture("watermarkRepeat.jpg")
ssim = self.get_ssim(image, expected)
expect(ssim).to_be_greater_than(0.98)
@gen_test
async def test_watermark_filter_repeated_x(self):
image = await self.get_filtered(
"source.jpg",
"thumbor.filters.watermark",
"watermark(watermark.png,repeat,center,70)",
)
expected = self.get_fixture("watermarkRepeatX.jpg")
ssim = self.get_ssim(image, expected)
expect(ssim).to_be_greater_than(0.98)
@gen_test
async def test_watermark_filter_repeated_y(self):
image = await self.get_filtered(
"source.jpg",
"thumbor.filters.watermark",
"watermark(watermark.png,30,repeat,30)",
)
expected = self.get_fixture("watermarkRepeatY.jpg")
ssim = self.get_ssim(image, expected)
expect(ssim).to_be_greater_than(0.98)
@gen_test
async def test_watermark_filter_detect_extension_simple(self):
image = await self.get_filtered(
"source.jpg",
"thumbor.filters.watermark",
"watermark(watermark,30,-50,60)",
)
expected = self.get_fixture("watermarkSimple.jpg")
ssim = self.get_ssim(image, expected)
expect(ssim).to_be_greater_than(0.98)
@gen_test
async def test_watermark_filter_simple(self):
image = await self.get_filtered(
"source.jpg",
"thumbor.filters.watermark",
"watermark(watermark.png,30,-50,60)",
)
expected = self.get_fixture("watermarkSimple.jpg")
ssim = self.get_ssim(image, expected)
expect(ssim).to_be_greater_than(0.98)
@gen_test
async def test_watermark_filter_calculated(self):
image = await self.get_filtered(
"source.jpg",
"thumbor.filters.watermark",
"watermark(watermark.png,4p,-30p,60)",
)
expected = await self.get_filtered(
"source.jpg",
"thumbor.filters.watermark",
"watermark(watermark.png,32,-160,60)",
)
ssim = self.get_ssim(image, expected)
expect(ssim).to_be_greater_than(0.98)
@gen_test
async def test_watermark_filter_calculated_center(self):
image = await self.get_filtered(
"source.jpg",
"thumbor.filters.watermark",
"watermark(watermark.png,4p,center,60)",
)
expected = await self.get_filtered(
"source.jpg",
"thumbor.filters.watermark",
"watermark(watermark.png,32,center,60)",
)
ssim = self.get_ssim(image, expected)
expect(ssim).to_be_greater_than(0.98)
@gen_test
async def test_watermark_filter_calculated_repeat(self):
image = await self.get_filtered(
"source.jpg",
"thumbor.filters.watermark",
"watermark(watermark.png,repeat,30p,60)",
)
expected = await self.get_filtered(
"source.jpg",
"thumbor.filters.watermark",
"watermark(watermark.png,repeat,160,60)",
)
ssim = self.get_ssim(image, expected)
expect(ssim).to_be_greater_than(0.98)
@gen_test
async def test_watermark_filter_calculated_position(self):
watermark.Filter.pre_compile()
filter_instance = watermark.Filter("http://dummy,0,0,0", self.context)
for length, pos, expected in POSITIONS:
test = {
"length": length,
"pos": pos,
}
expect(
filter_instance.detect_and_get_ratio_position(pos, length)
).to_be_equal_with_additional_info(expected, **test)
@gen_test
async def test_watermark_filter_simple_big(self):
image = await self.get_filtered(
"source.jpg",
"thumbor.filters.watermark",
"watermark(watermarkBig.png,-10,-100,50)",
)
expected = self.get_fixture("watermarkSimpleBig.jpg")
ssim = self.get_ssim(image, expected)
expect(ssim).to_be_greater_than(0.98)
@gen_test
async def test_watermark_filter_simple_50p_width(self):
image = await self.get_filtered(
"source.jpg",
"thumbor.filters.watermark",
"watermark(watermark.png,30,-50,20,50)",
)
expected = self.get_fixture("watermarkResize50pWidth.jpg")
ssim = self.get_ssim(image, expected)
expect(ssim).to_be_greater_than(0.98)
@gen_test
async def test_watermark_filter_simple_70p_height(self):
image = await self.get_filtered(
"source.jpg",
"thumbor.filters.watermark",
"watermark(watermark.png,30,-50,20,none,70)",
)
expected = self.get_fixture("watermarkResize70pHeight.jpg")
ssim = self.get_ssim(image, expected)
expect(ssim).to_be_greater_than(0.98)
@gen_test
async def test_watermark_filter_simple_60p_80p(self):
image = await self.get_filtered(
"source.jpg",
"thumbor.filters.watermark",
"watermark(watermark.png,-30,-200,20,60,80)",
)
expected = self.get_fixture("watermarkResize60p80p.jpg")
ssim = self.get_ssim(image, expected)
expect(ssim).to_be_greater_than(0.98)
@gen_test
async def test_watermark_filter_calculated_resizing(self):
watermark.Filter.pre_compile()
filter_instance = watermark.Filter("http://dummy,0,0,0", self.context)
for source_image_width, source_image_height in SOURCE_IMAGE_SIZES:
for (
watermark_source_image_width,
watermark_source_image_height,
) in WATERMARK_IMAGE_SIZES:
for w_ratio, h_ratio in RATIOS:
max_width = (
source_image_width * (float(w_ratio) / 100)
if w_ratio
else float("inf")
)
max_height = (
source_image_height * (float(h_ratio) / 100)
if h_ratio
else float("inf")
)
w_ratio = float(w_ratio) / 100.0 if w_ratio else False
h_ratio = float(h_ratio) / 100.0 if h_ratio else False
ratio = (
float(watermark_source_image_width)
/ watermark_source_image_height
)
(
watermark_image_width,
watermark_image_height,
) = filter_instance.calc_watermark_size(
(source_image_width, source_image_height),
(
watermark_source_image_width,
watermark_source_image_height,
),
w_ratio,
h_ratio,
)
watermark_image = (
float(watermark_image_width) / watermark_image_height
)
test = {
"source_image_width": source_image_width,
"source_image_height": source_image_height,
"watermark_source_image_width": watermark_source_image_width,
"watermark_source_image_height": watermark_source_image_height,
"watermark_image_width": watermark_image_width,
"watermark_image_height": watermark_image_height,
"w_ratio": w_ratio,
"h_ratio": h_ratio,
}
test["topic_name"] = "watermark_image_width"
expect(watermark_image_width).to_fit_into(
max_width, **test
)
test["topic_name"] = "watermark_image_height"
expect(watermark_image_height).to_fit_into(
max_height, **test
)
test["topic_name"] = "fill out"
expect(
(
watermark_image_width == max_width
or watermark_image_height == max_height
)
).to_be_true_with_additional_info(**test)
test["topic_name"] = "image ratio"
expect(watermark_image).to_almost_equal(ratio, 2, **test)
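# Illustrative note (not part of the original tests): the invariants asserted
# above pin down calc_watermark_size(). For example, with an 800x600 source
# image, a 400x200 watermark, w_ratio=0.5 and h_ratio=0.3, the limits become
# max_width=400 and max_height=180; keeping the 2:1 watermark aspect ratio and
# filling out one of the limits yields a 360x180 watermark.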
|
|
import docker
import os
import shlex
from datetime import datetime
from os import chdir
from subprocess import check_output, STDOUT
from vent.api.actions import Action
from vent.api.plugin_helpers import PluginHelper
from vent.api.templates import Template
from vent.helpers.logs import Logger
from vent.helpers.meta import Tools
class MenuHelper:
""" Handle helper functions in the API for the Menu """
def __init__(self, **kargs):
self.api_action = Action(**kargs)
self.plugin = self.api_action.plugin
self.p_helper = self.api_action.p_helper
self.logger = Logger(__name__)
def cores(self, action, branch="master", version='HEAD'):
"""
Supply action (install, build, start, stop, clean) for core tools
"""
self.logger.info("Starting: cores")
status = (False, None)
try:
self.logger.info("action provided: " + str(action))
core = self.tools_status(True, branch=branch, version=version)[1]
if action in ["install", "build"]:
tools = []
core_repo = 'https://github.com/cyberreboot/vent'
resp = self.p_helper.apply_path(core_repo)
if resp[0]:
cwd = resp[1]
else:
self.logger.info("apply_path failed. Exiting cores"
" with status " + str(resp))
return resp
path = os.path.join(self.plugin.path_dirs.plugins_dir,
'cyberreboot/vent')
response = self.p_helper.checkout(branch=branch,
version=version)
self.logger.info("status of plugin checkout " +
str(response))
matches = self.p_helper.available_tools(path,
version=version,
groups='core')
for match in matches:
name = match[0].rsplit('/')[-1]
constraints = {'name': name,
'repo': core_repo}
prev_installed, _ = self.p_helper. \
constraint_options(constraints, [])
if not prev_installed:
tools.append((match[0], ''))
# add the repo only when there are new tools to install, or when it
# matched no tools (repo specification only)
if ((tools) or
(isinstance(matches, list) and len(matches) == 0)):
status = self.plugin.add(core_repo,
tools=tools,
branch=branch,
build=False, core=True)
self.logger.info("status of plugin add: " + str(status))
else:
self.logger.info("no new tools to install")
status = (True, "previously installed")
plugin_c = Template(template=self.plugin.manifest)
sections = plugin_c.sections()
for tool in core['normal']:
for section in sections[1]:
name = plugin_c.option(section, "name")
orig_branch = plugin_c.option(section, "branch")
namespace = plugin_c.option(section, "namespace")
version = plugin_c.option(section, "version")
if (name[1] == tool and
orig_branch[1] == branch and
namespace[1] == "cyberreboot/vent" and
version[1] == "HEAD"):
plugin_c.set_option(section,
"image_name",
"cyberreboot/vent-" +
tool.replace('_', '-') + ":" +
branch)
plugin_c.write_config()
chdir(cwd)
if action == "build":
plugin_c = Template(template=self.plugin.manifest)
sections = plugin_c.sections()
try:
for tool in core['normal']:
for section in sections[1]:
tool = tool.replace('_', '-')
image_name = plugin_c.option(section,
"image_name")
check_image = "cyberreboot/vent-"
check_image += tool + ":" + branch
if image_name[1] == check_image:
timestamp = str(datetime.utcnow()) + " UTC"
try:
# currently can't use docker-py because it
# returns a 404 on pull, so there's no way to validate
# whether it worked or not
image_id = None
cmd = "docker pull " + check_image
output = check_output(shlex.split(cmd),
stderr=STDOUT)
# image_name in format of (bool, image_name)
name = image_name[1]
d_client = docker.from_env()
image_attrs = d_client.images.get(name)
image_attrs = image_attrs.attrs
image_id = image_attrs['Id'].split(':')[1][:12]
if image_id:
plugin_c.set_option(section,
"built",
"yes")
plugin_c.set_option(section,
"image_id",
image_id)
plugin_c.set_option(section,
"last_updated",
timestamp)
status = (True, "Pulled " + tool)
self.logger.info(str(status))
else:
plugin_c.set_option(section,
"built",
"failed")
plugin_c.set_option(section,
"last_updated",
timestamp)
status = (False,
"Failed to pull image " +
str(output.split('\n')[-1]))
self.logger.error(str(status))
except Exception as e: # pragma: no cover
plugin_c.set_option(section,
"built",
"failed")
plugin_c.set_option(section,
"last_updated",
timestamp)
status = (False,
"Failed to pull image " + str(e))
self.logger.error(str(status))
except Exception as e: # pragma: no cover
status = (False, "Failed to pull images " + str(e))
self.logger.error(str(status))
plugin_c.write_config()
elif action == "start":
status = self.api_action.prep_start(groups="core",
branch=branch)
if status[0]:
tool_d = status[1]
status = self.api_action.start(tool_d)
elif action == "stop":
status = self.api_action.stop(groups="core", branch=branch)
elif action == "clean":
status = self.api_action.clean(groups="core", branch=branch)
except Exception as e: # pragma: no cover
self.logger.info("core failed with error: " + str(e))
status = (False, e)
self.logger.info("Status of core: " + str(status[0]))
self.logger.info("Finished: core")
return status
def repo_branches(self, repo):
""" Get the branches of a repository """
self.logger.info("Starting: repo_branches")
self.logger.info("repo given: " + str(repo))
branches = []
try:
# switch to directory where repo will be cloned to
status = self.p_helper.apply_path(repo)
if status[0]:
cwd = status[1]
else:
self.logger.info("apply_path failed. Exiting repo_branches"
" with status " + str(status))
return status
check_output(shlex.split("git pull --all"),
stderr=STDOUT,
close_fds=True)
branch_output = check_output(shlex.split("git branch -a"),
stderr=STDOUT,
close_fds=True)
branch_output = branch_output.split("\n")
for branch in branch_output:
b = branch.strip()
if b.startswith('*'):
b = b[2:]
if "/" in b:
branches.append(b.rsplit('/', 1)[1])
elif b:
branches.append(b)
branches = list(set(branches))
self.logger.info("branches found: " + str(branches))
for branch in branches:
try:
check_output(shlex.split("git checkout " + branch),
stderr=STDOUT,
close_fds=True)
except Exception as e: # pragma: no cover
self.logger.error("repo_branches failed with error: " +
str(e) + " on branch: " + str(branch))
status = (False, e)
self.logger.info("Exiting repo_branches with status: " +
str(status))
return status
chdir(cwd)
status = (True, branches)
except Exception as e: # pragma: no cover
self.logger.error("repo_branches failed with error: " + str(e))
status = (False, e)
self.logger.info("Status of repo_branches: " + str(status))
self.logger.info("Finished: repo_branches")
return status
def repo_commits(self, repo):
""" Get the commit IDs for all of the branches of a repository """
self.logger.info("Starting: repo_commits")
self.logger.info("repo given: " + str(repo))
commits = []
try:
status = self.p_helper.apply_path(repo)
# switch to directory where repo will be cloned to
if status[0]:
cwd = status[1]
else:
self.logger.info("apply_path failed. Exiting repo_commits with"
" status: " + str(status))
return status
status = self.repo_branches(repo)
if status[0]:
branches = status[1]
for branch in branches:
try:
branch_output = check_output(shlex
.split("git rev-list origin/" +
branch),
stderr=STDOUT,
close_fds=True)
branch_output = branch_output.split("\n")[:-1]
branch_output += ['HEAD']
commits.append((branch, branch_output))
except Exception as e: # pragma: no cover
self.logger.error("repo_commits failed with error: " +
str(e) + " on branch: " +
str(branch))
status = (False, e)
self.logger.info("Exiting repo_commits with status: " +
str(status))
return status
else:
self.logger.info("repo_branches failed. Exiting repo_commits"
" with status: " + str(status))
return status
chdir(cwd)
status = (True, commits)
except Exception as e: # pragma: no cover
self.logger.error("repo_commits failed with error: " + str(e))
status = (False, e)
self.logger.info("Status of repo_commits: " + str(status))
self.logger.info("Finished: repo_commits")
return status
def repo_tools(self, repo, branch, version):
""" Get available tools for a repository branch at a version """
self.logger.info("Starting: repo_tools")
self.logger.info("repo given: " + str(repo))
self.logger.info("branch given: " + str(branch))
self.logger.info("version given: " + str(version))
try:
tools = []
status = self.p_helper.apply_path(repo)
# switch to directory where repo will be cloned to
if status[0]:
cwd = status[1]
else:
self.logger.info("apply_path failed. Exiting repo_tools with"
" status: " + str(status))
return status
status = self.p_helper.checkout(branch=branch, version=version)
if status[0]:
path, _, _ = self.p_helper.get_path(repo)
tools = self.p_helper.available_tools(path, version=version)
else:
self.logger.info("checkout failed. Exiting repo_tools with"
" status: " + str(status))
return status
chdir(cwd)
status = (True, tools)
except Exception as e: # pragma: no cover
self.logger.error("repo_tools failed with error: " + str(e))
status = (False, e)
self.logger.info("Status of repo_tools: " + str(status))
self.logger.info("Finished: repo_tools")
return status
def tools_status(self, core, branch="master", version="HEAD", **kargs):
"""
Get tools that are currently installed/built/running and also the
number of repos that those tools come from; can toggle whether looking
for core tools or plugin tools
"""
# !! TODO this might need to store namespaces/branches/versions
all_tools = {'built': [], 'running': [], 'installed': [], 'normal': []}
core_repo = 'https://github.com/cyberreboot/vent'
repos = set()
tools = Tools(**kargs)
# get manifest file
manifest = os.path.join(self.api_action.plugin.path_dirs.meta_dir,
"plugin_manifest.cfg")
template = Template(template=manifest)
tools = template.sections()
# get repos
if core:
p_helper = PluginHelper(plugins_dir='.internals/plugins/')
repos.add(core_repo)
else:
p_helper = PluginHelper(plugins_dir='plugins/')
for tool in tools[1]:
repo = template.option(tool, 'repo')
if repo[0] and repo[1] != core_repo:
repos.add(repo[1])
# get normal tools
for repo in repos:
status, _ = p_helper.clone(repo)
if status:
p_helper.apply_path(repo)
p_helper.checkout(branch=branch, version=version)
path, _, _ = p_helper.get_path(repo, core=core)
matches = None
if core:
matches = p_helper.available_tools(path, version=version,
groups='core')
else:
matches = p_helper.available_tools(path, version=version)
for match in matches:
if core:
all_tools['normal'].append(match[0].split('/')[-1].replace('_', '-'))
else:
all_tools['normal'].append(match[0].split('/')[-1])
# get tools that have been installed
for tool in tools[1]:
repo = template.option(tool, "repo")
if repo[0] and repo[1] in repos:
name = template.option(tool, "name")
if name[0]:
all_tools['installed'].append(name[1].replace('_', '-'))
# get tools that have been built and/or are running
try:
d_client = docker.from_env()
images = d_client.images.list(filters={'label': 'vent'})
for image in images:
try:
core_check = ("vent.groups" in image.attrs['Config']['Labels'] and
'core' in image.attrs['Config']['Labels']['vent.groups'])
image_check = None
if core:
image_check = core_check
else:
image_check = not core_check
if image_check:
if ('vent.name' in image.attrs['Config']['Labels'] and
'hidden' not in image.attrs['Config']['Labels']['vent.groups']):
if core:
all_tools['built'].append(image.attrs['Config']['Labels']['vent.name'].replace('_', '-'))
else:
all_tools['built'].append(image.attrs['Config']['Labels']['vent.name'])
except Exception as err: # pragma: no cover
self.logger.error("image_check went wrong " + str(err))
containers = d_client.containers.list(filters={'label': 'vent'})
for container in containers:
try:
core_check = ("vent.groups" in container.attrs['Config']['Labels'] and
'core' in container.attrs['Config']['Labels']['vent.groups'])
container_check = None
if core:
container_check = core_check
else:
container_check = not core_check
if container_check:
if ('vent.name' in container.attrs['Config']['Labels'] and
'hidden' not in container.attrs['Config']['Labels']['vent.groups']):
if core:
all_tools['running'].append(container.attrs['Config']['Labels']['vent.name'].replace('_', '-'))
else:
all_tools['running'].append(container.attrs['Config']['Labels']['vent.name'])
except Exception as err: # pragma: no cover
self.logger.error("core_check went wrong " + str(err))
except Exception as e: # pragma: no cover
self.logger.error("Something with docker went wrong " + str(e))
return (len(repos), all_tools)
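# Illustrative usage sketch (not part of the original module); assumes a
# configured vent installation and a reachable Docker daemon:
#
#     helper = MenuHelper()
#     ok, info = helper.cores('install')                  # (bool, details) tuple
#     num_repos, tools = helper.tools_status(core=True)   # repo count + tool dict
#     ok, branches = helper.repo_branches('https://github.com/cyberreboot/vent')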
|
|
"""
This module contains functions to assist in the construction of URIs for views.
"""
from __future__ import unicode_literals
from __future__ import absolute_import, division, print_function
__author__ = "Graham Klyne (GK@ACM.ORG)"
__copyright__ = "Copyright 2014, G. Klyne"
__license__ = "MIT (http://opensource.org/licenses/MIT)"
import logging
log = logging.getLogger(__name__)
import re
from utils.py3porting import urljoin, urlsplit, urlunsplit, quote, unquote
# From RFC 3986:
gen_delims = ":/?#[]@"
sub_delims = "!$&'()*+,;="
unreserved = "-._~"
# subset of the above that is safe in a query string (no "?", "&" or "#")
query_safe = re.sub('[?&#]', '', gen_delims + sub_delims + unreserved)
def uri_quote_param(pval):
"""
Apply escaping to a supplied query parameter value for inclusion in a URI.
"""
return quote(pval, query_safe)
def uri_base(uri):
"""
Get the base URI from the supplied URI by removing any parameters and/or fragments.
"""
base_uri = uri.split("#", 1)[0]
base_uri = base_uri.split("?", 1)[0]
return base_uri
def uri_query_key_val(p):
"""
Returns a key-value pair for a supplied query parameter string.
The value part returned has %-escaping unapplied.
If no '=' is present, the value part returned is an empty string.
"""
kv = p.split("=", 1) + [""]
return (kv[0], unquote(kv[1]))
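# Illustrative examples (not part of the original module):
#     uri_query_key_val("q=a%20b") == ("q", "a b")
#     uri_query_key_val("flag")    == ("flag", "")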
def uri_param_dict(uri):
"""
Extract parameter dictionary from the supplied URI
>>> uri_param_dict("base:path?q1=p1&q2=p2#frag") == { 'q1': "p1", 'q2': "p2"}
True
>>> uri_param_dict("base:path?q1=p1=p1&q2=p2%26p2&q3") == { 'q1': "p1=p1", 'q2': "p2&p2", 'q3': "" }
True
"""
base_uri = uri.split("#", 1)[0]
query = (base_uri.split("?", 1)+[""])[1]
return { k: v for k, v in [ uri_query_key_val(qp) for qp in query.split("&") ] }
def build_dict(*param_dicts, **param_dict):
merged_param_dict = param_dict.copy()
for d in param_dicts:
if d is not None:
# log.info("param_dicts %r"%(d,))
merged_param_dict.update(d)
return merged_param_dict
def uri_params(*param_dicts, **param_dict):
"""
Construct a URI parameter string from the supplied dictionary values
(or values which are convertible to a dictionary using `dict()`).
"""
uri_param_dict = build_dict(*param_dicts, **param_dict)
uri_param_str = ""
next_sep = "?"
for pnam in uri_param_dict:
pval = uri_param_dict[pnam]
if pval:
# log.info("pnam %s, pval %s, uri_param_dict %r"%(pnam, pval, uri_param_dict))
uri_param_str += next_sep + pnam + "=" + uri_quote_param(pval)
next_sep = "&"
return uri_param_str
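# Illustrative example (not part of the original module): uri_params merges the
# supplied dictionaries/keywords and emits "?name=value&..." with each value
# %-escaped by uri_quote_param; falsy values are skipped and parameter order
# follows dict iteration order, e.g.
#     uri_params({'scope': 'all'}, search='a b')  ->  "?search=a%20b&scope=all"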
def uri_with_params(base_uri, *param_dicts, **param_dict):
"""
Construct a URI from the supplied base URI (with any parameters and/or fragment removed)
and URI parameters created using the supplied dictionary values.
"""
return uri_base(base_uri) + uri_params(*param_dicts, **param_dict)
def scope_params(*param_dicts, **param_dict):
"""
Return URI parameters from the supplied dictionary specifically used for entity selection,
ignoring all others. These are the parameters which, in conjunction with a base URI,
represent a resource or set of resources to be returned.
Preserves the following query params from original request:
scope
search
Query parameters not preserved (among others):
continuation_url
info_head
info_message
error_head
error_message
add_field
type
"""
uri_param_dict = build_dict(*param_dicts, **param_dict)
return (
{ 'search': uri_param_dict.get('search_for') or
uri_param_dict.get('search') or None
, 'scope': uri_param_dict.get('scope') or None
})
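# Illustrative example (not part of the original module): only the entity
# selection parameters survive; everything else is dropped, e.g.
#     scope_params(search_for='widgets', info_head='Note')
#         ->  {'search': 'widgets', 'scope': None}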
def _unused_scope_params_url(base_url, type=None):
"""
Takes a supplied URL and returns a corresponding continuation URL with all
but scope parameters removed (c.f. scope_params above).
"""
url_params = uri_param_dict(base_url)
scope_url = uri_with_params(base_url, scope_params(url_params))
return scope_url
def continuation_params(*param_dicts, **param_dict):
"""
Return URI parameters from the supplied dictionary specifically needed for a continuation
URI, ignoring all others. These are the parameters which, in conjunction with a base URI,
represent application state. Parameters not included here are transient in their effect.
"""
uri_param_dict = build_dict(*param_dicts, **param_dict)
return (
{ 'continuation_url': uri_param_dict.get('continuation_url') or None
, 'search': uri_param_dict.get('search_for') or
uri_param_dict.get('search') or None
, 'scope': uri_param_dict.get('scope') or None
})
def continuation_params_url(base_url):
"""
Takes a supplied URL and returns a corresponding continuation URL with transient
query parameters removed (cf. continuation_params above).
"""
url_params = uri_param_dict(base_url)
cont_url = uri_with_params(base_url, continuation_params(url_params))
return cont_url
def continuation_url_chain(continuation_url):
"""
Dissects a supplied continuation URL into its components going back up the return chain.
Thus, if:
>>> hop1 = uri_with_params("base:hop1", search="s1")
>>> hop2 = uri_with_params("base:hop2", search="s2")
>>> hop3 = uri_with_params("base:hop3", search="s3")
>>> hop4 = uri_with_params("base:hop4", search="s4")
>>> hop1p = (uri_base(hop1), uri_param_dict(hop1))
>>> hop2p = (uri_base(hop2), uri_param_dict(hop2))
>>> hop3p = (uri_base(hop3), uri_param_dict(hop3))
>>> hop4p = (uri_base(hop4), uri_param_dict(hop4))
>>> hop1p == ('base:hop1', {'search': 's1'})
True
>>> hop2p == ('base:hop2', {'search': 's2'})
True
>>> hop3p == ('base:hop3', {'search': 's3'})
True
>>> hop4p == ('base:hop4', {'search': 's4'})
True
>>> hop1c = hop1
>>> hop2c = uri_with_params("base:hop2", search="s2", continuation_url=hop1)
>>> hop3c = uri_with_params("base:hop3", search="s3", continuation_url=hop2c)
>>> hop4c = uri_with_params("base:hop4", search="s4", continuation_url=hop3c)
>>> hop1c == 'base:hop1?search=s1'
True
>>> hop2c == 'base:hop2?search=s2&continuation_url=base:hop1%3Fsearch=s1'
True
>>> hop3c == 'base:hop3?search=s3&continuation_url=base:hop2%3Fsearch=s2%26continuation_url=base:hop1%253Fsearch=s1'
True
>>> hop4c == 'base:hop4?search=s4&continuation_url=base:hop3%3Fsearch=s3%26continuation_url=base:hop2%253Fsearch=s2%2526continuation_url=base:hop1%25253Fsearch=s1'
True
>>> continuation_url_chain(hop1c) == [hop1p]
True
>>> continuation_url_chain(hop2c) == [hop2p, hop1p]
True
>>> continuation_url_chain(hop3c) == [hop3p, hop2p, hop1p]
True
>>> continuation_url_chain(hop4c) == [hop4p, hop3p, hop2p, hop1p]
True
"""
c_base = uri_base(continuation_url)
c_params = uri_param_dict(continuation_url)
if "continuation_url" in c_params:
c_cont = c_params.pop("continuation_url")
c_list = continuation_url_chain(c_cont)
c_list.insert(0, (c_base, c_params))
return c_list
return [(c_base, c_params)]
def continuation_chain_url(continuation_chain):
"""
Assembles a list of continuation components into a single continuation URL
>>> hop1 = uri_with_params("base:hop1", search="s1")
>>> hop2 = uri_with_params("base:hop2", search="s2")
>>> hop3 = uri_with_params("base:hop3", search="s3")
>>> hop4 = uri_with_params("base:hop4", search="s4")
>>> hop1p = (uri_base(hop1), uri_param_dict(hop1))
>>> hop2p = (uri_base(hop2), uri_param_dict(hop2))
>>> hop3p = (uri_base(hop3), uri_param_dict(hop3))
>>> hop4p = (uri_base(hop4), uri_param_dict(hop4))
>>> hop1c = hop1
>>> hop2c = uri_with_params("base:hop2", search="s2", continuation_url=hop1)
>>> hop3c = uri_with_params("base:hop3", search="s3", continuation_url=hop2c)
>>> hop4c = uri_with_params("base:hop4", search="s4", continuation_url=hop3c)
>>> continuation_chain_url([hop1p]) == hop1c
True
>>> continuation_chain_url([hop2p, hop1p]) == hop2c
True
>>> continuation_chain_url([hop3p, hop2p, hop1p]) == hop3c
True
>>> continuation_chain_url([hop4p, hop3p, hop2p, hop1p]) == hop4c
True
"""
u_base, u_params = continuation_chain[0]
c_tail = continuation_chain[1:]
if c_tail:
u_params.update(continuation_url=continuation_chain_url(c_tail))
return uri_with_params(u_base, u_params)
def url_update_type_entity_id(url_base,
old_type_id=None, new_type_id=None,
old_entity_id=None, new_entity_id=None
):
"""
Isolates type and entity identifiers in the supplied URL, and replaces
them with values supplied.
Entity ids are updated only if the type id is also supplied and matches.
URL path forms recognized (see also urls.py):
.../c/<coll-id>/d/<type-id>/
.../c/<coll-id>/d/<type-id>/!<scope>
.../c/<coll-id>/d/<type-id>/<entity-id>/
.../c/<coll-id>/l/<list-id>/<type-id>/
.../c/<coll-id>/l/<list-id>/<type-id>/!<scope>
.../c/<coll-id>/v/<view-id>/<type-id>/
.../c/<coll-id>/v/<view-id>/<type-id>/!action
.../c/<coll-id>/v/<view-id>/<type-id>/<entity-id>/
.../c/<coll-id>/v/<view-id>/<type-id>/<entity-id>/!action
Thus, the key patterns used for rewriting are:
^.*/d/<type-id>/(!.*)?$
^.*/d/<type-id>/<entity-id>/$
^.*/l/<list-id>/<type-id>/(!.*)?$
^.*/v/<view-id>/<type-id>/(!.*)?$
^.*/v/<view-id>/<type-id>/<entity-id>/(!.*)?$
>>> ( url_update_type_entity_id("http://example.com/base/c/coll/d/t1/",
... old_type_id="t1", new_type_id="t2")
... == 'http://example.com/base/c/coll/d/t2/' )
True
>>> ( url_update_type_entity_id("http://example.com/base/c/coll/d/t1/!all",
... old_type_id="t1", new_type_id="t2")
... == 'http://example.com/base/c/coll/d/t2/!all' )
True
>>> ( url_update_type_entity_id("http://example.com/base/c/coll/l/list/t1/",
... old_type_id="t1", new_type_id="t2")
... == 'http://example.com/base/c/coll/l/list/t2/' )
True
>>> ( url_update_type_entity_id("http://example.com/base/c/coll/l/list/t1/!all",
... old_type_id="t1", new_type_id="t2")
... == 'http://example.com/base/c/coll/l/list/t2/!all' )
True
>>> ( url_update_type_entity_id("http://example.com/base/c/coll/v/view/t1/",
... old_type_id="t1", new_type_id="t2")
... == 'http://example.com/base/c/coll/v/view/t2/' )
True
>>> ( url_update_type_entity_id("http://example.com/base/c/coll/v/view/t1/!new",
... old_type_id="t1", new_type_id="t2")
... == 'http://example.com/base/c/coll/v/view/t2/!new' )
True
>>> ( url_update_type_entity_id("http://example.com/base/c/coll/d/t1/e1/",
... old_type_id="t1", new_type_id="t2",
... old_entity_id="e1", new_entity_id="e2")
... == 'http://example.com/base/c/coll/d/t2/e2/' )
True
>>> ( url_update_type_entity_id("http://example.com/base/c/coll/v/view/t1/e1/",
... old_type_id="t1", new_type_id="t2",
... old_entity_id="e1", new_entity_id="e2")
... == 'http://example.com/base/c/coll/v/view/t2/e2/' )
True
>>> ( url_update_type_entity_id("http://example.com/base/c/coll/v/view/t1/e1/",
... old_type_id="t1", new_type_id="t2",
... old_entity_id="e1", new_entity_id="e2")
... == 'http://example.com/base/c/coll/v/view/t2/e2/' )
True
"""
rewrite_type_id_patterns = (
# (<prefix>)/(<type_id>)/(<suffix>)
[ re.compile(r"(^.*/d)/(?P<type_id>\w{0,32})/(!.*)?$")
, re.compile(r"(^.*/l/\w{0,32})/(?P<type_id>\w{0,32})/(!.*)?$")
, re.compile(r"(^.*/v/\w{0,32})/(?P<type_id>\w{0,32})/(!.*)?$")
])
rewrite_entity_id_patterns = (
# (<prefix>)/(<type_id>)/(<entity_id>)/(<suffix>)
[ re.compile(r"(^.*/d)/(?P<type_id>\w{0,32})/(?P<entity_id>\w{0,32})/(!.*)?$")
, re.compile(r"(^.*/v/\w{0,32})/(?P<type_id>\w{0,32})/(?P<entity_id>\w{0,32})/(!.*)?$")
])
us, ua, up, uq, uf = urlsplit(url_base)
if new_type_id:
for rexp in rewrite_type_id_patterns:
match = rexp.match(up)
if match:
prefix, type_id, suffix = match.group(1, 2, 3)
if not new_entity_id:
# Rename all instances of type
if type_id == old_type_id:
up = "%s/%s/%s"%(prefix, new_type_id, suffix or "")
break
for rexp in rewrite_entity_id_patterns:
match = rexp.match(up)
if match:
prefix, type_id, entity_id, suffix = match.group(1, 2, 3, 4)
if new_entity_id:
# Rename matching type+entities only
if ( (type_id == old_type_id) and (entity_id == old_entity_id) ):
up = "%s/%s/%s/%s"%(prefix, new_type_id, new_entity_id, suffix or "")
break
else:
# Rename all instances of type
if type_id == old_type_id:
up = "%s/%s/%s/%s"%(prefix, new_type_id, entity_id, suffix or "")
break
return urlunsplit((us, ua, up, uq, uf))
if __name__ == "__main__":
import doctest
doctest.testmod()
# End.
|
|
#!/opt/datadog-agent/embedded/bin/python
'''
Datadog
www.datadoghq.com
----
Make sense of your IT Data
Licensed under Simplified BSD License (see LICENSE)
(C) Boxed Ice 2010 all rights reserved
(C) Datadog, Inc. 2010-2016 all rights reserved
'''
# set up logging before importing any other components
from config import initialize_logging # noqa
initialize_logging('forwarder')
# stdlib
from datetime import timedelta
import logging
import os
from Queue import Full, Queue
from socket import error as socket_error, gaierror
import sys
import threading
import zlib
# For pickle & PID files, see issue 293
os.umask(022)
# 3p
try:
import pycurl
except ImportError:
# For the source install, pycurl might not be installed
pycurl = None
from tornado.escape import json_decode
import tornado.httpclient
import tornado.httpserver
import tornado.ioloop
from tornado.options import define, options, parse_command_line
import tornado.web
# project
from checks.check_status import ForwarderStatus
from config import (
get_config,
get_logging_config,
get_url_endpoint,
get_version
)
import modules
from transaction import Transaction, TransactionManager
from util import (
get_hostname,
get_tornado_ioloop,
get_uuid,
json,
Watchdog,
)
from utils.logger import RedactedLogRecord
logging.LogRecord = RedactedLogRecord
log = logging.getLogger('forwarder')
log.setLevel(get_logging_config()['log_level'] or logging.INFO)
DD_ENDPOINT = "dd_url"
# Transactions
TRANSACTION_FLUSH_INTERVAL = 5000 # Every 5 seconds
# Watchdog settings
WATCHDOG_INTERVAL_MULTIPLIER = 10 # 10x flush interval
WATCHDOG_HIGH_ACTIVITY_THRESHOLD = 1000 # Threshold to detect pathological activity
# Misc
HEADERS_TO_REMOVE = [
'Host',
'Content-Length',
]
# Maximum delay before replaying a transaction
MAX_WAIT_FOR_REPLAY = timedelta(seconds=90)
# Maximum queue size in bytes (when this is reached, old messages are dropped)
MAX_QUEUE_SIZE = 30 * 1024 * 1024 # 30MB
THROTTLING_DELAY = timedelta(microseconds=1000000 / 2) # 2 msg/second
class EmitterThread(threading.Thread):
def __init__(self, *args, **kwargs):
self.__name = kwargs['name']
self.__emitter = kwargs.pop('emitter')()
self.__logger = kwargs.pop('logger')
self.__config = kwargs.pop('config')
self.__max_queue_size = kwargs.pop('max_queue_size', 100)
self.__queue = Queue(self.__max_queue_size)
threading.Thread.__init__(self, *args, **kwargs)
self.daemon = True
def run(self):
while True:
(data, headers) = self.__queue.get()
try:
self.__logger.debug('Emitter %r handling a packet', self.__name)
self.__emitter(data, self.__logger, self.__config)
except Exception:
self.__logger.error('Failure during operation of emitter %r', self.__name, exc_info=True)
def enqueue(self, data, headers):
try:
self.__queue.put((data, headers), block=False)
except Full:
self.__logger.warn('Dropping packet for %r due to backlog', self.__name)
class EmitterManager(object):
"""Track custom emitters"""
def __init__(self, config):
self.agentConfig = config
self.emitterThreads = []
for emitter_spec in [s.strip() for s in self.agentConfig.get('custom_emitters', '').split(',')]:
if len(emitter_spec) == 0:
continue
logging.info('Setting up custom emitter %r', emitter_spec)
try:
thread = EmitterThread(
name=emitter_spec,
emitter=modules.load(emitter_spec, 'emitter'),
logger=logging,
config=config,
)
thread.start()
self.emitterThreads.append(thread)
except Exception:
logging.error('Unable to start thread for emitter: %r', emitter_spec, exc_info=True)
logging.info('Done with custom emitters')
def send(self, data, headers=None):
if not self.emitterThreads:
return # bypass decompression/decoding
if headers and headers.get('Content-Encoding') == 'deflate':
data = zlib.decompress(data)
data = json_decode(data)
for emitterThread in self.emitterThreads:
logging.info('Queueing for emitter %r', emitterThread.name)
emitterThread.enqueue(data, headers)
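# Illustrative sketch (not part of this file): the object that
# modules.load(<spec>, 'emitter') resolves is called once with no arguments by
# EmitterThread.__init__, and the result is then invoked per payload as
# emitter_instance(data, logger, config). A minimal custom emitter could be:
#
#     def emitter():
#         def send(data, logger, config):
#             logger.info('custom emitter received a %s payload', type(data))
#         return send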
class AgentTransaction(Transaction):
_application = None
_trManager = None
_endpoints = []
_emitter_manager = None
_type = None
@classmethod
def set_application(cls, app):
cls._application = app
cls._emitter_manager = EmitterManager(cls._application._agentConfig)
@classmethod
def set_tr_manager(cls, manager):
cls._trManager = manager
@classmethod
def get_tr_manager(cls):
return cls._trManager
@classmethod
def set_endpoints(cls):
"""
Set Datadog endpoint if an API key exists.
"""
if not cls._application._agentConfig.get('api_key'):
log.warning(u"No API key was found. Aborting endpoint setting.")
return
cls._endpoints.append(DD_ENDPOINT)
def __init__(self, data, headers, msg_type=""):
self._data = data
self._headers = headers
self._headers['DD-Forwarder-Version'] = get_version()
self._msg_type = msg_type
# Call after data has been set (size is computed in Transaction's init)
Transaction.__init__(self)
# Emitters operate outside the regular transaction framework
if self._emitter_manager is not None:
self._emitter_manager.send(data, headers)
# Insert the transaction in the Manager
self._trManager.append(self)
log.debug("Created transaction %d" % self.get_id())
self._trManager.flush()
def __sizeof__(self):
return sys.getsizeof(self._data)
def get_url(self, endpoint):
endpoint_base_url = get_url_endpoint(self._application._agentConfig[endpoint])
api_key = self._application._agentConfig.get('api_key')
if api_key:
return "{0}/intake/{1}?api_key={2}".format(endpoint_base_url, self._msg_type, api_key)
return "{0}/intake/{1}".format(endpoint_base_url, self._msg_type)
def flush(self):
for endpoint in self._endpoints:
url = self.get_url(endpoint)
log.debug(
u"Sending %s to endpoint %s at %s",
self._type, endpoint, url
)
# Getting proxy settings
proxy_settings = self._application._agentConfig.get('proxy_settings', None)
tornado_client_params = {
'url': url,
'method': 'POST',
'body': self._data,
'headers': self._headers,
'validate_cert': not self._application.skip_ssl_validation,
}
# Remove headers that were passed by the emitter. Those don't apply anymore
# This is pretty hacky though as it should be done in pycurl or curl or tornado
for h in HEADERS_TO_REMOVE:
if h in tornado_client_params['headers']:
del tornado_client_params['headers'][h]
log.debug("Removing {0} header.".format(h))
force_use_curl = False
if proxy_settings is not None:
force_use_curl = True
if pycurl is not None:
log.debug("Configuring tornado to use proxy settings: %s:****@%s:%s" % (proxy_settings['user'],
proxy_settings['host'], proxy_settings['port']))
tornado_client_params['proxy_host'] = proxy_settings['host']
tornado_client_params['proxy_port'] = proxy_settings['port']
tornado_client_params['proxy_username'] = proxy_settings['user']
tornado_client_params['proxy_password'] = proxy_settings['password']
if self._application._agentConfig.get('proxy_forbid_method_switch'):
# See http://stackoverflow.com/questions/8156073/curl-violate-rfc-2616-10-3-2-and-switch-from-post-to-get
tornado_client_params['prepare_curl_callback'] = lambda curl: curl.setopt(pycurl.POSTREDIR, pycurl.REDIR_POST_ALL)
if (not self._application.use_simple_http_client or force_use_curl) and pycurl is not None:
ssl_certificate = self._application._agentConfig.get('ssl_certificate', None)
tornado_client_params['ca_certs'] = ssl_certificate
req = tornado.httpclient.HTTPRequest(**tornado_client_params)
use_curl = force_use_curl or self._application._agentConfig.get("use_curl_http_client") and not self._application.use_simple_http_client
if use_curl:
if pycurl is None:
log.error("dd-agent is configured to use the Curl HTTP Client, but pycurl is not available on this system.")
else:
log.debug("Using CurlAsyncHTTPClient")
tornado.httpclient.AsyncHTTPClient.configure("tornado.curl_httpclient.CurlAsyncHTTPClient")
else:
log.debug("Using SimpleHTTPClient")
http = tornado.httpclient.AsyncHTTPClient()
http.fetch(req, callback=self.on_response)
def on_response(self, response):
if response.error:
log.error("Response: %s" % response)
if response.code == 413:
self._trManager.tr_error_too_big(self)
else:
self._trManager.tr_error(self)
else:
self._trManager.tr_success(self)
self._trManager.flush_next()
class MetricTransaction(AgentTransaction):
_type = "metrics"
class APIMetricTransaction(MetricTransaction):
def get_url(self, endpoint):
endpoint_base_url = get_url_endpoint(self._application._agentConfig[endpoint])
config = self._application._agentConfig
api_key = config['api_key']
url = endpoint_base_url + '/api/v1/series/?api_key=' + api_key
return url
def get_data(self):
return self._data
class APIServiceCheckTransaction(AgentTransaction):
_type = "service checks"
def get_url(self, endpoint):
endpoint_base_url = get_url_endpoint(self._application._agentConfig[endpoint])
config = self._application._agentConfig
api_key = config['api_key']
url = endpoint_base_url + '/api/v1/check_run/?api_key=' + api_key
return url
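# Illustrative summary (not part of the original source): with an API key
# configured, the transaction classes above post to URLs of roughly this shape:
#     AgentTransaction / MetricTransaction : <endpoint>/intake/<msg_type>?api_key=<key>
#     APIMetricTransaction                 : <endpoint>/api/v1/series/?api_key=<key>
#     APIServiceCheckTransaction           : <endpoint>/api/v1/check_run/?api_key=<key>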
class StatusHandler(tornado.web.RequestHandler):
def get(self):
threshold = int(self.get_argument('threshold', -1))
m = MetricTransaction.get_tr_manager()
self.write("<table><tr><td>Id</td><td>Size</td><td>Error count</td><td>Next flush</td></tr>")
transactions = m.get_transactions()
for tr in transactions:
self.write("<tr><td>%s</td><td>%s</td><td>%s</td><td>%s</td></tr>" %
(tr.get_id(), tr.get_size(), tr.get_error_count(), tr.get_next_flush()))
self.write("</table>")
if threshold >= 0:
if len(transactions) > threshold:
self.set_status(503)
class AgentInputHandler(tornado.web.RequestHandler):
_MSG_TYPE = ""
def post(self):
"""Read the message and forward it to the intake"""
# read message
msg = self.request.body
headers = self.request.headers
msg_type = self._MSG_TYPE
if msg is not None:
# Setup a transaction for this message
tr = MetricTransaction(msg, headers, msg_type)
else:
raise tornado.web.HTTPError(500)
self.write("Transaction: %s" % tr.get_id())
class MetricsAgentInputHandler(AgentInputHandler):
_MSG_TYPE = "metrics"
class MetadataAgentInputHandler(AgentInputHandler):
_MSG_TYPE = "metadata"
class ApiInputHandler(tornado.web.RequestHandler):
def post(self):
"""Read the message and forward it to the intake"""
# read message
msg = self.request.body
headers = self.request.headers
if msg is not None:
# Setup a transaction for this message
APIMetricTransaction(msg, headers)
else:
raise tornado.web.HTTPError(500)
class ApiCheckRunHandler(tornado.web.RequestHandler):
"""
Handler to submit Service Checks
"""
def post(self):
# read message
msg = self.request.body
headers = self.request.headers
if msg is not None:
# Setup a transaction for this message
tr = APIServiceCheckTransaction(msg, headers)
else:
raise tornado.web.HTTPError(500)
self.write("Transaction: %s" % tr.get_id())
class Application(tornado.web.Application):
def __init__(self, port, agentConfig, watchdog=True,
skip_ssl_validation=False, use_simple_http_client=False):
self._port = int(port)
self._agentConfig = agentConfig
self._metrics = {}
AgentTransaction.set_application(self)
AgentTransaction.set_endpoints()
self._tr_manager = TransactionManager(MAX_WAIT_FOR_REPLAY,
MAX_QUEUE_SIZE, THROTTLING_DELAY)
AgentTransaction.set_tr_manager(self._tr_manager)
self._watchdog = None
self.skip_ssl_validation = skip_ssl_validation or agentConfig.get('skip_ssl_validation', False)
self.use_simple_http_client = use_simple_http_client
if self.skip_ssl_validation:
log.info("Skipping SSL hostname validation, useful when using a transparent proxy")
# Monitor activity
if watchdog:
watchdog_timeout = TRANSACTION_FLUSH_INTERVAL * WATCHDOG_INTERVAL_MULTIPLIER / 1000
self._watchdog = Watchdog(
watchdog_timeout,
max_mem_mb=agentConfig.get('limit_memory_consumption', None),
max_resets=WATCHDOG_HIGH_ACTIVITY_THRESHOLD
)
def log_request(self, handler):
""" Override the tornado logging method.
If everything goes well, log level is DEBUG.
Otherwise it's WARNING or ERROR depending on the response code. """
if handler.get_status() < 400:
log_method = log.debug
elif handler.get_status() < 500:
log_method = log.warning
else:
log_method = log.error
request_time = 1000.0 * handler.request.request_time()
log_method(
u"%d %s %.2fms",
handler.get_status(),
handler._request_summary(), request_time
)
def appendMetric(self, prefix, name, host, device, ts, value):
if prefix in self._metrics:
metrics = self._metrics[prefix]
else:
metrics = {}
self._metrics[prefix] = metrics
if name in metrics:
metrics[name].append([host, device, ts, value])
else:
metrics[name] = [[host, device, ts, value]]
def _postMetrics(self):
if len(self._metrics) > 0:
self._metrics['uuid'] = get_uuid()
self._metrics['internalHostname'] = get_hostname(self._agentConfig)
self._metrics['apiKey'] = self._agentConfig['api_key']
MetricTransaction(json.dumps(self._metrics),
headers={'Content-Type': 'application/json'})
self._metrics = {}
def run(self):
handlers = [
(r"/intake/?", AgentInputHandler),
(r"/intake/metrics?", MetricsAgentInputHandler),
(r"/intake/metadata?", MetadataAgentInputHandler),
(r"/api/v1/series/?", ApiInputHandler),
(r"/api/v1/check_run/?", ApiCheckRunHandler),
(r"/status/?", StatusHandler),
]
settings = dict(
cookie_secret="12oETzKXQAGaYdkL5gEmGeJJFuYh7EQnp2XdTP1o/Vo=",
xsrf_cookies=False,
debug=False,
log_function=self.log_request
)
non_local_traffic = self._agentConfig.get("non_local_traffic", False)
tornado.web.Application.__init__(self, handlers, **settings)
http_server = tornado.httpserver.HTTPServer(self)
try:
# non_local_traffic must be == True to match, not just some non-false value
if non_local_traffic is True:
http_server.listen(self._port)
else:
# localhost in lieu of 127.0.0.1 to support IPv6
try:
http_server.listen(self._port, address=self._agentConfig['bind_host'])
except gaierror:
log.warning("localhost seems undefined in your host file, using 127.0.0.1 instead")
http_server.listen(self._port, address="127.0.0.1")
except socket_error, e:
if "Errno 99" in str(e):
log.warning("IPv6 doesn't seem to be fully supported. Falling back to IPv4")
http_server.listen(self._port, address="127.0.0.1")
else:
raise
except socket_error, e:
log.exception("Socket error %s. Is another application listening on the same port ? Exiting", e)
sys.exit(1)
except Exception, e:
log.exception("Uncaught exception. Forwarder is exiting.")
sys.exit(1)
log.info("Listening on port %d" % self._port)
# Register callbacks
self.mloop = get_tornado_ioloop()
logging.getLogger().setLevel(get_logging_config()['log_level'] or logging.INFO)
def flush_trs():
if self._watchdog:
self._watchdog.reset()
self._postMetrics()
self._tr_manager.flush()
tr_sched = tornado.ioloop.PeriodicCallback(flush_trs, TRANSACTION_FLUSH_INTERVAL,
io_loop=self.mloop)
# Register optional Graphite listener
gport = self._agentConfig.get("graphite_listen_port", None)
if gport is not None:
log.info("Starting graphite listener on port %s" % gport)
from graphite import GraphiteServer
gs = GraphiteServer(self, get_hostname(self._agentConfig), io_loop=self.mloop)
if non_local_traffic is True:
gs.listen(gport)
else:
gs.listen(gport, address="localhost")
# Start everything
if self._watchdog:
self._watchdog.reset()
tr_sched.start()
self.mloop.start()
log.info("Stopped")
def stop(self):
self.mloop.stop()
def init(skip_ssl_validation=False, use_simple_http_client=False):
agentConfig = get_config(parse_args=False)
port = agentConfig.get('listen_port', 17123)
if port is None:
port = 17123
else:
port = int(port)
app = Application(port, agentConfig, skip_ssl_validation=skip_ssl_validation, use_simple_http_client=use_simple_http_client)
def sigterm_handler(signum, frame):
log.info("caught sigterm. stopping")
app.stop()
import signal
signal.signal(signal.SIGTERM, sigterm_handler)
signal.signal(signal.SIGINT, sigterm_handler)
return app
def main():
# Deprecation notice
from utils.deprecations import deprecate_old_command_line_tools
deprecate_old_command_line_tools()
define("sslcheck", default=1, help="Verify SSL hostname, on by default")
define("use_simple_http_client", default=0, help="Use Tornado SimpleHTTPClient instead of CurlAsyncHTTPClient")
args = parse_command_line()
skip_ssl_validation = False
use_simple_http_client = False
if unicode(options.sslcheck) == u"0":
skip_ssl_validation = True
if unicode(options.use_simple_http_client) == u"1":
use_simple_http_client = True
# If we don't have any arguments, run the server.
if not args:
app = init(skip_ssl_validation, use_simple_http_client=use_simple_http_client)
try:
app.run()
except Exception:
log.exception("Uncaught exception in the forwarder")
finally:
ForwarderStatus.remove_latest_status()
else:
usage = "%s [help|info]. Run with no commands to start the server" % (sys.argv[0])
command = args[0]
if command == 'info':
logging.getLogger().setLevel(logging.ERROR)
return ForwarderStatus.print_latest_status()
elif command == 'help':
print usage
else:
print "Unknown command: %s" % command
print usage
return -1
return 0
if __name__ == "__main__":
sys.exit(main())
|
|
import tensorflow as tf
import numpy as np
import pylab as plt
from dopamine.utils import *
from dopamine.values import *
from keras.models import Sequential, model_from_json, load_model
from keras.layers import Dense
from keras import backend
import logging
from mathtools.utils import Vessel
logging.basicConfig(level=logging.INFO)
from ipdb import set_trace as debug
class TRPOAgent(object):
'''Trust Region Policy Optimizer.'''
def __init__(self, name, env, policy, pdf, cfg=None, load_model=False):
'''Creates a TRPO instance.
INPUTS
name - str
The name of the TRPO agent, which will be used when saving the
weight data for policy and value function.
env - object
An environment simulation object.
policy - object
A neural network that takes in state space vectors and returns
an action parameter vector.
pdf - object
A probability density/distribution; must provide a .sample
method that takes in an action parameter vector.
'''
self.name = name.replace(' ', '_')
# Create a tensorflow session.
self.session = tf.Session()
backend.set_session(self.session)
self.info = logging.info
# Here is the environment we'll be simulating, and pdf.
self.env = env
self.pdf = pdf
self.pf = None
# Set up configuration (if None is passed, fill empty dict).
cfg = cfg if cfg else {}
# Set defaults for TRPO optimizer.
cfg.setdefault('episodes_per_step', 100)
cfg.setdefault('gamma', 0.995)
cfg.setdefault('lambda', 0.96)
cfg.setdefault('cg_damping', 0.1)
cfg.setdefault('epsilon', 0.01)
cfg.setdefault('model_file', 'weights/policy_{:s}.h5'.\
format(self.name))
cfg.setdefault('filter_file', 'weights/filters_{:s}.dat'.\
format(self.name))
cfg.setdefault('iterations_per_save', 1)
cfg.setdefault('load_weights', False)
cfg.setdefault('make_plots', False)
self.cfg = cfg
# And here is the policy that we're trying to optimize.
if load_model:
self.load_model()
else:
self.policy = policy
# Define variables of interest.
self.network_params = network_params = policy.trainable_weights
# Action vector is the [mean, std] of the Gaussian action density.
self.state_vectors = state_vectors = policy.input
# raw_vectors = tf.clip_by_value(policy.output, -15, 15)
# self.action_vectors = action_vectors = tf.nn.softmax(raw_vectors)
self.action_vectors = action_vectors = policy.output
self.action_vectors_old = action_vectors_old = \
self.pdf.parameter_vector
self.actions_taken = actions_taken = self.pdf.sample(action_vectors)
self.advantages = advantages = tf.placeholder(dtype, [None],\
name='advantages')
# Compute the surrogate loss function.
self.logp = logp = self.pdf.loglikelihood(actions_taken,\
action_vectors)
self.logp_old = logp_old = self.pdf.loglikelihood(actions_taken,\
action_vectors_old)
self.loss = -tf.reduce_mean(tf.exp(logp - logp_old) * advantages)
self.policy_gradient = flat_gradient(self.loss, network_params)
# Compute expected KL divergence (but exclude first argument from
# gradient computations).
action_vectors_fixed = tf.stop_gradient(action_vectors)
kl_first_fixed = pdf.kl(action_vectors_fixed, action_vectors)
self.expected_kl = expected_kl = tf.reduce_mean(kl_first_fixed)
self.kl_oldnew = tf.reduce_mean(pdf.kl(action_vectors,\
action_vectors_old))
# Now we compute the gradients of the expected KL divergence.
self.grads = grads = tf.gradients(expected_kl, network_params,\
name='gradients')
# Placeholder for tangent vector in the network's parameter space.
self.flat_tangent = tf.placeholder(dtype, [None])
# Set up the computation of the Fisher Vector product!
tangents = make_tangents(self.flat_tangent, network_params)
# The gradient/vector product.
self.gvp = gvp = [tf.reduce_sum(g*t) for (g,t) in zip(grads, tangents)]
# Take gradient of GVP and flatten the result to obtain the Fisher-
# vector product.
self.fvp = flat_gradient(gvp, network_params)
# Create objects to convert flat to expanded parameters, & vice/versa.
self.params_to_theta = ParamsToTheta(self.session, network_params)
self.theta_to_params = ThetaToParams(self.session, network_params)
# Estimate value function using another neural network.
if load_model:
self.vf = ValueFunction(self.name, self.session, load_model=True)
else:
input_dim = env.observation_space.shape[0]
self.vf = ValueFunction(self.name, self.session, input_dim)
# Initialize all of our variables.
init = tf.global_variables_initializer()
self.session.run(init)
self.obsfilt = ZFilter(self.env.observation_space.shape, clip=10)
self.rewfilt = ZFilter((), demean=False, clip=5)
def load_model(self):
'''Load saved weights from file.'''
self.policy = load_model(self.cfg['model_file'])
# theta = np.concatenate([np.reshape(x, np.prod(x.shape)) \
# for x in weights])
# self.theta_to_params(theta)
def save_model(self, best_model=True):
'''Save weights for policy and value function.'''
if best_model:
print('Saving Best Model...')
self.policy.save(self.cfg['model_file'])
v = Vessel(self.cfg['filter_file'])
v.filt = self.obsfilt
v.save()
self.vf.save_model()
def compute_advantages(self, paths):
'''Computes advantages, give delta estimates.'''
gamma = self.cfg['gamma']
gl = (self.cfg['gamma'] * self.cfg['lambda'])
for path in paths:
path["returns"] = discount(path["rewards"], gamma)
b = path['baseline']
b1 = np.append(b, 0)
deltas = path["rewards"].flatten() + gamma*b1[1:] - b1[:-1]
path['advantages'] = discount(deltas, gl)
def simulate(self):
'''Simulate the environment with respect to the given policy.'''
# Initialize these things!
paths = []
self.info('> Simulating episodes.')
# self.env.reset_target()
for itr in range(self.cfg['episodes_per_step']):
if np.mod(itr, 1000) == 0:
self.info('Episode {:d} of {:d}'.format(itr,\
self.cfg['episodes_per_step']))
states, actions, action_vectors, rewards = [], [], [], []
# Initialize the current state of the system.
state = self.env.reset()
done = False
position = []
nb_steps = 0
while (not done): # keep simulating until episode terminates.
# Estimate the action based on current policy!
state = self.obsfilt(state)
states.append(state)
action_vector, action = self.act(state)
action = action[0]
# action_vector = action_vector.tolist()
# action = action.tolist()
if np.random.rand() > 0.9999:
print(action)
print(action_vector)
# Now take a step!
state, reward, done, info = self.env.step(action)
# Apply filters to the states, rewards.
reward = self.rewfilt(reward)
rewards.append(reward)
# Store these things for later.
actions.append(action)
action_vectors.append(action_vector)
nb_steps += 1
# if nb_steps>200: break
# Assemble all useful path information.
path = {'state_vectors': np.vstack(states),
'rewards': np.vstack(rewards),
'action_vectors': np.vstack(action_vectors),
'actions': np.vstack(actions)}
paths.append(path)
return paths
def act(self, state):
'''Take an action, given an observed state.'''
return self.session.run([self.action_vectors, self.actions_taken], \
feed_dict={self.state_vectors: np.atleast_2d(state)})
def learn(self):
'''Learn to control an agent in an environment.'''
self.best_reward = -np.inf
reward_trajectory = []
for _itr in range(50000):
# 1. Simulate paths using current policy --------------------------
paths = self.simulate()
# self.info(paths[0]['state_vectors'][0])
# 2. Generalized Advantage Estimation -----------------------------
self.vf.predict(paths)
self.compute_advantages(paths)
# 2b. Assemble necessary data -------------------------------------
state_vectors = np.concatenate([path['state_vectors'] for \
path in paths])
advantages = np.concatenate([path['advantages'] for path in paths])
actions_taken = np.concatenate([path['actions'] for path in paths])
action_vectors = np.concatenate([path['action_vectors'] for \
path in paths])
returns = np.concatenate([path['returns'] for path in paths])
# 3. TRPO update of policy ----------------------------------------
theta_previous = 1*self.params_to_theta()
# Normalize the advantages.
advantages -= advantages.mean()
advantages /= (advantages.std() + 1e-8)
# Load up dict for the big update.
feed = {self.action_vectors_old: action_vectors,
self.actions_taken: actions_taken,
self.state_vectors: state_vectors,
self.advantages: advantages.flatten()}
def fisher_vector_product(p):
feed[self.flat_tangent] = p
return self.session.run(self.fvp, feed) + \
self.cfg['cg_damping'] * p
# Compute the current gradient (the g in Fx = g).
g = self.session.run(self.policy_gradient, feed_dict=feed)
if np.isnan(fisher_vector_product(-g)).any(): debug()
# Use conjugate gradient to find natural gradient direction.
natural_direction = conjugate_gradient(fisher_vector_product, -g)
# Determine the maximum allowable step size.
quadratic_term = 0.5 * natural_direction.dot(\
fisher_vector_product(natural_direction))
lagrange_multiplier = np.sqrt(quadratic_term/self.cfg['epsilon'])
full_step = natural_direction/lagrange_multiplier
expected_improvement_rate = -g.dot(natural_direction)/\
lagrange_multiplier
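# Trust-region bookkeeping: with s the natural direction,
# quadratic_term = 0.5 * s^T F s, so dividing s by
# sqrt(quadratic_term / epsilon) makes the quadratic KL estimate of the
# full step approximately equal to epsilon (the KL constraint).
# expected_improvement_rate is the predicted first-order decrease in
# the surrogate loss for that full step, passed to the line search.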
# Now line search to update theta.
def surrogate_loss(theta):
self.theta_to_params(theta)
return self.session.run(self.loss, feed_dict=feed)
# Use a linesearch to take largest useful step.
success, theta_new = linesearch(surrogate_loss, theta_previous,\
full_step, expected_improvement_rate)
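# The line search presumably backtracks along full_step, accepting the
# largest scaled step whose actual improvement in the surrogate loss is
# an acceptable fraction of the predicted improvement; `success`
# reports whether any such step was found.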
# Compute the new KL divergence.
kl = self.session.run(self.kl_oldnew, feed_dict=feed)
if np.isnan(kl):
self.theta_to_params(theta_previous) # assigns old theta.
self.info('> NaN encountered. Skipping updates.')
debug()
continue
if kl > 1.5*self.cfg['epsilon']: # No big steps!
self.theta_to_params(theta_previous) # assigns old theta.
updated = False
else:
self.info('> Updating theta.')
updated = True
self.theta_to_params(theta_new)
# Calculate some metrics.
mean_rewards = np.array(
[path["rewards"].sum() for path in paths])
reward_trajectory.append(mean_rewards.mean())
# Update the value function.
self.info('> Fitting the value function.')
self.vf.fit(paths)
self.info('> Iteration: {:d}'.format(_itr))
self.info('> KL divergence: {:.3f}'.format(kl))
if updated:
self.info('> Theta updated')
else:
self.info('> Theta not updated')
self.info('> Mean Reward: {:.4f}'.format(mean_rewards.mean()))
self.info('> Surrogate Loss: {:.4}'.format(\
surrogate_loss(theta_new)))
# Save best model to disk.
if mean_rewards.mean() > self.best_reward:
self.save_model(best_model=True)
self.best_reward = mean_rewards.mean()
# Save model parameters to disk.
if np.mod(_itr, self.cfg['iterations_per_save']) == 0:
self.info('> Saving policy and value function weights.')
self.save_model()
if self.cfg['make_plots']:
# Plot rewards over time.
plt.figure(100)
plt.clf()
plt.plot(reward_trajectory)
plt.show()
plt.pause(0.05)
return advantages
|
|
"""
General tests for all estimators in sklearn.
"""
# Authors: Andreas Mueller <amueller@ais.uni-bonn.de>
# Gael Varoquaux gael.varoquaux@normalesup.org
# License: BSD 3 clause
from __future__ import print_function
import os
import warnings
import sys
import pkgutil
from sklearn.externals.six import PY3
from sklearn.externals.six.moves import zip
from sklearn.utils.testing import assert_false, clean_warning_registry
from sklearn.utils.testing import all_estimators
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import ignore_warnings
import sklearn
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import make_classification
from sklearn.cross_validation import train_test_split
from sklearn.linear_model.base import LinearClassifierMixin
from sklearn.utils.estimator_checks import (
check_parameters_default_constructible,
check_estimator_sparse_data,
check_transformer,
check_clustering,
check_clusterer_compute_labels_predict,
check_regressors_int,
check_regressors_train,
check_regressors_pickle,
check_transformer_pickle,
check_transformers_unfitted,
check_estimators_nan_inf,
check_estimators_unfitted,
check_classifiers_one_label,
check_classifiers_train,
check_classifiers_classes,
check_classifiers_input_shapes,
check_classifiers_pickle,
check_class_weight_classifiers,
check_class_weight_auto_classifiers,
check_class_weight_auto_linear_classifier,
check_estimators_overwrite_params,
check_estimators_partial_fit_n_features,
check_sparsify_coefficients,
check_classifier_data_not_an_array,
check_regressor_data_not_an_array,
check_transformer_data_not_an_array,
check_transformer_n_iter,
check_fit_score_takes_y,
check_non_transformer_estimators_n_iter,
check_pipeline_consistency,
CROSS_DECOMPOSITION)
def test_all_estimator_no_base_class():
# test that all_estimators doesn't find abstract classes.
for name, Estimator in all_estimators():
msg = ("Base estimators such as {0} should not be included"
" in all_estimators").format(name)
assert_false(name.lower().startswith('base'), msg=msg)
def test_all_estimators():
# Test that estimators are default-constructible, clonable
# and have working repr.
estimators = all_estimators(include_meta_estimators=True)
# Meta sanity-check to make sure that the estimator introspection runs
# properly
assert_greater(len(estimators), 0)
for name, Estimator in estimators:
# some can just not be sensibly default constructed
yield check_parameters_default_constructible, name, Estimator
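# Note: yielding (check, name, Estimator) tuples makes these functions
# nose-style test generators; the test runner executes each yielded
# tuple as an individual test case, so one failing estimator does not
# mask failures in the others.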
def test_non_meta_estimators():
# input validation etc for non-meta estimators
# FIXME these should be done also for non-mixin estimators!
estimators = all_estimators(type_filter=['classifier', 'regressor',
'transformer', 'cluster'])
for name, Estimator in estimators:
if name not in CROSS_DECOMPOSITION:
yield check_fit_score_takes_y, name, Estimator
yield check_pipeline_consistency, name, Estimator
if name not in CROSS_DECOMPOSITION + ['Imputer']:
# Test that all estimators check their input for NaN's and infs
yield check_estimators_nan_inf, name, Estimator
if (name not in ['CCA', '_CCA', 'PLSCanonical', 'PLSRegression',
'PLSSVD', 'GaussianProcess']):
# FIXME!
# in particular GaussianProcess!
yield check_estimators_overwrite_params, name, Estimator
if hasattr(Estimator, 'sparsify'):
yield check_sparsify_coefficients, name, Estimator
yield check_estimator_sparse_data, name, Estimator
def test_transformers():
# test if transformers do something sensible on training set
# also test all shapes / shape errors
transformers = all_estimators(type_filter='transformer')
for name, Transformer in transformers:
# All transformers should either deal with sparse data or raise an
# exception with type TypeError and an intelligible error message
yield check_transformer_pickle, name, Transformer
if name not in ['AdditiveChi2Sampler', 'Binarizer', 'Normalizer',
'PLSCanonical', 'PLSRegression', 'CCA', 'PLSSVD']:
yield check_transformer_data_not_an_array, name, Transformer
# these don't actually fit the data, so don't raise errors
if name not in ['AdditiveChi2Sampler', 'Binarizer', 'Normalizer']:
# basic tests
yield check_transformer, name, Transformer
yield check_transformers_unfitted, name, Transformer
def test_clustering():
# test if clustering algorithms do something sensible
# also test all shapes / shape errors
clustering = all_estimators(type_filter='cluster')
for name, Alg in clustering:
# test whether any estimator overwrites its init parameters during fit
yield check_clusterer_compute_labels_predict, name, Alg
if name not in ('WardAgglomeration', "FeatureAgglomeration"):
# this is clustering on the features
# let's not test that here.
yield check_clustering, name, Alg
yield check_estimators_partial_fit_n_features, name, Alg
def test_classifiers():
# test if classifiers can cope with non-consecutive classes
classifiers = all_estimators(type_filter='classifier')
for name, Classifier in classifiers:
# test that classifiers can handle non-array data
yield check_classifier_data_not_an_array, name, Classifier
# test classifiers trained on a single label always return this label
yield check_classifiers_one_label, name, Classifier
yield check_classifiers_classes, name, Classifier
yield check_classifiers_pickle, name, Classifier
yield check_estimators_partial_fit_n_features, name, Classifier
# basic consistency testing
yield check_classifiers_train, name, Classifier
if (name not in ["MultinomialNB", "LabelPropagation", "LabelSpreading"]
# TODO some complication with -1 label
and name not in ["DecisionTreeClassifier",
"ExtraTreeClassifier"]):
# We don't raise a warning in these classifiers, as
# the column y interface is used by the forests.
# test if classifiers can cope with y.shape = (n_samples, 1)
yield check_classifiers_input_shapes, name, Classifier
# test if NotFittedError is raised
yield check_estimators_unfitted, name, Classifier
def test_regressors():
regressors = all_estimators(type_filter='regressor')
# TODO: test with intercept
# TODO: test with multiple responses
for name, Regressor in regressors:
# basic testing
yield check_regressors_train, name, Regressor
yield check_regressor_data_not_an_array, name, Regressor
yield check_estimators_partial_fit_n_features, name, Regressor
# Test that estimators can be pickled, and once pickled
# give the same answer as before.
yield check_regressors_pickle, name, Regressor
if name != 'CCA':
# check that the regressor handles int input
yield check_regressors_int, name, Regressor
# Test if NotFittedError is raised
yield check_estimators_unfitted, name, Regressor
def test_configure():
# Smoke test the 'configure' step of setup, this tests all the
# 'configure' functions in the setup.pys in the scikit
cwd = os.getcwd()
setup_path = os.path.abspath(os.path.join(sklearn.__path__[0], '..'))
setup_filename = os.path.join(setup_path, 'setup.py')
if not os.path.exists(setup_filename):
return
try:
os.chdir(setup_path)
old_argv = sys.argv
sys.argv = ['setup.py', 'config']
clean_warning_registry()
with warnings.catch_warnings():
# The configuration spits out warnings when not finding
# Blas/Atlas development headers
warnings.simplefilter('ignore', UserWarning)
if PY3:
with open('setup.py') as f:
exec(f.read(), dict(__name__='__main__'))
else:
execfile('setup.py', dict(__name__='__main__'))
finally:
sys.argv = old_argv
os.chdir(cwd)
def test_class_weight_classifiers():
# test that class_weight works and that the semantics are consistent
classifiers = all_estimators(type_filter='classifier')
clean_warning_registry()
with warnings.catch_warnings(record=True):
classifiers = [c for c in classifiers
if 'class_weight' in c[1]().get_params().keys()]
for name, Classifier in classifiers:
if name == "NuSVC":
# the sparse version has a parameter that doesn't do anything
continue
if name.endswith("NB"):
# NaiveBayes classifiers have a somewhat different interface.
# FIXME SOON!
continue
yield check_class_weight_classifiers, name, Classifier
def test_class_weight_auto_classifiers():
"""Test that class_weight="auto" improves f1-score"""
# This test is broken; its success depends on:
# * a rare fortuitous RNG seed for make_classification; and
# * the use of binary F1 over a seemingly arbitrary positive class for two
# datasets, and weighted average F1 for the third.
# Its expectations need to be clarified and reimplemented.
raise SkipTest('This test requires redefinition')
classifiers = all_estimators(type_filter='classifier')
clean_warning_registry()
with warnings.catch_warnings(record=True):
classifiers = [c for c in classifiers
if 'class_weight' in c[1]().get_params().keys()]
for n_classes, weights in zip([2, 3], [[.8, .2], [.8, .1, .1]]):
# create unbalanced dataset
X, y = make_classification(n_classes=n_classes, n_samples=200,
n_features=10, weights=weights,
random_state=0, n_informative=n_classes)
X = StandardScaler().fit_transform(X)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.5,
random_state=0)
for name, Classifier in classifiers:
if (name != "NuSVC"
# the sparse version has a parameter that doesn't do anything
and not name.startswith("RidgeClassifier")
# RidgeClassifier behaves unexpectedly
# FIXME!
and not name.endswith("NB")):
# NaiveBayes classifiers have a somewhat different interface.
# FIXME SOON!
yield (check_class_weight_auto_classifiers, name, Classifier,
X_train, y_train, X_test, y_test, weights)
def test_class_weight_auto_linear_classifiers():
classifiers = all_estimators(type_filter='classifier')
clean_warning_registry()
with warnings.catch_warnings(record=True):
linear_classifiers = [
(name, clazz)
for name, clazz in classifiers
if 'class_weight' in clazz().get_params().keys()
and issubclass(clazz, LinearClassifierMixin)]
for name, Classifier in linear_classifiers:
if name == "LogisticRegressionCV":
# Contrary to RidgeClassifierCV, LogisticRegressionCV uses actual
# CV folds and fits a model for each CV iteration before averaging
# the coefficients. Therefore it is not expected to behave exactly
# like the other linear models.
continue
yield check_class_weight_auto_linear_classifier, name, Classifier
@ignore_warnings
def test_import_all_consistency():
# Smoke test to check that any name in a __all__ list is actually defined
# in the namespace of the module or package.
pkgs = pkgutil.walk_packages(path=sklearn.__path__, prefix='sklearn.',
onerror=lambda _: None)
submods = [modname for _, modname, _ in pkgs]
for modname in submods + ['sklearn']:
if ".tests." in modname:
continue
package = __import__(modname, fromlist="dummy")
for name in getattr(package, '__all__', ()):
if getattr(package, name, None) is None:
raise AttributeError(
"Module '{0}' has no attribute '{1}'".format(
modname, name))
def test_root_import_all_completeness():
EXCEPTIONS = ('utils', 'tests', 'base', 'setup')
for _, modname, _ in pkgutil.walk_packages(path=sklearn.__path__,
onerror=lambda _: None):
if '.' in modname or modname.startswith('_') or modname in EXCEPTIONS:
continue
assert_in(modname, sklearn.__all__)
def test_non_transformer_estimators_n_iter():
# Test that all non-transformer estimators with a max_iter attribute
# expose an n_iter attribute of at least 1.
for est_type in ['regressor', 'classifier', 'cluster']:
regressors = all_estimators(type_filter=est_type)
for name, Estimator in regressors:
# LassoLars stops early for the default alpha=1.0 for
# the iris dataset.
if name == 'LassoLars':
estimator = Estimator(alpha=0.)
else:
estimator = Estimator()
if hasattr(estimator, "max_iter"):
# These models are dependent on external solvers like
# libsvm and accessing the iter parameter is non-trivial.
if name in (['Ridge', 'SVR', 'NuSVR', 'NuSVC',
'RidgeClassifier', 'SVC', 'RandomizedLasso',
'LogisticRegressionCV']):
continue
# Tested in test_transformer_n_iter below
elif (name in CROSS_DECOMPOSITION or
name in ['LinearSVC', 'LogisticRegression']):
continue
else:
# Multitask models related to ENet cannot handle
# if y is mono-output.
yield (check_non_transformer_estimators_n_iter,
name, estimator, 'Multi' in name)
def test_transformer_n_iter():
transformers = all_estimators(type_filter='transformer')
for name, Estimator in transformers:
estimator = Estimator()
# Dependent on external solvers and hence accessing the iter
# param is non-trivial.
external_solver = ['Isomap', 'KernelPCA', 'LocallyLinearEmbedding',
'RandomizedLasso', 'LogisticRegressionCV']
if hasattr(estimator, "max_iter") and name not in external_solver:
yield check_transformer_n_iter, name, estimator
|
|
# coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for user dashboard computations."""
__author__ = 'Sean Lip'
from core.domain import collection_domain
from core.domain import collection_services
from core.domain import exp_services
from core.domain import feedback_services
from core.domain import rights_manager
from core.domain import subscription_services
from core.domain import user_jobs_one_off
from core.platform import models
(user_models,) = models.Registry.import_models([models.NAMES.user])
taskqueue_services = models.Registry.import_taskqueue_services()
from core.tests import test_utils
class DashboardSubscriptionsOneOffJobTests(test_utils.GenericTestBase):
"""Tests for the one-off dashboard subscriptions job."""
EXP_ID_1 = 'exp_id_1'
EXP_ID_2 = 'exp_id_2'
COLLECTION_ID_1 = 'col_id_1'
COLLECTION_ID_2 = 'col_id_2'
EXP_ID_FOR_COLLECTION_1 = 'id_of_exp_in_collection_1'
USER_A_EMAIL = 'a@example.com'
USER_A_USERNAME = 'a'
USER_B_EMAIL = 'b@example.com'
USER_B_USERNAME = 'b'
USER_C_EMAIL = 'c@example.com'
USER_C_USERNAME = 'c'
def _run_one_off_job(self):
"""Runs the one-off MapReduce job."""
job_id = user_jobs_one_off.DashboardSubscriptionsOneOffJob.create_new()
user_jobs_one_off.DashboardSubscriptionsOneOffJob.enqueue(job_id)
self.assertEqual(
self.count_jobs_in_taskqueue(
queue_name=taskqueue_services.QUEUE_NAME_DEFAULT),
1)
self.process_and_flush_pending_tasks()
def _null_fn(self, *args, **kwargs):
"""A mock for functions of the form subscribe_to_*() to represent
behavior prior to the implementation of subscriptions.
"""
pass
def setUp(self):
super(DashboardSubscriptionsOneOffJobTests, self).setUp()
self.signup(self.USER_A_EMAIL, self.USER_A_USERNAME)
self.user_a_id = self.get_user_id_from_email(self.USER_A_EMAIL)
self.signup(self.USER_B_EMAIL, self.USER_B_USERNAME)
self.user_b_id = self.get_user_id_from_email(self.USER_B_EMAIL)
self.signup(self.USER_C_EMAIL, self.USER_C_USERNAME)
self.user_c_id = self.get_user_id_from_email(self.USER_C_EMAIL)
with self.swap(
subscription_services, 'subscribe_to_thread', self._null_fn
), self.swap(
subscription_services, 'subscribe_to_exploration',
self._null_fn):
# User A creates and saves a new valid exploration.
self.save_new_valid_exploration(
self.EXP_ID_1, self.user_a_id, end_state_name='End')
def test_null_case(self):
user_b_subscriptions_model = user_models.UserSubscriptionsModel.get(
self.user_b_id, strict=False)
self.assertEqual(user_b_subscriptions_model, None)
self._run_one_off_job()
user_b_subscriptions_model = user_models.UserSubscriptionsModel.get(
self.user_b_id, strict=False)
self.assertEqual(user_b_subscriptions_model, None)
def test_feedback_thread_subscription(self):
user_b_subscriptions_model = user_models.UserSubscriptionsModel.get(
self.user_b_id, strict=False)
user_c_subscriptions_model = user_models.UserSubscriptionsModel.get(
self.user_c_id, strict=False)
self.assertEqual(user_b_subscriptions_model, None)
self.assertEqual(user_c_subscriptions_model, None)
with self.swap(
subscription_services, 'subscribe_to_thread', self._null_fn
), self.swap(
subscription_services, 'subscribe_to_exploration',
self._null_fn):
# User B starts a feedback thread.
feedback_services.create_thread(
self.EXP_ID_1, None, self.user_b_id, 'subject', 'text')
# User C adds to that thread.
thread_id = feedback_services.get_threadlist(
self.EXP_ID_1)[0]['thread_id']
feedback_services.create_message(
thread_id, self.user_c_id, None, None, 'more text')
self._run_one_off_job()
# Both users are subscribed to the feedback thread.
user_b_subscriptions_model = user_models.UserSubscriptionsModel.get(
self.user_b_id)
user_c_subscriptions_model = user_models.UserSubscriptionsModel.get(
self.user_c_id)
self.assertEqual(user_b_subscriptions_model.activity_ids, [])
self.assertEqual(user_c_subscriptions_model.activity_ids, [])
self.assertEqual(
user_b_subscriptions_model.feedback_thread_ids, [thread_id])
self.assertEqual(
user_c_subscriptions_model.feedback_thread_ids, [thread_id])
def test_exploration_subscription(self):
with self.swap(
subscription_services, 'subscribe_to_thread', self._null_fn
), self.swap(
subscription_services, 'subscribe_to_exploration',
self._null_fn):
# User A adds user B as an editor to the exploration.
rights_manager.assign_role_for_exploration(
self.user_a_id, self.EXP_ID_1, self.user_b_id,
rights_manager.ROLE_EDITOR)
# User A adds user C as a viewer of the exploration.
rights_manager.assign_role_for_exploration(
self.user_a_id, self.EXP_ID_1, self.user_c_id,
rights_manager.ROLE_VIEWER)
self._run_one_off_job()
# Users A and B are subscribed to the exploration. User C is not.
user_a_subscriptions_model = user_models.UserSubscriptionsModel.get(
self.user_a_id)
user_b_subscriptions_model = user_models.UserSubscriptionsModel.get(
self.user_b_id)
user_c_subscriptions_model = user_models.UserSubscriptionsModel.get(
self.user_c_id, strict=False)
self.assertEqual(
user_a_subscriptions_model.activity_ids, [self.EXP_ID_1])
self.assertEqual(
user_b_subscriptions_model.activity_ids, [self.EXP_ID_1])
self.assertEqual(user_a_subscriptions_model.feedback_thread_ids, [])
self.assertEqual(user_b_subscriptions_model.feedback_thread_ids, [])
self.assertEqual(user_c_subscriptions_model, None)
def test_two_explorations(self):
with self.swap(
subscription_services, 'subscribe_to_thread', self._null_fn
), self.swap(
subscription_services, 'subscribe_to_exploration',
self._null_fn):
# User A creates and saves another valid exploration.
self.save_new_valid_exploration(self.EXP_ID_2, self.user_a_id)
self._run_one_off_job()
# User A is subscribed to two explorations.
user_a_subscriptions_model = user_models.UserSubscriptionsModel.get(
self.user_a_id)
self.assertEqual(
sorted(user_a_subscriptions_model.activity_ids),
sorted([self.EXP_ID_1, self.EXP_ID_2]))
def test_community_owned_exploration(self):
with self.swap(
subscription_services, 'subscribe_to_thread', self._null_fn
), self.swap(
subscription_services, 'subscribe_to_exploration',
self._null_fn):
# User A adds user B as an editor to the exploration.
rights_manager.assign_role_for_exploration(
self.user_a_id, self.EXP_ID_1, self.user_b_id,
rights_manager.ROLE_EDITOR)
# The exploration becomes community-owned.
rights_manager.publish_exploration(self.user_a_id, self.EXP_ID_1)
rights_manager.release_ownership_of_exploration(
self.user_a_id, self.EXP_ID_1)
# User C edits the exploration.
exp_services.update_exploration(
self.user_c_id, self.EXP_ID_1, [], 'Update exploration')
self._run_one_off_job()
# User A and user B are subscribed to the exploration; user C is not.
user_a_subscriptions_model = user_models.UserSubscriptionsModel.get(
self.user_a_id)
user_b_subscriptions_model = user_models.UserSubscriptionsModel.get(
self.user_b_id)
user_c_subscriptions_model = user_models.UserSubscriptionsModel.get(
self.user_c_id, strict=False)
self.assertEqual(
user_a_subscriptions_model.activity_ids, [self.EXP_ID_1])
self.assertEqual(
user_b_subscriptions_model.activity_ids, [self.EXP_ID_1])
self.assertEqual(user_c_subscriptions_model, None)
def test_deleted_exploration(self):
with self.swap(
subscription_services, 'subscribe_to_thread', self._null_fn
), self.swap(
subscription_services, 'subscribe_to_exploration',
self._null_fn):
# User A deletes the exploration.
exp_services.delete_exploration(self.user_a_id, self.EXP_ID_1)
self._run_one_off_job()
# User A is not subscribed to the exploration.
user_a_subscriptions_model = user_models.UserSubscriptionsModel.get(
self.user_a_id, strict=False)
self.assertEqual(user_a_subscriptions_model, None)
def test_collection_subscription(self):
with self.swap(
subscription_services, 'subscribe_to_thread', self._null_fn
), self.swap(
subscription_services, 'subscribe_to_exploration',
self._null_fn
), self.swap(
subscription_services, 'subscribe_to_collection',
self._null_fn):
# User A creates and saves a new valid collection.
self.save_new_valid_collection(
self.COLLECTION_ID_1, self.user_a_id,
exploration_id=self.EXP_ID_FOR_COLLECTION_1)
# User A adds user B as an editor to the collection.
rights_manager.assign_role_for_collection(
self.user_a_id, self.COLLECTION_ID_1, self.user_b_id,
rights_manager.ROLE_EDITOR)
# User A adds user C as a viewer of the collection.
rights_manager.assign_role_for_collection(
self.user_a_id, self.COLLECTION_ID_1, self.user_c_id,
rights_manager.ROLE_VIEWER)
self._run_one_off_job()
# Users A and B are subscribed to the collection. User C is not.
user_a_subscriptions_model = user_models.UserSubscriptionsModel.get(
self.user_a_id)
user_b_subscriptions_model = user_models.UserSubscriptionsModel.get(
self.user_b_id)
user_c_subscriptions_model = user_models.UserSubscriptionsModel.get(
self.user_c_id, strict=False)
self.assertEqual(
user_a_subscriptions_model.collection_ids, [self.COLLECTION_ID_1])
# User A is also subscribed to the exploration within the collection
# because they created both.
self.assertEqual(
sorted(user_a_subscriptions_model.activity_ids), [
self.EXP_ID_1, self.EXP_ID_FOR_COLLECTION_1])
self.assertEqual(
user_b_subscriptions_model.collection_ids, [self.COLLECTION_ID_1])
self.assertEqual(user_a_subscriptions_model.feedback_thread_ids, [])
self.assertEqual(user_b_subscriptions_model.feedback_thread_ids, [])
self.assertEqual(user_c_subscriptions_model, None)
def test_two_collections(self):
with self.swap(
subscription_services, 'subscribe_to_thread', self._null_fn
), self.swap(
subscription_services, 'subscribe_to_exploration',
self._null_fn
), self.swap(
subscription_services, 'subscribe_to_collection',
self._null_fn):
# User A creates and saves a new valid collection.
self.save_new_valid_collection(
self.COLLECTION_ID_1, self.user_a_id,
exploration_id=self.EXP_ID_FOR_COLLECTION_1)
# User A creates and saves another valid collection.
self.save_new_valid_collection(
self.COLLECTION_ID_2, self.user_a_id,
exploration_id=self.EXP_ID_FOR_COLLECTION_1)
self._run_one_off_job()
# User A is subscribed to two collections.
user_a_subscriptions_model = user_models.UserSubscriptionsModel.get(
self.user_a_id)
self.assertEqual(
sorted(user_a_subscriptions_model.collection_ids),
sorted([self.COLLECTION_ID_1, self.COLLECTION_ID_2]))
def test_deleted_collection(self):
with self.swap(
subscription_services, 'subscribe_to_thread', self._null_fn
), self.swap(
subscription_services, 'subscribe_to_exploration',
self._null_fn
), self.swap(
subscription_services, 'subscribe_to_collection',
self._null_fn):
# User A creates and saves a new collection.
self.save_new_default_collection(
self.COLLECTION_ID_1, self.user_a_id)
# User A deletes the collection.
collection_services.delete_collection(
self.user_a_id, self.COLLECTION_ID_1)
# User A deletes the exploration from earlier.
exp_services.delete_exploration(self.user_a_id, self.EXP_ID_1)
self._run_one_off_job()
# User A is not subscribed to the collection.
user_a_subscriptions_model = user_models.UserSubscriptionsModel.get(
self.user_a_id, strict=False)
self.assertEqual(user_a_subscriptions_model, None)
def test_adding_exploration_to_collection(self):
with self.swap(
subscription_services, 'subscribe_to_thread', self._null_fn
), self.swap(
subscription_services, 'subscribe_to_collection',
self._null_fn):
# User B creates and saves a new collection.
self.save_new_default_collection(
self.COLLECTION_ID_1, self.user_b_id)
# User B adds the exploration created by user A to the collection.
collection_services.update_collection(
self.user_b_id, self.COLLECTION_ID_1, [{
'cmd': collection_domain.CMD_ADD_COLLECTION_NODE,
'exploration_id': self.EXP_ID_1
}], 'Add new exploration to collection.')
# Users A and B have no subscriptions (to either explorations or
# collections).
user_a_subscriptions_model = user_models.UserSubscriptionsModel.get(
self.user_a_id, strict=False)
user_b_subscriptions_model = user_models.UserSubscriptionsModel.get(
self.user_b_id, strict=False)
self.assertEqual(user_a_subscriptions_model, None)
self.assertEqual(user_b_subscriptions_model, None)
self._run_one_off_job()
user_a_subscriptions_model = user_models.UserSubscriptionsModel.get(
self.user_a_id)
user_b_subscriptions_model = user_models.UserSubscriptionsModel.get(
self.user_b_id)
# User B should be subscribed to the collection and user A to the
# exploration.
self.assertEqual(
user_a_subscriptions_model.activity_ids, [self.EXP_ID_1])
self.assertEqual(
user_a_subscriptions_model.collection_ids, [])
self.assertEqual(
user_b_subscriptions_model.activity_ids, [])
self.assertEqual(
user_b_subscriptions_model.collection_ids, [self.COLLECTION_ID_1])
|
|
# coding=utf-8
#
# Copyright 2014-2016 F5 Networks Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import copy
from f5.bigip.tm.ltm.auth import Crldp_Server
from f5.bigip.tm.ltm.auth import Kerberos_Delegation
from f5.bigip.tm.ltm.auth import Ldap
from f5.bigip.tm.ltm.auth import Ocsp_Responder
from f5.bigip.tm.ltm.auth import Profile
from f5.bigip.tm.ltm.auth import Radius
from f5.bigip.tm.ltm.auth import Radius_Server
from f5.bigip.tm.ltm.auth import Ssl_Cc_Ldap
from f5.bigip.tm.ltm.auth import Ssl_Crldp
from f5.bigip.tm.ltm.auth import Ssl_Ocsp
from f5.bigip.tm.ltm.auth import Tacacs
import pytest
from requests.exceptions import HTTPError
from six import iteritems
TESTDESCRIPTION = "TESTDESCRIPTION"
def delete_dependency(mgmt_root, name):
try:
foo = mgmt_root.tm.ltm.auth.ssl_cc_ldaps.ssl_cc_ldap.load(name=name)
except HTTPError as err:
if err.response.status_code != 404:
raise
return
foo.delete()
def setup_dependency(request, mgmt_root, name, **kwargs):
def teardown():
delete_dependency(mgmt_root, name)
delete_dependency(mgmt_root, name)
res = mgmt_root.tm.ltm.auth.ssl_cc_ldaps.ssl_cc_ldap.create(name=name,
**kwargs)
request.addfinalizer(teardown)
return res
# Helper class to limit code repetition
class HelperTest(object):
def __init__(self, collection_name):
self.partition = 'Common'
self.lowered = collection_name.lower()
self.test_name = 'fake_' + self.urielementname()
self.authkinds = {
'crldp_server': 'tm:ltm:auth:crldp-server:crldp-serverstate',
'kerberos_delegation':
'tm:ltm:auth:kerberos-delegation:kerberos-delegationstate',
'ldap': 'tm:ltm:auth:ldap:ldapstate',
'ocsp_responder': 'tm:ltm:auth:ocsp-responder:ocsp-responderstate',
'profile': 'tm:ltm:auth:profile:profilestate',
'radius': 'tm:ltm:auth:radius:radiusstate',
'radius_server': 'tm:ltm:auth:radius-server:radius-serverstate',
'ssl_cc_ldap': 'tm:ltm:auth:ssl-cc-ldap:ssl-cc-ldapstate',
'ssl_crldp': 'tm:ltm:auth:ssl-crldp:ssl-crldpstate',
'ssl_ocsp': 'tm:ltm:auth:ssl-ocsp:ssl-ocspstate',
'tacacs': 'tm:ltm:auth:tacacs:tacacsstate'
}
def urielementname(self):
if self.lowered[-2:] == '_s':
endind = 2
else:
endind = 1
return self.lowered[:-endind]
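# Example of the naming convention assumed by the tests below:
#   'Crldp_Servers' -> lowered 'crldp_servers' -> element 'crldp_server'
#   'Radius_s'      -> lowered 'radius_s'      -> element 'radius'
#   'Tacacs_s'      -> lowered 'tacacs_s'      -> element 'tacacs'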
def delete_resource(self, resource):
try:
foo = resource.load(name=self.test_name, partition=self.partition)
except HTTPError as err:
if err.response.status_code != 404:
raise
return
foo.delete()
def setup_test(self, request, mgmt_root, **kwargs):
def teardown():
self.delete_resource(resource)
resourcecollection = \
getattr(getattr(getattr(mgmt_root.tm, 'ltm'), 'auth'),
self.lowered)
resource = getattr(resourcecollection, self.urielementname())
self.delete_resource(resource)
created = resource.create(name=self.test_name,
partition=self.partition,
**kwargs)
request.addfinalizer(teardown)
return created, resourcecollection
def test_MCURDL(self, request, mgmt_root, **kwargs):
# Testing create
authres, authcollection = self.setup_test(request, mgmt_root, **kwargs)
assert authres.name == self.test_name
assert authres.fullPath == '/Common/'+self.test_name
assert authres.generation and isinstance(authres.generation, int)
assert authres.kind == self.authkinds[self.urielementname()]
# Testing update
authres.description = TESTDESCRIPTION
authres.update()
assert hasattr(authres, 'description')
assert authres.description == TESTDESCRIPTION
# Testing refresh
authres.description = ''
authres.refresh()
assert hasattr(authres, 'description')
assert authres.description == TESTDESCRIPTION
# Testing modify
meta_data = authres.__dict__.pop('_meta_data')
start_dict = copy.deepcopy(authres.__dict__)
authres.__dict__['_meta_data'] = meta_data
authres.modify(description='MODIFIED')
desc = 'description'
for k, v in iteritems(authres.__dict__):
if k != desc:
start_dict[k] = authres.__dict__[k]
assert getattr(authres, k) == start_dict[k]
elif k == desc:
assert getattr(authres, desc) == 'MODIFIED'
# Testing load
a2 = getattr(authcollection, self.urielementname())
authres2 = a2.load(partition=self.partition, name=self.test_name)
assert authres.selfLink == authres2.selfLink
def test_collection(self, request, mgmt_root, **kwargs):
authres, authcollection = self.setup_test(request, mgmt_root, **kwargs)
assert authres.name == self.test_name
assert authres.fullPath == '/Common/' + self.test_name
assert authres.generation and isinstance(authres.generation, int)
assert authres.kind == self.authkinds[self.urielementname()]
coll = authcollection.get_collection()
assert isinstance(coll, list)
assert len(coll)
if self.lowered == 'crldp_servers':
assert isinstance(coll[0], Crldp_Server)
elif self.lowered == 'kerberos_delegations':
assert isinstance(coll[0], Kerberos_Delegation)
elif self.lowered == 'ldaps':
assert isinstance(coll[0], Ldap)
elif self.lowered == 'ocsp_responders':
assert isinstance(coll[0], Ocsp_Responder)
elif self.lowered == 'profiles':
assert isinstance(coll[0], Profile)
elif self.lowered == 'radius_s':
assert isinstance(coll[0], Radius)
elif self.lowered == 'radius_server_s':
assert isinstance(coll[0], Radius_Server)
elif self.lowered == 'ssl_cc_ldaps':
assert isinstance(coll[0], Ssl_Cc_Ldap)
elif self.lowered == 'ssl_crldps':
assert isinstance(coll[0], Ssl_Crldp)
elif self.lowered == 'ssl_ocsps':
assert isinstance(coll[0], Ssl_Ocsp)
elif self.lowered == 'tacacs':
assert isinstance(coll[0], Tacacs)
def test_profile_MCRDL(self, request, mgmt_root, **kwargs):
# Testing create
authres, authcollection = self.setup_test(request, mgmt_root, **kwargs)
assert authres.name == self.test_name
assert authres.fullPath == '/Common/' + self.test_name
assert authres.generation and isinstance(authres.generation, int)
assert authres.kind == self.authkinds[self.urielementname()]
assert authres.idleTimeout == 300
# Testing refresh
authres.idleTimeout = 0
authres.refresh()
assert hasattr(authres, 'idleTimeout')
assert authres.idleTimeout == 300
# Testing modify
meta_data = authres.__dict__.pop('_meta_data')
start_dict = copy.deepcopy(authres.__dict__)
authres.__dict__['_meta_data'] = meta_data
authres.modify(idleTimeout=100)
desc = 'idleTimeout'
for k, v in iteritems(authres.__dict__):
if k != desc:
start_dict[k] = authres.__dict__[k]
assert getattr(authres, k) == start_dict[k]
elif k == desc:
assert getattr(authres, desc) == 100
# Testing load
a2 = getattr(authcollection, self.urielementname())
authres2 = a2.load(partition=self.partition, name=self.test_name)
assert authres.selfLink == authres2.selfLink
class TestCrldpServer(object):
def test_MCURDL(self, request, mgmt_root):
auth = HelperTest('Crldp_Servers')
auth.test_MCURDL(request, mgmt_root, host='10.10.10.10')
def test_collection(self, request, mgmt_root):
auth = HelperTest('Crldp_Servers')
auth.test_collection(request, mgmt_root, host='10.10.10.10')
@pytest.mark.skipif(True, reason='this depends on an optional module')
class TestKerberosDelegation(object):
def test_MCURDL(self, request, mgmt_root):
auth = HelperTest('Kerberos_Delegations')
auth.test_MCURDL(request, mgmt_root,
serverPrincipal='HTTP/fake.com',
clientPrincipal='HTTP/faketoo.com')
def test_collection(self, request, mgmt_root):
auth = HelperTest('Kerberos_Delegations')
auth.test_collection(request, mgmt_root,
serverPrincipal='HTTP/fake.com',
clientPrincipal='HTTP/faketoo.com')
@pytest.mark.skipif(True, reason='this depends on an optional module')
class TestLdap(object):
def test_MCURDL(self, request, mgmt_root):
auth = HelperTest('Ldaps')
auth.test_MCURDL(request, mgmt_root, servers=['10.10.10.10'])
def test_collection(self, request, mgmt_root):
auth = HelperTest('Ldaps')
auth.test_collection(request, mgmt_root, servers=['10.10.10.10'])
class TestOcspResponder(object):
def test_MCURDL(self, request, mgmt_root):
auth = HelperTest('Ocsp_Responders')
auth.test_MCURDL(request, mgmt_root)
def test_collection(self, request, mgmt_root):
auth = HelperTest('Ocsp_Responders')
auth.test_collection(request, mgmt_root)
class TestProfile(object):
def test_MCURDL(self, request, mgmt_root):
setup_dependency(request, mgmt_root, 'fakeldap', servers=[
'10.10.10.10'], userKey=12345)
auth = HelperTest('Profiles')
auth.test_profile_MCRDL(request, mgmt_root,
defaultsFrom='/Common/ssl_cc_ldap',
configuration='/Common/fakeldap')
def test_collection(self, request, mgmt_root):
setup_dependency(request, mgmt_root, 'fakeldap', servers=[
'10.10.10.10'], userKey=12345)
auth = HelperTest('Profiles')
auth.test_profile_MCRDL(request, mgmt_root,
defaultsFrom='/Common/ssl_cc_ldap',
configuration='/Common/fakeldap')
@pytest.mark.skipif(True, reason='this depends on an optional module')
class TestRadius(object):
def test_MCURDL(self, request, mgmt_root):
auth = HelperTest('Radius_s')
auth.test_MCURDL(request, mgmt_root)
def test_collection(self, request, mgmt_root):
auth = HelperTest('Radius_s')
auth.test_collection(request, mgmt_root)
class TestRadiusServer(object):
def test_MCURDL(self, request, mgmt_root):
auth = HelperTest('Radius_Servers')
auth.test_MCURDL(request, mgmt_root, server='10.10.10.10',
secret='sekrit')
def test_collection(self, request, mgmt_root):
auth = HelperTest('Radius_Servers')
auth.test_collection(request, mgmt_root, server='10.10.10.10',
secret='sekrit')
class TestSSLCcLdap(object):
def test_MCURDL(self, request, mgmt_root):
auth = HelperTest('Ssl_Cc_Ldaps')
auth.test_MCURDL(request, mgmt_root, servers=['10.10.10.10'],
userKey=12345)
def test_collection(self, request, mgmt_root):
auth = HelperTest('Ssl_Cc_Ldaps')
auth.test_collection(request, mgmt_root, servers=['10.10.10.10'],
userKey=12345)
class TestSSLClrdp(object):
def test_MCURDL(self, request, mgmt_root):
auth = HelperTest('Ssl_Crldps')
auth.test_MCURDL(request, mgmt_root)
def test_collection(self, request, mgmt_root):
auth = HelperTest('Ssl_Crldps')
auth.test_collection(request, mgmt_root)
class TestSSLOcsp(object):
def test_MCURDL(self, request, mgmt_root):
auth = HelperTest('Ssl_Ocsps')
auth.test_MCURDL(request, mgmt_root)
def test_collection(self, request, mgmt_root):
auth = HelperTest('Ssl_Ocsps')
auth.test_collection(request, mgmt_root)
@pytest.mark.skipif(True, reason='this depends on an optional module')
class TestTacacs(object):
def test_MCURDL(self, request, mgmt_root):
auth = HelperTest('Tacacs_s')
auth.test_MCURDL(request, mgmt_root, servers=['10.10.10.10'],
secret='fortytwo', service='http')
def test_collection(self, request, mgmt_root):
auth = HelperTest('Tacacs_s')
auth.test_collection(request, mgmt_root, servers=['10.10.10.10'],
secret='fortytwo', service='http')
|
|
"""Undocumented Module"""
__all__ = ['BufferViewer']
from panda3d.core import *
from direct.task import Task
from direct.directnotify.DirectNotifyGlobal import *
from direct.showbase.DirectObject import DirectObject
import math
class BufferViewer(DirectObject):
notify = directNotify.newCategory('BufferViewer')
def __init__(self):
"""Access: private. Constructor."""
self.enabled = 0
size = ConfigVariableDouble('buffer-viewer-size', '0 0')
self.sizex = size[0]
self.sizey = size[1]
self.position = ConfigVariableString('buffer-viewer-position', "lrcorner").getValue()
self.layout = ConfigVariableString('buffer-viewer-layout', "hline").getValue()
self.include = "all"
self.exclude = "none"
self.cullbin = "fixed"
self.cullsort = 10000
self.renderParent = render2d
self.cards = []
self.cardindex = 0
self.cardmaker = CardMaker("cubemaker")
self.cardmaker.setFrame(-1,1,-1,1)
self.task = 0
self.window = 0
self.dirty = 1
self.accept("render-texture-targets-changed", self.refreshReadout)
if (ConfigVariableBool("show-buffers", 0).getValue()):
self.enable(1)
def refreshReadout(self):
"""Force the readout to be refreshed. This is usually invoked
by GraphicsOutput::add_render_texture (via an event handler).
However, it is also possible to invoke it manually. Currently,
the only time I know of that this is necessary is after a
window resize (and I ought to fix that)."""
self.dirty = 1
# Call enabled again, mainly to ensure that the task has been
# started.
self.enable(self.enabled)
def isValidTextureSet(self, x):
"""Access: private. Returns true if the parameter is a
list of GraphicsOutput and Texture, or the keyword 'all'."""
if (isinstance(x, list)):
for elt in x:
if (self.isValidTextureSet(elt)==0):
return 0
else:
return (x=="all") or (isinstance(x, Texture)) or (isinstance(x, GraphicsOutput))
def isEnabled(self):
"""Returns true if the buffer viewer is currently enabled."""
return self.enabled
def enable(self, x):
"""Turn the buffer viewer on or off. The initial state of the
buffer viewer depends on the Config variable 'show-buffers'."""
if (x != 0) and (x != 1):
BufferViewer.notify.error('invalid parameter to BufferViewer.enable')
return
self.enabled = x
self.dirty = 1
if (x and self.task == 0):
self.task = taskMgr.add(self.maintainReadout, "buffer-viewer-maintain-readout",
priority=1)
def toggleEnable(self):
"""Toggle the buffer viewer on or off. The initial state of the
enable flag depends on the Config variable 'show-buffers'."""
self.enable(1-self.enabled)
def setCardSize(self, x, y):
"""Set the size of each card. The units are relative to
render2d (ie, 1x1 card is not square). If one of the
dimensions is zero, then the viewer will choose a value
for that dimension so as to ensure that the aspect ratio
of the card matches the aspect ratio of the source-window.
If both dimensions are zero, the viewer uses a heuristic
to choose a reasonable size for the card. The initial
value is (0, 0)."""
if (x < 0) or (y < 0):
BufferViewer.notify.error('invalid parameter to BufferViewer.setCardSize')
return
self.sizex = x
self.sizey = y
self.dirty = 1
def setPosition(self, pos):
"""Set the position of the cards. The valid values are:
* llcorner - put them in the lower-left corner of the window
* lrcorner - put them in the lower-right corner of the window
* ulcorner - put them in the upper-left corner of the window
* urcorner - put them in the upper-right corner of the window
* window - put them in a separate window
The initial value is 'lrcorner'."""
valid=["llcorner","lrcorner","ulcorner","urcorner","window"]
if (valid.count(pos)==0):
BufferViewer.notify.error('invalid parameter to BufferViewer.setPosition')
BufferViewer.notify.error('valid parameters are: llcorner, lrcorner, ulcorner, urcorner, window')
return
if (pos == "window"):
BufferViewer.notify.error('BufferViewer.setPosition - "window" mode not implemented yet.')
return
self.position = pos
self.dirty = 1
def setLayout(self, lay):
"""Set the layout of the cards. The valid values are:
* vline - display them in a vertical line
* hline - display them in a horizontal line
* vgrid - display them in a vertical grid
* hgrid - display them in a horizontal grid
* cycle - display one card at a time, using selectCard/advanceCard
The default value is 'hline'."""
valid=["vline","hline","vgrid","hgrid","cycle"]
if (valid.count(lay)==0):
BufferViewer.notify.error('invalid parameter to BufferViewer.setLayout')
BufferViewer.notify.error('valid parameters are: vline, hline, vgrid, hgrid, cycle')
return
self.layout = lay
self.dirty = 1
def selectCard(self, i):
"""Only useful when using setLayout('cycle'). Sets the index
that selects which card to display. The index is taken modulo
the actual number of cards."""
self.cardindex = i
self.dirty = 1
def advanceCard(self):
"""Only useful when using setLayout('cycle'). Increments the index
that selects which card to display. The index is taken modulo
the actual number of cards."""
self.cardindex += 1
self.dirty = 1
def setInclude(self, x):
"""Set the include-set for the buffer viewer. The include-set
specifies which of the render-to-texture targets to display.
Valid inputs are the string 'all' (display every render-to-texture
target), or a list of GraphicsOutputs or Textures. The initial
value is 'all'."""
if (self.isValidTextureSet(x)==0):
BufferViewer.notify.error('setInclude: must be list of textures and buffers, or "all"')
return
self.include = x
self.dirty = 1
def setExclude(self, x):
"""Set the exclude-set for the buffer viewer. The exclude-set
should be a list of GraphicsOutputs and Textures to ignore.
The exclude-set is subtracted from the include-set (so the excludes
effectively override the includes.) The initial value is the
empty list."""
if (self.isValidTextureSet(x)==0):
BufferViewer.notify.error('setExclude: must be list of textures and buffers')
return
self.exclude = x
self.dirty = 1
def setSort(self, bin, sort):
"""Set the cull-bin and sort-order for the output cards. The
default value is 'fixed', 10000."""
self.cullbin = bin
self.cullsort = sort
self.dirty = 1
def setRenderParent(self, renderParent):
"""Set the scene graph root to which the output cards should
be parented. The default is render2d. """
self.renderParent = renderParent
self.dirty = 1
def analyzeTextureSet(self, x, set):
"""Access: private. Converts a list of GraphicsObject,
GraphicsEngine, and Texture into a table of Textures."""
if (isinstance(x, list)):
for elt in x:
self.analyzeTextureSet(elt, set)
elif (isinstance(x, Texture)):
set[x] = 1
elif (isinstance(x, GraphicsOutput)):
for itex in range(x.countTextures()):
tex = x.getTexture(itex)
set[tex] = 1
elif (isinstance(x, GraphicsEngine)):
for iwin in range(x.getNumWindows()):
win = x.getWindow(iwin)
self.analyzeTextureSet(win, set)
elif (x=="all"):
self.analyzeTextureSet(base.graphicsEngine, set)
else: return
def makeFrame(self, sizex, sizey):
"""Access: private. Each texture card is displayed with
a two-pixel wide frame (a ring of black and a ring of white).
This routine builds the frame geometry. It is necessary to
be precise so that the frame exactly aligns to pixel
boundaries, and so that it doesn't overlap the card at all."""
format=GeomVertexFormat.getV3cp()
vdata=GeomVertexData('card-frame', format, Geom.UHDynamic)
vwriter=GeomVertexWriter(vdata, 'vertex')
cwriter=GeomVertexWriter(vdata, 'color')
ringoffset = [0, 1, 1, 2]
ringbright = [0, 0, 1, 1]
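# ringoffset/ringbright describe four concentric rectangles at 0, 1, 1
# and 2 pixels outside the card edge: the first pair bounds a one-pixel
# black ring and the second pair a one-pixel white ring; the offsets
# are converted to render2d units via 2.0 / size below.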
for ring in range(4):
offsetx = (ringoffset[ring]*2.0) / float(sizex)
offsety = (ringoffset[ring]*2.0) / float(sizey)
bright = ringbright[ring]
vwriter.addData3f(-1-offsetx, 0, -1-offsety)
vwriter.addData3f(1+offsetx, 0, -1-offsety)
vwriter.addData3f(1+offsetx, 0, 1+offsety)
vwriter.addData3f(-1-offsetx, 0, 1+offsety)
cwriter.addData3f(bright, bright, bright)
cwriter.addData3f(bright, bright, bright)
cwriter.addData3f(bright, bright, bright)
cwriter.addData3f(bright, bright, bright)
triangles=GeomTriangles(Geom.UHStatic)
for i in range(2):
delta = i*8
triangles.addVertices(0+delta, 4+delta, 1+delta)
triangles.addVertices(1+delta, 4+delta, 5+delta)
triangles.addVertices(1+delta, 5+delta, 2+delta)
triangles.addVertices(2+delta, 5+delta, 6+delta)
triangles.addVertices(2+delta, 6+delta, 3+delta)
triangles.addVertices(3+delta, 6+delta, 7+delta)
triangles.addVertices(3+delta, 7+delta, 0+delta)
triangles.addVertices(0+delta, 7+delta, 4+delta)
triangles.closePrimitive()
geom=Geom(vdata)
geom.addPrimitive(triangles)
geomnode=GeomNode("card-frame")
geomnode.addGeom(geom)
return NodePath(geomnode)
def maintainReadout(self, task):
"""Access: private. Whenever necessary, rebuilds the entire
display from scratch. This is only done when the configuration
parameters have changed."""
# If nothing has changed, don't update.
if (self.dirty==0):
return Task.cont
self.dirty = 0
# Delete the old set of cards.
for card in self.cards:
card.removeNode()
self.cards = []
# If not enabled, return.
if (self.enabled == 0):
self.task = 0
return Task.done
# Generate the include and exclude sets.
exclude = {}
include = {}
self.analyzeTextureSet(self.exclude, exclude)
self.analyzeTextureSet(self.include, include)
# Generate a list of cards and the corresponding windows.
cards = []
wins = []
for iwin in range(base.graphicsEngine.getNumWindows()):
win = base.graphicsEngine.getWindow(iwin)
for itex in range(win.countTextures()):
tex = win.getTexture(itex)
if (tex in include) and (tex not in exclude):
if (tex.getTextureType() == Texture.TTCubeMap):
for face in range(6):
self.cardmaker.setUvRangeCube(face)
card = NodePath(self.cardmaker.generate())
card.setTexture(tex)
cards.append(card)
elif (tex.getTextureType() == Texture.TT2dTextureArray):
for layer in range(tex.getZSize()):
self.cardmaker.setUvRange((0, 1, 1, 0), (0, 0, 1, 1),\
(layer, layer, layer, layer))
card = NodePath(self.cardmaker.generate())
# 2D texture arrays are not supported by
# the fixed-function pipeline, so we need to
# enable the shader generator to view them.
card.setShaderAuto()
card.setTexture(tex)
cards.append(card)
else:
card = win.getTextureCard()
card.setTexture(tex)
cards.append(card)
wins.append(win)
exclude[tex] = 1
self.cards = cards
if (len(cards)==0):
self.task = 0
return Task.done
ncards = len(cards)
# Decide how many rows and columns to use for the layout.
if (self.layout == "hline"):
rows = 1
cols = ncards
elif (self.layout == "vline"):
rows = ncards
cols = 1
elif (self.layout == "hgrid"):
rows = int(math.sqrt(ncards))
cols = rows
if (rows * cols < ncards): cols += 1
if (rows * cols < ncards): rows += 1
elif (self.layout == "vgrid"):
rows = int(math.sqrt(ncards))
cols = rows
if (rows * cols < ncards): rows += 1
if (rows * cols < ncards): cols += 1
elif (self.layout == "cycle"):
rows = 1
cols = 1
else:
BufferViewer.notify.error('shouldnt ever get here in BufferViewer.maintainReadout')
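# Worked example: with 5 cards and layout 'hgrid', rows = int(sqrt(5)) = 2
# and cols = 2; since 2*2 < 5, cols becomes 3 and 2*3 >= 5, giving a
# 2x3 layout ('vgrid' grows rows first and would give 3x2 instead).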
# Choose an aspect ratio for the cards. All card size
# calculations are done in pixel-units, using integers,
# in order to ensure that everything ends up neatly on
# a pixel boundary.
aspectx = wins[0].getXSize()
aspecty = wins[0].getYSize()
for win in wins:
if (win.getXSize()*aspecty) != (win.getYSize()*aspectx):
aspectx = 1
aspecty = 1
# Choose a card size. If the user didn't specify a size,
# use a heuristic, otherwise, just follow orders. The
# heuristic uses an initial card size of 42.66666667% of
# the screen vertically, which comes to 256 pixels on
# an 800x600 display. Then, it double checks that the
# readout will fit on the screen, and if not, it shrinks it.
bordersize = 4.0
if (float(self.sizex)==0.0) and (float(self.sizey)==0.0):
sizey = int(0.4266666667 * base.win.getYSize())
sizex = (sizey * aspectx) // aspecty
v_sizey = (base.win.getYSize() - (rows-1) - (rows*2)) // rows
v_sizex = (v_sizey * aspectx) // aspecty
if (v_sizey < sizey) or (v_sizex < sizex):
sizey = v_sizey
sizex = v_sizex
adjustment = 2
h_sizex = float (base.win.getXSize() - adjustment) / float (cols)
h_sizex -= bordersize
if (h_sizex < 1.0):
h_sizex = 1.0
h_sizey = (h_sizex * aspecty) // aspectx
if (h_sizey < sizey) or (h_sizex < sizex):
sizey = h_sizey
sizex = h_sizex
else:
sizex = int(self.sizex * 0.5 * base.win.getXSize())
sizey = int(self.sizey * 0.5 * base.win.getYSize())
if (sizex == 0): sizex = (sizey*aspectx) // aspecty
if (sizey == 0): sizey = (sizex*aspecty) // aspectx
# Convert from pixels to render2d-units.
fsizex = (2.0 * sizex) / float(base.win.getXSize())
fsizey = (2.0 * sizey) / float(base.win.getYSize())
fpixelx = 2.0 / float(base.win.getXSize())
fpixely = 2.0 / float(base.win.getYSize())
# Choose directional offsets
if (self.position == "llcorner"):
dirx = -1.0
diry = -1.0
elif (self.position == "lrcorner"):
dirx = 1.0
diry = -1.0
elif (self.position == "ulcorner"):
dirx = -1.0
diry = 1.0
elif (self.position == "urcorner"):
dirx = 1.0
diry = 1.0
else:
BufferViewer.notify.error('window mode not implemented yet')
# Create the frame
frame = self.makeFrame(sizex, sizey)
# Now, position the cards on the screen.
# For each card, create a frame consisting of eight quads.
for r in range(rows):
for c in range(cols):
index = c + r*cols
if (index < ncards):
index = (index + self.cardindex) % len(cards)
posx = dirx * (1.0 - ((c + 0.5) * (fsizex + fpixelx * bordersize))) - (fpixelx * dirx)
posy = diry * (1.0 - ((r + 0.5) * (fsizey + fpixely * bordersize))) - (fpixely * diry)
placer = NodePath("card-structure")
placer.setPos(Point3.rfu(posx, 0, posy))
placer.setScale(Vec3.rfu(fsizex*0.5, 1.0, fsizey*0.5))
placer.setBin(self.cullbin, self.cullsort)
placer.reparentTo(self.renderParent)
frame.instanceTo(placer)
cards[index].reparentTo(placer)
cards[index] = placer
return Task.cont
|
|
from collections import OrderedDict
from itertools import chain
import time
from django.conf import settings
from pandas.core.frame import DataFrame
# an immediate fix for an import error with pandas v0.15
try:
from pandas.io.parsers import ExcelWriter
except ImportError, e:
from pandas import ExcelWriter
from pyxform.survey_element import SurveyElement
from pyxform.section import Section, RepeatingSection
from pyxform.question import Question
from onadata.apps.viewer.models.data_dictionary import DataDictionary
from onadata.apps.viewer.models.parsed_instance import ParsedInstance
from onadata.libs.exceptions import NoRecordsFoundError
from onadata.libs.utils.common_tags import ID, XFORM_ID_STRING, STATUS,\
ATTACHMENTS, GEOLOCATION, UUID, SUBMISSION_TIME, NA_REP,\
BAMBOO_DATASET_ID, DELETEDAT, TAGS, NOTES, SUBMITTED_BY
from onadata.libs.utils.export_tools import question_types_to_exclude
# this is Mongo Collection where we will store the parsed submissions
xform_instances = settings.MONGO_DB.instances
# the bind type of select multiples that we use to compare
MULTIPLE_SELECT_BIND_TYPE = u"select"
GEOPOINT_BIND_TYPE = u"geopoint"
# column group delimiters
GROUP_DELIMITER_SLASH = '/'
GROUP_DELIMITER_DOT = '.'
DEFAULT_GROUP_DELIMITER = GROUP_DELIMITER_SLASH
GROUP_DELIMITERS = [GROUP_DELIMITER_SLASH, GROUP_DELIMITER_DOT]
def get_valid_sheet_name(sheet_name, existing_name_list):
# truncate sheet_name to XLSDataFrameBuilder.SHEET_NAME_MAX_CHARS
new_sheet_name = \
sheet_name[:XLSDataFrameBuilder.SHEET_NAME_MAX_CHARS]
# make sure it's unique within the list
i = 1
generated_name = new_sheet_name
while generated_name in existing_name_list:
digit_length = len(str(i))
allowed_name_len = XLSDataFrameBuilder.SHEET_NAME_MAX_CHARS - \
digit_length
# truncate the name to the allowed length
if len(generated_name) > allowed_name_len:
generated_name = generated_name[:allowed_name_len]
generated_name = "{0}{1}".format(generated_name, i)
i += 1
return generated_name
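# A sketch of the behaviour (SHEET_NAME_MAX_CHARS is 30 in
# XLSDataFrameBuilder below):
#   get_valid_sheet_name('household_survey', []) == 'household_survey'
#   get_valid_sheet_name('household_survey', ['household_survey'])
#       == 'household_survey1'
# Longer names are truncated so that the appended counter still fits
# within SHEET_NAME_MAX_CHARS.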
def remove_dups_from_list_maintain_order(l):
return list(OrderedDict.fromkeys(l))
def get_prefix_from_xpath(xpath):
xpath = str(xpath)
parts = xpath.rsplit('/', 1)
if len(parts) == 1:
return None
elif len(parts) == 2:
return '%s/' % parts[0]
else:
raise ValueError(
'%s cannot be prefixed, it returns %s' % (xpath, str(parts)))
class AbstractDataFrameBuilder(object):
IGNORED_COLUMNS = [XFORM_ID_STRING, STATUS, ID, ATTACHMENTS, GEOLOCATION,
BAMBOO_DATASET_ID, DELETEDAT, SUBMITTED_BY]
# fields NOT within the form def that we want to include
ADDITIONAL_COLUMNS = [UUID, SUBMISSION_TIME, TAGS, NOTES]
BINARY_SELECT_MULTIPLES = False
"""
Shared functionality used by any DataFrameBuilder, i.e. XLS, CSV and KML.
"""
def __init__(self, username, id_string, filter_query=None,
group_delimiter=DEFAULT_GROUP_DELIMITER,
split_select_multiples=True, binary_select_multiples=False):
self.username = username
self.id_string = id_string
self.filter_query = filter_query
self.group_delimiter = group_delimiter
self.split_select_multiples = split_select_multiples
self.BINARY_SELECT_MULTIPLES = binary_select_multiples
self._setup()
def _setup(self):
self.dd = DataDictionary.objects.get(user__username=self.username,
id_string=self.id_string)
self.select_multiples = self._collect_select_multiples(self.dd)
self.gps_fields = self._collect_gps_fields(self.dd)
@classmethod
def _fields_to_select(cls, dd):
return [c.get_abbreviated_xpath()
for c in dd.get_survey_elements() if isinstance(c, Question)]
@classmethod
def _collect_select_multiples(cls, dd):
return dict([(e.get_abbreviated_xpath(), [c.get_abbreviated_xpath()
for c in e.children])
for e in dd.get_survey_elements()
if e.bind.get("type") == "select"])
@classmethod
def _split_select_multiples(cls, record, select_multiples,
binary_select_multiples=False):
""" Prefix contains the xpath and slash if we are within a repeat so
that we can figure out which select multiples belong to which repeat
"""
for key, choices in select_multiples.items():
# the select multiple might be blank or not exist in the record,
# need to make those False
selections = []
if key in record:
# split selected choices by spaces and join by / to the
# element's xpath
selections = ["%s/%s" % (key, r)
for r in record[key].split(" ")]
# remove the column since we are adding separate columns
# for each choice
record.pop(key)
if not binary_select_multiples:
# add columns to record for every choice, with default
# False and set to True for items in selections
record.update(dict([(choice, choice in selections)
for choice in choices]))
else:
YES = 1
NO = 0
record.update(
dict([(choice, YES if choice in selections else NO)
for choice in choices]))
# recurse into nested repeats
for record_key, record_item in record.items():
if type(record_item) == list:
for list_item in record_item:
if type(list_item) == dict:
cls._split_select_multiples(
list_item, select_multiples)
return record
@classmethod
def _collect_gps_fields(cls, dd):
return [e.get_abbreviated_xpath() for e in dd.get_survey_elements()
if e.bind.get("type") == "geopoint"]
@classmethod
def _tag_edit_string(cls, record):
"""
Turns a list of tags into a string representation.
"""
if '_tags' in record:
tags = []
for tag in record['_tags']:
if ',' in tag and ' ' in tag:
tags.append('"%s"' % tag)
else:
tags.append(tag)
record.update({'_tags': u', '.join(sorted(tags))})
@classmethod
def _split_gps_fields(cls, record, gps_fields):
updated_gps_fields = {}
for key, value in record.iteritems():
if key in gps_fields and isinstance(value, basestring):
gps_xpaths = DataDictionary.get_additional_geopoint_xpaths(key)
gps_parts = dict([(xpath, None) for xpath in gps_xpaths])
                # hack: check if it's a list and grab the object within that
parts = value.split(' ')
# TODO: check whether or not we can have a gps recording
# from ODKCollect that has less than four components,
# for now we are assuming that this is not the case.
if len(parts) == 4:
gps_parts = dict(zip(gps_xpaths, parts))
updated_gps_fields.update(gps_parts)
# check for repeats within record i.e. in value
elif type(value) == list:
for list_item in value:
if type(list_item) == dict:
cls._split_gps_fields(list_item, gps_fields)
record.update(updated_gps_fields)
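    # Hypothetical sketch of _split_gps_fields(); the xpath and reading are
    # made up, and the generated column names come from
    # DataDictionary.get_additional_geopoint_xpaths():
    #
    #   record = {'gps': '-1.2921 36.8219 1795.0 5.0'}
    #   AbstractDataFrameBuilder._split_gps_fields(record, ['gps'])
    #   # record now also maps each additional geopoint xpath to one of the
    #   # four space-separated components (latitude, longitude, altitude,
    #   # precision)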
def _query_mongo(self, query='{}', start=0,
limit=ParsedInstance.DEFAULT_LIMIT,
fields='[]', count=False):
        # ParsedInstance.query_mongo takes params as json strings
        # so we dump the fields dictionary
count_args = {
'username': self.username,
'id_string': self.id_string,
'query': query,
'fields': '[]',
'sort': '{}',
'count': True
}
count_object = ParsedInstance.query_mongo(**count_args)
record_count = count_object[0]["count"]
if record_count == 0:
raise NoRecordsFoundError("No records found for your query")
# if count was requested, return the count
if count:
return record_count
else:
query_args = {
'username': self.username,
'id_string': self.id_string,
'query': query,
'fields': fields,
                # TODO: we might want to add this in for the user
                # to specify a sort order
'sort': '{}',
'start': start,
'limit': limit,
'count': False
}
# use ParsedInstance.query_mongo
cursor = ParsedInstance.query_mongo(**query_args)
return cursor
class XLSDataFrameBuilder(AbstractDataFrameBuilder):
"""
Generate structures from mongo and DataDictionary for a DataFrameXLSWriter
This builder can choose to query the data in batches and write to a single
ExcelWriter object using multiple instances of DataFrameXLSWriter
"""
INDEX_COLUMN = u"_index"
PARENT_TABLE_NAME_COLUMN = u"_parent_table_name"
PARENT_INDEX_COLUMN = u"_parent_index"
EXTRA_COLUMNS = [INDEX_COLUMN, PARENT_TABLE_NAME_COLUMN,
PARENT_INDEX_COLUMN]
SHEET_NAME_MAX_CHARS = 30
XLS_SHEET_COUNT_LIMIT = 255
XLS_COLUMN_COUNT_MAX = 255
CURRENT_INDEX_META = 'current_index'
def __init__(self, username, id_string, filter_query=None,
group_delimiter=DEFAULT_GROUP_DELIMITER,
split_select_multiples=True, binary_select_multiples=False):
super(XLSDataFrameBuilder, self).__init__(
username, id_string, filter_query, group_delimiter,
split_select_multiples, binary_select_multiples)
def _setup(self):
super(XLSDataFrameBuilder, self)._setup()
# need to split columns, with repeats in individual sheets and
# everything else on the default sheet
self._generate_sections()
def export_to(self, file_path, batchsize=1000):
self.xls_writer = ExcelWriter(file_path)
# get record count
record_count = self._query_mongo(count=True)
# query in batches and for each batch create an XLSDataFrameWriter and
# write to existing xls_writer object
start = 0
header = True
while start < record_count:
cursor = self._query_mongo(self.filter_query, start=start,
limit=batchsize)
data = self._format_for_dataframe(cursor)
# write all cursor's data to their respective sheets
for section_name, section in self.sections.iteritems():
records = data[section_name]
# TODO: currently ignoring nested repeats
# so ignore sections that have 0 records
if len(records) > 0:
# use a different group delimiter if needed
columns = section["columns"]
if self.group_delimiter != DEFAULT_GROUP_DELIMITER:
columns = [self.group_delimiter.join(col.split("/"))
for col in columns]
columns = columns + self.EXTRA_COLUMNS
writer = XLSDataFrameWriter(records, columns)
writer.write_to_excel(self.xls_writer, section_name,
header=header, index=False)
header = False
# increment counter(s)
start += batchsize
time.sleep(0.1)
self.xls_writer.save()
def _format_for_dataframe(self, cursor):
"""
Format each record for consumption by a dataframe
returns a dictionary with the key being the name of the sheet,
and values a list of dicts to feed into a DataFrame
"""
data = dict((section_name, [])
for section_name in self.sections.keys())
main_section = self.sections[self.survey_name]
main_sections_columns = main_section["columns"]
for record in cursor:
# from record, we'll end up with multiple records, one for each
# section we have
# add records for the default section
self._add_data_for_section(data[self.survey_name],
record, main_sections_columns,
self.survey_name)
parent_index = main_section[self.CURRENT_INDEX_META]
for sheet_name, section in self.sections.iteritems():
                # skip default section i.e. survey name
if sheet_name != self.survey_name:
xpath = section["xpath"]
columns = section["columns"]
                    # TODO: handle nested repeats - ignoring nested repeats
                    # for now, which will not be in the top level record;
                    # perhaps nest sections as well so we can recurse in and
                    # get them
if xpath in record:
repeat_records = record[xpath]
# num_repeat_records = len(repeat_records)
for repeat_record in repeat_records:
self._add_data_for_section(
data[sheet_name],
repeat_record, columns, sheet_name,
parent_index, self.survey_name)
return data
def _add_data_for_section(self, data_section, record, columns,
section_name, parent_index=-1,
parent_table_name=None):
data_section.append({})
self.sections[section_name][self.CURRENT_INDEX_META] += 1
index = self.sections[section_name][self.CURRENT_INDEX_META]
# data_section[len(data_section)-1].update(record) # we could simply do
# this but end up with duplicate data from repeats
if self.split_select_multiples:
# find any select multiple(s) and add additional columns to record
record = self._split_select_multiples(
record, self.select_multiples)
# alt, precision
self._split_gps_fields(record, self.gps_fields)
for column in columns:
data_value = None
try:
data_value = record[column]
except KeyError:
# a record may not have responses for some elements simply
# because they were not captured
pass
data_section[
len(data_section) - 1].update({
self.group_delimiter.join(column.split('/'))
if self.group_delimiter != DEFAULT_GROUP_DELIMITER
else column: data_value})
data_section[len(data_section) - 1].update({
XLSDataFrameBuilder.INDEX_COLUMN: index,
XLSDataFrameBuilder.PARENT_INDEX_COLUMN: parent_index,
XLSDataFrameBuilder.PARENT_TABLE_NAME_COLUMN: parent_table_name})
# add ADDITIONAL_COLUMNS
data_section[len(data_section) - 1].update(
dict([(column, record[column] if column in record else None)
for column in self.ADDITIONAL_COLUMNS]))
def _generate_sections(self):
"""
Split survey questions into separate sections for each xls sheet and
columns for each section
"""
# clear list
self.sections = OrderedDict()
# dict of select multiple elements
self.select_multiples = {}
survey_element = self.dd.survey
self.survey_name = get_valid_sheet_name(
survey_element.name, self.sections.keys())
self._create_section(
self.survey_name, survey_element.get_abbreviated_xpath(), False)
# build sections
self._build_sections_recursive(self.survey_name, self.dd.get_survey())
for section_name in self.sections:
self.sections[section_name]['columns'] += self.ADDITIONAL_COLUMNS
self.get_exceeds_xls_limits()
def _build_sections_recursive(self, section_name, element,
is_repeating=False):
"""Builds a section's children and recurses any repeating sections
to build those as a separate section
"""
for child in element.children:
# if a section, recurse
if isinstance(child, Section):
new_is_repeating = isinstance(child, RepeatingSection)
new_section_name = section_name
                # if it's repeating, build a new section
if new_is_repeating:
new_section_name = get_valid_sheet_name(
child.name, self.sections.keys())
self._create_section(
new_section_name, child.get_abbreviated_xpath(), True)
self._build_sections_recursive(
new_section_name, child, new_is_repeating)
else:
# add to survey_sections
child_bind_type = child.bind.get(u"type")
if isinstance(child, Question) and not \
question_types_to_exclude(child.type)\
and not child_bind_type == MULTIPLE_SELECT_BIND_TYPE:
self._add_column_to_section(section_name, child)
elif child_bind_type == MULTIPLE_SELECT_BIND_TYPE:
self.select_multiples[child.get_abbreviated_xpath()] = \
[option.get_abbreviated_xpath()
for option in child.children]
# if select multiple, get its choices and make them
# columns
if self.split_select_multiples:
for option in child.children:
self._add_column_to_section(section_name, option)
else:
self._add_column_to_section(section_name, child)
# split gps fields within this section
if child_bind_type == GEOPOINT_BIND_TYPE:
# add columns for geopoint components
for xpath in self.dd.get_additional_geopoint_xpaths(
child.get_abbreviated_xpath()):
self._add_column_to_section(section_name, xpath)
def get_exceeds_xls_limits(self):
if not hasattr(self, "exceeds_xls_limits"):
self.exceeds_xls_limits = False
if len(self.sections) > self.XLS_SHEET_COUNT_LIMIT:
self.exceeds_xls_limits = True
else:
for section in self.sections.itervalues():
if len(section["columns"]) > self.XLS_COLUMN_COUNT_MAX:
self.exceeds_xls_limits = True
break
return self.exceeds_xls_limits
def _create_section(self, section_name, xpath, is_repeat):
# index = len(self.sections)
self.sections[section_name] = {
"name": section_name, "xpath": xpath, "columns": [],
"is_repeat": is_repeat, self.CURRENT_INDEX_META: 0}
def _add_column_to_section(self, sheet_name, column):
section = self.sections[sheet_name]
xpath = None
if isinstance(column, SurveyElement):
xpath = column.get_abbreviated_xpath()
elif isinstance(column, basestring):
xpath = column
assert(xpath)
# make sure column is not already in list
if xpath not in section["columns"]:
section["columns"].append(xpath)
class CSVDataFrameBuilder(AbstractDataFrameBuilder):
def __init__(self, username, id_string, filter_query=None,
group_delimiter=DEFAULT_GROUP_DELIMITER,
split_select_multiples=True, binary_select_multiples=False):
super(CSVDataFrameBuilder, self).__init__(
username, id_string, filter_query, group_delimiter,
split_select_multiples, binary_select_multiples)
self.ordered_columns = OrderedDict()
def _setup(self):
super(CSVDataFrameBuilder, self)._setup()
@classmethod
def _reindex(cls, key, value, ordered_columns, parent_prefix=None):
"""
Flatten list columns by appending an index, otherwise return as is
"""
d = {}
# check for lists
if type(value) is list and len(value) > 0 \
and key != NOTES and key != ATTACHMENTS:
for index, item in enumerate(value):
# start at 1
index += 1
# for each list check for dict, we want to transform the key of
# this dict
if type(item) is dict:
for nested_key, nested_val in item.iteritems():
# given the key "children/details" and nested_key/
# abbreviated xpath
# "children/details/immunization/polio_1",
# generate ["children", index, "immunization/polio_1"]
xpaths = [
"%s[%s]" % (
nested_key[:nested_key.index(key) + len(key)],
index),
nested_key[nested_key.index(key) + len(key) + 1:]]
                        # re-create the xpath, then split on /
xpaths = "/".join(xpaths).split("/")
new_prefix = xpaths[:-1]
if type(nested_val) is list:
# if nested_value is a list, rinse and repeat
d.update(cls._reindex(
nested_key, nested_val,
ordered_columns, new_prefix))
else:
# it can only be a scalar
# collapse xpath
if parent_prefix:
xpaths[0:len(parent_prefix)] = parent_prefix
new_xpath = u"/".join(xpaths)
# check if this key exists in our ordered columns
if key in ordered_columns.keys():
if new_xpath not in ordered_columns[key]:
ordered_columns[key].append(new_xpath)
d[new_xpath] = nested_val
else:
d[key] = value
else:
            # anything that's not a list will be in the top level dict so it's
            # safe to simply assign
if key == NOTES:
d[key] = u"\r\n".join(value)
elif key == ATTACHMENTS:
d[key] = []
else:
d[key] = value
return d
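    # Hypothetical sketch of _reindex() flattening a repeat; the xpaths are
    # made up for illustration:
    #
    #   value = [{'children/details/name': 'A'}, {'children/details/name': 'B'}]
    #   CSVDataFrameBuilder._reindex('children/details', value, OrderedDict())
    #   # => {'children/details[1]/name': 'A', 'children/details[2]/name': 'B'}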
@classmethod
def _build_ordered_columns(cls, survey_element, ordered_columns,
is_repeating_section=False):
"""
Build a flat ordered dict of column groups
is_repeating_section ensures that child questions of repeating sections
are not considered columns
"""
for child in survey_element.children:
# child_xpath = child.get_abbreviated_xpath()
if isinstance(child, Section):
child_is_repeating = False
if isinstance(child, RepeatingSection):
ordered_columns[child.get_abbreviated_xpath()] = []
child_is_repeating = True
cls._build_ordered_columns(child, ordered_columns,
child_is_repeating)
elif isinstance(child, Question) and not \
question_types_to_exclude(child.type) and not\
is_repeating_section: # if is_repeating_section,
                # its parent already initialised an empty list
                # so we don't add it to our list of columns,
# the repeating columns list will be
# generated when we reindex
ordered_columns[child.get_abbreviated_xpath()] = None
def _format_for_dataframe(self, cursor):
# TODO: check for and handle empty results
# add ordered columns for select multiples
if self.split_select_multiples:
for key, choices in self.select_multiples.items():
# HACK to ensure choices are NOT duplicated
self.ordered_columns[key] = \
remove_dups_from_list_maintain_order(choices)
# add ordered columns for gps fields
for key in self.gps_fields:
gps_xpaths = self.dd.get_additional_geopoint_xpaths(key)
self.ordered_columns[key] = [key] + gps_xpaths
data = []
for record in cursor:
# split select multiples
if self.split_select_multiples:
record = self._split_select_multiples(
record, self.select_multiples,
self.BINARY_SELECT_MULTIPLES)
# check for gps and split into components i.e. latitude, longitude,
# altitude, precision
self._split_gps_fields(record, self.gps_fields)
self._tag_edit_string(record)
flat_dict = {}
# re index repeats
for key, value in record.iteritems():
reindexed = self._reindex(key, value, self.ordered_columns)
flat_dict.update(reindexed)
            # if delimiter is different, replace within record as well
if self.group_delimiter != DEFAULT_GROUP_DELIMITER:
flat_dict = dict((self.group_delimiter.join(k.split('/')), v)
for k, v in flat_dict.iteritems())
data.append(flat_dict)
return data
def export_to(self, file_or_path, data_frame_max_size=30000):
from math import ceil
# get record count
record_count = self._query_mongo(query=self.filter_query, count=True)
self.ordered_columns = OrderedDict()
self._build_ordered_columns(self.dd.survey, self.ordered_columns)
# pandas will only export 30k records in a dataframe to a csv
# - we need to create multiple 30k dataframes if required,
# we need to go through all the records though so that
# we can figure out the columns we need for repeats
datas = []
num_data_frames = \
int(ceil(float(record_count) / float(data_frame_max_size)))
for i in range(num_data_frames):
cursor = self._query_mongo(
self.filter_query, start=(i * data_frame_max_size),
limit=data_frame_max_size)
data = self._format_for_dataframe(cursor)
datas.append(data)
columns = list(chain.from_iterable(
[[xpath] if cols is None else cols
for xpath, cols in self.ordered_columns.iteritems()]))
# use a different group delimiter if needed
if self.group_delimiter != DEFAULT_GROUP_DELIMITER:
columns = [self.group_delimiter.join(col.split("/"))
for col in columns]
# add extra columns
columns += [col for col in self.ADDITIONAL_COLUMNS]
header = True
if hasattr(file_or_path, 'read'):
csv_file = file_or_path
close = False
else:
csv_file = open(file_or_path, "wb")
close = True
for data in datas:
writer = CSVDataFrameWriter(data, columns)
writer.write_to_csv(csv_file, header=header)
header = False
if close:
csv_file.close()
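# Minimal usage sketch for CSVDataFrameBuilder (assumed): export a form's
# submissions to CSV, writing either to a path or to a file-like object.
#
#   csv_builder = CSVDataFrameBuilder("demo_user", "household_survey")
#   csv_builder.export_to("/tmp/household_survey.csv")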
class XLSDataFrameWriter(object):
def __init__(self, records, columns):
self.dataframe = DataFrame(records, columns=columns)
def write_to_excel(self, excel_writer, sheet_name, header=False,
index=False):
self.dataframe.to_excel(excel_writer, sheet_name, header=header,
index=index)
class CSVDataFrameWriter(object):
def __init__(self, records, columns):
# TODO: if records is empty, raise a known exception
# catch it in the view and handle
assert(len(records) > 0)
self.dataframe = DataFrame(records, columns=columns)
# remove columns we don't want
for col in AbstractDataFrameBuilder.IGNORED_COLUMNS:
if col in self.dataframe.columns:
del(self.dataframe[col])
def write_to_csv(self, csv_file, header=True, index=False):
na_rep = getattr(settings, 'NA_REP', NA_REP)
self.dataframe.to_csv(csv_file, header=header, index=index,
na_rep=na_rep, encoding='utf-8')
|
|
# coding: utf-8
"""
Wavefront REST API Documentation
<p>The Wavefront REST API enables you to interact with Wavefront servers using standard REST API tools. You can use the REST API to automate commonly executed operations such as automatically tagging sources.</p><p>When you make REST API calls outside the Wavefront REST API documentation you must add the header \"Authorization: Bearer <<API-TOKEN>>\" to your HTTP requests.</p> # noqa: E501
OpenAPI spec version: v2
Contact: chitimba@wavefront.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from wavefront_api_client.configuration import Configuration
class MaintenanceWindow(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'created_epoch_millis': 'int',
'creator_id': 'str',
'customer_id': 'str',
'end_time_in_seconds': 'int',
'event_name': 'str',
'host_tag_group_host_names_group_anded': 'bool',
'id': 'str',
'point_tag_filter': 'str',
'point_tag_filter_anded': 'bool',
'reason': 'str',
'relevant_customer_tags': 'list[str]',
'relevant_customer_tags_anded': 'bool',
'relevant_host_names': 'list[str]',
'relevant_host_tags': 'list[str]',
'relevant_host_tags_anded': 'bool',
'running_state': 'str',
'sort_attr': 'int',
'start_time_in_seconds': 'int',
'targets': 'list[str]',
'title': 'str',
'updated_epoch_millis': 'int',
'updater_id': 'str'
}
attribute_map = {
'created_epoch_millis': 'createdEpochMillis',
'creator_id': 'creatorId',
'customer_id': 'customerId',
'end_time_in_seconds': 'endTimeInSeconds',
'event_name': 'eventName',
'host_tag_group_host_names_group_anded': 'hostTagGroupHostNamesGroupAnded',
'id': 'id',
'point_tag_filter': 'pointTagFilter',
'point_tag_filter_anded': 'pointTagFilterAnded',
'reason': 'reason',
'relevant_customer_tags': 'relevantCustomerTags',
'relevant_customer_tags_anded': 'relevantCustomerTagsAnded',
'relevant_host_names': 'relevantHostNames',
'relevant_host_tags': 'relevantHostTags',
'relevant_host_tags_anded': 'relevantHostTagsAnded',
'running_state': 'runningState',
'sort_attr': 'sortAttr',
'start_time_in_seconds': 'startTimeInSeconds',
'targets': 'targets',
'title': 'title',
'updated_epoch_millis': 'updatedEpochMillis',
'updater_id': 'updaterId'
}
def __init__(self, created_epoch_millis=None, creator_id=None, customer_id=None, end_time_in_seconds=None, event_name=None, host_tag_group_host_names_group_anded=None, id=None, point_tag_filter=None, point_tag_filter_anded=None, reason=None, relevant_customer_tags=None, relevant_customer_tags_anded=None, relevant_host_names=None, relevant_host_tags=None, relevant_host_tags_anded=None, running_state=None, sort_attr=None, start_time_in_seconds=None, targets=None, title=None, updated_epoch_millis=None, updater_id=None, _configuration=None): # noqa: E501
"""MaintenanceWindow - a model defined in Swagger""" # noqa: E501
if _configuration is None:
_configuration = Configuration()
self._configuration = _configuration
self._created_epoch_millis = None
self._creator_id = None
self._customer_id = None
self._end_time_in_seconds = None
self._event_name = None
self._host_tag_group_host_names_group_anded = None
self._id = None
self._point_tag_filter = None
self._point_tag_filter_anded = None
self._reason = None
self._relevant_customer_tags = None
self._relevant_customer_tags_anded = None
self._relevant_host_names = None
self._relevant_host_tags = None
self._relevant_host_tags_anded = None
self._running_state = None
self._sort_attr = None
self._start_time_in_seconds = None
self._targets = None
self._title = None
self._updated_epoch_millis = None
self._updater_id = None
self.discriminator = None
if created_epoch_millis is not None:
self.created_epoch_millis = created_epoch_millis
if creator_id is not None:
self.creator_id = creator_id
if customer_id is not None:
self.customer_id = customer_id
self.end_time_in_seconds = end_time_in_seconds
if event_name is not None:
self.event_name = event_name
if host_tag_group_host_names_group_anded is not None:
self.host_tag_group_host_names_group_anded = host_tag_group_host_names_group_anded
if id is not None:
self.id = id
if point_tag_filter is not None:
self.point_tag_filter = point_tag_filter
if point_tag_filter_anded is not None:
self.point_tag_filter_anded = point_tag_filter_anded
self.reason = reason
self.relevant_customer_tags = relevant_customer_tags
if relevant_customer_tags_anded is not None:
self.relevant_customer_tags_anded = relevant_customer_tags_anded
if relevant_host_names is not None:
self.relevant_host_names = relevant_host_names
if relevant_host_tags is not None:
self.relevant_host_tags = relevant_host_tags
if relevant_host_tags_anded is not None:
self.relevant_host_tags_anded = relevant_host_tags_anded
if running_state is not None:
self.running_state = running_state
if sort_attr is not None:
self.sort_attr = sort_attr
self.start_time_in_seconds = start_time_in_seconds
if targets is not None:
self.targets = targets
self.title = title
if updated_epoch_millis is not None:
self.updated_epoch_millis = updated_epoch_millis
if updater_id is not None:
self.updater_id = updater_id
@property
def created_epoch_millis(self):
"""Gets the created_epoch_millis of this MaintenanceWindow. # noqa: E501
:return: The created_epoch_millis of this MaintenanceWindow. # noqa: E501
:rtype: int
"""
return self._created_epoch_millis
@created_epoch_millis.setter
def created_epoch_millis(self, created_epoch_millis):
"""Sets the created_epoch_millis of this MaintenanceWindow.
:param created_epoch_millis: The created_epoch_millis of this MaintenanceWindow. # noqa: E501
:type: int
"""
self._created_epoch_millis = created_epoch_millis
@property
def creator_id(self):
"""Gets the creator_id of this MaintenanceWindow. # noqa: E501
:return: The creator_id of this MaintenanceWindow. # noqa: E501
:rtype: str
"""
return self._creator_id
@creator_id.setter
def creator_id(self, creator_id):
"""Sets the creator_id of this MaintenanceWindow.
:param creator_id: The creator_id of this MaintenanceWindow. # noqa: E501
:type: str
"""
self._creator_id = creator_id
@property
def customer_id(self):
"""Gets the customer_id of this MaintenanceWindow. # noqa: E501
:return: The customer_id of this MaintenanceWindow. # noqa: E501
:rtype: str
"""
return self._customer_id
@customer_id.setter
def customer_id(self, customer_id):
"""Sets the customer_id of this MaintenanceWindow.
:param customer_id: The customer_id of this MaintenanceWindow. # noqa: E501
:type: str
"""
self._customer_id = customer_id
@property
def end_time_in_seconds(self):
"""Gets the end_time_in_seconds of this MaintenanceWindow. # noqa: E501
The time in epoch seconds when this maintenance window will end # noqa: E501
:return: The end_time_in_seconds of this MaintenanceWindow. # noqa: E501
:rtype: int
"""
return self._end_time_in_seconds
@end_time_in_seconds.setter
def end_time_in_seconds(self, end_time_in_seconds):
"""Sets the end_time_in_seconds of this MaintenanceWindow.
The time in epoch seconds when this maintenance window will end # noqa: E501
:param end_time_in_seconds: The end_time_in_seconds of this MaintenanceWindow. # noqa: E501
:type: int
"""
if self._configuration.client_side_validation and end_time_in_seconds is None:
raise ValueError("Invalid value for `end_time_in_seconds`, must not be `None`") # noqa: E501
self._end_time_in_seconds = end_time_in_seconds
@property
def event_name(self):
"""Gets the event_name of this MaintenanceWindow. # noqa: E501
The name of an event associated with the creation/update of this maintenance window # noqa: E501
:return: The event_name of this MaintenanceWindow. # noqa: E501
:rtype: str
"""
return self._event_name
@event_name.setter
def event_name(self, event_name):
"""Sets the event_name of this MaintenanceWindow.
The name of an event associated with the creation/update of this maintenance window # noqa: E501
:param event_name: The event_name of this MaintenanceWindow. # noqa: E501
:type: str
"""
self._event_name = event_name
@property
def host_tag_group_host_names_group_anded(self):
"""Gets the host_tag_group_host_names_group_anded of this MaintenanceWindow. # noqa: E501
If true, a source/host must be in 'relevantHostNames' and have tags matching the specification formed by 'relevantHostTags' and 'relevantHostTagsAnded' in order for this maintenance window to apply. If false, a source/host must either be in 'relevantHostNames' or match 'relevantHostTags' and 'relevantHostTagsAnded'. Default: false # noqa: E501
:return: The host_tag_group_host_names_group_anded of this MaintenanceWindow. # noqa: E501
:rtype: bool
"""
return self._host_tag_group_host_names_group_anded
@host_tag_group_host_names_group_anded.setter
def host_tag_group_host_names_group_anded(self, host_tag_group_host_names_group_anded):
"""Sets the host_tag_group_host_names_group_anded of this MaintenanceWindow.
If true, a source/host must be in 'relevantHostNames' and have tags matching the specification formed by 'relevantHostTags' and 'relevantHostTagsAnded' in order for this maintenance window to apply. If false, a source/host must either be in 'relevantHostNames' or match 'relevantHostTags' and 'relevantHostTagsAnded'. Default: false # noqa: E501
:param host_tag_group_host_names_group_anded: The host_tag_group_host_names_group_anded of this MaintenanceWindow. # noqa: E501
:type: bool
"""
self._host_tag_group_host_names_group_anded = host_tag_group_host_names_group_anded
@property
def id(self):
"""Gets the id of this MaintenanceWindow. # noqa: E501
:return: The id of this MaintenanceWindow. # noqa: E501
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this MaintenanceWindow.
:param id: The id of this MaintenanceWindow. # noqa: E501
:type: str
"""
self._id = id
@property
def point_tag_filter(self):
"""Gets the point_tag_filter of this MaintenanceWindow. # noqa: E501
Query that filters on point tags of timeseries scanned by alert. # noqa: E501
:return: The point_tag_filter of this MaintenanceWindow. # noqa: E501
:rtype: str
"""
return self._point_tag_filter
@point_tag_filter.setter
def point_tag_filter(self, point_tag_filter):
"""Sets the point_tag_filter of this MaintenanceWindow.
Query that filters on point tags of timeseries scanned by alert. # noqa: E501
:param point_tag_filter: The point_tag_filter of this MaintenanceWindow. # noqa: E501
:type: str
"""
self._point_tag_filter = point_tag_filter
@property
def point_tag_filter_anded(self):
"""Gets the point_tag_filter_anded of this MaintenanceWindow. # noqa: E501
        Whether to AND point tags filter listed in pointTagFilter. If true, a timeseries must contain the point tags along with other filters in order for the maintenance window to apply. If false, the tags are OR'ed, the customer must contain one of the tags. Default: false # noqa: E501
:return: The point_tag_filter_anded of this MaintenanceWindow. # noqa: E501
:rtype: bool
"""
return self._point_tag_filter_anded
@point_tag_filter_anded.setter
def point_tag_filter_anded(self, point_tag_filter_anded):
"""Sets the point_tag_filter_anded of this MaintenanceWindow.
        Whether to AND point tags filter listed in pointTagFilter. If true, a timeseries must contain the point tags along with other filters in order for the maintenance window to apply. If false, the tags are OR'ed, the customer must contain one of the tags. Default: false # noqa: E501
:param point_tag_filter_anded: The point_tag_filter_anded of this MaintenanceWindow. # noqa: E501
:type: bool
"""
self._point_tag_filter_anded = point_tag_filter_anded
@property
def reason(self):
"""Gets the reason of this MaintenanceWindow. # noqa: E501
The purpose of this maintenance window # noqa: E501
:return: The reason of this MaintenanceWindow. # noqa: E501
:rtype: str
"""
return self._reason
@reason.setter
def reason(self, reason):
"""Sets the reason of this MaintenanceWindow.
The purpose of this maintenance window # noqa: E501
:param reason: The reason of this MaintenanceWindow. # noqa: E501
:type: str
"""
if self._configuration.client_side_validation and reason is None:
raise ValueError("Invalid value for `reason`, must not be `None`") # noqa: E501
self._reason = reason
@property
def relevant_customer_tags(self):
"""Gets the relevant_customer_tags of this MaintenanceWindow. # noqa: E501
List of alert tags whose matching alerts will be put into maintenance because of this maintenance window # noqa: E501
:return: The relevant_customer_tags of this MaintenanceWindow. # noqa: E501
:rtype: list[str]
"""
return self._relevant_customer_tags
@relevant_customer_tags.setter
def relevant_customer_tags(self, relevant_customer_tags):
"""Sets the relevant_customer_tags of this MaintenanceWindow.
List of alert tags whose matching alerts will be put into maintenance because of this maintenance window # noqa: E501
:param relevant_customer_tags: The relevant_customer_tags of this MaintenanceWindow. # noqa: E501
:type: list[str]
"""
if self._configuration.client_side_validation and relevant_customer_tags is None:
raise ValueError("Invalid value for `relevant_customer_tags`, must not be `None`") # noqa: E501
self._relevant_customer_tags = relevant_customer_tags
@property
def relevant_customer_tags_anded(self):
"""Gets the relevant_customer_tags_anded of this MaintenanceWindow. # noqa: E501
Whether to AND customer tags listed in relevantCustomerTags. If true, a customer must contain all tags in order for the maintenance window to apply. If false, the tags are OR'ed, and a customer must contain one of the tags. Default: false # noqa: E501
:return: The relevant_customer_tags_anded of this MaintenanceWindow. # noqa: E501
:rtype: bool
"""
return self._relevant_customer_tags_anded
@relevant_customer_tags_anded.setter
def relevant_customer_tags_anded(self, relevant_customer_tags_anded):
"""Sets the relevant_customer_tags_anded of this MaintenanceWindow.
Whether to AND customer tags listed in relevantCustomerTags. If true, a customer must contain all tags in order for the maintenance window to apply. If false, the tags are OR'ed, and a customer must contain one of the tags. Default: false # noqa: E501
:param relevant_customer_tags_anded: The relevant_customer_tags_anded of this MaintenanceWindow. # noqa: E501
:type: bool
"""
self._relevant_customer_tags_anded = relevant_customer_tags_anded
@property
def relevant_host_names(self):
"""Gets the relevant_host_names of this MaintenanceWindow. # noqa: E501
List of source/host names that will be put into maintenance because of this maintenance window # noqa: E501
:return: The relevant_host_names of this MaintenanceWindow. # noqa: E501
:rtype: list[str]
"""
return self._relevant_host_names
@relevant_host_names.setter
def relevant_host_names(self, relevant_host_names):
"""Sets the relevant_host_names of this MaintenanceWindow.
List of source/host names that will be put into maintenance because of this maintenance window # noqa: E501
:param relevant_host_names: The relevant_host_names of this MaintenanceWindow. # noqa: E501
:type: list[str]
"""
self._relevant_host_names = relevant_host_names
@property
def relevant_host_tags(self):
"""Gets the relevant_host_tags of this MaintenanceWindow. # noqa: E501
List of source/host tags whose matching sources/hosts will be put into maintenance because of this maintenance window # noqa: E501
:return: The relevant_host_tags of this MaintenanceWindow. # noqa: E501
:rtype: list[str]
"""
return self._relevant_host_tags
@relevant_host_tags.setter
def relevant_host_tags(self, relevant_host_tags):
"""Sets the relevant_host_tags of this MaintenanceWindow.
List of source/host tags whose matching sources/hosts will be put into maintenance because of this maintenance window # noqa: E501
:param relevant_host_tags: The relevant_host_tags of this MaintenanceWindow. # noqa: E501
:type: list[str]
"""
self._relevant_host_tags = relevant_host_tags
@property
def relevant_host_tags_anded(self):
"""Gets the relevant_host_tags_anded of this MaintenanceWindow. # noqa: E501
Whether to AND source/host tags listed in relevantHostTags. If true, a source/host must contain all tags in order for the maintenance window to apply. If false, the tags are OR'ed, and a source/host must contain one of the tags. Default: false # noqa: E501
:return: The relevant_host_tags_anded of this MaintenanceWindow. # noqa: E501
:rtype: bool
"""
return self._relevant_host_tags_anded
@relevant_host_tags_anded.setter
def relevant_host_tags_anded(self, relevant_host_tags_anded):
"""Sets the relevant_host_tags_anded of this MaintenanceWindow.
Whether to AND source/host tags listed in relevantHostTags. If true, a source/host must contain all tags in order for the maintenance window to apply. If false, the tags are OR'ed, and a source/host must contain one of the tags. Default: false # noqa: E501
:param relevant_host_tags_anded: The relevant_host_tags_anded of this MaintenanceWindow. # noqa: E501
:type: bool
"""
self._relevant_host_tags_anded = relevant_host_tags_anded
@property
def running_state(self):
"""Gets the running_state of this MaintenanceWindow. # noqa: E501
:return: The running_state of this MaintenanceWindow. # noqa: E501
:rtype: str
"""
return self._running_state
@running_state.setter
def running_state(self, running_state):
"""Sets the running_state of this MaintenanceWindow.
:param running_state: The running_state of this MaintenanceWindow. # noqa: E501
:type: str
"""
allowed_values = ["ONGOING", "PENDING", "ENDED"] # noqa: E501
if (self._configuration.client_side_validation and
running_state not in allowed_values):
raise ValueError(
"Invalid value for `running_state` ({0}), must be one of {1}" # noqa: E501
.format(running_state, allowed_values)
)
self._running_state = running_state
@property
def sort_attr(self):
"""Gets the sort_attr of this MaintenanceWindow. # noqa: E501
Numeric value used in default sorting # noqa: E501
:return: The sort_attr of this MaintenanceWindow. # noqa: E501
:rtype: int
"""
return self._sort_attr
@sort_attr.setter
def sort_attr(self, sort_attr):
"""Sets the sort_attr of this MaintenanceWindow.
Numeric value used in default sorting # noqa: E501
:param sort_attr: The sort_attr of this MaintenanceWindow. # noqa: E501
:type: int
"""
self._sort_attr = sort_attr
@property
def start_time_in_seconds(self):
"""Gets the start_time_in_seconds of this MaintenanceWindow. # noqa: E501
The time in epoch seconds when this maintenance window will start # noqa: E501
:return: The start_time_in_seconds of this MaintenanceWindow. # noqa: E501
:rtype: int
"""
return self._start_time_in_seconds
@start_time_in_seconds.setter
def start_time_in_seconds(self, start_time_in_seconds):
"""Sets the start_time_in_seconds of this MaintenanceWindow.
The time in epoch seconds when this maintenance window will start # noqa: E501
:param start_time_in_seconds: The start_time_in_seconds of this MaintenanceWindow. # noqa: E501
:type: int
"""
if self._configuration.client_side_validation and start_time_in_seconds is None:
raise ValueError("Invalid value for `start_time_in_seconds`, must not be `None`") # noqa: E501
self._start_time_in_seconds = start_time_in_seconds
@property
def targets(self):
"""Gets the targets of this MaintenanceWindow. # noqa: E501
List of targets to notify, overriding the alert's targets. # noqa: E501
:return: The targets of this MaintenanceWindow. # noqa: E501
:rtype: list[str]
"""
return self._targets
@targets.setter
def targets(self, targets):
"""Sets the targets of this MaintenanceWindow.
List of targets to notify, overriding the alert's targets. # noqa: E501
:param targets: The targets of this MaintenanceWindow. # noqa: E501
:type: list[str]
"""
self._targets = targets
@property
def title(self):
"""Gets the title of this MaintenanceWindow. # noqa: E501
Title of this maintenance window # noqa: E501
:return: The title of this MaintenanceWindow. # noqa: E501
:rtype: str
"""
return self._title
@title.setter
def title(self, title):
"""Sets the title of this MaintenanceWindow.
Title of this maintenance window # noqa: E501
:param title: The title of this MaintenanceWindow. # noqa: E501
:type: str
"""
if self._configuration.client_side_validation and title is None:
raise ValueError("Invalid value for `title`, must not be `None`") # noqa: E501
self._title = title
@property
def updated_epoch_millis(self):
"""Gets the updated_epoch_millis of this MaintenanceWindow. # noqa: E501
:return: The updated_epoch_millis of this MaintenanceWindow. # noqa: E501
:rtype: int
"""
return self._updated_epoch_millis
@updated_epoch_millis.setter
def updated_epoch_millis(self, updated_epoch_millis):
"""Sets the updated_epoch_millis of this MaintenanceWindow.
:param updated_epoch_millis: The updated_epoch_millis of this MaintenanceWindow. # noqa: E501
:type: int
"""
self._updated_epoch_millis = updated_epoch_millis
@property
def updater_id(self):
"""Gets the updater_id of this MaintenanceWindow. # noqa: E501
:return: The updater_id of this MaintenanceWindow. # noqa: E501
:rtype: str
"""
return self._updater_id
@updater_id.setter
def updater_id(self, updater_id):
"""Sets the updater_id of this MaintenanceWindow.
:param updater_id: The updater_id of this MaintenanceWindow. # noqa: E501
:type: str
"""
self._updater_id = updater_id
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(MaintenanceWindow, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, MaintenanceWindow):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, MaintenanceWindow):
return True
return self.to_dict() != other.to_dict()
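# Illustrative usage sketch (not part of the generated client); the field
# values below are made up:
#
#   window = MaintenanceWindow(
#       reason="Planned database upgrade",
#       title="db-upgrade",
#       relevant_customer_tags=["db"],
#       start_time_in_seconds=1514764800,
#       end_time_in_seconds=1514768400)
#   print(window.to_dict())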
|
|
from sympy import (pi, sin, cos, Symbol, Integral, summation, sqrt, log,
oo, LambertW, I, meijerg, exp_polar, Max)
from sympy.plotting import (plot, plot_parametric, plot3d_parametric_line,
plot3d, plot3d_parametric_surface)
from sympy.plotting.plot import unset_show
from sympy.utilities.pytest import skip, raises
from sympy.plotting.experimental_lambdify import lambdify
from sympy.external import import_module
from sympy.core.decorators import wraps
from tempfile import NamedTemporaryFile
import os
import sys
class MockPrint(object):
def write(self, s):
pass
def disable_print(func, *args, **kwargs):
@wraps(func)
def wrapper(*args, **kwargs):
sys.stdout = MockPrint()
func(*args, **kwargs)
sys.stdout = sys.__stdout__
return wrapper
unset_show()
# XXX: We could implement this as a context manager instead
# That would need rewriting the plot_and_save() function
# entirely
class TmpFileManager:
tmp_files = []
@classmethod
def tmp_file(cls, name=''):
cls.tmp_files.append(NamedTemporaryFile(prefix=name, suffix='.png').name)
return cls.tmp_files[-1]
@classmethod
def cleanup(cls):
        # iterate explicitly so the files are removed even on Python 3,
        # where map() is lazy
        for tmp_file in cls.tmp_files:
            os.remove(tmp_file)
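# A possible context-manager variant of the temp-file handling hinted at in
# the XXX note above (a sketch only; plot_and_save() below keeps using
# TmpFileManager directly):
#
#   class TmpFileContext(object):
#       def __enter__(self):
#           return TmpFileManager.tmp_file
#       def __exit__(self, exc_type, exc_value, traceback):
#           TmpFileManager.cleanup()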
def plot_and_save(name):
tmp_file = TmpFileManager.tmp_file
x = Symbol('x')
y = Symbol('y')
z = Symbol('z')
###
# Examples from the 'introduction' notebook
###
p = plot(x)
p = plot(x*sin(x), x*cos(x))
p.extend(p)
p[0].line_color = lambda a: a
p[1].line_color = 'b'
p.title = 'Big title'
p.xlabel = 'the x axis'
p[1].label = 'straight line'
p.legend = True
p.aspect_ratio = (1, 1)
p.xlim = (-15, 20)
p.save(tmp_file('%s_basic_options_and_colors' % name))
p.extend(plot(x + 1))
p.append(plot(x + 3, x**2)[1])
p.save(tmp_file('%s_plot_extend_append' % name))
p[2] = plot(x**2, (x, -2, 3))
p.save(tmp_file('%s_plot_setitem' % name))
p = plot(sin(x), (x, -2*pi, 4*pi))
p.save(tmp_file('%s_line_explicit' % name))
p = plot(sin(x))
p.save(tmp_file('%s_line_default_range' % name))
p = plot((x**2, (x, -5, 5)), (x**3, (x, -3, 3)))
p.save(tmp_file('%s_line_multiple_range' % name))
raises(ValueError, lambda: plot(x, y))
#parametric 2d plots.
#Single plot with default range.
plot_parametric(sin(x), cos(x)).save(tmp_file())
#Single plot with range.
p = plot_parametric(sin(x), cos(x), (x, -5, 5))
p.save(tmp_file('%s_parametric_range' % name))
#Multiple plots with same range.
p = plot_parametric((sin(x), cos(x)), (x, sin(x)))
p.save(tmp_file('%s_parametric_multiple' % name))
#Multiple plots with different ranges.
p = plot_parametric((sin(x), cos(x), (x, -3, 3)), (x, sin(x), (x, -5, 5)))
p.save(tmp_file('%s_parametric_multiple_ranges' % name))
#depth of recursion specified.
p = plot_parametric(x, sin(x), depth=13)
p.save(tmp_file('%s_recursion_depth' % name))
#No adaptive sampling.
p = plot_parametric(cos(x), sin(x), adaptive=False, nb_of_points=500)
p.save(tmp_file('%s_adaptive' % name))
#3d parametric plots
p = plot3d_parametric_line(sin(x), cos(x), x)
p.save(tmp_file('%s_3d_line' % name))
p = plot3d_parametric_line(
(sin(x), cos(x), x, (x, -5, 5)), (cos(x), sin(x), x, (x, -3, 3)))
p.save(tmp_file('%s_3d_line_multiple' % name))
p = plot3d_parametric_line(sin(x), cos(x), x, nb_of_points=30)
p.save(tmp_file('%s_3d_line_points' % name))
# 3d surface single plot.
p = plot3d(x * y)
p.save(tmp_file('%s_surface' % name))
# Multiple 3D plots with same range.
p = plot3d(-x * y, x * y, (x, -5, 5))
p.save(tmp_file('%s_surface_multiple' % name))
# Multiple 3D plots with different ranges.
p = plot3d(
(x * y, (x, -3, 3), (y, -3, 3)), (-x * y, (x, -3, 3), (y, -3, 3)))
p.save(tmp_file('%s_surface_multiple_ranges' % name))
# Single Parametric 3D plot
p = plot3d_parametric_surface(sin(x + y), cos(x - y), x - y)
p.save(tmp_file('%s_parametric_surface' % name))
# Multiple Parametric 3D plots.
p = plot3d_parametric_surface(
(x*sin(z), x*cos(z), z, (x, -5, 5), (z, -5, 5)),
(sin(x + y), cos(x - y), x - y, (x, -5, 5), (y, -5, 5)))
p.save(tmp_file('%s_parametric_surface' % name))
###
# Examples from the 'colors' notebook
###
p = plot(sin(x))
p[0].line_color = lambda a: a
p.save(tmp_file('%s_colors_line_arity1' % name))
p[0].line_color = lambda a, b: b
p.save(tmp_file('%s_colors_line_arity2' % name))
p = plot(x*sin(x), x*cos(x), (x, 0, 10))
p[0].line_color = lambda a: a
p.save(tmp_file('%s_colors_param_line_arity1' % name))
p[0].line_color = lambda a, b: a
p.save(tmp_file('%s_colors_param_line_arity2a' % name))
p[0].line_color = lambda a, b: b
p.save(tmp_file('%s_colors_param_line_arity2b' % name))
p = plot3d_parametric_line(sin(x) + 0.1*sin(x)*cos(7*x),
cos(x) + 0.1*cos(x)*cos(7*x),
0.1*sin(7*x),
(x, 0, 2*pi))
p[0].line_color = lambda a: sin(4*a)
p.save(tmp_file('%s_colors_3d_line_arity1' % name))
p[0].line_color = lambda a, b: b
p.save(tmp_file('%s_colors_3d_line_arity2' % name))
p[0].line_color = lambda a, b, c: c
p.save(tmp_file('%s_colors_3d_line_arity3' % name))
p = plot3d(sin(x)*y, (x, 0, 6*pi), (y, -5, 5))
p[0].surface_color = lambda a: a
p.save(tmp_file('%s_colors_surface_arity1' % name))
p[0].surface_color = lambda a, b: b
p.save(tmp_file('%s_colors_surface_arity2' % name))
p[0].surface_color = lambda a, b, c: c
p.save(tmp_file('%s_colors_surface_arity3a' % name))
p[0].surface_color = lambda a, b, c: sqrt((a - 3*pi)**2 + b**2)
p.save(tmp_file('%s_colors_surface_arity3b' % name))
p = plot3d_parametric_surface(x * cos(4 * y), x * sin(4 * y), y,
(x, -1, 1), (y, -1, 1))
p[0].surface_color = lambda a: a
p.save(tmp_file('%s_colors_param_surf_arity1' % name))
p[0].surface_color = lambda a, b: a*b
p.save(tmp_file('%s_colors_param_surf_arity2' % name))
p[0].surface_color = lambda a, b, c: sqrt(a**2 + b**2 + c**2)
p.save(tmp_file('%s_colors_param_surf_arity3' % name))
###
# Examples from the 'advanced' notebook
###
i = Integral(log((sin(x)**2 + 1)*sqrt(x**2 + 1)), (x, 0, y))
p = plot(i, (y, 1, 5))
p.save(tmp_file('%s_advanced_integral' % name))
s = summation(1/x**y, (x, 1, oo))
p = plot(s, (y, 2, 10))
p.save(tmp_file('%s_advanced_inf_sum' % name))
p = plot(summation(1/x, (x, 1, y)), (y, 2, 10), show=False)
p[0].only_integers = True
p[0].steps = True
p.save(tmp_file('%s_advanced_fin_sum' % name))
###
# Test expressions that can not be translated to np and generate complex
# results.
###
plot(sin(x) + I*cos(x)).save(tmp_file())
plot(sqrt(sqrt(-x))).save(tmp_file())
plot(LambertW(x)).save(tmp_file())
plot(sqrt(LambertW(x))).save(tmp_file())
#Characteristic function of a StudentT distribution with nu=10
plot((meijerg(((1 / 2,), ()), ((5, 0, 1 / 2), ()), 5 * x**2 * exp_polar(-I*pi)/2)
+ meijerg(((1/2,), ()), ((5, 0, 1/2), ()),
5*x**2 * exp_polar(I*pi)/2)) / (48 * pi), (x, 1e-6, 1e-2)).save(tmp_file())
def test_matplotlib():
matplotlib = import_module('matplotlib', min_module_version='1.1.0', catch=(RuntimeError,))
if matplotlib:
try:
plot_and_save('test')
finally:
# clean up
TmpFileManager.cleanup()
else:
skip("Matplotlib not the default backend")
# Tests for exception handling in experimental_lambdify
def test_experimental_lambify():
x = Symbol('x')
f = lambdify([x], Max(x, 5))
# XXX should f be tested? If f(2) is attempted, an
# error is raised because a complex produced during wrapping of the arg
# is being compared with an int.
assert Max(2, 5) == 5
assert Max(5, 7) == 7
x = Symbol('x-3')
f = lambdify([x], x + 1)
assert f(1) == 2
@disable_print
def test_append_issue_7140():
x = Symbol('x')
p1 = plot(x)
p2 = plot(x**2)
p3 = plot(x + 2)
# append a series
p2.append(p1[0])
assert len(p2._series) == 2
with raises(TypeError):
p1.append(p2)
with raises(TypeError):
p1.append(p2._series)
|
|
# -*- coding: utf_8 -*-
# author www.chedanji.com
from xml.etree import ElementTree as etree
from xml.etree.ElementTree import Element, SubElement, ElementTree
import subprocess
from xml.dom import minidom
import os
import time
import io
import json
import getopt
import datetime
import urllib2
import re
alfred_workflow_data = os.environ['alfred_workflow_data']
repo_directory = alfred_workflow_data + '/tldr'
default_platform = 'osx'
def query(query):
global default_platform
clone()
dic = parse_args(query)
isUpdate = dic['isUpdate']
default_platform = dic['platform']
command = dic['command']
if bool(isUpdate):
update()
output_title('Update success')
else:
update(7)
if(len(query) == 0):
rowList = [{
'uid': '1',
'arg': '',
'autocomplete': '',
'icon': 'icon.png',
'title': 'Please input',
'valid': 'no'
}]
else:
# output_title(command)
rowList = parse_man_page(command)
if(len(rowList) == 0):
rowList = hint(command, default_platform)
if(len(rowList) == 0):
rowList = [{
'uid': '1',
'arg': '',
'autocomplete': '',
'icon': 'icon.png',
'title': 'Page not found',
'valid': 'no'
}]
print gen_xml(rowList)
def find_page_location(command):
with io.open(os.path.join(alfred_workflow_data, 'index.json'),
encoding='utf-8') as f:
index = json.load(f)
command_list = [item['name'] for item in index['commands']]
if command not in command_list:
return os.path.join(os.path.join(repo_directory, 'pages'),
os.path.join("common", command + '.md'))
supported_platforms = index['commands'][
command_list.index(command)]['platform']
if default_platform in supported_platforms:
platform = default_platform
elif 'common' in supported_platforms:
platform = 'common'
else:
platform = ''
if not platform:
return
page_path = os.path.join(os.path.join(repo_directory, 'pages'),
os.path.join(platform, command + '.md'))
return page_path
def parse_page(page):
with io.open(page, encoding='utf-8') as f:
lines = f.readlines()
if (len(lines) <= 0):
return []
first_line = lines[0]
if (first_line.startswith('#')):
return parse_old_page(lines)
else:
return parse_new_page(lines)
def parse_old_page(lines):
row_list = []
uid = 1
item = {}
description = {}
for line in lines:
if line.startswith('#'):
continue
elif line.startswith('-'):
item = {}
item['uid'] = str(uid)
item['subtitle'] = line.replace('-', '').replace(':', '').strip()
elif line.startswith('`'):
item['title'] = line.replace('`', '').replace('{{', '').replace('}}', '').strip()
row_list.append(item)
uid += 1
return row_list
def parse_new_page(lines):
row_list = []
uid = 1
item = {}
    code_pattern = re.compile(r'^( {4,}|\t)')
subtext_pattern = re.compile(r'^\=?$')
for line in lines:
        if (len(line.strip()) == 0):
continue
elif (code_pattern.match(line)):
            item['title'] = line.replace('{{', '').replace('}}', '').strip()
row_list.append(item)
elif (subtext_pattern.match(line.rstrip())):
continue
else:
item = {}
item['uid'] = str(uid)
            item['subtitle'] = line.strip()
uid += 1
return row_list
def parse_man_page(command):
page_path = find_page_location(command)
if page_path and os.path.exists(page_path):
return parse_page(page_path)
return []
def gen_xml(rowList):
items = Element('items')
for row in rowList:
item = SubElement(items, 'item')
item.set('autocomplete', row.get('autocomplete') or '')
item.set('uid', row.get('uid') or '')
item.set('arg', row.get('title') or '')
item.set('valid', row.get('valid') or '')
title = SubElement(item, 'title')
title.text = row.get('title') or ''
subtitle = SubElement(item, 'subtitle')
subtitle.text = row.get('subtitle') or ''
icon = SubElement(item, 'icon')
icon.text = row.get('icon')
tree = minidom.parseString(etree.tostring(items))
return tree.toxml()
def output_title(msg):
print gen_xml([{
'uid': str(time.time()),
'arg': '',
'autocomplete': '',
'icon': 'icon.png',
'title': str(msg),
'valid': 'no'
}])
def clone():
if(not os.path.exists(alfred_workflow_data)):
os.mkdir(alfred_workflow_data)
if(not os.path.exists(repo_directory)):
child = subprocess.Popen(['git clone https://github.com/tldr-pages/tldr.git ' + '"' + str(repo_directory) + '"'], shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
success, err = child.communicate()
if child.returncode:
raise Exception(err)
download_index()
def update(days=0):
if days > 0 and os.path.exists(os.path.join(alfred_workflow_data, 'config.json')):
with io.open(os.path.join(alfred_workflow_data, 'config.json'),
encoding='utf-8') as f:
try:
config = json.load(f)
except:
config = {'update_date': datetime.datetime.now().strftime('%Y%m%d')}
if (datetime.datetime.now().date() - datetime.datetime.strptime(config['update_date'], '%Y%m%d').date()).days < days:
return
os.chdir(repo_directory)
local = subprocess.check_output('git rev-parse master'.split()).strip()
remote = subprocess.check_output(
'git ls-remote https://github.com/tldr-pages/tldr/ HEAD'.split()
).split()[0]
if local != remote:
subprocess.check_call('git checkout master'.split())
subprocess.check_call('git pull --rebase'.split())
with io.open(os.path.join(alfred_workflow_data, 'config.json'), mode='wb') as f:
data = {
'update_date': datetime.datetime.now().strftime('%Y%m%d')
}
json.dump(data, f)
download_index()
def parse_args(query=''):
query = query.split()
dic = {
'isUpdate': False,
'platform': default_platform,
'command': ''
}
try:
opts, args = getopt.gnu_getopt(query, 'uo:')
except:
return dic
for opt, arg in opts:
if opt == '-u':
dic['isUpdate'] = True
elif opt == '-o':
dic['platform'] = arg
dic['command'] = '-'.join(args)
return dic
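# Illustrative sketch of parse_args() (query strings made up; assumes
# default_platform is still 'osx'):
#
#   parse_args('-o linux tar')
#   # => {'isUpdate': False, 'platform': 'linux', 'command': 'tar'}
#   parse_args('-u')
#   # => {'isUpdate': True, 'platform': 'osx', 'command': ''}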
def download_index():
url = 'http://tldr.sh/assets/index.json'
req = urllib2.Request(url, headers={'User-Agent' : "Magic Browser"})
try:
res = urllib2.urlopen(req)
    except urllib2.HTTPError as e:
print(e)
return
with io.open(os.path.join(alfred_workflow_data, 'index.json'), mode='wb') as f:
f.write(res.read())
def hint(command, platform=''):
if (len(command) == 0):
return []
with io.open(os.path.join(alfred_workflow_data, 'index.json'),
encoding='utf-8') as f:
index = json.load(f)
result = []
for item in index['commands']:
if (platform in item['platform'] or 'common' in item['platform']) and command == item['name'][0: len(command)]:
            if platform == 'osx':
                autocomplete = item['name']
            elif len(platform) > 0:
                autocomplete = item['name'] + ' -o ' + platform
            else:
                # fall back to the bare command name so autocomplete is
                # always defined
                autocomplete = item['name']
result.append({
'uid': str(time.time()),
'arg': '',
'autocomplete': autocomplete,
'icon': 'icon.png',
'title': item['name'],
'valid': 'no'
})
return result
|
|
##################################################################
# Code for testing the variational Multi-Stage Generative Model. #
##################################################################
# basic python
import numpy as np
import numpy.random as npr
import cPickle
# theano business
import theano
import theano.tensor as T
# phil's sweetness
import utils
from NetLayers import relu_actfun, softplus_actfun, tanh_actfun
from InfNet import InfNet
from HydraNet import HydraNet
from GPSImputerWI import GPSImputerWI, load_gpsimputer_from_file
from load_data import load_udm, load_tfd, load_svhn_gray, load_binarized_mnist
from HelperFuncs import construct_masked_data, shift_and_scale_into_01, \
row_shuffle, to_fX
RESULT_PATH = "IMP_MNIST_GPSI_WI/"
###############################
###############################
## TEST GPS IMPUTER ON MNIST ##
###############################
###############################
def test_mnist(step_type='add',
imp_steps=6,
occ_dim=15,
drop_prob=0.0):
#########################################
# Format the result tag more thoroughly #
#########################################
dp_int = int(100.0 * drop_prob)
result_tag = "{}GPSI_OD{}_DP{}_IS{}_{}_NA".format(RESULT_PATH, occ_dim, dp_int, imp_steps, step_type)
##########################
# Get some training data #
##########################
rng = np.random.RandomState(1234)
Xtr, Xva, Xte = load_binarized_mnist(data_path='./data/')
Xtr = np.vstack((Xtr, Xva))
Xva = Xte
#del Xte
tr_samples = Xtr.shape[0]
va_samples = Xva.shape[0]
##########################
# Get some training data #
##########################
# rng = np.random.RandomState(1234)
# dataset = 'data/mnist.pkl.gz'
# datasets = load_udm(dataset, as_shared=False, zero_mean=False)
# Xtr = datasets[0][0]
# Xva = datasets[1][0]
# Xte = datasets[2][0]
# # Merge validation set and training set, and test on test set.
# #Xtr = np.concatenate((Xtr, Xva), axis=0)
# #Xva = Xte
# Xtr = to_fX(shift_and_scale_into_01(Xtr))
# Xva = to_fX(shift_and_scale_into_01(Xva))
# tr_samples = Xtr.shape[0]
# va_samples = Xva.shape[0]
batch_size = 200
batch_reps = 1
all_pix_mean = np.mean(np.mean(Xtr, axis=1))
data_mean = to_fX( all_pix_mean * np.ones((Xtr.shape[1],)) )
############################################################
# Setup some parameters for the Iterative Refinement Model #
############################################################
x_dim = Xtr.shape[1]
s_dim = x_dim
h_dim = 50
z_dim = 100
init_scale = 0.6
x_in_sym = T.matrix('x_in_sym')
x_out_sym = T.matrix('x_out_sym')
x_mask_sym = T.matrix('x_mask_sym')
###############
# p_h_given_x #
###############
params = {}
shared_config = [x_dim, 250]
top_config = [shared_config[-1], h_dim]
params['shared_config'] = shared_config
params['mu_config'] = top_config
params['sigma_config'] = top_config
params['activation'] = tanh_actfun #relu_actfun
params['init_scale'] = 'xg' #init_scale
params['vis_drop'] = 0.0
params['hid_drop'] = 0.0
params['bias_noise'] = 0.0
params['input_noise'] = 0.0
params['build_theano_funcs'] = False
p_h_given_x = InfNet(rng=rng, Xd=x_in_sym, \
params=params, shared_param_dicts=None)
p_h_given_x.init_biases(0.0)
################
# p_s0_given_h #
################
params = {}
shared_config = [h_dim, 250]
output_config = [s_dim, s_dim, s_dim]
params['shared_config'] = shared_config
params['output_config'] = output_config
params['activation'] = tanh_actfun #relu_actfun
params['init_scale'] = 'xg' #init_scale
params['vis_drop'] = 0.0
params['hid_drop'] = 0.0
params['bias_noise'] = 0.0
params['input_noise'] = 0.0
params['build_theano_funcs'] = False
p_s0_given_h = HydraNet(rng=rng, Xd=x_in_sym, \
params=params, shared_param_dicts=None)
p_s0_given_h.init_biases(0.0)
#################
# p_zi_given_xi #
#################
params = {}
shared_config = [(x_dim + x_dim), 500, 500]
top_config = [shared_config[-1], z_dim]
params['shared_config'] = shared_config
params['mu_config'] = top_config
params['sigma_config'] = top_config
params['activation'] = tanh_actfun #relu_actfun
params['init_scale'] = init_scale
params['vis_drop'] = 0.0
params['hid_drop'] = 0.0
params['bias_noise'] = 0.0
params['input_noise'] = 0.0
params['build_theano_funcs'] = False
p_zi_given_xi = InfNet(rng=rng, Xd=x_in_sym, \
params=params, shared_param_dicts=None)
p_zi_given_xi.init_biases(0.0)
###################
# p_sip1_given_zi #
###################
params = {}
shared_config = [z_dim, 500, 500]
output_config = [s_dim, s_dim, s_dim]
params['shared_config'] = shared_config
params['output_config'] = output_config
params['activation'] = tanh_actfun #relu_actfun
params['init_scale'] = init_scale
params['vis_drop'] = 0.0
params['hid_drop'] = 0.0
params['bias_noise'] = 0.0
params['input_noise'] = 0.0
params['build_theano_funcs'] = False
p_sip1_given_zi = HydraNet(rng=rng, Xd=x_in_sym, \
params=params, shared_param_dicts=None)
p_sip1_given_zi.init_biases(0.0)
################
# p_x_given_si #
################
params = {}
shared_config = [s_dim]
output_config = [x_dim, x_dim]
params['shared_config'] = shared_config
params['output_config'] = output_config
params['activation'] = tanh_actfun #relu_actfun
params['init_scale'] = init_scale
params['vis_drop'] = 0.0
params['hid_drop'] = 0.0
params['bias_noise'] = 0.0
params['input_noise'] = 0.0
params['build_theano_funcs'] = False
p_x_given_si = HydraNet(rng=rng, Xd=x_in_sym, \
params=params, shared_param_dicts=None)
p_x_given_si.init_biases(0.0)
###############
# q_h_given_x #
###############
params = {}
shared_config = [x_dim, 250]
top_config = [shared_config[-1], h_dim]
params['shared_config'] = shared_config
params['mu_config'] = top_config
params['sigma_config'] = top_config
params['activation'] = tanh_actfun #relu_actfun
params['init_scale'] = 'xg' #init_scale
params['vis_drop'] = 0.0
params['hid_drop'] = 0.0
params['bias_noise'] = 0.0
params['input_noise'] = 0.0
params['build_theano_funcs'] = False
q_h_given_x = InfNet(rng=rng, Xd=x_in_sym, \
params=params, shared_param_dicts=None)
q_h_given_x.init_biases(0.0)
#################
# q_zi_given_xi #
#################
params = {}
shared_config = [(x_dim + x_dim), 500, 500]
top_config = [shared_config[-1], z_dim]
params['shared_config'] = shared_config
params['mu_config'] = top_config
params['sigma_config'] = top_config
params['activation'] = tanh_actfun #relu_actfun
params['init_scale'] = init_scale
params['vis_drop'] = 0.0
params['hid_drop'] = 0.0
params['bias_noise'] = 0.0
params['input_noise'] = 0.0
params['build_theano_funcs'] = False
q_zi_given_xi = InfNet(rng=rng, Xd=x_in_sym, \
params=params, shared_param_dicts=None)
q_zi_given_xi.init_biases(0.0)
###########################################################
# Define parameters for the GPSImputer, and initialize it #
###########################################################
print("Building the GPSImputer...")
gpsi_params = {}
gpsi_params['x_dim'] = x_dim
gpsi_params['h_dim'] = h_dim
gpsi_params['z_dim'] = z_dim
gpsi_params['s_dim'] = s_dim
# switch between direct construction and construction via p_x_given_si
gpsi_params['use_p_x_given_si'] = False
gpsi_params['imp_steps'] = imp_steps
gpsi_params['step_type'] = step_type
gpsi_params['x_type'] = 'bernoulli'
gpsi_params['obs_transform'] = 'sigmoid'
GPSI = GPSImputerWI(rng=rng,
x_in=x_in_sym, x_out=x_out_sym, x_mask=x_mask_sym, \
p_h_given_x=p_h_given_x, \
p_s0_given_h=p_s0_given_h, \
p_zi_given_xi=p_zi_given_xi, \
p_sip1_given_zi=p_sip1_given_zi, \
p_x_given_si=p_x_given_si, \
q_h_given_x=q_h_given_x, \
q_zi_given_xi=q_zi_given_xi, \
params=gpsi_params, \
shared_param_dicts=None)
################################################################
# Apply some updates, to check that they aren't totally broken #
################################################################
log_name = "{}_RESULTS.txt".format(result_tag)
out_file = open(log_name, 'wb')
costs = [0. for i in range(10)]
learn_rate = 0.0002
momentum = 0.5
batch_idx = np.arange(batch_size) + tr_samples
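    # Main training loop: 'scale' warms up the learning rate and momentum over
    # the first 5k updates; the learning rate then decays by 7% every 15k
    # updates, and momentum steps from 0.75 to 0.90 after 10k updates.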
for i in range(250000):
scale = min(1.0, ((i+1) / 5000.0))
lam_scale = 1.0 - min(1.0, ((i+1) / 100000.0)) # decays from 1.0->0.0
if (((i + 1) % 15000) == 0):
learn_rate = learn_rate * 0.93
if (i > 10000):
momentum = 0.90
else:
momentum = 0.75
# get the indices of training samples for this batch update
batch_idx += batch_size
if (np.max(batch_idx) >= tr_samples):
# we finished an "epoch", so we rejumble the training set
Xtr = row_shuffle(Xtr)
batch_idx = np.arange(batch_size)
# set sgd and objective function hyperparams for this update
GPSI.set_sgd_params(lr=scale*learn_rate, \
mom_1=scale*momentum, mom_2=0.98)
GPSI.set_train_switch(1.0)
GPSI.set_lam_nll(lam_nll=1.0)
GPSI.set_lam_kld(lam_kld_p=0.05, lam_kld_q=0.95, \
lam_kld_g=(0.1 * lam_scale), lam_kld_s=(0.1 * lam_scale))
GPSI.set_lam_l2w(1e-5)
# perform a minibatch update and record the cost for this batch
xb = to_fX( Xtr.take(batch_idx, axis=0) )
xi, xo, xm = construct_masked_data(xb, drop_prob=drop_prob, \
occ_dim=occ_dim, data_mean=data_mean)
result = GPSI.train_joint(xi, xo, xm, batch_reps)
# do diagnostics and general training tracking
costs = [(costs[j] + result[j]) for j in range(len(result)-1)]
if ((i % 250) == 0):
costs = [(v / 250.0) for v in costs]
str1 = "-- batch {0:d} --".format(i)
str2 = " joint_cost: {0:.4f}".format(costs[0])
str3 = " nll_bound : {0:.4f}".format(costs[1])
str4 = " nll_cost : {0:.4f}".format(costs[2])
str5 = " kld_cost : {0:.4f}".format(costs[3])
str6 = " reg_cost : {0:.4f}".format(costs[4])
joint_str = "\n".join([str1, str2, str3, str4, str5, str6])
print(joint_str)
out_file.write(joint_str+"\n")
out_file.flush()
costs = [0.0 for v in costs]
if ((i % 1000) == 0):
Xva = row_shuffle(Xva)
# record an estimate of performance on the test set
xi, xo, xm = construct_masked_data(Xva[0:5000], drop_prob=drop_prob, \
occ_dim=occ_dim, data_mean=data_mean)
nll, kld = GPSI.compute_fe_terms(xi, xo, xm, sample_count=10)
vfe = np.mean(nll) + np.mean(kld)
str1 = " va_nll_bound : {}".format(vfe)
str2 = " va_nll_term : {}".format(np.mean(nll))
str3 = " va_kld_q2p : {}".format(np.mean(kld))
joint_str = "\n".join([str1, str2, str3])
print(joint_str)
out_file.write(joint_str+"\n")
out_file.flush()
if ((i % 2000) == 0):
GPSI.save_to_file("{}_PARAMS.pkl".format(result_tag))
# Get some validation samples for evaluating model performance
xb = to_fX( Xva[0:100] )
xi, xo, xm = construct_masked_data(xb, drop_prob=drop_prob, \
occ_dim=occ_dim, data_mean=data_mean)
xi = np.repeat(xi, 2, axis=0)
xo = np.repeat(xo, 2, axis=0)
xm = np.repeat(xm, 2, axis=0)
# draw some sample imputations from the model
samp_count = xi.shape[0]
_, model_samps = GPSI.sample_imputer(xi, xo, xm, use_guide_policy=False)
seq_len = len(model_samps)
seq_samps = np.zeros((seq_len*samp_count, model_samps[0].shape[1]))
idx = 0
for s1 in range(samp_count):
for s2 in range(seq_len):
seq_samps[idx] = model_samps[s2][s1]
idx += 1
file_name = "{0:s}_samples_ng_b{1:d}.png".format(result_tag, i)
utils.visualize_samples(seq_samps, file_name, num_rows=20)
# show KLds and NLLs on a step-by-step basis
xb = to_fX( Xva[0:1000] )
xi, xo, xm = construct_masked_data(xb, drop_prob=drop_prob, \
occ_dim=occ_dim, data_mean=data_mean)
step_costs = GPSI.compute_per_step_cost(xi, xo, xm)
step_nlls = step_costs[0]
step_klds = step_costs[1]
step_nums = np.arange(step_nlls.shape[0])
file_name = "{0:s}_NLL_b{1:d}.png".format(result_tag, i)
utils.plot_stem(step_nums, step_nlls, file_name)
file_name = "{0:s}_KLD_b{1:d}.png".format(result_tag, i)
utils.plot_stem(step_nums, step_klds, file_name)
#################################
#################################
## CHECK MNIST IMPUTER RESULTS ##
#################################
#################################
def test_mnist_results(step_type='add',
imp_steps=6,
occ_dim=15,
drop_prob=0.0):
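    """
    Reload the GPSImputerWI trained by test_mnist (matching result_tag) and
    log guided and unguided free-energy bound estimates on the held-out data.
    """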
#########################################
# Format the result tag more thoroughly #
#########################################
dp_int = int(100.0 * drop_prob)
result_tag = "{}GPSI_OD{}_DP{}_IS{}_{}_NA".format(RESULT_PATH, occ_dim, dp_int, imp_steps, step_type)
##########################
# Get some training data #
##########################
rng = np.random.RandomState(1234)
Xtr, Xva, Xte = load_binarized_mnist(data_path='./data/')
Xtr = np.vstack((Xtr, Xva))
Xva = Xte
#del Xte
tr_samples = Xtr.shape[0]
va_samples = Xva.shape[0]
##########################
# Get some training data #
##########################
# rng = np.random.RandomState(1234)
# dataset = 'data/mnist.pkl.gz'
# datasets = load_udm(dataset, as_shared=False, zero_mean=False)
# Xtr = datasets[0][0]
# Xva = datasets[1][0]
# Xte = datasets[2][0]
# # Merge validation set and training set, and test on test set.
# #Xtr = np.concatenate((Xtr, Xva), axis=0)
# #Xva = Xte
# Xtr = to_fX(shift_and_scale_into_01(Xtr))
# Xva = to_fX(shift_and_scale_into_01(Xva))
# tr_samples = Xtr.shape[0]
# va_samples = Xva.shape[0]
batch_size = 250
batch_reps = 1
all_pix_mean = np.mean(np.mean(Xtr, axis=1))
data_mean = to_fX( all_pix_mean * np.ones((Xtr.shape[1],)) )
# Load parameters from a previously trained model
print("Testing model load from file...")
GPSI = load_gpsimputer_from_file(f_name="{}_PARAMS.pkl".format(result_tag), \
rng=rng)
    ################################################################
    # Evaluate the loaded model, to check that it works as expected #
    ################################################################
log_name = "{}_FINAL_RESULTS_NEW.txt".format(result_tag)
out_file = open(log_name, 'wb')
Xva = row_shuffle(Xva)
# record an estimate of performance on the test set
str0 = "GUIDED SAMPLE BOUND:"
print(str0)
xi, xo, xm = construct_masked_data(Xva[:5000], drop_prob=drop_prob, \
occ_dim=occ_dim, data_mean=data_mean)
nll_0, kld_0 = GPSI.compute_fe_terms(xi, xo, xm, sample_count=10, \
use_guide_policy=True)
xi, xo, xm = construct_masked_data(Xva[5000:], drop_prob=drop_prob, \
occ_dim=occ_dim, data_mean=data_mean)
nll_1, kld_1 = GPSI.compute_fe_terms(xi, xo, xm, sample_count=10, \
use_guide_policy=True)
nll = np.concatenate((nll_0, nll_1))
kld = np.concatenate((kld_0, kld_1))
vfe = np.mean(nll) + np.mean(kld)
str1 = " va_nll_bound : {}".format(vfe)
str2 = " va_nll_term : {}".format(np.mean(nll))
str3 = " va_kld_q2p : {}".format(np.mean(kld))
joint_str = "\n".join([str0, str1, str2, str3])
print(joint_str)
out_file.write(joint_str+"\n")
out_file.flush()
# record an estimate of performance on the test set
str0 = "UNGUIDED SAMPLE BOUND:"
print(str0)
xi, xo, xm = construct_masked_data(Xva[:5000], drop_prob=drop_prob, \
occ_dim=occ_dim, data_mean=data_mean)
nll_0, kld_0 = GPSI.compute_fe_terms(xi, xo, xm, sample_count=10, \
use_guide_policy=False)
xi, xo, xm = construct_masked_data(Xva[5000:], drop_prob=drop_prob, \
occ_dim=occ_dim, data_mean=data_mean)
nll_1, kld_1 = GPSI.compute_fe_terms(xi, xo, xm, sample_count=10, \
use_guide_policy=False)
nll = np.concatenate((nll_0, nll_1))
kld = np.concatenate((kld_0, kld_1))
str1 = " va_nll_bound : {}".format(np.mean(nll))
str2 = " va_nll_term : {}".format(np.mean(nll))
str3 = " va_kld_q2p : {}".format(np.mean(kld))
joint_str = "\n".join([str0, str1, str2, str3])
print(joint_str)
out_file.write(joint_str+"\n")
out_file.flush()
if __name__=="__main__":
#########
# MNIST #
#########
# TRAINING
#test_mnist(step_type='add', occ_dim=14, drop_prob=0.0)
#test_mnist(step_type='add', occ_dim=16, drop_prob=0.0)
#test_mnist(step_type='add', occ_dim=0, drop_prob=0.6)
#test_mnist(step_type='add', occ_dim=0, drop_prob=0.8)
#test_mnist(step_type='jump', occ_dim=14, drop_prob=0.0)
#test_mnist(step_type='jump', occ_dim=16, drop_prob=0.0)
#test_mnist(step_type='jump', occ_dim=0, drop_prob=0.6)
#test_mnist(step_type='jump', occ_dim=0, drop_prob=0.8)
#test_mnist(step_type='add', imp_steps=1, occ_dim=0, drop_prob=0.9)
#test_mnist(step_type='add', imp_steps=2, occ_dim=0, drop_prob=0.9)
#test_mnist(step_type='add', imp_steps=5, occ_dim=0, drop_prob=0.9)
#test_mnist(step_type='add', imp_steps=10, occ_dim=0, drop_prob=0.9)
#test_mnist(step_type='add', imp_steps=15, occ_dim=0, drop_prob=0.9)
test_mnist(step_type='add', imp_steps=10, occ_dim=0, drop_prob=1.0)
# RESULTS
# test_mnist_results(step_type='add', occ_dim=14, drop_prob=0.0)
# test_mnist_results(step_type='add', occ_dim=16, drop_prob=0.0)
# test_mnist_results(step_type='add', occ_dim=0, drop_prob=0.6)
# test_mnist_results(step_type='add', occ_dim=0, drop_prob=0.7)
# test_mnist_results(step_type='add', occ_dim=0, drop_prob=0.8)
# test_mnist_results(step_type='add', occ_dim=0, drop_prob=0.9)
# test_mnist_results(step_type='jump', occ_dim=14, drop_prob=0.0)
# test_mnist_results(step_type='jump', occ_dim=16, drop_prob=0.0)
# test_mnist_results(step_type='jump', occ_dim=0, drop_prob=0.6)
# test_mnist_results(step_type='jump', occ_dim=0, drop_prob=0.7)
# test_mnist_results(step_type='jump', occ_dim=0, drop_prob=0.8)
# test_mnist_results(step_type='jump', occ_dim=0, drop_prob=0.9)
#test_mnist_results(step_type='add', imp_steps=1, occ_dim=0, drop_prob=0.9)
#test_mnist_results(step_type='add', imp_steps=2, occ_dim=0, drop_prob=0.9)
#test_mnist_results(step_type='add', imp_steps=5, occ_dim=0, drop_prob=0.9)
#test_mnist_results(step_type='add', imp_steps=10, occ_dim=0, drop_prob=0.9)
#test_mnist_results(step_type='add', imp_steps=15, occ_dim=0, drop_prob=0.9)
|
|
"""redirect tests"""
import copy
import os
import re
import sys
import time
import numpy as np
import pytest
import tqdm
import wandb
from wandb.cli import cli
impls = [wandb.wandb_sdk.lib.redirect.StreamWrapper]
console_modes = ["wrap"]
if os.name != "nt":
impls.append(wandb.wandb_sdk.lib.redirect.Redirect)
console_modes.append("redirect")
class CapList(list):
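    """List that accumulates captured console output one line at a time.
    Appended chunks are split on newlines, trailing newline separators are
    stripped, and a carriage-return-prefixed chunk replaces the previously
    captured line, mimicking in-place terminal updates (e.g. progress bars).
    """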
def append(self, x):
if not x:
return
lines = re.split(b"\r\n|\n", x)
if len(lines) > 1:
            for line in lines:
                self.append(line)
return
if x.startswith(b"\r"):
if self:
self.pop()
x = x[1:]
for sep in [b"\r\n", b"\n"]:
if x.endswith(sep):
x = x[: -len(sep)]
super(CapList, self).append(x)
@pytest.mark.parametrize("cls", impls)
def test_basic(cls, capfd):
with capfd.disabled():
out = CapList()
redir = cls("stdout", cbs=[out.append])
redir.install()
print("Test")
redir.uninstall()
assert out == [b"Test"]
@pytest.mark.parametrize("cls", impls)
def test_reinstall(cls, capfd):
with capfd.disabled():
o1, o2 = CapList(), CapList()
r1 = cls("stdout", cbs=[o1.append])
r2 = cls("stdout", cbs=[o2.append])
r1.install()
print("ABCD")
r2.install()
print("WXYZ")
r1.install()
print("1234")
r2.install()
print("5678")
r2.uninstall()
assert o1 == [b"ABCD", b"1234"]
assert o2 == [b"WXYZ", b"5678"]
@pytest.mark.parametrize("cls", impls)
def test_tqdm_progbar(cls, capfd):
with capfd.disabled():
o = CapList()
r = cls("stderr", cbs=[o.append])
r.install()
for i in tqdm.tqdm(range(10)):
time.sleep(0.1)
r.uninstall()
assert len(o) == 1 and o[0].startswith(b"100%")
@pytest.mark.parametrize("cls", impls)
def test_formatting(cls, capfd):
with capfd.disabled():
o = CapList()
r = cls("stdout", cbs=[o.append])
r.install()
print("\x1b[31m\x1b[40m\x1b[1mHello\x01\x1b[22m\x1b[39m")
r.uninstall()
assert o == [b"\x1b[31m\x1b[40m\x1b[1mHello"]
@pytest.mark.parametrize("cls", impls)
def test_cursor(cls, capfd):
with capfd.disabled():
o = CapList()
r = cls("stdout", cbs=[o.append])
r.install()
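        # The string below exercises ANSI cursor-control sequences:
        # \x1b[nA / \x1b[nB move the cursor up / down n lines,
        # \x1b[nC / \x1b[nD move it right / left n columns,
        # \x1b[K erases to the end of the line, \x1b[1K erases from the start
        # of the line to the cursor, and \x1b[2K erases the whole line.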
s = "ABCD\nEFGH\nIJKX\nMNOP"
s += "\x1b[1A"
s += "\x1b[1D"
s += "L"
s += "\x1b[1B"
s += "\r"
s += "\x1b[K"
s += "QRSD"
s += "\x1b[1D"
s += "\x1b[1C"
s += "\x1b[1D"
s += "T"
s += "\x1b[4A"
s += "\x1b[1K"
s += "\r"
s += "1234"
s += "\x1b[4B"
s += "\r"
s += "WXYZ"
s += "\x1b[2K"
print(s)
r.uninstall()
assert o == [b"1234", b"EFGH", b"IJKL", b"QRST"]
@pytest.mark.parametrize("cls", impls)
def test_erase_screen(cls, capfd):
with capfd.disabled():
o = CapList()
r = cls("stdout", cbs=[o.append])
r.install()
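        # \x1b[J erases from the cursor to the end of the screen;
        # \x1b[1J erases from the start of the screen to the cursor.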
s = "QWERT\nYUIOP\n12345"
s += "\r"
s += "\x1b[J"
s += "\x1b[A"
s += "\r"
s += "\x1b[1J"
print(s)
r.uninstall()
assert o == [b" UIOP"]
o = CapList()
r = cls("stdout", cbs=[o.append])
r.install()
print("QWERT\nYUIOP\n12345")
print("\x1b[2J")
r.uninstall()
assert o == []
@pytest.mark.parametrize("cls", impls)
def test_interactive(cls, capfd):
with capfd.disabled():
r = cls("stdout", [lambda _: None])
r.install()
# TODO
r.uninstall()
@pytest.mark.skipif(sys.version_info >= (3, 9), reason="Tensorflow not available.")
@pytest.mark.skipif(
    not sys.stdout.isatty(), reason="Keras won't show progressbar on non-tty terminal."
)
@pytest.mark.parametrize("cls", impls)
def test_keras_progbar(cls, capfd):
import tensorflow as tf
with capfd.disabled():
o = CapList()
r = cls("stdout", [o.append])
model = tf.keras.models.Sequential()
model.add(tf.keras.layers.Dense(10, input_dim=10))
model.compile(loss="mse", optimizer="sgd")
r.install()
epochs = 5
model.fit(np.zeros((10000, 10)), np.ones((10000, 10)), epochs=epochs)
r.uninstall()
        assert len(o) in (epochs * 2, epochs * 2 + 1)  # allow an off-by-one
@pytest.mark.parametrize("cls", impls)
def test_numpy(cls, capfd):
with capfd.disabled():
r = cls("stdout", [lambda _: None])
r.install()
print(np.random.randint(64, size=(40, 40, 40, 40)))
r.uninstall()
@pytest.mark.skipif(
sys.version_info >= (3, 9) or sys.version_info < (3, 5),
reason="Torch not available.",
)
@pytest.mark.parametrize("cls", impls)
@pytest.mark.timeout(5)
def test_print_torch_model(cls, capfd):
# https://github.com/wandb/client/issues/2097
import torch
with capfd.disabled():
r = cls("stdout", [lambda _: None])
model = torch.nn.ModuleList(
torch.nn.Conv2d(1, 1, 1, bias=False) for _ in range(1000)
)
start = time.time()
print(model)
end = time.time()
t1 = end - start
r.install()
start = time.time()
print(model)
end = time.time()
t2 = end - start
overhead = t2 - t1
assert overhead < 0.2
r.uninstall()
@pytest.mark.parametrize("console", console_modes)
def test_run_with_console_redirect(test_settings, capfd, console):
with capfd.disabled():
local_settings = copy.copy(test_settings)
local_settings.update(
console=console, source=wandb.sdk.wandb_settings.Source.INIT
)
run = wandb.init(settings=local_settings)
print(np.random.randint(64, size=(40, 40, 40, 40)))
for i in tqdm.tqdm(range(100)):
time.sleep(0.02)
print("\n" * 1000)
print("---------------")
time.sleep(1)
run.finish()
@pytest.mark.parametrize("console", console_modes)
def test_offline_compression(test_settings, capfd, runner, console):
with capfd.disabled():
local_settings = copy.copy(test_settings)
local_settings.update(
mode="offline",
console=console,
source=wandb.sdk.wandb_settings.Source.INIT,
)
run = wandb.init(settings=local_settings)
for _ in tqdm.tqdm(range(100), ncols=139, ascii=" 123456789#"):
time.sleep(0.05)
print("\n" * 1000)
print("QWERT")
print("YUIOP")
print("12345")
print("\x1b[A\r\x1b[J\x1b[A\r\x1b[1J")
time.sleep(2)
run.finish()
binary_log_file = (
os.path.join(os.path.dirname(run.dir), "run-" + run.id) + ".wandb"
)
binary_log = runner.invoke(
cli.sync, ["--view", "--verbose", binary_log_file]
).stdout
# Only a single output record per stream is written when the run finishes
assert binary_log.count("Record: output") == 2
# Only final state of progress bar is logged
        assert binary_log.count("#") == 100, binary_log.count("#")
# Intermediate states are not logged
assert "QWERT" not in binary_log
assert "YUIOP" not in binary_log
assert "12345" not in binary_log
assert "UIOP" in binary_log
@pytest.mark.parametrize("console", console_modes)
@pytest.mark.parametrize("numpy", [True, False])
@pytest.mark.timeout(120)
def test_very_long_output(test_settings, capfd, runner, console, numpy):
# https://wandb.atlassian.net/browse/WB-5437
local_settings = copy.copy(test_settings)
local_settings.update(
mode="offline", console=console, source=wandb.sdk.wandb_settings.Source.INIT,
)
with capfd.disabled():
if not numpy:
wandb.wandb_sdk.lib.redirect.np = wandb.wandb_sdk.lib.redirect._Numpy()
try:
run = wandb.init(settings=local_settings)
print("LOG" * 1000000)
print("\x1b[31m\x1b[40m\x1b[1mHello\x01\x1b[22m\x1b[39m" * 100)
print("===finish===")
time.sleep(3)
run.finish()
binary_log_file = (
os.path.join(os.path.dirname(run.dir), "run-" + run.id) + ".wandb"
)
binary_log = runner.invoke(
cli.sync, ["--view", "--verbose", binary_log_file]
).stdout
assert "\\033[31m\\033[40m\\033[1mHello" in binary_log
assert binary_log.count("LOG") == 1000000
assert "===finish===" in binary_log
finally:
wandb.wandb_sdk.lib.redirect.np = np
@pytest.mark.parametrize("console", console_modes)
def test_no_numpy(test_settings, capfd, runner, console):
local_settings = copy.copy(test_settings)
local_settings.update(console=console, source=wandb.sdk.wandb_settings.Source.INIT)
with capfd.disabled():
wandb.wandb_sdk.lib.redirect.np = wandb.wandb_sdk.lib.redirect._Numpy()
try:
run = wandb.init(settings=local_settings)
print("\x1b[31m\x1b[40m\x1b[1mHello\x01\x1b[22m\x1b[39m")
run.finish()
binary_log_file = (
os.path.join(os.path.dirname(run.dir), "run-" + run.id) + ".wandb"
)
binary_log = runner.invoke(
cli.sync, ["--view", "--verbose", binary_log_file]
).stdout
finally:
wandb.wandb_sdk.lib.redirect.np = np
@pytest.mark.parametrize("console", console_modes)
def test_memory_leak2(test_settings, capfd, runner, console):
local_settings = copy.copy(test_settings)
local_settings.update(console=console, source=wandb.sdk.wandb_settings.Source.INIT)
with capfd.disabled():
run = wandb.init(settings=local_settings)
for i in range(1000):
print("ABCDEFGH")
time.sleep(3)
assert len(run._out_redir._emulator.buffer) < 1000
run.finish()
|
|
# -*- coding: utf-8 -*-
"""Import NeuroElectrics DataFormat (NEDF) files."""
from copy import deepcopy
from datetime import datetime, timezone
from xml.etree import ElementTree
import numpy as np
from ..base import BaseRaw
from ..meas_info import create_info
from ..utils import _mult_cal_one
from ...utils import warn, verbose, _check_fname
def _getsubnodetext(node, name):
"""Get an element from an XML node, raise an error otherwise.
Parameters
----------
node: Element
XML Element
name: str
Child element name
Returns
-------
test: str
Text contents of the child nodes
"""
subnode = node.findtext(name)
if not subnode:
raise RuntimeError('NEDF header ' + name + ' not found')
return subnode
def _parse_nedf_header(header):
"""Read header information from the first 10kB of an .nedf file.
Parameters
----------
header : bytes
        Null-terminated header data, typically the file's first 10240 bytes.
    Returns
    -------
    info : dict
        A dictionary with header information.
    dt : numpy.dtype
        Structure of the binary EEG/accelerometer/trigger data in the file.
    dt_last : numpy.dtype
        Structure of the final, possibly shorter, data block.
    n_samples : int
        The total number of EEG samples in the file.
    n_full : int
        The number of complete five-sample data blocks.
    """
info = {}
# nedf files have three accelerometer channels sampled at 100Hz followed
# by five EEG samples + TTL trigger sampled at 500Hz
# For 32 EEG channels and no stim channels, the data layout may look like
# [ ('acc', '>u2', (3,)),
# ('data', dtype([
# ('eeg', 'u1', (32, 3)),
# ('trig', '>i4', (1,))
# ]), (5,))
# ]
dt = [] # dtype for the binary data block
datadt = [] # dtype for a single EEG sample
headerend = header.find(b'\0')
if headerend == -1:
raise RuntimeError('End of header null not found')
headerxml = ElementTree.fromstring(header[:headerend])
nedfversion = headerxml.findtext('NEDFversion', '')
if nedfversion not in ['1.3', '1.4']:
warn('NEDFversion unsupported, use with caution')
if headerxml.findtext('stepDetails/DeviceClass', '') == 'STARSTIM':
warn('Found Starstim, this hasn\'t been tested extensively!')
if headerxml.findtext('AdditionalChannelStatus', 'OFF') != 'OFF':
raise RuntimeError('Unknown additional channel, aborting.')
n_acc = int(headerxml.findtext('NumberOfChannelsOfAccelerometer', 0))
if n_acc:
# expect one sample of u16 accelerometer data per block
dt.append(('acc', '>u2', (n_acc,)))
eegset = headerxml.find('EEGSettings')
if eegset is None:
raise RuntimeError('No EEG channels found')
nchantotal = int(_getsubnodetext(eegset, 'TotalNumberOfChannels'))
info['nchan'] = nchantotal
info['sfreq'] = int(_getsubnodetext(eegset, 'EEGSamplingRate'))
info['ch_names'] = [e.text for e in eegset.find('EEGMontage')]
if nchantotal != len(info['ch_names']):
raise RuntimeError(
f"TotalNumberOfChannels ({nchantotal}) != "
f"channel count ({len(info['ch_names'])})")
# expect nchantotal uint24s
datadt.append(('eeg', 'B', (nchantotal, 3)))
if headerxml.find('STIMSettings') is not None:
# 2* -> two stim samples per eeg sample
datadt.append(('stim', 'B', (2, nchantotal, 3)))
warn('stim channels are currently ignored')
# Trigger data: 4 bytes in newer versions, 1 byte in older versions
trigger_type = '>i4' if headerxml.findtext('NEDFversion') else 'B'
datadt.append(('trig', trigger_type))
# 5 data samples per block
dt.append(('data', np.dtype(datadt), (5,)))
date = headerxml.findtext('StepDetails/StartDate_firstEEGTimestamp', 0)
info['meas_date'] = datetime.fromtimestamp(int(date) / 1000, timezone.utc)
n_samples = int(_getsubnodetext(eegset, 'NumberOfRecordsOfEEG'))
n_full, n_last = divmod(n_samples, 5)
dt_last = deepcopy(dt)
assert dt_last[-1][-1] == (5,)
dt_last[-1] = list(dt_last[-1])
dt_last[-1][-1] = (n_last,)
dt_last[-1] = tuple(dt_last[-1])
return info, np.dtype(dt), np.dtype(dt_last), n_samples, n_full
# the first 10240 bytes are header in XML format, padded with NULL bytes
_HDRLEN = 10240
class RawNedf(BaseRaw):
"""Raw object from NeuroElectrics nedf file."""
def __init__(self, filename, preload=False, verbose=None):
filename = _check_fname(filename, 'read', True, 'filename')
with open(filename, mode='rb') as fid:
header = fid.read(_HDRLEN)
header, dt, dt_last, n_samp, n_full = _parse_nedf_header(header)
ch_names = header['ch_names'] + ['STI 014']
ch_types = ['eeg'] * len(ch_names)
ch_types[-1] = 'stim'
info = create_info(ch_names, header['sfreq'], ch_types)
# scaling factor ADC-values -> volts
# taken from the NEDF EEGLAB plugin
# (https://www.neuroelectrics.com/resources/software/):
for ch in info['chs'][:-1]:
ch['cal'] = 2.4 / (6.0 * 8388607)
info['meas_date'] = header['meas_date']
raw_extra = dict(dt=dt, dt_last=dt_last, n_full=n_full)
super().__init__(
info, preload=preload, filenames=[filename], verbose=verbose,
raw_extras=[raw_extra], last_samps=[n_samp - 1])
def _read_segment_file(self, data, idx, fi, start, stop, cals, mult):
dt = self._raw_extras[fi]['dt']
dt_last = self._raw_extras[fi]['dt_last']
n_full = self._raw_extras[fi]['n_full']
n_eeg = dt[1].subdtype[0][0].shape[0]
# data is stored in 5-sample chunks (except maybe the last one!)
# so we have to do some gymnastics to pick the correct parts to
# read
offset = start // 5 * dt.itemsize + _HDRLEN
start_sl = start % 5
n_samples = stop - start
n_samples_full = min(stop, n_full * 5) - start
last = None
n_chunks = (n_samples_full - 1) // 5 + 1
n_tot = n_chunks * 5
with open(self._filenames[fi], 'rb') as fid:
fid.seek(offset, 0)
chunks = np.fromfile(fid, dtype=dt, count=n_chunks)
assert len(chunks) == n_chunks
if n_samples != n_samples_full:
last = np.fromfile(fid, dtype=dt_last, count=1)
eeg = _convert_eeg(chunks, n_eeg, n_tot)
trig = chunks['data']['trig'].reshape(1, n_tot)
if last is not None:
n_last = dt_last['data'].shape[0]
eeg = np.concatenate(
(eeg, _convert_eeg(last, n_eeg, n_last)), axis=-1)
trig = np.concatenate(
(trig, last['data']['trig'].reshape(1, n_last)), axis=-1)
one_ = np.concatenate((eeg, trig))
one = one_[:, start_sl:n_samples + start_sl]
_mult_cal_one(data, one, idx, cals, mult)
def _convert_eeg(chunks, n_eeg, n_tot):
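    """Convert packed big-endian 3-byte EEG samples to an (n_eeg, n_tot) int array."""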
    # combine big-endian uint8 triplets into single 24-bit integer values
eeg = chunks['data']['eeg'] @ np.array([1 << 16, 1 << 8, 1])
# convert sign if necessary
eeg[eeg > (1 << 23)] -= 1 << 24
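    # e.g. the triplet (0x00, 0x01, 0x00) maps to 256 and (0xFF, 0xFF, 0xFF) to -1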
eeg = eeg.reshape((n_tot, n_eeg)).T
return eeg
@verbose
def read_raw_nedf(filename, preload=False, verbose=None):
"""Read NeuroElectrics .nedf files.
NEDF file versions starting from 1.3 are supported.
Parameters
----------
filename : str
Path to the .nedf file.
%(preload)s
%(verbose)s
Returns
-------
raw : instance of RawNedf
A Raw object containing NEDF data.
See Also
--------
mne.io.Raw : Documentation of attribute and methods.
"""
return RawNedf(filename, preload, verbose)
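# A minimal usage sketch (the file name below is hypothetical):
#     raw = read_raw_nedf('recording.nedf', preload=True)
#     data, times = raw[:, :]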
|
|
"""
Base_Map.py
Created by amounra on 2014-7-26.
This file allows the reassignment of the controls from their default arrangement. The order is from left to right;
Buttons are Note #'s and Faders/Rotaries are Controller #'s
"""
USER_OFFSET = 10
OSC_TRANSMIT = False
OSC_OUTPORT = 7400
SHIFT_LATCHING = False
CAP_BUTTON_TRANSLATIONS = False #include the top 8 capacitive touch buttons in UserMode translations.
CHANNEL = 0 #main channel (0 - 15)
AFTERTOUCH = True #when True, sends aftertouch in instrument modes and UserMode. When False, turns CCs off for instrument modes and transmits CCs in UserModes.
BASE_PADS = [60, 61, 62, 63, 64, 65, 66, 67, 52, 53, 54, 55, 56, 57, 58, 59, 44, 45, 46, 47, 48, 49, 50, 51, 36, 37, 38, 39, 40, 41, 42, 43] #there are 32 of these
BASE_TOUCHSTRIPS = [1, 2, 3, 4, 5, 6, 7, 8, 9] #there are 9 of these
BASE_TOUCHPADS = [10, 11, 12, 13, 14, 15, 16, 17]
BASE_BUTTONS = [18, 19, 20, 21, 22, 23, 24, 25] #there are 8 of these
BASE_RUNNERS = [68, 69, 70, 71, 72, 73, 74, 75]
BASE_LCDS = [34, 35]
COLOR_MAP = [2, 64, 4, 8, 16, 127, 32]
"""You can change the orientation of the Up, Down, Left, and Right buttons (where applicable) by changing the array. The values correspond to the buttons from top to bottom."""
UDLR = [0, 1, 2, 3]
"""The values in this array determine the choices for what length of clip is created when "Fixed Length" is turned on:
0 = 1 Beat
1 = 2 Beat
2 = 1 Bar
3 = 2 Bars
4 = 4 Bars
5 = 8 Bars
6 = 16 Bars
7 = 32 Bars
"""
LENGTH_VALUES = [2, 3, 4]
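#per the table above, the default [2, 3, 4] corresponds to 1 Bar, 2 Bars, and 4 Bars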
"""It is possible to create a custom list of scales to be used by the script. For instance, the list below would include major, minor, auto, drumpad, and chromatic scales, in that order."""
#SCALENAMES = ['Major', 'Minor', 'Auto', 'DrumPad', 'Chromatic']
from _Framework.ButtonElement import Color
from _Mono_Framework.LividColors import *
class BaseColors:
class DefaultButton:
On = LividRGB.WHITE
Off = LividRGB.OFF
Disabled = LividRGB.OFF
Alert = LividRGB.BlinkFast.WHITE
class MainModes:
Clips = LividRGB.WHITE
Clips_shifted = LividRGB.BlinkFast.WHITE
Sends = LividRGB.MAGENTA
Sends_shifted = LividRGB.BlinkFast.MAGENTA
Device = LividRGB.CYAN
Device_shifted = LividRGB.BlinkFast.CYAN
User = LividRGB.RED
User_shifted = LividRGB.BlinkFast.RED
class Session:
StopClipTriggered = LividRGB.BlinkFast.BLUE
StopClip = LividRGB.BLUE
Scene = LividRGB.CYAN
NoScene = LividRGB.OFF
SceneTriggered = LividRGB.BlinkFast.BLUE
ClipTriggeredPlay = LividRGB.BlinkFast.GREEN
ClipTriggeredRecord = LividRGB.BlinkFast.RED
RecordButton = LividRGB.OFF
ClipStopped = LividRGB.WHITE
ClipStarted = LividRGB.GREEN
ClipRecording = LividRGB.RED
NavigationButtonOn = LividRGB.BLUE
Empty = LividRGB.OFF
class NoteEditor:
class Step:
Low = LividRGB.CYAN
High = LividRGB.WHITE
Full = LividRGB.YELLOW
Muted = LividRGB.YELLOW
StepEmpty = LividRGB.OFF
class StepEditing:
High = LividRGB.GREEN
Low = LividRGB.CYAN
Full = LividRGB.YELLOW
Muted = LividRGB.WHITE
StepEmpty = LividRGB.OFF
StepEmptyBase = LividRGB.OFF
StepEmptyScale = LividRGB.OFF
StepDisabled = LividRGB.OFF
Playhead = Color(31)
PlayheadRecord = Color(31)
StepSelected = LividRGB.GREEN
QuantizationSelected = LividRGB.RED
QuantizationUnselected = LividRGB.MAGENTA
class LoopSelector:
Playhead = LividRGB.YELLOW
OutsideLoop = LividRGB.BLUE
InsideLoopStartBar = LividRGB.CYAN
SelectedPage = LividRGB.WHITE
InsideLoop = LividRGB.CYAN
PlayheadRecord = LividRGB.RED
class DrumGroup:
PadAction = LividRGB.WHITE
PadFilled = LividRGB.GREEN
PadSelected = LividRGB.WHITE
PadSelectedNotSoloed = LividRGB.WHITE
PadEmpty = LividRGB.OFF
PadMuted = LividRGB.YELLOW
PadSoloed = LividRGB.CYAN
PadMutedSelected = LividRGB.BLUE
PadSoloedSelected = LividRGB.BLUE
PadInvisible = LividRGB.OFF
PadAction = LividRGB.RED
class Mixer:
SoloOn = LividRGB.CYAN
SoloOff = LividRGB.OFF
MuteOn = LividRGB.YELLOW
MuteOff = LividRGB.OFF
ArmSelected = LividRGB.GREEN
ArmUnselected = LividRGB.RED
ArmOff = LividRGB.OFF
StopClip = LividRGB.BLUE
SelectedOn = LividRGB.BLUE
SelectedOff = LividRGB.OFF
class Recording:
Transition = LividRGB.BlinkSlow.GREEN
class Recorder:
On = LividRGB.WHITE
Off = LividRGB.BLUE
NewOn = LividRGB.BlinkFast.YELLOW
NewOff = LividRGB.YELLOW
FixedOn = LividRGB.BlinkFast.CYAN
FixedOff = LividRGB.CYAN
RecordOn = LividRGB.BlinkFast.GREEN
RecordOff = LividRGB.GREEN
FixedAssigned = LividRGB.MAGENTA
FixedNotAssigned = LividRGB.OFF
class Transport:
OverdubOn = LividRGB.BlinkFast.RED
OverdubOff = LividRGB.RED
class Sequencer:
OctaveOn = LividRGB.BlinkFast.CYAN
OctaveOff = LividRGB.OFF
On = LividRGB.WHITE
Off = LividRGB.OFF
class Device:
NavOn = LividRGB.MAGENTA
NavOff = LividRGB.OFF
BankOn = LividRGB.YELLOW
BankOff = LividRGB.OFF
ChainNavOn = LividRGB.RED
ChainNavOff = LividRGB.OFF
ContainNavOn = LividRGB.CYAN
ContainNavOff = LividRGB.OFF
class DeviceNavigator:
DeviceNavOn = LividRGB.MAGENTA
DeviceNavOff = LividRGB.OFF
ChainNavOn = LividRGB.RED
ChainNavOff = LividRGB.OFF
LevelNavOn = LividRGB.CYAN
LevelNavOff = LividRGB.OFF
class Mod:
class Nav:
OnValue = LividRGB.RED
OffValue = LividRGB.WHITE
class MonoInstrument:
PressFlash = LividRGB.WHITE
OffsetOnValue = LividRGB.GREEN
ScaleOffsetOnValue = LividRGB.RED
SplitModeOnValue = LividRGB.WHITE
SequencerModeOnValue = LividRGB.CYAN
DrumOffsetOnValue = LividRGB.MAGENTA
VerticalOffsetOnValue = LividRGB.BLUE
class Keys:
SelectedNote = LividRGB.GREEN
RootWhiteValue = LividRGB.RED
RootBlackValue = LividRGB.MAGENTA
WhiteValue = LividRGB.CYAN
BlackValue = LividRGB.BLUE
class Drums:
SelectedNote = LividRGB.BLUE
EvenValue = LividRGB.GREEN
OddValue = LividRGB.MAGENTA
class Translation:
class Channel_10:
Pad_0 = LividRGB.OFF
Pad_1 = LividRGB.OFF
Pad_2 = LividRGB.OFF
Pad_3 = LividRGB.OFF
Pad_4 = LividRGB.OFF
Pad_5 = LividRGB.OFF
Pad_6 = LividRGB.OFF
Pad_7 = LividRGB.OFF
Pad_8 = LividRGB.OFF
Pad_9 = LividRGB.OFF
Pad_10 = LividRGB.OFF
Pad_11 = LividRGB.OFF
Pad_12 = LividRGB.OFF
Pad_13 = LividRGB.OFF
Pad_14 = LividRGB.OFF
Pad_15 = LividRGB.OFF
Pad_16 = LividRGB.OFF
Pad_17 = LividRGB.OFF
Pad_18 = LividRGB.OFF
Pad_19 = LividRGB.OFF
Pad_20 = LividRGB.OFF
Pad_21 = LividRGB.OFF
Pad_22 = LividRGB.OFF
Pad_23 = LividRGB.OFF
Pad_24 = LividRGB.OFF
Pad_25 = LividRGB.OFF
Pad_26 = LividRGB.OFF
Pad_27 = LividRGB.OFF
Pad_28 = LividRGB.OFF
Pad_29 = LividRGB.OFF
Pad_30 = LividRGB.OFF
Pad_31 = LividRGB.OFF
class Channel_11:
Pad_0 = LividRGB.OFF
Pad_1 = LividRGB.OFF
Pad_2 = LividRGB.OFF
Pad_3 = LividRGB.OFF
Pad_4 = LividRGB.OFF
Pad_5 = LividRGB.OFF
Pad_6 = LividRGB.OFF
Pad_7 = LividRGB.OFF
Pad_8 = LividRGB.OFF
Pad_9 = LividRGB.OFF
Pad_10 = LividRGB.OFF
Pad_11 = LividRGB.OFF
Pad_12 = LividRGB.OFF
Pad_13 = LividRGB.OFF
Pad_14 = LividRGB.OFF
Pad_15 = LividRGB.OFF
Pad_16 = LividRGB.OFF
Pad_17 = LividRGB.OFF
Pad_18 = LividRGB.OFF
Pad_19 = LividRGB.OFF
Pad_20 = LividRGB.OFF
Pad_21 = LividRGB.OFF
Pad_22 = LividRGB.OFF
Pad_23 = LividRGB.OFF
Pad_24 = LividRGB.OFF
Pad_25 = LividRGB.OFF
Pad_26 = LividRGB.OFF
Pad_27 = LividRGB.OFF
Pad_28 = LividRGB.OFF
Pad_29 = LividRGB.OFF
Pad_30 = LividRGB.OFF
Pad_31 = LividRGB.OFF
class Channel_12:
Pad_0 = LividRGB.OFF
Pad_1 = LividRGB.OFF
Pad_2 = LividRGB.OFF
Pad_3 = LividRGB.OFF
Pad_4 = LividRGB.OFF
Pad_5 = LividRGB.OFF
Pad_6 = LividRGB.OFF
Pad_7 = LividRGB.OFF
Pad_8 = LividRGB.OFF
Pad_9 = LividRGB.OFF
Pad_10 = LividRGB.OFF
Pad_11 = LividRGB.OFF
Pad_12 = LividRGB.OFF
Pad_13 = LividRGB.OFF
Pad_14 = LividRGB.OFF
Pad_15 = LividRGB.OFF
Pad_16 = LividRGB.OFF
Pad_17 = LividRGB.OFF
Pad_18 = LividRGB.OFF
Pad_19 = LividRGB.OFF
Pad_20 = LividRGB.OFF
Pad_21 = LividRGB.OFF
Pad_22 = LividRGB.OFF
Pad_23 = LividRGB.OFF
Pad_24 = LividRGB.OFF
Pad_25 = LividRGB.OFF
Pad_26 = LividRGB.OFF
Pad_27 = LividRGB.OFF
Pad_28 = LividRGB.OFF
Pad_29 = LividRGB.OFF
Pad_30 = LividRGB.OFF
Pad_31 = LividRGB.OFF
class Channel_13:
Pad_0 = LividRGB.OFF
Pad_1 = LividRGB.OFF
Pad_2 = LividRGB.OFF
Pad_3 = LividRGB.OFF
Pad_4 = LividRGB.OFF
Pad_5 = LividRGB.OFF
Pad_6 = LividRGB.OFF
Pad_7 = LividRGB.OFF
Pad_8 = LividRGB.OFF
Pad_9 = LividRGB.OFF
Pad_10 = LividRGB.OFF
Pad_11 = LividRGB.OFF
Pad_12 = LividRGB.OFF
Pad_13 = LividRGB.OFF
Pad_14 = LividRGB.OFF
Pad_15 = LividRGB.OFF
Pad_16 = LividRGB.OFF
Pad_17 = LividRGB.OFF
Pad_18 = LividRGB.OFF
Pad_19 = LividRGB.OFF
Pad_20 = LividRGB.OFF
Pad_21 = LividRGB.OFF
Pad_22 = LividRGB.OFF
Pad_23 = LividRGB.OFF
Pad_24 = LividRGB.OFF
Pad_25 = LividRGB.OFF
Pad_26 = LividRGB.OFF
Pad_27 = LividRGB.OFF
Pad_28 = LividRGB.OFF
Pad_29 = LividRGB.OFF
Pad_30 = LividRGB.OFF
Pad_31 = LividRGB.OFF
|
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
import unittest
import os
from pathlib import Path
import json
import gzip
import numpy as np
import warnings
from shutil import copyfile, copyfileobj
from monty.tempfile import ScratchDir
import xml.etree.cElementTree as ET
from pymatgen.core.periodic_table import Element
from pymatgen.electronic_structure.core import OrbitalType
from pymatgen.io.vasp.inputs import Kpoints, Poscar
from pymatgen.io.vasp.outputs import Chgcar, Locpot, Oszicar, Outcar, \
Vasprun, Procar, Xdatcar, Dynmat, BSVasprun, UnconvergedVASPWarning, \
VaspParserError, Wavecar, Waveder, Elfcar
from pymatgen import Spin, Orbital, Lattice, Structure
from pymatgen.entries.compatibility import MaterialsProjectCompatibility
from pymatgen.electronic_structure.core import Magmom
from pymatgen.util.testing import PymatgenTest
"""
Created on Jul 16, 2012
"""
__author__ = "Shyue Ping Ong, Stephen Dacek, Mark Turiansky"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyue@mit.edu"
__date__ = "Jul 16, 2012"
class VasprunTest(PymatgenTest):
_multiprocess_shared_ = True
def setUp(self):
warnings.simplefilter("ignore")
def tearDown(self):
warnings.simplefilter("default")
def test_multiple_dielectric(self):
v = Vasprun(self.TEST_FILES_DIR / "vasprun.GW0.xml")
self.assertEqual(len(v.other_dielectric), 3)
def test_charge_charge_dielectric(self):
"""
VASP 5.4.4 writes out two dielectric functions to vasprun.xml
These are the "density-density" and "velocity-velocity" linear response functions.
See the comments in `linear_optics.F` for details.
"""
v = Vasprun(self.TEST_FILES_DIR / "vasprun.xml.dielectric_5.4.4",
parse_potcar_file=False)
self.assertEqual(v.dielectric is not None, True)
self.assertEqual('density' in v.dielectric_data, True)
self.assertEqual('velocity' in v.dielectric_data, True)
def test_optical_absorption_coeff(self):
v = Vasprun(self.TEST_FILES_DIR / "vasprun.BSE.xml.gz")
absorption_coeff = v.optical_absorption_coeff
self.assertEqual(absorption_coeff[1], 24966408728.917931)
def test_vasprun_with_more_than_two_unlabelled_dielectric_functions(self):
with self.assertRaises(NotImplementedError):
Vasprun(self.TEST_FILES_DIR / "vasprun.xml.dielectric_bad",
parse_potcar_file=False)
def test_bad_vasprun(self):
self.assertRaises(ET.ParseError,
Vasprun, self.TEST_FILES_DIR / "bad_vasprun.xml")
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
# Trigger a warning.
v = Vasprun(self.TEST_FILES_DIR / "bad_vasprun.xml",
exception_on_bad_xml=False)
# Verify some things
self.assertEqual(len(v.ionic_steps), 1)
self.assertAlmostEqual(v.final_energy, -269.00551374)
self.assertTrue(issubclass(w[-1].category,
UserWarning))
def test_runtype(self):
v = Vasprun(self.TEST_FILES_DIR / "vasprun.xml.hse06")
self.assertIn(v.run_type, "HSE06")
def test_vdw(self):
v = Vasprun(self.TEST_FILES_DIR / "vasprun.xml.vdw")
self.assertAlmostEqual(v.final_energy, -9.78310677)
def test_nonlmn(self):
filepath = self.TEST_FILES_DIR / 'vasprun.xml.nonlm'
vasprun = Vasprun(filepath, parse_potcar_file=False)
orbs = list(vasprun.complete_dos.pdos[vasprun.final_structure[
0]].keys())
self.assertIn(OrbitalType.s, orbs)
def test_standard(self):
filepath = self.TEST_FILES_DIR / 'vasprun.xml'
vasprun = Vasprun(filepath, parse_potcar_file=False)
# Test NELM parsing.
self.assertEqual(vasprun.parameters["NELM"], 60)
# test pdos parsing
pdos0 = vasprun.complete_dos.pdos[vasprun.final_structure[0]]
self.assertAlmostEqual(pdos0[Orbital.s][Spin.up][16], 0.0026)
self.assertAlmostEqual(pdos0[Orbital.pz][Spin.down][16], 0.0012)
self.assertEqual(pdos0[Orbital.s][Spin.up].shape, (301,))
filepath2 = self.TEST_FILES_DIR / 'lifepo4.xml'
vasprun_ggau = Vasprun(filepath2, parse_projected_eigen=True,
parse_potcar_file=False)
totalscsteps = sum([len(i['electronic_steps'])
for i in vasprun.ionic_steps])
self.assertEqual(29, len(vasprun.ionic_steps))
self.assertEqual(len(vasprun.structures), len(vasprun.ionic_steps))
self.assertEqual(vasprun.lattice,
vasprun.lattice_rec.reciprocal_lattice)
for i, step in enumerate(vasprun.ionic_steps):
self.assertEqual(vasprun.structures[i], step["structure"])
self.assertTrue(all([vasprun.structures[i] == vasprun.ionic_steps[i][
"structure"] for i in range(len(vasprun.ionic_steps))]))
self.assertEqual(308, totalscsteps,
"Incorrect number of energies read from vasprun.xml")
self.assertEqual(['Li'] + 4 * ['Fe'] + 4 * ['P'] + 16 * ["O"],
vasprun.atomic_symbols)
self.assertEqual(vasprun.final_structure.composition.reduced_formula,
"LiFe4(PO4)4")
self.assertIsNotNone(vasprun.incar, "Incar cannot be read")
self.assertIsNotNone(vasprun.kpoints, "Kpoints cannot be read")
self.assertIsNotNone(vasprun.eigenvalues, "Eigenvalues cannot be read")
self.assertAlmostEqual(vasprun.final_energy, -269.38319884, 7)
self.assertAlmostEqual(vasprun.tdos.get_gap(), 2.0589, 4)
expectedans = (2.539, 4.0906, 1.5516, False)
(gap, cbm, vbm, direct) = vasprun.eigenvalue_band_properties
self.assertAlmostEqual(gap, expectedans[0])
self.assertAlmostEqual(cbm, expectedans[1])
self.assertAlmostEqual(vbm, expectedans[2])
self.assertEqual(direct, expectedans[3])
self.assertFalse(vasprun.is_hubbard)
self.assertEqual(vasprun.potcar_symbols,
['PAW_PBE Li 17Jan2003', 'PAW_PBE Fe 06Sep2000',
'PAW_PBE Fe 06Sep2000', 'PAW_PBE P 17Jan2003',
'PAW_PBE O 08Apr2002'])
self.assertIsNotNone(vasprun.kpoints, "Kpoints cannot be read")
self.assertIsNotNone(vasprun.actual_kpoints,
"Actual kpoints cannot be read")
self.assertIsNotNone(vasprun.actual_kpoints_weights,
"Actual kpoints weights cannot be read")
for atomdoses in vasprun.pdos:
for orbitaldos in atomdoses:
self.assertIsNotNone(orbitaldos, "Partial Dos cannot be read")
# test skipping ionic steps.
vasprun_skip = Vasprun(filepath, 3, parse_potcar_file=False)
self.assertEqual(vasprun_skip.nionic_steps, 29)
self.assertEqual(len(vasprun_skip.ionic_steps),
int(vasprun.nionic_steps / 3) + 1)
self.assertEqual(len(vasprun_skip.ionic_steps),
len(vasprun_skip.structures))
self.assertEqual(len(vasprun_skip.ionic_steps),
int(vasprun.nionic_steps / 3) + 1)
# Check that nionic_steps is preserved no matter what.
self.assertEqual(vasprun_skip.nionic_steps,
vasprun.nionic_steps)
self.assertNotAlmostEqual(vasprun_skip.final_energy,
vasprun.final_energy)
# Test with ionic_step_offset
vasprun_offset = Vasprun(filepath, 3, 6, parse_potcar_file=False)
self.assertEqual(len(vasprun_offset.ionic_steps),
int(len(vasprun.ionic_steps) / 3) - 1)
self.assertEqual(vasprun_offset.structures[0],
vasprun_skip.structures[2])
self.assertTrue(vasprun_ggau.is_hubbard)
self.assertEqual(vasprun_ggau.hubbards["Fe"], 4.3)
self.assertAlmostEqual(vasprun_ggau.projected_eigenvalues[Spin.up][
0][0][96][0], 0.0032)
d = vasprun_ggau.as_dict()
self.assertEqual(d["elements"], ["Fe", "Li", "O", "P"])
self.assertEqual(d["nelements"], 4)
def test_unconverged(self):
filepath = self.TEST_FILES_DIR / 'vasprun.xml.unconverged'
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
# Trigger a warning.
vasprun_unconverged = Vasprun(filepath, parse_potcar_file=False)
# Verify some things
self.assertEqual(len(w), 1)
self.assertTrue(issubclass(w[-1].category,
UnconvergedVASPWarning))
self.assertTrue(vasprun_unconverged.converged_ionic)
self.assertFalse(vasprun_unconverged.converged_electronic)
self.assertFalse(vasprun_unconverged.converged)
def test_dfpt(self):
filepath = self.TEST_FILES_DIR / 'vasprun.xml.dfpt'
vasprun_dfpt = Vasprun(filepath, parse_potcar_file=False)
self.assertAlmostEqual(vasprun_dfpt.epsilon_static[0][0], 3.26105533)
self.assertAlmostEqual(vasprun_dfpt.epsilon_static[0][1], -0.00459066)
self.assertAlmostEqual(vasprun_dfpt.epsilon_static[2][2], 3.24330517)
self.assertAlmostEqual(vasprun_dfpt.epsilon_static_wolfe[0][0],
3.33402531)
self.assertAlmostEqual(vasprun_dfpt.epsilon_static_wolfe[0][1],
-0.00559998)
self.assertAlmostEqual(vasprun_dfpt.epsilon_static_wolfe[2][2],
3.31237357)
self.assertTrue(vasprun_dfpt.converged)
entry = vasprun_dfpt.get_computed_entry()
entry = MaterialsProjectCompatibility(
check_potcar_hash=False).process_entry(entry)
self.assertAlmostEqual(entry.uncorrected_energy + entry.correction,
entry.energy)
def test_dfpt_ionic(self):
filepath = self.TEST_FILES_DIR / 'vasprun.xml.dfpt.ionic'
vasprun_dfpt_ionic = Vasprun(filepath, parse_potcar_file=False)
self.assertAlmostEqual(vasprun_dfpt_ionic.epsilon_ionic[0][0],
515.73485838)
self.assertAlmostEqual(vasprun_dfpt_ionic.epsilon_ionic[0][1],
-0.00263523)
self.assertAlmostEqual(vasprun_dfpt_ionic.epsilon_ionic[2][2],
19.02110169)
def test_dfpt_unconverged(self):
filepath = self.TEST_FILES_DIR / 'vasprun.xml.dfpt.unconverged'
vasprun_dfpt_unconv = Vasprun(filepath, parse_potcar_file=False)
self.assertFalse(vasprun_dfpt_unconv.converged_electronic)
self.assertTrue(vasprun_dfpt_unconv.converged_ionic)
self.assertFalse(vasprun_dfpt_unconv.converged)
def test_uniform(self):
vasprun_uniform = Vasprun(self.TEST_FILES_DIR / "vasprun.xml.uniform",
parse_potcar_file=False)
self.assertEqual(vasprun_uniform.kpoints.style,
Kpoints.supported_modes.Reciprocal)
def test_no_projected(self):
vasprun_no_pdos = Vasprun(self.TEST_FILES_DIR / "Li_no_projected.xml",
parse_potcar_file=False)
self.assertIsNotNone(vasprun_no_pdos.complete_dos)
self.assertFalse(vasprun_no_pdos.dos_has_errors)
def test_dielectric(self):
vasprun_diel = Vasprun(self.TEST_FILES_DIR / "vasprun.xml.dielectric",
parse_potcar_file=False)
self.assertAlmostEqual(0.4294, vasprun_diel.dielectric[0][10])
self.assertAlmostEqual(19.941, vasprun_diel.dielectric[1][51][0])
self.assertAlmostEqual(19.941, vasprun_diel.dielectric[1][51][1])
self.assertAlmostEqual(19.941, vasprun_diel.dielectric[1][51][2])
self.assertAlmostEqual(0.0, vasprun_diel.dielectric[1][51][3])
self.assertAlmostEqual(34.186, vasprun_diel.dielectric[2][85][0])
self.assertAlmostEqual(34.186, vasprun_diel.dielectric[2][85][1])
self.assertAlmostEqual(34.186, vasprun_diel.dielectric[2][85][2])
self.assertAlmostEqual(0.0, vasprun_diel.dielectric[2][85][3])
def test_indirect_vasprun(self):
v = Vasprun(self.TEST_FILES_DIR / "vasprun.xml.indirect.gz")
(gap, cbm, vbm, direct) = v.eigenvalue_band_properties
self.assertFalse(direct)
def test_optical_vasprun(self):
vasprun_optical = Vasprun(self.TEST_FILES_DIR / "vasprun.xml.opticaltransitions",
parse_potcar_file=False)
self.assertAlmostEqual(3.084, vasprun_optical.optical_transition[0][0])
self.assertAlmostEqual(3.087, vasprun_optical.optical_transition[3][0])
self.assertAlmostEqual(0.001, vasprun_optical.optical_transition[0][1])
self.assertAlmostEqual(0.001, vasprun_optical.optical_transition[1][1])
self.assertAlmostEqual(0.001, vasprun_optical.optical_transition[7][1])
self.assertAlmostEqual(0.001, vasprun_optical.optical_transition[19][1])
self.assertAlmostEqual(3.3799999999,
vasprun_optical.optical_transition[54][0])
self.assertAlmostEqual(3.381, vasprun_optical.optical_transition[55][0])
self.assertAlmostEqual(3.381, vasprun_optical.optical_transition[56][0])
self.assertAlmostEqual(10554.9860,
vasprun_optical.optical_transition[54][1])
self.assertAlmostEqual(0.0, vasprun_optical.optical_transition[55][1])
self.assertAlmostEqual(0.001, vasprun_optical.optical_transition[56][1])
def test_force_constants(self):
vasprun_fc = Vasprun(self.TEST_FILES_DIR / "vasprun.xml.dfpt.phonon",
parse_potcar_file=False)
fc_ans = [[-0.00184451, -0., -0.],
[-0., -0.00933824, -0.03021279],
[-0., -0.03021279, 0.01202547]]
nm_ans = [[0.0884346, -0.08837289, -0.24995639],
[-0.0884346, 0.08837289, 0.24995639],
[0.15306645, -0.05105771, -0.14441306],
[-0.15306645, 0.05105771, 0.14441306],
[-0.0884346, 0.08837289, 0.24995639],
[0.0884346, -0.08837289, -0.24995639],
[-0.15306645, 0.05105771, 0.14441306],
[0.15306645, -0.05105771, -0.14441306],
[-0.0884346, 0.08837289, 0.24995639],
[0.0884346, -0.08837289, -0.24995639],
[-0.15306645, 0.05105771, 0.14441306],
[0.15306645, -0.05105771, -0.14441306],
[0.0884346, -0.08837289, -0.24995639],
[-0.0884346, 0.08837289, 0.24995639],
[0.15306645, -0.05105771, -0.14441306],
[-0.15306645, 0.05105771, 0.14441306]]
nm_eigenval_ans = [-0.59067079, -0.59067079, -0.59067003, -0.59067003,
-0.59067003, -0.59067003, -0.585009, -0.585009,
-0.58500895, -0.58500883, -0.5062956, -0.5062956]
self.assertEqual(vasprun_fc.force_constants.shape, (16, 16, 3, 3))
self.assertTrue(np.allclose(vasprun_fc.force_constants[8, 9], fc_ans))
self.assertEqual(vasprun_fc.normalmode_eigenvals.size, 48)
self.assertTrue(np.allclose(vasprun_fc.normalmode_eigenvals[17:29],
nm_eigenval_ans))
self.assertEqual(vasprun_fc.normalmode_eigenvecs.shape, (48, 16, 3))
self.assertTrue(
np.allclose(vasprun_fc.normalmode_eigenvecs[33], nm_ans))
def test_Xe(self):
vr = Vasprun(self.TEST_FILES_DIR / 'vasprun.xml.xe',
parse_potcar_file=False)
self.assertEqual(vr.atomic_symbols, ['Xe'])
def test_invalid_element(self):
self.assertRaises(ValueError, Vasprun,
self.TEST_FILES_DIR / 'vasprun.xml.wrong_sp')
def test_selective_dynamics(self):
vsd = Vasprun(self.TEST_FILES_DIR / 'vasprun.xml.indirect.gz')
np.testing.assert_array_equal(
vsd.final_structure.site_properties.get('selective_dynamics'),
[[True] * 3, [False] * 3], "Selective dynamics parsing error")
def test_as_dict(self):
filepath = self.TEST_FILES_DIR / 'vasprun.xml'
vasprun = Vasprun(filepath, parse_potcar_file=False)
# Test that as_dict() is json-serializable
self.assertIsNotNone(json.dumps(vasprun.as_dict()))
self.assertEqual(
vasprun.as_dict()["input"]["potcar_type"],
['PAW_PBE', 'PAW_PBE', 'PAW_PBE', 'PAW_PBE', 'PAW_PBE'])
self.assertEqual(vasprun.as_dict()['input']['nkpoints'], 24)
def test_get_band_structure(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore")
filepath = self.TEST_FILES_DIR / 'vasprun_Si_bands.xml'
vasprun = Vasprun(filepath,
parse_projected_eigen=True,
parse_potcar_file=False)
bs = vasprun.get_band_structure(kpoints_filename=self.TEST_FILES_DIR / 'KPOINTS_Si_bands')
cbm = bs.get_cbm()
vbm = bs.get_vbm()
self.assertEqual(cbm['kpoint_index'], [13],
"wrong cbm kpoint index")
            self.assertAlmostEqual(cbm['energy'], 6.2301, msg="wrong cbm energy")
self.assertEqual(cbm['band_index'], {Spin.up: [4], Spin.down: [4]},
"wrong cbm bands")
self.assertEqual(vbm['kpoint_index'], [0, 63, 64])
            self.assertAlmostEqual(vbm['energy'], 5.6158, msg="wrong vbm energy")
self.assertEqual(vbm['band_index'], {Spin.up: [1, 2, 3],
Spin.down: [1, 2, 3]},
"wrong vbm bands")
self.assertEqual(vbm['kpoint'].label, "\\Gamma", "wrong vbm label")
self.assertEqual(cbm['kpoint'].label, None, "wrong cbm label")
projected = bs.get_projection_on_elements()
self.assertAlmostEqual(projected[Spin.up][0][0]["Si"], 0.4238)
projected = bs.get_projections_on_elements_and_orbitals(
{"Si": ["s"]})
self.assertAlmostEqual(projected[Spin.up][0][0]["Si"]["s"], 0.4238)
# Test compressed files case 1: compressed KPOINTS in current dir
with ScratchDir("./"):
copyfile(self.TEST_FILES_DIR / 'vasprun_Si_bands.xml',
'vasprun.xml')
# Check for error if no KPOINTS file
vasprun = Vasprun('vasprun.xml',
parse_projected_eigen=True,
parse_potcar_file=False)
with self.assertRaises(VaspParserError):
_ = vasprun.get_band_structure(line_mode=True)
            # Check KPOINTS.gz successfully inferred and used if present
with open(self.TEST_FILES_DIR / 'KPOINTS_Si_bands', 'rb') as f_in:
with gzip.open('KPOINTS.gz', 'wb') as f_out:
copyfileobj(f_in, f_out)
bs_kpts_gzip = vasprun.get_band_structure()
self.assertEqual(bs.efermi, bs_kpts_gzip.efermi)
self.assertEqual(bs.as_dict(), bs_kpts_gzip.as_dict())
# Test compressed files case 2: compressed vasprun in another dir
with ScratchDir("./"):
os.mkdir('deeper')
copyfile(self.TEST_FILES_DIR / 'KPOINTS_Si_bands', Path('deeper') / 'KPOINTS')
with open(self.TEST_FILES_DIR / 'vasprun_Si_bands.xml', 'rb') as f_in:
with gzip.open(os.path.join('deeper', 'vasprun.xml.gz'),
'wb') as f_out:
copyfileobj(f_in, f_out)
vasprun = Vasprun(os.path.join('deeper', 'vasprun.xml.gz'),
parse_projected_eigen=True,
parse_potcar_file=False)
bs_vasprun_gzip = vasprun.get_band_structure(line_mode=True)
self.assertEqual(bs.efermi, bs_vasprun_gzip.efermi)
self.assertEqual(bs.as_dict(), bs_vasprun_gzip.as_dict())
# test hybrid band structures
vasprun.actual_kpoints_weights[-1] = 0.
bs = vasprun.get_band_structure(kpoints_filename=self.TEST_FILES_DIR / 'KPOINTS_Si_bands')
cbm = bs.get_cbm()
vbm = bs.get_vbm()
self.assertEqual(cbm['kpoint_index'], [0])
self.assertAlmostEqual(cbm['energy'], 6.3676)
self.assertEqual(cbm['kpoint'].label, None)
self.assertEqual(vbm['kpoint_index'], [0])
self.assertAlmostEqual(vbm['energy'], 2.8218)
self.assertEqual(vbm['kpoint'].label, None)
# test self-consistent band structure calculation for non-hybrid functionals
vasprun = Vasprun(self.TEST_FILES_DIR / "vasprun.xml.forcehybridlikecalc",
parse_projected_eigen=True,
parse_potcar_file=False)
bs = vasprun.get_band_structure(kpoints_filename=self.TEST_FILES_DIR / "KPOINTS.forcehybridlikecalc",
force_hybrid_mode=True, line_mode=True)
dict_to_test = bs.get_band_gap()
self.assertTrue(dict_to_test['direct'])
self.assertAlmostEqual(dict_to_test['energy'], 6.007899999999999)
self.assertEqual(dict_to_test['transition'], "\\Gamma-\\Gamma")
self.assertEqual(bs.get_branch(0)[0]['start_index'], 0)
self.assertEqual(bs.get_branch(0)[0]['end_index'], 0)
def test_sc_step_overflow(self):
filepath = self.TEST_FILES_DIR / 'vasprun.xml.sc_overflow'
# with warnings.catch_warnings(record=True) as w:
# warnings.simplefilter("always")
# vasprun = Vasprun(filepath)
# self.assertEqual(len(w), 3)
vasprun = Vasprun(filepath)
estep = vasprun.ionic_steps[0]['electronic_steps'][29]
self.assertTrue(np.isnan(estep['e_wo_entrp']))
def test_update_potcar(self):
filepath = self.TEST_FILES_DIR / 'vasprun.xml'
potcar_path = self.TEST_FILES_DIR / 'POTCAR.LiFePO4.gz'
potcar_path2 = self.TEST_FILES_DIR / 'POTCAR2.LiFePO4.gz'
vasprun = Vasprun(filepath, parse_potcar_file=False)
self.assertEqual(vasprun.potcar_spec,
[{"titel": "PAW_PBE Li 17Jan2003", "hash": None},
{"titel": "PAW_PBE Fe 06Sep2000", "hash": None},
{"titel": "PAW_PBE Fe 06Sep2000", "hash": None},
{"titel": "PAW_PBE P 17Jan2003", "hash": None},
{"titel": "PAW_PBE O 08Apr2002", "hash": None}])
vasprun.update_potcar_spec(potcar_path)
self.assertEqual(vasprun.potcar_spec, [{"titel": "PAW_PBE Li 17Jan2003",
"hash": "65e83282d1707ec078c1012afbd05be8"},
{"titel": "PAW_PBE Fe 06Sep2000",
"hash": "9530da8244e4dac17580869b4adab115"},
{"titel": "PAW_PBE Fe 06Sep2000",
"hash": "9530da8244e4dac17580869b4adab115"},
{"titel": "PAW_PBE P 17Jan2003",
"hash": "7dc3393307131ae67785a0cdacb61d5f"},
{"titel": "PAW_PBE O 08Apr2002",
"hash": "7a25bc5b9a5393f46600a4939d357982"}])
vasprun2 = Vasprun(filepath, parse_potcar_file=False)
self.assertRaises(ValueError, vasprun2.update_potcar_spec, potcar_path2)
vasprun = Vasprun(filepath, parse_potcar_file=potcar_path)
self.assertEqual(vasprun.potcar_spec, [{"titel": "PAW_PBE Li 17Jan2003",
"hash": "65e83282d1707ec078c1012afbd05be8"},
{"titel": "PAW_PBE Fe 06Sep2000",
"hash": "9530da8244e4dac17580869b4adab115"},
{"titel": "PAW_PBE Fe 06Sep2000",
"hash": "9530da8244e4dac17580869b4adab115"},
{"titel": "PAW_PBE P 17Jan2003",
"hash": "7dc3393307131ae67785a0cdacb61d5f"},
{"titel": "PAW_PBE O 08Apr2002",
"hash": "7a25bc5b9a5393f46600a4939d357982"}])
self.assertRaises(ValueError, Vasprun, filepath,
parse_potcar_file=potcar_path2)
def test_search_for_potcar(self):
filepath = self.TEST_FILES_DIR / 'vasprun.xml'
vasprun = Vasprun(filepath, parse_potcar_file=True)
self.assertEqual(vasprun.potcar_spec, [{"titel": "PAW_PBE Li 17Jan2003",
"hash": "65e83282d1707ec078c1012afbd05be8"},
{"titel": "PAW_PBE Fe 06Sep2000",
"hash": "9530da8244e4dac17580869b4adab115"},
{"titel": "PAW_PBE Fe 06Sep2000",
"hash": "9530da8244e4dac17580869b4adab115"},
{"titel": "PAW_PBE P 17Jan2003",
"hash": "7dc3393307131ae67785a0cdacb61d5f"},
{"titel": "PAW_PBE O 08Apr2002",
"hash": "7a25bc5b9a5393f46600a4939d357982"}])
def test_potcar_not_found(self):
filepath = self.TEST_FILES_DIR / 'vasprun.xml'
# Ensure no potcar is found and nothing is updated
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
vasprun = Vasprun(filepath, parse_potcar_file='.')
self.assertEqual(len(w), 2)
self.assertEqual(vasprun.potcar_spec, [{"titel": "PAW_PBE Li 17Jan2003", "hash": None},
{"titel": "PAW_PBE Fe 06Sep2000", "hash": None},
{"titel": "PAW_PBE Fe 06Sep2000", "hash": None},
{"titel": "PAW_PBE P 17Jan2003", "hash": None},
{"titel": "PAW_PBE O 08Apr2002", "hash": None}])
def test_parsing_chemical_shift_calculations(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore")
filepath = self.TEST_FILES_DIR / "nmr" / "cs" / "basic" / 'vasprun.xml.chemical_shift.scstep'
vasprun = Vasprun(filepath)
nestep = len(vasprun.ionic_steps[-1]['electronic_steps'])
self.assertEqual(nestep, 10)
self.assertTrue(vasprun.converged)
def test_parsing_efg_calcs(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore")
filepath = self.TEST_FILES_DIR / "nmr" / "efg" / "AlPO4" / 'vasprun.xml'
vasprun = Vasprun(filepath)
nestep = len(vasprun.ionic_steps[-1]['electronic_steps'])
self.assertEqual(nestep, 18)
self.assertTrue(vasprun.converged)
def test_charged_structure(self):
vpath = self.TEST_FILES_DIR / 'vasprun.charged.xml'
potcar_path = self.TEST_FILES_DIR / 'POT_GGA_PAW_PBE' / 'POTCAR.Si.gz'
vasprun = Vasprun(vpath, parse_potcar_file=False)
vasprun.update_charge_from_potcar(potcar_path)
self.assertEqual(vasprun.parameters.get("NELECT", 8), 9)
self.assertEqual(vasprun.structures[0].charge, 1)
vpath = self.TEST_FILES_DIR / 'vasprun.split.charged.xml'
potcar_path = self.TEST_FILES_DIR / 'POTCAR.split.charged.gz'
vasprun = Vasprun(vpath, parse_potcar_file=False)
vasprun.update_charge_from_potcar(potcar_path)
self.assertEqual(vasprun.parameters.get('NELECT', 0), 7)
self.assertEqual(vasprun.structures[-1].charge, 1)
def test_kpointset_electronvelocities(self):
vpath = self.TEST_FILES_DIR / 'vasprun.lvel.Si2H.xml'
vasprun = Vasprun(vpath, parse_potcar_file=False)
self.assertEqual(vasprun.eigenvalues[Spin.up].shape[0], len(vasprun.actual_kpoints))
class OutcarTest(PymatgenTest):
_multiprocess_shared_ = True
def test_init(self):
for f in ['OUTCAR', 'OUTCAR.gz']:
filepath = self.TEST_FILES_DIR / f
outcar = Outcar(filepath)
expected_mag = ({'d': 0.0, 'p': 0.003, 's': 0.002, 'tot': 0.005},
{'d': 0.798, 'p': 0.008, 's': 0.007, 'tot': 0.813},
{'d': 0.798, 'p': 0.008, 's': 0.007, 'tot': 0.813},
{'d': 0.0, 'p': -0.117, 's': 0.005, 'tot': -0.112},
{'d': 0.0, 'p': -0.165, 's': 0.004, 'tot': -0.162},
{'d': 0.0, 'p': -0.117, 's': 0.005, 'tot': -0.112},
{'d': 0.0, 'p': -0.165, 's': 0.004, 'tot': -0.162})
expected_chg = ({'p': 0.154, 's': 0.078, 'd': 0.0, 'tot': 0.232},
{'p': 0.707, 's': 0.463, 'd': 8.316, 'tot': 9.486},
{'p': 0.707, 's': 0.463, 'd': 8.316, 'tot': 9.486},
{'p': 3.388, 's': 1.576, 'd': 0.0, 'tot': 4.964},
{'p': 3.365, 's': 1.582, 'd': 0.0, 'tot': 4.947},
{'p': 3.388, 's': 1.576, 'd': 0.0, 'tot': 4.964},
{'p': 3.365, 's': 1.582, 'd': 0.0, 'tot': 4.947})
self.assertAlmostEqual(outcar.magnetization, expected_mag, 5,
"Wrong magnetization read from Outcar")
self.assertAlmostEqual(outcar.charge, expected_chg, 5,
"Wrong charge read from Outcar")
self.assertFalse(outcar.is_stopped)
self.assertEqual(outcar.run_stats, {'System time (sec)': 0.938,
'Total CPU time used (sec)': 545.142,
'Elapsed time (sec)': 546.709,
'Maximum memory used (kb)': 0.0,
'Average memory used (kb)': 0.0,
'User time (sec)': 544.204,
'cores': '8'})
self.assertAlmostEqual(outcar.efermi, 2.0112)
self.assertAlmostEqual(outcar.nelect, 44.9999991)
self.assertAlmostEqual(outcar.total_mag, 0.9999998)
self.assertIsNotNone(outcar.as_dict())
self.assertFalse(outcar.lepsilon)
toten = 0
for k in outcar.final_energy_contribs.keys():
toten += outcar.final_energy_contribs[k]
self.assertAlmostEqual(toten, outcar.final_energy, 6)
def test_stopped(self):
filepath = self.TEST_FILES_DIR / 'OUTCAR.stopped'
outcar = Outcar(filepath)
self.assertTrue(outcar.is_stopped)
for f in ['OUTCAR.lepsilon', 'OUTCAR.lepsilon.gz']:
filepath = self.TEST_FILES_DIR / f
outcar = Outcar(filepath)
self.assertTrue(outcar.lepsilon)
self.assertAlmostEqual(outcar.dielectric_tensor[0][0], 3.716432)
self.assertAlmostEqual(outcar.dielectric_tensor[0][1], -0.20464)
self.assertAlmostEqual(outcar.dielectric_tensor[1][2], -0.20464)
self.assertAlmostEqual(outcar.dielectric_ionic_tensor[0][0],
0.001419)
self.assertAlmostEqual(outcar.dielectric_ionic_tensor[0][2],
0.001419)
self.assertAlmostEqual(outcar.dielectric_ionic_tensor[2][2],
0.001419)
self.assertAlmostEqual(outcar.piezo_tensor[0][0], 0.52799)
self.assertAlmostEqual(outcar.piezo_tensor[1][3], 0.35998)
self.assertAlmostEqual(outcar.piezo_tensor[2][5], 0.35997)
self.assertAlmostEqual(outcar.piezo_ionic_tensor[0][0], 0.05868)
self.assertAlmostEqual(outcar.piezo_ionic_tensor[1][3], 0.06241)
self.assertAlmostEqual(outcar.piezo_ionic_tensor[2][5], 0.06242)
self.assertAlmostEqual(outcar.born[0][1][2], -0.385)
self.assertAlmostEqual(outcar.born[1][2][0], 0.36465)
self.assertAlmostEqual(outcar.internal_strain_tensor[0][0][0], -572.5437, places=4)
self.assertAlmostEqual(outcar.internal_strain_tensor[0][1][0], 683.2985, places=4)
self.assertAlmostEqual(outcar.internal_strain_tensor[0][1][3], 73.07059, places=4)
self.assertAlmostEqual(outcar.internal_strain_tensor[1][0][0], 570.98927, places=4)
self.assertAlmostEqual(outcar.internal_strain_tensor[1][1][0], -683.68519, places=4)
self.assertAlmostEqual(outcar.internal_strain_tensor[1][2][2], 570.98927, places=4)
def test_soc(self):
filepath = self.TEST_FILES_DIR / 'OUTCAR.NiO_SOC.gz'
outcar = Outcar(filepath)
expected_mag = (
{'s': Magmom([0.0, 0.0, -0.001]), 'p': Magmom([0.0, 0.0, -0.003]),
'd': Magmom([0.0, 0.0, 1.674]), 'tot': Magmom([0.0, 0.0, 1.671])},
{'s': Magmom([0.0, 0.0, 0.001]), 'p': Magmom([0.0, 0.0, 0.003]),
'd': Magmom([0.0, 0.0, -1.674]),
'tot': Magmom([0.0, 0.0, -1.671])},
{'s': Magmom([0.0, 0.0, 0.0]), 'p': Magmom([0.0, 0.0, 0.0]),
'd': Magmom([0.0, 0.0, 0.0]), 'tot': Magmom([0.0, 0.0, 0.0])},
{'s': Magmom([0.0, 0.0, 0.0]), 'p': Magmom([0.0, 0.0, 0.0]),
'd': Magmom([0.0, 0.0, 0.0]), 'tot': Magmom([0.0, 0.0, 0.0])}
)
# test note: Magmom class uses np.allclose() when testing for equality
# so fine to use assertEqual here
self.assertEqual(outcar.magnetization, expected_mag,
"Wrong vector magnetization read from Outcar for SOC calculation")
def test_polarization(self):
filepath = self.TEST_FILES_DIR / "OUTCAR.BaTiO3.polar"
outcar = Outcar(filepath)
self.assertEqual(outcar.spin, True)
self.assertEqual(outcar.noncollinear, False)
self.assertAlmostEqual(outcar.p_ion[0], 0.0)
self.assertAlmostEqual(outcar.p_ion[1], 0.0)
self.assertAlmostEqual(outcar.p_ion[2], -5.56684)
self.assertAlmostEqual(outcar.p_sp1[0], 2.00068)
self.assertAlmostEqual(outcar.p_sp2[0], -2.00044)
self.assertAlmostEqual(outcar.p_elec[0], 0.00024)
self.assertAlmostEqual(outcar.p_elec[1], 0.00019)
self.assertAlmostEqual(outcar.p_elec[2], 3.61674)
def test_pseudo_zval(self):
filepath = self.TEST_FILES_DIR / "OUTCAR.BaTiO3.polar"
outcar = Outcar(filepath)
self.assertDictEqual({'Ba': 10.00, 'Ti': 10.00, 'O': 6.00},
outcar.zval_dict)
def test_dielectric(self):
filepath = self.TEST_FILES_DIR / "OUTCAR.dielectric"
outcar = Outcar(filepath)
outcar.read_corrections()
self.assertAlmostEqual(outcar.data["dipol_quadrupol_correction"],
0.03565)
self.assertAlmostEqual(outcar.final_energy, -797.46760559)
def test_freq_dielectric(self):
filepath = self.TEST_FILES_DIR / "OUTCAR.LOPTICS"
outcar = Outcar(filepath)
outcar.read_freq_dielectric()
self.assertAlmostEqual(outcar.dielectric_energies[0], 0)
self.assertAlmostEqual(outcar.dielectric_energies[-1], 39.826101)
self.assertAlmostEqual(outcar.dielectric_tensor_function[0][0, 0],
8.96938800)
self.assertAlmostEqual(outcar.dielectric_tensor_function[-1][0, 0],
7.36167000e-01 + 1.53800000e-03j)
self.assertEqual(len(outcar.dielectric_energies),
len(outcar.dielectric_tensor_function))
np.testing.assert_array_equal(outcar.dielectric_tensor_function[0],
outcar.dielectric_tensor_function[
0].transpose())
plasma_freq = outcar.plasma_frequencies
self.assertArrayAlmostEqual(plasma_freq["intraband"], np.zeros((3, 3)))
self.assertArrayAlmostEqual(plasma_freq["interband"],
[[367.49, 63.939, 11.976],
[63.939, 381.155, -24.461],
[11.976, -24.461, 297.844]])
def test_freq_dielectric_vasp544(self):
filepath = self.TEST_FILES_DIR / "OUTCAR.LOPTICS.vasp544"
outcar = Outcar(filepath)
outcar.read_freq_dielectric()
self.assertAlmostEqual(outcar.dielectric_energies[0], 0)
self.assertAlmostEqual(outcar.dielectric_energies[-1], 39.63964)
self.assertAlmostEqual(outcar.dielectric_tensor_function[0][0, 0],
12.769435 + 0j)
self.assertAlmostEqual(outcar.dielectric_tensor_function[-1][0, 0],
0.828615 + 0.016594j)
self.assertEqual(len(outcar.dielectric_energies),
len(outcar.dielectric_tensor_function))
np.testing.assert_array_equal(outcar.dielectric_tensor_function[0],
outcar.dielectric_tensor_function[
0].transpose())
def test_read_elastic_tensor(self):
filepath = self.TEST_FILES_DIR / "OUTCAR.total_tensor.Li2O.gz"
outcar = Outcar(filepath)
outcar.read_elastic_tensor()
self.assertAlmostEqual(outcar.data["elastic_tensor"][0][0], 1986.3391)
self.assertAlmostEqual(outcar.data["elastic_tensor"][0][1], 187.8324)
self.assertAlmostEqual(outcar.data["elastic_tensor"][3][3], 586.3034)
def test_read_piezo_tensor(self):
filepath = self.TEST_FILES_DIR / "OUTCAR.lepsilon.gz"
outcar = Outcar(filepath)
outcar.read_piezo_tensor()
self.assertAlmostEqual(outcar.data["piezo_tensor"][0][0], 0.52799)
self.assertAlmostEqual(outcar.data["piezo_tensor"][1][3], 0.35998)
self.assertAlmostEqual(outcar.data["piezo_tensor"][2][5], 0.35997)
def test_core_state_eigen(self):
filepath = self.TEST_FILES_DIR / "OUTCAR.CL"
cl = Outcar(filepath).read_core_state_eigen()
self.assertAlmostEqual(cl[6]["2s"][-1], -174.4779)
filepath = self.TEST_FILES_DIR / "OUTCAR.icorelevel"
cl = Outcar(filepath).read_core_state_eigen()
self.assertAlmostEqual(cl[4]["3d"][-1], -31.4522)
def test_avg_core_poten(self):
filepath = self.TEST_FILES_DIR / "OUTCAR.lepsilon"
cp = Outcar(filepath).read_avg_core_poten()
self.assertAlmostEqual(cp[-1][1], -90.0487)
filepath = self.TEST_FILES_DIR / "OUTCAR"
cp = Outcar(filepath).read_avg_core_poten()
self.assertAlmostEqual(cp[0][6], -73.1068)
def test_single_atom(self):
filepath = self.TEST_FILES_DIR / "OUTCAR.Al"
outcar = Outcar(filepath)
expected_mag = ({u'p': 0.0, u's': 0.0, u'd': 0.0, u'tot': 0.0},)
expected_chg = ({u'p': 0.343, u's': 0.425, u'd': 0.0, u'tot': 0.768},)
self.assertAlmostEqual(outcar.magnetization, expected_mag)
self.assertAlmostEqual(outcar.charge, expected_chg)
self.assertFalse(outcar.is_stopped)
self.assertEqual(outcar.run_stats, {'System time (sec)': 0.592,
'Total CPU time used (sec)': 50.194,
'Elapsed time (sec)': 52.337,
'Maximum memory used (kb)': 62900.0,
'Average memory used (kb)': 0.0,
'User time (sec)': 49.602,
'cores': '32'})
self.assertAlmostEqual(outcar.efermi, 8.0942)
self.assertAlmostEqual(outcar.nelect, 3)
self.assertAlmostEqual(outcar.total_mag, 8.2e-06)
self.assertIsNotNone(outcar.as_dict())
def test_chemical_shielding(self):
filename = self.TEST_FILES_DIR / "nmr" / "cs" / "core.diff" / "hydromagnesite" / "OUTCAR"
outcar = Outcar(filename)
expected_chemical_shielding = [[191.9974, 69.5232, 0.6342],
[195.0808, 68.183, 0.833],
[192.0389, 69.5762, 0.6329],
[195.0844, 68.1756, 0.8336],
[192.005, 69.5289, 0.6339],
[195.0913, 68.1859, 0.833],
[192.0237, 69.565, 0.6333],
[195.0788, 68.1733, 0.8337]]
self.assertAlmostEqual(
len(outcar.data["chemical_shielding"]["valence_only"][20: 28]),
len(expected_chemical_shielding))
self.assertArrayAlmostEqual(outcar.data["chemical_shielding"]["valence_and_core"][20:28],
expected_chemical_shielding, decimal=5)
def test_chemical_shielding_with_different_core_contribution(self):
filename = self.TEST_FILES_DIR / "nmr" / "cs" / "core.diff" / "core.diff.chemical.shifts.OUTCAR"
outcar = Outcar(filename)
c_vo = outcar.data["chemical_shielding"]["valence_only"][7]
for x1, x2 in zip(list(c_vo),
[198.7009, 73.7484, 1.0000]):
self.assertAlmostEqual(x1, x2)
c_vc = outcar.data["chemical_shielding"]["valence_and_core"][7]
for x1, x2 in zip(list(c_vc),
[-1.9406, 73.7484, 1.0000]):
self.assertAlmostEqual(x1, x2)
def test_cs_raw_tensors(self):
filename = self.TEST_FILES_DIR / "nmr" / "cs" / "core.diff" / "core.diff.chemical.shifts.OUTCAR"
outcar = Outcar(filename)
unsym_tensors = outcar.data["unsym_cs_tensor"]
self.assertEqual(unsym_tensors[0],
[[-145.814605, -4.263425, 0.000301],
[4.263434, -145.812238, -8.7e-05],
[0.000136, -0.000189, -142.794068]])
self.assertEqual(unsym_tensors[29],
[[287.789318, -53.799325, 30.900024],
[-53.799571, 225.668117, -17.839598],
[3.801103, -2.195218, 88.896756]])
def test_cs_g0_contribution(self):
filename = self.TEST_FILES_DIR / "nmr" / "cs" / "core.diff" / "core.diff.chemical.shifts.OUTCAR"
outcar = Outcar(filename)
g0_contrib = outcar.data["cs_g0_contribution"]
self.assertEqual(g0_contrib,
[[-8.773535, 9e-06, 1e-06],
[1.7e-05, -8.773536, -0.0792],
[-6e-06, -0.008328, -9.320237]])
def test_cs_core_contribution(self):
filename = self.TEST_FILES_DIR / "nmr" / "cs" / "core.diff" / "core.diff.chemical.shifts.OUTCAR"
outcar = Outcar(filename)
core_contrib = outcar.data["cs_core_contribution"]
self.assertEqual(core_contrib,
{'Mg': -412.8248405,
'C': -200.5098812,
'O': -271.0766979})
def test_nmr_efg(self):
filename = self.TEST_FILES_DIR / "nmr" / "efg" / "AlPO4" / "OUTCAR"
outcar = Outcar(filename)
expected_efg = [
{'eta': 0.465, 'nuclear_quadrupole_moment': 146.6, 'cq': -5.573},
{'eta': 0.465, 'nuclear_quadrupole_moment': 146.6, 'cq': -5.573},
{'eta': 0.137, 'nuclear_quadrupole_moment': 146.6, 'cq': 6.327},
{'eta': 0.137, 'nuclear_quadrupole_moment': 146.6, 'cq': 6.327},
{'eta': 0.112, 'nuclear_quadrupole_moment': 146.6, 'cq': -7.453},
{'eta': 0.112, 'nuclear_quadrupole_moment': 146.6, 'cq': -7.453},
{'eta': 0.42, 'nuclear_quadrupole_moment': 146.6, 'cq': -5.58},
{'eta': 0.42, 'nuclear_quadrupole_moment': 146.6, 'cq': -5.58}]
self.assertEqual(len(outcar.data["efg"][2:10]), len(expected_efg))
for e1, e2 in zip(outcar.data["efg"][2:10], expected_efg):
for k in e1.keys():
self.assertAlmostEqual(e1[k], e2[k], places=5)
        expected_tensors = [[[11.11, 1.371, 2.652], [1.371, 3.635, -3.572], [2.652, -3.572, -14.746]],
[[11.11, -1.371, 2.652], [-1.371, 3.635, 3.572], [2.652, 3.572, -14.746]],
[[-3.098, 6.511, 7.732], [6.511, 1.419, 11.445], [7.732, 11.445, 1.678]],
[[-3.098, -6.511, 7.732], [-6.511, 1.419, -11.445], [7.732, -11.445, 1.678]],
[[2.344, -10.775, -7.006], [-10.775, -7.152, -11.309], [-7.006, -11.309, 4.808]],
[[2.344, 10.775, -7.006], [10.775, -7.152, 11.309], [-7.006, 11.309, 4.808]],
[[2.404, -0.588, -6.83], [-0.588, 10.435, 3.159], [-6.83, 3.159, -12.839]],
[[2.404, 0.588, -6.83], [0.588, 10.435, -3.159], [-6.83, -3.159, -12.839]]]
        self.assertEqual(len(outcar.data["unsym_efg_tensor"][2:10]), len(expected_tensors))
        for e1, e2 in zip(outcar.data["unsym_efg_tensor"][2:10], expected_tensors):
self.assertArrayAlmostEqual(e1, e2)
def test_read_fermi_contact_shift(self):
filepath = self.TEST_FILES_DIR / "OUTCAR_fc"
outcar = Outcar(filepath)
outcar.read_fermi_contact_shift()
self.assertAlmostEqual(outcar.data["fermi_contact_shift"][u'fch'][0][0],
-0.002)
self.assertAlmostEqual(outcar.data["fermi_contact_shift"][u'th'][0][0],
-0.052)
self.assertAlmostEqual(outcar.data["fermi_contact_shift"][u'dh'][0][0],
0.0)
def test_drift(self):
outcar = Outcar(self.TEST_FILES_DIR / "OUTCAR")
self.assertEqual(len(outcar.drift), 5)
self.assertAlmostEqual(np.sum(outcar.drift), 0)
outcar = Outcar(self.TEST_FILES_DIR / "OUTCAR.CL")
self.assertEqual(len(outcar.drift), 79)
self.assertAlmostEqual(np.sum(outcar.drift), 0.448010)
def test_electrostatic_potential(self):
outcar = Outcar(self.TEST_FILES_DIR / "OUTCAR")
self.assertEqual(outcar.ngf, [54, 30, 54])
self.assertTrue(
np.allclose(outcar.sampling_radii, [0.9748, 0.9791, 0.7215]))
self.assertTrue(np.allclose(outcar.electrostatic_potential,
[-26.0704, -45.5046, -45.5046, -72.9539,
-73.0621, -72.9539, -73.0621]))
def test_mag_electrostatic_error(self):
outcar = Outcar(self.TEST_FILES_DIR / "OUTCAR.electrostaticerror.gz")
self.assertEqual(outcar.electrostatic_potential,
[-21.1667, -19.6865, -22.3983, -22.3307, -20.5213, -20.9292, -21.5063, -21.3554, -21.74,
-21.7018, -20.3422, -20.6128, -21.4405, -21.0022, -21.975, -21.915, -21.0156, -21.9027,
-22.3712, -21.5816, -21.8535, -20.5061, -22.2474, -22.1904, -22.2203, -20.1727, -21.1068,
-20.1669, -22.1272, -21.3446, -82.4717, -83.035, -81.8289, -82.5957, -81.7813, -82.5011,
-82.6098, -82.2885, -81.606, -99.1621, -99.3146, -99.1742, -99.4728, -100.2139, -99.852,
-99.3575, -99.4135, -98.9092, -99.8867, -99.3707, -99.0794, -98.8376, -99.3656, -98.6474,
-99.3264, -98.844, -99.074, -98.9354, -99.1643, -99.2412, -68.7667, -68.2528, -66.7326,
-67.7113, -69.2228, -67.014, -69.1456, -67.3151, -68.2625, -67.6156, -69.8112, -68.9266,
-67.8286, -69.3289, -68.7017, -67.2834, -68.4665, -68.0188, -67.7083, -69.7195, -67.4078,
-67.9646, -68.584, -69.2387, -69.7822, -67.0701, -67.8236, -68.2468, -68.6533, -68.3218,
-67.5923, -69.1266, -68.4615, -68.302, -67.999, -68.6709, -68.9973, -67.4147, -68.4463,
-68.0899, -67.665, -69.6705, -68.6433, -68.4288, -66.9027, -67.3211, -68.604, -69.1299,
-67.5565, -69.0845, -67.4289, -66.6864, -67.6484, -67.9783, -67.7661, -66.9797, -67.8007,
-68.3194, -69.3671, -67.2708])
def test_onsite_density_matrix(self):
outcar = Outcar(self.TEST_FILES_DIR / "OUTCAR.LinearResponseU.gz")
matrices = outcar.data["onsite_density_matrices"]
self.assertEqual(matrices[0][Spin.up][0][0], 1.0227)
self.assertEqual(len(matrices[0][Spin.up]), 5)
self.assertEqual(len(matrices[0][Spin.up][0]), 5)
self.assertTrue("onsite_density_matrices" in outcar.as_dict())
class BSVasprunTest(PymatgenTest):
_multiprocess_shared_ = True
def test_get_band_structure(self):
filepath = self.TEST_FILES_DIR / 'vasprun_Si_bands.xml'
vasprun = BSVasprun(filepath, parse_potcar_file=False)
bs = vasprun.get_band_structure(
kpoints_filename=self.TEST_FILES_DIR / 'KPOINTS_Si_bands')
cbm = bs.get_cbm()
vbm = bs.get_vbm()
self.assertEqual(cbm['kpoint_index'], [13], "wrong cbm kpoint index")
self.assertAlmostEqual(cbm['energy'], 6.2301, "wrong cbm energy")
self.assertEqual(cbm['band_index'], {Spin.up: [4], Spin.down: [4]},
"wrong cbm bands")
self.assertEqual(vbm['kpoint_index'], [0, 63, 64])
self.assertAlmostEqual(vbm['energy'], 5.6158, "wrong vbm energy")
self.assertEqual(vbm['band_index'], {Spin.up: [1, 2, 3],
Spin.down: [1, 2, 3]},
"wrong vbm bands")
self.assertEqual(vbm['kpoint'].label, "\\Gamma", "wrong vbm label")
self.assertEqual(cbm['kpoint'].label, None, "wrong cbm label")
d = vasprun.as_dict()
self.assertIn("eigenvalues", d["output"])
class OszicarTest(PymatgenTest):
def test_init(self):
filepath = self.TEST_FILES_DIR / 'OSZICAR'
oszicar = Oszicar(filepath)
self.assertEqual(len(oszicar.electronic_steps),
len(oszicar.ionic_steps))
self.assertEqual(len(oszicar.all_energies), 60)
self.assertAlmostEqual(oszicar.final_energy, -526.63928)
class LocpotTest(PymatgenTest):
def test_init(self):
filepath = self.TEST_FILES_DIR / 'LOCPOT'
locpot = Locpot.from_file(filepath)
self.assertAlmostEqual(-217.05226954,
sum(locpot.get_average_along_axis(0)))
self.assertAlmostEqual(locpot.get_axis_grid(0)[-1], 2.87629, 2)
self.assertAlmostEqual(locpot.get_axis_grid(1)[-1], 2.87629, 2)
self.assertAlmostEqual(locpot.get_axis_grid(2)[-1], 2.87629, 2)
class ChgcarTest(PymatgenTest):
_multiprocess_shared_ = True
def test_init(self):
filepath = self.TEST_FILES_DIR / 'CHGCAR.nospin'
chg = Chgcar.from_file(filepath)
self.assertAlmostEqual(chg.get_integrated_diff(0, 2)[0, 1], 0)
filepath = self.TEST_FILES_DIR / 'CHGCAR.spin'
chg = Chgcar.from_file(filepath)
self.assertAlmostEqual(chg.get_integrated_diff(0, 1)[0, 1],
-0.0043896932237534022)
# test sum
chg += chg
self.assertAlmostEqual(chg.get_integrated_diff(0, 1)[0, 1],
-0.0043896932237534022 * 2)
filepath = self.TEST_FILES_DIR / 'CHGCAR.Fe3O4'
chg = Chgcar.from_file(filepath)
ans = [1.56472768, 3.25985108, 3.49205728, 3.66275028, 3.8045896,
5.10813352]
myans = chg.get_integrated_diff(0, 3, 6)
self.assertTrue(np.allclose(myans[:, 1], ans))
def test_write(self):
filepath = self.TEST_FILES_DIR / 'CHGCAR.spin'
chg = Chgcar.from_file(filepath)
chg.write_file("CHGCAR_pmg")
with open("CHGCAR_pmg") as f:
for i, line in enumerate(f):
if i == 22130:
self.assertEqual("augmentation occupancies 1 15\n", line)
if i == 44255:
self.assertEqual("augmentation occupancies 1 15\n", line)
os.remove("CHGCAR_pmg")
def test_soc_chgcar(self):
filepath = self.TEST_FILES_DIR / "CHGCAR.NiO_SOC.gz"
chg = Chgcar.from_file(filepath)
self.assertEqual(set(chg.data.keys()),
{'total', 'diff_x', 'diff_y', 'diff_z', 'diff'})
self.assertTrue(chg.is_soc)
self.assertEqual(chg.data['diff'].shape, chg.data['diff_y'].shape)
# check our construction of chg.data['diff'] makes sense
# this has been checked visually too and seems reasonable
self.assertEqual(abs(chg.data['diff'][0][0][0]),
np.linalg.norm([chg.data['diff_x'][0][0][0],
chg.data['diff_y'][0][0][0],
chg.data['diff_z'][0][0][0]]))
# and that the net magnetization is about zero
# note: we get ~ 0.08 here, seems a little high compared to
# vasp output, but might be due to chgcar limitations?
self.assertAlmostEqual(chg.net_magnetization, 0.0, places=0)
chg.write_file("CHGCAR_pmg_soc")
chg_from_file = Chgcar.from_file("CHGCAR_pmg_soc")
self.assertTrue(chg_from_file.is_soc)
os.remove("CHGCAR_pmg_soc")
def test_hdf5(self):
chgcar = Chgcar.from_file(self.TEST_FILES_DIR / "CHGCAR.NiO_SOC.gz")
chgcar.to_hdf5("chgcar_test.hdf5")
import h5py
with h5py.File("chgcar_test.hdf5", "r") as f:
self.assertArrayAlmostEqual(np.array(f["vdata"]["total"]),
chgcar.data["total"])
self.assertArrayAlmostEqual(np.array(f["vdata"]["diff"]),
chgcar.data["diff"])
self.assertArrayAlmostEqual(np.array(f["lattice"]),
chgcar.structure.lattice.matrix)
self.assertArrayAlmostEqual(np.array(f["fcoords"]),
chgcar.structure.frac_coords)
for z in f["Z"]:
self.assertIn(z, [Element.Ni.Z, Element.O.Z])
for sp in f["species"]:
self.assertIn(sp, ["Ni", "O"])
chgcar2 = Chgcar.from_hdf5("chgcar_test.hdf5")
self.assertArrayAlmostEqual(chgcar2.data["total"],
chgcar.data["total"])
os.remove("chgcar_test.hdf5")
def test_as_dict_and_from_dict(self):
chgcar = Chgcar.from_file(self.TEST_FILES_DIR / "CHGCAR.NiO_SOC.gz")
d = chgcar.as_dict()
chgcar_from_dict = Chgcar.from_dict(d)
self.assertArrayAlmostEqual(chgcar.data['total'], chgcar_from_dict.data['total'])
self.assertArrayAlmostEqual(chgcar.structure.lattice.matrix,
chgcar_from_dict.structure.lattice.matrix)
class ElfcarTest(PymatgenTest):
def test_init(self):
elfcar = Elfcar.from_file(self.TEST_FILES_DIR / 'ELFCAR.gz')
self.assertAlmostEqual(0.19076207645194002, np.mean(elfcar.data["total"]))
self.assertAlmostEqual(0.19076046677910055, np.mean(elfcar.data["diff"]))
def test_alpha(self):
elfcar = Elfcar.from_file(self.TEST_FILES_DIR / 'ELFCAR.gz')
alpha = elfcar.get_alpha()
self.assertAlmostEqual(2.936678808979031, np.median(alpha.data["total"]))
class ProcarTest(PymatgenTest):
_multiprocess_shared_ = True
def test_init(self):
filepath = self.TEST_FILES_DIR / 'PROCAR.simple'
p = Procar(filepath)
self.assertAlmostEqual(p.get_occupation(0, 'd')[Spin.up], 0)
self.assertAlmostEqual(p.get_occupation(0, 's')[Spin.up],
0.35381249999999997)
self.assertAlmostEqual(p.get_occupation(0, 'p')[Spin.up], 1.19540625)
self.assertRaises(ValueError, p.get_occupation, 1, 'm')
self.assertEqual(p.nbands, 10)
self.assertEqual(p.nkpoints, 10)
self.assertEqual(p.nions, 3)
lat = Lattice.cubic(3.)
s = Structure(lat, ["Li", "Na", "K"], [[0., 0., 0.],
[0.25, 0.25, 0.25],
[0.75, 0.75, 0.75]])
d = p.get_projection_on_elements(s)
self.assertAlmostEqual(d[Spin.up][2][2],
{'Na': 0.042, 'K': 0.646, 'Li': 0.042})
filepath = self.TEST_FILES_DIR / 'PROCAR'
p = Procar(filepath)
self.assertAlmostEqual(p.get_occupation(0, 'dxy')[Spin.up],
0.96214813853000025)
self.assertAlmostEqual(p.get_occupation(0, 'dxy')[Spin.down],
0.85796295426000124)
def test_phase_factors(self):
filepath = self.TEST_FILES_DIR / 'PROCAR.phase'
p = Procar(filepath)
self.assertAlmostEqual(p.phase_factors[Spin.up][0, 0, 0, 0],
-0.746 + 0.099j)
self.assertAlmostEqual(p.phase_factors[Spin.down][0, 0, 0, 0],
0.372 - 0.654j)
# Two Li should have same phase factor.
self.assertAlmostEqual(p.phase_factors[Spin.up][0, 0, 0, 0],
p.phase_factors[Spin.up][0, 0, 1, 0])
self.assertAlmostEqual(p.phase_factors[Spin.up][0, 0, 2, 0],
-0.053 + 0.007j)
self.assertAlmostEqual(p.phase_factors[Spin.down][0, 0, 2, 0],
0.027 - 0.047j)
# new style phase factors (VASP 5.4.4+)
filepath = self.TEST_FILES_DIR / 'PROCAR.new_format_5.4.4'
p = Procar(filepath)
self.assertAlmostEqual(p.phase_factors[Spin.up][0, 0, 0, 0], -0.13 + 0.199j)
class XdatcarTest(PymatgenTest):
def test_init(self):
filepath = self.TEST_FILES_DIR / 'XDATCAR_4'
x = Xdatcar(filepath)
structures = x.structures
self.assertEqual(len(structures), 4)
for s in structures:
self.assertEqual(s.formula, "Li2 O1")
filepath = self.TEST_FILES_DIR / 'XDATCAR_5'
x = Xdatcar(filepath)
structures = x.structures
self.assertEqual(len(structures), 4)
for s in structures:
self.assertEqual(s.formula, "Li2 O1")
x.concatenate(self.TEST_FILES_DIR / 'XDATCAR_4')
self.assertEqual(len(x.structures), 8)
self.assertIsNotNone(x.get_string())
class DynmatTest(PymatgenTest):
def test_init(self):
# nosetests pymatgen/io/vasp/tests/test_outputs.py:DynmatTest.test_init
filepath = self.TEST_FILES_DIR / 'DYNMAT'
d = Dynmat(filepath)
self.assertEqual(d.nspecs, 2)
self.assertEqual(d.natoms, 6)
self.assertEqual(d.ndisps, 3)
self.assertTrue(np.allclose(d.masses, [63.546, 196.966]))
self.assertTrue(4 in d.data)
self.assertTrue(2 in d.data[4])
self.assertTrue(np.allclose(
d.data[4][2]['dispvec'], [0., 0.05, 0.]
))
self.assertTrue(np.allclose(
d.data[4][2]['dynmat'][3], [0.055046, -0.298080, 0.]
))
# TODO: test get_phonon_frequencies once cross-checked
class WavecarTest(PymatgenTest):
_multiprocess_shared_ = True
def setUp(self):
a = np.array([[10.0, 0.0, 0.0], [0.0, 10.0, 0.0],
[0.0, 0.0, 10.0]])
self.vol = np.dot(a[0, :], np.cross(a[1, :], a[2, :]))
b = np.array([np.cross(a[1, :], a[2, :]),
np.cross(a[2, :], a[0, :]),
np.cross(a[0, :], a[1, :])])
self.b = 2 * np.pi * b / self.vol
self.a = a
self.w = Wavecar(self.TEST_FILES_DIR / 'WAVECAR.N2')
def test_standard(self):
w = self.w
a = np.array([[10.0, 0.0, 0.0], [0.0, 10.0, 0.0],
[0.0, 0.0, 10.0]])
vol = np.dot(a[0, :], np.cross(a[1, :], a[2, :]))
b = np.array([np.cross(a[1, :], a[2, :]),
np.cross(a[2, :], a[0, :]),
np.cross(a[0, :], a[1, :])])
b = 2 * np.pi * b / vol
self.assertEqual(w.filename, self.TEST_FILES_DIR / 'WAVECAR.N2')
self.assertAlmostEqual(w.efermi, -5.7232, places=4)
self.assertEqual(w.encut, 25)
self.assertEqual(w.nb, 9)
self.assertEqual(w.nk, 1)
self.assertTrue(np.allclose(w.a, a))
self.assertTrue(np.allclose(w.b, b))
self.assertAlmostEqual(w.vol, vol)
self.assertEqual(len(w.kpoints), w.nk)
self.assertEqual(len(w.coeffs), w.nk)
self.assertEqual(len(w.coeffs[0]), w.nb)
self.assertEqual(len(w.band_energy), w.nk)
self.assertEqual(w.band_energy[0].shape, (w.nb, 3))
self.assertLessEqual(len(w.Gpoints[0]), 257)
for k in range(w.nk):
for b in range(w.nb):
self.assertEqual(len(w.coeffs[k][b]),
len(w.Gpoints[k]))
with self.assertRaises(ValueError):
Wavecar(self.TEST_FILES_DIR / 'WAVECAR.N2.malformed')
import sys
from io import StringIO
saved_stdout = sys.stdout
try:
out = StringIO()
sys.stdout = out
Wavecar(self.TEST_FILES_DIR / 'WAVECAR.N2', verbose=True)
self.assertNotEqual(out.getvalue().strip(), '')
finally:
sys.stdout = saved_stdout
def test_n2_45210(self):
w = Wavecar(self.TEST_FILES_DIR / 'WAVECAR.N2.45210')
self.assertEqual(w.filename, self.TEST_FILES_DIR / 'WAVECAR.N2.45210')
self.assertAlmostEqual(w.efermi, -5.7232, places=4)
self.assertEqual(w.encut, 25)
self.assertEqual(w.nb, 9)
self.assertEqual(w.nk, 1)
self.assertTrue(np.allclose(w.a, self.a))
self.assertTrue(np.allclose(w.b, self.b))
self.assertAlmostEqual(w.vol, self.vol)
self.assertEqual(len(w.kpoints), w.nk)
self.assertEqual(len(w.coeffs), w.nk)
self.assertEqual(len(w.coeffs[0]), w.nb)
self.assertEqual(len(w.band_energy), w.nk)
self.assertEqual(w.band_energy[0].shape, (w.nb, 3))
self.assertLessEqual(len(w.Gpoints[0]), 257)
def test_n2_spin(self):
w = Wavecar(self.TEST_FILES_DIR / 'WAVECAR.N2.spin')
self.assertEqual(len(w.coeffs), 2)
self.assertEqual(len(w.band_energy), 2)
self.assertEqual(len(w.kpoints), w.nk)
self.assertEqual(len(w.Gpoints), w.nk)
self.assertEqual(len(w.coeffs[0][0]), w.nb)
self.assertEqual(len(w.band_energy[0]), w.nk)
temp_ggp = Wavecar._generate_G_points
try:
Wavecar._generate_G_points = lambda x, y: []
with self.assertRaises(ValueError):
Wavecar(self.TEST_FILES_DIR / 'WAVECAR.N2')
finally:
Wavecar._generate_G_points = temp_ggp
def test__generate_nbmax(self):
self.w._generate_nbmax()
self.assertEqual(self.w._nbmax.tolist(), [5, 5, 5])
def test__generate_G_points(self):
for k in range(self.w.nk):
kp = self.w.kpoints[k]
self.assertLessEqual(len(self.w._generate_G_points(kp)), 257)
def test_evaluate_wavefunc(self):
self.w.Gpoints.append(np.array([0, 0, 0]))
self.w.kpoints.append(np.array([0, 0, 0]))
self.w.coeffs.append([[1 + 1j]])
self.assertAlmostEqual(self.w.evaluate_wavefunc(-1, -1, [0, 0, 0]),
(1 + 1j) / np.sqrt(self.vol), places=4)
self.assertAlmostEqual(self.w.evaluate_wavefunc(0, 0, [0, 0, 0]),
np.sum(self.w.coeffs[0][0]) / np.sqrt(self.vol),
places=4)
def test_fft_mesh(self):
mesh = self.w.fft_mesh(0, 5)
ind = np.argmax(np.abs(mesh))
self.assertEqual(np.unravel_index(ind, mesh.shape), (14, 1, 1))
        self.assertEqual(mesh[tuple((self.w.ng / 2).astype(int))], 0j)
mesh = self.w.fft_mesh(0, 5, shift=False)
ind = np.argmax(np.abs(mesh))
self.assertEqual(np.unravel_index(ind, mesh.shape), (6, 8, 8))
self.assertEqual(mesh[0, 0, 0], 0j)
def test_get_parchg(self):
poscar = Poscar.from_file(self.TEST_FILES_DIR / 'POSCAR')
w = self.w
c = w.get_parchg(poscar, 0, 0, spin=0, phase=False)
self.assertTrue('total' in c.data)
self.assertTrue('diff' not in c.data)
self.assertEqual(np.prod(c.data['total'].shape), np.prod(w.ng * 2))
self.assertTrue(np.all(c.data['total'] > 0.))
c = w.get_parchg(poscar, 0, 0, spin=0, phase=True)
self.assertTrue('total' in c.data)
self.assertTrue('diff' not in c.data)
self.assertEqual(np.prod(c.data['total'].shape), np.prod(w.ng * 2))
self.assertFalse(np.all(c.data['total'] > 0.))
w.kpoints.append([0.2, 0.2, 0.2])
with warnings.catch_warnings(record=True) as wrns:
try:
c = w.get_parchg(poscar, 1, 0, spin=0, phase=True)
except IndexError:
pass
self.assertEqual(len(wrns), 1)
w = Wavecar(self.TEST_FILES_DIR / 'WAVECAR.N2.spin')
c = w.get_parchg(poscar, 0, 0, phase=False, scale=1)
self.assertTrue('total' in c.data)
self.assertTrue('diff' in c.data)
self.assertEqual(np.prod(c.data['total'].shape), np.prod(w.ng))
self.assertTrue(np.all(c.data['total'] > 0.))
self.assertFalse(np.all(c.data['diff'] > 0.))
c = w.get_parchg(poscar, 0, 0, spin=0, phase=False)
self.assertTrue('total' in c.data)
self.assertTrue('diff' not in c.data)
self.assertEqual(np.prod(c.data['total'].shape), np.prod(w.ng * 2))
self.assertTrue(np.all(c.data['total'] > 0.))
c = w.get_parchg(poscar, 0, 0, spin=0, phase=True)
self.assertTrue('total' in c.data)
self.assertTrue('diff' not in c.data)
self.assertEqual(np.prod(c.data['total'].shape), np.prod(w.ng * 2))
self.assertFalse(np.all(c.data['total'] > 0.))
class WavederTest(PymatgenTest):
_multiprocess_shared_ = True
def setUp(self):
wder = Waveder(self.TEST_FILES_DIR / 'WAVEDER', gamma_only = True)
self.assertEqual(wder.nbands, 36)
self.assertEqual(wder.nkpoints, 56)
self.assertEqual(wder.nelect, 8)
band_i = 0
band_j = 0
kp_index = 0
spin_index = 0
cart_dir_index = 0
cder = wder.get_orbital_derivative_between_states(band_i, band_j, kp_index, spin_index, cart_dir_index)
self.assertAlmostEqual(cder, -1.33639226092e-103, places=114)
def test_consistency(self):
wder = Waveder(self.TEST_FILES_DIR / 'WAVEDER.Si')
wderf = np.loadtxt(self.TEST_FILES_DIR / 'WAVEDERF.Si', skiprows = 1)
with open(self.TEST_FILES_DIR / 'WAVEDERF.Si', 'r') as f:
first_line = [int(a) for a in f.readline().split()]
self.assertEqual(wder.nkpoints, first_line[1])
self.assertEqual(wder.nbands, first_line[2])
for i in range(10):
self.assertAlmostEqual(
first = wder.get_orbital_derivative_between_states(0,i,0,0,0).real,
second = wderf[i,6],
places = 10
)
self.assertAlmostEqual(wder.cder_data[0,i,0,0,0].real, wderf[i,6], places = 10)
self.assertAlmostEqual(wder.cder_data[0,i,0,0,0].imag, wderf[i,7], places = 10)
self.assertAlmostEqual(wder.cder_data[0,i,0,0,1].real, wderf[i,8], places = 10)
self.assertAlmostEqual(wder.cder_data[0,i,0,0,1].imag, wderf[i,9], places = 10)
self.assertAlmostEqual(wder.cder_data[0,i,0,0,2].real, wderf[i,10], places = 10)
self.assertAlmostEqual(wder.cder_data[0,i,0,0,2].imag, wderf[i,11], places = 10)
if __name__ == "__main__":
unittest.main()
|
|
#! /usr/bin/env python
"""
Protein-Ligand Interaction Profiler - Analyze and visualize protein-ligand interactions in PDB files.
plipcmd - Main script for PLIP command line execution.
Copyright 2014-2015 Sebastian Salentin
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
modified by Li Tuan
"""
# Compatibility
from __future__ import print_function
# Own modules
try:
from plip.modules.preparation import *
from plip.modules.visualize import visualize_in_pymol
from plip.modules.plipremote import VisualizerData
from plip.modules.report import StructureReport,__version__
from plip.modules import config
from plip.modules.mp import parallel_fn
from plip.modules.webservices import check_pdb_status, fetch_pdb
except ImportError:
from modules.preparation import *
from modules.visualize import visualize_in_pymol
from modules.plipremote import VisualizerData
from modules.report import StructureReport, __version__
from modules import config
from modules.mp import parallel_fn
from modules.webservices import check_pdb_status, fetch_pdb
# Python standard library
import sys
import argparse
from argparse import ArgumentParser
import time
import multiprocessing
import json
# External libraries
import lxml.etree as et
descript = "Protein-Ligand Interaction Profiler (PLIP) v%s " \
"is a command-line based tool to analyze interactions in a protein-ligand complex. " \
"If you are using PLIP in your work, please cite: " \
"Salentin,S. et al. PLIP: fully automated protein-ligand interaction profiler. " \
"Nucl. Acids Res. (1 July 2015) 43 (W1): W443-W447. doi: 10.1093/nar/gkv315" % __version__
def threshold_limiter(aparser, arg):
arg = float(arg)
if arg <= 0:
aparser.error("All thresholds have to be values larger than zero.")
return arg
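# Illustrative sketch (not part of the original script): threshold_limiter is
# designed to be used as an argparse `type` callable so that non-positive
# threshold values are rejected at parse time. The helper and the option name
# below are hypothetical examples for illustration, not actual PLIP flags.
def _example_threshold_parser():
    from functools import partial
    example_parser = ArgumentParser(prog='plip-example')
    example_parser.add_argument('--example_dist', default=7.5,
                                type=partial(threshold_limiter, example_parser))
    return example_parser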
def process_pdb(pdbfile, outpath):
    """Analysis of a single PDB file. Can generate textual reports, XML, PyMOL session files and images as output."""
    write_message(outpath)
startmessage = '\nStarting analysis of %s\n' % pdbfile.split('/')[-1]
write_message(startmessage)
write_message('='*len(startmessage)+'\n')
mol = PDBComplex()
mol.output_path = outpath
mol.load_pdb(pdbfile)
    # #@todo Offer the possibility of a filter function from the command line (by ligand chain, position, hetid)
for ligand in mol.ligands:
mol.characterize_complex(ligand)
create_folder_if_not_exists(outpath)
# Generate the report files
streport = StructureReport(mol)
config.MAXTHREADS = min(config.MAXTHREADS, len(mol.interaction_sets))
######################################
# PyMOL Visualization (parallelized) #
######################################
if config.PYMOL or config.PICS:
complexes = [VisualizerData(mol, site) for site in sorted(mol.interaction_sets)
if not len(mol.interaction_sets[site].interacting_res) == 0]
if config.MAXTHREADS > 1:
write_message('\nGenerating visualizations in parallel on %i cores ...' % config.MAXTHREADS)
parfn = parallel_fn(visualize_in_pymol)
parfn(complexes, processes=config.MAXTHREADS)
else:
[visualize_in_pymol(plcomplex) for plcomplex in complexes]
if config.XML: # Generate report in xml format
streport.write_xml()
if config.TXT: # Generate report in txt (rst) format
streport.write_txt()
def download_structure(inputpdbid):
"""Given a PDB ID, downloads the corresponding PDB structure.
Checks for validity of ID and handles error while downloading.
Returns the path of the downloaded file."""
try:
if len(inputpdbid) != 4 or extract_pdbid(inputpdbid.lower()) == 'UnknownProtein':
sysexit(3, 'Invalid PDB ID (Wrong format)\n')
pdbfile, pdbid = fetch_pdb(inputpdbid.lower())
pdbpath = tilde_expansion('%s/%s.pdb' % (config.BASEPATH.rstrip('/'), pdbid))
create_folder_if_not_exists(config.BASEPATH)
with open(pdbpath, 'w') as g:
g.write(pdbfile)
write_message('file downloaded as %s\n\n' % pdbpath)
return pdbpath, pdbid
    except ValueError:  # Invalid PDB ID, cannot fetch from RCSB server
sysexit(3, 'Invalid PDB ID (Entry does not exist)\n')
def remove_duplicates(slist):
"""Checks input lists for duplicates and returns
a list with unique entries"""
unique = list(set(slist))
difference = len(slist) - len(unique)
if difference == 1:
write_message("Removed one duplicate entry from input list.\n")
if difference > 1:
write_message("Removed %i duplicate entries from input list.\n" % difference)
return unique
def plip_main(inputstructs, inputpdbids):
"""Main function. Calls functions for processing, report generation and visualization."""
pdbid, pdbpath = None, None
# #@todo For multiprocessing, implement better stacktracing for errors
# Print title and version
title = "* Protein-Ligand Interaction Profiler v%s *" % __version__
write_message('\n' + '*' * len(title) + '\n')
write_message(title)
write_message('\n' + '*' * len(title) + '\n\n')
if inputstructs is not None: # Process PDB file(s)
num_structures = len(inputstructs)
inputstructs = remove_duplicates(inputstructs)
for inputstruct in inputstructs:
if os.path.getsize(inputstruct) == 0:
sysexit(2, 'Empty PDB file\n') # Exit if input file is empty
if num_structures > 1:
basename = inputstruct.split('.')[0].split('/')[-1]
config.OUTPATH = '/'.join([config.BASEPATH, basename])
process_pdb(inputstruct, config.OUTPATH)
    else:  # Try to fetch the current PDB structure(s) directly from the RCSB server
num_pdbids = len(inputpdbids)
        inputpdbids = remove_duplicates(inputpdbids)
for inputpdbid in inputpdbids:
pdbpath, pdbid = download_structure(inputpdbid)
if num_pdbids > 1:
# config.OUTPATH = '/'.join([config.BASEPATH, pdbid[1:3].upper(), pdbid.upper()])
config.OUTPATH = '/'.join([config.BASEPATH, pdbid.upper()])
process_pdb(pdbpath, config.OUTPATH)
if (pdbid is not None or inputstructs is not None) and config.BASEPATH is not None:
if config.BASEPATH in ['.', './']:
write_message('\nFinished analysis. Find the result files in the working directory.\n\n')
else:
write_message('\nFinished analysis. Find the result files in %s\n\n' % config.BASEPATH)
# if __name__ == '__main__':
##############################
# Parse command line arguments
##############################
def myplip(pdbids,pathname):
# pdbids is a list
config.VERBOSE = True
config.DEBUG = False
config.MAXTHREADS = 1
config.XML = True
config.TXT = True
config.PICS = True
config.PYMOL = True
config.OUTPATH = pathname
config.OUTPATH = tilde_expansion("".join([config.OUTPATH, '/'])
if not config.OUTPATH.endswith('/') else config.OUTPATH)
config.BASEPATH = config.OUTPATH # Used for batch processing
# config.BASEPATH = 'basetest'
# config.BREAKCOMPOSITE = arguments.breakcomposite
# config.ALTLOC = arguments.altlocation
# config.PEPTIDES = False
# config.INTRA = arguments.intra
# config.NOFIX = arguments.nofix
config.KEEPMOD = True
# expanded_path = tilde_expansion(arguments.input) if arguments.input is not None else None
# plip_main(expanded_path, arguments.pdbid) # Start main script
plip_main(None, pdbids) # Start main script
if __name__ == "__main__":
    myplip(['1NEX', '1RLA'], '.')  # output path assumed here: the current working directory
|
|
#!/usr/bin/env python
"""Semantic protocol buffers can be created from proto2 .proto files.
For maintaining interoperability with primitive protocol buffer
implementations, we can parse the field descriptors created by the standard
Google proto implementation, and generate Semantic proto descriptors.
This file contains interoperability code with the Google protocol buffer
library.
"""
import logging
from grr.lib import rdfvalue
from grr.lib import type_info
from grr.proto import semantic_pb2
# Field types present in the proto2 field descriptors.
TYPE_DOUBLE = 1
TYPE_FLOAT = 2
TYPE_INT64 = 3
TYPE_UINT64 = 4
TYPE_INT32 = 5
TYPE_FIXED64 = 6
TYPE_FIXED32 = 7
TYPE_BOOL = 8
TYPE_STRING = 9
TYPE_GROUP = 10
TYPE_MESSAGE = 11
TYPE_BYTES = 12
TYPE_UINT32 = 13
TYPE_ENUM = 14
TYPE_SFIXED32 = 15
TYPE_SFIXED64 = 16
TYPE_SINT32 = 17
TYPE_SINT64 = 18
MAX_TYPE = 18
# These are labels in the descriptor. Semantic protobufs only distinguish
# between optional and repeated labels. Required is not enforced by the library
# - it should be done by the user in their Validate() method.
LABEL_OPTIONAL = 1
LABEL_REQUIRED = 2
LABEL_REPEATED = 3
MAX_LABEL = 3
# A Semantic Value's data store type specifies how it prefers to be encoded.
# This maps to a proto2 primitive field type. When parsing the .proto file we
# must ensure that the semantic value is encoded into the correct primitive
# field type.
_SEMANTIC_PRIMITIVE_TO_FIELD_TYPE = dict(
bytes=TYPE_BYTES,
string=TYPE_STRING,
integer=TYPE_INT64,
unsigned_integer=TYPE_UINT64,)
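# Illustrative sketch (not part of the original module): the mapping above is
# what lets DefineFromProtobuf (below) verify that a .proto field uses the
# primitive wire type its semantic value expects. The helper name and sample
# values here are assumptions for illustration only.
def _example_field_type_check(data_store_type, declared_field_type):
  """Return True if the declared proto2 field type matches the semantic type."""
  required_field_type = _SEMANTIC_PRIMITIVE_TO_FIELD_TYPE[data_store_type]
  return required_field_type == declared_field_type
# e.g. a value stored as "unsigned_integer" must live in a uint64 field:
#   _example_field_type_check("unsigned_integer", TYPE_UINT64)  -> True
#   _example_field_type_check("unsigned_integer", TYPE_STRING)  -> False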
def DefineFromProtobuf(cls, protobuf):
"""Add type info definitions from an existing protobuf.
We support building this class by copying definitions from an annotated
protobuf using the semantic protobuf. This is ideal for interoperability
with other languages and non-semantic protobuf implementations. In that case
it might be easier to simply annotate the .proto file with the relevant
semantic information.
Args:
cls: The class to add fields descriptors to (i.e. the new semantic class).
protobuf: A generated proto2 protocol buffer class as produced by the
standard Google protobuf compiler.
"""
# Parse message level options.
message_options = protobuf.DESCRIPTOR.GetOptions()
semantic_options = message_options.Extensions[semantic_pb2.semantic]
# Hack to avoid dependency loop.
# TODO(user): remove this hack
classes_dict = type_info.TypeInfoObject.classes
# Support message descriptions
if semantic_options.description and not cls.__doc__:
cls.__doc__ = semantic_options.description
cls.union_field = semantic_options.union_field or None
# We search through all the field descriptors and build type info
# descriptors from them.
for field in protobuf.DESCRIPTOR.fields:
type_descriptor = None
# Does this field have semantic options?
options = field.GetOptions().Extensions[semantic_pb2.sem_type]
kwargs = dict(
description=options.description,
name=field.name,
friendly_name=options.friendly_name,
field_number=field.number,
labels=list(options.label))
if field.has_default_value:
kwargs["default"] = field.default_value
# This field is a non-protobuf semantic value.
if options.type and field.type != TYPE_MESSAGE:
rdf_type = getattr(rdfvalue, options.type, None)
if rdf_type:
# Make sure that the field type is the same as what is required by the
# semantic type.
required_field_type = _SEMANTIC_PRIMITIVE_TO_FIELD_TYPE[
rdf_type.data_store_type]
if required_field_type != field.type:
raise rdfvalue.InitializeError(
("%s: .proto file uses incorrect field to store Semantic Value "
"%s: Should be %s") % (cls.__name__, field.name,
rdf_type.data_store_type))
type_descriptor = classes_dict["ProtoRDFValue"](rdf_type=options.type,
**kwargs)
# A semantic protobuf is already a semantic value so it is an error to
# specify it in two places.
elif options.type and field.type == TYPE_MESSAGE:
raise rdfvalue.InitializeError(
("%s: .proto file specified both Semantic Value type %s and "
"Semantic protobuf %s") % (cls.__name__, options.type,
field.message_type.name))
# Try to figure out what this field actually is from the descriptor.
elif field.type == TYPE_DOUBLE:
type_descriptor = classes_dict["ProtoDouble"](**kwargs)
elif field.type == TYPE_FLOAT:
type_descriptor = classes_dict["ProtoFloat"](**kwargs)
elif field.type == TYPE_BOOL:
type_descriptor = classes_dict["ProtoBoolean"](**kwargs)
elif field.type == TYPE_STRING:
type_descriptor = classes_dict["ProtoString"](**kwargs)
elif field.type == TYPE_BYTES:
type_descriptor = classes_dict["ProtoBinary"](**kwargs)
if options.dynamic_type:
# This may be a dynamic type. In this case the dynamic_type option
# names a method (which must exist) which should return the class of
# the embedded semantic value.
dynamic_cb = getattr(cls, options.dynamic_type, None)
if dynamic_cb is not None:
type_descriptor = classes_dict["ProtoDynamicEmbedded"](
dynamic_cb=dynamic_cb, **kwargs)
else:
logging.warning("Dynamic type specifies a non existant callback %s",
options.dynamic_type)
elif (field.type == TYPE_MESSAGE and options.dynamic_type and
field.message_type.name == "Any"):
dynamic_cb = getattr(cls, options.dynamic_type, None)
if dynamic_cb is not None:
type_descriptor = classes_dict["ProtoDynamicAnyValueEmbedded"](
dynamic_cb=dynamic_cb, **kwargs)
else:
logging.warning("Dynamic type specifies a non existant AnyValue "
"callback %s", options.dynamic_type)
elif field.type == TYPE_INT64 or field.type == TYPE_INT32:
type_descriptor = classes_dict["ProtoSignedInteger"](**kwargs)
elif field.type == TYPE_UINT32 or field.type == TYPE_UINT64:
type_descriptor = classes_dict["ProtoUnsignedInteger"](**kwargs)
# An embedded protocol buffer.
elif field.type == TYPE_MESSAGE and field.message_type:
# Refer to another protobuf. Note that the target does not need to be
# known at this time. It will be resolved using the late binding algorithm
# when it is known. Therefore this can actually also refer to this current
# protobuf (i.e. nested proto).
type_descriptor = classes_dict["ProtoEmbedded"](
nested=field.message_type.name, **kwargs)
# TODO(user): support late binding here.
if type_descriptor.type:
# This traps the following problem:
# class Certificate(rdf_protodict.RDFValueArray):
# protobuf = jobs_pb2.BlobArray
#
# A primitive Protobuf definition like:
# message Certificate {
# ....
# };
# And a field like:
# optional Certificate csr = 1 [(sem_type) = {
# description: "A Certificate RDFValue with the CSR in it.",
# }];
# If we blindly allowed the Certificate RDFValue to be used, the
# semantic library will end up embedding a BlobArray protobuf, but the
# primitive library will still use Certificate.
# The name of the primitive protobuf the semantic type implements.
semantic_protobuf_primitive = type_descriptor.type.protobuf.__name__
# This is an error because the primitive library will use the protobuf
# named in the field, but the semantic library will implement a
# different protobuf.
if semantic_protobuf_primitive != field.message_type.name:
raise rdfvalue.InitializeError(
("%s.%s: Conflicting primitive (%s) and semantic protobuf %s "
"which implements primitive protobuf (%s)") %
(cls.__name__, field.name, field.message_type.name,
type_descriptor.type.__name__, semantic_protobuf_primitive))
elif field.enum_type: # It is an enum.
enum_desc = field.enum_type
enum_dict = {}
enum_descriptions = {}
enum_labels = {}
for enum_value in enum_desc.values:
enum_dict[enum_value.name] = enum_value.number
description = enum_value.GetOptions().Extensions[
semantic_pb2.description]
enum_descriptions[enum_value.name] = description
labels = [
label
for label in enum_value.GetOptions().Extensions[semantic_pb2.label]
]
enum_labels[enum_value.name] = labels
enum_dict = dict((x.name, x.number) for x in enum_desc.values)
type_descriptor = classes_dict["ProtoEnum"](
enum_name=enum_desc.name,
enum=enum_dict,
enum_descriptions=enum_descriptions,
enum_labels=enum_labels,
**kwargs)
# Attach the enum container to the class for easy reference:
setattr(cls, enum_desc.name, type_descriptor.enum_container)
# If we do not recognize the type descriptor we ignore this field.
if type_descriptor is not None:
# If the field is repeated, wrap it in a ProtoList.
if field.label == LABEL_REPEATED:
options = field.GetOptions().Extensions[semantic_pb2.sem_type]
type_descriptor = classes_dict["ProtoList"](type_descriptor,
labels=list(options.label))
try:
cls.AddDescriptor(type_descriptor)
except Exception:
logging.error("Failed to parse protobuf %s", cls)
raise
else:
logging.error("Unknown field type for %s - Ignoring.", field.name)
|
|
import copy
from hearthbreaker.cards.base import SpellCard
from hearthbreaker.tags.action import AddCard
from hearthbreaker.tags.base import Effect, BuffUntil, Buff, AuraUntil, ActionTag
from hearthbreaker.tags.condition import IsSpell
from hearthbreaker.tags.event import TurnStarted, TurnEnded, SpellCast
from hearthbreaker.tags.selector import PlayerSelector, CardSelector
from hearthbreaker.tags.status import Stealth, ChangeAttack, ManaChange
import hearthbreaker.targeting
from hearthbreaker.constants import CHARACTER_CLASS, CARD_RARITY
class Assassinate(SpellCard):
def __init__(self):
super().__init__("Assassinate", 5, CHARACTER_CLASS.ROGUE, CARD_RARITY.FREE,
target_func=hearthbreaker.targeting.find_enemy_minion_spell_target)
def use(self, player, game):
super().use(player, game)
self.target.die(self)
class Backstab(SpellCard):
def __init__(self):
super().__init__("Backstab", 0, CHARACTER_CLASS.ROGUE, CARD_RARITY.FREE,
target_func=hearthbreaker.targeting.find_minion_spell_target,
filter_func=lambda target: target.health == target.calculate_max_health() and
target.spell_targetable())
def use(self, player, game):
super().use(player, game)
self.target.damage(player.effective_spell_damage(2), self)
class Betrayal(SpellCard):
def __init__(self):
super().__init__("Betrayal", 2, CHARACTER_CLASS.ROGUE, CARD_RARITY.COMMON,
target_func=hearthbreaker.targeting.find_enemy_minion_spell_target)
def use(self, player, game):
super().use(player, game)
left_minion = None
right_minion = None
index = self.target.index
if index > 0:
left_minion = game.other_player.minions[index - 1]
if index < min(len(game.other_player.minions) - 1, 6):
right_minion = game.other_player.minions[index + 1]
original_immune = self.target.immune
self.target.immune = True
if left_minion is not None:
left_minion.damage(self.target.calculate_attack(), self.target)
if right_minion is not None:
right_minion.damage(self.target.calculate_attack(), self.target)
self.target.immune = original_immune
class BladeFlurry(SpellCard):
def __init__(self):
super().__init__("Blade Flurry", 2, CHARACTER_CLASS.ROGUE, CARD_RARITY.RARE)
def use(self, player, game):
super().use(player, game)
if player.hero.weapon is not None:
# Yes, this card is affected by spell damage cards.
# Source: http://www.hearthhead.com/card=1064/blade-flurry#comments:id=1927317
attack_power = player.effective_spell_damage(player.hero.calculate_attack())
player.hero.weapon.destroy()
for minion in copy.copy(game.other_player.minions):
minion.damage(attack_power, self)
game.other_player.hero.damage(attack_power, self)
class ColdBlood(SpellCard):
def __init__(self):
super().__init__("Cold Blood", 1, CHARACTER_CLASS.ROGUE, CARD_RARITY.COMMON,
target_func=hearthbreaker.targeting.find_minion_spell_target)
def use(self, player, game):
super().use(player, game)
if player.cards_played > 0:
self.target.change_attack(4)
else:
self.target.change_attack(2)
class Conceal(SpellCard):
def __init__(self):
super().__init__("Conceal", 1, CHARACTER_CLASS.ROGUE, CARD_RARITY.COMMON)
def use(self, player, game):
super().use(player, game)
for minion in player.minions:
if not minion.stealth:
minion.add_buff(BuffUntil(Stealth(), TurnStarted()))
class DeadlyPoison(SpellCard):
def __init__(self):
super().__init__("Deadly Poison", 1, CHARACTER_CLASS.ROGUE, CARD_RARITY.FREE)
def use(self, player, game):
super().use(player, game)
player.hero.weapon.base_attack += 2
player.hero.change_temp_attack(2)
def can_use(self, player, game):
return super().can_use(player, game) and player.hero.weapon is not None
class Eviscerate(SpellCard):
def __init__(self):
super().__init__("Eviscerate", 2, CHARACTER_CLASS.ROGUE, CARD_RARITY.COMMON,
target_func=hearthbreaker.targeting.find_spell_target)
def use(self, player, game):
super().use(player, game)
if player.cards_played > 0:
self.target.damage(player.effective_spell_damage(4), self)
else:
self.target.damage(player.effective_spell_damage(2), self)
class FanOfKnives(SpellCard):
def __init__(self):
super().__init__("Fan of Knives", 3, CHARACTER_CLASS.ROGUE, CARD_RARITY.COMMON)
def use(self, player, game):
super().use(player, game)
for minion in copy.copy(game.other_player.minions):
minion.damage(player.effective_spell_damage(1), self)
player.draw()
class Headcrack(SpellCard):
def __init__(self):
super().__init__("Headcrack", 3, CHARACTER_CLASS.ROGUE, CARD_RARITY.RARE)
def use(self, player, game):
super().use(player, game)
game.other_player.hero.damage(player.effective_spell_damage(2), self)
if player.cards_played > 0:
player.add_effect(Effect(TurnEnded(), ActionTag(AddCard(self), PlayerSelector())))
class Preparation(SpellCard):
def __init__(self):
super().__init__("Preparation", 0, CHARACTER_CLASS.ROGUE, CARD_RARITY.EPIC)
def use(self, player, game):
super().use(player, game)
player.add_aura(AuraUntil(ManaChange(-3), CardSelector(condition=IsSpell()), SpellCast()))
class Sap(SpellCard):
def __init__(self):
super().__init__("Sap", 2, CHARACTER_CLASS.ROGUE, CARD_RARITY.FREE,
target_func=hearthbreaker.targeting.find_enemy_minion_spell_target)
def use(self, player, game):
super().use(player, game)
self.target.bounce()
class Shadowstep(SpellCard):
def __init__(self):
super().__init__("Shadowstep", 0, CHARACTER_CLASS.ROGUE, CARD_RARITY.COMMON,
target_func=hearthbreaker.targeting.find_friendly_minion_spell_target)
def use(self, player, game):
super().use(player, game)
self.target.bounce()
self.target.card.add_buff(Buff(ManaChange(-3)))
class Shiv(SpellCard):
def __init__(self):
super().__init__("Shiv", 2, CHARACTER_CLASS.ROGUE, CARD_RARITY.COMMON,
target_func=hearthbreaker.targeting.find_spell_target)
def use(self, player, game):
super().use(player, game)
self.target.damage(player.effective_spell_damage(1), self)
player.draw()
class SinisterStrike(SpellCard):
def __init__(self):
super().__init__("Sinister Strike", 1, CHARACTER_CLASS.ROGUE, CARD_RARITY.FREE)
def use(self, player, game):
super().use(player, game)
game.other_player.hero.damage(player.effective_spell_damage(3), self)
class Sprint(SpellCard):
def __init__(self):
super().__init__("Sprint", 7, CHARACTER_CLASS.ROGUE, CARD_RARITY.COMMON)
def use(self, player, game):
super().use(player, game)
for i in range(0, 4):
player.draw()
class Vanish(SpellCard):
def __init__(self):
super().__init__("Vanish", 6, CHARACTER_CLASS.ROGUE, CARD_RARITY.COMMON)
def use(self, player, game):
super().use(player, game)
targets = copy.copy(game.other_player.minions)
targets.extend(player.minions)
# Minions are returned to a player's hand in the order in which they were played.
# Source: http://www.hearthhead.com/card=196/vanish#comments:id=1908549
for minion in sorted(targets, key=lambda m: m.born):
minion.bounce()
class TinkersSharpswordOil(SpellCard):
def __init__(self):
super().__init__("Tinker's Sharpsword Oil", 4, CHARACTER_CLASS.ROGUE, CARD_RARITY.COMMON)
def use(self, player, game):
super().use(player, game)
player.hero.weapon.base_attack += 3
player.hero.change_temp_attack(3)
if player.cards_played > 0:
targets = hearthbreaker.targeting.find_friendly_minion_battlecry_target(player.game, lambda x: x)
if targets is not None:
target = player.game.random_choice(targets)
target.add_buff(Buff(ChangeAttack(3)))
def can_use(self, player, game):
return super().can_use(player, game) and player.hero.weapon is not None
class Sabotage(SpellCard):
def __init__(self):
super().__init__("Sabotage", 4, CHARACTER_CLASS.ROGUE, CARD_RARITY.EPIC)
def use(self, player, game):
super().use(player, game)
targets = hearthbreaker.targeting.find_enemy_minion_battlecry_target(player.game, lambda x: True)
target = game.random_choice(targets)
target.die(None)
game.check_delayed()
if player.cards_played > 0 and game.other_player.hero.weapon is not None:
game.other_player.hero.weapon.destroy()
def can_use(self, player, game):
return super().can_use(player, game) and len(game.other_player.minions) >= 1
class GangUp(SpellCard):
def __init__(self):
super().__init__("Gang Up", 2, CHARACTER_CLASS.ROGUE, CARD_RARITY.COMMON,
target_func=hearthbreaker.targeting.find_minion_spell_target)
def use(self, player, game):
super().use(player, game)
for i in range(3):
player.put_back(type(self.target.card)())
|
|
"""Unit tests for evaluators."""
import deepchem as dc
import numpy as np
import unittest
import sklearn.ensemble
from deepchem.utils.evaluate import Evaluator
from deepchem.utils.evaluate import GeneratorEvaluator
def test_multiclass_threshold_predictions():
"""Check prediction thresholding works correctly."""
# Construct a random class probability matrix
y = np.random.rand(10, 5)
y_sums = np.sum(y, axis=1)
y = y / y_sums[:, None]
y_out = dc.metrics.threshold_predictions(y)
assert y_out.shape == (10,)
assert np.allclose(y_out, np.argmax(y, axis=1))
def test_binary_threshold_predictions():
"""Check prediction thresholding works correctly."""
# Construct a random class probability matrix
y = np.random.rand(10, 2)
y_sums = np.sum(y, axis=1)
y = y / y_sums[:, None]
y_out = dc.metrics.threshold_predictions(y, threshold=0.3)
assert y_out.shape == (10,)
assert np.allclose(y_out, np.where(y[:, 1] >= 0.3, np.ones(10), np.zeros(10)))
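# Illustrative sketch (not the DeepChem implementation): the two tests above pin down
# the behavior expected of dc.metrics.threshold_predictions. With no threshold the
# prediction is the argmax of each probability row; for a binary problem with an
# explicit threshold, class 1 is predicted whenever its probability reaches the
# threshold. A minimal NumPy re-statement of that contract, assuming normalized rows:
def _threshold_predictions_sketch(y, threshold=None):
    """Mimic the behavior asserted by the two threshold tests above."""
    if threshold is None:
        # Multiclass case: pick the most probable class per row.
        return np.argmax(y, axis=1)
    # Binary case: predict class 1 when its probability meets the threshold.
    return np.where(y[:, 1] >= threshold, np.ones(len(y)), np.zeros(len(y)))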
def test_evaluator_dc_metric():
"""Test an evaluator on a dataset."""
X = np.random.rand(10, 5)
y = np.random.rand(10, 1)
dataset = dc.data.NumpyDataset(X, y)
model = dc.models.MultitaskRegressor(1, 5)
evaluator = Evaluator(model, dataset, [])
metric = dc.metrics.Metric(dc.metrics.mae_score)
multitask_scores = evaluator.compute_model_performance(metric)
assert isinstance(multitask_scores, dict)
assert len(multitask_scores) == 1
assert multitask_scores['mae_score'] > 0
def test_multiclass_classification_singletask():
"""Test multiclass classification evaluation."""
X = np.random.rand(100, 5)
y = np.random.randint(5, size=(100,))
dataset = dc.data.NumpyDataset(X, y)
model = dc.models.MultitaskClassifier(1, 5, n_classes=5)
evaluator = Evaluator(model, dataset, [])
multitask_scores = evaluator.compute_model_performance(
dc.metrics.roc_auc_score, n_classes=5)
assert len(multitask_scores) == 1
assert multitask_scores["metric-1"] >= 0
def test_sklearn_multiclass_classification_singletask():
"""Test multiclass classification evaluation."""
X = np.random.rand(100, 5)
y = np.random.randint(5, size=(100,))
dataset = dc.data.NumpyDataset(X, y)
rf = sklearn.ensemble.RandomForestClassifier(50)
model = dc.models.SklearnModel(rf)
model.fit(dataset)
evaluator = Evaluator(model, dataset, [])
multitask_scores = evaluator.compute_model_performance(
dc.metrics.roc_auc_score, n_classes=5)
assert len(multitask_scores) == 1
assert multitask_scores["metric-1"] >= 0
def test_evaluate_multiclass_classification_singletask():
"""Test multiclass classification evaluation."""
X = np.random.rand(100, 5)
y = np.random.randint(5, size=(100,))
dataset = dc.data.NumpyDataset(X, y)
model = dc.models.MultitaskClassifier(1, 5, n_classes=5)
multitask_scores = model.evaluate(
dataset, dc.metrics.roc_auc_score, n_classes=5)
assert len(multitask_scores) == 1
assert multitask_scores["metric-1"] >= 0
def test_multitask_evaluator():
"""Test evaluation of a multitask metric."""
n_tasks = 2
X = np.random.rand(10, 5)
y = np.random.rand(10, 2, 1)
dataset = dc.data.NumpyDataset(X, y)
model = dc.models.MultitaskRegressor(2, 5)
evaluator = Evaluator(model, dataset, [])
metric = dc.metrics.Metric(dc.metrics.mae_score)
multitask_scores, all_task_scores = evaluator.compute_model_performance(
metric, per_task_metrics=True)
assert isinstance(multitask_scores, dict)
assert len(multitask_scores) == 1
assert multitask_scores['mae_score'] > 0
assert isinstance(all_task_scores, dict)
assert len(multitask_scores) == 1
def test_model_evaluate_dc_metric():
"""Test a model evaluate on a dataset."""
X = np.random.rand(10, 5)
y = np.random.rand(10, 1)
dataset = dc.data.NumpyDataset(X, y)
model = dc.models.MultitaskRegressor(1, 5)
metric = dc.metrics.Metric(dc.metrics.mae_score)
multitask_scores = model.evaluate(dataset, metric, [])
assert isinstance(multitask_scores, dict)
assert len(multitask_scores) == 1
assert multitask_scores['mae_score'] > 0
def test_multitask_model_evaluate_sklearn():
"""Test evaluation of a multitask metric."""
n_tasks = 2
X = np.random.rand(10, 5)
y = np.random.rand(10, 2)
dataset = dc.data.NumpyDataset(X, y)
model = dc.models.MultitaskRegressor(2, 5)
evaluator = Evaluator(model, dataset, [])
multitask_scores, all_task_scores = evaluator.compute_model_performance(
dc.metrics.mean_absolute_error, per_task_metrics=True)
assert isinstance(multitask_scores, dict)
assert len(multitask_scores) == 1
assert multitask_scores['metric-1'] > 0
assert isinstance(all_task_scores, dict)
assert len(multitask_scores) == 1
def test_multitask_model_evaluate():
"""Test evaluation of a multitask metric."""
n_tasks = 2
X = np.random.rand(10, 5)
y = np.random.rand(10, 2)
dataset = dc.data.NumpyDataset(X, y)
model = dc.models.MultitaskRegressor(2, 5)
multitask_scores, all_task_scores = model.evaluate(
dataset, dc.metrics.mean_absolute_error, per_task_metrics=True)
assert isinstance(multitask_scores, dict)
assert len(multitask_scores) == 1
assert multitask_scores["metric-1"] > 0
assert isinstance(all_task_scores, dict)
def test_evaluator_dc_multi_metric():
"""Test an evaluator on a dataset."""
X = np.random.rand(10, 5)
y = np.random.rand(10, 1)
dataset = dc.data.NumpyDataset(X, y)
model = dc.models.MultitaskRegressor(1, 5)
evaluator = Evaluator(model, dataset, [])
metric1 = dc.metrics.Metric(dc.metrics.mae_score, n_tasks=2)
metric2 = dc.metrics.Metric(dc.metrics.r2_score, n_tasks=2)
multitask_scores = evaluator.compute_model_performance([metric1, metric2])
assert isinstance(multitask_scores, dict)
assert len(multitask_scores) == 2
assert multitask_scores['mae_score'] > 0
assert "r2_score" in multitask_scores
def test_model_evaluate_dc_multi_metric():
"""Test an evaluator on a dataset."""
X = np.random.rand(10, 5)
y = np.random.rand(10, 1)
dataset = dc.data.NumpyDataset(X, y)
model = dc.models.MultitaskRegressor(1, 5)
metric1 = dc.metrics.Metric(dc.metrics.mae_score)
metric2 = dc.metrics.Metric(dc.metrics.r2_score)
multitask_scores = model.evaluate(dataset, [metric1, metric2])
assert isinstance(multitask_scores, dict)
assert len(multitask_scores) == 2
assert multitask_scores['mae_score'] > 0
assert "r2_score" in multitask_scores
def test_generator_evaluator_dc_metric_multitask_single_point():
"""Test generator evaluator on a generator."""
X = np.random.rand(10, 5)
y = np.random.rand(10, 1)
dataset = dc.data.NumpyDataset(X, y)
model = dc.models.MultitaskRegressor(1, 5)
generator = model.default_generator(dataset, pad_batches=False)
evaluator = GeneratorEvaluator(model, generator, [])
metric = dc.metrics.Metric(dc.metrics.mae_score)
multitask_scores = evaluator.compute_model_performance(metric)
assert isinstance(multitask_scores, dict)
assert len(multitask_scores) == 1
assert multitask_scores['mae_score'] > 0
assert len(multitask_scores) == 1
def test_evaluator_sklearn_metric():
"""Test an evaluator on a dataset."""
X = np.random.rand(10, 5)
y = np.random.rand(10, 1)
dataset = dc.data.NumpyDataset(X, y)
model = dc.models.MultitaskRegressor(1, 5)
evaluator = Evaluator(model, dataset, [])
multitask_scores = evaluator.compute_model_performance(
dc.metrics.mean_absolute_error)
assert isinstance(multitask_scores, dict)
assert len(multitask_scores) == 1
# Note that since no name was provided, metrics are indexed by the
# order given.
assert multitask_scores['metric-1'] > 0
def test_generator_evaluator_dc_metric_multitask():
"""Test generator evaluator on a generator."""
X = np.random.rand(10, 5)
y = np.random.rand(10, 1)
dataset = dc.data.NumpyDataset(X, y)
model = dc.models.MultitaskRegressor(1, 5)
generator = model.default_generator(dataset, pad_batches=False)
evaluator = GeneratorEvaluator(model, generator, [])
metric = dc.metrics.Metric(dc.metrics.mae_score)
multitask_scores = evaluator.compute_model_performance(metric)
assert isinstance(multitask_scores, dict)
assert len(multitask_scores) == 1
assert multitask_scores['mae_score'] > 0
def test_model_evaluate_sklearn_metric():
"""Test a model evaluate on a dataset."""
X = np.random.rand(10, 5)
y = np.random.rand(10, 1)
dataset = dc.data.NumpyDataset(X, y)
model = dc.models.MultitaskRegressor(1, 5)
multitask_scores = model.evaluate(dataset, dc.metrics.mean_absolute_error)
assert isinstance(multitask_scores, dict)
assert len(multitask_scores) == 1
# Note that since no name was provided, metrics are indexed by the
# order given.
assert multitask_scores['metric-1'] > 0
def test_evaluator_sklearn_multi_metric():
"""Test an evaluator on a dataset."""
X = np.random.rand(10, 5)
y = np.random.rand(10, 1)
dataset = dc.data.NumpyDataset(X, y)
model = dc.models.MultitaskRegressor(1, 5)
evaluator = Evaluator(model, dataset, [])
multitask_scores = evaluator.compute_model_performance(
[dc.metrics.mean_absolute_error, dc.metrics.r2_score])
assert isinstance(multitask_scores, dict)
assert len(multitask_scores.keys()) == 2
# Note that since no name was provided, metrics are indexed by the
# order given.
assert multitask_scores['metric-1'] > 0
assert "metric-2" in multitask_scores
def test_model_evaluate_sklearn_multi_metric():
"""Test an evaluator on a dataset."""
X = np.random.rand(10, 5)
y = np.random.rand(10, 1)
dataset = dc.data.NumpyDataset(X, y)
model = dc.models.MultitaskRegressor(1, 5)
multitask_scores = model.evaluate(
dataset, [dc.metrics.mean_absolute_error, dc.metrics.r2_score])
assert isinstance(multitask_scores, dict)
assert len(multitask_scores.keys()) == 2
# Note that since no name was provided, metrics are indexed by the
# order given.
assert multitask_scores['metric-1'] > 0
assert "metric-2" in multitask_scores
def test_gc_binary_classification():
"""Test multiclass classification evaluation."""
smiles = ["C", "CC"]
featurizer = dc.feat.ConvMolFeaturizer()
X = featurizer.featurize(smiles)
y = np.random.randint(2, size=(len(smiles),))
dataset = dc.data.NumpyDataset(X, y)
model = dc.models.GraphConvModel(1, mode="classification")
# TODO: Fix this case with correct thresholding
evaluator = Evaluator(model, dataset, [])
multitask_scores = evaluator.compute_model_performance(
dc.metrics.accuracy_score, n_classes=2)
assert len(multitask_scores) == 1
assert multitask_scores["metric-1"] >= 0
def test_gc_binary_kappa_classification():
"""Test multiclass classification evaluation."""
np.random.seed(1234)
smiles = ["C", "CC", "CO", "CCC", "CCCC"]
featurizer = dc.feat.ConvMolFeaturizer()
X = featurizer.featurize(smiles)
y = np.random.randint(2, size=(len(smiles),))
dataset = dc.data.NumpyDataset(X, y)
model = dc.models.GraphConvModel(1, mode="classification")
# TODO: Fix this case with correct thresholding
evaluator = Evaluator(model, dataset, [])
multitask_scores = evaluator.compute_model_performance(
dc.metrics.kappa_score, n_classes=2)
assert len(multitask_scores) == 1
assert multitask_scores["metric-1"] <= 1
assert multitask_scores["metric-1"] >= -1
def test_gc_multiclass_classification():
"""Test multiclass classification evaluation."""
np.random.seed(1234)
smiles = ["C", "CC"]
featurizer = dc.feat.ConvMolFeaturizer()
X = featurizer.featurize(smiles)
y = np.random.randint(5, size=(len(smiles),))
dataset = dc.data.NumpyDataset(X, y)
model = dc.models.GraphConvModel(1, mode="classification", n_classes=5)
evaluator = Evaluator(model, dataset, [])
multitask_scores = evaluator.compute_model_performance(
dc.metrics.accuracy_score, n_classes=5)
assert len(multitask_scores) == 1
assert multitask_scores["metric-1"] >= 0
|
|
from twocode.utils.node import Node, range_call, l
import textwrap
from twocode.utils.code import format_exception_only
from twocode import utils
import twocode.utils.string
LOG = set()
#LOG.add("PERF")
stmt_symbol = "stmt"
delims = ['EOL', "';'"]
ws = "WS EOL ENTER LEAVE".split()
class IndentParser:
def __init__(self):
self.parser = None
self.valids = []
self.wrap_code, self.insert = None, None
if "PERF" in LOG:
self.log = utils.Object(
edge=[],
expand_volume=[],
tree_size=[],
)
def validate(self, node):
for valid in self.valids:
valid(node)
def parse(self, lexer):
buffer = list(lexer)
if "PERF" in LOG:
for key in self.log:
self.log[key] = [None for i in range(len(buffer))]
# REMOVED: breaks if inner parser becomes dirty
# self.parser.init_search()
self.num_parses = 1
code, self.errors = self.check(buffer)
self.matches = [code]
def match(self):
if self.errors:
raise Exception("\n".join([""] + [str(error) for error in self.errors]))
return self.matches[0]
def check(self, buffer, tree=None):
if tree is None:
tree = parse_block_tree(buffer)
all_matches = []
all_errors = []
parser = self.parser.copy() # not even that necessary?
i = tree.pos
tree_end = tree.pos + tree.length
def skip_ws():
nonlocal i
while i < tree_end and (buffer[i].type in ws or buffer[i].type in delims):
i += 1
subtrees = tree.children
subs = []
subtree = None
def next_sub():
nonlocal subtree
subtree = subtrees.pop(0) if subtrees else None
next_sub()
matches = []
match_pos, match_end = tree.pos, None
def match():
nonlocal matches, match_end
if bool(parser.matches) and (i >= tree_end or buffer[i].type in delims):
matches = parser.matches
match_end = i
def pack_matches():
nonlocal matches, i
matches_re = []
msg = None
for match in matches:
try:
self.validate(match)
matches_re.append(match)
except Exception as exc:
msg = format_exception_only(exc)
matches = matches_re
if not matches:
if match_pos >= tree_end:
return
# REASON: pack_matches() after leaving a block prints an error at nothing
code = " ".join(str(buffer[j]) for j in skip_subs(match_pos, match_end if msg else i, subs))
if not msg:
msg = Exception("can't parse <stmt> ")
error = "{}at: {}".format(msg, code)
all_errors.append(error)
return
sub_pos = list(skip_subs(match_pos, match_end, subs))
sub_buffer = [buffer[j] for j in sub_pos]
sub_len = len(sub_buffer)
#print(subs)
#print("about to split:", sub_buffer)
remain_match, remain_parses = matches[0], 1
pos = 0
matches = []
offsets = []
parser.init_search(stmt_symbol)
for i1 in range(sub_len):
parser.push(sub_buffer[i1])
if "PERF" in LOG:
for key in self.log:
self.log[key][sub_pos[i1]] = parser.log[key][i1]
if not (i1 + 1 >= sub_len or sub_buffer[i1 + 1].type in delims):
continue
matches1 = []
for match1 in parser.matches:
#try:
self.validate(match1)
matches1.append(match1)
#except:
# continue
if not matches1:
continue
i2 = i1 + 2
while i2 < sub_len and sub_buffer[i2].type in ws:
i2 += 1
parser2 = self.parser.copy()
parser2.init_search(stmt_symbol)
pos2 = i2
for i2 in range(pos2, sub_len):
parser2.push(sub_buffer[i2])
matches2 = []
for match2 in parser2.matches:
#try:
self.validate(match2)
matches2.append(match2)
#except:
# continue
if matches2:
matches.append(matches1[0])
offsets.append(sub_pos[pos])
self.num_parses *= len(matches1)
pos = pos2
remain_match, remain_parses = matches2[0], len(matches2)
if "PERF" in LOG:
for i in range(i1 + 1):
for key in self.log:
self.log[key][sub_pos[i]] = parser.log[key][i]
for i in range(pos2, i2 + 1):
for key in self.log:
self.log[key][sub_pos[i]] = parser2.log[key][i]
parser.init_search(stmt_symbol)
break
matches.append(remain_match)
offsets.append(sub_pos[pos])
self.num_parses *= remain_parses
offsets.append(sub_pos[-1] + 2)
#print(len(matches))
all_code = []
for subtree in subs:
#print("subcheck")
code, errors = self.check(buffer, subtree)
all_code.append(code)
all_errors.extend(errors)
matches_re = []
for match, pos, end in zip(matches, offsets[:-1], offsets[1:]):
length = end - pos - 1
for subtree, code in reversed(list(zip(subs, all_code))):
if pos <= subtree.pos - 1 and subtree.pos + subtree.length <= pos + length:
offset = subtree.pos
for subtree in reversed(subs):
if offset > subtree.pos:
offset -= subtree.length
#print("inserting")
# to test before you fix this
self.insert(match, offset - pos, code)
matches_re.append(match)
all_matches.extend(matches_re)
#print("over")
def delim_split(): #
pass
def loop():
nonlocal i, matches, match_pos, match_end
parser.init_search(stmt_symbol)
skip_ws()
while i < tree_end:
if subtree and i >= subtree.pos - 1:
if i >= subtree.pos:
raise Exception("algorithm error: skipped into subtree")
blocks = [branch for branch in parser.edge if branch.rule.symbol == "block_list" and not branch.node.children]
# note - filters out expr_term by not listing it, and block_list has no problem with either
# the entire motivation behind subs
if blocks:
parser.edge = blocks
#parser.push(buffer[i])
#parser.push(buffer[i + subtree.length + 1])
from twocode.parser import Token
parser.push(Token("'{'"))
parser.push(Token("'}'"))
subs.append(subtree)
i += subtree.length + 2
next_sub()
match()
else:
for st in reversed(subtree.children):
subtrees.insert(0, st)
next_sub()
else:
parser.push(buffer[i])
i += 1
match()
if not (parser.matches or parser.possible()):
pack_matches()
i -= 2
# REASON: trying to merge two lines(EOL VAR >PTR<), panicking would skip the next line
if i < match_pos:
i = match_pos
# REASON: (EOL >PTR<) a wrong start with EOL before jumps in front of it and loops
while i < tree_end:
if subtree:
if i >= subtree.pos - 2:
if buffer[i].type == "EOL" and buffer[i + 1].type == "ENTER":
i += subtree.length + 3
next_sub()
continue
if i >= subtree.pos - 1:
i += subtree.length + 2
next_sub()
continue
if buffer[i].type in delims:
break
i += 1
i += 1
skip_ws()
matches = []
subs.clear()
parser.init_search(stmt_symbol)
match_pos, match_end = i, None
pack_matches()
loop()
return self.wrap_code(all_matches), all_errors
# which parses a token buffer to form a tree hierarchy
# yield
def parse_block_tree(buffer, leave_token=None, pos=0):
"""
parses into a tree of ranges, identifying nested blocks
LEAVE has higher priority than '}' and can leave without closing some brackets
"""
tree = Node()
i = pos
while i < len(buffer):
token = buffer[i]
if token.type == "ENTER":
child = parse_block_tree(buffer, "LEAVE", pos=i + 1)
i += child.length + 2
tree.children.append(child)
elif token.type == "{":
child = parse_block_tree(buffer, "}", pos=i + 1)
i += child.length + 2
tree.children.append(child)
elif token.type == leave_token:
break
elif token.type == "LEAVE":
break
else:
i += 1
tree.pos = pos
tree.length = i - pos
return tree
def skip_subs(pos, end, subtrees):
subtrees = subtrees.copy()
subtree = subtrees.pop(0) if subtrees else None
i = pos
while i < end:
yield i
i += 1
if subtree and i >= subtree.pos - 1:
yield i
yield i + subtree.length + 1
i += subtree.length + 2
subtree = subtrees.pop(0) if subtrees else None
def gen_insert(rules):
rule_descs = ["{} -> {}".format(rule.symbol, " ".join(symbol.name for symbol in rule.pattern if symbol.name != "_WS")) for rule in rules]
def find_type(s):
rule = rules[rule_descs.index(s)]
pos_map = []
names = [symbol.name for symbol in rule.pattern]
pos = -1
for symbol in rule.pattern:
if symbol.name == "_WS":
continue
pos = names.index(symbol.name, pos + 1)
pos_map.append(pos)
return lambda *args, children_pos=None: Node(rule=rule, children_pos=pos_map[:len(args)] if children_pos is None else children_pos, children=list(args))
block_list, code_append, code_stmt = [find_type(s) for s in textwrap.dedent("""
block_list -> '{' code '}'
code -> code DELIM stmt
code -> stmt
""").strip().splitlines()]
def wrap_code(lines):
if not lines:
return block_list(Node(rule=None, token=None), Node(rule=None, token=None), children_pos=[0, 2])
node = code_stmt(lines[0])
for stmt in lines[1:]:
node = code_append(node, Node(rule=None, token=None), stmt)
node = block_list(Node(rule=None, token=None), node, Node(rule=None, token=None))
# REASON:
# useless fillers, but None and Node() crash the map travel,
# and rule=None requires token=None because it is a terminal
return node
def insert(node, pos, code):
def insert(node, range):
p, len = range
rule = node.rule
if rule and rule.symbol == "block_list":
if p == pos - 1:
return code
range_call(l(insert))(node)
return wrap_code, insert
def gen_valid_indent():
def iter_tokens(node):
for pos, child in zip(node.children_pos, node.children):
if not child.rule:
yield node, pos
else:
for n, j in iter_tokens(child):
yield n, j
def tree_call(node, f):
for child in node.children:
tree_call(child, f)
f(node)
def valid(node):
# twice?
# print(node)
enum_tokens = list(iter_tokens(node))
#for n, i in enum_tokens:
# print(n.rule.symbol, n.rule.pattern[i], repr(n.children[i].token))
valid_tokens_empty_line(n.rule.symbol for n, i in enum_tokens)
for n, i in enum_tokens:
# if n.rule.symbol == "ENTER":
if n.rule.pattern[i].name == "ENTER":
indent = n.children[i].token
valid_indent_order(indent)
indents = []
for n, i in enum_tokens:
rule = n.rule
if rule.symbol == "block_list" and rule.pattern[i].name == "ENTER":
indent = n.children[i].token
indents.append(indent)
for indent in indents:
valid_indent_mixed(indent)
valid_indent_odd(indent)
itervalid_indent_consistent(indents)
tree_call(node, valid_inline_block)
return valid
def valid_indent_order(indent):
if indent.lstrip("\t").lstrip(" "):
raise IndentationError("spaces followed by tabs")
def itervalid_indent_consistent(indents):
style = None
for indent in indents:
if style is None:
style = indent
else:
if indent != style:
raise IndentationError("inconsistent indentation({}, was {})".format(twocode.utils.string.escape(indent), twocode.utils.string.escape(style)))
def valid_indent_mixed(indent):
if ("\t" in indent) == (" " in indent):
raise IndentationError("mixed indentation")
def valid_indent_odd(indent):
if " " in indent:
if len(indent) not in [1, 2, 3, 4, 8]:
raise IndentationError("odd number of spaces")
else:
if len(indent) not in [1]:
raise IndentationError("multiple tabs")
def valid_tokens_empty_line(tokens):
buffer = []
error = "LEAVE EOL ENTER".split()
for token in tokens:
buffer.append(token)
if len(buffer) > 3:
buffer.pop(0)
if buffer == error:
raise IndentationError("blocks separated by whitespace")
def valid_inline_block(node):
rule = node.rule
if not rule:
return
if rule.symbol == "expr" and "block_list" in [symbol.name for symbol in rule.pattern]:
assert "ENTER" not in [symbol.name for symbol in node.children[0].rule.pattern], "whitespace <expr_block>"
# last
# TESTS!
"""
either isolate the ws block rule | look for block parents, we can't sub block_list
"""
# expr block?
# rule. stuff with new grammar
if __name__ == "__main__":
from twocode import Twocode
twocode = Twocode()
compiler = twocode.parser
parser = IndentParser()
from twocode.parser import IncrementalParser
parser.parser = IncrementalParser(compiler.rules)
from twocode.lang.grammar import prec
parser.valids.append(gen_valid_indent())
parser.valids.append(prec(compiler.rules))
parser.wrap_code, parser.insert = gen_insert(compiler.rules)
#parser.parse(compiler.lexer.parse(open("samples/blocky.2c").read()))
parser.parse(compiler.lexer.parse(open("../../code/code/iter.2c").read()))
ast = parser.matches[0]
print()
print(compiler.transform(ast))
parser.match()
|
|
# -*- coding: utf-8 -*-
from . import multielementbase
from . import element
from . import interaction
from . import types
from . import stoichiometry
from ..utils import listtools
from ..utils import instance
import numpy as np
import fisx
class Compound(multielementbase.MultiElementBase):
"""Interface to a compound"""
def __init__(
self, elements, frac, fractype=None, density=None, nrefrac=1, name=None
):
"""
Args:
elements(list): list of elements (["Fe","O"] or [element("Fe"),element("O")])
frac(list[float]): element fractions
fractype(types.fraction): element fraction type
density(num): compound density in g/cm^3
nrefrac(num): refractive index
name(Optional[str]): compound name
"""
# Compound name
if name is None:
self.name = str(id(self))
else:
self.name = name
self.nrefrac = float(nrefrac)
# Element mole fractions
if fractype == types.fraction.mole:
nfrac = frac # keep unnormalized!
elif fractype == types.fraction.volume:
# would be possible if the element densities in the compound were given
# (which is not the same as the pure element density), but that is unlikely to be provided
raise ValueError("Cannot create a compound from elemental volume fractions")
else:
elements = [element.Element(e) for e in elements]
MM = np.asarray([e.MM for e in elements])
nfrac = stoichiometry.frac_weight_to_mole(
np.asarray(frac), MM
) # normalized
# Elements (no duplicates)
self._compose_elements(elements, nfrac)
# Compound density
if density == 0 or density is None:
if len(self._elements) == 1:
self.density = next(iter(self._elements.keys())).density
else:
# rho = [e.density for e in self._elements]
# self.density = np.mean(rho) # not based on anything, just a value
if len(self._elements) == 0:
self.density = 0.0
else:
self.density = 1.0 # approx. density of water
else:
self.density = float(density)
self.isscatterer = True
def _compose_elements(self, elements, nfrac):
self._elements = {}
for e, n in zip(elements, nfrac):
if not isinstance(e, element.Element):
e = element.Element(e)
if e in self._elements:
self._elements[e] += float(n)
else:
self._elements[e] = float(n)
def __getstate__(self):
return {
"name": self.name,
"nrefrac": self.nrefrac,
"density": self.density,
"elements_keys": list(self._elements.keys()),
"elements_values": list(self._elements.values()),
"isscatterer": self.isscatterer,
}
def __setstate__(self, state):
self.name = state["name"]
self.nrefrac = state["nrefrac"]
self.density = state["density"]
self._elements = dict(zip(state["elements_keys"], state["elements_values"]))
self.isscatterer = state["isscatterer"]
def addelements(self, elements, fractions, fractype, density=None):
"""Add an element to the compound
Args:
elements(str or Element or list): "Fe" or Element("Fe")
fractions(num or list): element fractions
fractype(types.fraction): element fraction type
density(Optional(num)): new compound density in g/cm^3
"""
if fractype == types.fraction.volume:
raise ValueError("Cannot create a compound from elemental volume fractions")
if not instance.isarray(elements):
elements = [elements]
if not instance.isarray(fractions):
fractions = [fractions]
fractions = np.asarray(fractions)
if fractype == types.fraction.mole:
nfrac = self.equivalents()
elements = list(nfrac.keys()) + [element.Element(el) for el in elements]
nfrac = np.asarray(list(nfrac.values()))
nfrac = stoichiometry.add_frac(nfrac, fractions)
else:
wfrac = self.massfractions()
elements = list(wfrac.keys()) + [element.Element(el) for el in elements]
wfrac = np.asarray(list(wfrac.values()))
wfrac = stoichiometry.add_frac(wfrac, fractions)
MM = np.asarray([e.MM for e in elements])
nfrac = stoichiometry.frac_weight_to_mole(wfrac, MM)
self._compose_elements(elements, nfrac)
if density:
self.density = density
def change_fractions(self, dfrac, fractype):
"""Change the element fractions
Args:
dfrac(dict): element fractions
fractype(types.fraction): fraction type
"""
elements = list(dfrac.keys())
fractions = np.asarray(list(dfrac.values()))
# Element mole fractions
if fractype == types.fraction.mole:
nfrac = fractions # keep unnormalized!
elif fractype == types.fraction.volume:
# would be possible if the element densities in the compound were given
# (which is not the same as the pure element density), but that is unlikely to be provided
raise ValueError("Cannot create a compound from elemental volume fractions")
else:
MM = np.asarray([e.MM for e in elements])
nfrac = stoichiometry.frac_weight_to_mole(fractions, MM) # normalized
# Elements
self._compose_elements(elements, nfrac)
def __getitem__(self, el):
return self._elements[el]
@property
def elements(self):
return list(self._elements.keys())
@property
def parts(self):
return self._elements
def molarmass(self):
nfrac = self.equivalents()
MM = np.asarray([e.MM for e in nfrac])
nfrac = np.asarray(list(nfrac.values()))
return (MM * nfrac).sum()
def molarmasseff(self):
nfrac = self.molefractions()
MM = np.asarray([e.MM for e in nfrac])
nfrac = np.asarray(list(nfrac.values()))
return (MM * nfrac).sum()
@property
def Zeff(self):
nfrac = self.molefractions()
Z = np.asarray([e.Z for e in nfrac])
nfrac = np.asarray(list(nfrac.values()))
return (Z * nfrac).sum()
def molefractions(self):
nfrac = np.asarray(list(self._elements.values()))
nfrac /= nfrac.sum()
return dict(zip(self._elements.keys(), nfrac))
def equivalents(self):
return dict(self._elements)
def massfractions(self):
nfrac = self.equivalents()
MM = np.asarray([e.MM for e in nfrac])
wfrac = stoichiometry.frac_mole_to_weight(np.asarray(list(nfrac.values())), MM)
return dict(zip(nfrac.keys(), wfrac))
def elemental_molefractions(self):
return self.molefractions()
def elemental_equivalents(self):
return self.equivalents()
def elemental_massfractions(self):
return self.massfractions()
def fractions(self, fractype):
if fractype == types.fraction.mole:
return self.molefractions()
elif fractype == types.fraction.volume:
raise ValueError("Cannot calculate elemental volume fractions")
else:
return self.massfractions()
@property
def nelements(self):
return len(self._elements)
def markabsorber(self, symb=None, shells=None, fluolines=None, energybounds=None):
"""
Args:
symb(str): element symbol
"""
for e in self._elements:
if energybounds is None:
energybounds2 = None
else:
energybounds2 = energybounds
e.markabsorber(
symb=symb,
shells=shells,
fluolines=fluolines,
energybounds=energybounds2,
)
def unmarkabsorber(self):
for e in self._elements:
e.unmarkabsorber()
def hasabsorbers(self):
return any([e.isabsorber for e in self._elements])
def markscatterer(self, name=None):
"""
Args:
name(str): compound name
"""
if name is None:
self.isscatterer = True
else:
self.isscatterer = self == name
def unmarkscatterer(self):
self.isscatterer = False
def markinfo(self):
yield "{}".format(self.name)
yield " Scatterer: {}".format("yes" if self.isscatterer else "no")
for e in self._elements:
for s in e.markinfo():
yield " {}".format(s)
def _crosssection(self, method, E, fine=False, decomposed=False, **kwargs):
"""Calculate compound cross-sections"""
if self._cs_scattering(method) and not self.isscatterer:
if decomposed:
return {}
else:
return np.zeros_like(E, dtype=float)
e_wfrac = self.massfractions()
if hasattr(self, "structure") and fine:
environ = self
else:
environ = None
kwargs["environ"] = environ
if decomposed:
ret = {}
for e, w in e_wfrac.items():
cs = getattr(e, method)(E, **kwargs)
ret[e] = {"w": w, "cs": cs}
else:
if self._cs_dict(method):
ret = {}
for e, w in e_wfrac.items():
cs = getattr(e, method)(E, **kwargs)
if not cs:
continue
for k, v in cs.items():
ret[k] = ret.get(k, 0) + w * v
else:
ret = sum(
w * getattr(e, method)(E, **kwargs) for e, w in e_wfrac.items()
)
return ret
def get_energy(self, energyrange, defaultinc=1):
"""Get absolute energies (keV) from a relative energy range (eV)
Args:
energyrange(np.array): energy range relative to the absorber's edges (if any)
"""
for e in self._elements:
ret = e.get_energy(energyrange, defaultinc=defaultinc)
if ret is not None:
return ret
return None
def topymca(self, cfg, defaultthickness=1e-4):
r = self.massfractions()
massfractions = list(r.values())
names = ["{}1".format(e) for e in r]
matname = self.pymcaname
cfg["materials"][matname] = {
"Comment": self.pymcacomment,
"CompoundFraction": massfractions,
"Thickness": defaultthickness,
"Density": self.density,
"CompoundList": names,
}
return matname
def tofisx(self, cfg, defaultthickness=1e-4):
r = self.massfractions()
massfractions = list(r.values())
names = ["{}1".format(e) for e in r]
matname = self.pymcaname
o = fisx.Material(matname, self.density, defaultthickness, self.pymcacomment)
o.setCompositionFromLists(names, massfractions)
cfg.addMaterial(o, errorOnReplace=False)
return matname
def tocompound(self, name=None):
return self
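# Illustrative usage sketch (hypothetical values, not part of the module): the
# constructor documented above takes element symbols, fractions, a fraction type and a
# density. Mole fractions are kept unnormalized, so hematite can be written with its
# stoichiometric coefficients directly:
#
#   hematite = Compound(["Fe", "O"], [2, 3],
#                       fractype=types.fraction.mole,
#                       density=5.26, name="hematite")
#   hematite.molarmass()      # ~159.7 g/mol (sum over the unnormalized equivalents)
#   hematite.massfractions()  # {Fe: ~0.70, O: ~0.30}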
|
|
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from eventlet import Timeout
import swift.common.utils
class MessageTimeout(Timeout):
def __init__(self, seconds=None, msg=None):
Timeout.__init__(self, seconds=seconds)
self.msg = msg
def __str__(self):
return '%s: %s' % (Timeout.__str__(self), self.msg)
class SwiftException(Exception):
pass
class InvalidTimestamp(SwiftException):
pass
class DiskFileError(SwiftException):
pass
class DiskFileNotOpen(DiskFileError):
pass
class DiskFileQuarantined(DiskFileError):
pass
class DiskFileCollision(DiskFileError):
pass
class DiskFileNotExist(DiskFileError):
pass
class DiskFileDeleted(DiskFileNotExist):
def __init__(self, metadata=None):
self.metadata = metadata or {}
self.timestamp = swift.common.utils.Timestamp(
self.metadata.get('X-Timestamp', 0))
class DiskFileExpired(DiskFileDeleted):
pass
class DiskFileNoSpace(DiskFileError):
pass
class DiskFileDeviceUnavailable(DiskFileError):
pass
class DeviceUnavailable(SwiftException):
pass
class InvalidAccountInfo(SwiftException):
pass
class PathNotDir(OSError):
pass
class ChunkReadTimeout(Timeout):
pass
class ChunkWriteTimeout(Timeout):
pass
class ConnectionTimeout(Timeout):
pass
class DriveNotMounted(SwiftException):
pass
class LockTimeout(MessageTimeout):
pass
class RingBuilderError(SwiftException):
pass
class RingValidationError(RingBuilderError):
pass
class EmptyRingError(RingBuilderError):
pass
class DuplicateDeviceError(RingBuilderError):
pass
class UnPicklingError(SwiftException):
pass
class FileNotFoundError(SwiftException):
pass
class PermissionError(SwiftException):
pass
class ListingIterError(SwiftException):
pass
class ListingIterNotFound(ListingIterError):
pass
class ListingIterNotAuthorized(ListingIterError):
def __init__(self, aresp):
self.aresp = aresp
class SegmentError(SwiftException):
pass
class ReplicationException(Exception):
pass
class ReplicationLockTimeout(LockTimeout):
pass
class MimeInvalid(SwiftException):
pass
class ClientException(Exception):
def __init__(self, msg, http_scheme='', http_host='', http_port='',
http_path='', http_query='', http_status=0, http_reason='',
http_device='', http_response_content='', http_headers=None):
Exception.__init__(self, msg)
self.msg = msg
self.http_scheme = http_scheme
self.http_host = http_host
self.http_port = http_port
self.http_path = http_path
self.http_query = http_query
self.http_status = http_status
self.http_reason = http_reason
self.http_device = http_device
self.http_response_content = http_response_content
self.http_headers = http_headers or {}
def __str__(self):
a = self.msg
b = ''
if self.http_scheme:
b += '%s://' % self.http_scheme
if self.http_host:
b += self.http_host
if self.http_port:
b += ':%s' % self.http_port
if self.http_path:
b += self.http_path
if self.http_query:
b += '?%s' % self.http_query
if self.http_status:
if b:
b = '%s %s' % (b, self.http_status)
else:
b = str(self.http_status)
if self.http_reason:
if b:
b = '%s %s' % (b, self.http_reason)
else:
b = '- %s' % self.http_reason
if self.http_device:
if b:
b = '%s: device %s' % (b, self.http_device)
else:
b = 'device %s' % self.http_device
if self.http_response_content:
if len(self.http_response_content) <= 60:
b += ' %s' % self.http_response_content
else:
b += ' [first 60 chars of response] %s' \
% self.http_response_content[:60]
return b and '%s: %s' % (a, b) or a
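# Illustrative note (not part of the original module): ClientException.__str__ composes
# its message from whichever HTTP details were supplied. For a hypothetical
#   ClientException('Object GET failed', http_path='/v1/AUTH_test/c/o',
#                   http_status=404, http_reason='Not Found')
# str() yields 'Object GET failed: /v1/AUTH_test/c/o 404 Not Found'.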
|
|
"""The Elexa Guardian integration."""
import asyncio
from typing import Dict
from aioguardian import Client
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import ATTR_ATTRIBUTION, CONF_IP_ADDRESS, CONF_PORT
from homeassistant.core import HomeAssistant, callback
from homeassistant.helpers.dispatcher import async_dispatcher_send
from homeassistant.helpers.update_coordinator import (
CoordinatorEntity,
DataUpdateCoordinator,
)
from .const import (
API_SENSOR_PAIR_DUMP,
API_SENSOR_PAIRED_SENSOR_STATUS,
API_SYSTEM_DIAGNOSTICS,
API_SYSTEM_ONBOARD_SENSOR_STATUS,
API_VALVE_STATUS,
API_WIFI_STATUS,
CONF_UID,
DATA_CLIENT,
DATA_COORDINATOR,
DATA_PAIRED_SENSOR_MANAGER,
DATA_UNSUB_DISPATCHER_CONNECT,
DOMAIN,
LOGGER,
SIGNAL_PAIRED_SENSOR_COORDINATOR_ADDED,
)
from .util import GuardianDataUpdateCoordinator
DATA_LAST_SENSOR_PAIR_DUMP = "last_sensor_pair_dump"
PLATFORMS = ["binary_sensor", "sensor", "switch"]
async def async_setup(hass: HomeAssistant, config: dict) -> bool:
"""Set up the Elexa Guardian component."""
hass.data[DOMAIN] = {
DATA_CLIENT: {},
DATA_COORDINATOR: {},
DATA_LAST_SENSOR_PAIR_DUMP: {},
DATA_PAIRED_SENSOR_MANAGER: {},
DATA_UNSUB_DISPATCHER_CONNECT: {},
}
return True
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Set up Elexa Guardian from a config entry."""
client = hass.data[DOMAIN][DATA_CLIENT][entry.entry_id] = Client(
entry.data[CONF_IP_ADDRESS], port=entry.data[CONF_PORT]
)
hass.data[DOMAIN][DATA_COORDINATOR][entry.entry_id] = {
API_SENSOR_PAIRED_SENSOR_STATUS: {}
}
hass.data[DOMAIN][DATA_UNSUB_DISPATCHER_CONNECT][entry.entry_id] = []
# The valve controller's UDP-based API can't handle concurrent requests very well,
# so we use a lock to ensure that only one API request is reaching it at a time:
api_lock = asyncio.Lock()
# Set up DataUpdateCoordinators for the valve controller:
init_valve_controller_tasks = []
for api, api_coro in [
(API_SENSOR_PAIR_DUMP, client.sensor.pair_dump),
(API_SYSTEM_DIAGNOSTICS, client.system.diagnostics),
(API_SYSTEM_ONBOARD_SENSOR_STATUS, client.system.onboard_sensor_status),
(API_VALVE_STATUS, client.valve.status),
(API_WIFI_STATUS, client.wifi.status),
]:
coordinator = hass.data[DOMAIN][DATA_COORDINATOR][entry.entry_id][
api
] = GuardianDataUpdateCoordinator(
hass,
client=client,
api_name=api,
api_coro=api_coro,
api_lock=api_lock,
valve_controller_uid=entry.data[CONF_UID],
)
init_valve_controller_tasks.append(coordinator.async_refresh())
await asyncio.gather(*init_valve_controller_tasks)
# Set up an object to evaluate each batch of paired sensor UIDs and add/remove
# devices as appropriate:
paired_sensor_manager = hass.data[DOMAIN][DATA_PAIRED_SENSOR_MANAGER][
entry.entry_id
] = PairedSensorManager(hass, entry, client, api_lock)
await paired_sensor_manager.async_process_latest_paired_sensor_uids()
@callback
def async_process_paired_sensor_uids():
"""Define a callback for when new paired sensor data is received."""
hass.async_create_task(
paired_sensor_manager.async_process_latest_paired_sensor_uids()
)
hass.data[DOMAIN][DATA_COORDINATOR][entry.entry_id][
API_SENSOR_PAIR_DUMP
].async_add_listener(async_process_paired_sensor_uids)
# Set up all of the Guardian entity platforms:
for component in PLATFORMS:
hass.async_create_task(
hass.config_entries.async_forward_entry_setup(entry, component)
)
return True
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Unload a config entry."""
unload_ok = all(
await asyncio.gather(
*[
hass.config_entries.async_forward_entry_unload(entry, component)
for component in PLATFORMS
]
)
)
if unload_ok:
hass.data[DOMAIN][DATA_CLIENT].pop(entry.entry_id)
hass.data[DOMAIN][DATA_COORDINATOR].pop(entry.entry_id)
hass.data[DOMAIN][DATA_LAST_SENSOR_PAIR_DUMP].pop(entry.entry_id)
for unsub in hass.data[DOMAIN][DATA_UNSUB_DISPATCHER_CONNECT][entry.entry_id]:
unsub()
hass.data[DOMAIN][DATA_UNSUB_DISPATCHER_CONNECT].pop(entry.entry_id)
return unload_ok
class PairedSensorManager:
"""Define an object that manages the addition/removal of paired sensors."""
def __init__(
self,
hass: HomeAssistant,
entry: ConfigEntry,
client: Client,
api_lock: asyncio.Lock,
) -> None:
"""Initialize."""
self._api_lock = api_lock
self._client = client
self._entry = entry
self._hass = hass
self._listeners = []
self._paired_uids = set()
async def async_pair_sensor(self, uid: str) -> None:
"""Add a new paired sensor coordinator."""
LOGGER.debug("Adding paired sensor: %s", uid)
self._paired_uids.add(uid)
coordinator = self._hass.data[DOMAIN][DATA_COORDINATOR][self._entry.entry_id][
API_SENSOR_PAIRED_SENSOR_STATUS
][uid] = GuardianDataUpdateCoordinator(
self._hass,
client=self._client,
api_name=f"{API_SENSOR_PAIRED_SENSOR_STATUS}_{uid}",
api_coro=lambda: self._client.sensor.paired_sensor_status(uid),
api_lock=self._api_lock,
valve_controller_uid=self._entry.data[CONF_UID],
)
await coordinator.async_request_refresh()
async_dispatcher_send(
self._hass,
SIGNAL_PAIRED_SENSOR_COORDINATOR_ADDED.format(self._entry.data[CONF_UID]),
uid,
)
async def async_process_latest_paired_sensor_uids(self) -> None:
"""Process a list of new UIDs."""
try:
uids = set(
self._hass.data[DOMAIN][DATA_COORDINATOR][self._entry.entry_id][
API_SENSOR_PAIR_DUMP
].data["paired_uids"]
)
except KeyError:
# Sometimes the paired_uids key can fail to exist; the user can't do anything
# about it, so in this case, we quietly abort and return:
return
if uids == self._paired_uids:
return
old = self._paired_uids
new = self._paired_uids = set(uids)
tasks = [self.async_pair_sensor(uid) for uid in new.difference(old)]
tasks += [self.async_unpair_sensor(uid) for uid in old.difference(new)]
if tasks:
await asyncio.gather(*tasks)
async def async_unpair_sensor(self, uid: str) -> None:
"""Remove a paired sensor coordinator."""
LOGGER.debug("Removing paired sensor: %s", uid)
# Clear out objects related to this paired sensor:
self._paired_uids.remove(uid)
self._hass.data[DOMAIN][DATA_COORDINATOR][self._entry.entry_id][
API_SENSOR_PAIRED_SENSOR_STATUS
].pop(uid)
# Remove the paired sensor device from the device registry (which will
# clean up entities and the entity registry):
dev_reg = await self._hass.helpers.device_registry.async_get_registry()
device = dev_reg.async_get_or_create(
config_entry_id=self._entry.entry_id, identifiers={(DOMAIN, uid)}
)
dev_reg.async_remove_device(device.id)
class GuardianEntity(CoordinatorEntity):
"""Define a base Guardian entity."""
def __init__( # pylint: disable=super-init-not-called
self, entry: ConfigEntry, kind: str, name: str, device_class: str, icon: str
) -> None:
"""Initialize."""
self._attrs = {ATTR_ATTRIBUTION: "Data provided by Elexa"}
self._available = True
self._entry = entry
self._device_class = device_class
self._device_info = {"manufacturer": "Elexa"}
self._icon = icon
self._kind = kind
self._name = name
@property
def device_class(self) -> str:
"""Return the device class."""
return self._device_class
@property
def device_info(self) -> dict:
"""Return device registry information for this entity."""
return self._device_info
@property
def device_state_attributes(self) -> dict:
"""Return the state attributes."""
return self._attrs
@property
def icon(self) -> str:
"""Return the icon."""
return self._icon
@callback
def _async_update_from_latest_data(self):
"""Update the entity.
This should be extended by Guardian platforms.
"""
raise NotImplementedError
@callback
def _async_update_state_callback(self):
"""Update the entity's state."""
self._async_update_from_latest_data()
self.async_write_ha_state()
class PairedSensorEntity(GuardianEntity):
"""Define a Guardian paired sensor entity."""
def __init__(
self,
entry: ConfigEntry,
coordinator: DataUpdateCoordinator,
kind: str,
name: str,
device_class: str,
icon: str,
) -> None:
"""Initialize."""
super().__init__(entry, kind, name, device_class, icon)
self.coordinator = coordinator
self._paired_sensor_uid = coordinator.data["uid"]
self._device_info["identifiers"] = {(DOMAIN, self._paired_sensor_uid)}
self._device_info["name"] = f"Guardian Paired Sensor {self._paired_sensor_uid}"
self._device_info["via_device"] = (DOMAIN, self._entry.data[CONF_UID])
@property
def name(self) -> str:
"""Return the name of the entity."""
return f"Guardian Paired Sensor {self._paired_sensor_uid}: {self._name}"
@property
def unique_id(self):
"""Return the unique ID of the entity."""
return f"{self._paired_sensor_uid}_{self._kind}"
async def async_added_to_hass(self) -> None:
"""Perform tasks when the entity is added."""
self._async_update_from_latest_data()
class ValveControllerEntity(GuardianEntity):
"""Define a Guardian valve controller entity."""
def __init__(
self,
entry: ConfigEntry,
coordinators: Dict[str, DataUpdateCoordinator],
kind: str,
name: str,
device_class: str,
icon: str,
) -> None:
"""Initialize."""
super().__init__(entry, kind, name, device_class, icon)
self.coordinators = coordinators
self._device_info["identifiers"] = {(DOMAIN, self._entry.data[CONF_UID])}
self._device_info[
"name"
] = f"Guardian Valve Controller {self._entry.data[CONF_UID]}"
self._device_info["model"] = self.coordinators[API_SYSTEM_DIAGNOSTICS].data[
"firmware"
]
@property
def available(self) -> bool:
"""Return if entity is available."""
return any(coordinator.last_update_success for coordinator in self.coordinators.values())
@property
def name(self) -> str:
"""Return the name of the entity."""
return f"Guardian {self._entry.data[CONF_UID]}: {self._name}"
@property
def unique_id(self):
"""Return the unique ID of the entity."""
return f"{self._entry.data[CONF_UID]}_{self._kind}"
async def _async_continue_entity_setup(self):
"""Perform additional, internal tasks when the entity is about to be added.
This should be extended by Guardian platforms.
"""
raise NotImplementedError
@callback
def async_add_coordinator_update_listener(self, api: str) -> None:
"""Add a listener to a DataUpdateCoordinator based on the API referenced."""
self.async_on_remove(
self.coordinators[api].async_add_listener(self._async_update_state_callback)
)
async def async_added_to_hass(self) -> None:
"""Perform tasks when the entity is added."""
await self._async_continue_entity_setup()
self.async_add_coordinator_update_listener(API_SYSTEM_DIAGNOSTICS)
self._async_update_from_latest_data()
async def async_update(self) -> None:
"""Update the entity.
Only used by the generic entity update service.
"""
# Ignore manual update requests if the entity is disabled
if not self.enabled:
return
refresh_tasks = [
coordinator.async_request_refresh() for coordinator in self.coordinators.values()
]
await asyncio.gather(*refresh_tasks)
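# Illustrative sketch (not used by the integration itself): the api_lock created in
# async_setup_entry serializes access to the valve controller's UDP API, which cannot
# handle concurrent requests. The pattern is simply to hold the shared lock around every
# request; `request_coro` below is a hypothetical zero-argument coroutine factory
# standing in for a client call.
async def _locked_request_sketch(api_lock: asyncio.Lock, request_coro):
    """Run a single API request while holding the shared per-controller lock."""
    async with api_lock:
        return await request_coro()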
|
|
from __future__ import unicode_literals
import hashlib
import json
import os
import posixpath
import re
from collections import OrderedDict
from django.conf import settings
from django.contrib.staticfiles.utils import check_settings, matches_patterns
from django.core.cache import (
InvalidCacheBackendError, cache as default_cache, caches,
)
from django.core.exceptions import ImproperlyConfigured
from django.core.files.base import ContentFile
from django.core.files.storage import FileSystemStorage, get_storage_class
from django.utils.encoding import force_bytes, force_text
from django.utils.functional import LazyObject
from django.utils.six import iteritems
from django.utils.six.moves.urllib.parse import (
unquote, urldefrag, urlsplit, urlunsplit,
)
class StaticFilesStorage(FileSystemStorage):
"""
Standard file system storage for static files.
The defaults for ``location`` and ``base_url`` are
``STATIC_ROOT`` and ``STATIC_URL``.
"""
def __init__(self, location=None, base_url=None, *args, **kwargs):
if location is None:
location = settings.STATIC_ROOT
if base_url is None:
base_url = settings.STATIC_URL
check_settings(base_url)
super(StaticFilesStorage, self).__init__(location, base_url,
*args, **kwargs)
# FileSystemStorage falls back to MEDIA_ROOT when location
# is empty, so we restore the empty value.
if not location:
self.base_location = None
self.location = None
def path(self, name):
if not self.location:
raise ImproperlyConfigured("You're using the staticfiles app "
"without having set the STATIC_ROOT "
"setting to a filesystem path.")
return super(StaticFilesStorage, self).path(name)
class HashedFilesMixin(object):
default_template = """url("%s")"""
patterns = (
("*.css", (
r"""(url\(['"]{0,1}\s*(.*?)["']{0,1}\))""",
(r"""(@import\s*["']\s*(.*?)["'])""", """@import url("%s")"""),
)),
)
def __init__(self, *args, **kwargs):
super(HashedFilesMixin, self).__init__(*args, **kwargs)
self._patterns = OrderedDict()
self.hashed_files = {}
for extension, patterns in self.patterns:
for pattern in patterns:
if isinstance(pattern, (tuple, list)):
pattern, template = pattern
else:
template = self.default_template
compiled = re.compile(pattern, re.IGNORECASE)
self._patterns.setdefault(extension, []).append((compiled, template))
def file_hash(self, name, content=None):
"""
Return a hash of the file with the given name and optional content.
"""
if content is None:
return None
md5 = hashlib.md5()
for chunk in content.chunks():
md5.update(chunk)
return md5.hexdigest()[:12]
def hashed_name(self, name, content=None):
parsed_name = urlsplit(unquote(name))
clean_name = parsed_name.path.strip()
opened = False
if content is None:
if not self.exists(clean_name):
raise ValueError("The file '%s' could not be found with %r." %
(clean_name, self))
try:
content = self.open(clean_name)
except IOError:
# Handle directory paths and fragments
return name
opened = True
try:
file_hash = self.file_hash(clean_name, content)
finally:
if opened:
content.close()
path, filename = os.path.split(clean_name)
root, ext = os.path.splitext(filename)
if file_hash is not None:
file_hash = ".%s" % file_hash
hashed_name = os.path.join(path, "%s%s%s" %
(root, file_hash, ext))
unparsed_name = list(parsed_name)
unparsed_name[2] = hashed_name
# Special casing for a @font-face hack, like url(myfont.eot?#iefix")
# http://www.fontspring.com/blog/the-new-bulletproof-font-face-syntax
if '?#' in name and not unparsed_name[3]:
unparsed_name[2] += '?'
return urlunsplit(unparsed_name)
def url(self, name, force=False):
"""
Return the non-hashed URL in DEBUG mode.
"""
if settings.DEBUG and not force:
hashed_name, fragment = name, ''
else:
clean_name, fragment = urldefrag(name)
if urlsplit(clean_name).path.endswith('/'): # don't hash paths
hashed_name = name
else:
hashed_name = self.stored_name(clean_name)
final_url = super(HashedFilesMixin, self).url(hashed_name)
# Special casing for a @font-face hack, like url(myfont.eot?#iefix")
# http://www.fontspring.com/blog/the-new-bulletproof-font-face-syntax
query_fragment = '?#' in name # [sic!]
if fragment or query_fragment:
urlparts = list(urlsplit(final_url))
if fragment and not urlparts[4]:
urlparts[4] = fragment
if query_fragment and not urlparts[3]:
urlparts[2] += '?'
final_url = urlunsplit(urlparts)
return unquote(final_url)
def url_converter(self, name, template=None):
"""
Return the custom URL converter for the given file name.
"""
if template is None:
template = self.default_template
def converter(matchobj):
"""
Convert the matched URL to a normalized and hashed URL.
This requires figuring out which files the matched URL resolves
to and calling the url() method of the storage.
"""
matched, url = matchobj.groups()
# Ignore absolute/protocol-relative and data-uri URLs.
if re.match(r'^[a-z]+:', url):
return matched
# Ignore absolute URLs that don't point to a static file (dynamic
# CSS / JS?). Note that STATIC_URL cannot be empty.
if url.startswith('/') and not url.startswith(settings.STATIC_URL):
return matched
# Strip off the fragment so a path-like fragment won't interfere.
url_path, fragment = urldefrag(url)
if url_path.startswith('/'):
# Otherwise the condition above would have returned prematurely.
assert url_path.startswith(settings.STATIC_URL)
target_name = url_path[len(settings.STATIC_URL):]
else:
# We're using the posixpath module to mix paths and URLs conveniently.
source_name = name if os.sep == '/' else name.replace(os.sep, '/')
target_name = posixpath.join(posixpath.dirname(source_name), url_path)
# Determine the hashed name of the target file with the storage backend.
hashed_url = self.url(unquote(target_name), force=True)
transformed_url = '/'.join(url_path.split('/')[:-1] + hashed_url.split('/')[-1:])
# Restore the fragment that was stripped off earlier.
if fragment:
transformed_url += ('?#' if '?#' in url else '#') + fragment
# Return the hashed version to the file
return template % unquote(transformed_url)
return converter
def post_process(self, paths, dry_run=False, **options):
"""
Post process the given OrderedDict of files (called from collectstatic).
Processing is actually two separate operations:
1. renaming files to include a hash of their content for cache-busting,
and copying those files to the target storage.
2. adjusting files which contain references to other files so they
refer to the cache-busting filenames.
If either of these are performed on a file, then that file is considered
post-processed.
"""
# don't even dare to process the files if we're in dry run mode
if dry_run:
return
# where to store the new paths
hashed_files = OrderedDict()
# build a list of adjustable files
adjustable_paths = [
path for path in paths
if matches_patterns(path, self._patterns.keys())
]
# then sort the files by the directory level
def path_level(name):
return len(name.split(os.sep))
for name in sorted(paths.keys(), key=path_level, reverse=True):
# use the original, local file, not the copied-but-unprocessed
# file, which might be somewhere far away, like S3
storage, path = paths[name]
with storage.open(path) as original_file:
# generate the hash with the original content, even for
# adjustable files.
hashed_name = self.hashed_name(name, original_file)
# then get the original's file content..
if hasattr(original_file, 'seek'):
original_file.seek(0)
hashed_file_exists = self.exists(hashed_name)
processed = False
# ..to apply each replacement pattern to the content
if name in adjustable_paths:
content = original_file.read().decode(settings.FILE_CHARSET)
for extension, patterns in iteritems(self._patterns):
if matches_patterns(path, (extension,)):
for pattern, template in patterns:
converter = self.url_converter(name, template)
try:
content = pattern.sub(converter, content)
except ValueError as exc:
yield name, None, exc
if hashed_file_exists:
self.delete(hashed_name)
# then save the processed result
content_file = ContentFile(force_bytes(content))
saved_name = self._save(hashed_name, content_file)
hashed_name = force_text(self.clean_name(saved_name))
processed = True
else:
# or handle the case in which neither processing nor
# a change to the original file happened
if not hashed_file_exists:
processed = True
saved_name = self._save(hashed_name, original_file)
hashed_name = force_text(self.clean_name(saved_name))
# and then set the cache accordingly
hashed_files[self.hash_key(name)] = hashed_name
yield name, hashed_name, processed
# Finally store the processed paths
self.hashed_files.update(hashed_files)
def clean_name(self, name):
return name.replace('\\', '/')
def hash_key(self, name):
return name
def stored_name(self, name):
hash_key = self.hash_key(name)
cache_name = self.hashed_files.get(hash_key)
if cache_name is None:
cache_name = self.clean_name(self.hashed_name(name))
# store the hashed name if there was a miss, e.g.
# when the files are still processed
self.hashed_files[hash_key] = cache_name
return cache_name
class ManifestFilesMixin(HashedFilesMixin):
manifest_version = '1.0' # the manifest format standard
manifest_name = 'staticfiles.json'
def __init__(self, *args, **kwargs):
super(ManifestFilesMixin, self).__init__(*args, **kwargs)
self.hashed_files = self.load_manifest()
def read_manifest(self):
try:
with self.open(self.manifest_name) as manifest:
return manifest.read().decode('utf-8')
except IOError:
return None
def load_manifest(self):
content = self.read_manifest()
if content is None:
return OrderedDict()
try:
stored = json.loads(content, object_pairs_hook=OrderedDict)
except ValueError:
pass
else:
version = stored.get('version')
if version == '1.0':
return stored.get('paths', OrderedDict())
raise ValueError("Couldn't load manifest '%s' (version %s)" %
(self.manifest_name, self.manifest_version))
def post_process(self, *args, **kwargs):
self.hashed_files = OrderedDict()
all_post_processed = super(ManifestFilesMixin,
self).post_process(*args, **kwargs)
for post_processed in all_post_processed:
yield post_processed
self.save_manifest()
def save_manifest(self):
payload = {'paths': self.hashed_files, 'version': self.manifest_version}
if self.exists(self.manifest_name):
self.delete(self.manifest_name)
contents = json.dumps(payload).encode('utf-8')
self._save(self.manifest_name, ContentFile(contents))
class _MappingCache(object):
"""
A small dict-like wrapper for a given cache backend instance.
"""
def __init__(self, cache):
self.cache = cache
def __setitem__(self, key, value):
self.cache.set(key, value)
def __getitem__(self, key):
value = self.cache.get(key)
if value is None:
raise KeyError("Couldn't find a file name '%s'" % key)
return value
def clear(self):
self.cache.clear()
def update(self, data):
self.cache.set_many(data)
def get(self, key, default=None):
try:
return self[key]
except KeyError:
return default
class CachedFilesMixin(HashedFilesMixin):
def __init__(self, *args, **kwargs):
super(CachedFilesMixin, self).__init__(*args, **kwargs)
try:
self.hashed_files = _MappingCache(caches['staticfiles'])
except InvalidCacheBackendError:
# Use the default backend
self.hashed_files = _MappingCache(default_cache)
def hash_key(self, name):
key = hashlib.md5(force_bytes(self.clean_name(name))).hexdigest()
return 'staticfiles:%s' % key
class CachedStaticFilesStorage(CachedFilesMixin, StaticFilesStorage):
"""
A static file system storage backend which also saves
hashed copies of the files it saves.
"""
pass
class ManifestStaticFilesStorage(ManifestFilesMixin, StaticFilesStorage):
"""
A static file system storage backend which also saves
hashed copies of the files it saves.
"""
pass
class ConfiguredStorage(LazyObject):
def _setup(self):
self._wrapped = get_storage_class(settings.STATICFILES_STORAGE)()
staticfiles_storage = ConfiguredStorage()
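# For reference, a minimal sketch (file names are hypothetical) of the mapping that
# ManifestFilesMixin.save_manifest() persists in staticfiles.json and that
# stored_name() later consults: logical paths map to their content-hashed names.
#
#   {
#       "version": "1.0",
#       "paths": {
#           "css/base.css": "css/base.5e0040571e1a.css",
#           "js/app.js": "js/app.4a1b2c3d4e5f.js"
#       }
#   }
#
# CachedFilesMixin keeps the same logical-to-hashed mapping, but stores it in a cache
# backend keyed by the md5-based hash_key() defined above instead of a JSON file.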
|
|
from jsonrpc import ServiceProxy
import sys
import string
import getpass
# ===== BEGIN USER SETTINGS =====
# if you do not set these you will be prompted for a password for every command
rpcuser = ""
rpcpass = ""
# ====== END USER SETTINGS ======
if rpcpass == "":
access = ServiceProxy("http://127.0.0.1:7332")
else:
access = ServiceProxy("http://"+rpcuser+":"+rpcpass+"@127.0.0.1:7332")
cmd = sys.argv[1].lower()
if cmd == "backupwallet":
try:
path = raw_input("Enter destination path/filename: ")
print access.backupwallet(path)
except:
print "\n---An error occurred---\n"
elif cmd == "encryptwallet":
try:
pwd = getpass.getpass(prompt="Enter passphrase: ")
pwd2 = getpass.getpass(prompt="Repeat passphrase: ")
if pwd == pwd2:
access.encryptwallet(pwd)
print "\n---Wallet encrypted. Server stopping, restart to run with encrypted wallet---\n"
else:
print "\n---Passphrases do not match---\n"
except:
print "\n---An error occurred---\n"
elif cmd == "getaccount":
try:
addr = raw_input("Enter a Fullcoin address: ")
print access.getaccount(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccountaddress":
try:
acct = raw_input("Enter an account name: ")
print access.getaccountaddress(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getaddressesbyaccount":
try:
acct = raw_input("Enter an account name: ")
print access.getaddressesbyaccount(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getbalance":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getbalance(acct, mc)
except:
print access.getbalance()
except:
print "\n---An error occurred---\n"
elif cmd == "getblockbycount":
try:
height = raw_input("Height: ")
print access.getblockbycount(height)
except:
print "\n---An error occurred---\n"
elif cmd == "getblockcount":
try:
print access.getblockcount()
except:
print "\n---An error occurred---\n"
elif cmd == "getblocknumber":
try:
print access.getblocknumber()
except:
print "\n---An error occurred---\n"
elif cmd == "getconnectioncount":
try:
print access.getconnectioncount()
except:
print "\n---An error occurred---\n"
elif cmd == "getdifficulty":
try:
print access.getdifficulty()
except:
print "\n---An error occurred---\n"
elif cmd == "getgenerate":
try:
print access.getgenerate()
except:
print "\n---An error occurred---\n"
elif cmd == "gethashespersec":
try:
print access.gethashespersec()
except:
print "\n---An error occurred---\n"
elif cmd == "getinfo":
try:
print access.getinfo()
except:
print "\n---An error occurred---\n"
elif cmd == "getnewaddress":
try:
acct = raw_input("Enter an account name: ")
try:
print access.getnewaddress(acct)
except:
print access.getnewaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaccount":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaccount(acct, mc)
except:
print access.getreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaddress":
try:
addr = raw_input("Enter a Fullcoin address (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaddress(addr, mc)
except:
print access.getreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "gettransaction":
try:
txid = raw_input("Enter a transaction ID: ")
print access.gettransaction(txid)
except:
print "\n---An error occurred---\n"
elif cmd == "getwork":
try:
data = raw_input("Data (optional): ")
try:
print access.getwork(data)
except:
print access.getwork()
except:
print "\n---An error occurred---\n"
elif cmd == "help":
try:
cmd = raw_input("Command (optional): ")
try:
print access.help(cmd)
except:
print access.help()
except:
print "\n---An error occurred---\n"
elif cmd == "listaccounts":
try:
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.listaccounts(mc)
except:
print access.listaccounts()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaccount":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaccount(mc, incemp)
except:
print access.listreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaddress":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaddress(mc, incemp)
except:
print access.listreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "listtransactions":
try:
acct = raw_input("Account (optional): ")
count = raw_input("Number of transactions (optional): ")
frm = raw_input("Skip (optional):")
try:
print access.listtransactions(acct, count, frm)
except:
print access.listtransactions()
except:
print "\n---An error occurred---\n"
elif cmd == "move":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.move(frm, to, amt, mc, comment)
except:
print access.move(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendfrom":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendfrom(frm, to, amt, mc, comment, commentto)
except:
print access.sendfrom(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendmany":
try:
frm = raw_input("From: ")
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.sendmany(frm,to,mc,comment)
except:
print access.sendmany(frm,to)
except:
print "\n---An error occurred---\n"
elif cmd == "sendtoaddress":
try:
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
amt = raw_input("Amount:")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendtoaddress(to,amt,comment,commentto)
except:
print access.sendtoaddress(to,amt)
except:
print "\n---An error occurred---\n"
elif cmd == "setaccount":
try:
addr = raw_input("Address: ")
acct = raw_input("Account:")
print access.setaccount(addr,acct)
except:
print "\n---An error occurred---\n"
elif cmd == "setgenerate":
try:
gen= raw_input("Generate? (true/false): ")
cpus = raw_input("Max processors/cores (-1 for unlimited, optional):")
try:
print access.setgenerate(gen, cpus)
except:
print access.setgenerate(gen)
except:
print "\n---An error occurred---\n"
elif cmd == "settxfee":
try:
amt = raw_input("Amount:")
print access.settxfee(amt)
except:
print "\n---An error occurred---\n"
elif cmd == "stop":
try:
print access.stop()
except:
print "\n---An error occurred---\n"
elif cmd == "validateaddress":
try:
addr = raw_input("Address: ")
print access.validateaddress(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrase":
try:
pwd = getpass.getpass(prompt="Enter wallet passphrase: ")
access.walletpassphrase(pwd, 60)
print "\n---Wallet unlocked---\n"
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrasechange":
try:
pwd = getpass.getpass(prompt="Enter old wallet passphrase: ")
pwd2 = getpass.getpass(prompt="Enter new wallet passphrase: ")
access.walletpassphrasechange(pwd, pwd2)
print
print "\n---Passphrase changed---\n"
except:
print
print "\n---An error occurred---\n"
print
else:
print "Command not found or not supported"
|
|
# Lint as: python2, python3
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for mobilenet_v2."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import unittest
import numpy as np
from six.moves import zip
import tensorflow.compat.v1 as tf
from google.protobuf import text_format
from object_detection.builders import hyperparams_builder
from object_detection.models.keras_models import mobilenet_v2
from object_detection.models.keras_models import model_utils
from object_detection.models.keras_models import test_utils
from object_detection.protos import hyperparams_pb2
from object_detection.utils import test_case
from object_detection.utils import tf_version
_layers_to_check = [
'Conv1_relu',
'block_1_expand_relu', 'block_1_depthwise_relu', 'block_1_project_BN',
'block_2_expand_relu', 'block_2_depthwise_relu', 'block_2_project_BN',
'block_3_expand_relu', 'block_3_depthwise_relu', 'block_3_project_BN',
'block_4_expand_relu', 'block_4_depthwise_relu', 'block_4_project_BN',
'block_5_expand_relu', 'block_5_depthwise_relu', 'block_5_project_BN',
'block_6_expand_relu', 'block_6_depthwise_relu', 'block_6_project_BN',
'block_7_expand_relu', 'block_7_depthwise_relu', 'block_7_project_BN',
'block_8_expand_relu', 'block_8_depthwise_relu', 'block_8_project_BN',
'block_9_expand_relu', 'block_9_depthwise_relu', 'block_9_project_BN',
'block_10_expand_relu', 'block_10_depthwise_relu', 'block_10_project_BN',
'block_11_expand_relu', 'block_11_depthwise_relu', 'block_11_project_BN',
'block_12_expand_relu', 'block_12_depthwise_relu', 'block_12_project_BN',
'block_13_expand_relu', 'block_13_depthwise_relu', 'block_13_project_BN',
'block_14_expand_relu', 'block_14_depthwise_relu', 'block_14_project_BN',
'block_15_expand_relu', 'block_15_depthwise_relu', 'block_15_project_BN',
'block_16_expand_relu', 'block_16_depthwise_relu', 'block_16_project_BN',
'out_relu']
@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.')
class MobilenetV2Test(test_case.TestCase):
def _build_conv_hyperparams(self):
conv_hyperparams = hyperparams_pb2.Hyperparams()
conv_hyperparams_text_proto = """
activation: RELU_6
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
batch_norm {
train: true,
scale: false,
center: true,
decay: 0.2,
epsilon: 0.1,
}
"""
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams)
return hyperparams_builder.KerasLayerHyperparams(conv_hyperparams)
def _create_application_with_layer_outputs(
self, layer_names, batchnorm_training,
conv_hyperparams=None,
use_explicit_padding=False,
alpha=1.0,
min_depth=None,
conv_defs=None):
"""Constructs Keras mobilenetv2 that extracts intermediate layer outputs."""
# Have to clear the Keras backend to ensure isolation in layer naming
tf.keras.backend.clear_session()
if not layer_names:
layer_names = _layers_to_check
full_model = mobilenet_v2.mobilenet_v2(
batchnorm_training=batchnorm_training,
conv_hyperparams=conv_hyperparams,
weights=None,
use_explicit_padding=use_explicit_padding,
alpha=alpha,
min_depth=min_depth,
include_top=False,
conv_defs=conv_defs)
layer_outputs = [full_model.get_layer(name=layer).output
for layer in layer_names]
return tf.keras.Model(
inputs=full_model.inputs,
outputs=layer_outputs)
def _check_returns_correct_shape(
self, batch_size, image_height, image_width, depth_multiplier,
expected_feature_map_shapes, use_explicit_padding=False, min_depth=None,
layer_names=None, conv_defs=None):
model = self._create_application_with_layer_outputs(
layer_names=layer_names,
batchnorm_training=False,
use_explicit_padding=use_explicit_padding,
min_depth=min_depth,
alpha=depth_multiplier,
conv_defs=conv_defs)
image_tensor = np.random.rand(batch_size, image_height, image_width,
3).astype(np.float32)
feature_maps = model([image_tensor])
for feature_map, expected_shape in zip(feature_maps,
expected_feature_map_shapes):
self.assertAllEqual(feature_map.shape, expected_shape)
def _check_returns_correct_shapes_with_dynamic_inputs(
self, batch_size, image_height, image_width, depth_multiplier,
expected_feature_map_shapes, use_explicit_padding=False,
layer_names=None):
height = tf.random.uniform([], minval=image_height, maxval=image_height+1,
dtype=tf.int32)
width = tf.random.uniform([], minval=image_width, maxval=image_width+1,
dtype=tf.int32)
image_tensor = tf.random.uniform([batch_size, height, width,
3], dtype=tf.float32)
model = self._create_application_with_layer_outputs(
layer_names=layer_names,
batchnorm_training=False, use_explicit_padding=use_explicit_padding,
alpha=depth_multiplier)
feature_maps = model(image_tensor)
for feature_map, expected_shape in zip(feature_maps,
expected_feature_map_shapes):
self.assertAllEqual(feature_map.shape, expected_shape)
def _get_variables(self, depth_multiplier, layer_names=None):
tf.keras.backend.clear_session()
model = self._create_application_with_layer_outputs(
layer_names=layer_names,
batchnorm_training=False, use_explicit_padding=False,
alpha=depth_multiplier)
preprocessed_inputs = tf.random.uniform([2, 40, 40, 3])
model(preprocessed_inputs)
return model.variables
def test_returns_correct_shapes_128(self):
image_height = 128
image_width = 128
depth_multiplier = 1.0
expected_feature_map_shape = (
test_utils.moblenet_v2_expected_feature_map_shape_128)
self._check_returns_correct_shape(
2, image_height, image_width, depth_multiplier,
expected_feature_map_shape)
def test_returns_correct_shapes_128_explicit_padding(
self):
image_height = 128
image_width = 128
depth_multiplier = 1.0
expected_feature_map_shape = (
test_utils.moblenet_v2_expected_feature_map_shape_128_explicit_padding)
self._check_returns_correct_shape(
2, image_height, image_width, depth_multiplier,
expected_feature_map_shape, use_explicit_padding=True)
def test_returns_correct_shapes_with_dynamic_inputs(
self):
image_height = 128
image_width = 128
depth_multiplier = 1.0
expected_feature_map_shape = (
test_utils.mobilenet_v2_expected_feature_map_shape_with_dynamic_inputs)
self._check_returns_correct_shapes_with_dynamic_inputs(
2, image_height, image_width, depth_multiplier,
expected_feature_map_shape)
def test_returns_correct_shapes_299(self):
image_height = 299
image_width = 299
depth_multiplier = 1.0
expected_feature_map_shape = (
test_utils.moblenet_v2_expected_feature_map_shape_299)
self._check_returns_correct_shape(
2, image_height, image_width, depth_multiplier,
expected_feature_map_shape)
def test_returns_correct_shapes_enforcing_min_depth(
self):
image_height = 299
image_width = 299
depth_multiplier = 0.5**12
expected_feature_map_shape = (
test_utils.moblenet_v2_expected_feature_map_shape_enforcing_min_depth)
self._check_returns_correct_shape(
2, image_height, image_width, depth_multiplier,
expected_feature_map_shape, min_depth=32)
def test_returns_correct_shapes_with_conv_defs(
self):
image_height = 299
image_width = 299
depth_multiplier = 1.0
conv_1 = model_utils.ConvDefs(
conv_name='Conv_1', filters=256)
conv_defs = [conv_1]
expected_feature_map_shape = (
test_utils.moblenet_v2_expected_feature_map_shape_with_conv_defs)
self._check_returns_correct_shape(
2, image_height, image_width, depth_multiplier,
expected_feature_map_shape, conv_defs=conv_defs)
def test_hyperparam_override(self):
hyperparams = self._build_conv_hyperparams()
model = mobilenet_v2.mobilenet_v2(
batchnorm_training=True,
conv_hyperparams=hyperparams,
weights=None,
use_explicit_padding=False,
alpha=1.0,
min_depth=32,
include_top=False)
hyperparams.params()
bn_layer = model.get_layer(name='block_5_project_BN')
self.assertAllClose(bn_layer.momentum, 0.2)
self.assertAllClose(bn_layer.epsilon, 0.1)
def test_variable_count(self):
depth_multiplier = 1
variables = self._get_variables(depth_multiplier)
self.assertEqual(len(variables), 260)
if __name__ == '__main__':
tf.test.main()
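# A small reference sketch (standard tf.keras API, nothing specific to this test) of
# the intermediate-output pattern used by _create_application_with_layer_outputs:
# wrap an existing model so that calling it yields the activations of named layers.
def _layer_output_model(model, layer_names):
    outputs = [model.get_layer(name=n).output for n in layer_names]
    return tf.keras.Model(inputs=model.inputs, outputs=outputs)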
|
|
import configparser
import itertools
import logging
import os
import pathlib
import re
from collections.abc import Mapping
from mopidy.config import keyring
from mopidy.config.schemas import ConfigSchema, MapConfigSchema
from mopidy.config.types import (
Boolean,
ConfigValue,
Deprecated,
DeprecatedValue,
Hostname,
Integer,
List,
LogColor,
LogLevel,
Path,
Port,
Secret,
String,
)
from mopidy.internal import path, versioning
__all__ = [
# TODO List everything that is reexported, not just the unused parts.
"ConfigValue",
"List",
]
logger = logging.getLogger(__name__)
_core_schema = ConfigSchema("core")
_core_schema["cache_dir"] = Path()
_core_schema["config_dir"] = Path()
_core_schema["data_dir"] = Path()
# MPD supports at most 10k tracks, some clients segfault when this is exceeded.
_core_schema["max_tracklist_length"] = Integer(minimum=1)
_core_schema["restore_state"] = Boolean(optional=True)
_logging_schema = ConfigSchema("logging")
_logging_schema["verbosity"] = Integer(minimum=-1, maximum=4)
_logging_schema["format"] = String()
_logging_schema["color"] = Boolean()
_logging_schema["console_format"] = Deprecated()
_logging_schema["debug_format"] = Deprecated()
_logging_schema["debug_file"] = Deprecated()
_logging_schema["config_file"] = Path(optional=True)
_loglevels_schema = MapConfigSchema("loglevels", LogLevel())
_logcolors_schema = MapConfigSchema("logcolors", LogColor())
_audio_schema = ConfigSchema("audio")
_audio_schema["mixer"] = String()
_audio_schema["mixer_track"] = Deprecated()
_audio_schema["mixer_volume"] = Integer(optional=True, minimum=0, maximum=100)
_audio_schema["output"] = String()
_audio_schema["visualizer"] = Deprecated()
_audio_schema["buffer_time"] = Integer(optional=True, minimum=1)
_proxy_schema = ConfigSchema("proxy")
_proxy_schema["scheme"] = String(
optional=True, choices=["http", "https", "socks4", "socks5"]
)
_proxy_schema["hostname"] = Hostname(optional=True)
_proxy_schema["port"] = Port(optional=True)
_proxy_schema["username"] = String(optional=True)
_proxy_schema["password"] = Secret(optional=True)
# NOTE: if multiple outputs are ever supported, we may need something like
# _outputs_schema = config.AudioOutputConfigSchema()
_schemas = [
_core_schema,
_logging_schema,
_loglevels_schema,
_logcolors_schema,
_audio_schema,
_proxy_schema,
]
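# For illustration (values are hypothetical), a config fragment that the proxy
# schema above would accept:
#
#   [proxy]
#   scheme = http
#   hostname = proxy.example.com
#   port = 8080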
_INITIAL_HELP = """
# For further information about options in this file see:
# https://docs.mopidy.com/
#
# The initial commented out values reflect the defaults as of:
# {versions}
#
# Available options and defaults might have changed since then,
# run `mopidy config` to see the current effective config and
# `mopidy --version` to check the current version.
"""
def read(config_file):
"""Helper to load config defaults in same way across core and extensions"""
return pathlib.Path(config_file).read_text(errors="surrogateescape")
def load(files, ext_schemas, ext_defaults, overrides):
config_dir = pathlib.Path(__file__).parent
defaults = [read(config_dir / "default.conf")]
defaults.extend(ext_defaults)
raw_config = _load(files, defaults, keyring.fetch() + (overrides or []))
schemas = _schemas[:]
schemas.extend(ext_schemas)
return _validate(raw_config, schemas)
def format(config, ext_schemas, comments=None, display=True):
schemas = _schemas[:]
schemas.extend(ext_schemas)
return _format(config, comments or {}, schemas, display, False)
def format_initial(extensions_data):
config_dir = pathlib.Path(__file__).parent
defaults = [read(config_dir / "default.conf")]
defaults.extend(d.extension.get_default_config() for d in extensions_data)
raw_config = _load([], defaults, [])
schemas = _schemas[:]
schemas.extend(d.extension.get_config_schema() for d in extensions_data)
config, errors = _validate(raw_config, schemas)
versions = [f"Mopidy {versioning.get_version()}"]
extensions_data = sorted(
extensions_data, key=lambda d: d.extension.dist_name
)
for data in extensions_data:
versions.append(f"{data.extension.dist_name} {data.extension.version}")
header = _INITIAL_HELP.strip().format(versions="\n# ".join(versions))
formatted_config = _format(
config=config, comments={}, schemas=schemas, display=False, disable=True
)
return header + "\n\n" + formatted_config
def _load(files, defaults, overrides):
parser = configparser.RawConfigParser()
# TODO: simply return path to config file for defaults so we can load it
# all in the same way?
logger.info("Loading config from builtin defaults")
for default in defaults:
if isinstance(default, bytes):
default = default.decode()
parser.read_string(default)
# Load config from a series of config files
for f in files:
f = path.expand_path(f)
if f.is_dir():
for g in f.iterdir():
if g.is_file() and g.suffix == ".conf":
_load_file(parser, g.resolve())
else:
_load_file(parser, f.resolve())
raw_config = {}
for section in parser.sections():
raw_config[section] = dict(parser.items(section))
logger.info("Loading config from command line options")
for section, key, value in overrides:
raw_config.setdefault(section, {})[key] = value
return raw_config
def _load_file(parser, file_path):
if not file_path.exists():
logger.debug(
f"Loading config from {file_path.as_uri()} failed; "
f"it does not exist"
)
return
if not os.access(str(file_path), os.R_OK):
logger.warning(
f"Loading config from file_path.as_uri() failed; "
f"read permission missing"
)
return
try:
logger.info(f"Loading config from {file_path.as_uri()}")
with file_path.open("r") as fh:
parser.read_file(fh)
except configparser.MissingSectionHeaderError:
logger.warning(
f"Loading config from {file_path.as_uri()} failed; "
f"it does not have a config section"
)
except configparser.ParsingError as e:
linenos = ", ".join(str(lineno) for lineno, line in e.errors)
logger.warning(
f"Config file {file_path.as_uri()} has errors; "
f"line {linenos} has been ignored"
)
except OSError:
# TODO: if this is the initial load of logging config we might not
# have a logger at this point, we might want to handle this better.
logger.debug(f"Config file {file_path.as_uri()} not found; skipping")
def _validate(raw_config, schemas):
# Get validated config
config = {}
errors = {}
sections = set(raw_config)
for schema in schemas:
sections.discard(schema.name)
values = raw_config.get(schema.name, {})
result, error = schema.deserialize(values)
if error:
errors[schema.name] = error
if result:
config[schema.name] = result
for section in sections:
logger.debug(f"Ignoring unknown config section: {section}")
return config, errors
def _format(config, comments, schemas, display, disable):
output = []
for schema in schemas:
serialized = schema.serialize(
config.get(schema.name, {}), display=display
)
if not serialized:
continue
output.append(f"[{schema.name}]")
for key, value in serialized.items():
if isinstance(value, DeprecatedValue):
continue
comment = comments.get(schema.name, {}).get(key, "")
output.append(f"{key} =")
if value is not None:
output[-1] += " " + value
if comment:
output[-1] += " ; " + comment.capitalize()
if disable:
output[-1] = re.sub(r"^", "#", output[-1], flags=re.M)
output.append("")
return "\n".join(output).strip()
def _preprocess(config_string):
"""Convert a raw config into a form that preserves comments etc."""
results = ["[__COMMENTS__]"]
counter = itertools.count(0)
section_re = re.compile(r"^(\[[^\]]+\])\s*(.+)$")
blank_line_re = re.compile(r"^\s*$")
comment_re = re.compile(r"^(#|;)")
inline_comment_re = re.compile(r" ;")
def newlines(match):
return f"__BLANK{next(counter):d}__ ="
def comments(match):
if match.group(1) == "#":
return f"__HASH{next(counter):d}__ ="
elif match.group(1) == ";":
return f"__SEMICOLON{next(counter):d}__ ="
def inlinecomments(match):
return f"\n__INLINE{next(counter):d}__ ="
def sections(match):
return (
f"{match.group(1)}\n__SECTION{next(counter):d}__ = {match.group(2)}"
)
for line in config_string.splitlines():
line = blank_line_re.sub(newlines, line)
line = section_re.sub(sections, line)
line = comment_re.sub(comments, line)
line = inline_comment_re.sub(inlinecomments, line)
results.append(line)
return "\n".join(results)
def _postprocess(config_string):
"""Converts a preprocessed config back to original form."""
flags = re.IGNORECASE | re.MULTILINE
result = re.sub(r"^\[__COMMENTS__\](\n|$)", "", config_string, flags=flags)
result = re.sub(r"\n__INLINE\d+__ =(.*)$", r" ;\g<1>", result, flags=flags)
result = re.sub(r"^__HASH\d+__ =(.*)$", r"#\g<1>", result, flags=flags)
result = re.sub(r"^__SEMICOLON\d+__ =(.*)$", r";\g<1>", result, flags=flags)
result = re.sub(r"\n__SECTION\d+__ =(.*)$", r"\g<1>", result, flags=flags)
result = re.sub(r"^__BLANK\d+__ =$", "", result, flags=flags)
return result
class Proxy(Mapping):
def __init__(self, data):
self._data = data
def __getitem__(self, key):
item = self._data.__getitem__(key)
if isinstance(item, dict):
return Proxy(item)
return item
def __iter__(self):
return self._data.__iter__()
def __len__(self):
return self._data.__len__()
def __repr__(self):
return f"Proxy({self._data!r})"
|
|
import warnings
import numpy as np
import numpy.linalg as la
from scipy import sparse
from distutils.version import LooseVersion
from sklearn.utils.testing import assert_almost_equal, clean_warning_registry
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_less_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regex
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import assert_no_warnings
from sklearn.utils.sparsefuncs import mean_variance_axis
from sklearn.preprocessing.data import _transform_selected
from sklearn.preprocessing.data import Binarizer
from sklearn.preprocessing.data import KernelCenterer
from sklearn.preprocessing.data import Normalizer
from sklearn.preprocessing.data import normalize
from sklearn.preprocessing.data import OneHotEncoder
from sklearn.preprocessing.data import StandardScaler
from sklearn.preprocessing.data import scale
from sklearn.preprocessing.data import MinMaxScaler
from sklearn.preprocessing.data import add_dummy_feature
from sklearn.preprocessing.data import PolynomialFeatures
from sklearn import datasets
iris = datasets.load_iris()
def toarray(a):
if hasattr(a, "toarray"):
a = a.toarray()
return a
def test_polynomial_features():
# Test Polynomial Features
X1 = np.arange(6)[:, np.newaxis]
P1 = np.hstack([np.ones_like(X1),
X1, X1 ** 2, X1 ** 3])
deg1 = 3
X2 = np.arange(6).reshape((3, 2))
x1 = X2[:, :1]
x2 = X2[:, 1:]
P2 = np.hstack([x1 ** 0 * x2 ** 0,
x1 ** 1 * x2 ** 0,
x1 ** 0 * x2 ** 1,
x1 ** 2 * x2 ** 0,
x1 ** 1 * x2 ** 1,
x1 ** 0 * x2 ** 2])
deg2 = 2
for (deg, X, P) in [(deg1, X1, P1), (deg2, X2, P2)]:
P_test = PolynomialFeatures(deg, include_bias=True).fit_transform(X)
assert_array_almost_equal(P_test, P)
P_test = PolynomialFeatures(deg, include_bias=False).fit_transform(X)
assert_array_almost_equal(P_test, P[:, 1:])
interact = PolynomialFeatures(2, interaction_only=True, include_bias=True)
X_poly = interact.fit_transform(X)
assert_array_almost_equal(X_poly, P2[:, [0, 1, 2, 4]])
def test_scaler_1d():
# Test scaling of dataset along single axis
rng = np.random.RandomState(0)
X = rng.randn(5)
X_orig_copy = X.copy()
scaler = StandardScaler()
X_scaled = scaler.fit(X).transform(X, copy=False)
assert_array_almost_equal(X_scaled.mean(axis=0), 0.0)
assert_array_almost_equal(X_scaled.std(axis=0), 1.0)
# check inverse transform
X_scaled_back = scaler.inverse_transform(X_scaled)
assert_array_almost_equal(X_scaled_back, X_orig_copy)
# Test with 1D list
X = [0., 1., 2, 0.4, 1.]
scaler = StandardScaler()
X_scaled = scaler.fit(X).transform(X, copy=False)
assert_array_almost_equal(X_scaled.mean(axis=0), 0.0)
assert_array_almost_equal(X_scaled.std(axis=0), 1.0)
X_scaled = scale(X)
assert_array_almost_equal(X_scaled.mean(axis=0), 0.0)
assert_array_almost_equal(X_scaled.std(axis=0), 1.0)
X = np.ones(5)
assert_array_equal(scale(X, with_mean=False), X)
def test_standard_scaler_numerical_stability():
"""Test numerical stability of scaling"""
# np.log(1e-5) is used because its floating point representation
# was empirically found to cause numerical problems with np.mean & np.std.
x = np.zeros(8, dtype=np.float64) + np.log(1e-5, dtype=np.float64)
if LooseVersion(np.__version__) >= LooseVersion('1.9'):
# This does not raise a warning as the number of samples is too low
# to trigger the problem in recent numpy
x_scaled = assert_no_warnings(scale, x)
assert_array_almost_equal(scale(x), np.zeros(8))
else:
w = "standard deviation of the data is probably very close to 0"
x_scaled = assert_warns_message(UserWarning, w, scale, x)
assert_array_almost_equal(x_scaled, np.zeros(8))
# with 2 more samples, the std computation runs into numerical issues:
x = np.zeros(10, dtype=np.float64) + np.log(1e-5, dtype=np.float64)
w = "standard deviation of the data is probably very close to 0"
x_scaled = assert_warns_message(UserWarning, w, scale, x)
assert_array_almost_equal(x_scaled, np.zeros(10))
x = np.ones(10, dtype=np.float64) * 1e-100
x_small_scaled = assert_no_warnings(scale, x)
assert_array_almost_equal(x_small_scaled, np.zeros(10))
# Large values can cause (often recoverable) numerical stability issues:
x_big = np.ones(10, dtype=np.float64) * 1e100
w = "Dataset may contain too large values"
x_big_scaled = assert_warns_message(UserWarning, w, scale, x_big)
assert_array_almost_equal(x_big_scaled, np.zeros(10))
assert_array_almost_equal(x_big_scaled, x_small_scaled)
x_big_centered = assert_warns_message(UserWarning, w, scale, x_big,
with_std=False)
assert_array_almost_equal(x_big_centered, np.zeros(10))
assert_array_almost_equal(x_big_centered, x_small_scaled)
def test_scaler_2d_arrays():
# Test scaling of 2d array along first axis
rng = np.random.RandomState(0)
X = rng.randn(4, 5)
X[:, 0] = 0.0 # first feature is always zero
scaler = StandardScaler()
X_scaled = scaler.fit(X).transform(X, copy=True)
assert_false(np.any(np.isnan(X_scaled)))
assert_array_almost_equal(X_scaled.mean(axis=0), 5 * [0.0])
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
# Check that X has been copied
assert_true(X_scaled is not X)
# check inverse transform
X_scaled_back = scaler.inverse_transform(X_scaled)
assert_true(X_scaled_back is not X)
assert_true(X_scaled_back is not X_scaled)
assert_array_almost_equal(X_scaled_back, X)
X_scaled = scale(X, axis=1, with_std=False)
assert_false(np.any(np.isnan(X_scaled)))
assert_array_almost_equal(X_scaled.mean(axis=1), 4 * [0.0])
X_scaled = scale(X, axis=1, with_std=True)
assert_false(np.any(np.isnan(X_scaled)))
assert_array_almost_equal(X_scaled.mean(axis=1), 4 * [0.0])
assert_array_almost_equal(X_scaled.std(axis=1), 4 * [1.0])
# Check that the data hasn't been modified
assert_true(X_scaled is not X)
X_scaled = scaler.fit(X).transform(X, copy=False)
assert_false(np.any(np.isnan(X_scaled)))
assert_array_almost_equal(X_scaled.mean(axis=0), 5 * [0.0])
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
# Check that X has not been copied
assert_true(X_scaled is X)
X = rng.randn(4, 5)
X[:, 0] = 1.0 # first feature is a constant, non-zero feature
scaler = StandardScaler()
X_scaled = scaler.fit(X).transform(X, copy=True)
assert_false(np.any(np.isnan(X_scaled)))
assert_array_almost_equal(X_scaled.mean(axis=0), 5 * [0.0])
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
# Check that X has not been copied
assert_true(X_scaled is not X)
def test_min_max_scaler_iris():
X = iris.data
scaler = MinMaxScaler()
# default params
X_trans = scaler.fit_transform(X)
assert_array_almost_equal(X_trans.min(axis=0), 0)
assert_array_almost_equal(X_trans.min(axis=0), 0)
assert_array_almost_equal(X_trans.max(axis=0), 1)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
# not default params: min=1, max=2
scaler = MinMaxScaler(feature_range=(1, 2))
X_trans = scaler.fit_transform(X)
assert_array_almost_equal(X_trans.min(axis=0), 1)
assert_array_almost_equal(X_trans.max(axis=0), 2)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
# min=-.5, max=.6
scaler = MinMaxScaler(feature_range=(-.5, .6))
X_trans = scaler.fit_transform(X)
assert_array_almost_equal(X_trans.min(axis=0), -.5)
assert_array_almost_equal(X_trans.max(axis=0), .6)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
# raises on invalid range
scaler = MinMaxScaler(feature_range=(2, 1))
assert_raises(ValueError, scaler.fit, X)
def test_min_max_scaler_zero_variance_features():
# Check min max scaler on toy data with zero variance features
X = [[0., 1., +0.5],
[0., 1., -0.1],
[0., 1., +1.1]]
X_new = [[+0., 2., 0.5],
[-1., 1., 0.0],
[+0., 1., 1.5]]
# default params
scaler = MinMaxScaler()
X_trans = scaler.fit_transform(X)
X_expected_0_1 = [[0., 0., 0.5],
[0., 0., 0.0],
[0., 0., 1.0]]
assert_array_almost_equal(X_trans, X_expected_0_1)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
X_trans_new = scaler.transform(X_new)
X_expected_0_1_new = [[+0., 1., 0.500],
[-1., 0., 0.083],
[+0., 0., 1.333]]
assert_array_almost_equal(X_trans_new, X_expected_0_1_new, decimal=2)
# not default params
scaler = MinMaxScaler(feature_range=(1, 2))
X_trans = scaler.fit_transform(X)
X_expected_1_2 = [[1., 1., 1.5],
[1., 1., 1.0],
[1., 1., 2.0]]
assert_array_almost_equal(X_trans, X_expected_1_2)
def test_min_max_scaler_1d():
# Test scaling of dataset along single axis
rng = np.random.RandomState(0)
X = rng.randn(5)
X_orig_copy = X.copy()
scaler = MinMaxScaler()
X_scaled = scaler.fit(X).transform(X)
assert_array_almost_equal(X_scaled.min(axis=0), 0.0)
assert_array_almost_equal(X_scaled.max(axis=0), 1.0)
# check inverse transform
X_scaled_back = scaler.inverse_transform(X_scaled)
assert_array_almost_equal(X_scaled_back, X_orig_copy)
# Test with 1D list
X = [0., 1., 2, 0.4, 1.]
scaler = MinMaxScaler()
X_scaled = scaler.fit(X).transform(X)
assert_array_almost_equal(X_scaled.min(axis=0), 0.0)
assert_array_almost_equal(X_scaled.max(axis=0), 1.0)
# Constant feature.
X = np.zeros(5)
scaler = MinMaxScaler()
X_scaled = scaler.fit(X).transform(X)
assert_greater_equal(X_scaled.min(), 0.)
assert_less_equal(X_scaled.max(), 1.)
def test_scaler_without_centering():
rng = np.random.RandomState(42)
X = rng.randn(4, 5)
X[:, 0] = 0.0 # first feature is always zero
X_csr = sparse.csr_matrix(X)
X_csc = sparse.csc_matrix(X)
assert_raises(ValueError, StandardScaler().fit, X_csr)
null_transform = StandardScaler(with_mean=False, with_std=False, copy=True)
X_null = null_transform.fit_transform(X_csr)
assert_array_equal(X_null.data, X_csr.data)
X_orig = null_transform.inverse_transform(X_null)
assert_array_equal(X_orig.data, X_csr.data)
scaler = StandardScaler(with_mean=False).fit(X)
X_scaled = scaler.transform(X, copy=True)
assert_false(np.any(np.isnan(X_scaled)))
scaler_csr = StandardScaler(with_mean=False).fit(X_csr)
X_csr_scaled = scaler_csr.transform(X_csr, copy=True)
assert_false(np.any(np.isnan(X_csr_scaled.data)))
scaler_csc = StandardScaler(with_mean=False).fit(X_csc)
X_csc_scaled = scaler_csr.transform(X_csc, copy=True)
assert_false(np.any(np.isnan(X_csc_scaled.data)))
assert_equal(scaler.mean_, scaler_csr.mean_)
assert_array_almost_equal(scaler.std_, scaler_csr.std_)
assert_equal(scaler.mean_, scaler_csc.mean_)
assert_array_almost_equal(scaler.std_, scaler_csc.std_)
assert_array_almost_equal(
X_scaled.mean(axis=0), [0., -0.01, 2.24, -0.35, -0.78], 2)
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
X_csr_scaled_mean, X_csr_scaled_std = mean_variance_axis(X_csr_scaled, 0)
assert_array_almost_equal(X_csr_scaled_mean, X_scaled.mean(axis=0))
assert_array_almost_equal(X_csr_scaled_std, X_scaled.std(axis=0))
# Check that X has not been modified (copy)
assert_true(X_scaled is not X)
assert_true(X_csr_scaled is not X_csr)
X_scaled_back = scaler.inverse_transform(X_scaled)
assert_true(X_scaled_back is not X)
assert_true(X_scaled_back is not X_scaled)
assert_array_almost_equal(X_scaled_back, X)
X_csr_scaled_back = scaler_csr.inverse_transform(X_csr_scaled)
assert_true(X_csr_scaled_back is not X_csr)
assert_true(X_csr_scaled_back is not X_csr_scaled)
assert_array_almost_equal(X_csr_scaled_back.toarray(), X)
X_csc_scaled_back = scaler_csr.inverse_transform(X_csc_scaled.tocsc())
assert_true(X_csc_scaled_back is not X_csc)
assert_true(X_csc_scaled_back is not X_csc_scaled)
assert_array_almost_equal(X_csc_scaled_back.toarray(), X)
def test_scaler_int():
# test that scaler converts integer input to floating
# for both sparse and dense matrices
rng = np.random.RandomState(42)
X = rng.randint(20, size=(4, 5))
X[:, 0] = 0 # first feature is always zero
X_csr = sparse.csr_matrix(X)
X_csc = sparse.csc_matrix(X)
null_transform = StandardScaler(with_mean=False, with_std=False, copy=True)
clean_warning_registry()
with warnings.catch_warnings(record=True):
X_null = null_transform.fit_transform(X_csr)
assert_array_equal(X_null.data, X_csr.data)
X_orig = null_transform.inverse_transform(X_null)
assert_array_equal(X_orig.data, X_csr.data)
clean_warning_registry()
with warnings.catch_warnings(record=True):
scaler = StandardScaler(with_mean=False).fit(X)
X_scaled = scaler.transform(X, copy=True)
assert_false(np.any(np.isnan(X_scaled)))
clean_warning_registry()
with warnings.catch_warnings(record=True):
scaler_csr = StandardScaler(with_mean=False).fit(X_csr)
X_csr_scaled = scaler_csr.transform(X_csr, copy=True)
assert_false(np.any(np.isnan(X_csr_scaled.data)))
clean_warning_registry()
with warnings.catch_warnings(record=True):
scaler_csc = StandardScaler(with_mean=False).fit(X_csc)
X_csc_scaled = scaler_csr.transform(X_csc, copy=True)
assert_false(np.any(np.isnan(X_csc_scaled.data)))
assert_equal(scaler.mean_, scaler_csr.mean_)
assert_array_almost_equal(scaler.std_, scaler_csr.std_)
assert_equal(scaler.mean_, scaler_csc.mean_)
assert_array_almost_equal(scaler.std_, scaler_csc.std_)
assert_array_almost_equal(
X_scaled.mean(axis=0),
[0., 1.109, 1.856, 21., 1.559], 2)
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
X_csr_scaled_mean, X_csr_scaled_std = mean_variance_axis(
X_csr_scaled.astype(np.float), 0)
assert_array_almost_equal(X_csr_scaled_mean, X_scaled.mean(axis=0))
assert_array_almost_equal(X_csr_scaled_std, X_scaled.std(axis=0))
# Check that X has not been modified (copy)
assert_true(X_scaled is not X)
assert_true(X_csr_scaled is not X_csr)
X_scaled_back = scaler.inverse_transform(X_scaled)
assert_true(X_scaled_back is not X)
assert_true(X_scaled_back is not X_scaled)
assert_array_almost_equal(X_scaled_back, X)
X_csr_scaled_back = scaler_csr.inverse_transform(X_csr_scaled)
assert_true(X_csr_scaled_back is not X_csr)
assert_true(X_csr_scaled_back is not X_csr_scaled)
assert_array_almost_equal(X_csr_scaled_back.toarray(), X)
X_csc_scaled_back = scaler_csr.inverse_transform(X_csc_scaled.tocsc())
assert_true(X_csc_scaled_back is not X_csc)
assert_true(X_csc_scaled_back is not X_csc_scaled)
assert_array_almost_equal(X_csc_scaled_back.toarray(), X)
def test_scaler_without_copy():
# Check that StandardScaler.fit does not change input
rng = np.random.RandomState(42)
X = rng.randn(4, 5)
X[:, 0] = 0.0 # first feature is always zero
X_csr = sparse.csr_matrix(X)
X_copy = X.copy()
StandardScaler(copy=False).fit(X)
assert_array_equal(X, X_copy)
X_csr_copy = X_csr.copy()
StandardScaler(with_mean=False, copy=False).fit(X_csr)
assert_array_equal(X_csr.toarray(), X_csr_copy.toarray())
def test_scale_sparse_with_mean_raise_exception():
rng = np.random.RandomState(42)
X = rng.randn(4, 5)
X_csr = sparse.csr_matrix(X)
# check scaling and fit with direct calls on sparse data
assert_raises(ValueError, scale, X_csr, with_mean=True)
assert_raises(ValueError, StandardScaler(with_mean=True).fit, X_csr)
# check transform and inverse_transform after a fit on a dense array
scaler = StandardScaler(with_mean=True).fit(X)
assert_raises(ValueError, scaler.transform, X_csr)
X_transformed_csr = sparse.csr_matrix(scaler.transform(X))
assert_raises(ValueError, scaler.inverse_transform, X_transformed_csr)
def test_scale_input_finiteness_validation():
# Check if non finite inputs raise ValueError
X = [np.nan, 5, 6, 7, 8]
assert_raises_regex(ValueError,
"Input contains NaN, infinity or a value too large",
scale, X)
X = [np.inf, 5, 6, 7, 8]
assert_raises_regex(ValueError,
"Input contains NaN, infinity or a value too large",
scale, X)
def test_scale_function_without_centering():
rng = np.random.RandomState(42)
X = rng.randn(4, 5)
X[:, 0] = 0.0 # first feature is always zero
X_csr = sparse.csr_matrix(X)
X_scaled = scale(X, with_mean=False)
assert_false(np.any(np.isnan(X_scaled)))
X_csr_scaled = scale(X_csr, with_mean=False)
assert_false(np.any(np.isnan(X_csr_scaled.data)))
# test csc has same outcome
X_csc_scaled = scale(X_csr.tocsc(), with_mean=False)
assert_array_almost_equal(X_scaled, X_csc_scaled.toarray())
# raises value error on axis != 0
assert_raises(ValueError, scale, X_csr, with_mean=False, axis=1)
assert_array_almost_equal(X_scaled.mean(axis=0),
[0., -0.01, 2.24, -0.35, -0.78], 2)
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
# Check that X has not been copied
assert_true(X_scaled is not X)
X_csr_scaled_mean, X_csr_scaled_std = mean_variance_axis(X_csr_scaled, 0)
assert_array_almost_equal(X_csr_scaled_mean, X_scaled.mean(axis=0))
assert_array_almost_equal(X_csr_scaled_std, X_scaled.std(axis=0))
def test_warning_scaling_integers():
# Check warning when scaling integer data
X = np.array([[1, 2, 0],
[0, 0, 0]], dtype=np.uint8)
w = "assumes floating point values as input, got uint8"
clean_warning_registry()
assert_warns_message(UserWarning, w, scale, X)
assert_warns_message(UserWarning, w, StandardScaler().fit, X)
assert_warns_message(UserWarning, w, MinMaxScaler().fit, X)
def test_normalizer_l1():
rng = np.random.RandomState(0)
X_dense = rng.randn(4, 5)
X_sparse_unpruned = sparse.csr_matrix(X_dense)
# set the row number 3 to zero
X_dense[3, :] = 0.0
# set the row number 3 to zero without pruning (can happen in real life)
indptr_3 = X_sparse_unpruned.indptr[3]
indptr_4 = X_sparse_unpruned.indptr[4]
X_sparse_unpruned.data[indptr_3:indptr_4] = 0.0
# build the pruned variant using the regular constructor
X_sparse_pruned = sparse.csr_matrix(X_dense)
# check inputs that support the no-copy optim
for X in (X_dense, X_sparse_pruned, X_sparse_unpruned):
normalizer = Normalizer(norm='l1', copy=True)
X_norm = normalizer.transform(X)
assert_true(X_norm is not X)
X_norm1 = toarray(X_norm)
normalizer = Normalizer(norm='l1', copy=False)
X_norm = normalizer.transform(X)
assert_true(X_norm is X)
X_norm2 = toarray(X_norm)
for X_norm in (X_norm1, X_norm2):
row_sums = np.abs(X_norm).sum(axis=1)
for i in range(3):
assert_almost_equal(row_sums[i], 1.0)
assert_almost_equal(row_sums[3], 0.0)
# check input for which copy=False won't prevent a copy
for init in (sparse.coo_matrix, sparse.csc_matrix, sparse.lil_matrix):
X = init(X_dense)
X_norm = normalizer = Normalizer(norm='l2', copy=False).transform(X)
assert_true(X_norm is not X)
assert_true(isinstance(X_norm, sparse.csr_matrix))
X_norm = toarray(X_norm)
for i in range(3):
assert_almost_equal(row_sums[i], 1.0)
assert_almost_equal(la.norm(X_norm[3]), 0.0)
def test_normalizer_l2():
rng = np.random.RandomState(0)
X_dense = rng.randn(4, 5)
X_sparse_unpruned = sparse.csr_matrix(X_dense)
# set the row number 3 to zero
X_dense[3, :] = 0.0
# set the row number 3 to zero without pruning (can happen in real life)
indptr_3 = X_sparse_unpruned.indptr[3]
indptr_4 = X_sparse_unpruned.indptr[4]
X_sparse_unpruned.data[indptr_3:indptr_4] = 0.0
# build the pruned variant using the regular constructor
X_sparse_pruned = sparse.csr_matrix(X_dense)
# check inputs that support the no-copy optim
for X in (X_dense, X_sparse_pruned, X_sparse_unpruned):
normalizer = Normalizer(norm='l2', copy=True)
X_norm1 = normalizer.transform(X)
assert_true(X_norm1 is not X)
X_norm1 = toarray(X_norm1)
normalizer = Normalizer(norm='l2', copy=False)
X_norm2 = normalizer.transform(X)
assert_true(X_norm2 is X)
X_norm2 = toarray(X_norm2)
for X_norm in (X_norm1, X_norm2):
for i in range(3):
assert_almost_equal(la.norm(X_norm[i]), 1.0)
assert_almost_equal(la.norm(X_norm[3]), 0.0)
# check input for which copy=False won't prevent a copy
for init in (sparse.coo_matrix, sparse.csc_matrix, sparse.lil_matrix):
X = init(X_dense)
X_norm = normalizer = Normalizer(norm='l2', copy=False).transform(X)
assert_true(X_norm is not X)
assert_true(isinstance(X_norm, sparse.csr_matrix))
X_norm = toarray(X_norm)
for i in range(3):
assert_almost_equal(la.norm(X_norm[i]), 1.0)
assert_almost_equal(la.norm(X_norm[3]), 0.0)
def test_normalize():
# Test normalize function
# Only tests functionality not used by the tests for Normalizer.
X = np.random.RandomState(37).randn(3, 2)
assert_array_equal(normalize(X, copy=False),
normalize(X.T, axis=0, copy=False).T)
assert_raises(ValueError, normalize, [[0]], axis=2)
assert_raises(ValueError, normalize, [[0]], norm='l3')
def test_binarizer():
X_ = np.array([[1, 0, 5], [2, 3, -1]])
for init in (np.array, list, sparse.csr_matrix, sparse.csc_matrix):
X = init(X_.copy())
binarizer = Binarizer(threshold=2.0, copy=True)
X_bin = toarray(binarizer.transform(X))
assert_equal(np.sum(X_bin == 0), 4)
assert_equal(np.sum(X_bin == 1), 2)
X_bin = binarizer.transform(X)
assert_equal(sparse.issparse(X), sparse.issparse(X_bin))
binarizer = Binarizer(copy=True).fit(X)
X_bin = toarray(binarizer.transform(X))
assert_true(X_bin is not X)
assert_equal(np.sum(X_bin == 0), 2)
assert_equal(np.sum(X_bin == 1), 4)
binarizer = Binarizer(copy=True)
X_bin = binarizer.transform(X)
assert_true(X_bin is not X)
X_bin = toarray(X_bin)
assert_equal(np.sum(X_bin == 0), 2)
assert_equal(np.sum(X_bin == 1), 4)
binarizer = Binarizer(copy=False)
X_bin = binarizer.transform(X)
if init is not list:
assert_true(X_bin is X)
X_bin = toarray(X_bin)
assert_equal(np.sum(X_bin == 0), 2)
assert_equal(np.sum(X_bin == 1), 4)
binarizer = Binarizer(threshold=-0.5, copy=True)
for init in (np.array, list):
X = init(X_.copy())
X_bin = toarray(binarizer.transform(X))
assert_equal(np.sum(X_bin == 0), 1)
assert_equal(np.sum(X_bin == 1), 5)
X_bin = binarizer.transform(X)
# Cannot use threshold < 0 for sparse
assert_raises(ValueError, binarizer.transform, sparse.csc_matrix(X))
def test_center_kernel():
# Test that KernelCenterer is equivalent to StandardScaler
# in feature space
rng = np.random.RandomState(0)
X_fit = rng.random_sample((5, 4))
scaler = StandardScaler(with_std=False)
scaler.fit(X_fit)
X_fit_centered = scaler.transform(X_fit)
K_fit = np.dot(X_fit, X_fit.T)
# center fit time matrix
centerer = KernelCenterer()
K_fit_centered = np.dot(X_fit_centered, X_fit_centered.T)
K_fit_centered2 = centerer.fit_transform(K_fit)
assert_array_almost_equal(K_fit_centered, K_fit_centered2)
# center predict time matrix
X_pred = rng.random_sample((2, 4))
K_pred = np.dot(X_pred, X_fit.T)
X_pred_centered = scaler.transform(X_pred)
K_pred_centered = np.dot(X_pred_centered, X_fit_centered.T)
K_pred_centered2 = centerer.transform(K_pred)
assert_array_almost_equal(K_pred_centered, K_pred_centered2)
def test_fit_transform():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
for obj in ((StandardScaler(), Normalizer(), Binarizer())):
X_transformed = obj.fit(X).transform(X)
X_transformed2 = obj.fit_transform(X)
assert_array_equal(X_transformed, X_transformed2)
def test_add_dummy_feature():
X = [[1, 0], [0, 1], [0, 1]]
X = add_dummy_feature(X)
assert_array_equal(X, [[1, 1, 0], [1, 0, 1], [1, 0, 1]])
def test_add_dummy_feature_coo():
X = sparse.coo_matrix([[1, 0], [0, 1], [0, 1]])
X = add_dummy_feature(X)
assert_true(sparse.isspmatrix_coo(X), X)
assert_array_equal(X.toarray(), [[1, 1, 0], [1, 0, 1], [1, 0, 1]])
def test_add_dummy_feature_csc():
X = sparse.csc_matrix([[1, 0], [0, 1], [0, 1]])
X = add_dummy_feature(X)
assert_true(sparse.isspmatrix_csc(X), X)
assert_array_equal(X.toarray(), [[1, 1, 0], [1, 0, 1], [1, 0, 1]])
def test_add_dummy_feature_csr():
X = sparse.csr_matrix([[1, 0], [0, 1], [0, 1]])
X = add_dummy_feature(X)
assert_true(sparse.isspmatrix_csr(X), X)
assert_array_equal(X.toarray(), [[1, 1, 0], [1, 0, 1], [1, 0, 1]])
def test_one_hot_encoder_sparse():
# Test OneHotEncoder's fit and transform.
X = [[3, 2, 1], [0, 1, 1]]
enc = OneHotEncoder()
# discover max values automatically
X_trans = enc.fit_transform(X).toarray()
assert_equal(X_trans.shape, (2, 5))
assert_array_equal(enc.active_features_,
np.where([1, 0, 0, 1, 0, 1, 1, 0, 1])[0])
assert_array_equal(enc.feature_indices_, [0, 4, 7, 9])
# check outcome
assert_array_equal(X_trans,
[[0., 1., 0., 1., 1.],
[1., 0., 1., 0., 1.]])
# max value given as 3
enc = OneHotEncoder(n_values=4)
X_trans = enc.fit_transform(X)
assert_equal(X_trans.shape, (2, 4 * 3))
assert_array_equal(enc.feature_indices_, [0, 4, 8, 12])
# max value given per feature
enc = OneHotEncoder(n_values=[3, 2, 2])
X = [[1, 0, 1], [0, 1, 1]]
X_trans = enc.fit_transform(X)
assert_equal(X_trans.shape, (2, 3 + 2 + 2))
assert_array_equal(enc.n_values_, [3, 2, 2])
# check that testing with larger feature works:
X = np.array([[2, 0, 1], [0, 1, 1]])
enc.transform(X)
# test that an error is raised when out of bounds:
X_too_large = [[0, 2, 1], [0, 1, 1]]
assert_raises(ValueError, enc.transform, X_too_large)
assert_raises(ValueError, OneHotEncoder(n_values=2).fit_transform, X)
# test that error is raised when wrong number of features
assert_raises(ValueError, enc.transform, X[:, :-1])
# test that error is raised when wrong number of features in fit
# with prespecified n_values
assert_raises(ValueError, enc.fit, X[:, :-1])
# test exception on wrong init param
assert_raises(TypeError, OneHotEncoder(n_values=np.int).fit, X)
enc = OneHotEncoder()
# test negative input to fit
assert_raises(ValueError, enc.fit, [[0], [-1]])
# test negative input to transform
enc.fit([[0], [1]])
assert_raises(ValueError, enc.transform, [[0], [-1]])
def test_one_hot_encoder_dense():
# check for sparse=False
X = [[3, 2, 1], [0, 1, 1]]
enc = OneHotEncoder(sparse=False)
# discover max values automatically
X_trans = enc.fit_transform(X)
assert_equal(X_trans.shape, (2, 5))
assert_array_equal(enc.active_features_,
np.where([1, 0, 0, 1, 0, 1, 1, 0, 1])[0])
assert_array_equal(enc.feature_indices_, [0, 4, 7, 9])
# check outcome
assert_array_equal(X_trans,
np.array([[0., 1., 0., 1., 1.],
[1., 0., 1., 0., 1.]]))
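# A small reference sketch (plain Python, independent of OneHotEncoder) of the
# expansion the two encoder tests above verify: each integer column becomes one
# indicator column per observed value, and the per-column blocks are concatenated.
def _manual_one_hot_column(column):
    values = sorted(set(column))
    return [[1.0 if v == value else 0.0 for value in values] for v in column]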
def _check_transform_selected(X, X_expected, sel):
for M in (X, sparse.csr_matrix(X)):
Xtr = _transform_selected(M, Binarizer().transform, sel)
assert_array_equal(toarray(Xtr), X_expected)
def test_transform_selected():
X = [[3, 2, 1], [0, 1, 1]]
X_expected = [[1, 2, 1], [0, 1, 1]]
_check_transform_selected(X, X_expected, [0])
_check_transform_selected(X, X_expected, [True, False, False])
X_expected = [[1, 1, 1], [0, 1, 1]]
_check_transform_selected(X, X_expected, [0, 1, 2])
_check_transform_selected(X, X_expected, [True, True, True])
_check_transform_selected(X, X_expected, "all")
_check_transform_selected(X, X, [])
_check_transform_selected(X, X, [False, False, False])
def _run_one_hot(X, X2, cat):
enc = OneHotEncoder(categorical_features=cat)
Xtr = enc.fit_transform(X)
X2tr = enc.transform(X2)
return Xtr, X2tr
def _check_one_hot(X, X2, cat, n_features):
ind = np.where(cat)[0]
# With mask
A, B = _run_one_hot(X, X2, cat)
# With indices
C, D = _run_one_hot(X, X2, ind)
# Check shape
assert_equal(A.shape, (2, n_features))
assert_equal(B.shape, (1, n_features))
assert_equal(C.shape, (2, n_features))
assert_equal(D.shape, (1, n_features))
# Check that mask and indices give the same results
assert_array_equal(toarray(A), toarray(C))
assert_array_equal(toarray(B), toarray(D))
def test_one_hot_encoder_categorical_features():
X = np.array([[3, 2, 1], [0, 1, 1]])
X2 = np.array([[1, 1, 1]])
cat = [True, False, False]
_check_one_hot(X, X2, cat, 4)
# Edge case: all non-categorical
cat = [False, False, False]
_check_one_hot(X, X2, cat, 3)
# Edge case: all categorical
cat = [True, True, True]
_check_one_hot(X, X2, cat, 5)
def test_one_hot_encoder_unknown_transform():
X = np.array([[0, 2, 1], [1, 0, 3], [1, 0, 2]])
y = np.array([[4, 1, 1]])
# Test that one hot encoder raises error for unknown features
# present during transform.
oh = OneHotEncoder(handle_unknown='error')
oh.fit(X)
assert_raises(ValueError, oh.transform, y)
# Test the ignore option, ignores unknown features.
oh = OneHotEncoder(handle_unknown='ignore')
oh.fit(X)
assert_array_equal(
oh.transform(y).toarray(),
np.array([[0., 0., 0., 0., 1., 0., 0.]])
)
# Raise error if handle_unknown is neither ignore or error.
oh = OneHotEncoder(handle_unknown='42')
oh.fit(X)
assert_raises(ValueError, oh.transform, y)
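# For reference, a minimal sketch of the transformation most of the scaler tests
# above exercise: StandardScaler (and scale) compute a per-column z-score
# (X - mean) / std, which is why constant columns come out as all zeros.
def _zscore(X):
    X = np.asarray(X, dtype=float)
    mean = X.mean(axis=0)
    std = np.where(X.std(axis=0) == 0, 1.0, X.std(axis=0))
    return (X - mean) / std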
|
|
# Copyright (C) 2013 eNovance SAS <licensing@enovance.com>
#
# Author: Artom Lifshitz <artom.lifshitz@enovance.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sys
import re
import os
import argparse
import logging
import dns.zone
from designate.i18n import _LI
from designate.i18n import _LE
logging.basicConfig()
LOG = logging.getLogger(__name__)
class Zone:
"""
Encapsulates a dnspython zone to provide easier printing and writing to
files
"""
def __init__(self, dnszone):
self._dnszone = dnszone
def to_stdout(self):
self.to_file(sys.stdout)
def to_file(self, f):
if type(f) is file:
fd = f
elif type(f) is str:
if os.path.isdir(f):
fd = open(os.path.join(f, self._dnszone.origin.to_text()), 'w')
else:
fd = open(f, 'w')
else:
raise ValueError('f must be a file name or file object')
fd.write('$ORIGIN %s\n' % self._dnszone.origin.to_text())
self._dnszone.to_file(fd, relativize=False)
fd.write('\n')
if fd is not sys.stdout:
fd.close()
class Extractor:
"""
Extracts all the zones configured in a named.conf, including included
files
"""
# The regexes we use to extract information from the config file
_include_regex = re.compile(
r"""
include \s* # The include keyword, possibly followed by
# whitespace
" # Open quote
(?P<file> [^"]+ ) # The included file (without quotes), as group 'file'
" # Close quote
\s* ; # Semicolon, possibly preceded by whitespace
""", re.MULTILINE | re.VERBOSE)
_zone_regex = re.compile(
r"""
zone \s* # The zone keyword, possibly followed by
# whitespace
" # Open quote
(?P<name> [^"]+ ) # The zone name (without quotes), as group 'name'
" # Close quote
\s* # Possible whitespace
{ # Open bracket
(?P<content> [^{}]+ ) # The contents of the zone block (without
# brackets) as group 'content'
} # Close bracket
\s* ; # Semicolon, possibly preceded by whitespace
""", re.MULTILINE | re.VERBOSE)
_type_master_regex = re.compile(
r"""
type \s+ # The type keyword, followed by some whitespace
master # The master keyword
\s* ; # Semicolon, possibly preceded by whitespace
""", re.MULTILINE | re.VERBOSE)
_zonefile_regex = re.compile(r"""
file \s* # The file keyword, possibly followed by whitespace
" # Open quote
(?P<file> [^"]+ ) # The zonefile (without quotes), as group 'file'
" # Close quote
\s* ; # Semicolon, possibly preceded by whitespace
""", re.MULTILINE | re.VERBOSE)
def __init__(self, conf_file):
self._conf_file = conf_file
self._conf = self._filter_comments(conf_file)
def _skip_until(self, f, stop):
skip = ''
while True:
skip += f.read(1)
if skip.endswith(stop):
break
def _filter_comments(self, conf_file):
"""
Reads the named.conf, skipping comments and returning the filtered
configuration
"""
f = open(conf_file)
conf = ''
while True:
c = f.read(1)
if c == '':
break
conf += c
            # If we just appended a comment marker:
if conf.endswith('#'):
self._skip_until(f, '\n')
# Strip the '#' we appended earlier
conf = conf[:-1]
elif conf.endswith('//'):
self._skip_until(f, '\n')
# Strip the '//' we appended earlier
conf = conf[:-2]
elif conf.endswith('/*'):
self._skip_until(f, '*/')
# Strip the '/*' we appended earlier
conf = conf[:-2]
f.close()
return conf
def extract(self):
zones = []
zones.extend(self._process_includes())
zones.extend(self._extract_zones())
return zones
def _process_includes(self):
zones = []
for include in self._include_regex.finditer(self._conf):
x = Extractor(include.group('file'))
zones.extend(x.extract())
return zones
def _extract_zones(self):
zones = []
for zone in self._zone_regex.finditer(self._conf):
content = zone.group('content')
name = zone.group('name')
# Make sure it's a master zone:
if self._type_master_regex.search(content):
zonefile = self._zonefile_regex.search(content).group('file')
try:
zone_object = dns.zone.from_file(zonefile,
allow_include=True)
except dns.zone.UnknownOrigin:
LOG.info(_LI('%(zonefile)s is missing $ORIGIN, '
'inserting %(name)s'),
{'zonefile': zonefile, 'name': name})
zone_object = dns.zone.from_file(zonefile,
allow_include=True,
origin=name)
except dns.zone.NoSOA:
                    LOG.error(_LE('%s has no SOA') % zonefile)
                    continue
                zones.append(Zone(zone_object))
return zones
def main():
parser = argparse.ArgumentParser(
description='Extract zonefiles from named.conf.')
parser.add_argument('named_conf', metavar='FILE', type=str, nargs=1,
help='the named.conf to parse')
parser.add_argument('-w', '--write', metavar='DIR', type=str,
                        help='write each extracted zonefile as its own file'
' in DIR')
parser.add_argument('-v', '--verbose', action='store_true',
help='verbose output')
args = parser.parse_args()
if args.verbose:
LOG.setLevel(logging.INFO)
else:
LOG.setLevel(logging.WARNING)
try:
x = Extractor(args.named_conf[0])
for zone in x.extract():
if args.write is not None:
zone.to_file(args.write)
else:
zone.to_stdout()
except IOError as e:
LOG.error(e)
if __name__ == '__main__':
main()
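# Usage sketch (hypothetical paths; assumes the named.conf only references
# readable master zonefiles): besides the CLI entry point above, the classes
# can be driven programmatically, e.g.
#
#     extractor = Extractor('/etc/named/named.conf')
#     for zone in extractor.extract():
#         zone.to_file('/tmp/zones')   # or zone.to_stdout()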
|
|
from jinja2 import Template
from kvmagent import kvmagent
from zstacklib.utils import http
from zstacklib.utils import jsonobject
from zstacklib.utils import lock
from zstacklib.utils import log
from zstacklib.utils import shell
from zstacklib.utils import ebtables
from zstacklib.utils.bash import *
from prometheus_client.core import GaugeMetricFamily
import netaddr
logger = log.get_logger(__name__)
EBTABLES_CMD = ebtables.get_ebtables_cmd()
class AgentRsp(object):
def __init__(self):
self.success = True
self.error = None
def collect_vip_statistics():
def parse_eip_string(estr):
vnic_ip = ip = vip_uuid = None
ws = estr.split(',')
for w in ws:
if w.startswith('eip_addr'):
ip = w.split(':')[1]
elif w.startswith('vip'):
vip_uuid = w.split(':')[1]
elif w.startswith('vnic_ip'):
vnic_ip = w.split(':')[1]
return ip, vip_uuid, vnic_ip
def find_namespace_name_by_ip(ip):
ns_name_suffix = ip.replace('.', '_')
o = bash_o('ip netns')
for l in o.split('\n'):
if ('%s ' % ns_name_suffix) in l:
# l is like 'br_eth0_172_20_51_136 (id: 3)'
return l.split()[0]
return None
def create_metric(line, ip, vip_uuid, vnic_ip, metrics):
pairs = line.split()
pkts = pairs[0]
bs = pairs[1]
src = pairs[7]
dst = pairs[8]
# out traffic
if src.startswith(vnic_ip):
g = metrics['zstack_vip_out_bytes']
g.add_metric([vip_uuid], float(bs))
g = metrics['zstack_vip_out_packages']
g.add_metric([vip_uuid], float(pkts))
# in traffic
if dst.startswith(vnic_ip):
g = metrics['zstack_vip_in_bytes']
g.add_metric([vip_uuid], float(bs))
g = metrics['zstack_vip_in_packages']
g.add_metric([vip_uuid], float(pkts))
def collect(ip, vip_uuid, vnic_ip):
ns_name = find_namespace_name_by_ip(ip)
if not ns_name:
return []
CHAIN_NAME = "vip-perf"
VIP_LABEL_NAME = 'VipUUID'
o = bash_o("ip netns exec {{ns_name}} iptables -nvxL {{CHAIN_NAME}} | sed '1,2d'")
metrics = {
'zstack_vip_out_bytes': GaugeMetricFamily('zstack_vip_out_bytes', 'VIP outbound traffic in bytes', labels=[VIP_LABEL_NAME]),
'zstack_vip_out_packages': GaugeMetricFamily('zstack_vip_out_packages', 'VIP outbound traffic packages', labels=[VIP_LABEL_NAME]),
'zstack_vip_in_bytes': GaugeMetricFamily('zstack_vip_in_bytes', 'VIP inbound traffic in bytes', labels=[VIP_LABEL_NAME]),
'zstack_vip_in_packages': GaugeMetricFamily('zstack_vip_in_packages', 'VIP inbound traffic packages', labels=[VIP_LABEL_NAME])
}
for l in o.split('\n'):
l = l.strip(' \t\r\n')
if l:
create_metric(l, ip, vip_uuid, vnic_ip, metrics)
return metrics.values()
o = bash_o('ip -o -d link')
words = o.split()
eip_strings = [w for w in words if w.startswith('eip:')]
ret = []
eips = {}
for estr in eip_strings:
ip, vip_uuid, vnic_ip = parse_eip_string(estr)
if ip is None:
logger.warn("no ip field found in %s" % estr)
continue
if vip_uuid is None:
logger.warn("no vip field found in %s" % estr)
continue
if vnic_ip is None:
logger.warn("no vnic_ip field found in %s" % estr)
continue
eips[ip] = (vip_uuid, vnic_ip)
for ip, (vip_uuid, vnic_ip) in eips.items():
ret.extend(collect(ip, vip_uuid, vnic_ip))
return ret
kvmagent.register_prometheus_collector(collect_vip_statistics)
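# Worked example (hypothetical values): collect_vip_statistics() parses the
# interface alias written by DEip._apply_eip below, whose format is
# "eip:<eipUuid>,eip_addr:<vip>,vnic:<nic>,vnic_ip:<guest ip>,vm:<vmUuid>,vip:<vipUuid>".
# For an alias such as
#   eip:eip-1,eip_addr:172.20.51.136,vnic:vnic12.0,vnic_ip:10.0.0.5,vm:vm-1,vip:vip-1
# parse_eip_string() returns ('172.20.51.136', 'vip-1', '10.0.0.5'), and
# find_namespace_name_by_ip('172.20.51.136') then looks for a namespace whose
# name contains '172_20_51_136' (e.g. 'br_eth0_172_20_51_136').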
class DEip(kvmagent.KvmAgent):
APPLY_EIP_PATH = "/flatnetworkprovider/eip/apply"
DELETE_EIP_PATH = "/flatnetworkprovider/eip/delete"
BATCH_APPLY_EIP_PATH = "/flatnetworkprovider/eip/batchapply"
BATCH_DELETE_EIP_PATH = "/flatnetworkprovider/eip/batchdelete"
def start(self):
http_server = kvmagent.get_http_server()
http_server.register_async_uri(self.APPLY_EIP_PATH, self.apply_eip)
http_server.register_async_uri(self.BATCH_APPLY_EIP_PATH, self.apply_eips)
http_server.register_async_uri(self.DELETE_EIP_PATH, self.delete_eip)
http_server.register_async_uri(self.BATCH_DELETE_EIP_PATH, self.delete_eips)
def stop(self):
pass
@kvmagent.replyerror
def apply_eip(self, req):
cmd = jsonobject.loads(req[http.REQUEST_BODY])
self._apply_eips([cmd.eip])
return jsonobject.dumps(AgentRsp())
@kvmagent.replyerror
def apply_eips(self, req):
cmd = jsonobject.loads(req[http.REQUEST_BODY])
self._apply_eips(cmd.eips)
return jsonobject.dumps(AgentRsp())
@kvmagent.replyerror
def delete_eips(self, req):
cmd = jsonobject.loads(req[http.REQUEST_BODY])
self._delete_eips(cmd.eips)
return jsonobject.dumps(AgentRsp())
@kvmagent.replyerror
def delete_eip(self, req):
cmd = jsonobject.loads(req[http.REQUEST_BODY])
self._delete_eips([cmd.eip])
return jsonobject.dumps(AgentRsp())
@in_bash
@lock.file_lock('/run/xtables.lock')
def _delete_eip(self, eip):
dev_base_name = eip.nicName.replace('vnic', '', 1)
dev_base_name = dev_base_name.replace(".", "_")
NIC_NAME = eip.nicName
CHAIN_NAME = '%s-gw' % NIC_NAME
NS_NAME = "%s_%s" % (eip.publicBridgeName, eip.vip.replace(".", "_"))
EIP_UUID = eip.eipUuid[-9:]
PUB_ODEV = "%s_eo" % (EIP_UUID)
PRI_ODEV = "%s_o" % (EIP_UUID)
def delete_namespace():
if bash_r('ip netns | grep -w {{NS_NAME}} > /dev/null') == 0:
bash_errorout('ip netns delete {{NS_NAME}}')
def delete_outer_dev():
if bash_r('ip link | grep -w {{PUB_ODEV}} > /dev/null') == 0:
bash_r('ip link del {{PUB_ODEV}}')
def delete_arp_rules():
if bash_r(EBTABLES_CMD + ' -t nat -L {{CHAIN_NAME}} >/dev/null 2>&1') == 0:
RULE = "-i {{NIC_NAME}} -j {{CHAIN_NAME}}"
if bash_r(EBTABLES_CMD + ' -t nat -L PREROUTING | grep -- "{{RULE}}" > /dev/null') == 0:
bash_errorout(EBTABLES_CMD + ' -t nat -D PREROUTING {{RULE}}')
bash_errorout(EBTABLES_CMD + ' -t nat -F {{CHAIN_NAME}}')
bash_errorout(EBTABLES_CMD + ' -t nat -X {{CHAIN_NAME}}')
for BLOCK_DEV in [PRI_ODEV, PUB_ODEV]:
BLOCK_CHAIN_NAME = '{{BLOCK_DEV}}-arp'
if bash_r(EBTABLES_CMD + ' -t nat -L {{BLOCK_CHAIN_NAME}} > /dev/null 2>&1') == 0:
RULE = '-p ARP -o {{BLOCK_DEV}} -j {{BLOCK_CHAIN_NAME}}'
if bash_r(EBTABLES_CMD + ' -t nat -L POSTROUTING | grep -- "{{RULE}}" > /dev/null') == 0:
bash_errorout(EBTABLES_CMD + ' -t nat -D POSTROUTING {{RULE}}')
bash_errorout(EBTABLES_CMD + ' -t nat -F {{BLOCK_CHAIN_NAME}}')
bash_errorout(EBTABLES_CMD + ' -t nat -X {{BLOCK_CHAIN_NAME}}')
delete_namespace()
delete_outer_dev()
delete_arp_rules()
@lock.lock('eip')
def _delete_eips(self, eips):
for eip in eips:
self._delete_eip(eip)
@in_bash
@lock.file_lock('/run/xtables.lock')
def _apply_eip(self, eip):
dev_base_name = eip.nicName.replace('vnic', '', 1)
dev_base_name = dev_base_name.replace(".", "_")
PUB_BR = eip.publicBridgeName
EIP_UUID = eip.eipUuid[-9:]
OLD_PUB_ODEVS = ["%s_eo" % dev_base_name, "%s_eo_%s" % (dev_base_name, EIP_UUID)]
OLD_PUB_IDEVS = ["%s_ei" % dev_base_name, "%s_ei_%s" % (dev_base_name, EIP_UUID)]
OLD_PRI_ODEVS = ["%s_o" % dev_base_name, "%s_o_%s" % (dev_base_name, EIP_UUID)]
OLD_PRI_IDEVS = ["%s_i" % dev_base_name, "%s_i_%s" % (dev_base_name, EIP_UUID)]
PUB_ODEV = "%s_eo" % (EIP_UUID)
PUB_IDEV = "%s_ei" % (EIP_UUID)
PRI_ODEV = "%s_o" % (EIP_UUID)
PRI_IDEV = "%s_i" % (EIP_UUID)
PRI_BR= eip.vmBridgeName
VIP= eip.vip
VIP_NETMASK= eip.vipNetmask
VIP_GW= eip.vipGateway
NIC_NAME= eip.nicName
NIC_GATEWAY= eip.nicGateway
NIC_NETMASK= eip.nicNetmask
NIC_IP= eip.nicIp
NIC_MAC= eip.nicMac
NS_NAME= "%s_%s" % (eip.publicBridgeName, eip.vip.replace(".", "_"))
EBTABLE_CHAIN_NAME= eip.vmBridgeName
PRI_BR_PHY_DEV= eip.vmBridgeName.replace('br_', '', 1)
EIP_DESC = "eip:%s,eip_addr:%s,vnic:%s,vnic_ip:%s,vm:%s,vip:%s" % (eip.eipUuid, VIP, eip.nicName, NIC_IP, eip.vmUuid, eip.vipUuid)
NS = "ip netns exec {{NS_NAME}}"
        # In case the namespace was deleted but an orphan outer link is left in
        # the system, delete the orphan link so it can be recreated below.
def delete_orphan_outer_dev(inner_dev, outer_dev):
if bash_r('ip netns exec {{NS_NAME}} ip link | grep -w {{inner_dev}} > /dev/null') != 0:
# ignore error
bash_r('ip link del {{outer_dev}} &> /dev/null')
def create_dev_if_needed(outer_dev, outer_dev_desc, inner_dev, inner_dev_desc):
if bash_r('ip link | grep -w {{outer_dev}} > /dev/null ') != 0:
bash_errorout('ip link add {{outer_dev}} type veth peer name {{inner_dev}}')
bash_errorout('ip link set {{outer_dev}} alias {{outer_dev_desc}}')
bash_errorout('ip link set {{inner_dev}} alias {{inner_dev_desc}}')
bash_errorout('ip link set {{outer_dev}} up')
def add_dev_to_br_if_needed(bridge, device):
if bash_r('brctl show {{bridge}} | grep -w {{device}} > /dev/null') != 0:
bash_errorout('brctl addif {{bridge}} {{device}}')
def add_dev_namespace_if_needed(device, namespace):
if bash_r('eval {{NS}} ip link | grep -w {{device}} > /dev/null') != 0:
bash_errorout('ip link set {{device}} netns {{namespace}}')
def set_ip_to_idev_if_needed(device, ip, netmask):
if bash_r('eval {{NS}} ip addr show {{device}} | grep -w {{ip}} > /dev/null') != 0:
bash_errorout('eval {{NS}} ip addr flush dev {{device}}')
bash_errorout('eval {{NS}} ip addr add {{ip}}/{{netmask}} dev {{device}}')
bash_errorout('eval {{NS}} ip link set {{device}} up')
def create_iptable_rule_if_needed(table, rule, at_head=False):
if bash_r('eval {{NS}} iptables-save | grep -- "{{rule}}" > /dev/null') != 0:
if at_head:
bash_errorout('eval {{NS}} iptables {{table}} -I {{rule}}')
else:
bash_errorout('eval {{NS}} iptables {{table}} -A {{rule}}')
def create_ebtable_rule_if_needed(table, chain, rule):
if bash_r(EBTABLES_CMD + ' -t {{table}} -L {{chain}} | grep -- "{{rule}}" > /dev/null') != 0:
bash_errorout(EBTABLES_CMD + ' -t {{table}} -A {{chain}} {{rule}}')
def set_eip_rules():
DNAT_NAME = "DNAT-{{VIP}}"
if bash_r('eval {{NS}} iptables-save | grep -w ":{{DNAT_NAME}}" > /dev/null') != 0:
bash_errorout('eval {{NS}} iptables -t nat -N {{DNAT_NAME}}')
create_iptable_rule_if_needed("-t nat", 'PREROUTING -d {{VIP}}/32 -j {{DNAT_NAME}}')
create_iptable_rule_if_needed("-t nat", '{{DNAT_NAME}} -j DNAT --to-destination {{NIC_IP}}')
FWD_NAME = "FWD-{{VIP}}"
if bash_r('eval {{NS}} iptables-save | grep -w ":{{FWD_NAME}}" > /dev/null') != 0:
bash_errorout('eval {{NS}} iptables -N {{FWD_NAME}}')
create_iptable_rule_if_needed("-t filter", "FORWARD ! -d {{NIC_IP}}/32 -i {{PUB_IDEV}} -j REJECT --reject-with icmp-port-unreachable")
create_iptable_rule_if_needed("-t filter", "FORWARD -i {{PRI_IDEV}} -o {{PUB_IDEV}} -j {{FWD_NAME}}")
create_iptable_rule_if_needed("-t filter", "FORWARD -i {{PUB_IDEV}} -o {{PRI_IDEV}} -j {{FWD_NAME}}")
create_iptable_rule_if_needed("-t filter", "{{FWD_NAME}} -j ACCEPT")
SNAT_NAME = "SNAT-{{VIP}}"
if bash_r('eval {{NS}} iptables-save | grep -w ":{{SNAT_NAME}}" > /dev/null ') != 0:
bash_errorout('eval {{NS}} iptables -t nat -N {{SNAT_NAME}}')
create_iptable_rule_if_needed("-t nat", "POSTROUTING -s {{NIC_IP}}/32 -j {{SNAT_NAME}}")
create_iptable_rule_if_needed("-t nat", "{{SNAT_NAME}} -j SNAT --to-source {{VIP}}")
def set_default_route_if_needed():
if bash_r('eval {{NS}} ip route | grep -w default > /dev/null') != 0:
bash_errorout('eval {{NS}} ip route add default via {{VIP_GW}}')
def set_gateway_arp_if_needed():
CHAIN_NAME = "{{NIC_NAME}}-gw"
if bash_r(EBTABLES_CMD + ' -t nat -L {{CHAIN_NAME}} > /dev/null 2>&1') != 0:
bash_errorout(EBTABLES_CMD + ' -t nat -N {{CHAIN_NAME}}')
create_ebtable_rule_if_needed('nat', 'PREROUTING', '-i {{NIC_NAME}} -j {{CHAIN_NAME}}')
GATEWAY = bash_o("eval {{NS}} ip link | grep -w {{PRI_IDEV}} -A 1 | awk '/link\/ether/{print $2}'")
if not GATEWAY:
raise Exception('cannot find the device[%s] in the namespace[%s]' % (PRI_IDEV, NS_NAME))
create_ebtable_rule_if_needed('nat', CHAIN_NAME, "-p ARP --arp-op Request --arp-ip-dst {{NIC_GATEWAY}} -j arpreply --arpreply-mac {{GATEWAY}}")
for BLOCK_DEV in [PRI_ODEV, PUB_ODEV]:
BLOCK_CHAIN_NAME = '{{BLOCK_DEV}}-arp'
if bash_r(EBTABLES_CMD + ' -t nat -L {{BLOCK_CHAIN_NAME}} > /dev/null 2>&1') != 0:
bash_errorout(EBTABLES_CMD + ' -t nat -N {{BLOCK_CHAIN_NAME}}')
create_ebtable_rule_if_needed('nat', 'POSTROUTING', "-p ARP -o {{BLOCK_DEV}} -j {{BLOCK_CHAIN_NAME}}")
create_ebtable_rule_if_needed('nat', BLOCK_CHAIN_NAME, "-p ARP -o {{BLOCK_DEV}} --arp-op Request --arp-ip-dst {{NIC_GATEWAY}} --arp-mac-src ! {{NIC_MAC}} -j DROP")
def create_perf_monitor():
o = bash_o("eval {{NS}} ip -o -f inet addr show | awk '/scope global/ {print $4}'")
cidr = None
vnic_ip = netaddr.IPAddress(NIC_IP)
for l in o.split('\n'):
l = l.strip(' \t\n\r')
if not l:
continue
nw = netaddr.IPNetwork(l)
if vnic_ip in nw:
cidr = nw.cidr
break
if not cidr:
raise Exception("cannot find CIDR of vnic ip[%s] in namespace %s" % (NIC_IP, NS_NAME))
CHAIN_NAME = "vip-perf"
bash_r("eval {{NS}} iptables -N {{CHAIN_NAME}} > /dev/null")
create_iptable_rule_if_needed("-t filter", "FORWARD -s {{NIC_IP}}/32 ! -d {{cidr}} -j {{CHAIN_NAME}}", True)
create_iptable_rule_if_needed("-t filter", "FORWARD ! -s {{cidr}} -d {{NIC_IP}}/32 -j {{CHAIN_NAME}}", True)
create_iptable_rule_if_needed("-t filter", "{{CHAIN_NAME}} -s {{NIC_IP}}/32 -j RETURN")
create_iptable_rule_if_needed("-t filter", "{{CHAIN_NAME}} -d {{NIC_IP}}/32 -j RETURN")
if bash_r('eval {{NS}} ip link show > /dev/null') != 0:
bash_errorout('ip netns add {{NS_NAME}}')
        # To be compatible with old versions
for i in range(len(OLD_PUB_IDEVS)):
delete_orphan_outer_dev(OLD_PUB_IDEVS[i], OLD_PUB_ODEVS[i])
delete_orphan_outer_dev(OLD_PRI_IDEVS[i], OLD_PRI_ODEVS[i])
delete_orphan_outer_dev(PUB_IDEV, PUB_ODEV)
delete_orphan_outer_dev(PRI_IDEV, PRI_ODEV)
create_dev_if_needed(PUB_ODEV, EIP_DESC, PUB_IDEV, EIP_DESC)
create_dev_if_needed(PRI_ODEV, EIP_DESC, PRI_IDEV, EIP_DESC)
add_dev_to_br_if_needed(PUB_BR, PUB_ODEV)
add_dev_to_br_if_needed(PRI_BR, PRI_ODEV)
add_dev_namespace_if_needed(PUB_IDEV, NS_NAME)
add_dev_namespace_if_needed(PRI_IDEV, NS_NAME)
set_ip_to_idev_if_needed(PUB_IDEV, VIP, VIP_NETMASK)
set_ip_to_idev_if_needed(PRI_IDEV, NIC_GATEWAY, NIC_NETMASK)
# ping VIP gateway
bash_r('eval {{NS}} arping -q -A -w 2.5 -c 3 -I {{PUB_IDEV}} {{VIP}} > /dev/null')
set_gateway_arp_if_needed()
set_eip_rules()
set_default_route_if_needed()
create_perf_monitor()
@lock.lock('eip')
def _apply_eips(self, eips):
for eip in eips:
self._apply_eip(eip)
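# Naming sketch derived from _apply_eip/_delete_eip above: for an EIP whose
# eipUuid ends in '123456789', the agent creates the veth pairs
# '123456789_eo'/'123456789_ei' (public bridge side) and '123456789_o'/'123456789_i'
# (VM bridge side), moves the inner '*_ei'/'*_i' ends into the namespace
# '<publicBridgeName>_<vip with dots replaced by underscores>', and then programs
# the DNAT/SNAT/FWD iptables chains plus the 'vip-perf' accounting chain inside it.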
|
|
import json
import uuid
from django.core.urlresolvers import reverse
from models import CrowdModelSpecification
# Required implementation for a new crowd type
class CrowdInterface(object):
def __init__(self, crowd_name):
self.crowd_name = crowd_name
@staticmethod
def validate_configuration(configuration):
""" Validate crowd-specific configuration options.
`configuration` is a dictionary of crowd-specific options as specified
by the API. This method should verify that all required options are
included and valid, and return `True` if so and `False` otherwise
"""
# Dummy implementation, do no validation.
return True
@staticmethod
def create_task(configuration, content):
""" Do the necessary work to create a task on the crowd platform.
For example, create a HIT on AMT using the APIs. `configuration` is a
dictionary containing settings passed via the public API. `content`
is the actual content for this crowd task. This function must return a
unique identifier for the new task.
"""
# Dummy implementation, return a random string
return uuid.uuid4()
@staticmethod
def task_pre_save(task_object):
""" Process newly created task objects before they are saved to the DB.
`task_object` will be an UNSAVED object with the `task_model` class
according to this crowd's model specification. Its task_id field will be
set to the id returned by the `create_task` method. This method can
modify the unsaved object (e.g., set custom fields) before it is saved
to the database. This method SHOULD NOT save the object--it will be
saved later.
"""
# Dummy implementation, do nothing
pass
@staticmethod
def group_pre_save(group_object):
""" Process new task group objects before they are saved to the DB.
`group_object` will be an UNSAVED object with the `group_model` class
according to this crowd's model specification. Its group_id field will
be set to the id passed in via the external API. This method can
modify the unsaved object (e.g., set custom fields) before it is saved
to the database. This method SHOULD NOT save the object--it will be
saved later.
"""
# Dummy implementation, do nothing
pass
@staticmethod
def pay_worker_bonus(worker_object, task_object, bonus_amount, reason):
""" Pay an additional bonus to a worker.
`worker_object` is an instance of this crowd's worker model,
`task_object` is an instance of this crowd's task model, `bonus_amount`
is a float containing the amount of money to pay the worker in USD, and
`reason` is a string explaining why the bonus has been granted. This
method should attempt to pay the appropriate amount to the worker on the
remote platform, or do nothing if bonus payments aren't supported.
"""
# Dummy implementation, do nothing
pass
@staticmethod
def reject_task(task_object, worker_object, reason):
""" Reject work done by a worker instead of paying them.
`task_object` is an instance of this crowd's task model, `worker_object`
is an instance of this crowd's worker model, and `reason` is a string
explaining why the work has been rejected. This method should reject the
assignment of this worker to this task on the remote crowd platform, or
do nothing if task rejection isn't supported.
"""
# Dummy implementation, do nothing
pass
@staticmethod
def expire_tasks(task_objects):
""" Expire multiple tasks on the crowd platform.
Expiration means making the task no longer available for new workers to
accept. It does not necessarily imply deletion, though some crowd
platforms may choose to implement it that way.
`task_objects` is an iterable containing multiple instances of this
crowd's task_model. This method should not modify the objects
themselves, just handle the remote cleanup.
"""
# Dummy implementation, do nothing
pass
@staticmethod
def delete_tasks(task_objects):
""" Delete multiple tasks on the crowd platform.
`task_objects` is a queryset containing multiple instances of this
crowd's task_model. This method should not delete the objects
themselves, just handle the remote cleanup.
"""
# Dummy implementation, do nothing
pass
@staticmethod
def get_assignment_context(request):
""" Extract crowd context from a request for the interface.
`request` is a Django HttpRequest object created when the crowd platform
requests an assignment interface from this server. This method should
return a dictionary containing custom context needed to render templates
as well as the following fields:
* `task_id`: the task being requested.
* `is_accepted`: has the worker committed to working on the task, or is
it just a preview?
* `worker_id`: the worker working on the task (optional if `is_accepted`
is False).
Additionally, the keys 'content', 'group_context', and 'response_url'
are reserved.
"""
# Base implementation, look for the fields in the request dictionary.
request_data = request.GET if request.method == 'GET' else request.POST
return {'task_id': request_data.get('task_id', None),
'worker_id': request_data.get('worker_id', None),
'is_accepted': request_data.get('is_accepted', True)}
@staticmethod
def get_response_context(request):
""" Extract response data from a request.
`request` is a Django HttpRequest object created when the crowd
interface posts data from an assignment. This method should return a
dictionary containing custom context needed to store models as well as
the following fields:
* `task_id`: the task being requested.
* `worker_id`: the worker working on the task.
* `assignment_id`: a unique id for the assignment of worker to task.
* `answers`: the assignment responses in json form (task-type dependent)
"""
# Base implementation, look for the fields in the request dictionary
request_data = request.GET if request.method == 'GET' else request.POST
return {
'task_id': request_data.get('task_id', None),
'worker_id': request_data.get('worker_id', None),
'assignment_id': request_data.get('assignment_id', None),
'answers': request_data.get('answers', None)
}
@staticmethod
def worker_pre_save(worker_object):
""" Process new worker objects before they are saved to the DB.
`worker_object` will be an UNSAVED object of the `worker_model` class
according to this crowd's model specification. Its worker_id field will
be set according to the context provided by `get_assignment_context`.
This method can modify the unsaved object (e.g., set custom fields)
before it is saved to the database. This method SHOULD NOT save the
object--it will be saved later.
"""
# Dummy implementation, do nothing
pass
@staticmethod
def response_pre_save(assignment_object):
""" Process new responses before they are saved to the DB.
`assignment_object` will be an UNSAVED object of the `assignment_model`
class according to this crowd's model specification. Its worker,
task, content, and assignment_id fields will be set according to the
context provided by `get_response_context`. This method can modify the
unsaved object (e.g., set custom fields) before it is saved to the
database. This method SHOULD NOT save the object--it will be saved
later.
"""
# Dummy implementation, do nothing
pass
def get_frontend_submit_url(self, crowd_config):
""" Returns a url path to redirect to after a worker submits a task."""
# Dummy implementation, just refresh the page on submit.
return ''
def get_assignment_url(self):
""" Return a url path to the view which will produce the task interface.
Subclasses shouldn't need to override this.
"""
return reverse('basecrowd:get_assignment', args=[self.crowd_name])
def get_backend_submit_url(self):
""" Return a url path to the view which will handle crowd responses.
Subclasses shouldn't need to override this.
"""
return reverse('basecrowd:post_response', args=[self.crowd_name])
###############################################################################
# Internal methods that don't need to be overwritten or called by subclasses. #
###############################################################################
# Validate context dictionaries
@staticmethod
def require_context(context_dictionary, required_keys, exc):
if any([context_dictionary.get(k) is None for k in required_keys]):
raise exc
# Validate the API create request.
def validate_create_request(self, request_json):
try:
json_dict = json.loads(request_json)
except ValueError: # data was invalid JSON
return False
try:
# require top-level fields
self.require_context(
json_dict,
['configuration', 'group_id', 'group_context', 'content'],
ValueError())
# require configuration options
configuration = json_dict['configuration']
self.require_context(
configuration,
['task_type', 'task_batch_size', 'num_assignments',
'callback_url'],
ValueError())
# require retainer pool sub-options, if present
if 'retainer_pool' in configuration:
retainer_config = configuration['retainer_pool']
if retainer_config.get('create_pool', True):
self.require_context(
retainer_config,
['pool_size',
'min_tasks_per_worker',
'waiting_rate',
'task_rate',
'list_rate'],
ValueError())
else:
self.require_context(
retainer_config,
['pool_id'],
ValueError())
except ValueError:
return False
# Require at least one record for crowd processing.
content = json_dict['content']
point_identifiers = content.keys()
if len(point_identifiers) == 0:
return False
# Do crowd-specific validation
crowd_config = configuration.get(self.crowd_name, {})
return self.validate_configuration(crowd_config)
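# A minimal request body that validate_create_request accepts (hypothetical
# values; the crowd-specific section under the crowd's own name is validated
# separately by validate_configuration):
#
#     {
#         "configuration": {
#             "task_type": "example_type",
#             "task_batch_size": 1,
#             "num_assignments": 3,
#             "callback_url": "http://example.com/callback"
#         },
#         "group_id": "group-1",
#         "group_context": {},
#         "content": {"point-1": {"title": "some content"}}
#     }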
class CrowdRegistry(object):
registered_crowds = {}
# Register a new crowd with an interface.
@classmethod
def register_crowd(cls, interface, **model_classes):
name = interface.crowd_name
if name in cls.registered_crowds:
raise ValueError("Crowd already registered: " + name)
model_spec = CrowdModelSpecification(name, **model_classes)
model_spec.add_model_rels()
cls.registered_crowds[name] = (interface, model_spec)
# Look up the model specification for a crowd.
@classmethod
def get_registry_entry(cls, crowd_name):
interface, model_spec = cls.registered_crowds.get(crowd_name,
(None, None))
if not interface and not model_spec:
raise ValueError("Invalid crowd name: " + crowd_name)
return interface, model_spec
# Get the entire registry
@classmethod
def get_registry(cls):
return cls.registered_crowds
|
|
# Copyright 2013 Novo Nordisk Foundation Center for Biosustainability,
# Technical University of Denmark.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Interface to the inspyred heuristic optimization framework
Wraps the GLPK solver by subclassing and extending :class:`Model`,
:class:`Variable`, and :class:`Constraint` from :mod:`interface`.
"""
import logging
import random
import types
import inspyred
import sympy
import interface
log = logging.getLogger(__name__)
class Variable(interface.Variable):
def __init__(self, *args, **kwargs):
super(Variable, self).__init__(*args, **kwargs)
class Objective(interface.Objective):
"""docstring for Objective"""
def __init__(self, expression, *args, **kwargs):
super(Objective, self).__init__(expression, *args, **kwargs)
@property
def value(self):
return self._value
def __str__(self):
if isinstance(self.expression, sympy.Basic):
return super(Objective, self).__str__()
else:
return self.expression.__str__()
# return ' '.join((self.direction, str(self.expression)))
@property
def expression(self):
return self._expression
@expression.setter
def expression(self, value):
self._expression = value
class VariableBounder(object):
"""This class defines a inspyred like Bounder.__init__.py
TODO: Make this work also for integer and binary type variables?
"""
def __init__(self, model):
self.model = model
def __call__(self, candidate, args):
variables = self.model.variables
bounded_candidate = list()
for c, variable in zip(candidate, variables):
if variable.type == 'continuous':
bounded_candidate.append(max(min(c, variable.ub), variable.lb))
elif variable.type == 'integer':
bounded_candidate.append(min(range(variable.lb, variable.ub + 1), key=lambda x: abs(x - c)))
elif variable.type == 'binary':
# print min([0, 1], key=lambda x: abs(x-c))
bounded_candidate.append(min([0, 1], key=lambda x: abs(x - c)))
return bounded_candidate
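# Worked example of the bounding rules above: for a continuous variable with
# lb=0 and ub=2, a candidate value of 3.7 is clamped to 2; for an integer
# variable with the same bounds it is snapped to the nearest value in
# range(0, 3), i.e. 2; and for a binary variable a value of 0.4 becomes 0.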
class Configuration(interface.EvolutionaryOptimizationConfiguration):
"""docstring for Configuration"""
class SubConfiguration(object):
pass
def __init__(self, *args, **kwargs):
super(Configuration, self).__init__(*args, **kwargs)
self._algorithm = inspyred.ec.GA
self._algorithm.terminator = [inspyred.ec.terminators.time_termination,
inspyred.ec.terminators.generation_termination,
inspyred.ec.terminators.evaluation_termination,
inspyred.ec.terminators.diversity_termination,
inspyred.ec.terminators.average_fitness_termination]
self.pop_size = 100
self.seeds = []
self.max_generations = 1
self.max_evaluations = None
self.max_time = None
self.selector_config = self.SubConfiguration()
self.selector_config.num_selected = None
self.selector_config.tournament_size = 2
self.selector_config.num_elites = 0
self.variator_config = self.SubConfiguration()
self.variator_config.mutation_rate = .1
self.variator_config.crossover_rate = 1.
self.variator_config.num_crossover_points = 1
self.topology_config = self.SubConfiguration()
self.topology_config.neighborhood_size = 5
self.swarm_config = self.SubConfiguration()
self.swarm_config.inertia = 0.5
self.swarm_config.cognitive_rate = 2.1
self.swarm_config.social_rate = 2.1
@property
def selector(self):
return self._algorithm.selector
@selector.setter
def selector(self, value):
self.algorithm.selector = value
@property
def variator(self):
return self._algorithm.variator
@variator.setter
def variator(self, value):
self._algorithm.variator = value
@property
def replacer(self):
return self._algorithm.replacer
@replacer.setter
def replacer(self, value):
self._algorithm.replacer = value
@property
def migrator(self):
return self._algorithm.migrator
@migrator.setter
def migrator(self, value):
self._algorithm.migrator = value
@property
def archiver(self):
return self._algorithm.archiver
@archiver.setter
def archiver(self, value):
self._algorithm.archiver = value
@property
def observer(self):
return self._algorithm.observer
@observer.setter
def observer(self, value):
self._algorithm.observer = value
@property
def terminator(self):
return self._algorithm.terminator
@terminator.setter
def terminator(self, value):
self._algorithm.terminator = value
@property
def topology(self):
return self._algorithm.topology
@topology.setter
def topology(self, value):
if value == 'Ring':
self._algorithm.topology = inspyred.swarm.topologies.ring_topology
elif value == 'Star':
self._algorithm.topology = inspyred.swarm.topologies.star_topology
elif isinstance(value, types.FunctionType):
self._algorithm.topology = value
else:
raise ValueError("%s is not a supported topology. Try 'Star' or 'Ring' instead.")
@property
def algorithm(self):
return self._algorithm
@algorithm.setter
def algorithm(self, value):
init = False
try:
previous_selector = self._algorithm.selector
previous_variator = self._algorithm.variator
previous_replacer = self._algorithm.replacer
# previous_migrator = self._algorithm.migrator
# previous_archiver = self._algorithm.archiver
# previous_observer = self._algorithm.observer
# previous_terminator = self._algorithm.terminator
except AttributeError:
init = True
if value == "EvolutionaryComputation":
            self._algorithm = inspyred.ec.EvolutionaryComputation(random)
elif value == "GeneticAlgorithm" or value == "GA":
self._algorithm = inspyred.ec.GA(random)
elif value == "ParticleSwarmOptimization" or value == "PSO":
self._algorithm = inspyred.swarm.PSO(random)
elif value == "AntColonySystem" or value == "ACS":
self._algorithm = inspyred.swarm.ACS(random)
elif value == "EvolutionaryStrategy" or value == "ES":
self._algorithm = inspyred.ec.ES(random)
elif value == "DifferentialEvolutionaryAlgorithm" or value == "DEA":
self._algorithm = inspyred.ec.DEA(random)
elif value == "SimulatedAnnealing" or value == "SA":
self._algorithm = inspyred.ec.SA(random)
elif value == "NSGA2":
self._algorithm = inspyred.emo.NSGA2(random)
elif value == "PAES":
self._algorithm = inspyred.emo.PAES(random)
elif value == "Pareto":
self._algorithm = inspyred.emo.Pareto(random)
else:
            raise ValueError(
                "%s is not a supported algorithm. Try one of the following instead: "
                "'GeneticAlgorithm', 'ParticleSwarmOptimization', "
                "'EvolutionaryStrategy'." % value)  # TODO: be more specific here
# self._algorithm.terminator = self._default_terminator
if init is False:
self._algorithm.selector = previous_selector
self._algorithm.variator = previous_variator
self._algorithm.replacer = previous_replacer
# previous_migrator = self._algorithm.migrator
# previous_archiver = self._algorithm.archiver
# previous_observer = self._algorithm.observer
# previous_terminator = self._algorithm.terminator
# TODO: setting a new algorithm should recycle old variators, selectors etc.
def _evolve_kwargs(self):
"""Filter None keyword arguments. Intended to be passed on to algorithm.evolve(...)"""
valid_evolve_kwargs = (
'max_generations', 'max_evaluations', 'pop_size', 'neighborhood_size', 'tournament_size', 'mutation_rate')
filtered_evolve_kwargs = dict()
for key in valid_evolve_kwargs:
attr_value = getattr(self, key)
if attr_value is not None:
filtered_evolve_kwargs[key] = attr_value
# return filtered_evolve_kwargs
return {}
class Model(interface.Model):
"""Interface"""
def __init__(self, algorithm=None, *args, **kwargs):
super(Model, self).__init__(*args, **kwargs)
self.configuration = Configuration()
if algorithm is None:
self.configuration.algorithm = "GA"
else:
self.configuration.algorithm = algorithm
self._bounder = VariableBounder(self)
self._generator = self._generator
def _generator(self, random, args):
individual = list()
for variable in self.variables:
if variable.type == 'continuous':
individual.append(random.uniform(variable.lb, variable.ub))
else:
individual.append(random.choice(range(variable.lb, variable.ub + 1)))
return individual
def _evaluator(self, candidates, args):
fitness = list()
for candidate in candidates:
substitution_dict = dict(zip(self.variables, candidate))
if isinstance(self.objective.expression, sympy.Basic):
fitness.append(self.objective.expression.subs(substitution_dict))
else:
fitness.append(self.objective.expression(substitution_dict))
return fitness
# @inspyred.ec.evaluators.evaluator
# def _evaluate(self, candidate, args):
# substitution_dict = dict(zip(self.variables, candidate))
# try:
# fitness = self.objective.expression.subs(substitution_dict)
# except AttributeError:
# fitness = self.objective.expression(substitution_dict)
# return fitness
def optimize(self, *args, **kwargs):
# import pdb; pdb.set_trace();
final_population = self.configuration.algorithm.evolve(
generator=self._generator,
evaluator=self._evaluator,
bounder=self._bounder,
pop_size=self.configuration.pop_size,
maximize={'max': True, 'min': False}[self.objective.direction],
max_generations=self.configuration.max_generations,
max_evaluations=self.configuration.max_evaluations,
neighborhood_size=self.configuration.topology_config.neighborhood_size,
mutation_rate=self.configuration.variator_config.mutation_rate,
tournament_size=self.configuration.selector_config.tournament_size
)
return final_population
if __name__ == '__main__':
# from optlang.interface import Objective, Variable
import numpy
import inspyred
x = Variable('x', lb=0, ub=2)
y = Variable('y', lb=0, ub=2)
rosenbrock_obj = Objective((1 - x) ** 2 + 100 * (y - x ** 2) ** 2, name="Rosenbrock function", direction='min')
print("The rosenbrock function:", rosenbrock_obj)
print("The global minimum at (x,y) = (1,1) is", rosenbrock_obj.expression.subs({x: 1, y: 1}))
problem = Model(name='rosenbrock', algorithm='PSO')
# problem = Model(name='rosenbrock')
problem.objective = rosenbrock_obj
def my_observer(population, num_generations, num_evaluations, args):
best = max(population)
print(('{0:6} -- {1} : {2}'.format(num_generations,
best.fitness,
str(best.candidate))))
problem.configuration.max_generations = 100
problem.configuration.terminator = inspyred.ec.terminators.generation_termination
problem.configuration.observer = my_observer
problem.configuration.selector = inspyred.ec.selectors.tournament_selection
final_pop = problem.optimize()
fitnesses = [individual.fitness for individual in final_pop]
print(fitnesses)
print("mean", numpy.mean(fitnesses))
print("max", numpy.max(fitnesses))
print("min", numpy.min(fitnesses))
# print numpy.std(fitnesses)
|
|
import asyncio
from contextlib import AsyncExitStack, ExitStack
import random
import string
import aiobotocore.session
from aiobotocore.config import AioConfig
from unittest.mock import patch
from itertools import chain
import tempfile
import os
# Third Party
import pytest
import aiohttp
host = '127.0.0.1'
_PYCHARM_HOSTED = os.environ.get('PYCHARM_HOSTED') == '1'
@pytest.fixture(scope="session", params=[True, False],
ids=['debug[true]', 'debug[false]'])
def debug(request):
return request.param
def random_bucketname():
# 63 is the max bucket length.
return random_name()
def random_tablename():
return random_name()
def random_name():
"""Return a string with presumably unique contents
The string contains only symbols allowed for s3 buckets
(alphanumeric, dot and hyphen).
"""
return ''.join(random.sample(string.ascii_lowercase, k=26))
def assert_status_code(response, status_code):
assert response['ResponseMetadata']['HTTPStatusCode'] == status_code
async def assert_num_uploads_found(
s3_client, bucket_name, operation, num_uploads, *, max_items=None,
num_attempts=5):
paginator = s3_client.get_paginator(operation)
for _ in range(num_attempts):
pages = paginator.paginate(Bucket=bucket_name,
PaginationConfig={'MaxItems': max_items})
responses = []
async for page in pages:
responses.append(page)
# It sometimes takes a while for all the uploads to show up,
# especially if the upload was just created. If we don't
        # see the expected amount, we retry up to num_attempts times
# before failing.
amount_seen = len(responses[0]['Uploads'])
if amount_seen == num_uploads:
# Test passed.
return
else:
# Sleep and try again.
await asyncio.sleep(2)
pytest.fail("Expected to see %s uploads, instead saw: %s" % (
num_uploads, amount_seen))
@pytest.fixture
def aa_fail_proxy_config(monkeypatch):
# NOTE: name of this fixture must be alphabetically first to run first
monkeypatch.setenv('HTTP_PROXY', f'http://{host}:54321')
monkeypatch.setenv('HTTPS_PROXY', f'http://{host}:54321')
@pytest.fixture
def aa_succeed_proxy_config(monkeypatch):
# NOTE: name of this fixture must be alphabetically first to run first
monkeypatch.setenv('HTTP_PROXY', f'http://{host}:54321')
monkeypatch.setenv('HTTPS_PROXY', f'http://{host}:54321')
# this will cause us to skip proxying
monkeypatch.setenv('NO_PROXY', 'amazonaws.com')
@pytest.fixture
def session():
session = aiobotocore.session.AioSession()
return session
@pytest.fixture
def region():
return 'us-east-1'
@pytest.fixture
def alternative_region():
return 'us-west-2'
@pytest.fixture
def signature_version():
return 's3'
@pytest.fixture
def s3_verify():
return None
@pytest.fixture
def config(request, region, signature_version):
config_kwargs = request.node.get_closest_marker("config_kwargs") or {}
if config_kwargs:
assert not config_kwargs.kwargs, config_kwargs
assert len(config_kwargs.args) == 1
config_kwargs = config_kwargs.args[0]
    connect_timeout = read_timeout = 5
    if _PYCHARM_HOSTED:
        connect_timeout = read_timeout = 180
    return AioConfig(region_name=region, signature_version=signature_version,
                     read_timeout=read_timeout, connect_timeout=connect_timeout,
                     **config_kwargs)
@pytest.fixture
def mocking_test():
    # change this flag to test against real AWS
# TODO: this should be merged with pytest.mark.moto
return True
def moto_config(endpoint_url):
kw = dict(endpoint_url=endpoint_url,
aws_secret_access_key="xxx",
aws_access_key_id="xxx")
return kw
@pytest.fixture
def patch_attributes(request):
"""Call unittest.mock.patch on arguments passed through a pytest mark.
This fixture looks at the @pytest.mark.patch_attributes mark. This mark is a list
of arguments to be passed to unittest.mock.patch (see example below). This fixture
returns the list of mock objects, one per element in the input list.
Why do we need this? In some cases, we want to perform the patching before other
fixtures are run. For instance, the `s3_client` fixture creates an aiobotocore
client. During the client creation process, some event listeners are registered.
When we want to patch the target of these event listeners, we must do so before
the `s3_client` fixture is executed. Otherwise, the aiobotocore client will store
references to the unpatched targets.
    In such situations, make sure that subsequent fixtures explicitly depend on
    `patch_attributes` to enforce the ordering between fixtures.
Example:
@pytest.mark.patch_attributes([
dict(
target="aiobotocore.retries.adaptive.AsyncClientRateLimiter.on_sending_request",
side_effect=aiobotocore.retries.adaptive.AsyncClientRateLimiter.on_sending_request,
autospec=True
)
])
async def test_client_rate_limiter_called(s3_client, patch_attributes):
await s3_client.get_object(Bucket="bucket", Key="key")
# Just for illustration (this test doesn't pass).
        # patch_attributes is a list of 1 element, since we passed a list of 1 element
        # to the patch_attributes marker.
        patch_attributes[0].assert_called_once()
"""
marker = request.node.get_closest_marker("patch_attributes")
if marker is None:
yield
else:
with ExitStack() as stack:
yield [stack.enter_context(patch(**kwargs)) for kwargs in marker.args[0]]
@pytest.fixture
async def s3_client(session, region, config, s3_server, mocking_test, s3_verify,
patch_attributes):
    # This depends on patch_attributes because we may want to test event listeners.
    # See the documentation of `patch_attributes` for details.
kw = moto_config(s3_server) if mocking_test else {}
async with session.create_client('s3', region_name=region,
config=config, verify=s3_verify, **kw) as client:
yield client
@pytest.fixture
async def alternative_s3_client(session, alternative_region, signature_version,
s3_server, mocking_test):
kw = moto_config(s3_server) if mocking_test else {}
config = AioConfig(
region_name=alternative_region, signature_version=signature_version,
read_timeout=5, connect_timeout=5)
async with session.create_client('s3', region_name=alternative_region,
config=config, **kw) as client:
yield client
@pytest.fixture
async def dynamodb_client(session, region, config, dynamodb2_server,
mocking_test):
kw = moto_config(dynamodb2_server) if mocking_test else {}
async with session.create_client('dynamodb', region_name=region,
config=config, **kw) as client:
yield client
@pytest.fixture
async def cloudformation_client(session, region, config, cloudformation_server,
mocking_test):
kw = moto_config(cloudformation_server) if mocking_test else {}
async with session.create_client('cloudformation', region_name=region,
config=config, **kw) as client:
yield client
@pytest.fixture
async def sns_client(session, region, config, sns_server, mocking_test):
kw = moto_config(sns_server) if mocking_test else {}
async with session.create_client('sns', region_name=region,
config=config, **kw) as client:
yield client
@pytest.fixture
async def sqs_client(session, region, config, sqs_server, mocking_test):
kw = moto_config(sqs_server) if mocking_test else {}
async with session.create_client('sqs', region_name=region,
config=config, **kw) as client:
yield client
@pytest.fixture
async def batch_client(session, region, config, batch_server, mocking_test):
kw = moto_config(batch_server) if mocking_test else {}
async with session.create_client('batch', region_name=region,
config=config, **kw) as client:
yield client
@pytest.fixture
async def lambda_client(session, region, config, lambda_server, mocking_test):
kw = moto_config(lambda_server) if mocking_test else {}
async with session.create_client('lambda', region_name=region,
config=config, **kw) as client:
yield client
@pytest.fixture
async def iam_client(session, region, config, iam_server, mocking_test):
kw = moto_config(iam_server) if mocking_test else {}
async with session.create_client('iam', region_name=region,
config=config, **kw) as client:
yield client
@pytest.fixture
async def rds_client(session, region, config, rds_server, mocking_test):
kw = moto_config(rds_server) if mocking_test else {}
async with session.create_client('rds', region_name=region,
config=config, **kw) as client:
yield client
@pytest.fixture
async def ec2_client(session, region, config, ec2_server, mocking_test):
kw = moto_config(ec2_server) if mocking_test else {}
async with session.create_client('ec2', region_name=region,
config=config, **kw) as client:
yield client
@pytest.fixture
async def kinesis_client(session, region, config, kinesis_server, mocking_test):
kw = moto_config(kinesis_server) if mocking_test else {}
async with session.create_client('kinesis', region_name=region,
config=config, **kw) as client:
yield client
async def recursive_delete(s3_client, bucket_name):
# Recursively deletes a bucket and all of its contents.
paginator = s3_client.get_paginator('list_object_versions')
async for n in paginator.paginate(
Bucket=bucket_name, Prefix=''):
for obj in chain(
n.get('Versions', []),
n.get('DeleteMarkers', []),
n.get('Contents', []),
n.get('CommonPrefixes', [])):
kwargs = dict(Bucket=bucket_name, Key=obj['Key'])
if 'VersionId' in obj:
kwargs['VersionId'] = obj['VersionId']
resp = await s3_client.delete_object(**kwargs)
assert_status_code(resp, 204)
resp = await s3_client.delete_bucket(Bucket=bucket_name)
assert_status_code(resp, 204)
@pytest.fixture
async def bucket_name(region, create_bucket):
name = await create_bucket(region)
yield name
@pytest.fixture
async def table_name(create_table):
name = await create_table()
yield name
@pytest.fixture
async def create_bucket(s3_client):
_bucket_name = None
async def _f(region_name, bucket_name=None):
nonlocal _bucket_name
if bucket_name is None:
bucket_name = random_bucketname()
_bucket_name = bucket_name
bucket_kwargs = {'Bucket': bucket_name}
if region_name != 'us-east-1':
bucket_kwargs['CreateBucketConfiguration'] = {
'LocationConstraint': region_name,
}
response = await s3_client.create_bucket(**bucket_kwargs)
assert_status_code(response, 200)
await s3_client.put_bucket_versioning(
Bucket=bucket_name, VersioningConfiguration={'Status': 'Enabled'})
return bucket_name
try:
yield _f
finally:
await recursive_delete(s3_client, _bucket_name)
@pytest.fixture
async def create_table(dynamodb_client):
_table_name = None
async def _is_table_ready(table_name):
response = await dynamodb_client.describe_table(
TableName=table_name
)
return response['Table']['TableStatus'] == 'ACTIVE'
async def _f(table_name=None):
nonlocal _table_name
if table_name is None:
table_name = random_tablename()
_table_name = table_name
table_kwargs = {
'TableName': table_name,
'AttributeDefinitions': [
{
'AttributeName': 'testKey',
'AttributeType': 'S'
},
],
'KeySchema': [
{
'AttributeName': 'testKey',
'KeyType': 'HASH'
},
],
'ProvisionedThroughput': {
'ReadCapacityUnits': 1,
'WriteCapacityUnits': 1
},
}
response = await dynamodb_client.create_table(**table_kwargs)
while not (await _is_table_ready(table_name)):
pass
assert_status_code(response, 200)
return table_name
try:
yield _f
finally:
await delete_table(dynamodb_client, _table_name)
async def delete_table(dynamodb_client, table_name):
response = await dynamodb_client.delete_table(
TableName=table_name
)
assert_status_code(response, 200)
@pytest.fixture
def tempdir():
with tempfile.TemporaryDirectory() as td:
yield td
@pytest.fixture
def create_object(s3_client, bucket_name):
async def _f(key_name, body='foo'):
r = await s3_client.put_object(Bucket=bucket_name, Key=key_name,
Body=body)
assert_status_code(r, 200)
return r
return _f
@pytest.fixture
def create_multipart_upload(request, s3_client, bucket_name, event_loop):
_key_name = None
upload_id = None
async def _f(key_name):
nonlocal _key_name
nonlocal upload_id
_key_name = key_name
parsed = await s3_client.create_multipart_upload(
Bucket=bucket_name, Key=key_name)
upload_id = parsed['UploadId']
return upload_id
def fin():
event_loop.run_until_complete(s3_client.abort_multipart_upload(
UploadId=upload_id, Bucket=bucket_name, Key=_key_name))
request.addfinalizer(fin)
return _f
@pytest.fixture
async def aio_session():
async with aiohttp.ClientSession() as session:
yield session
def pytest_configure():
class AIOUtils:
def __init__(self):
self.assert_status_code = assert_status_code
self.assert_num_uploads_found = assert_num_uploads_found
pytest.aio = AIOUtils()
@pytest.fixture
def dynamodb_put_item(dynamodb_client, table_name):
async def _f(key_string_value):
response = await dynamodb_client.put_item(
TableName=table_name,
Item={
'testKey': {
'S': key_string_value
}
},
)
assert_status_code(response, 200)
return _f
@pytest.fixture
def topic_arn(region, create_topic, sns_client, event_loop):
arn = event_loop.run_until_complete(create_topic())
return arn
async def delete_topic(sns_client, topic_arn):
response = await sns_client.delete_topic(
TopicArn=topic_arn
)
assert_status_code(response, 200)
@pytest.fixture
def create_topic(request, sns_client, event_loop):
_topic_arn = None
async def _f():
nonlocal _topic_arn
response = await sns_client.create_topic(Name=random_name())
_topic_arn = response['TopicArn']
assert_status_code(response, 200)
return _topic_arn
def fin():
event_loop.run_until_complete(delete_topic(sns_client, _topic_arn))
request.addfinalizer(fin)
return _f
@pytest.fixture
async def sqs_queue_url(sqs_client):
response = await sqs_client.create_queue(QueueName=random_name())
queue_url = response['QueueUrl']
assert_status_code(response, 200)
try:
yield queue_url
finally:
response = await sqs_client.delete_queue(
QueueUrl=queue_url
)
assert_status_code(response, 200)
@pytest.fixture
async def exit_stack():
async with AsyncExitStack() as es:
yield es
pytest_plugins = ['mock_server']
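# Sketch of a test built on the fixtures above (hypothetical test module; the
# s3_client, bucket_name and create_object fixtures are defined in this
# conftest):
#
#     async def test_can_get_put_object(s3_client, bucket_name, create_object):
#         await create_object('key-1', body='hello')
#         resp = await s3_client.get_object(Bucket=bucket_name, Key='key-1')
#         assert (await resp['Body'].read()) == b'hello'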
|
|
# Copyright 2014 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from Crypto.Util import asn1
import fixtures
import mock
from OpenSSL import crypto
from oslo_config import fixture as oslo_fixture
import barbican.plugin.interface.certificate_manager as cm
from barbican.plugin import snakeoil_ca
from barbican.tests import certificate_utils
from barbican.tests import utils
class BaseTestCase(utils.BaseTestCase):
def setUp(self):
super(BaseTestCase, self).setUp()
self.conf = self.useFixture(oslo_fixture.Config(
conf=snakeoil_ca.CONF)).conf
self.tmp_dir = self.useFixture(fixtures.TempDir()).path
def tearDown(self):
super(BaseTestCase, self).tearDown()
class CaTestCase(BaseTestCase):
def test_gen_cacert_no_file_storage(self):
ca = snakeoil_ca.SnakeoilCA(cert_path=None, key_path=None,
key_size=512, subject_st='Test ST',
subject_l='Test L', subject_o='Test O',
subject_cn='Test CN')
subject = ca.cert.get_subject()
self.assertNotEqual(ca.key, None)
self.assertEqual("Test ST", subject.ST)
self.assertEqual("Test L", subject.L)
self.assertEqual("Test O", subject.O)
self.assertEqual("Test CN", subject.CN)
def test_gen_cacert_with_file_storage(self):
cert_path = self.tmp_dir + 'cert.pem'
key_path = self.tmp_dir + 'key.pem'
ca = snakeoil_ca.SnakeoilCA(cert_path=cert_path, key_path=key_path,
key_size=512, subject_st='Test ST',
subject_l='Test L', subject_o='Test O',
subject_cn='Test CN')
subject = ca.cert.get_subject()
self.assertNotEqual(ca.key, None)
self.assertEqual("Test ST", subject.ST)
self.assertEqual("Test L", subject.L)
self.assertEqual("Test O", subject.O)
self.assertEqual("Test CN", subject.CN)
# Make sure we preserve existing keypairs
ca = snakeoil_ca.SnakeoilCA(cert_path=cert_path, key_path=key_path)
subject = ca.cert.get_subject()
self.assertEqual("Test ST", subject.ST)
self.assertEqual("Test L", subject.L)
self.assertEqual("Test O", subject.O)
self.assertEqual("Test CN", subject.CN)
class CertManagerTestCase(BaseTestCase):
def setUp(self):
super(CertManagerTestCase, self).setUp()
self.ca = snakeoil_ca.SnakeoilCA(cert_path=None, key_path=None,
key_size=512, subject_st='Test ST',
subject_l='Test L',
subject_o='Test O',
subject_cn='Test CN')
def verify_sig(self, encoded_cert):
der = asn1.DerSequence()
der.decode(encoded_cert)
der_sig = asn1.DerObject()
der_sig.decode(der[2])
sig = der_sig.payload
self.assertIs('\x00', sig[0])
crypto.verify(self.ca.cert, sig[1:], der[0], 'sha256')
def test_gen_cert_no_file_storage(self):
req = certificate_utils.get_valid_csr_object()
cm = snakeoil_ca.CertManager(self.ca)
cert = cm.make_certificate(req)
first_serial = cert.get_serial_number()
cert_enc = crypto.dump_certificate(crypto.FILETYPE_ASN1, cert)
self.verify_sig(cert_enc)
cert = cm.make_certificate(req)
self.assertNotEqual(first_serial, cert.get_serial_number())
self.verify_sig(cert_enc)
cm = snakeoil_ca.CertManager(self.ca)
cert = cm.make_certificate(req)
def test_gen_cert_with_file_storage(self):
req = certificate_utils.get_valid_csr_object()
cm = snakeoil_ca.CertManager(self.ca)
cert = cm.make_certificate(req)
cert_enc = crypto.dump_certificate(crypto.FILETYPE_ASN1, cert)
first_serial = cert.get_serial_number()
self.verify_sig(cert_enc)
cm = snakeoil_ca.CertManager(self.ca)
cert = cm.make_certificate(req)
self.assertNotEqual(first_serial, cert.get_serial_number())
class SnakeoilCAPluginTestCase(BaseTestCase):
def setUp(self):
super(SnakeoilCAPluginTestCase, self).setUp()
self.ca_cert_path = os.path.join(self.tmp_dir, 'ca.pem')
self.ca_key_path = os.path.join(self.tmp_dir, 'ca.pem')
self.db_dir = self.tmp_dir
self.plugin = snakeoil_ca.SnakeoilCACertificatePlugin(
self.conf)
self.order_id = mock.MagicMock()
self.barbican_meta_dto = cm.BarbicanMetaDTO()
def test_issue_certificate_request(self):
req = certificate_utils.get_valid_csr_object()
req_enc = crypto.dump_certificate_request(crypto.FILETYPE_PEM, req)
order_meta = {'request_data': req_enc}
resp = self.plugin.issue_certificate_request(self.order_id,
order_meta, {},
self.barbican_meta_dto)
crypto.load_certificate(
crypto.FILETYPE_PEM, resp.certificate.decode('base64'))
def test_issue_certificate_request_set_subject(self):
req = certificate_utils.get_valid_csr_object()
subj = req.get_subject()
subj.countryName = 'US'
subj.stateOrProvinceName = 'OR'
subj.localityName = 'Testlandia'
subj.organizationName = 'Testers Anon'
subj.organizationalUnitName = 'Testers OU'
subj.commonName = 'Testing'
req_enc = crypto.dump_certificate_request(crypto.FILETYPE_PEM, req)
order_meta = {'request_data': req_enc}
resp = self.plugin.issue_certificate_request(self.order_id,
order_meta, {},
self.barbican_meta_dto)
cert = crypto.load_certificate(
crypto.FILETYPE_PEM, resp.certificate.decode('base64'))
cert_subj = cert.get_subject()
self.assertEqual(cert_subj.C, 'US')
self.assertEqual(cert_subj.ST, 'OR')
self.assertEqual(cert_subj.L, 'Testlandia')
self.assertEqual(cert_subj.O, 'Testers Anon')
self.assertEqual(cert_subj.OU, 'Testers OU')
self.assertEqual(cert_subj.CN, 'Testing')
def test_issue_certificate_request_stored_key(self):
req = certificate_utils.get_valid_csr_object()
req_enc = crypto.dump_certificate_request(crypto.FILETYPE_PEM, req)
self.barbican_meta_dto.generated_csr = req_enc
resp = self.plugin.issue_certificate_request(
self.order_id, {}, {}, self.barbican_meta_dto)
crypto.load_certificate(
crypto.FILETYPE_PEM, resp.certificate.decode('base64'))
def test_no_request_data(self):
res = self.plugin.issue_certificate_request(
self.order_id, {}, {}, self.barbican_meta_dto)
self.assertIs(cm.CertificateStatus.CLIENT_DATA_ISSUE_SEEN,
res.status)
self.assertEqual("No request_data specified", res.status_message)
def test_get_default_ca_name(self):
self.assertEqual(self.plugin.get_default_ca_name(), "Snakeoil CA")
def test_get_default_signing_cert(self):
ca_cert = self.plugin.get_default_signing_cert()
self.assertEqual(self.plugin.ca._cert_val, ca_cert)
def test_get_default_intermediates_none(self):
intermediates = self.plugin.get_default_intermediates()
self.assertIsNone(intermediates)
def test_not_implemented(self):
self.assertRaises(NotImplementedError,
self.plugin.modify_certificate_request,
'', {}, {}, {})
self.assertRaises(NotImplementedError,
self.plugin.cancel_certificate_request,
'', {}, {}, {})
self.assertRaises(NotImplementedError,
self.plugin.check_certificate_status,
'', {}, {}, {})
def test_support_request_types(self):
manager = cm.CertificatePluginManager()
manager.extensions = [mock.MagicMock(obj=self.plugin)]
cert_spec = {
cm.REQUEST_TYPE: cm.CertificateRequestType.CUSTOM_REQUEST}
self.assertEqual(self.plugin, manager.get_plugin(cert_spec))
self.assertTrue(self.plugin.supports(cert_spec))
cert_spec = {
cm.REQUEST_TYPE: cm.CertificateRequestType.STORED_KEY_REQUEST}
self.assertEqual(self.plugin, manager.get_plugin(cert_spec))
self.assertTrue(self.plugin.supports(cert_spec))
cert_spec = {
cm.REQUEST_TYPE: cm.CertificateRequestType.FULL_CMC_REQUEST}
self.assertRaises(cm.CertificatePluginNotFound,
manager.get_plugin, cert_spec)
self.assertFalse(self.plugin.supports(cert_spec))
|
|
from generator.actions import Actions
import random
import re
import struct
def random_alpha():
alphabet = list(set([chr(x) for x in xrange(256)]) - set(['\0']))
length = random.randint(10, 50)
return ''.join([random.choice(alphabet) for x in xrange(length)])
def random_number():
return random.randint(10, 100000)
def get_f(v, f, n):
if n == 0 and f[0] is None:
s = ''
last = sorted([int(x) for x in f.keys()])[-1]
for x in xrange(last+1):
if x == 0:
continue
if x > 1:
s += v['OFS']
s += f.get(x, '')
f[0] = s
elif not 1 in f:
x = 1
for w in f[0].split(' '):
f[x] = w
x += 1
return f.get(n, '')
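# Worked example of get_f (illustrative, not from the original source): with
# v = {'OFS': ' '} and f = {0: 'a b c'}, get_f(v, f, 2) lazily splits $0 into
# numbered fields and returns 'b'; when f[0] has been reset to None,
# get_f(v, f, 0) rebuilds $0 by joining fields 1..N with OFS.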
def truncate_i(v):
i = v.get('i') & 0xffffffff
i = struct.unpack('<i', struct.pack('<I', i))[0]
v['i'] = i
def print_i(v):
i = v.get('i', '')
return str(i) + '\n'
def to_bool(s):
if isinstance(s, int):
return 1 if s else 0
if s == '':
return 0
return 1
def to_int(s):
if isinstance(s, int):
return s
m = re.match('([+-]?[0-9]+)', s)
if not m:
return 0
i = int(m.group(1))
i = struct.unpack('<i', struct.pack('<I', i & 0xffffffff))[0]
return i
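# Assumed awk-like coercion semantics of the helpers above, shown by example:
#   to_int('42abc') -> 42   leading digits are parsed, trailing text is ignored
#   to_int('abc')   -> 0    no leading number means zero
#   to_bool('')     -> 0    the empty string is false; any other string is true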
def test1(v, f):
return get_f(v, f, 0) + '\n'
def test2(v, f):
return get_f(v, f, 1) + get_f(v, f, 2) + '\n'
def test3(v, f):
return get_f(v, f, 1) + v['OFS'] + get_f(v, f, 2) + '\n'
def test4(v, f):
v['OFS'] = 'a'
get_f(v, f, 1)
f[3] = 'foo'
f[0] = None
return get_f(v, f, 0) + '\n'
def test4b(v, f):
v['OFS'] = ' '
return ''
def test5(v, f):
    return ''.join(['%d\n' % x for x in xrange(8)])
def test6(v, f):
v['i'] = 0
return print_i(v)
def test7(v, f):
return print_i(v)
def test8(v, f):
v['i'] = to_int(v.get('i', 0)) + 10
truncate_i(v)
return print_i(v)
def test9(v, f):
v['i'] = to_int(v.get('i', 0)) * 90
truncate_i(v)
return print_i(v)
def test10(v, f):
v['i'] = to_int(v.get('i', 0)) - 2
truncate_i(v)
return print_i(v)
def test11(v, f):
v['i'] = int(float(to_int(v.get('i', 0))) / 8)
truncate_i(v)
return print_i(v)
def test12(v, f):
v['i'] = to_int(v.get('i', 0)) * 3
truncate_i(v)
return print_i(v)
def test13(v, f):
v['i'] = to_int(v.get('i', 0))
if v['i'] < 0:
v['i'] = -(v['i'] % 2)
else:
v['i'] = v['i'] % 2
truncate_i(v)
return print_i(v)
def test14(v, f):
v['i'] = 0 if to_bool(v.get('i', 0)) != 0 else 1
return print_i(v)
def test15(v, f):
v['i'] = -to_int(v.get('i', 0))
truncate_i(v)
return print_i(v)
def test16(v, f):
i = "a" + str(v.get('i', "")) + "b"
v['i'] = i
return print_i(v)
def test17(v, f):
i = "10" + str(v.get('i', ""))
v['i'] = i
return print_i(v)
def test18(v, f):
i = v.get('i', 0)
j = 4
if isinstance(i, str):
j = str(j)
if i > j:
i = 1
else:
i = -1
v['i'] = i
return print_i(v)
def test19(v, f):
i = to_int(v.get('i', 0))
if to_bool(v.get('i', 0)) != 0 and (i - 1) != 0:
i = 1
else:
i = 0
v['i'] = i
return print_i(v)
def test20(v, f):
i = to_int(v.get('i', 0))
if (i * 8) != 0 or (i - 2) != 0:
i = 1
else:
i = 0
v['i'] = i
return print_i(v)
def test21(v, f):
return '0\n'
class Fuzzy(Actions):
TESTS = [
(test1, 'print;'),
(test2, 'print $1 $2;'),
(test3, 'print $1, $2;'),
(test4, 'OFS = "a"; $3 = "foo"; print;'),
(test4b, 'OFS = " ";'),
(test5, 'for (loop = 0; loop < 8; loop += 1) print loop;'),
(test6, 'for (i = 0; i < 10; --i) break; print i;'),
(test7, 'print i;'),
(test8, 'i += 10; print i;'),
(test9, 'i = i * 90; print i;'),
(test10, 'i -= 2; print i;'),
(test11, 'i /= 8; print i;'),
(test12, 'i *= 3; print i;'),
(test13, 'i %= 2; print i;'),
(test14, 'i = !i; print i;'),
(test15, 'i = -i; print i;'),
(test16, 'i = "a" i "b"; print i;'),
(test17, 'i = "10" i; print i;'),
(test18, 'i = i > 4 ? 1 : -1; print i;'),
(test19, 'i = i && (i - 1); print i;'),
(test20, 'i = (i * 8) || (i - 2); print i;'),
(test21, 'printf "#d#s", foobar, ORS;'),
]
def execute(self, stmt):
out = ''
for x in stmt:
out += x[0](self.state['vars'], self.state['fields'])
return out
def generate_stmt(self):
ntests = random.randint(len(self.TESTS), len(self.TESTS) * 8)
stmt = [random.choice(self.TESTS) for x in xrange(ntests)]
s = '\n'.join([x[1] for x in stmt])
self.state['results'][s] = stmt
return s
def start(self):
self.state['data'] = '''Lorem ipsum dolor sit amet,
consectetur adipiscing elit, sed do eiusmod tempor
incididunt ut labore et dolore magna aliqua.
Ut enim ad minim veniam, quis nostrud exercitation
ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute
irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur.
Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt
mollit anim id est laborum.'''
self.state['results'] = {}
self.state['programs'] = []
self.state['vars'] = {
'OFS': ' '
}
def generate(self):
pass
def begin(self):
s = self.generate_stmt()
self.state['programs'].append(('BEGIN', s))
self.write('\nBEGIN { %s }' % s)
def normal(self):
word = random.choice(self.state['data'].split())
s = self.generate_stmt()
self.state['programs'].append((word, s))
self.write('\n/%s/ { %s }' % (word, s))
def end(self):
s = self.generate_stmt()
self.state['programs'].append(('END', s))
self.write('\nEND { %s }' % s)
def send_data(self):
self.write(self.state['data'] + '\n')
for line in self.state['data'].split('\n'):
line = line.strip('\n')
self.state['fields'] = {
0: line
}
for x in self.state['programs']:
if x[0] == 'BEGIN':
s = self.execute(self.state['results'][x[1]])
self.read(length=len(s), expect=s)
for x in self.state['programs']:
line = get_f(self.state['vars'], self.state['fields'], 0)
if x[0] in line and x[0] not in ('BEGIN', 'END'):
s = self.execute(self.state['results'][x[1]])
self.read(length=len(s), expect=s)
for x in self.state['programs']:
if x[0] == 'END':
s = self.execute(self.state['results'][x[1]])
self.read(length=len(s), expect=s)
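# Taken together, these actions mimic a tiny awk interpreter: begin() emits a
# BEGIN block, normal() emits a /pattern/ block keyed on a word from the sample
# text, end() emits an END block, and send_data() feeds the input lines and
# replays the expected output of each block via read().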
|
|
import math
import bitify.python.utils.i2cutils as I2CUtils
class MPU6050(object):
'''
Simple MPU-6050 implementation
'''
PWR_MGMT_1 = 0x6b
FS_SEL = 0x1b
FS_250 = 0
FS_500 = 1
FS_1000 = 2
FS_2000 = 3
AFS_SEL = 0x1c
AFS_2g = 0
AFS_4g = 1
AFS_8g = 2
AFS_16g = 3
ACCEL_START_BLOCK = 0x3b
ACCEL_XOUT_H = 0
ACCEL_XOUT_L = 1
ACCEL_YOUT_H = 2
ACCEL_YOUT_L = 3
ACCEL_ZOUT_H = 4
ACCEL_ZOUT_L = 5
ACCEL_SCALE = { AFS_2g : [ 2, 16384.0], AFS_4g : [ 4, 8192.0], AFS_8g : [ 8, 4096.0], AFS_16g : [16, 2048.0] }
TEMP_START_BLOCK = 0x41
TEMP_OUT_H = 0
TEMP_OUT_L = 1
GYRO_START_BLOCK = 0x43
GYRO_XOUT_H = 0
GYRO_XOUT_L = 1
GYRO_YOUT_H = 2
GYRO_YOUT_L = 3
GYRO_ZOUT_H = 4
GYRO_ZOUT_L = 5
GYRO_SCALE = { FS_250 : [ 250, 131.0], FS_500 : [ 500, 65.5], FS_1000 : [1000, 32.8], FS_2000 : [2000, 16.4] }
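    # Each *_SCALE entry maps a full-scale setting to [range, sensitivity in
    # LSB per unit] (g for the accelerometer, degrees/second for the gyro),
    # matching the MPU-6050 datasheet values.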
K = 0.98
K1 = 1 - K
def __init__(self, bus, address, name, fs_scale=FS_250, afs_scale=AFS_2g):
'''
Constructor
'''
self.bus = bus
self.address = address
self.name = name
self.fs_scale = fs_scale
self.afs_scale = afs_scale
self.raw_gyro_data = [0, 0, 0, 0, 0, 0]
self.raw_accel_data = [0, 0, 0, 0, 0, 0]
self.raw_temp_data = [0, 0]
self.gyro_raw_x = 0
self.gyro_raw_y = 0
self.gyro_raw_z = 0
self.gyro_scaled_x = 0
self.gyro_scaled_y = 0
self.gyro_scaled_z = 0
self.raw_temp = 0
self.scaled_temp = 0
self.accel_raw_x = 0
self.accel_raw_y = 0
self.accel_raw_z = 0
self.accel_scaled_x = 0
self.accel_scaled_y = 0
self.accel_scaled_z = 0
self.pitch = 0.0
self.roll = 0.0
        # We need to wake up the module as it starts in sleep mode
I2CUtils.i2c_write_byte(self.bus, self.address, MPU6050.PWR_MGMT_1, 0)
        # Set the gyro resolution
I2CUtils.i2c_write_byte(self.bus, self.address, MPU6050.FS_SEL, self.fs_scale << 3)
# Set the accelerometer resolution
I2CUtils.i2c_write_byte(self.bus, self.address, MPU6050.AFS_SEL, self.afs_scale << 3)
def read_raw_data(self):
'''
Read the raw data from the sensor, scale it appropriately and store for later use
'''
self.raw_gyro_data = I2CUtils.i2c_read_block(self.bus, self.address, MPU6050.GYRO_START_BLOCK, 6)
self.raw_accel_data = I2CUtils.i2c_read_block(self.bus, self.address, MPU6050.ACCEL_START_BLOCK, 6)
self.raw_temp_data = I2CUtils.i2c_read_block(self.bus, self.address, MPU6050.TEMP_START_BLOCK, 2)
self.gyro_raw_x = I2CUtils.twos_compliment(self.raw_gyro_data[MPU6050.GYRO_XOUT_H], self.raw_gyro_data[MPU6050.GYRO_XOUT_L])
self.gyro_raw_y = I2CUtils.twos_compliment(self.raw_gyro_data[MPU6050.GYRO_YOUT_H], self.raw_gyro_data[MPU6050.GYRO_YOUT_L])
self.gyro_raw_z = I2CUtils.twos_compliment(self.raw_gyro_data[MPU6050.GYRO_ZOUT_H], self.raw_gyro_data[MPU6050.GYRO_ZOUT_L])
self.accel_raw_x = I2CUtils.twos_compliment(self.raw_accel_data[MPU6050.ACCEL_XOUT_H], self.raw_accel_data[MPU6050.ACCEL_XOUT_L])
self.accel_raw_y = I2CUtils.twos_compliment(self.raw_accel_data[MPU6050.ACCEL_YOUT_H], self.raw_accel_data[MPU6050.ACCEL_YOUT_L])
self.accel_raw_z = I2CUtils.twos_compliment(self.raw_accel_data[MPU6050.ACCEL_ZOUT_H], self.raw_accel_data[MPU6050.ACCEL_ZOUT_L])
self.raw_temp = I2CUtils.twos_compliment(self.raw_temp_data[MPU6050.TEMP_OUT_H], self.raw_temp_data[MPU6050.TEMP_OUT_L])
# We convert these to radians for consistency and so we can easily combine later in the filter
self.gyro_scaled_x = math.radians(self.gyro_raw_x / MPU6050.GYRO_SCALE[self.fs_scale][1])
self.gyro_scaled_y = math.radians(self.gyro_raw_y / MPU6050.GYRO_SCALE[self.fs_scale][1])
self.gyro_scaled_z = math.radians(self.gyro_raw_z / MPU6050.GYRO_SCALE[self.fs_scale][1])
        self.scaled_temp = self.raw_temp / 340.0 + 36.53  # float divisor avoids integer truncation on Python 2
self.accel_scaled_x = self.accel_raw_x / MPU6050.ACCEL_SCALE[self.afs_scale][1]
self.accel_scaled_y = self.accel_raw_y / MPU6050.ACCEL_SCALE[self.afs_scale][1]
self.accel_scaled_z = self.accel_raw_z / MPU6050.ACCEL_SCALE[self.afs_scale][1]
self.pitch = self.read_x_rotation(self.read_scaled_accel_x(),self.read_scaled_accel_y(),self.read_scaled_accel_z())
self.roll = self.read_y_rotation(self.read_scaled_accel_x(),self.read_scaled_accel_y(),self.read_scaled_accel_z())
def distance(self, x, y):
        '''Returns the distance between two points in 2d space'''
return math.sqrt((x * x) + (y * y))
def read_x_rotation(self, x, y, z):
'''Returns the rotation around the X axis in radians'''
return math.atan2(y, self.distance(x, z))
def read_y_rotation(self, x, y, z):
'''Returns the rotation around the Y axis in radians'''
return -math.atan2(x, self.distance(y, z))
def read_raw_accel_x(self):
'''Return the RAW X accelerometer value'''
return self.accel_raw_x
def read_raw_accel_y(self):
'''Return the RAW Y accelerometer value'''
return self.accel_raw_y
def read_raw_accel_z(self):
'''Return the RAW Z accelerometer value'''
return self.accel_raw_z
def read_scaled_accel_x(self):
'''Return the SCALED X accelerometer value'''
return self.accel_scaled_x
def read_scaled_accel_y(self):
'''Return the SCALED Y accelerometer value'''
return self.accel_scaled_y
def read_scaled_accel_z(self):
'''Return the SCALED Z accelerometer value'''
return self.accel_scaled_z
def read_raw_gyro_x(self):
'''Return the RAW X gyro value'''
return self.gyro_raw_x
def read_raw_gyro_y(self):
'''Return the RAW Y gyro value'''
return self.gyro_raw_y
def read_raw_gyro_z(self):
'''Return the RAW Z gyro value'''
return self.gyro_raw_z
def read_scaled_gyro_x(self):
'''Return the SCALED X gyro value in radians/second'''
return self.gyro_scaled_x
def read_scaled_gyro_y(self):
'''Return the SCALED Y gyro value in radians/second'''
return self.gyro_scaled_y
def read_scaled_gyro_z(self):
'''Return the SCALED Z gyro value in radians/second'''
return self.gyro_scaled_z
def read_temp(self):
'''Return the temperature'''
return self.scaled_temp
def read_pitch(self):
'''Return the current pitch value in radians'''
return self.pitch
def read_roll(self):
'''Return the current roll value in radians'''
        return self.roll
def read_all(self):
'''Return pitch and roll in radians and the scaled x, y & z values from the gyroscope and accelerometer'''
self.read_raw_data()
return (self.pitch, self.roll, self.gyro_scaled_x, self.gyro_scaled_y, self.gyro_scaled_z, self.accel_scaled_x, self.accel_scaled_y, self.accel_scaled_z)
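# Example usage (a minimal sketch; the smbus module, bus number and the default
# I2C address 0x68 are assumptions -- adjust for your wiring):
#
#   import smbus
#   bus = smbus.SMBus(1)
#   sensor = MPU6050(bus, 0x68, 'mpu6050')
#   pitch, roll, gx, gy, gz, ax, ay, az = sensor.read_all()
#   print(math.degrees(pitch), math.degrees(roll))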
|
|
"""
Player.py - File for defining a Player class.
"""
import logging
import numpy as np
from monopoly.Constants import CARD_WIDTH, MAX_LEVEL, MAX_HOUSE_LEVEL, MAX_DOUBLES, JAIL_COST, NUM_PROPERTIES, INIT_CASH
from monopoly.Constants import COLOR_COUNTS, COLOR_PROPERTIES, PROPERTY_PRICES, PROPERTY_NAMES, SPACE
from monopoly.Constants import GO, JAIL, GO_TO_JAIL, LUXURY_TAX, INCOME_TAX, FREE_PARKING, COMMUNITY_CHEST, CHANCE
class Player(object):
"""Defines a Player of the Game."""
def __init__(self, number, game, risk_tol=None):
"""Initialize a player."""
self.number = number
self.game = game
self.position = 0
self.money = INIT_CASH
self.debt = 0.0
self.properties = []
self.cards = []
self.turn = 0
self.round = 1
self.risk_tolerance = risk_tol if risk_tol else np.random.normal(0.75, 0.1)
self.just_visiting = True
self.jail_double_try = 0
logging.info('Player %s is created (M: $%s, D: $%s, NW: $%s).', self.number, self.money, self.debt, self.networth)
@property
def networth(self):
"""Calculate the instantaneous networth of the player."""
return self.money - self.debt + sum([p.value for p in self.properties])
@property
def num_houses(self):
"""Calculate the instantaneous number of houses a player has."""
return sum([p.houses for p in self.properties])
@property
def num_hotels(self):
"""Calculate the instantaneous number of hotels a player has."""
return sum([p.hotels for p in self.properties])
@property
def bankrupt(self):
"""Check is player has no money and no properties or cards."""
return self.debt > 0 # and all([prop.mortgaged for prop in self.properties])
def __str__(self):
"""String print out of Player."""
card_str = ['-' * CARD_WIDTH]
card_str.append('Player {} (Game {})'.format(self.number, self.game.number).center(CARD_WIDTH))
card_str.append('-' * CARD_WIDTH)
card_str.append(''.center(CARD_WIDTH))
L = len(' Position ')
card_str.append(' Position ' + ' {} '.format(self.position).rjust(CARD_WIDTH - L))
card_str.append(' Turn ' + ' {} '.format(self.turn).rjust(CARD_WIDTH - L))
card_str.append(' Round ' + ' {} '.format(self.round).rjust(CARD_WIDTH - L))
card_str.append(' Money ' + '${} '.format(self.money).rjust(CARD_WIDTH - L))
card_str.append(' Debt ' + '${} '.format(self.debt).rjust(CARD_WIDTH - L))
card_str.append(' Net Worth ' + '${} '.format(self.networth).rjust(CARD_WIDTH - L))
card_str.append(' Properties'.ljust(CARD_WIDTH))
for prop in self.properties:
modifier = 'h' * prop.houses + 'H' * prop.hotels + ('*' if prop.mortgaged else '')
modifier = '({})'.format(modifier) if modifier else ''
card_str.append('{} {}'.format(prop.name, modifier).center(CARD_WIDTH))
card_str.append(' Cards'.ljust(CARD_WIDTH))
for c in self.cards:
card_str.append(repr(c).center(CARD_WIDTH))
card_str.append(''.center(CARD_WIDTH))
card_str.append('-' * CARD_WIDTH)
return '\n'.join('|{}|'.format(l) for l in card_str)
def __repr__(self):
return 'Player {}'.format(self.number)
@staticmethod
def roll_dice():
"""Roll two dice with uniform randomness. Return dice sum and doubles boolean."""
        dice1 = np.random.randint(1, 7)  # np.random.randint excludes the upper bound, so use 7 for a 1-6 die
        dice2 = np.random.randint(1, 7)
return dice1 + dice2, dice1 == dice2, (dice1, dice2)
@staticmethod
def bid(number=0, what=None, min_bid=0, max_bid=1e7):
"""Make a bid on a property."""
price = PROPERTY_PRICES[number]
price_bid = np.random.normal(price, 0.5 * price)
return min(max(price_bid, min_bid), max_bid)
@staticmethod
def jail_pay_or_roll():
"""Pay or roll to get out of jail."""
r = np.random.rand()
return 0 if r > 0.5 else 1
def owns(self, number=0):
"""Check if player owns a property position."""
return number in [p.loc for p in self.properties]
def get_property(self, number=0):
"""Find the property in the player's deeds. Assumes ownership is already determined."""
prop = [prop for prop in self.properties if number == prop.loc]
return prop[0] if prop else None
def pop_property(self, number=0):
"""Find a property position in the player's deed list. Removes from list"""
indx = [i for i, prop in enumerate(self.properties) if number == prop.loc]
return self.properties.pop(indx[0]) if indx else None
def add(self, amount=0.0, addfrom=None):
"""Add an amount of money from someone."""
self.money += amount
if addfrom:
addfrom.pay(amount)
return self
def pay(self, amount=0.0, payto=None):
"""Pay an amount of money to someone."""
# Pay if player has enough money
if self.money > amount:
self.money -= amount
if payto:
payto.add(amount)
        # Otherwise, liquidate assets to try to raise the money
else:
logging.debug(
'Player %s does not have enough money to pay $%s (M: $%s, D: $%s, NW: $%s).',
self.number, amount, self.money, self.debt, self.networth
)
logging.debug(
'Player %s has %s properties, %s hotels, %s houses, %s cards and %s networth (M: $%s, D: $%s, NW: $%s).',
self.number, len(self.properties), self.num_hotels, self.num_houses, len(self.cards), self.networth,
self.money, self.debt, self.networth
)
# Look for enough money until you have it or run out of things to sell
self.liquidate(amount)
# Pay the amount owed or the money you have
if self.money > amount:
self.money -= amount
if payto:
payto.add(amount)
else:
if payto:
payto.add(self.money)
                self.debt = amount - self.money  # record the shortfall before wiping out the cash
                self.money = 0.0
# Check for bankruptcy
if self.bankrupt:
# Look up player
bankrupt_player_index = [indx for indx, plyr in enumerate(self.game.players) if plyr.number == self.number]
# Remove player from game
self.game.bankrupted.append(self.game.players.pop(bankrupt_player_index[0]))
# Reset properties
for prop in self.properties:
prop.hotels = 0
prop.houses = 0
prop.mortgaged = False
# Transfer player properties back to Game
self.game.properties = self.game.properties + self.properties
self.properties = []
logging.info('Player %s is bankrupt.', self.number)
return self
def buy_property(self, number=None, price=None):
"""Purchase a property. Pay money to bank and take property from Game list."""
number = number if number else self.position
prop = self.game.get_property(number)
if prop:
price = price if price else prop.price
self.pay(price, self.game.bank)
self.properties.append(prop)
logging.debug(
'Player %s purchases %s for $%s. (M: $%s, D: $%s, NW: $%s)',
self.number, prop.name, price, self.money, self.debt, self.networth
)
else:
logging.warning(
'This should not happen, if property (number=%s) is not in Game list, somebody should own it.',
number
)
return self
def follow(self, card=None):
"""Follow the rules of a card."""
card.rule(self, self.game, card)
logging.debug(
'Player %s followed %s card. (M: $%s, D: $%s, NW: $%s)',
self.number, card.name, self.money, self.debt, self.networth
)
return self
def ask_to_buy(self, player=None, number=0, price=0):
"""Ask to buy something from another player."""
answer = player.reply_to_buy(self, number, price)
if answer:
self.pay(price, player)
self.properties.append(player.pop_property(number))
return self
def reply_to_buy(self, player=None, number=0, price=0):
"""Reply to another player's buy offer."""
# If player has little money accept the offer
if price > self.money:
return True
else:
r = np.random.rand()
return r > 0.5
def ask_to_sell(self, player=None, number=0, price=0):
"""Ask to sell something to another player."""
answer = player.reply_to_sell(self, number, price)
if answer:
player.pay(price, self)
player.properties.append(self.pop_property(number))
return self
def reply_to_sell(self, player=None, number=0, price=0):
"""Reply to another player's sell offer."""
# If the player does not have enough money reply no
if self.money < price:
return False
else:
r = np.random.rand()
return r > 0.5
def guess_income_tax(self):
"""Randomly guess if 10% or $200 income tax is better to pay"""
r = np.random.rand()
return 0.1 * self.networth if r > 0.5 else 200.0
@staticmethod
def liquidate_or_auction():
"""Liquidate money or just put up for auction."""
r = np.random.rand()
return 0 if r > 0.5 else 1
def go_to_space(self, number=0, pass_go=True, just_visiting=True):
"""Move player to specific space."""
# Handle passing go
if pass_go and self.position > number:
self.round += 1
self.game.bank.pay(200.0, self)
logging.debug(
'Player %s passed go, bank paid $200. (M: $%s, D: $%s, NW: $%s)',
self.number, self.money, self.debt, self.networth
)
# Go to new position
self.position = number
# Set visiting flag
self.just_visiting = just_visiting
# self.game.record.append([
# self.game.number,
# self.number,
# self.turn,
# self.round,
# 0,
# False,
# self.position,
# SPACE[self.position][0]
# ])
jailvisit = ' (just visiting)' if number == JAIL and just_visiting else ''
logging.debug('Player %s goes to %s%s.', self.number, SPACE[self.position][0], jailvisit)
self.handle_space()
return self
def go_to_nearest(self, space_list=None):
"""Helper function to go to the nearest instance of a space."""
found = False
        for p in space_list:
            if self.position < p:
                found = True
                self.go_to_space(p)
                break
if not found:
self.go_to_space(space_list[0])
return self
def liquidate(self, money=0.0):
"""Make decisions to liquidate some assets."""
procedes = 0.0
# Sell get out of jail free cards
# if len(self.cards) > 0 and self.cards[0].name == 'Get out of Jail Free':
# # See if other will buy cards
# for plyr in self.game.others(self):
# self.ask_to_sell(plyr, self.cards.pop)
# if procedes >= money:
# break
        # Get monopolies, if any
        monopolies = self.check_monopolies()
        # Mortgage the lowest-valued, unmortgaged, undeveloped, non-monopoly properties first
while procedes < money:
candidate_prop = None
min_value = 1e20
for prop in self.properties:
if (not prop.mortgaged) & (prop.houses == 0) & (prop.hotels == 0) & (prop.color not in monopolies):
if prop.value < min_value:
min_value = prop.value
candidate_prop = prop
if candidate_prop:
candidate_prop.mortgaged = True
procedes += candidate_prop.mortgage
self.add(candidate_prop.mortgage, self.game.bank)
logging.debug(
'Player %s mortgaged %s for $%s. (M: $%s, D: $%s, NW: $%s)',
self.number, candidate_prop.name, candidate_prop.mortgage, self.money, self.debt, self.networth
)
else:
logging.debug(
'Player %s has nothing to mortgage! (M: $%s, D: $%s, NW: $%s)',
self.number, self.money, self.debt, self.networth
)
break
# Sell hotels
if procedes < money:
for prop in self.properties:
if prop.category == 'property':
if prop.hotels > 0:
prop.hotels -= 1
procedes += prop.cost
self.add(prop.cost, self.game.bank)
logging.debug(
'Player %s sold %s hotel for %s. (M: $%s, D: $%s, NW: $%s)',
self.number, prop.name, prop.cost, self.money, self.debt, self.networth
)
if procedes >= money:
break
# Sell houses
if procedes < money:
for prop in self.properties:
if prop.category == 'property':
if prop.houses > 0:
prop.houses -= 1
procedes += prop.cost
self.add(prop.cost, self.game.bank)
logging.debug(
'Player %s sold %s house for %s. (M: $%s, D: $%s, NW: $%s)',
self.number, prop.name, prop.cost, self.money, self.debt, self.networth
)
if procedes >= money:
break
if procedes < money:
logging.debug(
                'Player %s with $%s networth was unable to liquidate $%s to pay $%s.',
self.number, self.networth, procedes, money
)
def jail_time(self):
"""Make decision for what to do in jail."""
# Set defaults
roll, doubles, dice = None, None, None
# Use get out of jail free card, if player has one
if len(self.cards) > 0 and self.cards[0].name == 'Get out of Jail Free':
logging.debug('Player %s using "Get out of Jail Free" Card.', self.number)
self.just_visiting = True
self.jail_double_try = 0
card = self.cards.pop(0)
# replace card in deck
if card.deck == 'Chance':
self.game.chance.append(card)
logging.debug('Player %s returned card to chance.', self.number)
elif card.deck == 'Chest':
self.game.community_chest.append(card)
logging.debug('Player %s returned card to community chest.', self.number)
else:
logging.warning('Card is not chance or community chest.')
# Either roll or pay
elif self.money > JAIL_COST:
# Pay or Roll decision unless, already tried doubles 3 times
pay_or_roll = self.jail_pay_or_roll() if self.jail_double_try < 3 else 1
# Try to roll doubles
if pay_or_roll == 0:
roll, doubles, dice = self.roll_dice()
if doubles:
self.just_visiting = True
self.jail_double_try = 0
logging.debug('Player %s rolled doubles to get out of jail.', self.number)
else:
self.jail_double_try += 1
logging.debug('Player %s did NOT roll doubles. Still in jail.', self.number)
# Just pay to get out
elif pay_or_roll == 1:
self.pay(JAIL_COST, self.game.freeparking)
self.just_visiting = True
self.jail_double_try = 0
logging.debug(
'Player %s paid %s to get out of jail. (M: $%s, D: $%s, NW: $%s)',
self.number, JAIL_COST, self.money, self.debt, self.networth
)
else:
logging.warning('Pay or Roll decision not possible.')
else:
logging.debug(
'Player %s does not have enough money, liquidate some assets to get Jail fee. (M: $%s, D: $%s, NW: $%s)',
self.number, self.money, self.debt, self.networth
)
self.liquidate(JAIL_COST)
if self.money > JAIL_COST:
self.pay(JAIL_COST, self.game.freeparking)
self.just_visiting = True
self.jail_double_try = 0
logging.debug(
'Player %s paid %s to get out of jail. (M: $%s, D: $%s, NW: $%s)',
self.number, JAIL_COST, self.money, self.debt, self.networth
)
else:
logging.debug(
'Player %s could not find enough money. Still in jail. (M: $%s, D: $%s, NW: $%s)',
self.number, self.money, self.debt, self.networth
)
return (roll, doubles, dice)
def take_turn(self):
"""Player takes a turn."""
# Set up Turn
self.turn += 1
doubles_count = 0
roll, doubles, dice = None, None, None
# Do Jail time or get out
if self.position == JAIL and not self.just_visiting:
roll, doubles, dice = self.jail_time()
# Do normal turn if not in jail, just visiting jail or just got out of jail
if self.position != JAIL or (self.position == JAIL and self.just_visiting):
# Roll Dice
if roll is None:
roll, doubles, dice = self.roll_dice()
# If rolled doubles and max doubles not rolled continue rolling
# TODO: Code turn actions in between double rolls
while doubles and doubles_count < MAX_DOUBLES:
doubles_count += 1
logging.debug(
'Player %s rolls %s = %s. Double count = %s. (M: $%s, D: $%s, NW: $%s)',
self.number, dice, roll, doubles_count, self.money, self.debt, self.networth
)
roll, doubles, dice = self.roll_dice()
logging.debug(
'Player %s rolls %s = %s. Double count = %s. (M: $%s, D: $%s, NW: $%s)',
self.number, dice, roll, doubles_count, self.money, self.debt, self.networth
)
# self.game.record.append([
# self.game.number,
# self.number,
# self.turn,
# self.round,
# roll,
# doubles,
# self.position,
# SPACE[self.position][0]
# ])
# Go to rolled property or jail
if doubles_count >= 3:
self.go_to_space(JAIL, pass_go=False, just_visiting=False)
logging.debug('Player %s rolled doubles 3 times, going to jail.', self.number)
else:
self.go_to_space((self.position + roll) % NUM_PROPERTIES)
# Check for bankruptcy at end of turn
if self.bankrupt:
logging.debug(
'Player %s is bankrupt - why did this happen here. (M: $%s, D: $%s, NW: $%s)',
self.number, self.money, self.debt, self.networth
)
return self
def handle_space(self):
"""Determine what to do on specific space."""
# Landing on Go, collect money
if self.position == GO:
self.game.bank.pay(200.0, self)
logging.debug(
'Player %s landed on Go and collects another $200! (M: $%s, D: $%s, NW: $%s)',
self.number, self.money, self.debt, self.networth
)
# Landing on Jail, just hang out until next turn
elif self.position == JAIL:
visiting = 'visiting' if self.just_visiting else 'in'
logging.debug('Player %s is %s jail. Hang until next turn.', self.number, visiting)
# Landing on Go to Jail, go to jail without passing Go
elif self.position == GO_TO_JAIL:
self.go_to_space(JAIL, pass_go=False, just_visiting=False)
logging.debug('Player %s goes to jail.', self.number)
# Landing on Luxury or Income Tax, pay indicated amount
elif self.position == LUXURY_TAX or self.position == INCOME_TAX:
tax = 75.0 if self.position == LUXURY_TAX else self.guess_income_tax()
self.pay(tax, self.game.freeparking)
logging.debug(
'Player %s pays $%s tax. (M: $%s, D: $%s, NW: $%s)',
self.number, tax, self.money, self.debt, self.networth
)
# Landing on Free Parking, Collect the money
elif self.position == FREE_PARKING:
kitty = self.game.freeparking.money
self.add(kitty, self.game.freeparking)
logging.debug(
'Player %s collects $%s from free parking. (M: $%s, D: $%s, NW: $%s)',
self.number, kitty, self.money, self.debt, self.networth
)
# Landing on Chance, follow card instructions
elif self.position in CHANCE:
card = self.game.draw_chance()
logging.debug(
'Player %s draws "%s" from chance. (M: $%s, D: $%s, NW: $%s)',
self.number, card.name, self.money, self.debt, self.networth
)
self.follow(card)
# Landing on Community Chest, follow card instructions
elif self.position in COMMUNITY_CHEST:
card = self.game.draw_community_chest()
logging.debug(
'Player %s draws "%s" from community chest. (M: $%s, D: $%s, NW: $%s)',
self.number, card.name, self.money, self.debt, self.networth
)
self.follow(card)
# Land on Property space, Buy Rent or hold
else:
self.handle_property()
return self
def handle_property(self):
"""Determine what to do with a property. Buy, rent or nothing."""
# If self owns this do nothing
if self.owns(self.position):
prop = self.get_property(self.position)
logging.debug('Player %s already owns %s. Do Nothing.', self.number, prop.name)
return self
else:
owned = False
# Check to see if other players own property to pay rent
for plyr in self.game.others(self):
if plyr.owns(self.position):
owned = True
prop = plyr.get_property(self.position)
# Property Mortgaged, do nothing
if prop.mortgaged:
logging.debug('%s is mortgaged. Do Nothing.', prop.name)
return self
# Pay Rent
else:
rent = prop.rent(plyr)
self.pay(rent, plyr)
logging.debug(
'Player %s pays $%s rent to Player %s for %s. (M: $%s, D: $%s, NW: $%s)',
self.number, rent, plyr.number, prop.name, self.money, self.debt, self.networth
)
# Try to Buy if nobody owns
if not owned:
if self.money > PROPERTY_PRICES[self.position]:
self.buy_property()
else:
logging.debug(
'Player %s cannot afford %s for $%s. (M: $%s, D: $%s, NW: $%s)',
self.number,
PROPERTY_NAMES[self.position],
PROPERTY_PRICES[self.position],
self.money, self.debt, self.networth
)
find_money_or_auction = self.liquidate_or_auction()
# Look for money to buy property
                    if find_money_or_auction == 0:
self.liquidate(PROPERTY_PRICES[self.position])
logging.debug(
'Player %s decided to try liquidate some money. (M: $%s, D: $%s, NW: $%s)',
self.number, self.money, self.debt, self.networth
)
if self.money > PROPERTY_PRICES[self.position]:
self.buy_property()
# Auction the property for bid
                    elif find_money_or_auction == 1:
logging.debug(
'Player %s decided to auction off %s. (M: $%s, D: $%s, NW: $%s)',
self.number, PROPERTY_NAMES[self.position], self.money, self.debt, self.networth
)
self.game.new_property_auction(self.position)
return self
def check_monopolies(self):
"""Look for ownership of all groups in player's properties"""
monopolies = set()
for prop in self.properties:
if prop.category == 'property':
same_color = sum([prop.color == otherprop.color and not otherprop.mortgaged for otherprop in self.properties])
if same_color == COLOR_COUNTS[prop.color]:
monopolies.add(prop.color)
return monopolies
def develop(self):
"""Look for opportunities to develop properties."""
        # Unmortgage any mortgaged properties
        # TODO
        # Get monopolies, if any
monopolies = self.check_monopolies()
# If monopoly exists, develop
for monopoly in monopolies:
# Make list of monopoly
monopoly_props = [self.get_property(p) for p in COLOR_PROPERTIES[monopoly]]
monopoly_levels = [prop.level for prop in monopoly_props]
dev_level = min(monopoly_levels)
if dev_level < MAX_LEVEL:
logging.debug(
'Player %s has a monopoly on %s %s with development at %s. (M: $%s, D: $%s, NW: $%s)',
self.number, monopoly, monopoly_props, dev_level,
self.money, self.debt, self.networth
)
                # Loop through properties to develop only the ones at the current minimum development level
# TODO: allow multiple development cycles.
for prop in monopoly_props:
if prop.level == dev_level and prop.cost < self.risk_tolerance * self.money:
self.pay(prop.cost, self.game.bank)
if prop.houses < MAX_HOUSE_LEVEL:
prop.houses += 1
logging.debug(
'Player %s building house for %s on %s. (M: $%s, D: $%s, NW: $%s)',
self.number, prop.cost, prop.name, self.money, self.debt, self.networth
)
elif prop.hotels < 1:
prop.hotels += 1
logging.debug(
'Player %s building hotel for %s on %s. (M: $%s, D: $%s, NW: $%s)',
self.number, prop.cost, prop.name, self.money, self.debt, self.networth
)
return self
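# Example of a single turn (a minimal sketch; `game` is assumed to be the Game
# object from this package, providing the bank, free parking, properties,
# players and card decks -- that class is not shown here):
#
#   player = Player(number=1, game=game)
#   player.take_turn()   # roll, move and handle the space landed on
#   player.develop()     # build houses/hotels on any completed monopolies
#   print(player)        # pretty-printed player card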
|
|
"""NRF24L01 driver for MicroPython
"""
from micropython import const
import utime
# nRF24L01+ registers
CONFIG = const(0x00)
EN_RXADDR = const(0x02)
SETUP_AW = const(0x03)
SETUP_RETR = const(0x04)
RF_CH = const(0x05)
RF_SETUP = const(0x06)
STATUS = const(0x07)
RX_ADDR_P0 = const(0x0A)
TX_ADDR = const(0x10)
RX_PW_P0 = const(0x11)
FIFO_STATUS = const(0x17)
DYNPD = const(0x1C)
# CONFIG register
EN_CRC = const(0x08) # enable CRC
CRCO = const(0x04) # CRC encoding scheme; 0=1 byte, 1=2 bytes
PWR_UP = const(0x02) # 1=power up, 0=power down
PRIM_RX = const(0x01) # RX/TX control; 0=PTX, 1=PRX
# RF_SETUP register
POWER_0 = const(0x00) # -18 dBm
POWER_1 = const(0x02) # -12 dBm
POWER_2 = const(0x04) # -6 dBm
POWER_3 = const(0x06) # 0 dBm
SPEED_1M = const(0x00)
SPEED_2M = const(0x08)
SPEED_250K = const(0x20)
# STATUS register
RX_DR = const(0x40) # RX data ready; write 1 to clear
TX_DS = const(0x20) # TX data sent; write 1 to clear
MAX_RT = const(0x10) # max retransmits reached; write 1 to clear
# FIFO_STATUS register
RX_EMPTY = const(0x01) # 1 if RX FIFO is empty
# constants for instructions
R_RX_PL_WID = const(0x60) # read RX payload width
R_RX_PAYLOAD = const(0x61) # read RX payload
W_TX_PAYLOAD = const(0xA0) # write TX payload
FLUSH_TX = const(0xE1) # flush TX FIFO
FLUSH_RX = const(0xE2) # flush RX FIFO
NOP = const(0xFF) # use to read STATUS register
class NRF24L01:
def __init__(self, spi, cs, ce, channel=46, payload_size=16):
assert payload_size <= 32
self.buf = bytearray(1)
# store the pins
self.spi = spi
self.cs = cs
self.ce = ce
# init the SPI bus and pins
self.init_spi(4000000)
# reset everything
ce.init(ce.OUT, value=0)
cs.init(cs.OUT, value=1)
self.payload_size = payload_size
self.pipe0_read_addr = None
utime.sleep_ms(5)
# set address width to 5 bytes and check for device present
self.reg_write(SETUP_AW, 0b11)
if self.reg_read(SETUP_AW) != 0b11:
raise OSError("nRF24L01+ Hardware not responding")
# disable dynamic payloads
self.reg_write(DYNPD, 0)
# auto retransmit delay: 1750us
# auto retransmit count: 8
self.reg_write(SETUP_RETR, (6 << 4) | 8)
# set rf power and speed
self.set_power_speed(POWER_3, SPEED_250K) # Best for point to point links
# init CRC
self.set_crc(2)
# clear status flags
self.reg_write(STATUS, RX_DR | TX_DS | MAX_RT)
# set channel
self.set_channel(channel)
# flush buffers
self.flush_rx()
self.flush_tx()
def init_spi(self, baudrate):
try:
master = self.spi.MASTER
except AttributeError:
self.spi.init(baudrate=baudrate, polarity=0, phase=0)
else:
self.spi.init(master, baudrate=baudrate, polarity=0, phase=0)
def reg_read(self, reg):
self.cs(0)
self.spi.readinto(self.buf, reg)
self.spi.readinto(self.buf)
self.cs(1)
return self.buf[0]
def reg_write_bytes(self, reg, buf):
self.cs(0)
self.spi.readinto(self.buf, 0x20 | reg)
self.spi.write(buf)
self.cs(1)
return self.buf[0]
def reg_write(self, reg, value):
self.cs(0)
self.spi.readinto(self.buf, 0x20 | reg)
ret = self.buf[0]
self.spi.readinto(self.buf, value)
self.cs(1)
return ret
def flush_rx(self):
self.cs(0)
self.spi.readinto(self.buf, FLUSH_RX)
self.cs(1)
def flush_tx(self):
self.cs(0)
self.spi.readinto(self.buf, FLUSH_TX)
self.cs(1)
# power is one of POWER_x defines; speed is one of SPEED_x defines
def set_power_speed(self, power, speed):
setup = self.reg_read(RF_SETUP) & 0b11010001
self.reg_write(RF_SETUP, setup | power | speed)
# length in bytes: 0, 1 or 2
def set_crc(self, length):
config = self.reg_read(CONFIG) & ~(CRCO | EN_CRC)
if length == 0:
pass
elif length == 1:
config |= EN_CRC
else:
config |= EN_CRC | CRCO
self.reg_write(CONFIG, config)
def set_channel(self, channel):
self.reg_write(RF_CH, min(channel, 125))
# address should be a bytes object 5 bytes long
def open_tx_pipe(self, address):
assert len(address) == 5
self.reg_write_bytes(RX_ADDR_P0, address)
self.reg_write_bytes(TX_ADDR, address)
self.reg_write(RX_PW_P0, self.payload_size)
# address should be a bytes object 5 bytes long
# pipe 0 and 1 have 5 byte address
# pipes 2-5 use same 4 most-significant bytes as pipe 1, plus 1 extra byte
def open_rx_pipe(self, pipe_id, address):
assert len(address) == 5
assert 0 <= pipe_id <= 5
if pipe_id == 0:
self.pipe0_read_addr = address
if pipe_id < 2:
self.reg_write_bytes(RX_ADDR_P0 + pipe_id, address)
else:
self.reg_write(RX_ADDR_P0 + pipe_id, address[0])
self.reg_write(RX_PW_P0 + pipe_id, self.payload_size)
self.reg_write(EN_RXADDR, self.reg_read(EN_RXADDR) | (1 << pipe_id))
def start_listening(self):
self.reg_write(CONFIG, self.reg_read(CONFIG) | PWR_UP | PRIM_RX)
self.reg_write(STATUS, RX_DR | TX_DS | MAX_RT)
if self.pipe0_read_addr is not None:
self.reg_write_bytes(RX_ADDR_P0, self.pipe0_read_addr)
self.flush_rx()
self.flush_tx()
self.ce(1)
utime.sleep_us(130)
def stop_listening(self):
self.ce(0)
self.flush_tx()
self.flush_rx()
# returns True if any data available to recv
def any(self):
return not bool(self.reg_read(FIFO_STATUS) & RX_EMPTY)
def recv(self):
# get the data
self.cs(0)
self.spi.readinto(self.buf, R_RX_PAYLOAD)
buf = self.spi.read(self.payload_size)
self.cs(1)
# clear RX ready flag
self.reg_write(STATUS, RX_DR)
return buf
# blocking wait for tx complete
def send(self, buf, timeout=500):
self.send_start(buf)
start = utime.ticks_ms()
result = None
while result is None and utime.ticks_diff(utime.ticks_ms(), start) < timeout:
result = self.send_done() # 1 == success, 2 == fail
if result == 2:
raise OSError("send failed")
# non-blocking tx
def send_start(self, buf):
# power up
self.reg_write(CONFIG, (self.reg_read(CONFIG) | PWR_UP) & ~PRIM_RX)
utime.sleep_us(150)
# send the data
self.cs(0)
self.spi.readinto(self.buf, W_TX_PAYLOAD)
self.spi.write(buf)
if len(buf) < self.payload_size:
self.spi.write(b"\x00" * (self.payload_size - len(buf))) # pad out data
self.cs(1)
# enable the chip so it can send the data
self.ce(1)
utime.sleep_us(15) # needs to be >10us
self.ce(0)
# returns None if send still in progress, 1 for success, 2 for fail
def send_done(self):
if not (self.reg_read(STATUS) & (TX_DS | MAX_RT)):
return None # tx not finished
# either finished or failed: get and clear status flags, power down
status = self.reg_write(STATUS, RX_DR | TX_DS | MAX_RT)
self.reg_write(CONFIG, self.reg_read(CONFIG) & ~PWR_UP)
return 1 if status & TX_DS else 2
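# Example usage (a minimal sketch; the pin numbers and SPI bus id are
# assumptions -- adjust for your board):
#
#   from machine import Pin, SPI
#   csn = Pin(5, Pin.OUT, value=1)
#   ce = Pin(4, Pin.OUT, value=0)
#   nrf = NRF24L01(SPI(1), csn, ce, channel=46, payload_size=16)
#   nrf.open_tx_pipe(b'\xe1\xf0\xf0\xf0\xf0')
#   nrf.open_rx_pipe(1, b'\xd2\xf0\xf0\xf0\xf0')
#   nrf.start_listening()
#   if nrf.any():
#       payload = nrf.recv()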
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
##############################################################################
# Copyright Kitware Inc.
#
# Licensed under the Apache License, Version 2.0 ( the "License" );
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
import bson.json_util
import re
import six
from pymongo import MongoClient
from girder import logger as log
from . import base
from .base import DatabaseConnectorException
MongoOperators = {
'eq': '$eq',
'ne': '$ne',
'gt': '$gt',
'gte': '$gte',
'lt': '$lt',
'lte': '$lte',
'in': '$in',
'not_in': '$nin',
'regex': '$regex',
# not_regex, search, and not_search are handled as special cases
# search is treated as a case-insensitive, multiline regex
# is and not_is are the same as $eq and $ne unless the value is None
}
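# For example, a filter such as {'field': 'age', 'operator': 'gt', 'value': 21}
# is translated by _addFilter below into the clause {'age': {'$gt': 21}}, while
# regex/search operators compile the value to an re pattern (illustrative only).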
class MongoConnector(base.DatabaseConnector):
name = 'mongo'
databaseNameRequired = False
def __init__(self, *args, **kwargs):
super(MongoConnector, self).__init__(*args, **kwargs)
self.collection = kwargs.get('collection', kwargs.get('table'))
uri = kwargs.get('uri')
dialect, _ = base.getDBConnectorClassFromDialect(uri)
self.databaseUri = '%s://%s' % (dialect, uri.split('://', 1)[1])
self.databaseName = kwargs.get(
'database', base.databaseFromUri(self.databaseUri))
self.fieldInfo = None
self.initialized = True
def _addFilter(self, clauses, filter):
"""
Add a filter to a list of clauses.
:param clauses: a list which is modified.
:param filter: the filter to add. This needs to be a dictionary with
field, operator, and value or with group and value.
:return: the list of clauses.
"""
if 'group' in filter:
subclauses = []
for subfilter in filter['value']:
subclauses = self._addFilter(subclauses, subfilter)
clauses.append({'$' + filter['group']: subclauses})
return clauses
operator = filter['operator']
operator = base.FilterOperators.get(operator)
field = filter['field']
if not isinstance(field, six.string_types):
raise DatabaseConnectorException(
'Filters must use a known field as the left value')
value = filter['value']
if operator in MongoOperators:
operator = MongoOperators[operator]
elif operator == 'not_regex':
operator = '$not'
value = re.compile(filter['value'])
elif operator in ('search', 'not_search'):
operator = '$regex' if operator == 'search' else '$not'
value = re.compile(filter['value'],
re.IGNORECASE | re.MULTILINE | re.DOTALL)
elif operator in ('is', 'not_is'):
if value is None:
operator = '$in' if operator == 'is' else '$nin'
value = [None]
else:
operator = '$eq' if operator == 'is' else '$ne'
else:
raise DatabaseConnectorException('operator %s unimplemented' % (
operator))
clauses.append({field: {operator: value}})
return clauses
def connect(self):
"""
Connect to the database and get a reference to the Mongo collection.
:returns: the mongo collection.
"""
self.conn = MongoClient(self.databaseUri)
self.database = self.conn[self.databaseName]
return self.database[self.collection]
def disconnect(self):
"""
Disconnect from the database.
"""
self.conn.close()
self.conn = None
def performSelect(self, fields, queryProps={}, filters=[], client=None):
"""
Select data from the database. The results are passed back as a
dictionary with the following values:
limit: the limit used in the query
offset: the offset used in the query
sort: the list of sort parameters used in the query.
fields: a list of the fields that are being returned in the order
that they are returned.
data: a list with one entry per row of results. Each entry is a list
with one entry per column.
:param fields: the results from getFieldInfo.
:param queryProps: general query properties, including limit, offset,
and sort.
:param filters: a list of filters to apply.
:param client: if a client is specified, a previous query made by this
client can be cancelled.
:return: the results of the query. See above.
"""
result = super(MongoConnector, self).performSelect(
fields, queryProps, filters)
if queryProps.get('group'):
raise DatabaseConnectorException(
'Group unsupported by this database.')
filterQueryClauses = []
for filt in filters:
filterQueryClauses = self._addFilter(filterQueryClauses, filt)
opts = {}
for k, v in six.iteritems(queryProps):
target = None
if k == 'fields' and v and v != []:
target = 'projection'
v = {field: True for field in v}
if '_id' not in v:
v['_id'] = False
elif k == 'offset':
target = 'skip'
elif k in ['limit', 'no_cursor_timeout', 'cursor_type', 'sort',
'allow_partial_results', 'oplog_replay', 'modifiers']:
target = k
if target is not None:
opts[target] = v
if len(filterQueryClauses) > 0:
opts['filter'] = {'$and': filterQueryClauses}
result['format'] = 'dict'
if queryProps.get('limit') == 0:
result['data'] = []
else:
if queryProps.get('limit') < 0:
opts['limit'] = 0
coll = self.connect()
log.info('Query: %s', bson.json_util.dumps(
opts, check_circular=False, separators=(',', ':'),
sort_keys=False, default=str, indent=None))
cursor = coll.find(**opts)
result['datacount'] = cursor.count(True)
result['data'] = cursor
self.disconnect()
return result
def getFieldInfo(self):
"""
Return a list of fields that are known and can be queried.
:return: a list of known fields. Each entry is a dictionary with name,
datatype, and optionally a description.
"""
if self.fieldInfo is None:
# cache the fieldInfo so we don't process all of the documents
# every time.
# TODO: either have a maximum duration or some other method of
# analyzing a subset of the table; on a large table this takes a
# long time.
coll = self.connect()
fields = {}
for result in coll.find():
fields.update(result)
fieldInfo = []
for field in sorted(six.iterkeys(fields)):
fieldInfo.append({'name': field,
'type': 'unknown'})
self.fieldInfo = fieldInfo
return self.fieldInfo
@staticmethod
def getTableList(uri, internalTables=False, **kwargs):
"""
Get a list of known databases, each of which has a list of known
collections from the database. This is of the form [{'database':
(database 1), 'tables': [{'table': (collection 1)}, {'table':
(collection 2)}, ...]}, {'database': (database 2), 'tables': [...]},
...]
:param uri: uri to connect to the database.
        :param internalTables: True to return tables about the database itself.
Ignored for Mongo.
:returns: A list of known collections.
"""
conn = MongoClient(uri)
databaseName = base.databaseFromUri(uri)
if databaseName is None:
databaseNames = conn.database_names()
else:
databaseNames = [databaseName]
results = []
for name in databaseNames:
database = conn[name]
results.append({
'database': name,
'tables': [{'table': collection, 'name': collection}
for collection in database.collection_names(False)]
})
return results
@staticmethod
def validate(uri=None, database=None, collection=None, **kwargs):
"""
Validate that the passed arguments are sufficient for connecting to the
database.
:returns: True if the arguments should allow connecting to the db.
"""
return uri and collection
@staticmethod
def jsonDumps(*args, **kwargs):
"""
Use the bson utility to dump JSON. This handles special BSON
        datatypes. See json.dumps for the function parameters.
"""
return bson.json_util.dumps(*args, **kwargs)
base.registerConnectorClass(MongoConnector.name, MongoConnector, {
'dialects': {
'mongodb': 'mongodb',
'mongo': 'mongodb',
},
'default_dialect': 'mongodb',
'priority': 0,
})
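# Example usage (a minimal sketch; the URI and collection name are placeholders
# and the 'gt' operator is assumed to be known to base.FilterOperators):
#
#   conn = MongoConnector(uri='mongodb://localhost:27017/mydb',
#                         collection='records')
#   fields = conn.getFieldInfo()
#   result = conn.performSelect(fields, queryProps={'limit': 10},
#                               filters=[{'field': 'age', 'operator': 'gt',
#                                         'value': 21}])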
|
|
#!/usr/bin/env python
"""Classes to produce videos from igraph plots.
"""
from __future__ import with_statement
from contextlib import contextmanager
from igraph.drawing import BoundingBox, Plot
from shutil import rmtree
from tempfile import mkdtemp, mkstemp
import os
import subprocess
__all__ = ["MEncoderVideoEncoder", "VideoEncoder"]
__author__ = "Tamas Nepusz"
__license__ = "GPL"
__docformat__ = "restructuredtext"
@contextmanager
def named_temporary_file(*args, **kwds):
"""Context manager that creates a named temporary file and
returns its name.
All parameters are passed on to `tempfile.mkstemp`, see
its documentation for more info.
"""
handle, tmpfile = mkstemp(*args, **kwds)
os.close(handle)
try:
yield tmpfile
finally:
os.unlink(tmpfile)
class TemporaryDir(object):
"""Represents a temporary directory on the filesystem.
This class acts as a drop-in replacement for `tempfile.mkdtemp`, but it
ensures that the temporary directory is removed when the object is deleted.
Note that since exceptions cannot be thrown from destructors, errors during
the deletion of the directory are silently ignored unless you delete the
directory explicitly with `remove()`.
"""
def __init__(self, *args, **kwds):
"""Creates a temporary directory using `tempfile.mkdtemp`.
All the arguments are passed on intact to `tempfile.mkdtemp`.
"""
self.path = mkdtemp(*args, **kwds)
def __del__(self):
"""Removes the temporary directory and all its contents."""
self.remove(ignore_errors=True)
def mkstemp(self, *args, **kwds):
"""Makes a temporary file in this directory using `tempfile.mkstemp`.
All the arguments are passed on intact to `tempfile.mkstemp`. Returns
the name of the file that was created. The ``dir`` keyword argument is
ignored and overwritten with the name of the folder."""
kwds["dir"] = self.path
return mkstemp(*args, **kwds)
def remove(self, ignore_errors=False):
"""Removes the temporary directory and all its contents, with proper
error handling.
An exception will be raised when the removal is not possible.
"""
if os.path.isdir(self.path):
rmtree(self.path, ignore_errors=ignore_errors)
class VideoEncoder(object):
"""Abstract base class that must be extended by all video encoder classes.
This class defines the basic interface of all video encoder classes.
Derived classes must implement `add`_ and `save`_ at least.
The class also defines some standard attributes that can be used by derived
video encoder classes. These attributes are:
- ``bbox`` stores the width and height of the video. It is an instance
of `BoundingBox`_, but it can also be set as a tuple (width and height)
or it can be left as ``None`` to let igraph infer the bounding box from
the `Plot`_ instances being added to the video.
- ``fps`` is the frame rate of the video.
`VideoEncoder`_ instances also work as context managers, so you can do
stuff like this:
>>> with MyEncoder((600, 600)).encode_to("video.avi") as encoder: #doctest: +SKIP
... # add your frames using encoder.add()
Upon exiting the context, the video encoder will save everything to the
given video file automatically (unless there was an exception, in which
case the exception is simply re-raised and the temporary files are cleaned
up).
"""
def __init__(self, bbox=None, fps=25):
"""Constructs an abstract video encoder.
This method should never be called directly as the whole class is
abstract.
The bounding box may be left as ``None`` if you are adding `Plot`_
instances to the video stream, the right bounding box will be inferred
from the first `Plot`_ instance you add.
"""
self._bbox = None
self._fps = None
self._images = []
self._tmpdir = None
self.bbox = bbox
self.fps = fps
def __del__(self):
"""Cleans up the temporary files and destroys the encoder."""
self.cleanup()
def add(self, frame, *args, **kwds):
"""Adds a frame to the video sequence being built. `frame` must either
be one of the following:
- the name of an image file. This is straightforward, the image file
is added to the video stream as a frame.
- an `igraph.Plot`_ instance. The plot will be rendered to a
temporary image file and saved into a temporary directory
before adding the image to the video stream.
- an object supported by `igraph.Plot`_ -- this means that the
object must have a ``__plot__`` method. In this case, we will
construct a new `Plot`_ on-the-fly, render it to a temporary
image file and add it to the video stream. The temporary file
          will be cleaned up when the video encoder object is destroyed.
Temporary files will be cleaned up when the video encoder object
is destroyed.
When possible, this method will try to infer the width and height of
the video if this was not given in advance by setting the `bbox`_
property. Such inference is possible only when you are adding
`igraph.Plot`_ instances; for other cases, you must specify the
width and height of the video stream before calling `save()`.
"""
if hasattr(frame, "__plot__"):
if self.bbox is None:
raise ValueError("self.bbox must be set before calling add() "
"on a plottable object")
plot = Plot(bbox=self.bbox)
plot.add(frame, *args, **kwds)
frame = plot
if isinstance(frame, Plot):
self._ensure_tmpdir_exists()
handle, tmpfile = self._tmpdir.mkstemp(suffix='.png')
os.close(handle)
frame.save(tmpfile)
if self.bbox is None:
self.bbox = frame.bounding_box
frame = tmpfile
self._add_image(frame)
def _add_image(self, fname):
"""Adds an image frame to the video sequence being built. `frame`
must be the name of an image file."""
raise NotImplementedError
def _ensure_tmpdir_exists(self):
"""Ensures that there exists a temporary directory in which
plots can be saved."""
if self._tmpdir is not None:
return
self._tmpdir = TemporaryDir()
@property
def bbox(self):
"""Returns the bounding box of the video.
It can either be ``None`` if the bounding box was not set yet, or an
instance of `BoundingBox`_.
"""
return self._bbox
@bbox.setter
def bbox(self, box):
"""Sets the bounding box of the video.
The bounding box may either be a tuple (width and height) or an
instance of `BoundingBox`_. If the top left corner of the given
`BoundingBox`_ instance is not at (0, 0), it will be assumed to be
there and the width and height will be adjusted accordingly.
"""
if isinstance(box, BoundingBox):
self._bbox = BoundingBox(0, 0, box.right, box.bottom)
elif box is None:
self._bbox = None
else:
self._bbox = BoundingBox(0, 0, *box)
def cleanup(self):
"""Cleans up the temporary files created during the encoding
process and empties the image list."""
if self._tmpdir:
self._tmpdir.remove()
self._tmpdir = None
self._images = []
@contextmanager
def encode_to(self, *args, **kwds):
"""Defines a context which saves the video to the given file when
exiting the context.
All the arguments are passed on to `save()` when exiting the context
unless there was an exception, in which case the video is not saved.
The temporary files are always cleaned up when exiting the context.
Usage::
>>> with MyEncoder((600, 600)).encode_to("test.avi") as encoder: #doctest: +SKIP
... # add your frames here by calling encoder.add()
"""
try:
yield self
self.save(*args, **kwds)
finally:
self.cleanup()
@property
def fps(self):
"""Returns the frame rate of the video"""
return self._fps
@fps.setter
def fps(self, fps):
"""Sets the frame rate of the video.
Examples:
>>> encoder = MEncoderVideoEncoder(fps=22)
>>> encoder.fps
22.0
>>> encoder.fps = 24
>>> encoder.fps
24.0
>>> encoder.fps = -6
>>> encoder.fps == 0.01
True
"""
self._fps = max(float(fps), 0.01)
def save(self, filename):
"""Saves the video to the given file."""
raise NotImplementedError
class MEncoderVideoEncoder(VideoEncoder):
"""Video encoder that uses an external ``mencoder``
executable to create videos.
This class supports the attributes laid out in the base `VideoEncoder`_
class as well as the following extra attributes:
- ``lavcopts`` is a dict of options to be passed to the ``libavcodec``
library used by ``mencoder``. It must be a dict of key-value pairs or
a string. By default, the video will be encoded using
``vcodec=msmpeg4v2``, which seems to work out-of-the-box on all three
major platforms (Windows, Linux and Mac OS X).
- ``mencoder_path`` is the full path to the ``mencoder`` executable. If
the executable is on your path, it is safe to leave it at its default
value (``mencoder``).
- ``verbose`` defines the verbosity level of the encoder. If ``False``,
only errors will be printed while encoding. If ``True``, ``mencoder``
will be invoked with its default verbosity level. If a number, it is
interpreted as a verbosity level directly and passed on to ``mencoder``
"""
def __init__(self, bbox=None, fps=25, lavcopts=None,
mencoder_path="mencoder", verbose=False):
"""Constructs a new video with the given width and height (`bbox`,
which must be a tuple), frame rate (`fps`) and the given encoding
options (`lavcopts`). If `lavcopts` is ``None``, it will default to
``vcodec=msmpeg4v2``. `mencoder_path` may be used to override the path
where the ``mencoder`` executable is to be found. `verbose` is the
value of the verbosity parameter; ``False`` means that only errors will
be printed while encoding, ``True`` means the default verbosity level
of ``mencoder``.
The bounding box may be left as ``None`` if you are adding `Plot`_
instances to the video stream, the right bounding box will be inferred
from the first `Plot`_ instance you add.
Examples:
        >>> encoder = MEncoderVideoEncoder()
        >>> print(encoder.bbox)
        None
>>> encoder.fps
25.0
>>> encoder.lavcopts
{'vcodec': 'msmpeg4v2'}
>>> encoder.mencoder_path
'mencoder'
>>> encoder.verbose
False
>>> (encoder.width, encoder.height)
(None, None)
"""
super(MEncoderVideoEncoder, self).__init__(bbox, fps)
self._lavcopts = {}
self.lavcopts = lavcopts
self.mencoder_path = mencoder_path
self.verbose = verbose
def _add_image(self, fname):
"""Adds an image stored in the given image file to the video stream.
This is an internal method, called by `add()`, which is the primary
entry point for end users. You should not have to call this method
directly."""
self._images.append(fname)
@property
def lavcopts(self):
"""Returns the option dict passed to the ``libavcodec`` library
when encoding the video"""
return self._lavcopts
@lavcopts.setter
def lavcopts(self, value):
"""Sets the option dict passed to the ``libavcodec`` library
when encoding the video.
Examples:
>>> encoder = MEncoderVideoEncoder()
>>> encoder.lavcopts = None
>>> encoder.lavcopts
{'vcodec': 'msmpeg4v2'}
>>> encoder.lavcopts = "vcodec=mpeg4:mbd=2:trell"
>>> encoder.lavcopts["vcodec"]
'mpeg4'
>>> encoder.lavcopts["mbd"]
'2'
>>> encoder.lavcopts["trell"]
True
"""
if value is None:
opts = {}
elif isinstance(value, basestring):
opts = {}
for part in value.split(":"):
if "=" in part:
key, value = part.split("=", 1)
opts[key] = value
else:
opts[part] = True
else:
opts = value
self._lavcopts = opts
if "vcodec" not in self._lavcopts:
self._lavcopts["vcodec"] = "msmpeg4v2"
def save(self, filename, keep_images=False):
"""Saves the video to the given file.
If `keep_images` is ``True``, the temporary folder holding the
images will not be deleted after the video is encoded.
"""
if self._bbox is None:
raise ValueError("bounding box of video stream was not inferred. "
"Please set self.bbox accordingly.")
width, height = int(self._bbox.width), int(self._bbox.height)
lavcopts = ":".join(
key if value is True else ("%s=%s" % (key, value))
for key, value in self._lavcopts.iteritems()
)
with named_temporary_file() as tmpfile:
with open(tmpfile, "w") as fp:
for image in self._images:
fp.write(image + "\n")
args = [self.mencoder_path, "mf://@%s" % tmpfile,
"-mf", "w=%d:h=%d:fps=%g" % (width, height, self.fps),
"-ovc", "lavc", "-oac", "copy",
"-lavcopts", lavcopts,
"-o", filename]
env = dict(os.environ)
if self.verbose is False or self.verbose is None:
env["MPLAYER_VERBOSE"] = "-4"
elif self.verbose is True:
env["MPLAYER_VERBOSE"] = "0"
elif isinstance(self.verbose, int):
env["MPLAYER_VERBOSE"] = str(self.verbose-4)
subprocess.check_call(args, env=env)
if not keep_images:
self.cleanup()
def demo_layout():
from igraph import Graph
# Generate graph and find the communities
graph = Graph.GRG(100, 0.2)
clusters = graph.community_spinglass()
# Specify initial layout
layout = None
# Set up the video encoder
encoder = MEncoderVideoEncoder(bbox=(600, 600), fps=30)
encoder.lavcopts = "vcodec=mpeg4:mbd=2:vbitrate=1600:trell:keyint=30"
# Generate frames in the animation one by one
with encoder.encode_to("demo_layout.avi"):
for i in xrange(500):
# Run one step of the layout algorithm
layout = graph.layout("graphopt", niter=1, seed=layout)
# Add the clustering to the encoder
encoder.add(clusters, layout=layout, mark_groups=True, margin=20)
def demo_epidemic():
from igraph import Graph, Layout
from random import sample, random
# Specify the simulation parameters
initial_outbreak_size = 3
spread_probability = 0.05
recovery_probability = 0.1
# Set up the mapping from vertex states to colors
colormap=dict(S="white", I="red", R="green")
# Generate the graph
graph, xs, ys = Graph.GRG(100, 0.2, return_coordinates=True)
layout = Layout(zip(xs, ys))
# Set up the initial state of the individuals
graph.vs["state"] = ["S"] * graph.vcount()
for vertex in sample(graph.vs, initial_outbreak_size):
vertex["state"] = "I"
graph.vs["size"] = [20] * graph.vcount()
# Set up the video encoder
encoder = MEncoderVideoEncoder(bbox=(600, 600), fps=5)
# Generate frames in the animation one by one
with encoder.encode_to("demo_epidemic.avi"):
# Run the simulation until hell freezes over
while True:
# Create the plot and add to the encoder
colors = [colormap[state] for state in graph.vs["state"]]
encoder.add(graph, layout=layout, vertex_color=colors,
vertex_label=graph.vs["state"], margin=20)
# First, the infected individuals try to infect their neighbors
infected = graph.vs.select(state="I")
for vertex in infected:
for idx in graph.neighbors(vertex.index):
if graph.vs[idx]["state"] == "S" and random() < spread_probability:
graph.vs[idx]["state"] = "I"
# Second, the infected individuals try to recover
for vertex in infected:
if random() < recovery_probability:
vertex["state"] = "R"
# Okay, are there any infected people left?
if not infected:
break
def test():
demo_layout()
demo_epidemic()
if __name__ == "__main__":
test()
|
|
"""
Low-level functions for complex arithmetic.
"""
from backend import MPZ, MPZ_ZERO, MPZ_ONE, MPZ_TWO
from libmpf import (\
round_floor, round_ceiling, round_down, round_up,
round_nearest, round_fast, bitcount,
bctable, normalize, normalize1, reciprocal_rnd, rshift, lshift, giant_steps,
negative_rnd,
to_str, to_fixed, from_man_exp, from_float, to_float, from_int, to_int,
fzero, fone, ftwo, fhalf, finf, fninf, fnan, fnone,
mpf_abs, mpf_pos, mpf_neg, mpf_add, mpf_sub, mpf_mul,
mpf_div, mpf_mul_int, mpf_shift, mpf_sqrt, mpf_hypot,
mpf_rdiv_int, mpf_floor, mpf_ceil, mpf_nint, mpf_frac,
mpf_sign,
ComplexResult
)
from libelefun import (\
mpf_pi, mpf_exp, mpf_log, mpf_cos_sin, mpf_cosh_sinh, mpf_tan, mpf_pow_int,
mpf_log_hypot,
mpf_cos_sin_pi, mpf_phi,
mpf_atan, mpf_atan2, mpf_cosh, mpf_sinh, mpf_tanh,
mpf_asin, mpf_acos, mpf_acosh, mpf_nthroot, mpf_fibonacci
)
# An mpc value is a (real, imag) tuple
mpc_one = fone, fzero
mpc_zero = fzero, fzero
mpc_two = ftwo, fzero
mpc_half = (fhalf, fzero)
_infs = (finf, fninf)
_infs_nan = (finf, fninf, fnan)
def mpc_is_inf(z):
"""Check if either real or imaginary part is infinite"""
re, im = z
if re in _infs: return True
if im in _infs: return True
return False
def mpc_is_infnan(z):
"""Check if either real or imaginary part is infinite or nan"""
re, im = z
if re in _infs_nan: return True
if im in _infs_nan: return True
return False
def mpc_to_str(z, dps, **kwargs):
re, im = z
rs = to_str(re, dps)
if im[0]:
return rs + " - " + to_str(mpf_neg(im), dps, **kwargs) + "j"
else:
return rs + " + " + to_str(im, dps, **kwargs) + "j"
def mpc_to_complex(z, strict=False):
re, im = z
return complex(to_float(re, strict), to_float(im, strict))
def mpc_hash(z):
try:
return hash(mpc_to_complex(z, strict=True))
except OverflowError:
return hash(z)
def mpc_conjugate(z, prec, rnd=round_fast):
re, im = z
return re, mpf_neg(im, prec, rnd)
def mpc_is_nonzero(z):
return z != mpc_zero
def mpc_add(z, w, prec, rnd=round_fast):
a, b = z
c, d = w
return mpf_add(a, c, prec, rnd), mpf_add(b, d, prec, rnd)
def mpc_add_mpf(z, x, prec, rnd=round_fast):
a, b = z
return mpf_add(a, x, prec, rnd), b
def mpc_sub(z, w, prec=0, rnd=round_fast):
a, b = z
c, d = w
return mpf_sub(a, c, prec, rnd), mpf_sub(b, d, prec, rnd)
def mpc_sub_mpf(z, p, prec=0, rnd=round_fast):
a, b = z
return mpf_sub(a, p, prec, rnd), b
def mpc_pos(z, prec, rnd=round_fast):
a, b = z
return mpf_pos(a, prec, rnd), mpf_pos(b, prec, rnd)
def mpc_neg(z, prec=None, rnd=round_fast):
a, b = z
return mpf_neg(a, prec, rnd), mpf_neg(b, prec, rnd)
def mpc_shift(z, n):
a, b = z
return mpf_shift(a, n), mpf_shift(b, n)
def mpc_abs(z, prec, rnd=round_fast):
"""Absolute value of a complex number, |a+bi|.
Returns an mpf value."""
a, b = z
return mpf_hypot(a, b, prec, rnd)
def mpc_arg(z, prec, rnd=round_fast):
"""Argument of a complex number. Returns an mpf value."""
a, b = z
return mpf_atan2(b, a, prec, rnd)
def mpc_floor(z, prec, rnd=round_fast):
a, b = z
return mpf_floor(a, prec, rnd), mpf_floor(b, prec, rnd)
def mpc_ceil(z, prec, rnd=round_fast):
a, b = z
return mpf_ceil(a, prec, rnd), mpf_ceil(b, prec, rnd)
def mpc_nint(z, prec, rnd=round_fast):
a, b = z
return mpf_nint(a, prec, rnd), mpf_nint(b, prec, rnd)
def mpc_frac(z, prec, rnd=round_fast):
a, b = z
return mpf_frac(a, prec, rnd), mpf_frac(b, prec, rnd)
def mpc_mul(z, w, prec, rnd=round_fast):
"""
Complex multiplication.
Returns the real and imaginary part of (a+bi)*(c+di), rounded to
the specified precision. The rounding mode applies to the real and
imaginary parts separately.
"""
a, b = z
c, d = w
p = mpf_mul(a, c)
q = mpf_mul(b, d)
r = mpf_mul(a, d)
s = mpf_mul(b, c)
re = mpf_sub(p, q, prec, rnd)
im = mpf_add(r, s, prec, rnd)
return re, im
def mpc_square(z, prec, rnd=round_fast):
# (a+b*I)**2 == a**2 - b**2 + 2*I*a*b
a, b = z
p = mpf_mul(a,a)
q = mpf_mul(b,b)
r = mpf_mul(a,b, prec, rnd)
re = mpf_sub(p, q, prec, rnd)
im = mpf_shift(r, 1)
return re, im
def mpc_mul_mpf(z, p, prec, rnd=round_fast):
a, b = z
re = mpf_mul(a, p, prec, rnd)
im = mpf_mul(b, p, prec, rnd)
return re, im
def mpc_mul_imag_mpf(z, x, prec, rnd=round_fast):
"""
Multiply the mpc value z by I*x where x is an mpf value.
"""
a, b = z
re = mpf_neg(mpf_mul(b, x, prec, rnd))
im = mpf_mul(a, x, prec, rnd)
return re, im
def mpc_mul_int(z, n, prec, rnd=round_fast):
a, b = z
re = mpf_mul_int(a, n, prec, rnd)
im = mpf_mul_int(b, n, prec, rnd)
return re, im
def mpc_div(z, w, prec, rnd=round_fast):
a, b = z
c, d = w
wp = prec + 10
# mag = c*c + d*d
mag = mpf_add(mpf_mul(c, c), mpf_mul(d, d), wp)
# (a*c+b*d)/mag, (b*c-a*d)/mag
t = mpf_add(mpf_mul(a,c), mpf_mul(b,d), wp)
u = mpf_sub(mpf_mul(b,c), mpf_mul(a,d), wp)
return mpf_div(t,mag,prec,rnd), mpf_div(u,mag,prec,rnd)
def mpc_div_mpf(z, p, prec, rnd=round_fast):
"""Calculate z/p where p is real"""
a, b = z
re = mpf_div(a, p, prec, rnd)
im = mpf_div(b, p, prec, rnd)
return re, im
def mpc_reciprocal(z, prec, rnd=round_fast):
"""Calculate 1/z efficiently"""
a, b = z
m = mpf_add(mpf_mul(a,a),mpf_mul(b,b),prec+10)
re = mpf_div(a, m, prec, rnd)
im = mpf_neg(mpf_div(b, m, prec, rnd))
return re, im
def mpc_mpf_div(p, z, prec, rnd=round_fast):
"""Calculate p/z where p is real efficiently"""
a, b = z
m = mpf_add(mpf_mul(a,a),mpf_mul(b,b), prec+10)
re = mpf_div(mpf_mul(a,p), m, prec, rnd)
im = mpf_div(mpf_neg(mpf_mul(b,p)), m, prec, rnd)
return re, im
def complex_int_pow(a, b, n):
"""Complex integer power: computes (a+b*I)**n exactly for
nonnegative n (a and b must be Python ints)."""
wre = 1
wim = 0
while n:
if n & 1:
wre, wim = wre*a - wim*b, wim*a + wre*b
n -= 1
a, b = a*a - b*b, 2*a*b
n //= 2
return wre, wim
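# Worked instance (sketch): (1 + 2i)**3 = 1 + 6i - 12 - 8i = -11 - 2i, so
# complex_int_pow(1, 2, 3) returns (-11, -2).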
def mpc_pow(z, w, prec, rnd=round_fast):
if w[1] == fzero:
return mpc_pow_mpf(z, w[0], prec, rnd)
return mpc_exp(mpc_mul(mpc_log(z, prec+10), w, prec+10), prec, rnd)
def mpc_pow_mpf(z, p, prec, rnd=round_fast):
psign, pman, pexp, pbc = p
if pexp >= 0:
return mpc_pow_int(z, (-1)**psign * (pman<<pexp), prec, rnd)
if pexp == -1:
sqrtz = mpc_sqrt(z, prec+10)
return mpc_pow_int(sqrtz, (-1)**psign * pman, prec, rnd)
return mpc_exp(mpc_mul_mpf(mpc_log(z, prec+10), p, prec+10), prec, rnd)
def mpc_pow_int(z, n, prec, rnd=round_fast):
a, b = z
if b == fzero:
return mpf_pow_int(a, n, prec, rnd), fzero
if a == fzero:
v = mpf_pow_int(b, n, prec, rnd)
n %= 4
if n == 0:
return v, fzero
elif n == 1:
return fzero, v
elif n == 2:
return mpf_neg(v), fzero
elif n == 3:
return fzero, mpf_neg(v)
if n == 0: return mpc_one
if n == 1: return mpc_pos(z, prec, rnd)
if n == 2: return mpc_square(z, prec, rnd)
if n == -1: return mpc_reciprocal(z, prec, rnd)
if n < 0: return mpc_reciprocal(mpc_pow_int(z, -n, prec+4), prec, rnd)
asign, aman, aexp, abc = a
bsign, bman, bexp, bbc = b
if asign: aman = -aman
if bsign: bman = -bman
de = aexp - bexp
abs_de = abs(de)
exact_size = n*(abs_de + max(abc, bbc))
if exact_size < 10000:
if de > 0:
aman <<= de
aexp = bexp
else:
bman <<= (-de)
bexp = aexp
re, im = complex_int_pow(aman, bman, n)
re = from_man_exp(re, int(n*aexp), prec, rnd)
im = from_man_exp(im, int(n*bexp), prec, rnd)
return re, im
return mpc_exp(mpc_mul_int(mpc_log(z, prec+10), n, prec+10), prec, rnd)
def mpc_sqrt(z, prec, rnd=round_fast):
"""Complex square root (principal branch).
We have sqrt(a+bi) = sqrt((r+a)/2) + b/sqrt(2*(r+a))*i where
r = abs(a+bi), when a+bi is not a negative real number."""
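    # Worked instance of the formula above (sketch): for z = 3 + 4i we have r = 5,
    # so sqrt((r+a)/2) = sqrt(4) = 2 and b/sqrt(2*(r+a)) = 4/4 = 1, giving 2 + i.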
a, b = z
if b == fzero:
if a == fzero:
return (a, b)
# When a+bi is a negative real number, we get a real sqrt times i
if a[0]:
im = mpf_sqrt(mpf_neg(a), prec, rnd)
return (fzero, im)
else:
re = mpf_sqrt(a, prec, rnd)
return (re, fzero)
wp = prec+20
if not a[0]: # case a positive
t = mpf_add(mpc_abs((a, b), wp), a, wp) # t = abs(a+bi) + a
u = mpf_shift(t, -1) # u = t/2
re = mpf_sqrt(u, prec, rnd) # re = sqrt(u)
v = mpf_shift(t, 1) # v = 2*t
w = mpf_sqrt(v, wp) # w = sqrt(v)
im = mpf_div(b, w, prec, rnd) # im = b / w
else: # case a negative
t = mpf_sub(mpc_abs((a, b), wp), a, wp) # t = abs(a+bi) - a
u = mpf_shift(t, -1) # u = t/2
im = mpf_sqrt(u, prec, rnd) # im = sqrt(u)
v = mpf_shift(t, 1) # v = 2*t
w = mpf_sqrt(v, wp) # w = sqrt(v)
re = mpf_div(b, w, prec, rnd) # re = b/w
if b[0]:
re = mpf_neg(re)
im = mpf_neg(im)
return re, im
def mpc_nthroot_fixed(a, b, n, prec):
# a, b signed integers at fixed precision prec
start = 50
a1 = int(rshift(a, prec - n*start))
b1 = int(rshift(b, prec - n*start))
try:
r = (a1 + 1j * b1)**(1.0/n)
re = r.real
im = r.imag
re = MPZ(int(re))
im = MPZ(int(im))
except OverflowError:
a1 = from_int(a1, start)
b1 = from_int(b1, start)
fn = from_int(n)
nth = mpf_rdiv_int(1, fn, start)
re, im = mpc_pow((a1, b1), (nth, fzero), start)
re = to_int(re)
im = to_int(im)
extra = 10
prevp = start
extra1 = n
for p in giant_steps(start, prec+extra):
# this is slow for large n, unlike int_pow_fixed
re2, im2 = complex_int_pow(re, im, n-1)
re2 = rshift(re2, (n-1)*prevp - p - extra1)
im2 = rshift(im2, (n-1)*prevp - p - extra1)
r4 = (re2*re2 + im2*im2) >> (p + extra1)
ap = rshift(a, prec - p)
bp = rshift(b, prec - p)
rec = (ap * re2 + bp * im2) >> p
imc = (-ap * im2 + bp * re2) >> p
reb = (rec << p) // r4
imb = (imc << p) // r4
re = (reb + (n-1)*lshift(re, p-prevp))//n
im = (imb + (n-1)*lshift(im, p-prevp))//n
prevp = p
return re, im
def mpc_nthroot(z, n, prec, rnd=round_fast):
"""
Complex n-th root.
Use Newton method as in the real case when it is faster,
otherwise use z**(1/n)
"""
a, b = z
if a[0] == 0 and b == fzero:
re = mpf_nthroot(a, n, prec, rnd)
return (re, fzero)
if n < 2:
if n == 0:
return mpc_one
if n == 1:
return mpc_pos((a, b), prec, rnd)
if n == -1:
return mpc_div(mpc_one, (a, b), prec, rnd)
inverse = mpc_nthroot((a, b), -n, prec+5, reciprocal_rnd[rnd])
return mpc_div(mpc_one, inverse, prec, rnd)
if n <= 20:
prec2 = int(1.2 * (prec + 10))
asign, aman, aexp, abc = a
bsign, bman, bexp, bbc = b
pf = mpc_abs((a,b), prec)
if pf[-2] + pf[-1] > -10 and pf[-2] + pf[-1] < prec:
af = to_fixed(a, prec2)
bf = to_fixed(b, prec2)
re, im = mpc_nthroot_fixed(af, bf, n, prec2)
extra = 10
re = from_man_exp(re, -prec2-extra, prec2, rnd)
im = from_man_exp(im, -prec2-extra, prec2, rnd)
return re, im
fn = from_int(n)
prec2 = prec+10 + 10
nth = mpf_rdiv_int(1, fn, prec2)
re, im = mpc_pow((a, b), (nth, fzero), prec2, rnd)
re = normalize(re[0], re[1], re[2], re[3], prec, rnd)
im = normalize(im[0], im[1], im[2], im[3], prec, rnd)
return re, im
def mpc_cbrt(z, prec, rnd=round_fast):
"""
Complex cubic root.
"""
return mpc_nthroot(z, 3, prec, rnd)
def mpc_exp(z, prec, rnd=round_fast):
"""
Complex exponential function.
We use the direct formula exp(a+bi) = exp(a) * (cos(b) + sin(b)*i)
for the computation. This formula is very nice because it is
perfectly stable; since we just do real multiplications, the only
numerical errors that can creep in are single-ulp rounding errors.
The formula is efficient since mpmath's real exp is quite fast and
since we can compute cos and sin simultaneously.
It is no problem if a and b are large; if the implementations of
exp/cos/sin are accurate and efficient for all real numbers, then
so is this function for all complex numbers.
"""
a, b = z
if a == fzero:
return mpf_cos_sin(b, prec, rnd)
mag = mpf_exp(a, prec+4, rnd)
c, s = mpf_cos_sin(b, prec+4, rnd)
re = mpf_mul(mag, c, prec, rnd)
im = mpf_mul(mag, s, prec, rnd)
return re, im
def mpc_log(z, prec, rnd=round_fast):
re = mpf_log_hypot(z[0], z[1], prec, rnd)
im = mpc_arg(z, prec, rnd)
return re, im
def mpc_cos(z, prec, rnd=round_fast):
"""Complex cosine. The formula used is cos(a+bi) = cos(a)*cosh(b) -
sin(a)*sinh(b)*i.
The same comments apply as for the complex exp: only real
multiplications are performed, so no cancellation errors are
possible. The formula is also efficient since we can compute both
pairs (cos, sin) and (cosh, sinh) in single steps."""
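    # Worked instance (sketch): cos(1 + i) = cos(1)*cosh(1) - i*sin(1)*sinh(1)
    # ~ 0.8337 - 0.9889i.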
a, b = z
if a == fzero:
return mpf_cosh(b, prec, rnd), fzero
wp = prec + 6
c, s = mpf_cos_sin(a, wp)
ch, sh = mpf_cosh_sinh(b, wp)
re = mpf_mul(c, ch, prec, rnd)
im = mpf_mul(s, sh, prec, rnd)
return re, mpf_neg(im)
def mpc_sin(z, prec, rnd=round_fast):
"""Complex sine. We have sin(a+bi) = sin(a)*cosh(b) +
cos(a)*sinh(b)*i. See the docstring for mpc_cos for additional
comments."""
a, b = z
if a == fzero:
return fzero, mpf_sinh(b, prec, rnd)
wp = prec + 6
c, s = mpf_cos_sin(a, wp)
ch, sh = mpf_cosh_sinh(b, wp)
re = mpf_mul(s, ch, prec, rnd)
im = mpf_mul(c, sh, prec, rnd)
return re, im
def mpc_tan(z, prec, rnd=round_fast):
"""Complex tangent. Computed as tan(a+bi) = sin(2a)/M + sinh(2b)/M*i
where M = cos(2a) + cosh(2b)."""
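    # Worked instance (sketch): for z = 1 + i, M = cos(2) + cosh(2) ~ 3.3461, so
    # tan(1 + i) ~ sin(2)/M + sinh(2)/M * i ~ 0.2718 + 1.0839i.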
a, b = z
asign, aman, aexp, abc = a
bsign, bman, bexp, bbc = b
if b == fzero: return mpf_tan(a, prec, rnd), fzero
if a == fzero: return fzero, mpf_tanh(b, prec, rnd)
wp = prec + 15
a = mpf_shift(a, 1)
b = mpf_shift(b, 1)
c, s = mpf_cos_sin(a, wp)
ch, sh = mpf_cosh_sinh(b, wp)
# TODO: handle cancellation when c ~= -1 and ch ~= 1
mag = mpf_add(c, ch, wp)
re = mpf_div(s, mag, prec, rnd)
im = mpf_div(sh, mag, prec, rnd)
return re, im
def mpc_cos_pi(z, prec, rnd=round_fast):
a, b = z
b = mpf_mul(b, mpf_pi(prec+5), prec+5)
if a == fzero:
return mpf_cosh(b, prec, rnd), fzero
wp = prec + 6
c, s = mpf_cos_sin_pi(a, wp)
ch, sh = mpf_cosh_sinh(b, wp)
re = mpf_mul(c, ch, prec, rnd)
im = mpf_mul(s, sh, prec, rnd)
return re, mpf_neg(im)
def mpc_sin_pi(z, prec, rnd=round_fast):
a, b = z
b = mpf_mul(b, mpf_pi(prec+5), prec+5)
if a == fzero:
return fzero, mpf_sinh(b, prec, rnd)
wp = prec + 6
c, s = mpf_cos_sin_pi(a, wp)
ch, sh = mpf_cosh_sinh(b, wp)
re = mpf_mul(s, ch, prec, rnd)
im = mpf_mul(c, sh, prec, rnd)
return re, im
def mpc_cos_sin(z, prec, rnd=round_fast):
a, b = z
if a == fzero:
ch, sh = mpf_cosh_sinh(b, prec, rnd)
return (ch, fzero), (sh, fzero)
wp = prec + 6
c, s = mpf_cos_sin(a, wp)
ch, sh = mpf_cosh_sinh(b, wp)
cre = mpf_mul(c, ch, prec, rnd)
cim = mpf_mul(s, sh, prec, rnd)
sre = mpf_mul(s, ch, prec, rnd)
sim = mpf_mul(c, sh, prec, rnd)
return (cre, mpf_neg(cim)), (sre, sim)
def mpc_cos_sin_pi(z, prec, rnd=round_fast):
a, b = z
b = mpf_mul(b, mpf_pi(prec+5), prec+5)
if a == fzero:
ch, sh = mpf_cosh_sinh(b, prec, rnd)
return (ch, fzero), (fzero, sh)
wp = prec + 6
c, s = mpf_cos_sin_pi(a, wp)
ch, sh = mpf_cosh_sinh(b, wp)
cre = mpf_mul(c, ch, prec, rnd)
cim = mpf_mul(s, sh, prec, rnd)
sre = mpf_mul(s, ch, prec, rnd)
sim = mpf_mul(c, sh, prec, rnd)
return (cre, mpf_neg(cim)), (sre, sim)
def mpc_cosh(z, prec, rnd=round_fast):
"""Complex hyperbolic cosine. Computed as cosh(z) = cos(z*i)."""
a, b = z
return mpc_cos((b, mpf_neg(a)), prec, rnd)
def mpc_sinh(z, prec, rnd=round_fast):
"""Complex hyperbolic sine. Computed as sinh(z) = -i*sin(z*i)."""
a, b = z
b, a = mpc_sin((b, a), prec, rnd)
return a, b
def mpc_tanh(z, prec, rnd=round_fast):
"""Complex hyperbolic tangent. Computed as tanh(z) = -i*tan(z*i)."""
a, b = z
b, a = mpc_tan((b, a), prec, rnd)
return a, b
# TODO: avoid loss of accuracy
def mpc_atan(z, prec, rnd=round_fast):
a, b = z
# atan(z) = (I/2)*(log(1-I*z) - log(1+I*z))
# x = 1-I*z = 1 + b - I*a
# y = 1+I*z = 1 - b + I*a
wp = prec + 15
x = mpf_add(fone, b, wp), mpf_neg(a)
y = mpf_sub(fone, b, wp), a
l1 = mpc_log(x, wp)
l2 = mpc_log(y, wp)
a, b = mpc_sub(l1, l2, prec, rnd)
# (I/2) * (a+b*I) = (-b/2 + a/2*I)
v = mpf_neg(mpf_shift(b,-1)), mpf_shift(a,-1)
# Subtraction at infinity gives correct real part but
# wrong imaginary part (should be zero)
if v[1] == fnan and mpc_is_inf(z):
v = (v[0], fzero)
return v
beta_crossover = from_float(0.6417)
alpha_crossover = from_float(1.5)
def acos_asin(z, prec, rnd, n):
""" complex acos for n = 0, asin for n = 1
The algorithm is described in
T.E. Hull, T.F. Fairgrieve and P.T.P. Tang
'Implementing the Complex Arcsine and Arccosine Functions
using Exception Handling',
ACM Trans. on Math. Software Vol. 23 (1997), p299
The complex acos and asin can be defined as
acos(z) = acos(beta) - I*sign(b)* log(alpha + sqrt(alpha**2 -1))
asin(z) = asin(beta) + I*sign(b)* log(alpha + sqrt(alpha**2 -1))
where z = a + I*b
alpha = (1/2)*(r + s); beta = (1/2)*(r - s) = a/alpha
r = sqrt((a+1)**2 + b**2); s = sqrt((a-1)**2 + b**2)
These expressions are rewritten in different ways in different
regions, delimited by two crossovers alpha_crossover and beta_crossover,
and by abs(a) <= 1, in order to improve the numerical accuracy.
"""
a, b = z
wp = prec + 10
# special cases with real argument
if b == fzero:
am = mpf_sub(fone, mpf_abs(a), wp)
# case abs(a) <= 1
if not am[0]:
if n == 0:
return mpf_acos(a, prec, rnd), fzero
else:
return mpf_asin(a, prec, rnd), fzero
# cases abs(a) > 1
else:
# case a < -1
if a[0]:
pi = mpf_pi(prec, rnd)
c = mpf_acosh(mpf_neg(a), prec, rnd)
if n == 0:
return pi, mpf_neg(c)
else:
return mpf_neg(mpf_shift(pi, -1)), c
# case a > 1
else:
c = mpf_acosh(a, prec, rnd)
if n == 0:
return fzero, c
else:
pi = mpf_pi(prec, rnd)
return mpf_shift(pi, -1), mpf_neg(c)
asign = bsign = 0
if a[0]:
a = mpf_neg(a)
asign = 1
if b[0]:
b = mpf_neg(b)
bsign = 1
am = mpf_sub(fone, a, wp)
ap = mpf_add(fone, a, wp)
r = mpf_hypot(ap, b, wp)
s = mpf_hypot(am, b, wp)
alpha = mpf_shift(mpf_add(r, s, wp), -1)
beta = mpf_div(a, alpha, wp)
b2 = mpf_mul(b,b, wp)
# case beta <= beta_crossover
if not mpf_sub(beta_crossover, beta, wp)[0]:
if n == 0:
re = mpf_acos(beta, wp)
else:
re = mpf_asin(beta, wp)
else:
# to compute the real part in this region use the identity
# asin(beta) = atan(beta/sqrt(1-beta**2))
# beta/sqrt(1-beta**2) = (alpha + a) * (alpha - a)
# alpha + a is numerically accurate; alpha - a can have
# cancellations leading to numerical inaccuracies, so rewrite
# it in different ways according to the region
Ax = mpf_add(alpha, a, wp)
# case a <= 1
if not am[0]:
# c = b*b/(r + (a+1)); d = (s + (1-a))
# alpha - a = (1/2)*(c + d)
# case n=0: re = atan(sqrt((1/2) * Ax * (c + d))/a)
# case n=1: re = atan(a/sqrt((1/2) * Ax * (c + d)))
c = mpf_div(b2, mpf_add(r, ap, wp), wp)
d = mpf_add(s, am, wp)
re = mpf_shift(mpf_mul(Ax, mpf_add(c, d, wp), wp), -1)
if n == 0:
re = mpf_atan(mpf_div(mpf_sqrt(re, wp), a, wp), wp)
else:
re = mpf_atan(mpf_div(a, mpf_sqrt(re, wp), wp), wp)
else:
# c = Ax/(r + (a+1)); d = Ax/(s - (1-a))
# alpha - a = (1/2)*(c + d)
# case n = 0: re = atan(b*sqrt(c + d)/2/a)
# case n = 1: re = atan(a/(b*sqrt(c + d)/2)
c = mpf_div(Ax, mpf_add(r, ap, wp), wp)
d = mpf_div(Ax, mpf_sub(s, am, wp), wp)
re = mpf_shift(mpf_add(c, d, wp), -1)
re = mpf_mul(b, mpf_sqrt(re, wp), wp)
if n == 0:
re = mpf_atan(mpf_div(re, a, wp), wp)
else:
re = mpf_atan(mpf_div(a, re, wp), wp)
# to compute alpha + sqrt(alpha**2 - 1), if alpha <= alpha_crossover
# replace it with 1 + Am1 + sqrt(Am1*(alpha+1)))
# where Am1 = alpha -1
# if alpha <= alpha_crossover:
if not mpf_sub(alpha_crossover, alpha, wp)[0]:
c1 = mpf_div(b2, mpf_add(r, ap, wp), wp)
# case a < 1
if mpf_neg(am)[0]:
# Am1 = (1/2) * (b*b/(r + (a+1)) + b*b/(s + (1-a))
c2 = mpf_add(s, am, wp)
c2 = mpf_div(b2, c2, wp)
Am1 = mpf_shift(mpf_add(c1, c2, wp), -1)
else:
# Am1 = (1/2) * (b*b/(r + (a+1)) + (s - (1-a)))
c2 = mpf_sub(s, am, wp)
Am1 = mpf_shift(mpf_add(c1, c2, wp), -1)
# im = log(1 + Am1 + sqrt(Am1*(alpha+1)))
im = mpf_mul(Am1, mpf_add(alpha, fone, wp), wp)
im = mpf_log(mpf_add(fone, mpf_add(Am1, mpf_sqrt(im, wp), wp), wp), wp)
else:
# im = log(alpha + sqrt(alpha*alpha - 1))
im = mpf_sqrt(mpf_sub(mpf_mul(alpha, alpha, wp), fone, wp), wp)
im = mpf_log(mpf_add(alpha, im, wp), wp)
if asign:
if n == 0:
re = mpf_sub(mpf_pi(wp), re, wp)
else:
re = mpf_neg(re)
if not bsign and n == 0:
im = mpf_neg(im)
if bsign and n == 1:
im = mpf_neg(im)
re = normalize(re[0], re[1], re[2], re[3], prec, rnd)
im = normalize(im[0], im[1], im[2], im[3], prec, rnd)
return re, im
def mpc_acos(z, prec, rnd=round_fast):
return acos_asin(z, prec, rnd, 0)
def mpc_asin(z, prec, rnd=round_fast):
return acos_asin(z, prec, rnd, 1)
def mpc_asinh(z, prec, rnd=round_fast):
# asinh(z) = I * asin(-I z)
a, b = z
a, b = mpc_asin((b, mpf_neg(a)), prec, rnd)
return mpf_neg(b), a
def mpc_acosh(z, prec, rnd=round_fast):
# acosh(z) = -I * acos(z) for Im(acos(z)) <= 0
# +I * acos(z) otherwise
a, b = mpc_acos(z, prec, rnd)
if b[0] or b == fzero:
return mpf_neg(b), a
else:
return b, mpf_neg(a)
def mpc_atanh(z, prec, rnd=round_fast):
# atanh(z) = (log(1+z)-log(1-z))/2
wp = prec + 15
a = mpc_add(z, mpc_one, wp)
b = mpc_sub(mpc_one, z, wp)
a = mpc_log(a, wp)
b = mpc_log(b, wp)
v = mpc_shift(mpc_sub(a, b, wp), -1)
# Subtraction at infinity gives correct imaginary part but
# wrong real part (should be zero)
if v[0] == fnan and mpc_is_inf(z):
v = (fzero, v[1])
return v
def mpc_fibonacci(z, prec, rnd=round_fast):
re, im = z
if im == fzero:
return (mpf_fibonacci(re, prec, rnd), fzero)
size = max(abs(re[2]+re[3]), abs(im[2]+im[3]))
wp = prec + size + 20
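    # Binet-style formula (sketch): with a = phi and b = 2*phi - 1 = sqrt(5), the
    # code below evaluates fib(z) = (phi**z - cos(pi*z)/phi**z) / sqrt(5) as
    # u = phi**z, v = cos(pi*z)/u, then (u - v)/b.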
a = mpf_phi(wp)
b = mpf_add(mpf_shift(a, 1), fnone, wp)
u = mpc_pow((a, fzero), z, wp)
v = mpc_cos_pi(z, wp)
v = mpc_div(v, u, wp)
u = mpc_sub(u, v, wp)
u = mpc_div_mpf(u, b, prec, rnd)
return u
def mpf_expj(x, prec, rnd='f'):
raise ComplexResult
def mpc_expj(z, prec, rnd='f'):
re, im = z
if im == fzero:
return mpf_cos_sin(re, prec, rnd)
if re == fzero:
return mpf_exp(mpf_neg(im), prec, rnd), fzero
ey = mpf_exp(mpf_neg(im), prec+10)
c, s = mpf_cos_sin(re, prec+10)
re = mpf_mul(ey, c, prec, rnd)
im = mpf_mul(ey, s, prec, rnd)
return re, im
def mpf_expjpi(x, prec, rnd='f'):
raise ComplexResult
def mpc_expjpi(z, prec, rnd='f'):
re, im = z
if im == fzero:
return mpf_cos_sin_pi(re, prec, rnd)
sign, man, exp, bc = im
wp = prec+10
if man:
wp += max(0, exp+bc)
im = mpf_neg(mpf_mul(mpf_pi(wp), im, wp))
if re == fzero:
return mpf_exp(im, prec, rnd), fzero
ey = mpf_exp(im, prec+10)
c, s = mpf_cos_sin_pi(re, prec+10)
re = mpf_mul(ey, c, prec, rnd)
im = mpf_mul(ey, s, prec, rnd)
return re, im
|
|
import sys, time
sys.path.append("../../")
sys.path.append("../")
import numpy as np
import pylab as p
import scipy.stats.distributions as s
#Display
from tikz_graphs import StateGraphParams, StateGraph
from utils import Utils, PhraseUtils
"""
* Nog uitstaande vir Grid2:
* Make seker Gaussian offset is reg en alle moontlike opsies som na een. (begin en einde delay)
* Stel plot op i.t.v. duration i.pv om tyd te fix
* Verstel Gaussian noise en maak plots (mean en std)
* Wys ook geen uittree simbool
* Doen dieselfde vir Ticker: Run dp randomsier vir 1 and 2 channels
* Onthou die enigste uittree codes is eintlik 1000001000 ens. nie alle permutasies van 1'e nie
(so eintlik makliker as die grid)
* Ook p(x) is uniform wat die information rate drasties makliker maak
* Doen wpm eksperiment: vir 'n gegewe stel letter probs om bitrate te bepaal
"""
class GridSimulation():
########################################### Init
def __init__(self, i_display=False):
#Assume Gaussians in middle of file
self.display = i_display
self.output_dir = "/home/emlimari/ticker_dev/pami_2017_scanning/figures/" #./results/simulations/" "./results/simulations/"
self.word_length = 5.0
self.debug = False
self.utils = Utils()
self.phrase_utils = PhraseUtils()
self.initDefaultConfig()
self.getLetterPos(self.display)
self.early_bail_prob = 0.01
#Add an extra sound at the beginning to help pre-empting of the first character
#The first sound will be double the length
self.add_tick = True
def setGroupDelta(self, i_group_delta):
self.group_delta = i_group_delta
self.last_scan_time = int(np.ceil( self.scan_delay + self.group_delta / self.scan_delay))
def init(self, i_scan_delay, i_click_time_delay, i_std, i_config ):
self.scan_delay= i_scan_delay
self.click_time_delay = i_click_time_delay
self.std = i_std
self.setGroupDelta(self.click_time_delay + 2.0* self.std)
if i_config is not None:
self.config = list(i_config)
else:
self.initDefaultConfig()
self.getLetterPos(self.display)
self.letter_info = self.getLetterDurations(self.display)
self.n_rows = len(self.config)
self.n_cols = 0
for cols in self.config:
self.n_cols = max(self.n_cols, len(cols))
def initDefaultConfig(self):
self.config = [['a','b','c','d','_','D'],
['e','f','g','h','.'],
['i','j','k','l','m','n'],
['o','p','q','r','s','t'],
['u','v','w','x','y','z']]
##################################### Main
def compute(self, i_sentence, i_params):
words = self.phrase_utils.wordsFromSentece(i_sentence)
(n_errors, n_undo, fr, fp_rate, scan_delay, click_time_delay, std, n_max, draw_tikz, display) = i_params
self.init(scan_delay, click_time_delay, i_std=std, i_config=None)
print "****************************************************************************************************************************"
print "Words to test = ", words
M = len(words)
total = np.zeros(10)
for m in range(0, M):
print "****************************************************************************************************************************"
grnd_truth_word = self.phrase_utils.getWord(words[m])
(results, scan_probs, click_probs, err_probs, wpm_probs) = self.wordResults(grnd_truth_word, i_params)
self.displayResults(results, True)
total += np.array(list(results))
total /= M
print "****************************************************************************************************************************"
print "Final results Grid"
print "****************************************************************************************************************************"
self.displayResults(tuple(total), True, True)
print "****************************************************************************************************************************"
return total
def probResults(self, i_grnd_truth_word, i_params):
#results: (min_scans, avg_scans, std_scans, min_wpm, avg_wpm, std_wpm, avg_chr_err, std_chr_err)
(n_errors, n_undo, fr, fp_rate, scan_delay, click_time_delay, std, n_max, draw_tikz, display) = i_params
max_scans = self.getMaxScans(i_grnd_truth_word, n_max)
#Get the Markov chain structure
(states, transitions, transition_probs, scan_delays ) = self.getStates(i_grnd_truth_word, i_params )
#Compute the pdfs for the stats
(scan_probs, click_probs, err_probs, wpm_probs) = self.stateProbs(states, transitions, transition_probs, scan_delays,
max_scans, i_grnd_truth_word, n_errors, display)
return (scan_probs, click_probs, err_probs, wpm_probs)
def wordResults(self, i_grnd_truth_word, i_params):
(scan_probs, click_probs, err_probs, wpm_probs) = self.probResults(i_grnd_truth_word, i_params)
#Compute the stats using the probs and possible outputs that correspond to the probs
results = self.probsToResults( i_grnd_truth_word, scan_probs, click_probs, err_probs, wpm_probs)
(n_errors, n_undo, fr, fp_rate, scan_delay, click_time_delay, std, n_max, draw_tikz, display) = i_params
self.displayResults( results, display)
if click_probs is None:
click_probs = 0.0
return (results, scan_probs, click_probs, err_probs, wpm_probs)
def probsToResults(self, i_grnd_truth_word, i_scan_probs, i_click_probs, i_err_probs, i_wpm_probs):
if i_click_probs is None:
n_clicks = None
else:
n_clicks = len(i_click_probs)
(scans,scans_wpm, clicks, errors) = self.getPossibleResultValues(i_grnd_truth_word, len(i_scan_probs), n_clicks, len(i_err_probs))
#Text-entry speed: number of scans and wpm
avg_scans = np.sum(scans*i_scan_probs)
std_scans = np.sqrt(np.sum(((scans-avg_scans)**2)*i_scan_probs))
min_scans = self.getMinScans(i_grnd_truth_word)
min_wpm = self.scansToWpm(min_scans, i_grnd_truth_word)
#Do a variable transformation to find the pdf for wpm
avg_wpm = self.scansToWpm(avg_scans, i_grnd_truth_word) #np.sum(scans_wpm*i_wpm_probs)
std_wpm = avg_wpm - self.scansToWpm(avg_scans+std_scans, i_grnd_truth_word) # np.sqrt(np.sum(((scans_wpm-avg_wpm)**2)*i_wpm_probs))
#Number of clicks
if clicks is not None:
avg_clicks = np.sum(clicks*i_click_probs)
std_clicks = np.sqrt(np.sum(((clicks-avg_clicks)**2)*i_click_probs))
else:
avg_clicks = 0.0
std_clicks = 0.0
#Number of character errors
avg_chr_err = np.sum(errors*i_err_probs)
std_chr_err = np.sqrt(np.sum(((errors-avg_chr_err)**2)*i_err_probs))
#Final results
results = (min_scans, avg_scans, std_scans, min_wpm, avg_wpm, std_wpm, avg_chr_err, std_chr_err, avg_clicks, std_clicks)
return results
def getPossibleResultValues(self, i_grnd_truth_word, i_n_scans, i_n_clicks, i_n_errors):
scans = np.array(range(0,i_n_scans))
scans_wpm = self.scansToWpm(scans, i_grnd_truth_word)
if i_n_clicks is not None:
clicks = np.array(range(0, i_n_clicks))
clicks = clicks / float(len(i_grnd_truth_word))
else: clicks = None
errors = 100.0*np.array(range(0, i_n_errors)) / len(i_grnd_truth_word)
return (scans, scans_wpm, clicks, errors)
def getProbStateSeq(self, i_states, i_transitions, i_transition_probs, i_seq, i_display ):
if i_display:
print "************************************************************"
print "i_seq = ", i_seq
print "************************************************************"
(states, dest) = self.convertDest(i_states, i_transitions )
states -= 1
seq = np.array(i_seq) - 1
o_seq_prob = []
for n in range(0, len(seq)-1):
(src, dst) = (seq[n], seq[n+1])
idx = np.nonzero( dest[src] == dst )[0]
if len( idx ) < 1:
print "ERR: idx = ", idx, " src = ", src, " dst = ", dst
raise ValueError("no state has empty transitions!")
o_seq_prob.extend( i_transition_probs[src][idx] )
print o_seq_prob
o_seq_prob = np.array(o_seq_prob)
return (np.cumprod(o_seq_prob)[-1], o_seq_prob.flatten())
##################################### Probability computations
#Function that computes the probability distributions needed to compute expectations
def stateProbs(self, i_states, i_transitions, i_transition_probs, i_scan_delays, i_T, i_grnd_truth_word, i_max_spurious, i_display):
"""
*o_probs in format:
* rows: time steps
* cols:
0 - word error state
1 - system failure state
2 - correct state
* o_err_probs: Probs of states associated with column and "_", but not correct, i.e., an erroneous word selection
"""
t = time.time()
#The number of correct letters in a word and total number of states
(M, N) = (len(i_grnd_truth_word),len(i_states))
#State ids
(state_nums, dest) = self.convertDest( i_states, i_transitions )
#Forwards probs, the prob to be in any state at any given time
state_probs = -np.inf*np.ones(N)
state_probs[0] = 0.0
#Probs for the number of scans needed to write any word
max_scans = np.max(i_scan_delays)
scan_probs = -np.inf*np.ones( [N, max_scans*i_T+1])
scan_probs[0,0] = 0.0
#scan_probs2 = -np.inf*np.ones( i_T +1 )
#Probs for number of clicks to write any word
click_probs = -np.inf*np.ones( [N, i_T+2])
click_probs[0,0] = 0.0
#Probs for number of erroneous characters
err_probs = -np.inf*np.ones(M+i_max_spurious+1)
for cur_time in range(0, i_T):
state_probs_prev = np.array(state_probs)
state_probs[0:N-3] = -np.inf*np.ones(N-3)
click_probs_prev = np.array(click_probs)
click_probs[0:N-3,:] = -np.inf
scan_probs_prev = np.array(scan_probs)
scan_probs[0:N-3,:] = -np.inf
#Do the non-terminating states first
for i in range(0, N-3):
(p_click, p_miss) = (np.log(i_transition_probs[i][0]), np.log(i_transition_probs[i][1]))
(dest_click, dest_miss) = (dest[i][0] , dest[i][1])
psum_click = self.utils.elnprod(state_probs_prev[i], p_click)
psum_miss = self.utils.elnprod(state_probs_prev[i], p_miss)
#State probs
state_probs[dest_click] = self.utils.elnsum(state_probs[dest_click], psum_click)
state_probs[dest_miss] = self.utils.elnsum(state_probs[dest_miss], psum_miss)
#The scan probs (how long it takes)
prob_click = scan_probs_prev[i, 0:-i_scan_delays[i]] + p_click
prob_miss = scan_probs_prev[i, 0:-i_scan_delays[i]] + p_miss
tmp_probs = np.vstack( (scan_probs[dest_click,i_scan_delays[i]:], prob_click)).transpose()
scan_probs[dest_click,i_scan_delays[i]:] = self.utils.expTrick(tmp_probs).flatten()
tmp_probs = np.vstack( (scan_probs[dest_miss,i_scan_delays[i]:], prob_miss)).transpose()
scan_probs[dest_miss,i_scan_delays[i]:] = self.utils.expTrick(tmp_probs).flatten()
#The click probabilities
prob_click = click_probs_prev[i,0:cur_time+2] + p_click
prob_miss = click_probs_prev[i,0:cur_time+2] + p_miss
tmp_probs = np.vstack( (click_probs[dest_click,1:cur_time+2], prob_click[0:cur_time+1])).transpose()
click_probs[dest_click,1:cur_time+2] = self.utils.expTrick(tmp_probs).flatten()
tmp_probs = np.vstack( (click_probs[dest_miss,0:cur_time+2], prob_miss)).transpose()
click_probs[dest_miss,0:cur_time+2] = self.utils.expTrick(tmp_probs).flatten()
#The state id
(letter_t, col, n_error, m, n_undo) = self.itemizeStateId(i_states[i])
#At all time steps, except for T, a terminating state can only be reached by clicking
#while being busy with a column scan
if not(self.isTerminatingDest(dest,i,0,N) and col):
continue
#scan_probs2[cur_time+1] = self.utils.elnsum(scan_probs2[cur_time+1], psum_click)
"""Char error probs: See if the current letter occurs in remaining part of word
If the system failed the whole word is taken to be wrong
if i_grnd_truth_word.find(letter_t, m, M) >= 0:
"""
#In case of a click where the destination is not the correct state, the
#remaining word from the spurious event is taken to be wrong.
if dest[i][0] == N-1:
m += 1
else:
n_error += 1
err_probs[M-m+n_error] = self.utils.elnsum(err_probs[M-m+n_error], psum_click)
if self.isEarlyBailTestStateProbs(state_probs, cur_time+1, i_T, i_display):
break
(state_probs, scan_probs, click_probs, err_probs) = self.updateMaxTimeFailureProbs(state_probs, scan_probs,
click_probs, err_probs, M, N, i_T, cur_time)
scan_probs = self.utils.expTrick(scan_probs[-3:,:].transpose())
click_probs = self.utils.expTrick(click_probs[-3:,:].transpose())
(scan_probs, click_probs, err_probs) = ( np.exp(scan_probs), np.exp(click_probs), np.exp(err_probs))
wpm_probs = np.array(scan_probs)
print "Total time = ", time.time() - t, " seconds"
print "state_probs = ", state_probs[-3:]
return (scan_probs, click_probs, err_probs, wpm_probs )
def updateMaxTimeFailureProbs(self, state_probs, scan_probs, click_probs, err_probs, M, N, i_T, i_bail_out_time ):
#A system failure has occurred because the maximum time was reached; include these probabilities.
#These probs are included if the time of early bail-out is close to the total time allowed.
eps_time = 5
if (i_bail_out_time + eps_time) < i_T:
return (state_probs, scan_probs, click_probs, err_probs)
sum_fail = self.utils.expTrick(np.atleast_2d(state_probs[0:N-3]))[0]
state_probs[N-3] = self.utils.elnsum(state_probs[N-3], sum_fail)
#Text-entry rate
fail_scan_probs = self.utils.expTrick( scan_probs[0:N-3,:].transpose())
fail_scan_probs = np.vstack( (scan_probs[N-3,:], fail_scan_probs)).transpose()
scan_probs[N-3,:] = np.array(self.utils.expTrick(fail_scan_probs))
#scan_probs[-1] = self.utils.elnsum(scan_probs[-1], sum_fail)
#Click probs
if click_probs is not None:
fail_click_probs = self.utils.expTrick( click_probs[0:N-3,:].transpose())
fail_click_probs = np.vstack( (click_probs[N-3,:], fail_click_probs)).transpose()
click_probs[N-3,:] = np.array(self.utils.expTrick(fail_click_probs))
#Number of character errors
err_probs[M] = self.utils.elnsum(err_probs[M], sum_fail)
return (state_probs, scan_probs, click_probs, err_probs)
##################################### State Topology
def getStates(self,i_word, i_params ):
"""States in format:
[output letter, row (*) / col (_), input letter number, #spurious click, #undo]"""
(n_errors, n_undo, self.fr, self.fp_rate, scan_delay, click_time_delay, self.std, n_max, draw_tikz, display) = i_params
self.displayParams(i_word, i_params)
n_input_letters = len(i_word)
(row_states, col_states, row_transitions, col_transitions, row_probs, col_probs) = ([],[],{},{},[],[])
(states, transitions,transition_probs, state_ids) = ([], {},[], [])
scan_delays = [] #Add the number of scan delays associated with each state
for letter_idx in range(0, n_input_letters):
letter = i_word[letter_idx]
if letter_idx >= (n_input_letters-1):
correct_id = "Correct"
else:
correct_id = self.getStateId(self.config[0][0], letter_idx+1, i_click=0, i_undo=0, i_row=True)
for click in range(0, n_errors):
if display :
print "******************************"
print "Letter = ", letter, " click ", click
print "---------------------------------------"
(tmp_row_states, tmp_row_transitions, tmp_row_probs) = self.getRowStates( letter, letter_idx, click, self.letter_info['0'] , display )
row_states.extend(tmp_row_states)
#Add the number of scan_delays associated with all states (typically more if a tick sound is added at the beginning)
tmp_scan_delays = np.ones(len(tmp_row_states), dtype=np.int)
if self.add_tick:
tmp_scan_delays[0] += 1
scan_delays.extend(tmp_scan_delays)
states.extend(tmp_row_states)
row_probs.extend(tmp_row_probs)
transition_probs.extend(tmp_row_probs)
for key in tmp_row_transitions.keys():
row_transitions[key] = tmp_row_transitions[key]
transitions[key] = row_transitions[key]
for undo in range(0, n_undo):
(tmp_col_states, tmp_col_transitions, tmp_col_probs) = self.getColStates( letter, letter_idx, click,
undo, n_undo, n_errors, correct_id, self.letter_info['1'], display )
col_states.extend(tmp_col_states)
tmp_scan_delays = np.ones(len(tmp_col_states), dtype=np.int)
if self.add_tick:
tmp_scan_delays[0] += 1
scan_delays.extend(tmp_scan_delays)
states.extend(tmp_col_states)
state_ids.append([str(letter_idx),click,undo])
col_probs.extend(tmp_col_probs)
transition_probs.extend(tmp_col_probs)
for key in tmp_col_transitions.keys():
col_transitions[key] = tmp_col_transitions[key]
transitions[key] = col_transitions[key]
if display:
self.displayStates( row_states, col_states, row_transitions, col_transitions, n_undo, n_errors, n_input_letters, scan_delays)
#Update word selection (error state), the system failure state and the correct state
states.extend(['Err', 'Failure', 'Correct'])
scan_delays.extend([0,0,0])
transition_probs.extend([np.array([1.0]),np.array([1.0]), np.array([1.0])])
transitions['Correct'] = ['Correct']
transitions['Failure'] = ['Failure']
transitions['Err'] = ['Err']
if draw_tikz:
self.tikzStateDiagram( i_word, states, transitions, transition_probs)
return (states, transitions, transition_probs, scan_delays)
def getRowStates(self, i_input_letter, i_input_letter_num, i_click, i_letter_info, i_display):
states = []
transition_probs = []
transitions = {}
row_ids = self.getGridRow()
row_ids.append(row_ids[0])
if i_click > 0:
"""The user will try to undo an erroneous click"""
gauss_mean = self.getInputClickInfoRow('D', i_letter_info)
else:
gauss_mean = self.getInputClickInfoRow( i_input_letter, i_letter_info)
#Compute the row-scan destinations and state ids
for r in range(0 , len(row_ids)-1):
#The colum-scan id if a click happens - reset undo to zero
dest_click = self.getStateId(self.config[r][0], i_input_letter_num, i_click, i_undo=0,i_row=False)
"""Proceed to the next letter if this one is missed, repeat scan at last letter"""
dest_miss = self.getStateId(row_ids[r+1], i_input_letter_num, i_click, None,i_row=True)
states.append(self.getStateId(row_ids[r], i_input_letter_num, i_click, None, i_row=True) )
transitions[states[-1]]=[dest_click, dest_miss ]
(prob_click, prob_miss ) = self.clickProb( row_ids[0:-1], i_letter_info, gauss_mean, r , i_display )
transition_probs.append(np.array([prob_click, prob_miss]))
return (states, transitions, transition_probs)
def getColStates(self, i_input_letter, i_input_letter_num, i_click, i_undo, i_n_undo, i_n_errors, i_correct_id, i_letter_info, i_display ):
#Compute the current row ids
states = []
transition_probs = []
transitions = {}
row_ids = self.getGridRow()
for r in range(0 , len(row_ids)):
gauss_mean= self.getInputClickInfoCol(i_input_letter, i_letter_info, i_click, r )
dest_list = list(self.config[r])
for c in range(0, len(dest_list)):
#The destinations if a click happens
if (dest_list[c] == i_input_letter) and (i_click == 0):
"""If the user clicks on the desired letter and nothing else has been written,
i.e., all errors undone, and its the end of a word the correct state is reached
"""
dest_click = i_correct_id
elif dest_list[c] == "D":
"""Click on backspace/delete"""
dest_click = self.getStateId( row_ids[0], i_input_letter_num-1, i_click-1, i_undo=0, i_row=True)
elif (i_click+1) >= i_n_errors:
"""If too many letters have been selected the system fails"""
dest_click = 'Failure'
elif (dest_list[c] == "_") or (dest_list[c] == "."):
dest_click = 'Err'
else:
"""If a false positive click happens the undo counter is set to zero and the current
click is incremented"""
dest_click = self.getStateId( row_ids[0], i_input_letter_num, i_click+1, i_undo=0, i_row=True)
#The destinations if a click is missed
if c == ( len(dest_list) - 1):
if ( (i_undo + 1) >= i_n_undo) and (i_n_undo > 0) :
"""We've reached an undo after false positive click, go back to row scan"""
dest_miss = self.getStateId( row_ids[0], i_input_letter_num, i_click , i_undo=0, i_row=True)
else:
"""Increment the undo counter"""
dest_miss = self.getStateId( dest_list[0], i_input_letter_num, i_click , i_undo + 1, i_row=False)
else:
dest_miss = self.getStateId(dest_list[c+1], i_input_letter_num, i_click, i_undo, i_row=False)
#Get the current state id
states.append(self.getStateId( dest_list[c], i_input_letter_num, i_click, i_undo, i_row=False))
#Compute the transition for this state
transitions[states[-1]]= [dest_click, dest_miss]
#The transition probabilities
(prob_click, prob_miss ) = self.clickProb( dest_list , i_letter_info[r], gauss_mean, c, i_display )
transition_probs.append(np.array([prob_click, prob_miss]))
return (states, transitions, transition_probs)
def convertDest(self, i_states, i_transitions ):
state_idx = {}
o_dest = []
#The states
for i in range(0, len(i_states)):
state_idx[i_states[i]] = i
#The Destinations
for i in range(0,len(i_states)):
o_dest.append( np.array([state_idx[t] for t in i_transitions[i_states[i]]]))
o_states = np.array( [i+1 for i in range(0, len(i_states))])
return (o_states, o_dest)
def getStateId(self, i_output_letter, i_input_letter_num, i_click, i_undo, i_row):
"""Each state has an id depending on the letter we're trying to write given the input letter,
whether we're busy with a row scan (i_row=True, id will have *) or column scan (i_row=False, id will have _), the number of spurious clicks
that has happend, the current undo iteration (only applicable to column scans)."""
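        #Example ids (sketch): getStateId('a', 2, 1, 0, i_row=False) gives "a_2,1,0"
        #(column scan), while getStateId('a', 2, 1, None, i_row=True) gives "a*2,1 "
        #(row states carry a trailing space instead of an undo count).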
id = str(i_output_letter)
if i_row:
id += '*'
else:
id += '_'
if i_click < 0:
click = 0
else:
click = i_click
if not isinstance(i_input_letter_num, int):
raise ValueError("Input str should be an int")
if i_input_letter_num < 0:
input_letter_num = 0
else:
input_letter_num = i_input_letter_num
id += (str(input_letter_num) + "," + str(click))
if not i_row:
id += ("," + str(i_undo))
else:
id += " "
return id
def itemizeStateId(self, i_state_id):
(letter_t, col, m, n_error) = (i_state_id[0], bool(i_state_id[1] == '_'), int(i_state_id[2]), int(i_state_id[4]))
if not col:
n_undo = 0
else:
n_undo = int(i_state_id[6])
return (letter_t, col, n_error, m, n_undo)
##################################### Code Probs
def clickProb(self, seq, time_info, gauss_mean, cnt, i_display ):
"""
* gauss_mean: The mean of the Gaussian the user is supposed to click on
* current_mean: The mean of the Gaussian associated with the current scan (row/col cnt)
* current scan info extracted from time_info
* seq: All the cells that have to be scanned, currently we're at cell cnt."""
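        #Sketch of the computation below: q is the Gaussian mass of the click falling
        #inside the current scan window, so with a negligible false-positive rate
        #(fp ~ 1) the click probability reduces to roughly (1 - fr)*q.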
#The cells might have different lengths
delta_time = np.float32( time_info['t_end'][cnt] - time_info['t_start'][cnt])
fp = self.getFalsePositiveProb(self.fp_rate, delta_time)
if gauss_mean is None:
#The user has clicked on the wrong row and now waits to cancel it
click_output_1 = 1.0 - fp
click_output_0 = fp
if i_display:
print "%s obs_1 =%.4f, obs_0=%.4f " % (seq[cnt], click_output_1, click_output_0)
else:
"""The user wants to click on either undo or the target letter"""
(letter, t_start, t_end, current_mean) = (seq[cnt], time_info['t_start'][cnt], time_info['t_end'][cnt], time_info['gauss_mean'][cnt])
click_pdf_end = s.norm.cdf(x=t_end, loc=gauss_mean, scale=self.std)
click_pdf_start = s.norm.cdf(x=t_start, loc=gauss_mean, scale=self.std)
q = click_pdf_end - click_pdf_start
click_output_0 = fp * ( 1.0 - ((1.0-self.fr)*q) )
click_output_1 = 1.0 - click_output_0
if i_display:
print "%s g_mean=%.4f, t_mean=%.4f, q=%.6f " % (letter, gauss_mean, current_mean, q),
print " obs_1 =%.4f, obs_0=%.4f " % ( click_output_1, click_output_0 ),
print " t_start = %.4f, t_end = %.4f " % ( t_start + self.click_time_delay, t_end+ self.click_time_delay )
return (click_output_1, click_output_0 )
def getFalsePositiveProb(self, i_fp_rate, i_delta_time):
fp = np.exp(-self.fp_rate * i_delta_time)
return fp
############################################# Display
def displayParams(self, i_grnd_truth_word, i_params):
(n_errors, n_undo, fr, fp_rate, scan_delay, click_time_delay, std, n_max, draw_tikz, display) = i_params
max_scans = self.getMaxScans(i_grnd_truth_word, n_max)
disp_str = "Params for %s:" % i_grnd_truth_word
disp_str += " #errors=%d, #undo=%d, draw_tikz=%d, disp=%d," % (n_errors, n_undo, draw_tikz, display)
disp_str +=" fr=%.2f, fp_rate=%.5f, scan_delay=%1.3s, " % (fr, fp_rate, scan_delay)
disp_str +=" click_delay=%1.3s, click_std=%1.3s," % (click_time_delay, std)
disp_str +=" max scans=%d" % (max_scans)
print disp_str
def tikzStateDiagram(self, i_input_letters, i_states, i_transitions, i_transition_probs):
params = StateGraphParams( i_filename=(self.output_dir + "grid1.tikz"), i_scale=0.8)
params.ns = 0.4
params.rect_height = 3.0
params.show_probs = False
params.x_offset = 0.8
graph = StateGraph(params)
graph.compute( i_input_letters, i_states, i_transitions, i_transition_probs)
def displayStates(self, i_row_states, i_col_states, i_row_transitions, i_col_transitions, i_n_undo, i_n_clicks, i_n_input_letters, scan_delays):
n_letters_rows = len(self.getGridRow())
n_letters_cols = len(self.letter_pos.keys())
for click in range(0, i_n_clicks):
disp_str="======================================================================================="
disp_str+= "===================================================================="
print disp_str
self.__displayStates(i_row_states, i_row_transitions, click, 1, i_n_clicks, n_letters_rows, i_n_input_letters, scan_delays)
disp_str="---------------------------------------------------------------------------------------"
disp_str+= "--------------------------------------------------------------------"
print disp_str
self.__displayStates(i_col_states, i_col_transitions, click, i_n_undo, i_n_clicks, n_letters_cols, i_n_input_letters, scan_delays)
print disp_str
def __displayStates(self, i_states, i_transitions, i_click, i_n_undo, i_n_clicks, i_n_output_letters, i_n_input_letters, i_scan_delays):
for n in range(0, i_n_output_letters):
for m in range(0, i_n_input_letters):
for undo in range(0, i_n_undo ):
disp_str = ""
letter_offset = m*i_n_output_letters * i_n_clicks * i_n_undo
click_offset = i_click * i_n_undo * i_n_output_letters
undo_offset = undo*i_n_output_letters
state_idx = letter_offset + click_offset + undo_offset + n
state_id = i_states[state_idx]
disp_str += "%s: [ " % state_id
for k in range(0, len( i_transitions[state_id] ) ):
disp_str += "%s" % i_transitions[state_id][k]
if k < (len( i_transitions[state_id] ) - 1):
disp_str += " , "
disp_str += (" ], scan_delay = %1.1f " % i_scan_delays[state_idx] )
print disp_str
def displayResults(self, i_results, i_display, i_display_all=False):
if not i_display:
return
(min_scans, avg_scans, std_scans, min_wpm, avg_wpm, std_wpm, avg_err_rate, std_err_rate, avg_cpc, std_cpc) = i_results
if i_display_all:
print "min scans=%d, avg scans=%.2f, std scans=%.2f, min_wpm=%.2f" % (min_scans,avg_scans,std_scans,min_wpm)
r= (avg_cpc, std_cpc, avg_wpm, std_wpm, avg_err_rate, std_err_rate)
self.utils.dispResults(r)
def displayProbs(self, i_scan_probs, i_click_probs, i_err_probs):
print "********************************************************"
print "Scan probs (for text-entry rate, total number of scans (0 ... T))"
print "********************************************************"
self.utils.printMatrix(np.atleast_2d(i_scan_probs))
print "********************************************************"
print "Click probs (for number total number of clicks (0 ... T))"
print "********************************************************"
if i_click_probs is None:
print "No click probabilities computed"
else:
self.utils.printMatrix(np.atleast_2d(i_click_probs))
print "********************************************************"
print "Error probs (for number total number of erroneous characters, 0 ... M+#spurious)"
print "********************************************************"
self.utils.printMatrix(np.atleast_2d(i_err_probs))
def displaySpeed(self, i_scan_probs, i_input_word):
scans = np.arange(0, len(i_scan_probs))
avg_scans = np.sum(scans*i_scan_probs)
std_scans = np.sqrt(np.sum(((scans-avg_scans)**2)*i_scan_probs))
idx_max = np.argmax(i_scan_probs)
print "***************************************************************"
print "Speed stats"
print "***************************************************************"
print "scan_probs sum = ", np.sum( i_scan_probs ) , " avg_scans = ", avg_scans, " std_scans = ", std_scans
print "Min scans = ", self.getMinScans(i_input_word)
print "Best probs = ", idx_max, " prob = ", i_scan_probs[idx_max]
####################################### Get
def getMaxScans(self, i_grnd_truth_word, i_n_max):
max_scans = i_n_max*len(i_grnd_truth_word)*self.n_rows*self.n_cols
return max_scans
def getLetterPos(self,i_display=False):
if i_display:
print "===================================="
print "Letter Pos"
print "===================================="
self.letter_pos = {}
for row in range(0, len(self.config)):
for col in range(0, len(self.config[row])):
self.letter_pos[self.config[row][col]] = (row,col)
if i_display:
print self.config[row][col], " ", (row,col)
def getLetterDurations(self , i_display=False):
letter_info = []
if i_display:
print "===================================="
print "Letter Duration Info"
print "===================================="
for row_cnt in range(0, len(self.config)):
row = self.config[row_cnt]
durations = self.getSeqDuration(row, i_display)
letter_info.append(dict(durations))
durations = self.getSeqDuration(self.getGridRow(), i_display)
o_info = {'1' : list(letter_info), '0': dict(durations) }
return o_info
def getSeqDuration(self, i_seq, i_display):
durations = {}
durations['t_start'] = np.cumsum( np.array(self.scan_delay*(np.ones(len(i_seq)))) )
if not self.add_tick:
durations['t_start'] -= self.scan_delay
durations['t_end'] = durations['t_start'] + self.scan_delay
durations['gauss_mean'] = durations['t_start'] + self.click_time_delay - 0.5*self.scan_delay
durations['t_start'][0] = 0.0
total_time = durations['t_end'][-1]
if i_display:
print "Input seq=%s " % (''.join(i_seq)),
print "t_start=" , self.utils.stringVector( durations['t_start'] + self.click_time_delay, i_type="%.1f" ),
print "t_end=" , self.utils.stringVector( durations['t_end'] + self.click_time_delay, i_type="%.1f" ),
print "g_mean=", self.utils.stringVector( durations['gauss_mean'], i_type="%.1f" ),
print "total_time = %.3f " % (total_time),
print " add_tick = " , self.add_tick, " scan delay = " ,self.scan_delay, " click_time delay = ", self.click_time_delay
return durations
def getInputClickInfoRow(self, x_id, click_info):
letter_pos = self.letter_pos[x_id][0]
gauss_mean = click_info['gauss_mean'][letter_pos]
return gauss_mean
def getInputClickInfoCol(self, x_id, click_info, i_click, i_cur_row):
(letter_pos_row, letter_pos_col) = ( self.letter_pos[x_id][0] , self.letter_pos[x_id][1] )
gauss_mean_grnd_truth = click_info[letter_pos_row]['gauss_mean'][letter_pos_col]
(letter_pos_row, letter_pos_col) = ( self.letter_pos["D"][0] , self.letter_pos["D"][1] )
gauss_mean_del = click_info[letter_pos_row]['gauss_mean'][letter_pos_col]
if i_click == 0:
"""No spurious clicks received"""
if i_cur_row == self.letter_pos[x_id][0]:
"""The intentional click is still on track: That is, no spurious clicks received and the current row iteration is equal to the ground truth row."""
gauss_mean = gauss_mean_grnd_truth
else:
"""It is assumed that the user would like to undo row scan by wait if the wrong row was selected"""
gauss_mean = None
else:
if i_cur_row == self.letter_pos["D"][0]:
"""Spurious clicks, the user is in the correct row to be able to UNDO"""
gauss_mean = gauss_mean_del
else:
"""It is assumed that the user would like to undo row scan by wait if the wrong row was selected"""
gauss_mean = None
return gauss_mean
def getGridRow(self):
"""Return the letters associated with selecting a row, i.e., the first click"""
return [letters[0] for letters in self.config]
def getMinScans(self, i_word):
"""Get the minimum time (measured as #scans) it should take to write a sentence using Grid 2"""
t = 0
for letter in i_word:
(row, col) = self.letter_pos[letter]
total = row+1 + col + 1
if self.add_tick:
total += 2
t += total
#print "sentence = ", i_sentence, " letter = ", letter, " row = ", row, " col = ", col, " total = ", total, " t = " , t
return t
def scansToWpm(self, i_scans, i_grnd_truth_word):
wpm = self.scansToWpmConst(i_grnd_truth_word)/ i_scans
return wpm
def scansToWpmConst(self, i_grnd_truth_word):
c = 60.0*len(i_grnd_truth_word) / (self.word_length*self.scan_delay)
return c
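    # Illustrative sketch (not part of the original class): how scans map to
    # words per minute under assumed settings. With scan_delay = 1.0 s and
    # word_length = 5, a ground-truth word of 10 characters gives
    # scansToWpmConst = 60.0 * 10 / (5 * 1.0) = 120.0, so finishing the word
    # in 60 scans yields scansToWpm = 120.0 / 60 = 2.0 wpm.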
def isTerminatingDest(self, i_dest, i_state_num, i_dest_num, i_total_states):
if i_dest[i_state_num][i_dest_num] > (i_total_states-4):
return True
return False
def isTerminatingState(self, i_state_num, i_total_states ):
if i_state_num < (i_total_states-3):
return False
return True
def isEarlyBailTestStateProbs(self, i_state_probs, i_time, i_T, i_display):
#Look at the total prob in cases where the simulation should stop (e.g., if the correct state is reached)
#Bail out if this total prob does not change much
is_bail = False
p_tmp = np.exp(i_state_probs[-3:])
p_sum = np.sum(p_tmp)
n_states = len(i_state_probs)
str_probs = self.utils.stringVector(p_tmp)
if i_display:
print "Current time %d of %d, probs=%s, prob sum=%.4f, total states=%d" % (i_time, i_T, str_probs, p_sum, n_states)
if np.abs(p_sum - 1.0) < self.early_bail_prob:
print "Ending at time %d of %d, probs=%s, prob sum=%.4f, total states=%d" % (i_time, i_T, str_probs, p_sum, n_states)
return True
return False
def itemizeResults(self, i_results):
        (min_scans, avg_scans, std_scans, min_wpm, avg_wpm, std_wpm, avg_chr_err, std_chr_err, avg_clicks, std_clicks) = i_results
return (min_scans, avg_scans, std_scans, min_wpm, avg_wpm, std_wpm, avg_chr_err, std_chr_err, avg_clicks, std_clicks)
if __name__ == "__main__":
app = GridSimulation(i_display=False)
w = "abcdefghijklmnopqrstuvwxyz_."
m = app.getMinScans(w)
print "minimum scans = " , m, " avg scans per letter = ", m / float(len(w))
|
|
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: components/auth/proto/delegation.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='components/auth/proto/delegation.proto',
package='components.auth',
syntax='proto3',
serialized_options=None,
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n&components/auth/proto/delegation.proto\x12\x0f\x63omponents.auth\"y\n\x0f\x44\x65legationToken\x12\x11\n\tsigner_id\x18\x02 \x01(\t\x12\x16\n\x0esigning_key_id\x18\x03 \x01(\t\x12\x18\n\x10pkcs1_sha256_sig\x18\x04 \x01(\x0c\x12\x1b\n\x13serialized_subtoken\x18\x05 \x01(\x0cJ\x04\x08\x01\x10\x02\"\xa0\x02\n\x08Subtoken\x12,\n\x04kind\x18\x08 \x01(\x0e\x32\x1e.components.auth.Subtoken.Kind\x12\x13\n\x0bsubtoken_id\x18\x04 \x01(\x03\x12\x1a\n\x12\x64\x65legated_identity\x18\x01 \x01(\t\x12\x1a\n\x12requestor_identity\x18\x07 \x01(\t\x12\x15\n\rcreation_time\x18\x02 \x01(\x03\x12\x19\n\x11validity_duration\x18\x03 \x01(\x05\x12\x10\n\x08\x61udience\x18\x05 \x03(\t\x12\x10\n\x08services\x18\x06 \x03(\t\x12\x0c\n\x04tags\x18\t \x03(\t\"5\n\x04Kind\x12\x10\n\x0cUNKNOWN_KIND\x10\x00\x12\x1b\n\x17\x42\x45\x41RER_DELEGATION_TOKEN\x10\x01\x62\x06proto3'
)
_SUBTOKEN_KIND = _descriptor.EnumDescriptor(
name='Kind',
full_name='components.auth.Subtoken.Kind',
filename=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
values=[
_descriptor.EnumValueDescriptor(
name='UNKNOWN_KIND', index=0, number=0,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='BEARER_DELEGATION_TOKEN', index=1, number=1,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
],
containing_type=None,
serialized_options=None,
serialized_start=418,
serialized_end=471,
)
_sym_db.RegisterEnumDescriptor(_SUBTOKEN_KIND)
_DELEGATIONTOKEN = _descriptor.Descriptor(
name='DelegationToken',
full_name='components.auth.DelegationToken',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='signer_id', full_name='components.auth.DelegationToken.signer_id', index=0,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='signing_key_id', full_name='components.auth.DelegationToken.signing_key_id', index=1,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='pkcs1_sha256_sig', full_name='components.auth.DelegationToken.pkcs1_sha256_sig', index=2,
number=4, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=b"",
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='serialized_subtoken', full_name='components.auth.DelegationToken.serialized_subtoken', index=3,
number=5, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=b"",
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=59,
serialized_end=180,
)
_SUBTOKEN = _descriptor.Descriptor(
name='Subtoken',
full_name='components.auth.Subtoken',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='kind', full_name='components.auth.Subtoken.kind', index=0,
number=8, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='subtoken_id', full_name='components.auth.Subtoken.subtoken_id', index=1,
number=4, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='delegated_identity', full_name='components.auth.Subtoken.delegated_identity', index=2,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='requestor_identity', full_name='components.auth.Subtoken.requestor_identity', index=3,
number=7, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='creation_time', full_name='components.auth.Subtoken.creation_time', index=4,
number=2, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='validity_duration', full_name='components.auth.Subtoken.validity_duration', index=5,
number=3, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='audience', full_name='components.auth.Subtoken.audience', index=6,
number=5, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='services', full_name='components.auth.Subtoken.services', index=7,
number=6, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='tags', full_name='components.auth.Subtoken.tags', index=8,
number=9, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
_SUBTOKEN_KIND,
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=183,
serialized_end=471,
)
_SUBTOKEN.fields_by_name['kind'].enum_type = _SUBTOKEN_KIND
_SUBTOKEN_KIND.containing_type = _SUBTOKEN
DESCRIPTOR.message_types_by_name['DelegationToken'] = _DELEGATIONTOKEN
DESCRIPTOR.message_types_by_name['Subtoken'] = _SUBTOKEN
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
DelegationToken = _reflection.GeneratedProtocolMessageType('DelegationToken', (_message.Message,), {
'DESCRIPTOR' : _DELEGATIONTOKEN,
'__module__' : 'components.auth.proto.delegation_pb2'
# @@protoc_insertion_point(class_scope:components.auth.DelegationToken)
})
_sym_db.RegisterMessage(DelegationToken)
Subtoken = _reflection.GeneratedProtocolMessageType('Subtoken', (_message.Message,), {
'DESCRIPTOR' : _SUBTOKEN,
'__module__' : 'components.auth.proto.delegation_pb2'
# @@protoc_insertion_point(class_scope:components.auth.Subtoken)
})
_sym_db.RegisterMessage(Subtoken)
# @@protoc_insertion_point(module_scope)
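# Illustrative sketch (not part of the generated file): building and
# serializing the generated messages. All field values below are made up.
#
#   subtoken = Subtoken(
#       kind=Subtoken.BEARER_DELEGATION_TOKEN,
#       delegated_identity='user:someone@example.com',
#       creation_time=1500000000,
#       validity_duration=3600,
#       audience=['user:*'],
#       services=['*'],
#   )
#   token = DelegationToken(serialized_subtoken=subtoken.SerializeToString())
#   wire_bytes = token.SerializeToString()
#   parsed = DelegationToken.FromString(wire_bytes)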
|
|
"""Config flow for Netatmo."""
from __future__ import annotations
import logging
import uuid
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.const import CONF_SHOW_ON_MAP
from homeassistant.core import callback
from homeassistant.data_entry_flow import FlowResult
from homeassistant.helpers import config_entry_oauth2_flow, config_validation as cv
from .const import (
CONF_AREA_NAME,
CONF_LAT_NE,
CONF_LAT_SW,
CONF_LON_NE,
CONF_LON_SW,
CONF_NEW_AREA,
CONF_PUBLIC_MODE,
CONF_UUID,
CONF_WEATHER_AREAS,
DOMAIN,
)
class NetatmoFlowHandler(
config_entry_oauth2_flow.AbstractOAuth2FlowHandler, domain=DOMAIN
):
"""Config flow to handle Netatmo OAuth2 authentication."""
DOMAIN = DOMAIN
@staticmethod
@callback
def async_get_options_flow(
config_entry: config_entries.ConfigEntry,
) -> config_entries.OptionsFlow:
"""Get the options flow for this handler."""
return NetatmoOptionsFlowHandler(config_entry)
@property
def logger(self) -> logging.Logger:
"""Return logger."""
return logging.getLogger(__name__)
@property
def extra_authorize_data(self) -> dict:
"""Extra data that needs to be appended to the authorize url."""
scopes = [
"access_camera",
"access_presence",
"read_camera",
"read_homecoach",
"read_presence",
"read_smokedetector",
"read_station",
"read_thermostat",
"write_camera",
"write_presence",
"write_thermostat",
]
return {"scope": " ".join(scopes)}
async def async_step_user(self, user_input: dict | None = None) -> FlowResult:
"""Handle a flow start."""
await self.async_set_unique_id(DOMAIN)
if self._async_current_entries():
return self.async_abort(reason="single_instance_allowed")
return await super().async_step_user(user_input)
class NetatmoOptionsFlowHandler(config_entries.OptionsFlow):
"""Handle Netatmo options."""
def __init__(self, config_entry: config_entries.ConfigEntry) -> None:
"""Initialize Netatmo options flow."""
self.config_entry = config_entry
self.options = dict(config_entry.options)
self.options.setdefault(CONF_WEATHER_AREAS, {})
async def async_step_init(self, user_input: dict | None = None) -> FlowResult:
"""Manage the Netatmo options."""
return await self.async_step_public_weather_areas()
async def async_step_public_weather_areas(
self, user_input: dict | None = None
) -> FlowResult:
"""Manage configuration of Netatmo public weather areas."""
errors: dict = {}
if user_input is not None:
new_client = user_input.pop(CONF_NEW_AREA, None)
areas = user_input.pop(CONF_WEATHER_AREAS, [])
user_input[CONF_WEATHER_AREAS] = {
area: self.options[CONF_WEATHER_AREAS][area] for area in areas
}
self.options.update(user_input)
if new_client:
return await self.async_step_public_weather(
user_input={CONF_NEW_AREA: new_client}
)
return self._create_options_entry()
weather_areas = list(self.options[CONF_WEATHER_AREAS])
data_schema = vol.Schema(
{
vol.Optional(
CONF_WEATHER_AREAS,
default=weather_areas,
): cv.multi_select({wa: None for wa in weather_areas}),
vol.Optional(CONF_NEW_AREA): str,
}
)
return self.async_show_form(
step_id="public_weather_areas",
data_schema=data_schema,
errors=errors,
)
async def async_step_public_weather(self, user_input: dict) -> FlowResult:
"""Manage configuration of Netatmo public weather sensors."""
if user_input is not None and CONF_NEW_AREA not in user_input:
self.options[CONF_WEATHER_AREAS][
user_input[CONF_AREA_NAME]
] = fix_coordinates(user_input)
self.options[CONF_WEATHER_AREAS][user_input[CONF_AREA_NAME]][
CONF_UUID
] = str(uuid.uuid4())
return await self.async_step_public_weather_areas()
orig_options = self.config_entry.options.get(CONF_WEATHER_AREAS, {}).get(
user_input[CONF_NEW_AREA], {}
)
default_longitude = self.hass.config.longitude
default_latitude = self.hass.config.latitude
default_size = 0.04
data_schema = vol.Schema(
{
vol.Optional(CONF_AREA_NAME, default=user_input[CONF_NEW_AREA]): str,
vol.Optional(
CONF_LAT_NE,
default=orig_options.get(
CONF_LAT_NE, default_latitude + default_size
),
): cv.latitude,
vol.Optional(
CONF_LON_NE,
default=orig_options.get(
CONF_LON_NE, default_longitude + default_size
),
): cv.longitude,
vol.Optional(
CONF_LAT_SW,
default=orig_options.get(
CONF_LAT_SW, default_latitude - default_size
),
): cv.latitude,
vol.Optional(
CONF_LON_SW,
default=orig_options.get(
CONF_LON_SW, default_longitude - default_size
),
): cv.longitude,
vol.Required(
CONF_PUBLIC_MODE,
default=orig_options.get(CONF_PUBLIC_MODE, "avg"),
): vol.In(["avg", "max"]),
vol.Required(
CONF_SHOW_ON_MAP,
default=orig_options.get(CONF_SHOW_ON_MAP, False),
): bool,
}
)
return self.async_show_form(step_id="public_weather", data_schema=data_schema)
def _create_options_entry(self) -> FlowResult:
"""Update config entry options."""
return self.async_create_entry(
title="Netatmo Public Weather", data=self.options
)
def fix_coordinates(user_input: dict) -> dict:
"""Fix coordinates if they don't comply with the Netatmo API."""
# Ensure coordinates have acceptable length for the Netatmo API
for coordinate in (CONF_LAT_NE, CONF_LAT_SW, CONF_LON_NE, CONF_LON_SW):
if len(str(user_input[coordinate]).split(".")[1]) < 7:
user_input[coordinate] = user_input[coordinate] + 0.0000001
# Swap coordinates if entered in wrong order
if user_input[CONF_LAT_NE] < user_input[CONF_LAT_SW]:
user_input[CONF_LAT_NE], user_input[CONF_LAT_SW] = (
user_input[CONF_LAT_SW],
user_input[CONF_LAT_NE],
)
if user_input[CONF_LON_NE] < user_input[CONF_LON_SW]:
user_input[CONF_LON_NE], user_input[CONF_LON_SW] = (
user_input[CONF_LON_SW],
user_input[CONF_LON_NE],
)
return user_input
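# Illustrative sketch (not part of the original module): fix_coordinates pads
# coordinates to the precision the Netatmo API expects and swaps NE/SW values
# entered in the wrong order. The input values below are made up.
#
#   area = {
#       CONF_LAT_NE: 52.1, CONF_LAT_SW: 52.3,   # NE/SW latitudes swapped
#       CONF_LON_NE: 13.5, CONF_LON_SW: 13.2,
#   }
#   fixed = fix_coordinates(area)
#   # fixed[CONF_LAT_NE] >= fixed[CONF_LAT_SW], and short values such as 52.1
#   # become 52.1000001 so they carry enough decimal places for the API.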
|
|
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import absolute_import, division, print_function, unicode_literals
import atexit
import errno
import os
import shutil
import stat
import tempfile
import threading
import uuid
from builtins import open
from collections import defaultdict
from contextlib import contextmanager
from future.utils import PY3
from pants.util.strutil import ensure_text
def longest_dir_prefix(path, prefixes):
"""Given a list of prefixes, return the one that is the longest prefix to the given path.
Returns None if there are no matches.
"""
longest_match, longest_prefix = 0, None
for prefix in prefixes:
if fast_relpath_optional(path, prefix) is not None and len(prefix) > longest_match:
longest_match, longest_prefix = len(prefix), prefix
return longest_prefix
def fast_relpath(path, start):
"""A prefix-based relpath, with no normalization or support for returning `..`."""
relpath = fast_relpath_optional(path, start)
if relpath is None:
raise ValueError('{} is not a directory containing {}'.format(start, path))
return relpath
def fast_relpath_optional(path, start):
"""A prefix-based relpath, with no normalization or support for returning `..`.
Returns None if `start` is not a directory-aware prefix of `path`.
"""
if len(start) == 0:
# Empty prefix.
return path
# Determine where the matchable prefix ends.
pref_end = len(start) - 1 if start[-1] == '/' else len(start)
if pref_end > len(path):
# The prefix is too long to match.
return None
elif path[:pref_end] == start[:pref_end] and (len(path) == pref_end or path[pref_end] == '/'):
# The prefix matches, and the entries are either identical, or the suffix indicates that
# the prefix is a directory.
return path[pref_end+1:]
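# Illustrative sketch (not part of the original module): expected behaviour of
# the prefix-based relpath helpers above, under hypothetical inputs.
def _fast_relpath_examples():
  # A directory-aware prefix strips the prefix plus the separator.
  assert fast_relpath_optional('a/b/c', 'a') == 'b/c'
  assert fast_relpath_optional('a/b/c', 'a/') == 'b/c'
  # A non-prefix, or a prefix that splits a path component, yields None.
  assert fast_relpath_optional('a/b/c', 'x') is None
  assert fast_relpath_optional('a/bc', 'a/b') is None
  # longest_dir_prefix picks the longest matching prefix.
  assert longest_dir_prefix('a/b/c', ['a', 'a/b', 'x']) == 'a/b'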
def safe_mkdir(directory, clean=False):
"""Ensure a directory is present.
If it's not there, create it. If it is, no-op. If clean is True, ensure the dir is empty.
:API: public
"""
if clean:
safe_rmtree(directory)
try:
os.makedirs(directory)
except OSError as e:
if e.errno != errno.EEXIST:
raise
def safe_mkdir_for(path, clean=False):
"""Ensure that the parent directory for a file is present.
If it's not there, create it. If it is, no-op.
"""
safe_mkdir(os.path.dirname(path), clean=clean)
def safe_mkdir_for_all(paths):
"""Make directories which would contain all of the passed paths.
This avoids attempting to re-make the same directories, which may be noticeably expensive if many
paths mostly fall in the same set of directories.
:param list of str paths: The paths for which containing directories should be created.
"""
created_dirs = set()
for path in paths:
dir_to_make = os.path.dirname(path)
if dir_to_make not in created_dirs:
safe_mkdir(dir_to_make)
created_dirs.add(dir_to_make)
# TODO(#6742): payload should be Union[str, bytes] in type hint syntax, but from
# https://pythonhosted.org/an_example_pypi_project/sphinx.html#full-code-example it doesn't appear
# that is possible to represent in docstring type syntax.
def safe_file_dump(filename, payload='', mode='w', makedirs=False):
"""Write a string to a file.
This method is "safe" to the extent that `safe_open` is "safe". See the explanation on the method
doc there.
When `payload` is an empty string (the default), this method can be used as a concise way to
create an empty file along with its containing directory (or truncate it if it already exists).
:param string filename: The filename of the file to write to.
:param string payload: The string to write to the file.
:param string mode: A mode argument for the python `open` builtin which should be a write mode
variant. Defaults to 'w'.
:param bool makedirs: Whether to make all parent directories of this file before making it.
"""
if makedirs:
if PY3:
os.makedirs(os.path.dirname(filename), exist_ok=True)
else:
try:
os.makedirs(os.path.dirname(filename))
except OSError as e:
        if e.errno != errno.EEXIST:
raise
with safe_open(filename, mode=mode) as f:
f.write(payload)
def maybe_read_file(filename, binary_mode=False):
"""Read and return the contents of a file in a single file.read().
:param string filename: The filename of the file to read.
:param bool binary_mode: Read from file as bytes or unicode.
  :returns: The contents of the file, or None if opening the file fails for any reason.
:rtype: string
"""
try:
return read_file(filename, binary_mode=binary_mode)
except IOError:
return None
def read_file(filename, binary_mode=False):
"""Read and return the contents of a file in a single file.read().
:param string filename: The filename of the file to read.
:param bool binary_mode: Read from file as bytes or unicode.
:returns: The contents of the file.
:rtype: string
"""
mode = 'rb' if binary_mode else 'r'
with open(filename, mode) as f:
return f.read()
def safe_walk(path, **kwargs):
"""Just like os.walk, but ensures that the returned values are unicode objects.
This isn't strictly safe, in that it is possible that some paths
will not be decodeable, but that case is rare, and the only
alternative is to somehow avoid all interaction between paths and
unicode objects, which seems especially tough in the presence of
unicode_literals. See e.g.
https://mail.python.org/pipermail/python-dev/2008-December/083856.html
:API: public
"""
# If os.walk is given a text argument, it yields text values; if it
# is given a binary argument, it yields binary values.
return os.walk(ensure_text(path), **kwargs)
class ExistingFileError(ValueError):
"""Indicates a copy operation would over-write a file with a directory."""
class ExistingDirError(ValueError):
"""Indicates a copy operation would over-write a directory with a file."""
def mergetree(src, dst, symlinks=False, ignore=None, file_filter=None):
"""Just like `shutil.copytree`, except the `dst` dir may exist.
The `src` directory will be walked and its contents copied into `dst`. If `dst` already exists the
`src` tree will be overlayed in it; ie: existing files in `dst` will be over-written with files
from `src` when they have the same subtree path.
"""
safe_mkdir(dst)
if not file_filter:
file_filter = lambda _: True
for src_path, dirnames, filenames in safe_walk(src, topdown=True, followlinks=True):
ignorenames = ()
if ignore:
to_ignore = ignore(src_path, dirnames + filenames)
if to_ignore:
ignorenames = frozenset(to_ignore)
src_relpath = os.path.relpath(src_path, src)
dst_path = os.path.join(dst, src_relpath)
visit_dirs = []
for dirname in dirnames:
if dirname in ignorenames:
continue
src_dir = os.path.join(src_path, dirname)
dst_dir = os.path.join(dst_path, dirname)
if os.path.exists(dst_dir):
if not os.path.isdir(dst_dir):
raise ExistingFileError('While copying the tree at {} to {}, encountered directory {} in '
'the source tree that already exists in the destination as a '
'non-directory.'.format(src, dst, dst_dir))
visit_dirs.append(dirname)
elif symlinks and os.path.islink(src_dir):
link = os.readlink(src_dir)
os.symlink(link, dst_dir)
# We need to halt the walk at a symlink dir; so we do not place dirname in visit_dirs
# here.
else:
os.makedirs(dst_dir)
visit_dirs.append(dirname)
# In-place mutate dirnames to halt the walk when the dir is ignored by the caller.
dirnames[:] = visit_dirs
for filename in filenames:
if filename in ignorenames:
continue
src_file_relpath = os.path.join(src_relpath, filename)
if not file_filter(src_file_relpath):
continue
dst_filename = os.path.join(dst_path, filename)
if os.path.exists(dst_filename):
if not os.path.isfile(dst_filename):
raise ExistingDirError('While copying the tree at {} to {}, encountered file {} in the '
'source tree that already exists in the destination as a non-file.'
.format(src, dst, dst_filename))
else:
os.unlink(dst_filename)
src_filename = os.path.join(src_path, filename)
if symlinks and os.path.islink(src_filename):
link = os.readlink(src_filename)
os.symlink(link, dst_filename)
else:
shutil.copy2(src_filename, dst_filename)
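# Illustrative sketch (not part of the original module): overlaying one tree on
# another with mergetree. The paths and filter below are hypothetical.
#
#   mergetree('build/generated', 'dist/site',
#             file_filter=lambda relpath: not relpath.endswith('.tmp'))
#
# Files already present in dist/site are overwritten when the same relative
# path exists under build/generated; everything else in dist/site is left alone.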
_MKDTEMP_CLEANER = None
_MKDTEMP_DIRS = defaultdict(set)
_MKDTEMP_LOCK = threading.RLock()
def _mkdtemp_atexit_cleaner():
for td in _MKDTEMP_DIRS.pop(os.getpid(), []):
safe_rmtree(td)
def _mkdtemp_unregister_cleaner():
global _MKDTEMP_CLEANER
_MKDTEMP_CLEANER = None
def _mkdtemp_register_cleaner(cleaner):
global _MKDTEMP_CLEANER
if not cleaner:
return
assert callable(cleaner)
if _MKDTEMP_CLEANER is None:
atexit.register(cleaner)
_MKDTEMP_CLEANER = cleaner
def safe_mkdtemp(cleaner=_mkdtemp_atexit_cleaner, **kw):
"""Create a temporary directory that is cleaned up on process exit.
Arguments are as to tempfile.mkdtemp.
:API: public
"""
# Proper lock sanitation on fork [issue 6721] would be desirable here.
with _MKDTEMP_LOCK:
return register_rmtree(tempfile.mkdtemp(**kw), cleaner=cleaner)
def register_rmtree(directory, cleaner=_mkdtemp_atexit_cleaner):
"""Register an existing directory to be cleaned up at process exit."""
with _MKDTEMP_LOCK:
_mkdtemp_register_cleaner(cleaner)
_MKDTEMP_DIRS[os.getpid()].add(directory)
return directory
def safe_rmtree(directory):
"""Delete a directory if it's present. If it's not present, no-op.
Note that if the directory argument is a symlink, only the symlink will
be deleted.
:API: public
"""
if os.path.islink(directory):
safe_delete(directory)
else:
shutil.rmtree(directory, ignore_errors=True)
def safe_open(filename, *args, **kwargs):
"""Open a file safely, ensuring that its directory exists.
:API: public
"""
safe_mkdir_for(filename)
return open(filename, *args, **kwargs)
def safe_delete(filename):
"""Delete a file safely. If it's not present, no-op."""
try:
os.unlink(filename)
except OSError as e:
if e.errno != errno.ENOENT:
raise
def safe_concurrent_rename(src, dst):
"""Rename src to dst, ignoring errors due to dst already existing.
Useful when concurrent processes may attempt to create dst, and it doesn't matter who wins.
"""
# Delete dst, in case it existed (with old content) even before any concurrent processes
# attempted this write. This ensures that at least one process writes the new content.
if os.path.isdir(src): # Note that dst may not exist, so we test for the type of src.
safe_rmtree(dst)
else:
safe_delete(dst)
try:
shutil.move(src, dst)
except IOError as e:
if e.errno != errno.EEXIST:
raise
def safe_rm_oldest_items_in_dir(root_dir, num_of_items_to_keep, excludes=frozenset()):
"""
  Keep the `num_of_items_to_keep` most recently modified items in `root_dir` (besides those in `excludes`) and remove the rest.
:param root_dir: the folder to examine
:param num_of_items_to_keep: number of files/folders/symlinks to keep after the cleanup
:param excludes: absolute paths excluded from removal (must be prefixed with `root_dir`)
:return: none
"""
if os.path.isdir(root_dir):
found_files = []
for old_file in os.listdir(root_dir):
full_path = os.path.join(root_dir, old_file)
if full_path not in excludes:
found_files.append((full_path, os.path.getmtime(full_path)))
found_files = sorted(found_files, key=lambda x: x[1], reverse=True)
for cur_file, _ in found_files[num_of_items_to_keep:]:
rm_rf(cur_file)
@contextmanager
def safe_concurrent_creation(target_path):
"""A contextmanager that yields a temporary path and renames it to a final target path when the
contextmanager exits.
Useful when concurrent processes may attempt to create a file, and it doesn't matter who wins.
:param target_path: The final target path to rename the temporary path to.
:yields: A temporary path containing the original path with a unique (uuid4) suffix.
"""
safe_mkdir_for(target_path)
tmp_path = '{}.tmp.{}'.format(target_path, uuid.uuid4().hex)
try:
yield tmp_path
except Exception:
rm_rf(tmp_path)
raise
else:
if os.path.exists(tmp_path):
safe_concurrent_rename(tmp_path, target_path)
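# Illustrative sketch (not part of the original module): writing a file so that
# concurrent processes never observe a partially written result. The target
# path below is hypothetical.
#
#   with safe_concurrent_creation('/tmp/cache/result.json') as tmp:
#     with open(tmp, 'w') as f:
#       f.write('{"ok": true}')
#   # On success the temporary file is renamed over the target; on an
#   # exception it is removed and the target is left untouched.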
def chmod_plus_x(path):
"""Equivalent of unix `chmod a+x path`"""
path_mode = os.stat(path).st_mode
path_mode &= int('777', 8)
if path_mode & stat.S_IRUSR:
path_mode |= stat.S_IXUSR
if path_mode & stat.S_IRGRP:
path_mode |= stat.S_IXGRP
if path_mode & stat.S_IROTH:
path_mode |= stat.S_IXOTH
os.chmod(path, path_mode)
def absolute_symlink(source_path, target_path):
"""Create a symlink at target pointing to source using the absolute path.
:param source_path: Absolute path to source file
:param target_path: Absolute path to intended symlink
:raises ValueError if source_path or link_path are not unique, absolute paths
:raises OSError on failure UNLESS file already exists or no such file/directory
"""
if not os.path.isabs(source_path):
raise ValueError("Path for source : {} must be absolute".format(source_path))
if not os.path.isabs(target_path):
raise ValueError("Path for link : {} must be absolute".format(target_path))
if source_path == target_path:
raise ValueError("Path for link is identical to source : {}".format(source_path))
try:
if os.path.lexists(target_path):
if os.path.islink(target_path) or os.path.isfile(target_path):
os.unlink(target_path)
else:
shutil.rmtree(target_path)
safe_mkdir_for(target_path)
os.symlink(source_path, target_path)
except OSError as e:
# Another run may beat us to deletion or creation.
if not (e.errno == errno.EEXIST or e.errno == errno.ENOENT):
raise
def relative_symlink(source_path, link_path):
"""Create a symlink at link_path pointing to relative source
:param source_path: Absolute path to source file
:param link_path: Absolute path to intended symlink
:raises ValueError if source_path or link_path are not unique, absolute paths
:raises OSError on failure UNLESS file already exists or no such file/directory
"""
if not os.path.isabs(source_path):
raise ValueError("Path for source:{} must be absolute".format(source_path))
if not os.path.isabs(link_path):
raise ValueError("Path for link:{} must be absolute".format(link_path))
if source_path == link_path:
raise ValueError("Path for link is identical to source:{}".format(source_path))
# The failure state below had a long life as an uncaught error. No behavior was changed here, it just adds a catch.
# Raising an exception does differ from absolute_symlink, which takes the liberty of deleting existing directories.
if os.path.isdir(link_path) and not os.path.islink(link_path):
raise ValueError("Path for link would overwrite an existing directory: {}".format(link_path))
try:
if os.path.lexists(link_path):
os.unlink(link_path)
rel_path = os.path.relpath(source_path, os.path.dirname(link_path))
safe_mkdir_for(link_path)
os.symlink(rel_path, link_path)
except OSError as e:
# Another run may beat us to deletion or creation.
if not (e.errno == errno.EEXIST or e.errno == errno.ENOENT):
raise
def relativize_path(path, rootdir):
"""
:API: public
"""
# Note that we can't test for length and return the shorter of the two, because we need these
# paths to be stable across systems (e.g., because they get embedded in analysis files),
# and this choice might be inconsistent across systems. So we assume the relpath is always
# shorter. We relativize because of a known case of very long full path prefixes on Mesos,
# so this seems like the right heuristic.
# Note also that we mustn't call realpath on the path - we need to preserve the symlink structure.
return os.path.relpath(path, rootdir)
# When running pants under mesos/aurora, the sandbox pathname can be very long. Since it gets
# prepended to most components in the classpath (some from ivy, the rest from the build),
# in some runs the classpath gets too big and exceeds ARG_MAX.
# We prevent this by using paths relative to the current working directory.
def relativize_paths(paths, rootdir):
return [relativize_path(path, rootdir) for path in paths]
def touch(path, times=None):
"""Equivalent of unix `touch path`.
:API: public
:path: The file to touch.
:times Either a tuple of (atime, mtime) or else a single time to use for both. If not
specified both atime and mtime are updated to the current time.
"""
  if times:
    if isinstance(times, (int, float)):
      # A single time value: use it for both atime and mtime.
      times = (times, times)
    elif len(times) > 2:
      raise ValueError('times must either be a tuple of (atime, mtime) or else a single time value '
                       'to use for both.')
    elif len(times) == 1:
      times = (times[0], times[0])
with safe_open(path, 'a'):
os.utime(path, times)
def recursive_dirname(f):
"""Given a relative path like 'a/b/c/d', yield all ascending path components like:
'a/b/c/d'
'a/b/c'
'a/b'
'a'
''
"""
prev = None
while f != prev:
yield f
prev = f
f = os.path.dirname(f)
yield ''
def get_basedir(path):
"""Returns the base directory of a path.
Examples:
get_basedir('foo/bar/baz') --> 'foo'
get_basedir('/foo/bar/baz') --> ''
get_basedir('foo') --> 'foo'
"""
return path[:path.index(os.sep)] if os.sep in path else path
def rm_rf(name):
"""Remove a file or a directory similarly to running `rm -rf <name>` in a UNIX shell.
:param str name: the name of the file or directory to remove.
:raises: OSError on error.
"""
if not os.path.exists(name):
return
try:
# Avoid using safe_rmtree so we can detect failures.
shutil.rmtree(name)
except OSError as e:
if e.errno == errno.ENOTDIR:
# 'Not a directory', but a file. Attempt to os.unlink the file, raising OSError on failure.
safe_delete(name)
elif e.errno != errno.ENOENT:
# Pass on 'No such file or directory', otherwise re-raise OSError to surface perm issues etc.
raise
def is_executable(path):
"""Returns whether a path names an existing executable file."""
return os.path.isfile(path) and os.access(path, os.X_OK)
def split_basename_and_dirname(path):
if not os.path.isfile(path):
raise ValueError("{} does not exist or is not a regular file.".format(path))
return (os.path.dirname(path), os.path.basename(path))
def check_no_overlapping_paths(paths):
"""Given a list of paths, ensure that all are unique and do not have the same prefix."""
for path in paths:
list_copy_without_path = list(paths)
list_copy_without_path.remove(path)
if path in list_copy_without_path:
raise ValueError('{} appeared more than once. All paths must be unique.'.format(path))
for p in list_copy_without_path:
if path in p:
raise ValueError('{} and {} have the same prefix. All paths must be unique and cannot overlap.'.format(path, p))
def is_readable_dir(path):
"""Returns whether a path names an existing directory we can list and read files from."""
return os.path.isdir(path) and os.access(path, os.R_OK) and os.access(path, os.X_OK)
def is_writable_dir(path):
"""Returns whether a path names an existing directory that we can create and modify files in.
We call is_readable_dir(), so this definition of "writable" is a superset of that.
"""
return is_readable_dir(path) and os.access(path, os.W_OK)
|
|
import numpy as np
import inspect
from copy import copy, deepcopy
from itertools import chain
from scipy.optimize import newton, root
from .exceptions import OverDefinedSystem, UnderDefinedSystem
from warnings import warn
from .impeller import Impeller
from .point import Point
from .state import State
__all__ = ['Stream', 'Component', 'Mixer', 'Tee', 'Valve',
'Parameter', 'Compressor', 'ConvergenceBlock']
##################################################
# Helper functions
##################################################
def automatic_docstring(func):
"""Decorator that will automatically generate docstrings."""
doc = 'Options: \n'
sig = inspect.signature(func).parameters
options = sig['options'].default
for k, v in options.items():
line = f'{k}: {v} \n'
doc += line
def wrapped(option):
        return func(option)
wrapped.__doc__ = doc
return wrapped
class Parameter:
"""Parameter class.
This class is used to create objects that hold configuration
parameters.
"""
def __init__(self, values):
"""
Parameters
----------
values : list
List with options of for the parameter.
Examples
--------
self.pressure_assignment = Parameter(['Equalize All',
'Set Outlet to Lowest Inlet'])
"""
self.values = values
try:
self.current_value = values[0]
except TypeError:
self.current_value = values
kwargs = {k: v for k, v in enumerate(values)}
@automatic_docstring
def set_to(options=kwargs):
self.current_value = self.values[options]
self.set_to = set_to
def __repr__(self):
return str(self.current_value)
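# Illustrative sketch (not part of the original module): Parameter stores a
# list of options and exposes set_to(index) to switch between them.
#
#   assignment = Parameter(['Equalize All', 'Set Outlet to Lowest Inlet'])
#   repr(assignment)      # -> 'Equalize All' (the first option is the default)
#   assignment.set_to(1)  # -> current_value becomes 'Set Outlet to Lowest Inlet'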
##################################################
# Streams
##################################################
class Stream:
"""Material Stream."""
def __init__(self, name, state=None, flow_m=None):
"""
A material stream has a flow and a thermodynamic state.
Parameters
----------
name : str
Stream name.
state : prf.State
Thermodynamic state.
flow_m : float
Mass flow
Examples
--------
state0 = prf.State.define(p=1e6, T=300, fluid='CO2')
stream0 = prf.Stream('s0', state=state0, flow_m=1)
"""
self.name = name
self.state = state
self._flow_m = flow_m
self.linked_stream = None
@property
def flow_m(self):
if self.linked_stream is None:
return self._flow_m
else:
return self.linked_stream.flow_m
@flow_m.setter
def flow_m(self, value):
"""Constrain the mass flow to a value.
This function should be used during setup of a unit
to constrain the flow mass value of a stream.
"""
if isinstance(value, Stream):
self._flow_m = value.flow_m
self.linked_stream = value
else:
self._flow_m = value
def break_link(self):
self.linked_stream = None
self.flow_m = None
def __repr__(self):
return f'\n' \
f'Stream: {self.name} - \n Flow: {self.flow_m} kg/s - {self.state.__repr__()}'
def __eq__(self, other):
eq_flow = (self.flow_m == other.flow_m)
eq_state = np.array(
(np.allclose(self.state.p(), other.state.p()),
np.allclose(self.state.T(), other.state.T()),
np.allclose(self.state.molar_mass(), other.state.molar_mass())))
return eq_flow and eq_state.all()
def __deepcopy__(self, memo):
cls = self.__class__
result = cls.__new__(cls)
memo[id(self)] = result
for k, v in self.__dict__.items():
setattr(result, k, deepcopy(v, memo))
return result
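# Illustrative sketch (not part of the original module): assigning another
# Stream to flow_m links the two streams, so the mass flow follows the source.
# The state values below are made up.
#
#   suc = Stream('suc', state=State.define(p=1e6, T=300, fluid='CO2'), flow_m=1)
#   disch = Stream('disch', state=State.define(p=5e6, T=400, fluid='CO2'))
#   disch.flow_m = suc      # link: disch.flow_m now reads 1 from suc
#   disch.break_link()      # unlink: disch.flow_m is None again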
##################################################
# Components
##################################################
class Component:
"""Component (unit)."""
def __init__(self, name=None):
"""
Parameters
----------
name : str
Component name.
"""
self.name = name
self.inputs = None
self.outputs = None
self.connections = None
# unknowns
self.unks = []
self.x0 = []
def link(self, inputs, outputs):
"""Connects the components inputs and outputs."""
self.inputs = inputs
self.outputs = outputs
self.connections = list(chain(self.inputs, self.outputs))
def mass_balance(self):
"""Calculates the mass balance for the component."""
input_mass = sum((inp.flow_m for inp in self.inputs))
output_mass = sum((out.flow_m for out in self.outputs))
return input_mass - output_mass
def energy_balance(self):
"""Calculates the energy balance for the component."""
input_energy = 0
for inp in self.inputs:
inp_energy = inp.flow_m * inp.state.hmass()
input_energy += inp_energy
output_energy = 0
for out in self.outputs:
out_energy = out.flow_m * out.state.hmass()
output_energy += out_energy
input_mass = sum((inp.flow_m for inp in self.inputs))
output_mass = sum((out.flow_m for out in self.outputs))
return (input_energy / input_mass) - (output_energy / output_mass)
def setup(self):
"""Setup constraints."""
def balance(self, x):
"""Update each stream with iteration value."""
for i, unk in enumerate(self.unks):
s_name, prop = unk.split('_', maxsplit=1)
for con in self.connections:
if con.name == s_name:
if prop == 'flow_m':
con.flow_m = x[i]
else:
con.state.setup_args[prop] = x[i]
props = {k: v for k, v in con.state.setup_args.items() if v is not None}
if len(props) == 2:
try:
con.state.update2(**props)
except ValueError:
# if refprop does not converge, try CP's HEOS
heos_state = State.define(**props, fluid=con.state.fluid_dict(), EOS='HEOS')
heos_state.setup_args = con.state.setup_args
con.state = heos_state
y = np.zeros_like(x)
if len(self.unks) == 1:
if 'flow_m' in self.unks[0]:
y[0] = self.mass_balance()
else:
y[0] = self.energy_balance()
else:
y[0] = self.mass_balance()
y[1] = self.energy_balance()
return y
def check_consistency(self):
"""Check component consistency"""
if len(self.unks) < 2:
raise OverDefinedSystem(f'Unit {self.name} is over defined. Unknowns : {self.unks}')
elif len(self.unks) > 2:
            raise UnderDefinedSystem(f'Unit {self.name} is under defined.'
                                     f' Unknowns : {self.unks}')
def set_x0(self):
"""Set initial values for convergence."""
x0 = self.x0
for unk in self.unks:
if unk[-1] == 'p':
x0.append(100000.)
if unk[-1] == 'T':
x0.append(300.)
if unk[-1] == 'm':
x0.append(1.)
def run(self):
"""Run the unit and calculate unknowns."""
# apply constraints
self.setup()
# check all unknowns
for con in self.connections:
if con.flow_m is None \
and con.linked_stream is None\
and (con.name + '_flow_m') not in self.unks:
self.unks.append(con.name + '_flow_m')
if con.state.not_defined:
if con.state.setup_args['p'] is None\
and (con.name + '_p') not in self.unks:
self.unks.append(con.name + '_p')
if con.state.setup_args['T'] is None\
and (con.name + '_T') not in self.unks:
self.unks.append(con.name + '_T')
self.check_consistency()
self.set_x0()
if len(self.unks) == 0:
            if not np.allclose(self.balance([0, 0]), np.array([0, 0])):
                raise OverDefinedSystem(f'System is over defined and '
                                        f'not balanced {self.balance([0, 0])}')
else:
root(self.balance, self.x0)
def is_solved(self):
for con in self.connections:
if con.flow_m is None:
return False
elif con.state.not_defined():
return False
return True
def __repr__(self):
return f'Inputs: \n {self.inputs} \n' \
f'Outputs: \n {self.outputs} \n'
class Mixer(Component):
def __init__(self, name):
"""A mixer.
A mixer has inputs and outputs streams.
Undetermined stream values are solved through balance
of mass and energy.
"""
self.pressure_assignment = Parameter(['Equalize All',
'Set Outlet to Lowest Inlet'])
super().__init__(name)
def setup(self):
pressure = []
for con in self.connections:
if con.state.init_args['p'] is not None:
pressure.append(con.state.init_args['p'])
if self.pressure_assignment.current_value == 'Equalize All':
for con in self.connections:
if con.state.init_args['p'] is None:
con.state.setup_args['p'] = pressure[0]
if len(pressure) > 1:
raise OverDefinedSystem('System is over determined for '
'"Equalize All"')
if self.pressure_assignment.current_value == 'Set Outlet to Lowest Inlet':
for con in self.connections:
if con.state.init_args['p'] is None:
con.state.setup_args['p'] = min(pressure)
class Tee(Component):
"""Tee operation.
Splits feed stream into multiple outputs with the same conditions and
composition.
"""
def setup(self):
pressure = {}
temperature = {}
for con in self.connections:
if con.state.init_args['p'] is not None:
pressure[f'{con.name}_p'] = con.state.init_args['p']
if con.state.init_args['T'] is not None:
temperature[f'{con.name}_T'] = con.state.init_args['T']
if len({i for i in pressure.values()}) > 1:
raise OverDefinedSystem(f'System is over defined {pressure}')
if len({i for i in temperature.values()}) > 1:
raise OverDefinedSystem(f'System is over defined {temperature}')
for con in self.connections:
con.state.setup_args['p'] = [i for i in pressure.values()][0]
con.state.setup_args['T'] = [i for i in temperature.values()][0]
def check_consistency(self):
pass
class Valve(Component):
"""Simple valve.
Valve that will give an isenthalpic expansion.
"""
def __init__(self, name, cv=None, v_open=0.5):
"""
Parameters
----------
name : str
Valve name
cv : float
Valve cv.
v_open : float
Valve opening.
"""
self.cv = cv
self.v_open = v_open
self.init_cv = deepcopy(cv)
super().__init__(name)
def setup(self):
# same input and output mass
inp = self.inputs[0]
out = self.outputs[0]
# constrain mass
if out.flow_m is None:
out.flow_m = inp
elif inp.flow_m is None:
inp.flow_m = out
else:
if out.flow_m != inp.flow_m:
raise OverDefinedSystem(f'Different mass for {inp} and {out}')
def calc_cv(self):
"""Calculate cv based on simple resistance equation."""
m = self.inputs[0].flow_m
v_open = self.v_open
dP = self.inputs[0].state.p() - self.outputs[0].state.p()
rho = self.inputs[0].state.rhomass()
cv = self.cv
return cv - m / np.sqrt(v_open * dP * rho)
def balance(self, x):
for i, unk in enumerate(self.unks):
s_name, prop = unk.split('_', maxsplit=1)
if s_name == 'valve':
self.cv = x[i]
for con in self.connections:
if con.name == s_name:
if prop == 'flow_m':
con.flow_m = x[i]
else:
con.state.setup_args[prop] = x[i]
props = {k: v for k, v in con.state.setup_args.items() if v is not None}
if len(props) == 2:
try:
con.state.update2(**props)
except ValueError:
# if refprop does not converge, try CP's HEOS
heos_state = State.define(**props, fluid=con.state.fluid_dict(), EOS='HEOS')
heos_state.setup_args = con.state.setup_args
con.state = heos_state
        y = np.zeros_like(x, dtype=float)
# mass balance is already satisfied for a valve
y[0] = self.energy_balance()
y[1] = self.calc_cv()
return y
def run(self):
if self.cv is None:
self.unks.append('valve_cv')
self.x0.append(0.8)
super().run()
class Compressor(Component):
"""Compressor."""
def __init__(self, name, impeller=None, speed=None, flow_m=None, b=None, D=None):
"""
# TODO change this to impeller or and create compressor class
Parameters
----------
name : str
Compressor name.
impeller : prf.Impeller
Impeller object.
speed : float
Impeller speed.
flow_m : float
Mass flow.
b : float
Impeller width
D : float
Impeller diameter.
"""
self.name = name
self.init_impeller = impeller
self.impeller = None
self.speed = speed
self.b = b
self.D = D
super().__init__(name)
def setup(self):
# same input and output mass
inp = self.inputs[0]
out = self.outputs[0]
# constrain mass
if out.flow_m is None:
out.flow_m = inp
elif inp.flow_m is None:
inp.flow_m = out
else:
if out.flow_m != inp.flow_m:
raise OverDefinedSystem(f'Different mass for {inp} and {out}')
def check_consistency(self):
pass
def run(self):
inp = self.inputs[0]
out = self.outputs[0]
if self.init_impeller is None:
point = Point(speed=self.speed, flow_m=inp.flow_m, suc=inp.state, disch=out.state)
self.impeller = Impeller(point, b=self.b, D=self.D)
else:
self.impeller.suc = inp.state
out.state.update2(p=self.impeller.disch.p(), T=self.impeller.disch.T())
class ConvergenceBlock(Component):
"""Convergence block."""
def __init__(self, stream, units):
"""
A convergence block is used to converge the recycles.
The selected stream is split in two streams (sc0 and sc1).
A guess is made for one of the streams (sc0). Units are calculated
and the values for the other stream (sc1) are compared with the
guessed stream (sc0).
Parameters
----------
stream : str
Name of the stream that will be used for iteration.
units : list
List with prf.Component objects
"""
self.stream = stream
self.units = units
self.units0 = deepcopy(units)
self.converged_units = None
# convergence information
self.tolerance = 0.1
self.iter = 0
self.convergence_info = None
self.converged = False
self.y0 = None
self.y1 = None
super().__init__('ConvBlock')
def setup(self):
# create convergence block
sc = 0
for unit in self.units0:
new_inputs = []
for i, inp in enumerate(unit.inputs):
if inp.name == self.stream:
new_inp = deepcopy(inp)
new_inp.name = f'sc{sc}'
sc += 1
new_inputs.append(new_inp)
else:
new_inputs.append(inp)
new_outputs = []
for i, out in enumerate(unit.outputs):
if out.name == self.stream:
new_out = deepcopy(out)
new_out.name = f'sc{sc}'
sc += 1
new_outputs.append(new_out)
else:
new_outputs.append(out)
unit.link(inputs=new_inputs, outputs=new_outputs)
for unit in self.units0:
for con in unit.connections:
if 'sc' in con.name:
setattr(self, con.name, con)
def balance(self, x):
# store units that are not solved and go forward
not_solved_units = []
for unit in self.units0:
for con in unit.connections:
if con.name == 'sc0':
con.flow_m = x[0]
con.state.setup_args['T'] = x[1]
props = {k: v for k, v in
con.state.setup_args.items() if v is not None}
if len(props) == 2:
try:
con.state.update2(**props)
except ValueError:
# if refprop does not converge, try CP's HEOS
heos_state = State.define(
**props, fluid=con.state.fluid_dict(), EOS='HEOS')
heos_state.setup_args = con.state.setup_args
con.state = heos_state
if con.name == 'sc1':
con.state.setup_args['p'] = self.sc0.state.setup_args['p']
props = {k: v for k, v in
con.state.setup_args.items() if v is not None}
if len(props) == 2:
try:
con.state.update2(**props)
except ValueError:
# if refprop does not converge, try CP's HEOS
heos_state = State.define(
**props, fluid=con.state.fluid_dict(), EOS='HEOS')
heos_state.setup_args = con.state.setup_args
con.state = heos_state
# if cv is given, calculate mass
if isinstance(unit, Valve):
if unit.init_cv is not None:
# break connections
for con in unit.connections:
con.break_link()
unit.run()
if not unit.is_solved():
not_solved_units.append(unit)
# try to go back and run unsolved units
for unit in not_solved_units:
unit.run()
y = np.zeros_like(x)
for unit in self.units0:
for con in unit.connections:
if con.name == 'sc1':
y[0] = con.flow_m - x[0]
y[1] = con.state.T() - x[1]
return y
def run(self):
self.setup()
for unit in self.units0:
unit.setup()
for con in unit.connections:
try:
con.state.update_from_setup_args()
except KeyError:
continue
self.convergence_info = root(self.balance, [0.8, 300])
for unit in self.units0:
for con in unit.connections:
if 'sc' in con.name:
con.name = self.stream
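# Illustrative sketch (not part of the original module): converging a recycle
# by iterating on one tear stream. The unit and stream names below are made up.
#
#   conv = ConvergenceBlock('recycle', units=[mixer, compressor, valve])
#   conv.run()
#   conv.convergence_info   # scipy.optimize root() result for the tear stream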
|
|
import logging
import math
import requests
import json
import traceback
from idigbio import util
try:
# Python 2
from urllib import urlencode
except:
# Python 3
from urllib.parse import urlencode
global_disable_images = False
try:
import PIL.Image as Image
except:
global_disable_images = True
try:
# Python 2 C
from cStringIO import StringIO as io_ify
except:
try:
# Python 2 native
from StringIO import StringIO as io_ify
except:
# Python 3
from io import BytesIO as io_ify
log = logging.getLogger(__name__)
FIELDS_EXCLUDE_DEFAULT = ['data.*']
def level_dic():
'''
http://wiki.openstreetmap.org/wiki/Zoom_levels
'''
# return data
data = {0: 360.0,
1: 180.0,
2: 90.0,
3: 45.0,
4: 22.5,
5: 11.25,
6: 5.625,
7: 2.813,
8: 1.406,
9: 0.703,
10: 0.352,
11: 0.176,
12: 0.088,
13: 0.044,
14: 0.022,
15: 0.011,
16: 0.005,
17: 0.003,
18: 0.001,
19: 0.0005}
return data
def getzoom(min_lon, max_lon, min_lat, max_lat):
data = level_dic() # our presets
r = 4
dne = max(round(max_lat - min_lat, r),
round(max_lon - min_lon, r)) # ne: North East point
mylist = [round(i, r) for i in data.values()] + [dne]
new = sorted(mylist, reverse=True)
return new.index(dne)
def deg2num(lat_deg, lon_deg, zoom):
lat_rad = math.radians(lat_deg)
n = 2.0 ** zoom
xtile = int((lon_deg + 180.0) / 360.0 * n)
ytile = int((1.0 - math.log(math.tan(lat_rad) + (1 / math.cos(lat_rad))) / math.pi) / 2.0 * n)
return (xtile, ytile)
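# Illustrative sketch (not part of the original module): worked values for the
# slippy-map helpers above, using made-up coordinates.
def _tile_math_examples():
    # At zoom 1 the world is a 2x2 tile grid; (0, 0) falls in tile (1, 1).
    assert deg2num(0.0, 0.0, 1) == (1, 1)
    # A bounding box spanning 45 degrees of longitude matches the zoom level
    # whose tile width is 45 degrees (zoom 3 in level_dic()).
    assert getzoom(0.0, 45.0, 0.0, 10.0) == 3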
class BadEnvException(Exception):
pass
class MapNotCreatedException(Exception):
pass
class ImagesDisabledException(Exception):
pass
def make_session(user=None, password=None):
import idigbio
s = requests.Session()
if user and password:
s.auth = (user, password)
s.headers["User-Agent"] = "idigbio-python-client/" + idigbio.__version__
return s
class iDigBioMap(object):
def __init__(self, api, rq={}, style=None, t="auto", disable_images=False):
self.__api = api
self._disable_images = disable_images or global_disable_images
self._map_def = self.__api._api_post(
"/v2/mapping", rq=rq, style=style, type=t)
if self._map_def is None:
raise MapNotCreatedException()
self._short_code = self._map_def["shortCode"]
self._tiles = self._map_def["tiles"]
def definition(self):
return self.__api._api_get("/v2/mapping/{0}".format(self._short_code))
def json_tile(self, z, x, y):
return self.__api._api_get(
"/v2/mapping/{0}/{1}/{2}/{3}.json".format(
self._short_code, z, x, y))
def utf8grid_tile(self, z, x, y):
return self.__api._api_get(
"/v2/mapping/{0}/{1}/{2}/{3}.grid.json".format(
self._short_code, z, x, y))
def png_tile(self, z, x, y):
if self._disable_images:
raise ImagesDisabledException()
tile = self.__api._api_get(
"/v2/mapping/{0}/{1}/{2}/{3}.png".format(
self._short_code, z, x, y), raw=True)
if tile is None:
return None
else:
return Image.open(io_ify(tile))
def points(self, lat, lon, zoom, sort=None, limit=100, offset=None):
return self.__api._api_get(
"/v2/mapping/{0}/points".format(self._short_code),
lat=lat, lon=lon, zoom=zoom, sort=sort, limit=limit, offset=offset)
def save_map_image(self, filename, zoom, bbox=None):
x_tiles = None
y_tiles = None
if zoom is None and bbox is not None:
zoom = getzoom(
bbox["bottom_right"]["lat"],
bbox["top_left"]["lat"],
bbox["top_left"]["lon"],
bbox["bottom_right"]["lon"]
)
if bbox is not None:
top_left_tile = deg2num(
bbox["top_left"]["lat"],
bbox["top_left"]["lon"],
zoom
)
bottom_right_tile = deg2num(
bbox["bottom_right"]["lat"],
bbox["bottom_right"]["lon"],
zoom
)
x_tiles = range(top_left_tile[0], bottom_right_tile[0]+1)
y_tiles = range(top_left_tile[1], bottom_right_tile[1]+1)
if x_tiles is None:
x_tiles = range(0, 2**zoom)
if y_tiles is None:
y_tiles = range(0, 2**zoom)
s = make_session()
if self._disable_images:
raise ImagesDisabledException()
im = Image.new("RGB", (len(x_tiles) * 256, len(y_tiles) * 256))
x_tile_count = 0
for x in x_tiles:
y_tile_count = 0
for y in y_tiles:
r = s.get(
"http://b.tile.openstreetmap.org/{z}/{x}/{y}.png".format(
z=zoom, x=x, y=y))
r.raise_for_status()
bim = Image.open(io_ify(r.content))
tim = self.png_tile(zoom, x, y)
im.paste(bim, (x_tile_count * 256, y_tile_count * 256))
im.paste(tim, (x_tile_count * 256, y_tile_count * 256), tim)
y_tile_count += 1
x_tile_count += 1
im.save("{0}.png".format(filename), "PNG")
s.close()
class iDbApiJson(object):
""" iDigBio Search API Json Client """
def __init__(self, env="prod", retries=3, user=None, password=None):
"""
        env: Which environment to use. Defaults to "prod".
"""
self.retries = retries
if env == "prod":
self._api_urls = {
"base": "https://search.idigbio.org",
"/v2/media": "https://api.idigbio.org",
"/v2/download": "https://api.idigbio.org"
}
elif env == "beta":
self._api_urls = {
"base": "https://beta-search.idigbio.org",
"/v2/media": "https://beta-api.idigbio.org",
"/v2/download": "https://beta-api.idigbio.org"
}
elif env == "dev":
self._api_urls = {
"base": "https://localhost:19196",
"/v2/media": "http://localhost:19197",
"/v2/download": "http://localhost:19197"
}
else:
raise BadEnvException()
self.s = make_session(user=user, password=password)
def __del__(self):
self.s.close()
def _api_get(self, slug, **kwargs):
retries = self.retries
raw = kwargs.pop('raw', False)
api_url = self._api_urls.get(slug, self._api_urls.get("base"))
for arg in list(kwargs):
if isinstance(kwargs[arg], (dict, list)):
kwargs[arg] = json.dumps(kwargs[arg])
elif kwargs[arg] is None:
del kwargs[arg]
qs = urlencode(kwargs)
while retries > 0:
try:
log.debug("Querying: %r", api_url + slug + "?" + qs)
r = self.s.get(api_url + slug + "?" + qs)
r.raise_for_status()
if raw:
return r.content
else:
return r.json()
except:
                log.debug(traceback.format_exc())
retries -= 1
return None
def _api_post(self, slug, **kwargs):
retries = self.retries
raw = kwargs.pop('raw', False)
files = kwargs.pop('files', None)
params = kwargs.pop('params', None)
api_url = self._api_urls.get(slug, self._api_urls.get("base"))
for arg in list(kwargs):
if kwargs[arg] is None:
del kwargs[arg]
while retries > 0:
try:
body = json.dumps(kwargs)
if files is None:
log.debug("POSTing: %r\n%s", slug, body)
r = self.s.post(
api_url + slug,
data=json.dumps(kwargs),
params=params,
headers={"Content-Type": "application/json"}
)
else:
# you must seek the file before sending,
# especially on the retry loop
for k in files:
files[k].seek(0)
log.debug("POSTing + Files: %r\n%s", slug, body)
r = self.s.post(
api_url + slug,
data=kwargs,
files=files,
params=params
)
r.raise_for_status()
if raw:
return r.content
else:
return r.json()
except:
log.exception("Error posting: %r %r", slug, params)
retries -= 1
return None
def view(self, t, uuid):
"""
t: the type to view. Supported types: records, media (mediarecords), recordsets, publishers
uuid: the uuid to view.
"""
return self._api_get("/v2/view/{0}/{1}".format(t, uuid))
def search_records(self, rq={}, limit=100, offset=0, sort=None,
fields=None, fields_exclude=FIELDS_EXCLUDE_DEFAULT):
"""
rq Search Query in iDigBio Query Format, using Record Query Fields
sort field to sort on, pick from Record Query Fields
fields a list of fields to return, specified using the fieldName parameter from Fields with type records
fields_exclude a list of fields to exclude, specified using the fieldName parameter from Fields with type records
limit max results
offset skip results
Returns idigbio record format (legacy api), plus additional top level keys with parsed index terms. Returns None on error.
"""
if fields is not None and fields_exclude is FIELDS_EXCLUDE_DEFAULT:
fields_exclude = None
return self._api_post("/v2/search/records",
rq=rq, limit=limit, offset=offset, sort=sort,
fields=fields, fields_exclude=fields_exclude)
def search_media(self, mq={}, rq={}, limit=100, offset=0, sort=None,
fields=None, fields_exclude=FIELDS_EXCLUDE_DEFAULT):
"""
mq Search Query in iDigBio Query Format, using Media Query Fields
rq Search Query in iDigBio Query Format, using Record Query Fields
sort field to sort on, pick from Media Query Fields
fields a list of fields to return, specified using the fieldName parameter from Fields with type mediarecords
        fields_exclude a list of fields to exclude, specified using the fieldName parameter from Fields with type mediarecords
limit max results
offset skip results
Returns idigbio record format (legacy api), plus additional top level keys with parsed index terms. Returns None on error.
"""
if fields is not None and fields_exclude is FIELDS_EXCLUDE_DEFAULT:
fields_exclude = None
return self._api_post("/v2/search/media",
rq=rq, mq=mq, limit=limit, offset=offset, sort=sort,
fields=fields, fields_exclude=fields_exclude)
def create_map(self, rq={}, style=None, t="auto", disable_images=False):
return iDigBioMap(
self, rq=rq, style=style, t=t, disable_images=disable_images)
def top_records(self, rq={}, top_fields=None, count=None):
return self._api_post("/v2/summary/top/records",
rq=rq, top_fields=top_fields, count=count)
def top_media(self, mq={}, rq={}, top_fields=None, count=None):
return self._api_post("/v2/summary/top/media", mq=mq, rq=rq,
top_fields=top_fields, count=count)
def count_records(self, rq={}):
r = self._api_post("/v2/summary/count/records", rq=rq)
if r is not None:
return r["itemCount"]
else:
return None
def count_media(self, mq={}, rq={}):
r = self._api_post("/v2/summary/count/media", mq=mq, rq=rq)
if r is not None:
return r["itemCount"]
else:
return None
def count_recordsets(self, rsq={"data.ingest":True}):
r = self._api_post("/v2/summary/count/recordsets", rsq=rsq)
if r is not None:
return r["itemCount"]
else:
return None
def datehist(self, rq={}, top_fields=None, count=None, date_field=None,
min_date=None, max_date=None, date_interval=None):
return self._api_post(
"/v2/summary/datehist",
rq=rq, top_fields=top_fields, count=count, date_field=date_field,
min_date=min_date, max_date=max_date, date_interval=date_interval)
def stats(self, t, recordset=None, min_date=None, max_date=None,
date_interval=None):
return self._api_post("/v2/summary/stats/{0}".format(t),
recordset=recordset,
min_date=min_date, max_date=max_date,
date_interval=date_interval)
def upload(self, filereference, localfile, media_type=None, etag=None):
if not self.s.auth:
raise Exception("Unauthorized")
if not localfile:
raise ValueError("Must have local copy of file to upload")
fd = open(localfile, 'rb')
if etag is None:
etag = util.calcFileHash(fd, op=False)
log.debug("Calculate etag for %r as %s", localfile, etag)
files = {'file': fd}
p = {
"filereference": filereference,
"media_type": media_type,
"etag": etag
}
return self._api_post("/v2/media", files=files, params=p)
def addreference(self, filereference, localfile):
if not self.s.auth:
raise Exception("Unauthorized")
if not localfile:
raise ValueError("Must have local copy of file to upload")
etag = util.calcFileHash(localfile)
p = {'filereference': filereference,
'etag': etag}
return self._api_post("/v2/media", params=p)
def addurl(self, filereference, media_type=None, mime_type=None):
if not self.s.auth:
raise Exception("Unauthorized")
p = {
"filereference": filereference,
"media_type": media_type,
"mime": mime_type
}
return self._api_post("/v2/media", **p)
|
|
import io
import os
import unittest
import warnings
import xml.etree.ElementTree as etree
import pronto
class TestRdfXMLParser(unittest.TestCase):
@staticmethod
def get_ontology(content):
xml = f"""
<rdf:RDF xmlns="http://purl.obolibrary.org/obo/TEMP#"
xml:base="http://purl.obolibrary.org/obo/TEMP"
xmlns:obo="http://purl.obolibrary.org/obo/"
xmlns:owl="http://www.w3.org/2002/07/owl#"
xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
xmlns:xml="http://www.w3.org/XML/1998/namespace"
xmlns:xsd="http://www.w3.org/2001/XMLSchema#"
xmlns:doap="http://usefulinc.com/ns/doap#"
xmlns:rdfs="http://www.w3.org/2000/01/rdf-schema#"
xmlns:oboInOwl="http://www.geneontology.org/formats/oboInOwl#">
{content}
</rdf:RDF>
"""
s = io.BytesIO(xml.encode('utf-8'))
return pronto.Ontology(s, import_depth=0)
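    # Sketch: get_ontology('<owl:Ontology/>') wraps the fragment in the RDF/XML
    # envelope above and parses it with pronto, returning an (empty) Ontology;
    # the test cases below push richer fragments through the same helper.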
def setUp(self):
warnings.simplefilter("error")
def tearDown(self):
warnings.simplefilter(warnings.defaultaction)
# ---
def test_iao(self):
warnings.simplefilter("ignore")
path = os.path.join(__file__, "..", "..", "data", "iao.owl")
iao = pronto.Ontology(os.path.realpath(path))
self.assertEqual(len(iao.terms()), 245)
def test_aeo(self):
warnings.simplefilter("ignore")
path = os.path.join(__file__, "..", "..", "data", "aeo.owl")
aeo = pronto.Ontology(os.path.realpath(path))
self.assertEqual(len(aeo.terms()), 250)
self.assertEqual(len(aeo.relationships()), 11)
self.assertEqual(aeo["AEO:0000099"].name, "keratin-based structure")
self.assertEqual(len(aeo["AEO:0000099"].definition.xrefs), 1)
def test_invalid_xml_file(self):
self.assertRaises(ValueError, self.get_ontology, "")
# ------------------------------------------------------------------------
def test_metadata_auto_generated_by(self):
ont = self.get_ontology(
"""
<owl:Ontology>
<oboInOwl:auto-generated-by>pronto</oboInOwl:auto-generated-by>
</owl:Ontology>
"""
)
self.assertEqual(ont.metadata.auto_generated_by, "pronto")
def test_metadata_default_namespace(self):
ont = self.get_ontology(
"""
<owl:Ontology>
<oboInOwl:hasDefaultNamespace rdf:datatype="http://www.w3.org/2001/XMLSchema#string">thing</oboInOwl:hasDefaultNamespace>
</owl:Ontology>
"""
)
self.assertEqual(ont.metadata.default_namespace, "thing")
def test_metadata_data_version(self):
        # owl:versionIRI
ont = self.get_ontology(
"""
<owl:Ontology rdf:about="http://purl.obolibrary.org/obo/ms.owl">
<owl:versionIRI rdf:resource="http://purl.obolibrary.org/obo/ms/4.1.30/ms.owl"/>
</owl:Ontology>
"""
)
self.assertEqual(ont.metadata.ontology, "ms")
self.assertEqual(ont.metadata.data_version, "4.1.30")
# doap:Version
ont2 = self.get_ontology(
"<owl:Ontology><doap:Version>0.1.0</doap:Version></owl:Ontology>"
)
self.assertEqual(ont2.metadata.data_version, "0.1.0")
def test_metadata_format_version(self):
ont = self.get_ontology(
"""
<owl:Ontology>
<oboInOwl:hasOBOFormatVersion>1.2</oboInOwl:hasOBOFormatVersion>
</owl:Ontology>
"""
)
self.assertEqual(ont.metadata.format_version, "1.2")
def test_metadata_imports(self):
ont = self.get_ontology(
"""
<owl:Ontology>
<owl:imports rdf:resource="http://purl.obolibrary.org/obo/ms.obo"/>
</owl:Ontology>
"""
)
self.assertIn("http://purl.obolibrary.org/obo/ms.obo", ont.metadata.imports)
def test_metadata_saved_by(self):
ont = self.get_ontology(
"""
<owl:Ontology>
<oboInOwl:savedBy>Martin Larralde</oboInOwl:savedBy>
</owl:Ontology>
"""
)
self.assertEqual(ont.metadata.saved_by, "Martin Larralde")
# ------------------------------------------------------------------------
def test_term_consider(self):
# Extract from `oboInOwl:consider` text
ont = self.get_ontology(
"""
<owl:Ontology/>
<owl:Class rdf:about="http://purl.obolibrary.org/obo/TST_001">
<oboInOwl:consider rdf:datatype="http://www.w3.org/2001/XMLSchema#string">TST:002</oboInOwl:consider>
</owl:Class>
<owl:Class rdf:about="http://purl.obolibrary.org/obo/TST_002"/>
"""
)
self.assertIn("TST:001", ont)
self.assertIn("TST:002", ont)
self.assertIn(ont["TST:002"], ont["TST:001"].consider)
# Extract from `oboInOwl:consider` RDF resource
ont2 = self.get_ontology(
"""
<owl:Ontology/>
<owl:Class rdf:about="http://purl.obolibrary.org/obo/TST_001">
<oboInOwl:consider rdf:resource="http://purl.obolibrary.org/obo/TST_002"/>
</owl:Class>
<owl:Class rdf:about="http://purl.obolibrary.org/obo/TST_002"/>
"""
)
self.assertIn("TST:001", ont2)
self.assertIn("TST:002", ont2)
self.assertIn(ont2["TST:002"], ont2["TST:001"].consider)
def test_term_definition_as_property(self):
ont = self.get_ontology("""
<owl:Ontology/>
<owl:Class rdf:about="http://purl.obolibrary.org/obo/TST_001">
<obo:IAO_0000115 rdf:datatype="http://www.w3.org/2001/XMLSchema#string">a term</obo:IAO_0000115>
<oboInOwl:id rdf:datatype="http://www.w3.org/2001/XMLSchema#string">TST:001</oboInOwl:id>
</owl:Class>
""")
self.assertIn("TST:001", ont)
self.assertEqual(ont["TST:001"].definition, "a term")
self.assertEqual(len(ont["TST:001"].definition.xrefs), 0)
def test_term_definition_as_axiom(self):
ont = self.get_ontology("""
<owl:Ontology/>
<owl:Class rdf:about="http://purl.obolibrary.org/obo/TST_001">
<obo:IAO_0000115 rdf:datatype="http://www.w3.org/2001/XMLSchema#string">a term</obo:IAO_0000115>
<oboInOwl:id rdf:datatype="http://www.w3.org/2001/XMLSchema#string">TST:001</oboInOwl:id>
</owl:Class>
<owl:Axiom>
<owl:annotatedSource rdf:resource="http://purl.obolibrary.org/obo/TST_001"/>
<owl:annotatedProperty rdf:resource="http://purl.obolibrary.org/obo/IAO_0000115"/>
<owl:annotatedTarget rdf:datatype="http://www.w3.org/2001/XMLSchema#string">a term</owl:annotatedTarget>
<oboInOwl:hasDbXref rdf:datatype="http://www.w3.org/2001/XMLSchema#string">ISBN:1234</oboInOwl:hasDbXref>
</owl:Axiom>
""")
self.assertIn("TST:001", ont)
self.assertEqual(ont["TST:001"].definition, "a term")
self.assertEqual(list(ont["TST:001"].definition.xrefs)[0], pronto.Xref("ISBN:1234"))
def test_term_multiple_labels(self):
txt = """
<owl:Ontology/>
<owl:Class rdf:about="http://purl.obolibrary.org/obo/TST_001">
<rdfs:label>A</rdfs:label>
<rdfs:label>B</rdfs:label>
</owl:Class>
"""
# check multiple labels is a syntax error in error mode
with warnings.catch_warnings():
warnings.simplefilter("error", pronto.warnings.SyntaxWarning)
with self.assertRaises(SyntaxError):
ont = self.get_ontology(txt)
# check multiple labels is fine in ignore mode
with warnings.catch_warnings():
warnings.simplefilter("ignore", pronto.warnings.SyntaxWarning)
ont = self.get_ontology(txt)
self.assertIn(ont['TST:001'].name, ["A", "B"])
def test_term_subclass_of(self):
ont = self.get_ontology("""
<owl:Ontology/>
<owl:Class rdf:about="http://purl.obolibrary.org/obo/TST_001"/>
<owl:Class rdf:about="http://purl.obolibrary.org/obo/TST_002">
<rdfs:subClassOf rdf:resource="http://purl.obolibrary.org/obo/TST_001"/>
</owl:Class>
""")
self.assertIn(ont["TST:001"], ont["TST:002"].superclasses().to_set())
self.assertIn(ont["TST:002"], ont["TST:001"].subclasses().to_set())
def test_term_subset(self):
ont = self.get_ontology("""
<owl:Ontology rdf:about="http://purl.obolibrary.org/obo/tst.owl"/>
<owl:AnnotationProperty rdf:about="http://purl.obolibrary.org/obo/tst#ss">
<rdfs:comment rdf:datatype="http://www.w3.org/2001/XMLSchema#string">a subset</rdfs:comment>
<rdfs:subPropertyOf rdf:resource="http://www.geneontology.org/formats/oboInOwl#SubsetProperty"/>
</owl:AnnotationProperty>
<owl:Class rdf:about="http://purl.obolibrary.org/obo/TST_001">
<oboInOwl:id rdf:datatype="http://www.w3.org/2001/XMLSchema#string">TST:001</oboInOwl:id>
<oboInOwl:inSubset rdf:resource="http://purl.obolibrary.org/obo/tst#ss"/>
</owl:Class>
""")
self.assertIn("TST:001", ont)
self.assertEqual(ont["TST:001"].subsets, {"ss"})
def test_term_synonym_as_property(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore", pronto.warnings.SyntaxWarning)
ont = self.get_ontology("""
<owl:Ontology/>
<owl:Class rdf:about="http://purl.obolibrary.org/obo/TST_001">
<oboInOwl:hasExactSynonym rdf:datatype="http://www.w3.org/2001/XMLSchema#string">stuff</oboInOwl:hasExactSynonym>
<oboInOwl:id rdf:datatype="http://www.w3.org/2001/XMLSchema#string">TST:001</oboInOwl:id>
</owl:Class>
""")
self.assertIn("TST:001", ont)
self.assertEqual(len(ont["TST:001"].synonyms), 1)
syn = next(iter(ont["TST:001"].synonyms))
self.assertEqual(syn.description, "stuff")
self.assertEqual(syn.scope, "EXACT")
self.assertEqual(syn.xrefs, set())
def test_term_synonym_as_axiom(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore", pronto.warnings.SyntaxWarning)
ont = self.get_ontology("""
<owl:Ontology/>
<owl:Class rdf:about="http://purl.obolibrary.org/obo/TST_001">
<oboInOwl:hasExactSynonym rdf:datatype="http://www.w3.org/2001/XMLSchema#string">stuff</oboInOwl:hasExactSynonym>
<oboInOwl:id rdf:datatype="http://www.w3.org/2001/XMLSchema#string">TST:001</oboInOwl:id>
</owl:Class>
<owl:Axiom>
<owl:annotatedSource rdf:resource="http://purl.obolibrary.org/obo/TST_001"/>
<owl:annotatedProperty rdf:resource="http://www.geneontology.org/formats/oboInOwl#hasExactSynonym"/>
<owl:annotatedTarget rdf:datatype="http://www.w3.org/2001/XMLSchema#string">stuff</owl:annotatedTarget>
<oboInOwl:hasDbXref rdf:datatype="http://www.w3.org/2001/XMLSchema#string">ISBN:1234</oboInOwl:hasDbXref>
</owl:Axiom>
""")
self.assertIn("TST:001", ont)
self.assertEqual(len(ont["TST:001"].synonyms), 1)
syn = next(iter(ont["TST:001"].synonyms))
self.assertEqual(syn.description, "stuff")
self.assertEqual(syn.scope, "EXACT")
self.assertEqual(syn.xrefs, {pronto.Xref("ISBN:1234")})
def test_term_relationship(self):
ont = self.get_ontology("""
<owl:Ontology/>
<owl:ObjectProperty rdf:about="http://purl.obolibrary.org/obo/RO_0002202">
<rdf:type rdf:resource="http://www.w3.org/2002/07/owl#TransitiveProperty"/>
<oboInOwl:hasDbXref rdf:datatype="http://www.w3.org/2001/XMLSchema#string">RO:0002202</oboInOwl:hasDbXref>
<oboInOwl:id rdf:datatype="http://www.w3.org/2001/XMLSchema#string"></oboInOwl:id>
<oboInOwl:shorthand rdf:datatype="http://www.w3.org/2001/XMLSchema#string">develops_from</oboInOwl:shorthand>
<rdfs:label rdf:datatype="http://www.w3.org/2001/XMLSchema#string">develops from</rdfs:label>
</owl:ObjectProperty>
<owl:Class rdf:about="http://purl.obolibrary.org/obo/TST_001"/>
<owl:Class rdf:about="http://purl.obolibrary.org/obo/TST_002">
<rdfs:subClassOf>
<owl:Restriction>
<owl:onProperty rdf:resource="http://purl.obolibrary.org/obo/RO_0002202"/>
<owl:someValuesFrom rdf:resource="http://purl.obolibrary.org/obo/TST_001"/>
</owl:Restriction>
</rdfs:subClassOf>
</owl:Class>
""")
self.assertIn("develops_from", [r.id for r in ont.relationships()])
develops_from = ont.get_relationship("develops_from")
self.assertIn(ont["TST:001"], ont["TST:002"].relationships[develops_from])
def test_term_xref_as_property_resource(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore", pronto.warnings.SyntaxWarning)
ont = self.get_ontology("""
<owl:Ontology/>
<owl:Class rdf:about="http://purl.obolibrary.org/obo/TST_001">
<oboInOwl:hasDbXref rdf:datatype="http://www.w3.org/2001/XMLSchema#string">ISBN:1234</oboInOwl:hasDbXref>
<oboInOwl:id rdf:resource="http://purl.obolibrary.org/obo/ISBN_1234"/>
</owl:Class>
""")
self.assertEqual(len(ont["TST:001"].xrefs), 1)
self.assertEqual(list(ont["TST:001"].xrefs)[0].id, "ISBN:1234")
def test_term_xref_as_property_text(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore", pronto.warnings.SyntaxWarning)
ont = self.get_ontology("""
<owl:Ontology/>
<owl:Class rdf:about="http://purl.obolibrary.org/obo/TST_001">
<oboInOwl:hasDbXref rdf:datatype="http://www.w3.org/2001/XMLSchema#string">ISBN:1234</oboInOwl:hasDbXref>
<oboInOwl:id rdf:datatype="http://www.w3.org/2001/XMLSchema#string">TST:001</oboInOwl:id>
</owl:Class>
""")
self.assertEqual(len(ont["TST:001"].xrefs), 1)
self.assertEqual(list(ont["TST:001"].xrefs)[0].id, "ISBN:1234")
def test_term_xref_as_axiom_without_description(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore", pronto.warnings.SyntaxWarning)
ont = self.get_ontology("""
<owl:Ontology/>
<owl:Class rdf:about="http://purl.obolibrary.org/obo/TST_001">
<oboInOwl:hasDbXref rdf:datatype="http://www.w3.org/2001/XMLSchema#string">ISBN:1234</oboInOwl:hasDbXref>
<oboInOwl:id rdf:datatype="http://www.w3.org/2001/XMLSchema#string">TST:001</oboInOwl:id>
</owl:Class>
<owl:Axiom>
<owl:annotatedSource rdf:resource="http://purl.obolibrary.org/obo/TST_001"/>
<owl:annotatedProperty rdf:resource="http://www.geneontology.org/formats/oboInOwl#hasDbXref"/>
<owl:annotatedTarget rdf:datatype="http://www.w3.org/2001/XMLSchema#string">ISBN:1234</owl:annotatedTarget>
</owl:Axiom>
""")
self.assertEqual(len(ont["TST:001"].xrefs), 1)
self.assertEqual(list(ont["TST:001"].xrefs)[0].id, "ISBN:1234")
self.assertEqual(list(ont["TST:001"].xrefs)[0].description, None)
def test_term_xref_as_axiom_with_description(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore", pronto.warnings.SyntaxWarning)
ont = self.get_ontology("""
<owl:Ontology/>
<owl:Class rdf:about="http://purl.obolibrary.org/obo/TST_001">
<oboInOwl:hasDbXref rdf:datatype="http://www.w3.org/2001/XMLSchema#string">ISBN:1234</oboInOwl:hasDbXref>
<oboInOwl:id rdf:datatype="http://www.w3.org/2001/XMLSchema#string">TST:001</oboInOwl:id>
</owl:Class>
<owl:Axiom>
<owl:annotatedSource rdf:resource="http://purl.obolibrary.org/obo/TST_001"/>
<owl:annotatedProperty rdf:resource="http://www.geneontology.org/formats/oboInOwl#hasDbXref"/>
<owl:annotatedTarget rdf:datatype="http://www.w3.org/2001/XMLSchema#string">ISBN:1234</owl:annotatedTarget>
<rdfs:label rdf:datatype="http://www.w3.org/2001/XMLSchema#string">a great book</rdfs:label>
</owl:Axiom>
""")
self.assertEqual(len(ont["TST:001"].xrefs), 1)
self.assertEqual(list(ont["TST:001"].xrefs)[0].id, "ISBN:1234")
self.assertEqual(list(ont["TST:001"].xrefs)[0].description, "a great book")
# ------------------------------------------------------------------------
def test_relationship_cyclic(self):
ont = self.get_ontology(
"""
<owl:Ontology/>
<owl:ObjectProperty rdf:about="http://purl.obolibrary.org/obo/TST_001">
<oboInOwl:id rdf:datatype="http://www.w3.org/2001/XMLSchema#string">TST:001</oboInOwl:id>
<oboInOwl:is_cyclic rdf:datatype="http://www.w3.org/2001/XMLSchema#boolean">true</oboInOwl:is_cyclic>
</owl:ObjectProperty>
"""
)
self.assertIn("TST:001", ont.relationships())
self.assertTrue(ont.get_relationship("TST:001").cyclic)
def test_relationship_functional(self):
ont = self.get_ontology(
"""
<owl:Ontology/>
<owl:ObjectProperty rdf:about="http://purl.obolibrary.org/obo/TST_001">
<oboInOwl:id rdf:datatype="http://www.w3.org/2001/XMLSchema#string">TST:001</oboInOwl:id>
<rdf:type rdf:resource="http://www.w3.org/2002/07/owl#FunctionalProperty"/>
</owl:ObjectProperty>
"""
)
self.assertIn("TST:001", ont.relationships())
self.assertTrue(ont.get_relationship("TST:001").functional)
def test_relationship_multiple_labels(self):
txt = """
<owl:Ontology/>
<owl:ObjectProperty rdf:about="http://purl.obolibrary.org/obo/TST_001">
<rdfs:label>A</rdfs:label>
<rdfs:label>B</rdfs:label>
</owl:ObjectProperty>
"""
# check multiple labels is a syntax error in error mode
with warnings.catch_warnings():
warnings.simplefilter("error", pronto.warnings.SyntaxWarning)
with self.assertRaises(SyntaxError):
ont = self.get_ontology(txt)
# check multiple labels is fine in ignore mode
with warnings.catch_warnings():
warnings.simplefilter("ignore", pronto.warnings.SyntaxWarning)
ont = self.get_ontology(txt)
self.assertIn(ont.get_relationship('TST:001').name, ["A", "B"])
def test_relationship_reflexive(self):
ont = self.get_ontology(
"""
<owl:Ontology/>
<owl:ObjectProperty rdf:about="http://purl.obolibrary.org/obo/TST_001">
<oboInOwl:id rdf:datatype="http://www.w3.org/2001/XMLSchema#string">TST:001</oboInOwl:id>
<rdf:type rdf:resource="http://www.w3.org/2002/07/owl#ReflexiveProperty"/>
</owl:ObjectProperty>
"""
)
self.assertIn("TST:001", ont.relationships())
self.assertTrue(ont.get_relationship("TST:001").reflexive)
def test_relationship_subset(self):
ont = self.get_ontology("""
<owl:Ontology rdf:about="http://purl.obolibrary.org/obo/tst.owl"/>
<owl:AnnotationProperty rdf:about="http://purl.obolibrary.org/obo/tst#ss">
<rdfs:comment rdf:datatype="http://www.w3.org/2001/XMLSchema#string">a subset</rdfs:comment>
<rdfs:subPropertyOf rdf:resource="http://www.geneontology.org/formats/oboInOwl#SubsetProperty"/>
</owl:AnnotationProperty>
<owl:ObjectProperty rdf:about="http://purl.obolibrary.org/obo/tst#friend_of">
<oboInOwl:id rdf:datatype="http://www.w3.org/2001/XMLSchema#string">friend_of</oboInOwl:id>
<oboInOwl:inSubset rdf:resource="http://purl.obolibrary.org/obo/tst#ss"/>
</owl:ObjectProperty>
""")
self.assertIn("friend_of", ont.relationships())
self.assertEqual(ont.get_relationship("friend_of").subsets, {"ss"})
def test_relationship_symmetric(self):
ont = self.get_ontology(
"""
<owl:Ontology/>
<owl:ObjectProperty rdf:about="http://purl.obolibrary.org/obo/TST_001">
<oboInOwl:id rdf:datatype="http://www.w3.org/2001/XMLSchema#string">TST:001</oboInOwl:id>
<rdf:type rdf:resource="http://www.w3.org/2002/07/owl#SymmetricProperty"/>
</owl:ObjectProperty>
"""
)
self.assertIn("TST:001", ont.relationships())
self.assertTrue(ont.get_relationship("TST:001").symmetric)
|
|
#!/usr/bin/env python2
__author__ = 'Marcel Hellkamp'
__version__ = 'Templating-0.1-VanDrunen'
__license__ = 'MIT'
import functools, os, re
# Some helpers for string/byte handling
def tob(s, enc='utf8'):
return s.encode(enc) if isinstance(s, unicode) else bytes(s)
def touni(s, enc='utf8', err='strict'):
return s.decode(enc, err) if isinstance(s, bytes) else unicode(s)
tonat = tob
class StplParser(object):
''' Parser for stpl templates. '''
_re_cache = {} #: Cache for compiled re patterns
# This huge pile of voodoo magic splits python code into 8 different tokens.
# 1: All kinds of python strings (trust me, it works)
_re_tok = '((?m)[urbURB]?(?:\'\'(?!\')|""(?!")|\'{6}|"{6}' \
'|\'(?:[^\\\\\']|\\\\.)+?\'|"(?:[^\\\\"]|\\\\.)+?"' \
'|\'{3}(?:[^\\\\]|\\\\.|\\n)+?\'{3}' \
'|"{3}(?:[^\\\\]|\\\\.|\\n)+?"{3}))'
_re_inl = _re_tok.replace('|\\n','') # We re-use this string pattern later
# 2: Comments (until end of line, but not the newline itself)
_re_tok += '|(#.*)'
# 3,4: Keywords that start or continue a python block (only start of line)
_re_tok += '|^([ \\t]*(?:if|for|while|with|try|def|class)\\b)' \
'|^([ \\t]*(?:elif|else|except|finally)\\b)'
# 5: Our special 'end' keyword (but only if it stands alone)
_re_tok += '|((?:^|;)[ \\t]*end[ \\t]*(?=(?:%(block_close)s[ \\t]*)?\\r?$|;|#))'
# 6: A customizable end-of-code-block template token (only end of line)
_re_tok += '|(%(block_close)s[ \\t]*(?=$))'
# 7: And finally, a single newline. The 8th token is 'everything else'
_re_tok += '|(\\r?\\n)'
# Match the start tokens of code areas in a template
_re_split = '(?m)^[ \t]*(\\\\?)((%(line_start)s)|(%(block_start)s))(%%?)'
# Match inline statements (may contain python strings)
_re_inl = '%%(inline_start)s((?:%s|[^\'"\n]*?)+)%%(inline_end)s' % _re_inl
default_syntax = '<% %> % {{ }}'
def __init__(self, source, syntax=None, encoding='utf8'):
self.source, self.encoding = touni(source, encoding), encoding
self.set_syntax(syntax or self.default_syntax)
self.code_buffer, self.text_buffer = [], []
self.lineno, self.offset = 1, 0
self.indent, self.indent_mod = 0, 0
def get_syntax(self):
''' Tokens as a space separated string (default: <% %> % {{ }}) '''
return self._syntax
def set_syntax(self, syntax):
self._syntax = syntax
self._tokens = syntax.split()
if not syntax in self._re_cache:
names = 'block_start block_close line_start inline_start inline_end'
etokens = map(re.escape, self._tokens)
pattern_vars = dict(zip(names.split(), etokens))
patterns = (self._re_split, self._re_tok, self._re_inl)
patterns = [re.compile(p%pattern_vars) for p in patterns]
self._re_cache[syntax] = patterns
self.re_split, self.re_tok, self.re_inl = self._re_cache[syntax]
syntax = property(get_syntax, set_syntax)
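    # For the default syntax '<% %> % {{ }}' the five whitespace-separated
    # tokens map, in order, to block_start='<%', block_close='%>',
    # line_start='%', inline_start='{{' and inline_end='}}'; each is
    # re.escape()d and substituted into the three patterns above.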
def translate(self):
if self.offset: raise RuntimeError('Parser is a one time instance.')
while True:
m = self.re_split.search(self.source[self.offset:])
if m:
text = self.source[self.offset:self.offset+m.start()]
self.text_buffer.append(text)
self.offset += m.end()
if m.group(1): # New escape syntax
line, sep, _ = self.source[self.offset:].partition('\n')
self.text_buffer.append(m.group(2)+m.group(5)+line+sep)
self.offset += len(line+sep)+1
continue
elif m.group(5): # Old escape syntax
depr('Escape code lines with a backslash.') #0.12
line, sep, _ = self.source[self.offset:].partition('\n')
self.text_buffer.append(m.group(2)+line+sep)
self.offset += len(line+sep)+1
continue
self.flush_text()
self.read_code(multiline=bool(m.group(4)))
else: break
self.text_buffer.append(self.source[self.offset:])
self.flush_text()
return ''.join(self.code_buffer)
def read_code(self, multiline):
code_line, comment = '', ''
while True:
m = self.re_tok.search(self.source[self.offset:])
if not m:
code_line += self.source[self.offset:]
self.offset = len(self.source)
self.write_code(code_line.strip(), comment)
return
code_line += self.source[self.offset:self.offset+m.start()]
self.offset += m.end()
_str, _com, _blk1, _blk2, _end, _cend, _nl = m.groups()
if code_line and (_blk1 or _blk2): # a if b else c
code_line += _blk1 or _blk2
continue
if _str: # Python string
code_line += _str
elif _com: # Python comment (up to EOL)
comment = _com
if multiline and _com.strip().endswith(self._tokens[1]):
multiline = False # Allow end-of-block in comments
elif _blk1: # Start-block keyword (if/for/while/def/try/...)
code_line, self.indent_mod = _blk1, -1
self.indent += 1
elif _blk2: # Continue-block keyword (else/elif/except/...)
code_line, self.indent_mod = _blk2, -1
elif _end: # The non-standard 'end'-keyword (ends a block)
self.indent -= 1
elif _cend: # The end-code-block template token (usually '%>')
if multiline: multiline = False
else: code_line += _cend
else: # \n
self.write_code(code_line.strip(), comment)
self.lineno += 1
code_line, comment, self.indent_mod = '', '', 0
if not multiline:
break
def flush_text(self):
text = ''.join(self.text_buffer)
del self.text_buffer[:]
if not text: return
parts, pos, nl = [], 0, '\\\n'+' '*self.indent
for m in self.re_inl.finditer(text):
prefix, pos = text[pos:m.start()], m.end()
if prefix:
parts.append(nl.join(map(repr, prefix.splitlines(True))))
if prefix.endswith('\n'): parts[-1] += nl
parts.append(self.process_inline(m.group(1).strip()))
if pos < len(text):
prefix = text[pos:]
lines = prefix.splitlines(True)
if lines[-1].endswith('\\\\\n'): lines[-1] = lines[-1][:-3]
elif lines[-1].endswith('\\\\\r\n'): lines[-1] = lines[-1][:-4]
parts.append(nl.join(map(repr, lines)))
code = '_printlist((%s,))' % ', '.join(parts)
self.lineno += code.count('\n')+1
self.write_code(code)
def process_inline(self, chunk):
if chunk[0] == '!': return '_str(%s)' % chunk[1:]
return '_escape(%s)' % chunk
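    # e.g. an inline expression '{{name}}' is rewritten to _escape(name),
    # while '{{!name}}' becomes _str(name): the leading '!' disables HTML
    # escaping for that expression.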
def write_code(self, line, comment=''):
line, comment = self.fix_backward_compatibility(line, comment)
code = ' ' * (self.indent+self.indent_mod)
code += line.lstrip() + comment + '\n'
self.code_buffer.append(code)
def fix_backward_compatibility(self, line, comment):
parts = line.strip().split(None, 2)
if parts and parts[0] in ('include', 'rebase'):
depr('The include and rebase keywords are functions now.') #0.12
if len(parts) == 1: return "_printlist([base])", comment
elif len(parts) == 2: return "_=%s(%r)" % tuple(parts), comment
else: return "_=%s(%r, %s)" % tuple(parts), comment
if self.lineno <= 2 and not line.strip() and 'coding' in comment:
m = re.match(r"#.*coding[:=]\s*([-\w.]+)", comment)
if m:
depr('PEP263 encoding strings in templates are deprecated.') #0.12
enc = m.group(1)
self.source = self.source.encode(self.encoding).decode(enc)
self.encoding = enc
return line, comment.replace('coding','coding*')
return line, comment
TEMPLATES = {}
class cached_property(object):
''' A property that is only computed once per instance and then replaces
itself with an ordinary attribute. Deleting the attribute resets the
property. '''
def __init__(self, func):
self.__doc__ = getattr(func, '__doc__')
self.func = func
def __get__(self, obj, cls):
if obj is None: return self
value = obj.__dict__[self.func.__name__] = self.func(obj)
return value
def html_escape(string):
''' Escape HTML special characters ``&<>`` and quotes ``'"``. '''
return string.replace('&','&').replace('<','<').replace('>','>')\
.replace('"','"').replace("'",''')
class BaseTemplate(object):
""" Base class and minimal API for template adapters """
extensions = ['tpl','html','thtml','stpl']
settings = {} #used in prepare()
defaults = {} #used in render()
def __init__(self, source=None, name=None, lookup=[], encoding='utf8', **settings):
""" Create a new template.
If the source parameter (str or buffer) is missing, the name argument
is used to guess a template filename. Subclasses can assume that
self.source and/or self.filename are set. Both are strings.
The lookup, encoding and settings parameters are stored as instance
variables.
The lookup parameter stores a list containing directory paths.
The encoding parameter should be used to decode byte strings or files.
The settings parameter contains a dict for engine-specific settings.
"""
self.name = name
self.source = source.read() if hasattr(source, 'read') else source
self.filename = source.filename if hasattr(source, 'filename') else None
self.lookup = [os.path.abspath(x) for x in lookup]
self.encoding = encoding
self.settings = self.settings.copy() # Copy from class variable
self.settings.update(settings) # Apply
if not self.source and self.name:
self.filename = self.search(self.name, self.lookup)
if not self.filename:
raise TemplateError('Template %s not found.' % repr(name))
if not self.source and not self.filename:
raise TemplateError('No template specified.')
self.prepare(**self.settings)
@classmethod
def search(cls, name, lookup=[]):
""" Search name in all directories specified in lookup.
First without, then with common extensions. Return first hit. """
if not lookup:
depr('The template lookup path list should not be empty.') #0.12
lookup = ['.']
if os.path.isabs(name) and os.path.isfile(name):
depr('Absolute template path names are deprecated.') #0.12
return os.path.abspath(name)
for spath in lookup:
spath = os.path.abspath(spath) + os.sep
fname = os.path.abspath(os.path.join(spath, name))
if not fname.startswith(spath): continue
if os.path.isfile(fname): return fname
for ext in cls.extensions:
if os.path.isfile('%s.%s' % (fname, ext)):
return '%s.%s' % (fname, ext)
@classmethod
def global_config(cls, key, *args):
''' This reads or sets the global settings stored in class.settings. '''
if args:
cls.settings = cls.settings.copy() # Make settings local to class
cls.settings[key] = args[0]
else:
return cls.settings[key]
def prepare(self, **options):
""" Run preparations (parsing, caching, ...).
It should be possible to call this again to refresh a template or to
update settings.
"""
raise NotImplementedError
def render(self, *args, **kwargs):
""" Render the template with the specified local variables and return
a single byte or unicode string. If it is a byte string, the encoding
must match self.encoding. This method must be thread-safe!
Local variables may be provided in dictionaries (args)
or directly, as keywords (kwargs).
"""
raise NotImplementedError
class SimpleTemplate(BaseTemplate):
def prepare(self, escape_func=html_escape, noescape=False, syntax=None, **ka):
self.cache = {}
enc = self.encoding
self._str = lambda x: touni(x, enc)
self._escape = lambda x: escape_func(touni(x, enc))
self.syntax = syntax
if noescape:
self._str, self._escape = self._escape, self._str
@cached_property
def co(self):
return compile(self.code, self.filename or '<string>', 'exec')
@cached_property
def code(self):
source = self.source
if not source:
with open(self.filename, 'rb') as f:
source = f.read()
try:
source, encoding = touni(source), 'utf8'
except UnicodeError:
depr('Template encodings other than utf8 are no longer supported.') #0.11
source, encoding = touni(source, 'latin1'), 'latin1'
parser = StplParser(source, encoding=encoding, syntax=self.syntax)
code = parser.translate()
self.encoding = parser.encoding
return code
def _rebase(self, _env, _name=None, **kwargs):
if _name is None:
depr('Rebase function called without arguments.'
' You were probably looking for {{base}}?', True) #0.12
_env['_rebase'] = (_name, kwargs)
def _include(self, _env, _name=None, **kwargs):
if _name is None:
            depr('Include function called without arguments.'
' You were probably looking for {{base}}?', True) #0.12
env = _env.copy()
env.update(kwargs)
if _name not in self.cache:
self.cache[_name] = self.__class__(name=_name, lookup=self.lookup)
return self.cache[_name].execute(env['_stdout'], env)
def execute(self, _stdout, kwargs):
env = self.defaults.copy()
env.update(kwargs)
env.update({'_stdout': _stdout, '_printlist': _stdout.extend,
'include': functools.partial(self._include, env),
'rebase': functools.partial(self._rebase, env), '_rebase': None,
'_str': self._str, '_escape': self._escape, 'get': env.get,
'setdefault': env.setdefault, 'defined': env.__contains__ })
eval(self.co, env)
if env.get('_rebase'):
subtpl, rargs = env.pop('_rebase')
rargs['base'] = ''.join(_stdout) #copy stdout
del _stdout[:] # clear stdout
return self._include(env, subtpl, **rargs)
return env
def render(self, *args, **kwargs):
""" Render the template using keyword arguments as local variables. """
env = {}; stdout = []
for dictarg in args: env.update(dictarg)
env.update(kwargs)
self.execute(stdout, env)
return ''.join(stdout)
def template(*args, **kwargs):
'''
Get a rendered template as a string iterator.
You can use a name, a filename or a template string as first parameter.
Template rendering arguments can be passed as dictionaries
or directly (as keyword arguments).
'''
tpl = args[0] if args else None
adapter = kwargs.pop('template_adapter', SimpleTemplate)
lookup = kwargs.pop('template_lookup', TEMPLATE_PATH)
tplid = (id(lookup), tpl)
if tplid not in TEMPLATES or DEBUG:
settings = kwargs.pop('template_settings', {})
if isinstance(tpl, adapter):
TEMPLATES[tplid] = tpl
if settings: TEMPLATES[tplid].prepare(**settings)
elif "\n" in tpl or "{" in tpl or "%" in tpl or '$' in tpl:
TEMPLATES[tplid] = adapter(source=tpl, lookup=lookup, **settings)
else:
TEMPLATES[tplid] = adapter(name=tpl, lookup=lookup, **settings)
if not TEMPLATES[tplid]:
abort(500, 'Template (%s) not found' % tpl)
for dictarg in args[1:]: kwargs.update(dictarg)
return TEMPLATES[tplid].render(kwargs)
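# Usage sketch (TEMPLATE_PATH, DEBUG, abort and depr are module globals defined
# elsewhere in the original bottle-derived module, not in this excerpt):
#   html = template('Hello {{name}}!', name='World')   # string source -> 'Hello World!'
#   html = template('page', title='Home')              # name looked up on TEMPLATE_PATH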
|
|
from __future__ import unicode_literals
import base64
import json
import datetime
import mock
from django.test import TestCase, RequestFactory
from django.core.urlresolvers import reverse
from django.utils import timezone
from ..compat import urlparse, parse_qs, urlencode, get_user_model
from ..models import get_application_model, Grant, AccessToken, RefreshToken
from ..settings import oauth2_settings
from ..views import ProtectedResourceView
from .test_utils import TestCaseUtils
Application = get_application_model()
UserModel = get_user_model()
# mocking a protected resource view
class ResourceView(ProtectedResourceView):
def get(self, request, *args, **kwargs):
return "This is a protected resource"
class BaseTest(TestCaseUtils, TestCase):
def setUp(self):
self.factory = RequestFactory()
self.test_user = UserModel.objects.create_user("test_user", "test@user.com", "123456")
self.dev_user = UserModel.objects.create_user("dev_user", "dev@user.com", "123456")
oauth2_settings.ALLOWED_REDIRECT_URI_SCHEMES = ['http', 'custom-scheme']
self.application = Application(
name="Test Application",
redirect_uris="http://localhost http://example.com http://example.it custom-scheme://example.com",
user=self.dev_user,
client_type=Application.CLIENT_CONFIDENTIAL,
authorization_grant_type=Application.GRANT_AUTHORIZATION_CODE,
)
self.application.save()
oauth2_settings._SCOPES = ['read', 'write']
def tearDown(self):
self.application.delete()
self.test_user.delete()
self.dev_user.delete()
class TestAuthorizationCodeView(BaseTest):
def test_skip_authorization_completely(self):
"""
        If application.skip_authorization is True, the authorization page should be skipped.
"""
self.client.login(username="test_user", password="123456")
self.application.skip_authorization = True
self.application.save()
query_string = urlencode({
'client_id': self.application.client_id,
'response_type': 'code',
'state': 'random_state_string',
'scope': 'read write',
'redirect_uri': 'http://example.it',
})
url = "{url}?{qs}".format(url=reverse('oauth2_provider:authorize'), qs=query_string)
response = self.client.get(url)
self.assertEqual(response.status_code, 302)
def test_pre_auth_invalid_client(self):
"""
Test error for an invalid client_id with response_type: code
"""
self.client.login(username="test_user", password="123456")
query_string = urlencode({
'client_id': 'fakeclientid',
'response_type': 'code',
})
url = "{url}?{qs}".format(url=reverse('oauth2_provider:authorize'), qs=query_string)
response = self.client.get(url)
self.assertEqual(response.status_code, 400)
def test_pre_auth_valid_client(self):
"""
Test response for a valid client_id with response_type: code
"""
self.client.login(username="test_user", password="123456")
query_string = urlencode({
'client_id': self.application.client_id,
'response_type': 'code',
'state': 'random_state_string',
'scope': 'read write',
'redirect_uri': 'http://example.it',
})
url = "{url}?{qs}".format(url=reverse('oauth2_provider:authorize'), qs=query_string)
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
# check form is in context and form params are valid
self.assertIn("form", response.context)
form = response.context["form"]
self.assertEqual(form['redirect_uri'].value(), "http://example.it")
self.assertEqual(form['state'].value(), "random_state_string")
self.assertEqual(form['scope'].value(), "read write")
self.assertEqual(form['client_id'].value(), self.application.client_id)
def test_pre_auth_valid_client_custom_redirect_uri_scheme(self):
"""
Test response for a valid client_id with response_type: code
using a non-standard, but allowed, redirect_uri scheme.
"""
self.client.login(username="test_user", password="123456")
query_string = urlencode({
'client_id': self.application.client_id,
'response_type': 'code',
'state': 'random_state_string',
'scope': 'read write',
'redirect_uri': 'custom-scheme://example.com',
})
url = "{url}?{qs}".format(url=reverse('oauth2_provider:authorize'), qs=query_string)
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
# check form is in context and form params are valid
self.assertIn("form", response.context)
form = response.context["form"]
self.assertEqual(form['redirect_uri'].value(), "custom-scheme://example.com")
self.assertEqual(form['state'].value(), "random_state_string")
self.assertEqual(form['scope'].value(), "read write")
self.assertEqual(form['client_id'].value(), self.application.client_id)
def test_pre_auth_approval_prompt(self):
"""
        Test that with approval_prompt=auto the authorization page is skipped when the
        user already holds a token for the requested scopes, and shown again when the
        scopes differ.
"""
tok = AccessToken.objects.create(user=self.test_user, token='1234567890',
application=self.application,
expires=timezone.now() + datetime.timedelta(days=1),
scope='read write')
self.client.login(username="test_user", password="123456")
query_string = urlencode({
'client_id': self.application.client_id,
'response_type': 'code',
'state': 'random_state_string',
'scope': 'read write',
'redirect_uri': 'http://example.it',
'approval_prompt': 'auto',
})
url = "{url}?{qs}".format(url=reverse('oauth2_provider:authorize'), qs=query_string)
response = self.client.get(url)
self.assertEqual(response.status_code, 302)
# user already authorized the application, but with different scopes: prompt them.
tok.scope = 'read'
tok.save()
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
def test_pre_auth_approval_prompt_default(self):
"""
        Test that the default REQUEST_APPROVAL_PROMPT ('force') always shows the
        authorization page, even when a matching token already exists.
"""
self.assertEqual(oauth2_settings.REQUEST_APPROVAL_PROMPT, 'force')
AccessToken.objects.create(user=self.test_user, token='1234567890',
application=self.application,
expires=timezone.now() + datetime.timedelta(days=1),
scope='read write')
self.client.login(username="test_user", password="123456")
query_string = urlencode({
'client_id': self.application.client_id,
'response_type': 'code',
'state': 'random_state_string',
'scope': 'read write',
'redirect_uri': 'http://example.it',
})
url = "{url}?{qs}".format(url=reverse('oauth2_provider:authorize'), qs=query_string)
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
def test_pre_auth_approval_prompt_default_override(self):
"""
        Test that overriding REQUEST_APPROVAL_PROMPT to 'auto' skips the authorization
        page when a matching token already exists.
"""
oauth2_settings.REQUEST_APPROVAL_PROMPT = 'auto'
AccessToken.objects.create(user=self.test_user, token='1234567890',
application=self.application,
expires=timezone.now() + datetime.timedelta(days=1),
scope='read write')
self.client.login(username="test_user", password="123456")
query_string = urlencode({
'client_id': self.application.client_id,
'response_type': 'code',
'state': 'random_state_string',
'scope': 'read write',
'redirect_uri': 'http://example.it',
})
url = "{url}?{qs}".format(url=reverse('oauth2_provider:authorize'), qs=query_string)
response = self.client.get(url)
self.assertEqual(response.status_code, 302)
def test_pre_auth_default_redirect(self):
"""
Test for default redirect uri if omitted from query string with response_type: code
"""
self.client.login(username="test_user", password="123456")
query_string = urlencode({
'client_id': self.application.client_id,
'response_type': 'code',
})
url = "{url}?{qs}".format(url=reverse('oauth2_provider:authorize'), qs=query_string)
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
form = response.context["form"]
self.assertEqual(form['redirect_uri'].value(), "http://localhost")
    def test_pre_auth_forbidden_redirect(self):
"""
Test error when passing a forbidden redirect_uri in query string with response_type: code
"""
self.client.login(username="test_user", password="123456")
query_string = urlencode({
'client_id': self.application.client_id,
'response_type': 'code',
'redirect_uri': 'http://forbidden.it',
})
url = "{url}?{qs}".format(url=reverse('oauth2_provider:authorize'), qs=query_string)
response = self.client.get(url)
self.assertEqual(response.status_code, 400)
def test_pre_auth_wrong_response_type(self):
"""
Test error when passing a wrong response_type in query string
"""
self.client.login(username="test_user", password="123456")
query_string = urlencode({
'client_id': self.application.client_id,
'response_type': 'WRONG',
})
url = "{url}?{qs}".format(url=reverse('oauth2_provider:authorize'), qs=query_string)
response = self.client.get(url)
self.assertEqual(response.status_code, 302)
self.assertIn("error=unauthorized_client", response['Location'])
def test_code_post_auth_allow(self):
"""
Test authorization code is given for an allowed request with response_type: code
"""
self.client.login(username="test_user", password="123456")
form_data = {
'client_id': self.application.client_id,
'state': 'random_state_string',
'scope': 'read write',
'redirect_uri': 'http://example.it',
'response_type': 'code',
'allow': True,
}
response = self.client.post(reverse('oauth2_provider:authorize'), data=form_data)
self.assertEqual(response.status_code, 302)
self.assertIn('http://example.it?', response['Location'])
self.assertIn('state=random_state_string', response['Location'])
self.assertIn('code=', response['Location'])
def test_code_post_auth_deny(self):
"""
        Test error when the resource owner denies access
"""
self.client.login(username="test_user", password="123456")
form_data = {
'client_id': self.application.client_id,
'state': 'random_state_string',
'scope': 'read write',
'redirect_uri': 'http://example.it',
'response_type': 'code',
'allow': False,
}
response = self.client.post(reverse('oauth2_provider:authorize'), data=form_data)
self.assertEqual(response.status_code, 302)
self.assertIn("error=access_denied", response['Location'])
def test_code_post_auth_bad_responsetype(self):
"""
        Test error redirect when an otherwise allowed request uses an unsupported response_type
"""
self.client.login(username="test_user", password="123456")
form_data = {
'client_id': self.application.client_id,
'state': 'random_state_string',
'scope': 'read write',
'redirect_uri': 'http://example.it',
'response_type': 'UNKNOWN',
'allow': True,
}
response = self.client.post(reverse('oauth2_provider:authorize'), data=form_data)
self.assertEqual(response.status_code, 302)
self.assertIn('http://example.it?error', response['Location'])
def test_code_post_auth_forbidden_redirect_uri(self):
"""
        Test error when an otherwise allowed request passes a forbidden redirect_uri
"""
self.client.login(username="test_user", password="123456")
form_data = {
'client_id': self.application.client_id,
'state': 'random_state_string',
'scope': 'read write',
'redirect_uri': 'http://forbidden.it',
'response_type': 'code',
'allow': True,
}
response = self.client.post(reverse('oauth2_provider:authorize'), data=form_data)
self.assertEqual(response.status_code, 400)
def test_code_post_auth_malicious_redirect_uri(self):
"""
Test validation of a malicious redirect_uri
"""
self.client.login(username="test_user", password="123456")
form_data = {
'client_id': self.application.client_id,
'state': 'random_state_string',
'scope': 'read write',
'redirect_uri': '/../',
'response_type': 'code',
'allow': True,
}
response = self.client.post(reverse('oauth2_provider:authorize'), data=form_data)
self.assertEqual(response.status_code, 400)
def test_code_post_auth_allow_custom_redirect_uri_scheme(self):
"""
Test authorization code is given for an allowed request with response_type: code
using a non-standard, but allowed, redirect_uri scheme.
"""
self.client.login(username="test_user", password="123456")
form_data = {
'client_id': self.application.client_id,
'state': 'random_state_string',
'scope': 'read write',
'redirect_uri': 'custom-scheme://example.com',
'response_type': 'code',
'allow': True,
}
response = self.client.post(reverse('oauth2_provider:authorize'), data=form_data)
self.assertEqual(response.status_code, 302)
self.assertIn('custom-scheme://example.com?', response['Location'])
self.assertIn('state=random_state_string', response['Location'])
self.assertIn('code=', response['Location'])
def test_code_post_auth_deny_custom_redirect_uri_scheme(self):
"""
        Test error when the resource owner denies access
using a non-standard, but allowed, redirect_uri scheme.
"""
self.client.login(username="test_user", password="123456")
form_data = {
'client_id': self.application.client_id,
'state': 'random_state_string',
'scope': 'read write',
'redirect_uri': 'custom-scheme://example.com',
'response_type': 'code',
'allow': False,
}
response = self.client.post(reverse('oauth2_provider:authorize'), data=form_data)
self.assertEqual(response.status_code, 302)
self.assertIn('custom-scheme://example.com?', response['Location'])
self.assertIn("error=access_denied", response['Location'])
def test_code_post_auth_redirection_uri_with_querystring(self):
"""
Tests that a redirection uri with query string is allowed
and query string is retained on redirection.
See http://tools.ietf.org/html/rfc6749#section-3.1.2
"""
self.client.login(username="test_user", password="123456")
form_data = {
'client_id': self.application.client_id,
'state': 'random_state_string',
'scope': 'read write',
'redirect_uri': 'http://example.com?foo=bar',
'response_type': 'code',
'allow': True,
}
response = self.client.post(reverse('oauth2_provider:authorize'), data=form_data)
self.assertEqual(response.status_code, 302)
self.assertIn("http://example.com?foo=bar", response['Location'])
self.assertIn("code=", response['Location'])
def test_code_post_auth_failing_redirection_uri_with_querystring(self):
"""
Test that in case of error the querystring of the redirection uri is preserved
See https://github.com/evonove/django-oauth-toolkit/issues/238
"""
self.client.login(username="test_user", password="123456")
form_data = {
'client_id': self.application.client_id,
'state': 'random_state_string',
'scope': 'read write',
'redirect_uri': 'http://example.com?foo=bar',
'response_type': 'code',
'allow': False,
}
response = self.client.post(reverse('oauth2_provider:authorize'), data=form_data)
self.assertEqual(response.status_code, 302)
self.assertEqual("http://example.com?foo=bar&error=access_denied", response['Location'])
def test_code_post_auth_fails_when_redirect_uri_path_is_invalid(self):
"""
Tests that a redirection uri is matched using scheme + netloc + path
"""
self.client.login(username="test_user", password="123456")
form_data = {
'client_id': self.application.client_id,
'state': 'random_state_string',
'scope': 'read write',
'redirect_uri': 'http://example.com/a?foo=bar',
'response_type': 'code',
'allow': True,
}
response = self.client.post(reverse('oauth2_provider:authorize'), data=form_data)
self.assertEqual(response.status_code, 400)
class TestAuthorizationCodeTokenView(BaseTest):
def get_auth(self):
"""
Helper method to retrieve a valid authorization code
"""
authcode_data = {
'client_id': self.application.client_id,
'state': 'random_state_string',
'scope': 'read write',
'redirect_uri': 'http://example.it',
'response_type': 'code',
'allow': True,
}
response = self.client.post(reverse('oauth2_provider:authorize'), data=authcode_data)
query_dict = parse_qs(urlparse(response['Location']).query)
return query_dict['code'].pop()
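    # The helper above drives the first half of the authorization-code flow:
    # POST /authorize with allow=True -> 302 redirect whose query string carries
    # the grant code. The tests below exchange that code at the token endpoint
    # (grant_type=authorization_code) for an access/refresh token pair.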
def test_basic_auth(self):
"""
Request an access token using basic authentication for client authentication
"""
self.client.login(username="test_user", password="123456")
authorization_code = self.get_auth()
token_request_data = {
'grant_type': 'authorization_code',
'code': authorization_code,
'redirect_uri': 'http://example.it'
}
auth_headers = self.get_basic_auth_header(self.application.client_id, self.application.client_secret)
response = self.client.post(reverse('oauth2_provider:token'), data=token_request_data, **auth_headers)
self.assertEqual(response.status_code, 200)
content = json.loads(response.content.decode("utf-8"))
self.assertEqual(content['token_type'], "Bearer")
self.assertEqual(content['scope'], "read write")
self.assertEqual(content['expires_in'], oauth2_settings.ACCESS_TOKEN_EXPIRE_SECONDS)
def test_refresh(self):
"""
Request an access token using a refresh token
"""
self.client.login(username="test_user", password="123456")
authorization_code = self.get_auth()
token_request_data = {
'grant_type': 'authorization_code',
'code': authorization_code,
'redirect_uri': 'http://example.it'
}
auth_headers = self.get_basic_auth_header(self.application.client_id, self.application.client_secret)
response = self.client.post(reverse('oauth2_provider:token'), data=token_request_data, **auth_headers)
content = json.loads(response.content.decode("utf-8"))
self.assertTrue('refresh_token' in content)
# make a second token request to be sure the previous refresh token remains valid, see #65
authorization_code = self.get_auth()
token_request_data = {
'grant_type': 'authorization_code',
'code': authorization_code,
'redirect_uri': 'http://example.it'
}
response = self.client.post(reverse('oauth2_provider:token'), data=token_request_data, **auth_headers)
token_request_data = {
'grant_type': 'refresh_token',
'refresh_token': content['refresh_token'],
'scope': content['scope'],
}
response = self.client.post(reverse('oauth2_provider:token'), data=token_request_data, **auth_headers)
self.assertEqual(response.status_code, 200)
content = json.loads(response.content.decode("utf-8"))
self.assertTrue('access_token' in content)
# check refresh token cannot be used twice
response = self.client.post(reverse('oauth2_provider:token'), data=token_request_data, **auth_headers)
self.assertEqual(response.status_code, 401)
content = json.loads(response.content.decode("utf-8"))
self.assertTrue('invalid_grant' in content.values())
def test_refresh_invalidates_old_tokens(self):
"""
Ensure existing refresh tokens are cleaned up when issuing new ones
"""
self.client.login(username="test_user", password="123456")
authorization_code = self.get_auth()
token_request_data = {
'grant_type': 'authorization_code',
'code': authorization_code,
'redirect_uri': 'http://example.it'
}
auth_headers = self.get_basic_auth_header(self.application.client_id, self.application.client_secret)
response = self.client.post(reverse('oauth2_provider:token'), data=token_request_data, **auth_headers)
content = json.loads(response.content.decode("utf-8"))
rt = content['refresh_token']
at = content['access_token']
token_request_data = {
'grant_type': 'refresh_token',
'refresh_token': rt,
'scope': content['scope'],
}
response = self.client.post(reverse('oauth2_provider:token'), data=token_request_data, **auth_headers)
self.assertEqual(response.status_code, 200)
self.assertFalse(RefreshToken.objects.filter(token=rt).exists())
self.assertFalse(AccessToken.objects.filter(token=at).exists())
def test_refresh_no_scopes(self):
"""
Request an access token using a refresh token without passing any scope
"""
self.client.login(username="test_user", password="123456")
authorization_code = self.get_auth()
token_request_data = {
'grant_type': 'authorization_code',
'code': authorization_code,
'redirect_uri': 'http://example.it'
}
auth_headers = self.get_basic_auth_header(self.application.client_id, self.application.client_secret)
response = self.client.post(reverse('oauth2_provider:token'), data=token_request_data, **auth_headers)
content = json.loads(response.content.decode("utf-8"))
self.assertTrue('refresh_token' in content)
token_request_data = {
'grant_type': 'refresh_token',
'refresh_token': content['refresh_token'],
}
response = self.client.post(reverse('oauth2_provider:token'), data=token_request_data, **auth_headers)
self.assertEqual(response.status_code, 200)
content = json.loads(response.content.decode("utf-8"))
self.assertTrue('access_token' in content)
def test_refresh_bad_scopes(self):
"""
Request an access token using a refresh token and wrong scopes
"""
self.client.login(username="test_user", password="123456")
authorization_code = self.get_auth()
token_request_data = {
'grant_type': 'authorization_code',
'code': authorization_code,
'redirect_uri': 'http://example.it'
}
auth_headers = self.get_basic_auth_header(self.application.client_id, self.application.client_secret)
response = self.client.post(reverse('oauth2_provider:token'), data=token_request_data, **auth_headers)
content = json.loads(response.content.decode("utf-8"))
self.assertTrue('refresh_token' in content)
token_request_data = {
'grant_type': 'refresh_token',
'refresh_token': content['refresh_token'],
'scope': 'read write nuke',
}
response = self.client.post(reverse('oauth2_provider:token'), data=token_request_data, **auth_headers)
self.assertEqual(response.status_code, 401)
def test_refresh_fail_repeating_requests(self):
"""
Try refreshing an access token with the same refresh token more than once
"""
self.client.login(username="test_user", password="123456")
authorization_code = self.get_auth()
token_request_data = {
'grant_type': 'authorization_code',
'code': authorization_code,
'redirect_uri': 'http://example.it'
}
auth_headers = self.get_basic_auth_header(self.application.client_id, self.application.client_secret)
response = self.client.post(reverse('oauth2_provider:token'), data=token_request_data, **auth_headers)
content = json.loads(response.content.decode("utf-8"))
self.assertTrue('refresh_token' in content)
token_request_data = {
'grant_type': 'refresh_token',
'refresh_token': content['refresh_token'],
'scope': content['scope'],
}
response = self.client.post(reverse('oauth2_provider:token'), data=token_request_data, **auth_headers)
self.assertEqual(response.status_code, 200)
response = self.client.post(reverse('oauth2_provider:token'), data=token_request_data, **auth_headers)
self.assertEqual(response.status_code, 401)
def test_refresh_repeating_requests_non_rotating_tokens(self):
"""
Try refreshing an access token with the same refresh token more than once when not rotating tokens.
"""
self.client.login(username="test_user", password="123456")
authorization_code = self.get_auth()
token_request_data = {
'grant_type': 'authorization_code',
'code': authorization_code,
'redirect_uri': 'http://example.it'
}
auth_headers = self.get_basic_auth_header(self.application.client_id, self.application.client_secret)
response = self.client.post(reverse('oauth2_provider:token'), data=token_request_data, **auth_headers)
content = json.loads(response.content.decode("utf-8"))
self.assertTrue('refresh_token' in content)
token_request_data = {
'grant_type': 'refresh_token',
'refresh_token': content['refresh_token'],
'scope': content['scope'],
}
with mock.patch('oauthlib.oauth2.rfc6749.request_validator.RequestValidator.rotate_refresh_token',
return_value=False):
response = self.client.post(reverse('oauth2_provider:token'), data=token_request_data, **auth_headers)
self.assertEqual(response.status_code, 200)
response = self.client.post(reverse('oauth2_provider:token'), data=token_request_data, **auth_headers)
self.assertEqual(response.status_code, 200)
def test_basic_auth_bad_authcode(self):
"""
Request an access token using a bad authorization code
"""
self.client.login(username="test_user", password="123456")
token_request_data = {
'grant_type': 'authorization_code',
'code': 'BLAH',
'redirect_uri': 'http://example.it'
}
auth_headers = self.get_basic_auth_header(self.application.client_id, self.application.client_secret)
response = self.client.post(reverse('oauth2_provider:token'), data=token_request_data, **auth_headers)
self.assertEqual(response.status_code, 401)
def test_basic_auth_bad_granttype(self):
"""
Request an access token using a bad grant_type string
"""
self.client.login(username="test_user", password="123456")
token_request_data = {
'grant_type': 'UNKNOWN',
'code': 'BLAH',
'redirect_uri': 'http://example.it'
}
auth_headers = self.get_basic_auth_header(self.application.client_id, self.application.client_secret)
response = self.client.post(reverse('oauth2_provider:token'), data=token_request_data, **auth_headers)
self.assertEqual(response.status_code, 400)
def test_basic_auth_grant_expired(self):
"""
Request an access token using an expired grant token
"""
self.client.login(username="test_user", password="123456")
g = Grant(application=self.application, user=self.test_user, code='BLAH', expires=timezone.now(),
redirect_uri='', scope='')
g.save()
token_request_data = {
'grant_type': 'authorization_code',
'code': 'BLAH',
'redirect_uri': 'http://example.it'
}
auth_headers = self.get_basic_auth_header(self.application.client_id, self.application.client_secret)
response = self.client.post(reverse('oauth2_provider:token'), data=token_request_data, **auth_headers)
self.assertEqual(response.status_code, 401)
def test_basic_auth_bad_secret(self):
"""
        Request an access token using HTTP Basic client authentication with a wrong client secret
"""
self.client.login(username="test_user", password="123456")
authorization_code = self.get_auth()
token_request_data = {
'grant_type': 'authorization_code',
'code': authorization_code,
'redirect_uri': 'http://example.it'
}
auth_headers = self.get_basic_auth_header(self.application.client_id, 'BOOM!')
response = self.client.post(reverse('oauth2_provider:token'), data=token_request_data, **auth_headers)
self.assertEqual(response.status_code, 401)
def test_basic_auth_wrong_auth_type(self):
"""
        Request an access token using an unsupported HTTP authorization scheme (not Basic) for client authentication
"""
self.client.login(username="test_user", password="123456")
authorization_code = self.get_auth()
token_request_data = {
'grant_type': 'authorization_code',
'code': authorization_code,
'redirect_uri': 'http://example.it'
}
user_pass = '{0}:{1}'.format(self.application.client_id, self.application.client_secret)
auth_string = base64.b64encode(user_pass.encode('utf-8'))
auth_headers = {
'HTTP_AUTHORIZATION': 'Wrong ' + auth_string.decode("utf-8"),
}
response = self.client.post(reverse('oauth2_provider:token'), data=token_request_data, **auth_headers)
self.assertEqual(response.status_code, 401)
def test_request_body_params(self):
"""
        Request an access token passing the client credentials in the request body instead of the Authorization header
"""
self.client.login(username="test_user", password="123456")
authorization_code = self.get_auth()
token_request_data = {
'grant_type': 'authorization_code',
'code': authorization_code,
'redirect_uri': 'http://example.it',
'client_id': self.application.client_id,
'client_secret': self.application.client_secret,
}
response = self.client.post(reverse('oauth2_provider:token'), data=token_request_data)
self.assertEqual(response.status_code, 200)
content = json.loads(response.content.decode("utf-8"))
self.assertEqual(content['token_type'], "Bearer")
self.assertEqual(content['scope'], "read write")
self.assertEqual(content['expires_in'], oauth2_settings.ACCESS_TOKEN_EXPIRE_SECONDS)
def test_public(self):
"""
Request an access token using client_type: public
"""
self.client.login(username="test_user", password="123456")
self.application.client_type = Application.CLIENT_PUBLIC
self.application.save()
authorization_code = self.get_auth()
token_request_data = {
'grant_type': 'authorization_code',
'code': authorization_code,
'redirect_uri': 'http://example.it',
'client_id': self.application.client_id
}
response = self.client.post(reverse('oauth2_provider:token'), data=token_request_data)
self.assertEqual(response.status_code, 200)
content = json.loads(response.content.decode("utf-8"))
self.assertEqual(content['token_type'], "Bearer")
self.assertEqual(content['scope'], "read write")
self.assertEqual(content['expires_in'], oauth2_settings.ACCESS_TOKEN_EXPIRE_SECONDS)
def test_malicious_redirect_uri(self):
"""
Request an access token using client_type: public and ensure redirect_uri is
properly validated.
"""
self.client.login(username="test_user", password="123456")
self.application.client_type = Application.CLIENT_PUBLIC
self.application.save()
authorization_code = self.get_auth()
token_request_data = {
'grant_type': 'authorization_code',
'code': authorization_code,
'redirect_uri': '/../',
'client_id': self.application.client_id
}
response = self.client.post(reverse('oauth2_provider:token'), data=token_request_data)
self.assertEqual(response.status_code, 401)
def test_code_exchange_succeed_when_redirect_uri_match(self):
"""
        Tests that the code exchange succeeds when the redirect uri matches the one used for the code request
"""
self.client.login(username="test_user", password="123456")
# retrieve a valid authorization code
authcode_data = {
'client_id': self.application.client_id,
'state': 'random_state_string',
'scope': 'read write',
'redirect_uri': 'http://example.it?foo=bar',
'response_type': 'code',
'allow': True,
}
response = self.client.post(reverse('oauth2_provider:authorize'), data=authcode_data)
query_dict = parse_qs(urlparse(response['Location']).query)
authorization_code = query_dict['code'].pop()
# exchange authorization code for a valid access token
token_request_data = {
'grant_type': 'authorization_code',
'code': authorization_code,
'redirect_uri': 'http://example.it?foo=bar'
}
auth_headers = self.get_basic_auth_header(self.application.client_id, self.application.client_secret)
response = self.client.post(reverse('oauth2_provider:token'), data=token_request_data, **auth_headers)
self.assertEqual(response.status_code, 200)
content = json.loads(response.content.decode("utf-8"))
self.assertEqual(content['token_type'], "Bearer")
self.assertEqual(content['scope'], "read write")
self.assertEqual(content['expires_in'], oauth2_settings.ACCESS_TOKEN_EXPIRE_SECONDS)
def test_code_exchange_fails_when_redirect_uri_does_not_match(self):
"""
        Tests that the code exchange fails when the redirect uri does not match the one used for the code request
"""
self.client.login(username="test_user", password="123456")
# retrieve a valid authorization code
authcode_data = {
'client_id': self.application.client_id,
'state': 'random_state_string',
'scope': 'read write',
'redirect_uri': 'http://example.it?foo=bar',
'response_type': 'code',
'allow': True,
}
response = self.client.post(reverse('oauth2_provider:authorize'), data=authcode_data)
query_dict = parse_qs(urlparse(response['Location']).query)
authorization_code = query_dict['code'].pop()
# exchange authorization code for a valid access token
token_request_data = {
'grant_type': 'authorization_code',
'code': authorization_code,
'redirect_uri': 'http://example.it?foo=baraa'
}
auth_headers = self.get_basic_auth_header(self.application.client_id, self.application.client_secret)
response = self.client.post(reverse('oauth2_provider:token'), data=token_request_data, **auth_headers)
self.assertEqual(response.status_code, 401)
def test_code_exchange_succeed_when_redirect_uri_match_with_multiple_query_params(self):
"""
        Tests that the code exchange succeeds when the redirect uri, including multiple query parameters, matches the one used for the code request
"""
self.client.login(username="test_user", password="123456")
self.application.redirect_uris = "http://localhost http://example.com?foo=bar"
self.application.save()
# retrieve a valid authorization code
authcode_data = {
'client_id': self.application.client_id,
'state': 'random_state_string',
'scope': 'read write',
'redirect_uri': 'http://example.com?bar=baz&foo=bar',
'response_type': 'code',
'allow': True,
}
response = self.client.post(reverse('oauth2_provider:authorize'), data=authcode_data)
query_dict = parse_qs(urlparse(response['Location']).query)
authorization_code = query_dict['code'].pop()
# exchange authorization code for a valid access token
token_request_data = {
'grant_type': 'authorization_code',
'code': authorization_code,
'redirect_uri': 'http://example.com?bar=baz&foo=bar'
}
auth_headers = self.get_basic_auth_header(self.application.client_id, self.application.client_secret)
response = self.client.post(reverse('oauth2_provider:token'), data=token_request_data, **auth_headers)
self.assertEqual(response.status_code, 200)
content = json.loads(response.content.decode("utf-8"))
self.assertEqual(content['token_type'], "Bearer")
self.assertEqual(content['scope'], "read write")
self.assertEqual(content['expires_in'], oauth2_settings.ACCESS_TOKEN_EXPIRE_SECONDS)
class TestAuthorizationCodeProtectedResource(BaseTest):
def test_resource_access_allowed(self):
self.client.login(username="test_user", password="123456")
# retrieve a valid authorization code
authcode_data = {
'client_id': self.application.client_id,
'state': 'random_state_string',
'scope': 'read write',
'redirect_uri': 'http://example.it',
'response_type': 'code',
'allow': True,
}
response = self.client.post(reverse('oauth2_provider:authorize'), data=authcode_data)
query_dict = parse_qs(urlparse(response['Location']).query)
authorization_code = query_dict['code'].pop()
# exchange authorization code for a valid access token
token_request_data = {
'grant_type': 'authorization_code',
'code': authorization_code,
'redirect_uri': 'http://example.it'
}
auth_headers = self.get_basic_auth_header(self.application.client_id, self.application.client_secret)
response = self.client.post(reverse('oauth2_provider:token'), data=token_request_data, **auth_headers)
content = json.loads(response.content.decode("utf-8"))
access_token = content['access_token']
# use token to access the resource
auth_headers = {
'HTTP_AUTHORIZATION': 'Bearer ' + access_token,
}
request = self.factory.get("/fake-resource", **auth_headers)
request.user = self.test_user
view = ResourceView.as_view()
response = view(request)
self.assertEqual(response, "This is a protected resource")
def test_resource_access_deny(self):
auth_headers = {
'HTTP_AUTHORIZATION': 'Bearer ' + "faketoken",
}
request = self.factory.get("/fake-resource", **auth_headers)
request.user = self.test_user
view = ResourceView.as_view()
response = view(request)
self.assertEqual(response.status_code, 403)
|
|
import logging
from datetime import timedelta
import random
from pajbot import utils
from pajbot.managers.db import DBManager
from pajbot.managers.handler import HandlerManager
from pajbot.managers.schedule import ScheduleManager
from pajbot.models.command import Command
from pajbot.models.command import CommandExample
from pajbot.models.user import User
from pajbot.modules import BaseModule
from pajbot.modules import ModuleSetting
log = logging.getLogger(__name__)
class DuelModule(BaseModule):
ID = __name__.split(".")[-1]
NAME = "Duel"
DESCRIPTION = "Let users duel to win or lose points."
CATEGORY = "Game"
SETTINGS = [
ModuleSetting(
key="max_pot",
label="How many points you can duel for at most",
type="number",
required=True,
placeholder="",
default=420,
constraints={"min_value": 0, "max_value": 1000000},
),
ModuleSetting(
key="message_won",
label="Winner message | Available arguments: {winner}, {loser}",
type="text",
required=True,
placeholder="{winner} won the duel vs {loser} PogChamp",
default="{winner} won the duel vs {loser} PogChamp",
constraints={"min_str_len": 10, "max_str_len": 400},
),
ModuleSetting(
key="message_won_points",
label="Points message | Available arguments: {winner}, {loser}, {total_pot}, {extra_points}",
type="text",
required=True,
placeholder="{winner} won the duel vs {loser} PogChamp . The pot was {total_pot}, the winner gets their bet back + {extra_points} points",
default="{winner} won the duel vs {loser} PogChamp . The pot was {total_pot}, the winner gets their bet back + {extra_points} points",
constraints={"min_str_len": 10, "max_str_len": 400},
),
ModuleSetting(
key="duel_tax",
label="Duel tax (deduct this percent value from the win)",
type="number",
required=True,
placeholder="",
default=30,
constraints={"min_value": 0, "max_value": 100},
),
ModuleSetting(
key="online_global_cd",
label="Global cooldown (seconds)",
type="number",
required=True,
placeholder="",
default=0,
constraints={"min_value": 0, "max_value": 120},
),
ModuleSetting(
key="online_user_cd",
label="Per-user cooldown (seconds)",
type="number",
required=True,
placeholder="",
default=5,
constraints={"min_value": 0, "max_value": 240},
),
ModuleSetting(
key="show_on_clr", label="Show duels on the clr overlay", type="boolean", required=True, default=True
),
ModuleSetting(
key="max_duel_age",
label="Auto-cancel duels after this many minutes",
type="number",
required=True,
placeholder="",
default=5,
constraints={"min_value": 1, "max_value": 60},
),
]
def load_commands(self, **options):
self.commands["duel"] = Command.raw_command(
self.initiate_duel,
delay_all=self.settings["online_global_cd"],
delay_user=self.settings["online_user_cd"],
description="Initiate a duel with a user",
examples=[
CommandExample(
None,
"0-point duel",
chat="user:!duel Karl_Kons\n" "bot>user:You have challenged Karl_Kons for 0 points",
description="Duel Karl_Kons for 0 points",
).parse(),
CommandExample(
None,
"69-point duel",
chat="user:!duel Karl_Kons 69\n" "bot>user:You have challenged Karl_Kons for 69 points",
description="Duel Karl_Kons for 69 points",
).parse(),
],
)
self.commands["cancelduel"] = Command.raw_command(
self.cancel_duel, delay_all=0, delay_user=10, description="Cancel your duel request"
)
self.commands["accept"] = Command.raw_command(
self.accept_duel, delay_all=0, delay_user=0, description="Accept a duel request"
)
self.commands["decline"] = Command.raw_command(
self.decline_duel, delay_all=0, delay_user=0, description="Decline a duel request"
)
self.commands["deny"] = self.commands["decline"]
self.commands["duelstatus"] = Command.raw_command(
self.status_duel, delay_all=0, delay_user=5, description="Current duel request info"
)
self.commands["duelstats"] = Command.raw_command(
self.get_duel_stats, delay_all=0, delay_user=120, description="Get your duel statistics"
)
def __init__(self, bot):
super().__init__(bot)
self.duel_requests = {}
self.duel_request_price = {}
self.duel_targets = {}
self.duel_begin_time = {}
self.gc_job = None
def initiate_duel(self, bot, source, message, **rest):
"""
Initiate a duel with a user.
You can also bet points on the winner.
By default, the maximum amount of points you can spend is 420.
How to use: !duel USERNAME POINTS_TO_BET
"""
if message is None:
return False
max_pot = self.settings["max_pot"]
msg_split = message.split()
input = msg_split[0]
with DBManager.create_session_scope() as db_session:
user = User.find_by_user_input(db_session, input)
if user is None:
# No user was found with this username
return False
duel_price = 0
if len(msg_split) > 1:
try:
duel_price = int(msg_split[1])
if duel_price < 0:
return False
if duel_price > max_pot:
duel_price = max_pot
except ValueError:
pass
if source.id in self.duel_requests:
currently_duelling = User.find_by_id(db_session, self.duel_requests[source.id])
if currently_duelling is None:
del self.duel_requests[source.id]
return False
bot.whisper(
source,
f"You already have a duel request active with {currently_duelling}. Type !cancelduel to cancel your duel request.",
)
return False
if user == source:
# You cannot duel yourself
return False
if user.last_active is None or (utils.now() - user.last_active) > timedelta(minutes=5):
bot.whisper(
source,
"This user has not been active in chat within the last 5 minutes. Get them to type in chat before sending another challenge",
)
return False
if not user.can_afford(duel_price) or not source.can_afford(duel_price):
bot.whisper(
source,
f"You or your target do not have more than {duel_price} points, therefore you cannot duel for that amount.",
)
return False
if user.id in self.duel_targets:
challenged_by = User.find_by_id(db_session, self.duel_requests[user.id])
bot.whisper(
source,
f"This person is already being challenged by {challenged_by}. Ask them to answer the offer by typing !deny or !accept",
)
return False
self.duel_targets[user.id] = source.id
self.duel_requests[source.id] = user.id
self.duel_request_price[source.id] = duel_price
self.duel_begin_time[source.id] = utils.now()
bot.whisper(
user,
f"You have been challenged to a duel by {source} for {duel_price} points. You can either !accept or !deny this challenge.",
)
bot.whisper(source, f"You have challenged {user} for {duel_price} points")
def cancel_duel(self, bot, source, **rest):
"""
Cancel any duel requests you've sent.
How to use: !cancelduel
"""
if source.id not in self.duel_requests:
bot.whisper(source, "You have not sent any duel requests")
return
with DBManager.create_session_scope() as db_session:
challenged = User.find_by_id(db_session, self.duel_requests[source.id])
bot.whisper(source, f"You have cancelled the duel vs {challenged}")
del self.duel_targets[challenged.id]
del self.duel_request_price[source.id]
del self.duel_begin_time[source.id]
del self.duel_requests[source.id]
def accept_duel(self, bot, source, **rest):
"""
Accepts any active duel requests you've received.
How to use: !accept
"""
if source.id not in self.duel_targets:
bot.whisper(source, "You are not being challenged to a duel by anyone.")
return
with DBManager.create_session_scope() as db_session:
requestor = User.find_by_id(db_session, self.duel_targets[source.id])
duel_price = self.duel_request_price[self.duel_targets[source.id]]
if not source.can_afford(duel_price) or not requestor.can_afford(duel_price):
bot.whisper(
source,
f"Your duel request with {requestor} was cancelled due to one of you not having enough points.",
)
bot.whisper(
requestor,
f"Your duel request with {source} was cancelled due to one of you not having enough points.",
)
del self.duel_requests[requestor.id]
del self.duel_request_price[requestor.id]
del self.duel_begin_time[requestor.id]
del self.duel_targets[source.id]
return False
source.points -= duel_price
requestor.points -= duel_price
winning_pot = int(duel_price * (1.0 - self.settings["duel_tax"] / 100))
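            # Illustrative arithmetic (added; not in the original source): with the default
            # duel_tax of 30 and a 100-point duel, winning_pot = int(100 * (1.0 - 30 / 100)) = 70.
            # Both players pay 100 points up front; the winner gets their 100-point bet back
            # plus 70 extra points, so the 30-point difference is the "tax" removed from the economy.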
participants = [source, requestor]
winner = random.choice(participants)
participants.remove(winner)
loser = participants.pop()
winner.points += duel_price
winner.points += winning_pot
# Persist duel statistics
winner.duel_stats.won(winning_pot)
loser.duel_stats.lost(duel_price)
arguments = {
"winner": winner.name,
"loser": loser.name,
"total_pot": duel_price,
"extra_points": winning_pot,
}
if duel_price > 0:
message = self.get_phrase("message_won_points", **arguments)
if duel_price >= 500 and self.settings["show_on_clr"]:
bot.websocket_manager.emit("notification", {"message": f"{winner} won the duel vs {loser}"})
else:
message = self.get_phrase("message_won", **arguments)
bot.say(message)
del self.duel_requests[requestor.id]
del self.duel_request_price[requestor.id]
del self.duel_begin_time[requestor.id]
del self.duel_targets[source.id]
HandlerManager.trigger(
"on_duel_complete", winner=winner, loser=loser, points_won=winning_pot, points_bet=duel_price
)
def decline_duel(self, bot, source, **options):
"""
Declines any active duel requests you've received.
How to use: !decline
"""
if source.id not in self.duel_targets:
bot.whisper(source, "You are not being challenged to a duel")
return False
with DBManager.create_session_scope() as db_session:
requestor = User.find_by_id(db_session, self.duel_targets[source.id])
bot.whisper(source, f"You have declined the duel vs {requestor}")
bot.whisper(requestor, f"{source} declined the duel challenge with you.")
del self.duel_targets[source.id]
del self.duel_requests[requestor.id]
del self.duel_request_price[requestor.id]
del self.duel_begin_time[requestor.id]
def status_duel(self, bot, source, **rest):
"""
Whispers you the current status of your active duel requests/duel targets
How to use: !duelstatus
"""
with DBManager.create_session_scope() as db_session:
msg = []
if source.id in self.duel_requests:
duelling = User.find_by_id(db_session, self.duel_requests[source.id])
msg.append(f"You have a duel request for {self.duel_request_price[source.id]} points by {duelling}")
if source.id in self.duel_targets:
challenger = User.find_by_id(db_session, self.duel_targets[source.id])
msg.append(
f"You have a pending duel request from {challenger} for {self.duel_request_price[self.duel_targets[source.id]]} points"
)
if len(msg) > 0:
bot.whisper(source, ". ".join(msg))
else:
bot.whisper(source, "You have no duel request or duel target. Type !duel USERNAME POT to duel someone!")
@staticmethod
def get_duel_stats(bot, source, **rest):
"""
        Whispers the user's duel win ratio to the user
"""
if source.duel_stats is None:
bot.whisper(source, "You have no recorded duels.")
return True
bot.whisper(
source,
f"duels: {source.duel_stats.duels_total} winrate: {source.duel_stats.winrate:.2f}% streak: {source.duel_stats.current_streak} profit: {source.duel_stats.profit}",
)
def _cancel_expired_duels(self):
now = utils.now()
        # Iterate over a copy: expired entries are deleted from the dict inside the loop
        for source_id, started_at in list(self.duel_begin_time.items()):
duel_age = now - started_at
if duel_age <= timedelta(minutes=self.settings["max_duel_age"]):
# Duel is not too old
continue
with DBManager.create_session_scope() as db_session:
source = User.find_by_id(db_session, source_id)
challenged = User.find_by_id(db_session, self.duel_requests[source.id])
if source is not None and challenged is not None:
self.bot.whisper(
source, f"{challenged} didn't accept your duel request in time, so the duel has been cancelled."
)
del self.duel_targets[self.duel_requests[source.id]]
del self.duel_requests[source.id]
del self.duel_request_price[source.id]
del self.duel_begin_time[source.id]
def enable(self, bot):
if not bot:
return
# We can't use bot.execute_every directly since we can't later cancel jobs created through bot.execute_every
self.gc_job = ScheduleManager.execute_every(30, lambda: self.bot.execute_now(self._cancel_expired_duels))
def disable(self, bot):
if not bot:
return
self.gc_job.remove()
self.gc_job = None
|
|
from nltk.featurestructure import *
from nltk.cfg import Nonterminal, CFGProduction
import re
import string
class Category(FeatureStructure, Nonterminal):
"""
A C{Category} is a specialized feature structure, intended for use in
parsing. It can act as a C{Nonterminal}.
A C{Category} differs from a C{FeatureStructure} in these ways:
- Categories may not be re-entrant.
- Categories use value-based equality, while FeatureStructures use
identity-based equality.
- Strings in Categories are compared case-insensitively.
- Categories have one feature marked as the 'head', which prints
differently than other features if it has a value. For example,
in the C{repr()} representation of a Category, the head goes to the
left, on the outside of the brackets. Subclasses of C{Category}
may change the feature name that is designated as the head, which is
_head by default.
- Subclasses of C{Category} may contain a list of I{required features},
which are names of features whose value is None if unspecified. A
Category lacking a feature that is required in it will not unify with
any Category that has that feature. If a required feature's value is
C{None}, it is considered to be not present. (Mixing different
subclasses of C{Category} is probably a bad idea.)
- C{True} and C{False} are allowed as values. A feature named C{foo}
with a value of C{True} is simply expressed as C{+foo}. Similarly, if
it is C{False}, it is expressed as C{-foo}.
"""
headname = '_head'
requiredFeatures = []
def __init__(self, **features):
self._features = features
self._required = self.__class__.requiredFeatures
for name in self._required:
if not self._features.has_key(name):
self._features[name] = None
self._hash = None
self._frozen = False
self._memorepr = None
def required_features(self):
"@return: A list of the names of all required features."
return self._required
def __cmp__(self, other):
return cmp(repr(self), repr(other))
def __div__(self, other):
"""
@return: A new Category based on this one, with its C{/} feature set to
C{other}.
"""
temp = self.deepcopy()
dict = temp._features
dict['/'] = other
return self.__class__(**dict)
def __eq__(self, other):
"""
@return: True if C{self} and C{other} assign the same value to
to every feature. In particular, return true if
C{self[M{p}]==other[M{p}]} for every feature path M{p} such
that C{self[M{p}]} or C{other[M{p}]} is a base value (i.e.,
not a nested Category).
@rtype: C{bool}
"""
# Get the result of equal_values, and make it a real boolean while
# we're at it.
if not other.__class__ == self.__class__: return False
if hash(self) != hash(other): return False
return (self.equal_values(other) == True)
def __ne__(self, other):
return not (self == other)
def __hash__(self):
if self._hash is not None: return self._hash
items = self._features.items()
items.sort()
return hash(tuple(items))
def freeze(self):
"""
Freezing a Category memoizes its hash value, to make comparisons on it
faster. After freezing, the Category and all its values are immutable.
@return: self
"""
for val in self._features.values():
if isinstance(val, Category) and not val.frozen():
val.freeze()
self._hash = hash(self)
self._memorepr = self._repr({}, {})
self._frozen = True
return self
def frozen(self):
"""
Returns whether this Category is frozen (immutable).
@rtype: C{bool}
"""
return self._frozen
def __setitem__(self, name, value):
        if self._frozen: raise TypeError("Cannot modify a frozen Category")
self._features[name] = value
def symbol(self):
"""
@return: the one-line string representation of the Category.
@rtype: C{str}
If you want the symbol for the head of the category, use C{head()}.
"""
return repr(self)
def head(self):
"""
@return: The head of this category (the value shown outside the
brackets in its string representation). If there is no head, returns
None.
@rtype: C{str} or C{None}
"""
return self._features.get(self.__class__.headname)
def deepcopy(self, memo=None):
"""
@return: A deep copy of C{self}.
"""
newcopy = self.__class__()
features = newcopy._features
# Fill out the features.
for (fname, fval) in self._features.items():
if isinstance(fval, FeatureStructure):
features[fname] = fval.deepcopy()
else:
features[fname] = fval
return newcopy
def reentrances(self):
return []
def feature_names(self):
"""
@return: a list of all features that have values.
"""
return filter(lambda x: not (x in self._required and self[x] is None),
self._features.keys())
def get_feature(self, *args):
try:
return self.__getitem__(*args)
except IndexError:
return StarValue()
def has_feature(self, name):
return (name in self.feature_names())
def remove_unbound_vars(self):
selfcopy = self.deepcopy()
selfcopy._remove_unbound_vars()
return selfcopy
def _remove_unbound_vars(self):
for (fname, fval) in self._features.items():
if isinstance(fval, FeatureVariable):
del self._features[fname]
elif isinstance(fval, Category):
fval._remove_unbound_vars()
# All this is unlikely to be necessary. All I've changed is to make
# strings case-insensitive.
def _destructively_unify(self, other, bindings, trace=False, depth=0):
"""
Attempt to unify C{self} and C{other} by modifying them
in-place. If the unification succeeds, then C{self} will
contain the unified value, and the value of C{other} is
undefined. If the unification fails, then a
_UnificationFailureError is raised, and the values of C{self}
and C{other} are undefined.
"""
if trace:
print ' '+'| '*depth+' /'+`self`
print ' '+'| '*depth+'|\\'+ `other`
for (fname, otherval) in other._features.items():
if trace:
trace_otherval = otherval
trace_selfval_defined = self._features.has_key(fname)
trace_selfval = self._features.get(fname)
if self._features.has_key(fname):
selfval = self._features[fname]
# If selfval or otherval is a bound variable, then
# replace it by the variable's bound value.
if isinstance(selfval, FeatureVariable):
selfval = bindings.lookup(selfval)
if isinstance(otherval, FeatureVariable):
otherval = bindings.lookup(otherval)
if trace:
print ' '+'| '*(depth+1)
print ' '+'%s| Unify %s feature:'%('| '*(depth),fname)
# Case 1: unify 2 feature structures (recursive case)
if (isinstance(selfval, FeatureStructure) and
isinstance(otherval, FeatureStructure)):
selfval._destructively_unify(otherval, bindings,
trace, depth+1)
# Case 2: unify 2 variables
elif (isinstance(selfval, FeatureVariable) and
isinstance(otherval, FeatureVariable)):
self._features[fname] = selfval.alias(otherval)
# Case 3: unify a variable with a value
elif isinstance(selfval, FeatureVariable):
bindings.bind(selfval, otherval)
elif isinstance(otherval, FeatureVariable):
bindings.bind(otherval, selfval)
# Case 4A: unify two strings.
elif isinstance(selfval, str) and isinstance(otherval, str)\
and selfval.upper() == otherval.upper(): pass
# Case 4: unify 2 non-equal values (failure case)
elif selfval != otherval:
if trace: print ' '+'| '*depth + 'X <-- FAIL'
raise FeatureStructure._UnificationFailureError()
# Case 5: unify 2 equal values
else: pass
if trace and not isinstance(selfval, FeatureStructure):
# apply_forwards to get reentrancy links right:
if isinstance(trace_selfval, FeatureStructure):
trace_selfval._apply_forwards({})
if isinstance(trace_otherval, FeatureStructure):
trace_otherval._apply_forwards({})
print ' '+'%s| /%r' % ('| '*(depth), trace_selfval)
print ' '+'%s| |\\%r' % ('| '*(depth), trace_otherval)
print ' '+'%s| +-->%r' % ('| '*(depth),
self._features[fname])
            # Case 6: self lacks this feature entirely; copy it from other
else:
self._features[fname] = otherval
if trace:
print ' '+'| '*depth+'|'
print ' '+'| '*depth+'+-->'+`self`
if len(bindings.bound_variables()) > 0:
print ' '+'| '*depth+' '+`bindings`
def __repr__(self):
"""
@return: A string representation of this feature structure.
"""
if self._memorepr is not None: return self._memorepr
else: return self._repr({}, {})
def _repr(self, reentrances, reentrance_ids):
segments = []
items = self.feature_names()
items.sort() # sorting note: keys are unique strings, so we'll
# never fall through to comparing values.
for fname in items:
if fname == self.__class__.headname: continue
fval = self[fname]
if isinstance(fval, bool):
if fval: segments.append('+%s' % fname)
else: segments.append('-%s' % fname)
elif not isinstance(fval, Category):
segments.append('%s=%r' % (fname, fval))
else:
fval_repr = fval._repr(reentrances, reentrance_ids)
segments.append('%s=%s' % (fname, fval_repr))
head = self._features.get(self.__class__.headname)
if head is None: head = ''
if head and not len(segments): return head
return '%s[%s]' % (head, ', '.join(segments))
def _str(self, reentrances, reentrance_ids):
# Special case:
if len(self.feature_names()) == 0:
return ['[]']
if self.feature_names() == [self.__class__.headname]:
return ['%s[]' % self[self.__class__.headname]]
# What's the longest feature name? Use this to align names.
maxfnamelen = max([len(k) for k in self.feature_names()])
lines = []
items = self.feature_names()
items.sort() # sorting note: keys are unique strings, so we'll
# never fall through to comparing values.
if self.__class__.headname in items:
items.remove(self.__class__.headname)
# items.insert(0, self.__class__.headname)
for fname in items:
fval = self[fname]
if not isinstance(fval, FeatureStructure):
# It's not a nested feature structure -- just print it.
lines.append('%s = %r' % (fname.ljust(maxfnamelen), fval))
else:
# It's a new feature structure. Separate it from
# other values by a blank line.
if lines and lines[-1] != '': lines.append('')
# Recursively print the feature's value (fval).
fval_lines = fval._str(reentrances, reentrance_ids)
# Indent each line to make room for fname.
fval_lines = [(' '*(maxfnamelen+3))+l for l in fval_lines]
# Pick which line we'll display fname on.
nameline = (len(fval_lines)-1)/2
fval_lines[nameline] = (
fname.ljust(maxfnamelen)+' ='+
fval_lines[nameline][maxfnamelen+2:])
# Add the feature structure to the output.
lines += fval_lines
# Separate FeatureStructures by a blank line.
lines.append('')
# Get rid of any excess blank lines.
if lines[-1] == '': lines = lines[:-1]
# Add brackets around everything.
headline = (len(lines) - 1)/2
if self.has_feature(self.__class__.headname):
head = self[self.__class__.headname]
else: head = ''
maxlen = max([len(line) for line in lines])
for l in range(len(lines)):
line = lines[l]
if l == headline:
lines[l] = ('%s[ %s%s ]' % (head, line, ' '*(maxlen-len(line))))
else:
lines[l] = ('%s[ %s%s ]' % (' '*len(head), line, ' '*(maxlen-len(line))))
return lines
# Regular expressions for parsing.
_PARSE_RE = {'name': re.compile(r'\s*([^\s\(\)"\'\-=,\[\]]+)\s*'),
'categorystart': re.compile(r'\s*([^\s\(\)"\'\-=,\[\]]*)\s*\['),
'bool': re.compile(r'\s*([-\+])'),
'ident': re.compile(r'\s*\((\d+)\)\s*'),
'arrow': re.compile(r'\s*->\s*'),
'assign': re.compile(r'\s*=\s*'),
'bracket': re.compile(r'\s*]\s*'),
'comma': re.compile(r'\s*,\s*'),
'none': re.compile(r'None(?=\s|\]|,)'),
'int': re.compile(r'-?\d+(?=\s|\]|,)'),
'var': re.compile(r'\?[a-zA-Z_][a-zA-Z0-9_]*'+'|'+
r'\?<[a-zA-Z_][a-zA-Z0-9_]*'+
r'(=[a-zA-Z_][a-zA-Z0-9_]*)*>'),
'symbol': re.compile(r'\w+'),
'disjunct': re.compile(r'\s*\|\s*'),
'whitespace': re.compile(r'\s*'),
'stringmarker': re.compile("['\"\\\\]")}
def parse(cls, s):
"""
Convert a string representation of a feature structure (as
displayed by C{repr}) into a C{Category}. This parse
imposes the following restrictions on the string
representation:
- Feature names cannot contain any of the following:
whitespace, parentheses, quote marks, equals signs,
dashes, plus signs, and square brackets.
- Only the following basic feature value are supported:
strings, integers, variables, C{None}, C{True}, C{False},
and unquoted alphanumeric strings.
- A feature named C{foo} with a value of C{True} or C{False} should
be expressed as C{+foo} or C{-foo} respectively, not as
C{foo=True} or C{foo=False}.
"""
try:
value, position = cls._parse(s, 0, {})
except ValueError, e:
estr = ('Error parsing field structure\n\n\t' +
s + '\n\t' + ' '*e.args[1] + '^ ' +
'Expected %s\n' % e.args[0])
raise ValueError, estr
if position != len(s): raise ValueError()
return value
def _parse(cls, s, position=0, reentrances=None):
"""
Helper function that parses a Category.
@param s: The string to parse.
@param position: The position in the string to start parsing.
@param reentrances: A dictionary from reentrance ids to values.
@return: A tuple (val, pos) of the feature structure created
by parsing and the position where the parsed feature
structure ends.
"""
# A set of useful regular expressions (precompiled)
_PARSE_RE = cls._PARSE_RE
# Find the head, if there is one.
match = _PARSE_RE['name'].match(s, position)
if match is not None:
head = match.group(1)
position = match.end()
else: head = None
# Check that the name is followed by an open bracket.
if position >= len(s) or s[position] != '[':
return cls(**{cls.headname: head}), position
position += 1
# If it's immediately followed by a close bracket, then just
# return an empty feature structure.
match = _PARSE_RE['bracket'].match(s, position)
if match is not None:
if head is None: return cls(), match.end()
else: return cls(**{cls.headname: head}), match.end()
# Build a list of the features defined by the structure.
# Each feature has one of the three following forms:
# name = value
# +name
# -name
features = {}
if head is not None: features[cls.headname] = head
while position < len(s):
# Use these variables to hold info about the feature:
name = target = val = None
# Is this a shorthand boolean value?
match = _PARSE_RE['bool'].match(s, position)
if match is not None:
if match.group(1) == '+': val = True
else: val = False
position = match.end()
# Find the next feature's name.
match = _PARSE_RE['name'].match(s, position)
if match is None: raise ValueError('feature name', position)
name = match.group(1)
position = match.end()
# If it's not a shorthand boolean, it must be an assignment.
if val is None:
match = _PARSE_RE['assign'].match(s, position)
if match is None: raise ValueError('equals sign', position)
position = match.end()
val, position = cls._parseval(s, position, reentrances)
features[name] = val
# Check for a close bracket
match = _PARSE_RE['bracket'].match(s, position)
if match is not None:
return cls(**features), match.end()
# Otherwise, there should be a comma
match = _PARSE_RE['comma'].match(s, position)
if match is None: raise ValueError('comma', position)
position = match.end()
# We never saw a close bracket.
raise ValueError('close bracket', position)
def _parseval(cls, s, position, reentrances):
"""
Helper function that parses a feature value. Currently
supports: None, bools, integers, variables, strings, nested feature
structures.
@param s: The string to parse.
@param position: The position in the string to start parsing.
@param reentrances: A dictionary from reentrance ids to values.
@return: A tuple (val, pos) of the value created by parsing
and the position where the parsed value ends.
"""
# A set of useful regular expressions (precompiled)
_PARSE_RE = cls._PARSE_RE
# End of string (error)
if position == len(s): raise ValueError('value', position)
# String value
if s[position] in "'\"":
start = position
quotemark = s[position:position+1]
position += 1
while 1:
match = _PARSE_RE['stringmarker'].search(s, position)
if not match: raise ValueError('close quote', position)
position = match.end()
if match.group() == '\\': position += 1
elif match.group() == quotemark:
return eval(s[start:position]), position
# Nested category
if _PARSE_RE['categorystart'].match(s, position) is not None:
return cls._parse(s, position, reentrances)
# Variable
match = _PARSE_RE['var'].match(s, position)
if match is not None:
return FeatureVariable.parse(match.group()), match.end()
# None
match = _PARSE_RE['none'].match(s, position)
if match is not None:
return None, match.end()
# Integer value
match = _PARSE_RE['int'].match(s, position)
if match is not None:
return int(match.group()), match.end()
# Alphanumeric symbol (must be checked after integer)
match = _PARSE_RE['symbol'].match(s, position)
if match is not None:
return cls(**{cls.headname: match.group()}), match.end()
# We don't know how to parse this value.
raise ValueError('value', position)
def parse_rules(cls, s):
"""
Parse a L{CFG} line involving C{Categories}. A line has this form:
C{lhs -> rhs | rhs | ...}
where C{lhs} is a Category, and each C{rhs} is a sequence of
Categories.
@returns: a list of C{CFGProductions}, one for each C{rhs}.
"""
_PARSE_RE = cls._PARSE_RE
position = 0
try:
lhs, position = cls._parse(s, position)
except ValueError, e:
estr = ('Error parsing field structure\n\n\t' +
s + '\n\t' + ' '*e.args[1] + '^ ' +
'Expected %s\n' % e.args[0])
raise ValueError, estr
lhs.freeze()
match = _PARSE_RE['arrow'].match(s, position)
if match is None: raise ValueError('arrow', position)
else: position = match.end()
rules = []
while position < len(s):
rhs = []
while position < len(s) and _PARSE_RE['disjunct'].match(s, position) is None:
try:
val, position = cls._parseval(s, position, {})
except ValueError, e:
estr = ('Error parsing field structure\n\n\t' +
s + '\n\t' + ' '*e.args[1] + '^ ' +
'Expected %s\n' % e.args[0])
raise ValueError, estr
if isinstance(val, Category): val.freeze()
rhs.append(val)
position = _PARSE_RE['whitespace'].match(s, position).end()
rules.append(CFGProduction(lhs, rhs))
if position < len(s):
match = _PARSE_RE['disjunct'].match(s, position)
position = match.end()
# Special case: if there's nothing after the arrow, it is one rule with
# an empty RHS, instead of no rules.
if len(rules) == 0: rules = [CFGProduction(lhs, ())]
return rules
_parseval=classmethod(_parseval)
_parse=classmethod(_parse)
parse=classmethod(parse)
parse_rules=classmethod(parse_rules)
class GrammarCategory(Category):
"""
A class of C{Category} for use in parsing.
The name of the head feature in a C{GrammarCategory} is C{pos} (for "part
of speech"). There is one required feature, C{/}, which is intended to
indicate a type of phrase that is missing from the grammatical structure.
In addition, GrammarCategories are displayed and parse differently, to be
consistent with NLP teaching materials: the value of the C{/} feature can
be written with a slash after the right bracket, so that the string
representation looks like: C{head[...]/value}.
An example of a C{GrammarCategory} is C{VP[+fin]/NP}, for a verb phrase
that is finite and has an omitted noun phrase inside it.
"""
headname = 'pos'
requiredFeatures = ['/']
def _repr(self, reentrances, reentrance_ids):
segments = []
items = self.feature_names()
items.sort() # sorting note: keys are unique strings, so we'll
# never fall through to comparing values.
for fname in items:
if fname == self.__class__.headname or fname == '/': continue
fval = self[fname]
if isinstance(fval, bool):
if fval: segments.append('+%s' % fname)
else: segments.append('-%s' % fname)
elif not isinstance(fval, Category):
segments.append('%s=%r' % (fname, fval))
else:
fval_repr = fval._repr(reentrances, reentrance_ids)
segments.append('%s=%s' % (fname, fval_repr))
head = self._features.get(self.__class__.headname)
if head is None: head = ''
if not len(segments): features = ''
else: features = "[%s]" % ', '.join(segments)
slash = self._features.get('/')
if slash is None: slash = ''
else: slash = '/%r' % slash
return '%s%s%s' % (head, features, slash)
_PARSE_RE = {'name': re.compile(r'\s*([^\s\(\)"\'\-=,\[\]/]+)\s*'),
'categorystart': re.compile(r'\s*([^\s\(\)"\'\-=,\[\]/]*)\s*(\[|/)'),
'bool': re.compile(r'\s*([-\+])'),
'ident': re.compile(r'\s*\((\d+)\)\s*'),
'arrow': re.compile(r'\s*->\s*'),
'assign': re.compile(r'\s*=\s*'),
'bracket': re.compile(r'\s*]\s*'),
'comma': re.compile(r'\s*,\s*'),
'none': re.compile(r'None(?=\s|\]|,)'),
'int': re.compile(r'-?\d+(?=\s|\]|,)'),
'var': re.compile(r'\?[a-zA-Z_][a-zA-Z0-9_]*'+'|'+
r'\?<[a-zA-Z_][a-zA-Z0-9_]*'+
r'(=[a-zA-Z_][a-zA-Z0-9_]*)*>'),
'symbol': re.compile(r'\w+'),
'disjunct': re.compile(r'\s*\|\s*'),
'slash': re.compile(r'\s*/\s*'),
'whitespace': re.compile(r'\s*'),
'stringmarker': re.compile("['\"\\\\]")}
def _parse(cls, s, position=0, reentrances=None):
# A set of useful regular expressions (precompiled)
_PARSE_RE = cls._PARSE_RE
features = {}
# Find the head, if there is one.
match = _PARSE_RE['name'].match(s, position)
if match is not None:
features[cls.headname] = match.group(1)
position = match.end()
# If the name is followed by an open bracket, start looking for
# features.
if position < len(s) and s[position] == '[':
position += 1
# Build a list of the features defined by the structure.
# Each feature has one of the three following forms:
# name = value
# +name
# -name
while True:
if not position < len(s):
raise ValueError('close bracket', position)
# Use these variables to hold info about the feature:
name = target = val = None
# Check for a close bracket at the beginning
match = _PARSE_RE['bracket'].match(s, position)
if match is not None:
position = match.end()
# Get out and check for a slash value.
break
# Is this a shorthand boolean value?
match = _PARSE_RE['bool'].match(s, position)
if match is not None:
if match.group(1) == '+': val = True
else: val = False
position = match.end()
# Find the next feature's name.
match = _PARSE_RE['name'].match(s, position)
if match is None: raise ValueError('feature name', position)
name = match.group(1)
position = match.end()
# If it's not a shorthand boolean, it must be an assignment.
if val is None:
match = _PARSE_RE['assign'].match(s, position)
if match is None: raise ValueError('equals sign', position)
position = match.end()
val, position = cls._parseval(s, position, reentrances)
features[name] = val
# Check for a close bracket
match = _PARSE_RE['bracket'].match(s, position)
if match is not None:
position = match.end()
# Get out and check for a slash value.
break
# Otherwise, there should be a comma
match = _PARSE_RE['comma'].match(s, position)
if match is None: raise ValueError('comma', position)
position = match.end()
# Check for a slash value
match = _PARSE_RE['slash'].match(s, position)
if match is not None:
position = match.end()
slash, position = cls._parseval(s, position, 0)
features['/'] = slash
return cls(**features), position
_parse = classmethod(_parse)
# vim:ts=4:sts=4:et:nowrap:
|
|
#
# SPDX-License-Identifier: MIT
# Copyright wtfsckgh@gmail.com
# Copyright iced contributors
#
import pytest
from iced_x86 import *
def test_register_ext():
assert RegisterExt.base(Register.DL) == Register.AL
assert RegisterExt.base(Register.R8W) == Register.AX
assert RegisterExt.base(Register.R15D) == Register.EAX
assert RegisterExt.base(Register.R13) == Register.RAX
assert RegisterExt.base(Register.FS) == Register.ES
assert RegisterExt.base(Register.XMM2) == Register.XMM0
assert RegisterExt.base(Register.YMM20) == Register.YMM0
assert RegisterExt.base(Register.ZMM31) == Register.ZMM0
assert RegisterExt.number(Register.DL) == 2
assert RegisterExt.number(Register.R15) == 15
assert RegisterExt.number(Register.YMM21) == 21
assert RegisterExt.full_register(Register.CL) == Register.RCX
assert RegisterExt.full_register(Register.DX) == Register.RDX
assert RegisterExt.full_register(Register.EBX) == Register.RBX
assert RegisterExt.full_register(Register.RSP) == Register.RSP
assert RegisterExt.full_register(Register.XMM2) == Register.ZMM2
assert RegisterExt.full_register(Register.YMM22) == Register.ZMM22
assert RegisterExt.full_register(Register.ZMM11) == Register.ZMM11
assert RegisterExt.full_register32(Register.CL) == Register.ECX
assert RegisterExt.full_register32(Register.DX) == Register.EDX
assert RegisterExt.full_register32(Register.EBX) == Register.EBX
assert RegisterExt.full_register32(Register.RSP) == Register.ESP
assert RegisterExt.full_register32(Register.XMM2) == Register.ZMM2
assert RegisterExt.full_register32(Register.YMM22) == Register.ZMM22
assert RegisterExt.full_register32(Register.ZMM11) == Register.ZMM11
assert RegisterExt.size(Register.DL) == 1
assert RegisterExt.size(Register.R8W) == 2
assert RegisterExt.size(Register.R15D) == 4
assert RegisterExt.size(Register.R13) == 8
assert RegisterExt.size(Register.FS) == 2
assert RegisterExt.size(Register.XMM2) == 16
assert RegisterExt.size(Register.YMM20) == 32
assert RegisterExt.size(Register.ZMM31) == 64
assert not RegisterExt.is_segment_register(Register.CX)
assert RegisterExt.is_segment_register(Register.GS)
assert RegisterExt.is_gpr(Register.CL)
assert RegisterExt.is_gpr(Register.DX)
assert RegisterExt.is_gpr(Register.ESP)
assert RegisterExt.is_gpr(Register.R15)
assert not RegisterExt.is_gpr(Register.ES)
assert RegisterExt.is_gpr8(Register.CL)
assert not RegisterExt.is_gpr8(Register.DX)
assert not RegisterExt.is_gpr8(Register.ESP)
assert not RegisterExt.is_gpr8(Register.R15)
assert not RegisterExt.is_gpr8(Register.ES)
assert not RegisterExt.is_gpr16(Register.CL)
assert RegisterExt.is_gpr16(Register.DX)
assert not RegisterExt.is_gpr16(Register.ESP)
assert not RegisterExt.is_gpr16(Register.R15)
assert not RegisterExt.is_gpr16(Register.ES)
assert not RegisterExt.is_gpr32(Register.CL)
assert not RegisterExt.is_gpr32(Register.DX)
assert RegisterExt.is_gpr32(Register.ESP)
assert not RegisterExt.is_gpr32(Register.R15)
assert not RegisterExt.is_gpr32(Register.ES)
assert not RegisterExt.is_gpr64(Register.CL)
assert not RegisterExt.is_gpr64(Register.DX)
assert not RegisterExt.is_gpr64(Register.ESP)
assert RegisterExt.is_gpr64(Register.R15)
assert not RegisterExt.is_gpr64(Register.ES)
assert not RegisterExt.is_vector_register(Register.CL)
assert RegisterExt.is_vector_register(Register.XMM1)
assert RegisterExt.is_vector_register(Register.YMM2)
assert RegisterExt.is_vector_register(Register.ZMM3)
assert not RegisterExt.is_xmm(Register.CL)
assert RegisterExt.is_xmm(Register.XMM1)
assert not RegisterExt.is_xmm(Register.YMM2)
assert not RegisterExt.is_xmm(Register.ZMM3)
assert not RegisterExt.is_ymm(Register.CL)
assert not RegisterExt.is_ymm(Register.XMM1)
assert RegisterExt.is_ymm(Register.YMM2)
assert not RegisterExt.is_ymm(Register.ZMM3)
assert not RegisterExt.is_zmm(Register.CL)
assert not RegisterExt.is_zmm(Register.XMM1)
assert not RegisterExt.is_zmm(Register.YMM2)
assert RegisterExt.is_zmm(Register.ZMM3)
assert not RegisterExt.is_ip(Register.CL)
assert RegisterExt.is_ip(Register.EIP)
assert RegisterExt.is_ip(Register.RIP)
assert not RegisterExt.is_k(Register.CL)
assert RegisterExt.is_k(Register.K3)
assert not RegisterExt.is_cr(Register.CL)
assert RegisterExt.is_cr(Register.CR3)
assert not RegisterExt.is_dr(Register.CL)
assert RegisterExt.is_dr(Register.DR3)
assert not RegisterExt.is_tr(Register.CL)
assert RegisterExt.is_tr(Register.TR3)
assert not RegisterExt.is_st(Register.CL)
assert RegisterExt.is_st(Register.ST3)
assert not RegisterExt.is_bnd(Register.CL)
assert RegisterExt.is_bnd(Register.BND3)
assert not RegisterExt.is_mm(Register.CL)
assert RegisterExt.is_mm(Register.MM3)
assert not RegisterExt.is_tmm(Register.CL)
assert RegisterExt.is_tmm(Register.TMM3)
@pytest.mark.parametrize("create", [
lambda register: RegisterExt.info(register),
lambda register: RegisterInfo(register),
])
def test_register_info(create):
info = create(Register.R10D)
assert info.register == Register.R10D
assert info.base == Register.EAX
assert info.number == 10
assert info.full_register == Register.R10
assert info.full_register32 == Register.R10D
assert info.size == 4
@pytest.mark.parametrize("create", [
lambda register: RegisterExt.info(register),
lambda register: RegisterInfo(register),
])
def test_register_invalid_arg(create):
with pytest.raises(ValueError):
create(1234)
def test_ext_base_invalid_arg():
with pytest.raises(ValueError):
RegisterExt.base(1234)
def test_ext_number_invalid_arg():
with pytest.raises(ValueError):
RegisterExt.number(1234)
def test_ext_full_register_invalid_arg():
with pytest.raises(ValueError):
RegisterExt.full_register(1234)
def test_ext_full_register32_invalid_arg():
with pytest.raises(ValueError):
RegisterExt.full_register32(1234)
def test_ext_size_invalid_arg():
with pytest.raises(ValueError):
RegisterExt.size(1234)
def test_ext_is_segment_register_invalid_arg():
with pytest.raises(ValueError):
RegisterExt.is_segment_register(1234)
def test_ext_is_gpr_invalid_arg():
with pytest.raises(ValueError):
RegisterExt.is_gpr(1234)
def test_ext_is_gpr8_invalid_arg():
with pytest.raises(ValueError):
RegisterExt.is_gpr8(1234)
def test_ext_is_gpr16_invalid_arg():
with pytest.raises(ValueError):
RegisterExt.is_gpr16(1234)
def test_ext_is_gpr32_invalid_arg():
with pytest.raises(ValueError):
RegisterExt.is_gpr32(1234)
def test_ext_is_gpr64_invalid_arg():
with pytest.raises(ValueError):
RegisterExt.is_gpr64(1234)
def test_ext_is_xmm_invalid_arg():
with pytest.raises(ValueError):
RegisterExt.is_xmm(1234)
def test_ext_is_ymm_invalid_arg():
with pytest.raises(ValueError):
RegisterExt.is_ymm(1234)
def test_ext_is_zmm_invalid_arg():
with pytest.raises(ValueError):
RegisterExt.is_zmm(1234)
def test_ext_is_vector_register_invalid_arg():
with pytest.raises(ValueError):
RegisterExt.is_vector_register(1234)
def test_ext_is_ip_invalid_arg():
with pytest.raises(ValueError):
RegisterExt.is_ip(1234)
def test_ext_is_k_invalid_arg():
with pytest.raises(ValueError):
RegisterExt.is_k(1234)
def test_ext_is_cr_invalid_arg():
with pytest.raises(ValueError):
RegisterExt.is_cr(1234)
def test_ext_is_dr_invalid_arg():
with pytest.raises(ValueError):
RegisterExt.is_dr(1234)
def test_ext_is_tr_invalid_arg():
with pytest.raises(ValueError):
RegisterExt.is_tr(1234)
def test_ext_is_st_invalid_arg():
with pytest.raises(ValueError):
RegisterExt.is_st(1234)
def test_ext_is_bnd_invalid_arg():
with pytest.raises(ValueError):
RegisterExt.is_bnd(1234)
def test_ext_is_mm_invalid_arg():
with pytest.raises(ValueError):
RegisterExt.is_mm(1234)
def test_ext_is_tmm_invalid_arg():
with pytest.raises(ValueError):
RegisterExt.is_tmm(1234)
|
|
import re
import platform
from decimal import Decimal
from urllib import quote
from PyQt4.QtGui import *
from PyQt4.QtCore import *
import PyQt4.QtCore as QtCore
import PyQt4.QtGui as QtGui
from electrum_cesc_gui.qt.qrcodewidget import QRCodeWidget
from electrum_cesc import bmp, pyqrnative, BasePlugin
from electrum_cesc.i18n import _
if platform.system() == 'Windows':
MONOSPACE_FONT = 'Lucida Console'
elif platform.system() == 'Darwin':
MONOSPACE_FONT = 'Monaco'
else:
MONOSPACE_FONT = 'monospace'
column_index = 4
class QR_Window(QWidget):
def __init__(self, exchanger):
QWidget.__init__(self)
self.exchanger = exchanger
self.setWindowTitle('Electrum - '+_('Invoice'))
self.setMinimumSize(800, 250)
self.address = ''
self.label = ''
self.amount = 0
self.setFocusPolicy(QtCore.Qt.NoFocus)
main_box = QHBoxLayout()
self.qrw = QRCodeWidget()
main_box.addWidget(self.qrw, 1)
vbox = QVBoxLayout()
main_box.addLayout(vbox)
self.address_label = QLabel("")
#self.address_label.setFont(QFont(MONOSPACE_FONT))
vbox.addWidget(self.address_label)
self.label_label = QLabel("")
vbox.addWidget(self.label_label)
self.amount_label = QLabel("")
vbox.addWidget(self.amount_label)
vbox.addStretch(1)
self.setLayout(main_box)
def set_content(self, addr, label, amount, currency):
self.address = addr
address_text = "<span style='font-size: 18pt'>%s</span>" % addr if addr else ""
self.address_label.setText(address_text)
if currency == 'CESC': currency = None
amount_text = ''
if amount:
if currency:
try:
self.amount = Decimal(amount) / self.exchanger.exchange(1, currency) if currency else amount
except Exception:
self.amount = None
else:
self.amount = Decimal(amount)
self.amount = self.amount.quantize(Decimal('1.0000'))
if currency:
amount_text += "<span style='font-size: 18pt'>%s %s</span><br/>" % (amount, currency)
amount_text += "<span style='font-size: 21pt'>%s</span> <span style='font-size: 16pt'>CESC</span> " % str(self.amount)
else:
self.amount = None
self.amount_label.setText(amount_text)
self.label = label
label_text = "<span style='font-size: 21pt'>%s</span>" % label if label else ""
self.label_label.setText(label_text)
msg = 'cryptoescudo:'+self.address
if self.amount is not None:
msg += '?amount=%s'%(str( self.amount))
if self.label is not None:
encoded_label = quote(self.label)
msg += '&label=%s'%(encoded_label)
elif self.label is not None:
encoded_label = quote(self.label)
msg += '?label=%s'%(encoded_label)
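        # Illustrative result (added; not in the original source): for an address with a
        # requested amount of 1.25 and label "Invoice - 0001", msg becomes something like
        #   cryptoescudo:<address>?amount=1.2500&label=Invoice%20-%200001
        # (the amount is quantized to four decimals above, and the label is URL-quoted).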
self.qrw.set_addr( msg )
class Plugin(BasePlugin):
def fullname(self):
return 'Point of Sale'
def description(self):
return _('Show QR code window and amounts requested for each address. Add menu item to request amount.')+_(' Note: This requires the exchange rate plugin to be installed.')
def init(self):
self.window = self.gui.main_window
self.wallet = self.window.wallet
self.qr_window = None
self.merchant_name = self.config.get('merchant_name', 'Invoice')
self.window.expert_mode = True
self.window.receive_list.setColumnCount(5)
self.window.receive_list.setHeaderLabels([ _('Address'), _('Label'), _('Balance'), _('Tx'), _('Request')])
self.requested_amounts = {}
self.toggle_QR_window(True)
def enable(self):
if not self.config.get('use_exchange_rate'):
self.gui.main_window.show_message("Please enable exchange rates first!")
return False
return BasePlugin.enable(self)
def load_wallet(self, wallet):
self.wallet = wallet
self.requested_amounts = self.wallet.storage.get('requested_amounts',{})
def close(self):
self.window.receive_list.setHeaderLabels([ _('Address'), _('Label'), _('Balance'), _('Tx')])
self.window.receive_list.setColumnCount(4)
for i,width in enumerate(self.window.column_widths['receive']):
self.window.receive_list.setColumnWidth(i, width)
self.toggle_QR_window(False)
def close_main_window(self):
if self.qr_window:
self.qr_window.close()
self.qr_window = None
def timer_actions(self):
if self.qr_window:
self.qr_window.qrw.update_qr()
def toggle_QR_window(self, show):
if show and not self.qr_window:
self.qr_window = QR_Window(self.gui.exchanger)
self.qr_window.setVisible(True)
self.qr_window_geometry = self.qr_window.geometry()
item = self.window.receive_list.currentItem()
if item:
                address = str(item.text(0))
label = self.wallet.labels.get(address)
amount, currency = self.requested_amounts.get(address, (None, None))
self.qr_window.set_content( address, label, amount, currency )
elif show and self.qr_window and not self.qr_window.isVisible():
self.qr_window.setVisible(True)
self.qr_window.setGeometry(self.qr_window_geometry)
elif not show and self.qr_window and self.qr_window.isVisible():
self.qr_window_geometry = self.qr_window.geometry()
self.qr_window.setVisible(False)
def update_receive_item(self, address, item):
try:
amount, currency = self.requested_amounts.get(address, (None, None))
except Exception:
print "cannot get requested amount", address, self.requested_amounts.get(address)
amount, currency = None, None
self.requested_amounts.pop(address)
amount_str = amount + (' ' + currency if currency else '') if amount is not None else ''
item.setData(column_index,0,amount_str)
def current_item_changed(self, a):
if not self.wallet:
return
if a is not None and self.qr_window and self.qr_window.isVisible():
address = str(a.text(0))
label = self.wallet.labels.get(address)
try:
amount, currency = self.requested_amounts.get(address, (None, None))
except Exception:
amount, currency = None, None
self.qr_window.set_content( address, label, amount, currency )
def item_changed(self, item, column):
if column != column_index:
return
address = str( item.text(0) )
text = str( item.text(column) )
try:
seq = self.wallet.get_address_index(address)
index = seq[1][1]
except Exception:
print "cannot get index"
return
text = text.strip().upper()
#print text
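# Accept an amount optionally followed by a supported currency code,
# e.g. '12', '0.5 EUR' or '3.25CESC'; anything that doesn't match clears the request.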
m = re.match('^(\d*(|\.\d*))\s*(|CESC|EUR|USD|GBP|CNY|JPY|RUB|BRL)$', text)
if m and m.group(1) and m.group(1)!='.':
amount = m.group(1)
currency = m.group(3)
if not currency:
currency = 'CESC'
else:
currency = currency.upper()
self.requested_amounts[address] = (amount, currency)
self.wallet.storage.put('requested_amounts', self.requested_amounts, True)
label = self.wallet.labels.get(address)
if label is None:
label = self.merchant_name + ' - %04d'%(index+1)
self.wallet.labels[address] = label
if self.qr_window:
self.qr_window.set_content( address, label, amount, currency )
else:
item.setText(column,'')
if address in self.requested_amounts:
self.requested_amounts.pop(address)
self.window.update_receive_item(self.window.receive_list.currentItem())
def edit_amount(self):
l = self.window.receive_list
item = l.currentItem()
item.setFlags(Qt.ItemIsEditable|Qt.ItemIsSelectable | Qt.ItemIsUserCheckable | Qt.ItemIsEnabled | Qt.ItemIsDragEnabled)
l.editItem( item, column_index )
item.setFlags(Qt.ItemIsSelectable | Qt.ItemIsUserCheckable | Qt.ItemIsEnabled | Qt.ItemIsDragEnabled)
print item
def receive_menu(self, menu, addr):
menu.addAction(_("Request amount"), self.edit_amount)
menu.addAction(_("Show Invoice"), lambda: self.toggle_QR_window(True))
|
|
from functools import partial
import threading
from PyQt5.Qt import Qt
from PyQt5.Qt import QGridLayout, QInputDialog, QPushButton
from PyQt5.Qt import QVBoxLayout, QLabel
from electrum.gui.qt.util import *
from electrum.i18n import _
from electrum.plugin import hook, DeviceMgr
from electrum.util import PrintError, UserCancelled, bh2u
from electrum.wallet import Wallet, Standard_Wallet
from ..hw_wallet.qt import QtHandlerBase, QtPluginBase
from ..hw_wallet.plugin import only_hook_if_libraries_available
from .keepkey import KeepKeyPlugin, TIM_NEW, TIM_RECOVER, TIM_MNEMONIC
PASSPHRASE_HELP_SHORT = _(
"Passphrases allow you to access new wallets, each "
"hidden behind a particular case-sensitive passphrase.")
PASSPHRASE_HELP = PASSPHRASE_HELP_SHORT + " " + _(
"You need to create a separate Electrum wallet for each passphrase "
"you use as they each generate different addresses. Changing "
"your passphrase does not lose other wallets, each is still "
"accessible behind its own passphrase.")
RECOMMEND_PIN = _(
"You should enable PIN protection. Your PIN is the only protection "
"for your bitcoins if your device is lost or stolen.")
PASSPHRASE_NOT_PIN = _(
"If you forget a passphrase you will be unable to access any "
"bitcoins in the wallet behind it. A passphrase is not a PIN. "
"Only change this if you are sure you understand it.")
CHARACTER_RECOVERY = (
"Use the recovery cipher shown on your device to input your seed words. "
"The cipher changes with every keypress.\n"
"After at most 4 letters the device will auto-complete a word.\n"
"Press SPACE or the Accept Word button to accept the device's auto-"
"completed word and advance to the next one.\n"
"Press BACKSPACE to go back a character or word.\n"
"Press ENTER or the Seed Entered button once the last word in your "
"seed is auto-completed.")
class CharacterButton(QPushButton):
def __init__(self, text=None):
QPushButton.__init__(self, text)
def keyPressEvent(self, event):
event.setAccepted(False) # Pass through Enter and Space keys
class CharacterDialog(WindowModalDialog):
def __init__(self, parent):
super(CharacterDialog, self).__init__(parent)
self.setWindowTitle(_("KeepKey Seed Recovery"))
self.character_pos = 0
self.word_pos = 0
self.loop = QEventLoop()
self.word_help = QLabel()
self.char_buttons = []
vbox = QVBoxLayout(self)
vbox.addWidget(WWLabel(CHARACTER_RECOVERY))
hbox = QHBoxLayout()
hbox.addWidget(self.word_help)
for i in range(4):
char_button = CharacterButton('*')
char_button.setMaximumWidth(36)
self.char_buttons.append(char_button)
hbox.addWidget(char_button)
self.accept_button = CharacterButton(_("Accept Word"))
self.accept_button.clicked.connect(partial(self.process_key, 32))
self.rejected.connect(partial(self.loop.exit, 1))
hbox.addWidget(self.accept_button)
hbox.addStretch(1)
vbox.addLayout(hbox)
self.finished_button = QPushButton(_("Seed Entered"))
self.cancel_button = QPushButton(_("Cancel"))
self.finished_button.clicked.connect(partial(self.process_key,
Qt.Key_Return))
self.cancel_button.clicked.connect(self.rejected)
buttons = Buttons(self.finished_button, self.cancel_button)
vbox.addSpacing(40)
vbox.addLayout(buttons)
self.refresh()
self.show()
def refresh(self):
self.word_help.setText("Enter seed word %2d:" % (self.word_pos + 1))
self.accept_button.setEnabled(self.character_pos >= 3)
self.finished_button.setEnabled((self.word_pos in (11, 17, 23)
and self.character_pos >= 3))
for n, button in enumerate(self.char_buttons):
button.setEnabled(n == self.character_pos)
if n == self.character_pos:
button.setFocus()
def is_valid_alpha_space(self, key):
# Auto-completion requires at least 3 characters
if key == ord(' ') and self.character_pos >= 3:
return True
# Firmware aborts protocol if the 5th character is non-space
if self.character_pos >= 4:
return False
return (key >= ord('a') and key <= ord('z')
or (key >= ord('A') and key <= ord('Z')))
def process_key(self, key):
self.data = None
if key == Qt.Key_Return and self.finished_button.isEnabled():
self.data = {'done': True}
elif key == Qt.Key_Backspace and (self.word_pos or self.character_pos):
self.data = {'delete': True}
elif self.is_valid_alpha_space(key):
self.data = {'character': chr(key).lower()}
if self.data:
self.loop.exit(0)
def keyPressEvent(self, event):
self.process_key(event.key())
if not self.data:
QDialog.keyPressEvent(self, event)
def get_char(self, word_pos, character_pos):
self.word_pos = word_pos
self.character_pos = character_pos
self.refresh()
if self.loop.exec_():
self.data = None # User cancelled
class QtHandler(QtHandlerBase):
char_signal = pyqtSignal(object)
pin_signal = pyqtSignal(object)
close_char_dialog_signal = pyqtSignal()
def __init__(self, win, pin_matrix_widget_class, device):
super(QtHandler, self).__init__(win, device)
self.char_signal.connect(self.update_character_dialog)
self.pin_signal.connect(self.pin_dialog)
self.close_char_dialog_signal.connect(self._close_char_dialog)
self.pin_matrix_widget_class = pin_matrix_widget_class
self.character_dialog = None
def get_char(self, msg):
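# Called from a non-GUI (device) thread: ask the GUI thread via char_signal to
# show or update the character dialog, then block until the handler sets self.done.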
self.done.clear()
self.char_signal.emit(msg)
self.done.wait()
data = self.character_dialog.data
if not data or 'done' in data:
self.close_char_dialog_signal.emit()
return data
def _close_char_dialog(self):
if self.character_dialog:
self.character_dialog.accept()
self.character_dialog = None
def get_pin(self, msg):
self.done.clear()
self.pin_signal.emit(msg)
self.done.wait()
return self.response
def pin_dialog(self, msg):
# Needed e.g. when resetting a device
self.clear_dialog()
dialog = WindowModalDialog(self.top_level_window(), _("Enter PIN"))
matrix = self.pin_matrix_widget_class()
vbox = QVBoxLayout()
vbox.addWidget(QLabel(msg))
vbox.addWidget(matrix)
vbox.addLayout(Buttons(CancelButton(dialog), OkButton(dialog)))
dialog.setLayout(vbox)
dialog.exec_()
self.response = str(matrix.get_value())
self.done.set()
def update_character_dialog(self, msg):
if not self.character_dialog:
self.character_dialog = CharacterDialog(self.top_level_window())
self.character_dialog.get_char(msg.word_pos, msg.character_pos)
self.done.set()
class QtPlugin(QtPluginBase):
# Derived classes must provide the following class-static variables:
# icon_file
# pin_matrix_widget_class
def create_handler(self, window):
return QtHandler(window, self.pin_matrix_widget_class(), self.device)
@only_hook_if_libraries_available
@hook
def receive_menu(self, menu, addrs, wallet):
if len(addrs) != 1:
return
for keystore in wallet.get_keystores():
if type(keystore) == self.keystore_class:
def show_address():
keystore.thread.add(partial(self.show_address, wallet, addrs[0], keystore))
device_name = "{} ({})".format(self.device, keystore.label)
menu.addAction(_("Show on {}").format(device_name), show_address)
def show_settings_dialog(self, window, keystore):
device_id = self.choose_device(window, keystore)
if device_id:
SettingsDialog(window, self, keystore, device_id).exec_()
def request_trezor_init_settings(self, wizard, method, device):
vbox = QVBoxLayout()
next_enabled = True
label = QLabel(_("Enter a label to name your device:"))
name = QLineEdit()
hl = QHBoxLayout()
hl.addWidget(label)
hl.addWidget(name)
hl.addStretch(1)
vbox.addLayout(hl)
def clean_text(widget):
text = widget.toPlainText().strip()
return ' '.join(text.split())
if method in [TIM_NEW, TIM_RECOVER]:
gb = QGroupBox()
hbox1 = QHBoxLayout()
gb.setLayout(hbox1)
# KeepKey recovery doesn't need a word count
if method == TIM_NEW:
vbox.addWidget(gb)
gb.setTitle(_("Select your seed length:"))
bg = QButtonGroup()
for i, count in enumerate([12, 18, 24]):
rb = QRadioButton(gb)
rb.setText(_("{} words").format(count))
bg.addButton(rb)
bg.setId(rb, i)
hbox1.addWidget(rb)
rb.setChecked(True)
cb_pin = QCheckBox(_('Enable PIN protection'))
cb_pin.setChecked(True)
else:
text = QTextEdit()
text.setMaximumHeight(60)
if method == TIM_MNEMONIC:
msg = _("Enter your BIP39 mnemonic:")
else:
msg = _("Enter the master private key beginning with xprv:")
def set_enabled():
from keystore import is_xprv
wizard.next_button.setEnabled(is_xprv(clean_text(text)))
text.textChanged.connect(set_enabled)
next_enabled = False
vbox.addWidget(QLabel(msg))
vbox.addWidget(text)
pin = QLineEdit()
pin.setValidator(QRegExpValidator(QRegExp('[1-9]{0,9}')))
pin.setMaximumWidth(100)
hbox_pin = QHBoxLayout()
hbox_pin.addWidget(QLabel(_("Enter your PIN (digits 1-9):")))
hbox_pin.addWidget(pin)
hbox_pin.addStretch(1)
if method in [TIM_NEW, TIM_RECOVER]:
vbox.addWidget(WWLabel(RECOMMEND_PIN))
vbox.addWidget(cb_pin)
else:
vbox.addLayout(hbox_pin)
passphrase_msg = WWLabel(PASSPHRASE_HELP_SHORT)
passphrase_warning = WWLabel(PASSPHRASE_NOT_PIN)
passphrase_warning.setStyleSheet("color: red")
cb_phrase = QCheckBox(_('Enable passphrases'))
cb_phrase.setChecked(False)
vbox.addWidget(passphrase_msg)
vbox.addWidget(passphrase_warning)
vbox.addWidget(cb_phrase)
wizard.exec_layout(vbox, next_enabled=next_enabled)
if method in [TIM_NEW, TIM_RECOVER]:
item = bg.checkedId()
pin = cb_pin.isChecked()
else:
item = ' '.join(str(clean_text(text)).split())
pin = str(pin.text())
return (item, name.text(), pin, cb_phrase.isChecked())
class Plugin(KeepKeyPlugin, QtPlugin):
icon_paired = ":icons/keepkey.png"
icon_unpaired = ":icons/keepkey_unpaired.png"
@classmethod
def pin_matrix_widget_class(self):
from keepkeylib.qt.pinmatrix import PinMatrixWidget
return PinMatrixWidget
class SettingsDialog(WindowModalDialog):
'''This dialog doesn't require a device be paired with a wallet.
We want users to be able to wipe a device even if they've forgotten
their PIN.'''
def __init__(self, window, plugin, keystore, device_id):
title = _("{} Settings").format(plugin.device)
super(SettingsDialog, self).__init__(window, title)
self.setMaximumWidth(540)
devmgr = plugin.device_manager()
config = devmgr.config
handler = keystore.handler
thread = keystore.thread
hs_rows, hs_cols = (64, 128)
def invoke_client(method, *args, **kw_args):
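# Run the named client method on the keystore thread; on success the returned
# device features are passed to update() so the dialog reflects the new state.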
unpair_after = kw_args.pop('unpair_after', False)
def task():
client = devmgr.client_by_id(device_id)
if not client:
raise RuntimeError("Device not connected")
if method:
getattr(client, method)(*args, **kw_args)
if unpair_after:
devmgr.unpair_id(device_id)
return client.features
thread.add(task, on_success=update)
def update(features):
self.features = features
set_label_enabled()
bl_hash = bh2u(features.bootloader_hash)
bl_hash = "\n".join([bl_hash[:32], bl_hash[32:]])
noyes = [_("No"), _("Yes")]
endis = [_("Enable Passphrases"), _("Disable Passphrases")]
disen = [_("Disabled"), _("Enabled")]
setchange = [_("Set a PIN"), _("Change PIN")]
version = "%d.%d.%d" % (features.major_version,
features.minor_version,
features.patch_version)
coins = ", ".join(coin.coin_name for coin in features.coins)
device_label.setText(features.label)
pin_set_label.setText(noyes[features.pin_protection])
passphrases_label.setText(disen[features.passphrase_protection])
bl_hash_label.setText(bl_hash)
label_edit.setText(features.label)
device_id_label.setText(features.device_id)
initialized_label.setText(noyes[features.initialized])
version_label.setText(version)
coins_label.setText(coins)
clear_pin_button.setVisible(features.pin_protection)
clear_pin_warning.setVisible(features.pin_protection)
pin_button.setText(setchange[features.pin_protection])
pin_msg.setVisible(not features.pin_protection)
passphrase_button.setText(endis[features.passphrase_protection])
language_label.setText(features.language)
def set_label_enabled():
label_apply.setEnabled(label_edit.text() != self.features.label)
def rename():
invoke_client('change_label', label_edit.text())
def toggle_passphrase():
title = _("Confirm Toggle Passphrase Protection")
currently_enabled = self.features.passphrase_protection
if currently_enabled:
msg = _("After disabling passphrases, you can only pair this "
"Electrum wallet if it had an empty passphrase. "
"If its passphrase was not empty, you will need to "
"create a new wallet with the install wizard. You "
"can use this wallet again at any time by re-enabling "
"passphrases and entering its passphrase.")
else:
msg = _("Your current Electrum wallet can only be used with "
"an empty passphrase. You must create a separate "
"wallet with the install wizard for other passphrases "
"as each one generates a new set of addresses.")
msg += "\n\n" + _("Are you sure you want to proceed?")
if not self.question(msg, title=title):
return
invoke_client('toggle_passphrase', unpair_after=currently_enabled)
def change_homescreen():
from PIL import Image # FIXME
dialog = QFileDialog(self, _("Choose Homescreen"))
filename, __ = dialog.getOpenFileName()
if filename:
im = Image.open(str(filename))
if im.size != (hs_cols, hs_rows):
raise Exception('Image must be 64 x 128 pixels')
im = im.convert('1')
pix = im.load()
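# Build a row-major bit string ('1' for lit pixels), then pack it eight pixels
# per byte with the first pixel in the most significant bit.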
img = ''
for j in range(hs_rows):
for i in range(hs_cols):
img += '1' if pix[i, j] else '0'
img = ''.join(chr(int(img[i:i + 8], 2))
for i in range(0, len(img), 8))
invoke_client('change_homescreen', img)
def clear_homescreen():
invoke_client('change_homescreen', '\x00')
def set_pin():
invoke_client('set_pin', remove=False)
def clear_pin():
invoke_client('set_pin', remove=True)
def wipe_device():
wallet = window.wallet
if wallet and sum(wallet.get_balance()):
title = _("Confirm Device Wipe")
msg = _("Are you SURE you want to wipe the device?\n"
"Your wallet still has bitcoins in it!")
if not self.question(msg, title=title,
icon=QMessageBox.Critical):
return
invoke_client('wipe_device', unpair_after=True)
def slider_moved():
mins = timeout_slider.sliderPosition()
timeout_minutes.setText(_("%2d minutes") % mins)
def slider_released():
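# The slider position is in minutes; convert to seconds for set_session_timeout
# (the reverse conversion, // 60, is used when initializing the slider below).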
config.set_session_timeout(timeout_slider.sliderPosition() * 60)
# Information tab
info_tab = QWidget()
info_layout = QVBoxLayout(info_tab)
info_glayout = QGridLayout()
info_glayout.setColumnStretch(2, 1)
device_label = QLabel()
pin_set_label = QLabel()
passphrases_label = QLabel()
version_label = QLabel()
device_id_label = QLabel()
bl_hash_label = QLabel()
bl_hash_label.setWordWrap(True)
coins_label = QLabel()
coins_label.setWordWrap(True)
language_label = QLabel()
initialized_label = QLabel()
rows = [
(_("Device Label"), device_label),
(_("PIN set"), pin_set_label),
(_("Passphrases"), passphrases_label),
(_("Firmware Version"), version_label),
(_("Device ID"), device_id_label),
(_("Bootloader Hash"), bl_hash_label),
(_("Supported Coins"), coins_label),
(_("Language"), language_label),
(_("Initialized"), initialized_label),
]
for row_num, (label, widget) in enumerate(rows):
info_glayout.addWidget(QLabel(label), row_num, 0)
info_glayout.addWidget(widget, row_num, 1)
info_layout.addLayout(info_glayout)
# Settings tab
settings_tab = QWidget()
settings_layout = QVBoxLayout(settings_tab)
settings_glayout = QGridLayout()
# Settings tab - Label
label_msg = QLabel(_("Name this {}. If you have multiple devices, "
"their labels help distinguish them.")
.format(plugin.device))
label_msg.setWordWrap(True)
label_label = QLabel(_("Device Label"))
label_edit = QLineEdit()
label_edit.setMinimumWidth(150)
label_edit.setMaxLength(plugin.MAX_LABEL_LEN)
label_apply = QPushButton(_("Apply"))
label_apply.clicked.connect(rename)
label_edit.textChanged.connect(set_label_enabled)
settings_glayout.addWidget(label_label, 0, 0)
settings_glayout.addWidget(label_edit, 0, 1, 1, 2)
settings_glayout.addWidget(label_apply, 0, 3)
settings_glayout.addWidget(label_msg, 1, 1, 1, -1)
# Settings tab - PIN
pin_label = QLabel(_("PIN Protection"))
pin_button = QPushButton()
pin_button.clicked.connect(set_pin)
settings_glayout.addWidget(pin_label, 2, 0)
settings_glayout.addWidget(pin_button, 2, 1)
pin_msg = QLabel(_("PIN protection is strongly recommended. "
"A PIN is your only protection against someone "
"stealing your bitcoins if they obtain physical "
"access to your {}.").format(plugin.device))
pin_msg.setWordWrap(True)
pin_msg.setStyleSheet("color: red")
settings_glayout.addWidget(pin_msg, 3, 1, 1, -1)
# Settings tab - Session Timeout
timeout_label = QLabel(_("Session Timeout"))
timeout_minutes = QLabel()
timeout_slider = QSlider(Qt.Horizontal)
timeout_slider.setRange(1, 60)
timeout_slider.setSingleStep(1)
timeout_slider.setTickInterval(5)
timeout_slider.setTickPosition(QSlider.TicksBelow)
timeout_slider.setTracking(True)
timeout_msg = QLabel(
_("Clear the session after the specified period "
"of inactivity. Once a session has timed out, "
"your PIN and passphrase (if enabled) must be "
"re-entered to use the device."))
timeout_msg.setWordWrap(True)
timeout_slider.setSliderPosition(config.get_session_timeout() // 60)
slider_moved()
timeout_slider.valueChanged.connect(slider_moved)
timeout_slider.sliderReleased.connect(slider_released)
settings_glayout.addWidget(timeout_label, 6, 0)
settings_glayout.addWidget(timeout_slider, 6, 1, 1, 3)
settings_glayout.addWidget(timeout_minutes, 6, 4)
settings_glayout.addWidget(timeout_msg, 7, 1, 1, -1)
settings_layout.addLayout(settings_glayout)
settings_layout.addStretch(1)
# Advanced tab
advanced_tab = QWidget()
advanced_layout = QVBoxLayout(advanced_tab)
advanced_glayout = QGridLayout()
# Advanced tab - clear PIN
clear_pin_button = QPushButton(_("Disable PIN"))
clear_pin_button.clicked.connect(clear_pin)
clear_pin_warning = QLabel(
_("If you disable your PIN, anyone with physical access to your "
"{} device can spend your bitcoins.").format(plugin.device))
clear_pin_warning.setWordWrap(True)
clear_pin_warning.setStyleSheet("color: red")
advanced_glayout.addWidget(clear_pin_button, 0, 2)
advanced_glayout.addWidget(clear_pin_warning, 1, 0, 1, 5)
# Advanced tab - toggle passphrase protection
passphrase_button = QPushButton()
passphrase_button.clicked.connect(toggle_passphrase)
passphrase_msg = WWLabel(PASSPHRASE_HELP)
passphrase_warning = WWLabel(PASSPHRASE_NOT_PIN)
passphrase_warning.setStyleSheet("color: red")
advanced_glayout.addWidget(passphrase_button, 3, 2)
advanced_glayout.addWidget(passphrase_msg, 4, 0, 1, 5)
advanced_glayout.addWidget(passphrase_warning, 5, 0, 1, 5)
# Advanced tab - wipe device
wipe_device_button = QPushButton(_("Wipe Device"))
wipe_device_button.clicked.connect(wipe_device)
wipe_device_msg = QLabel(
_("Wipe the device, removing all data from it. The firmware "
"is left unchanged."))
wipe_device_msg.setWordWrap(True)
wipe_device_warning = QLabel(
_("Only wipe a device if you have the recovery seed written down "
"and the device wallet(s) are empty, otherwise the bitcoins "
"will be lost forever."))
wipe_device_warning.setWordWrap(True)
wipe_device_warning.setStyleSheet("color: red")
advanced_glayout.addWidget(wipe_device_button, 6, 2)
advanced_glayout.addWidget(wipe_device_msg, 7, 0, 1, 5)
advanced_glayout.addWidget(wipe_device_warning, 8, 0, 1, 5)
advanced_layout.addLayout(advanced_glayout)
advanced_layout.addStretch(1)
tabs = QTabWidget(self)
tabs.addTab(info_tab, _("Information"))
tabs.addTab(settings_tab, _("Settings"))
tabs.addTab(advanced_tab, _("Advanced"))
dialog_vbox = QVBoxLayout(self)
dialog_vbox.addWidget(tabs)
dialog_vbox.addLayout(Buttons(CloseButton(self)))
# Update information
invoke_client(None)
|
|
import datetime
from unittest import mock, skipIf
from django.core.exceptions import FieldError
from django.db import NotSupportedError, connection
from django.db.models import (
Avg, BooleanField, Case, F, Func, Max, Min, OuterRef, Q, RowRange,
Subquery, Sum, Value, ValueRange, When, Window, WindowFrame,
)
from django.db.models.functions import (
CumeDist, DenseRank, ExtractYear, FirstValue, Lag, LastValue, Lead,
NthValue, Ntile, PercentRank, Rank, RowNumber, Upper,
)
from django.test import SimpleTestCase, TestCase, skipUnlessDBFeature
from .models import Employee
@skipUnlessDBFeature('supports_over_clause')
class WindowFunctionTests(TestCase):
@classmethod
def setUpTestData(cls):
Employee.objects.bulk_create([
Employee(name=e[0], salary=e[1], department=e[2], hire_date=e[3], age=e[4])
for e in [
('Jones', 45000, 'Accounting', datetime.datetime(2005, 11, 1), 20),
('Williams', 37000, 'Accounting', datetime.datetime(2009, 6, 1), 20),
('Jenson', 45000, 'Accounting', datetime.datetime(2008, 4, 1), 20),
('Adams', 50000, 'Accounting', datetime.datetime(2013, 7, 1), 50),
('Smith', 55000, 'Sales', datetime.datetime(2007, 6, 1), 30),
('Brown', 53000, 'Sales', datetime.datetime(2009, 9, 1), 30),
('Johnson', 40000, 'Marketing', datetime.datetime(2012, 3, 1), 30),
('Smith', 38000, 'Marketing', datetime.datetime(2009, 10, 1), 20),
('Wilkinson', 60000, 'IT', datetime.datetime(2011, 3, 1), 40),
('Moore', 34000, 'IT', datetime.datetime(2013, 8, 1), 40),
('Miller', 100000, 'Management', datetime.datetime(2005, 6, 1), 40),
('Johnson', 80000, 'Management', datetime.datetime(2005, 7, 1), 50),
]
])
def test_dense_rank(self):
qs = Employee.objects.annotate(rank=Window(
expression=DenseRank(),
order_by=ExtractYear(F('hire_date')).asc(),
))
self.assertQuerysetEqual(qs, [
('Jones', 45000, 'Accounting', datetime.date(2005, 11, 1), 1),
('Miller', 100000, 'Management', datetime.date(2005, 6, 1), 1),
('Johnson', 80000, 'Management', datetime.date(2005, 7, 1), 1),
('Smith', 55000, 'Sales', datetime.date(2007, 6, 1), 2),
('Jenson', 45000, 'Accounting', datetime.date(2008, 4, 1), 3),
('Smith', 38000, 'Marketing', datetime.date(2009, 10, 1), 4),
('Brown', 53000, 'Sales', datetime.date(2009, 9, 1), 4),
('Williams', 37000, 'Accounting', datetime.date(2009, 6, 1), 4),
('Wilkinson', 60000, 'IT', datetime.date(2011, 3, 1), 5),
('Johnson', 40000, 'Marketing', datetime.date(2012, 3, 1), 6),
('Moore', 34000, 'IT', datetime.date(2013, 8, 1), 7),
('Adams', 50000, 'Accounting', datetime.date(2013, 7, 1), 7),
], lambda entry: (entry.name, entry.salary, entry.department, entry.hire_date, entry.rank), ordered=False)
def test_department_salary(self):
qs = Employee.objects.annotate(department_sum=Window(
expression=Sum('salary'),
partition_by=F('department'),
order_by=[F('hire_date').asc()],
)).order_by('department', 'department_sum')
self.assertQuerysetEqual(qs, [
('Jones', 'Accounting', 45000, 45000),
('Jenson', 'Accounting', 45000, 90000),
('Williams', 'Accounting', 37000, 127000),
('Adams', 'Accounting', 50000, 177000),
('Wilkinson', 'IT', 60000, 60000),
('Moore', 'IT', 34000, 94000),
('Miller', 'Management', 100000, 100000),
('Johnson', 'Management', 80000, 180000),
('Smith', 'Marketing', 38000, 38000),
('Johnson', 'Marketing', 40000, 78000),
('Smith', 'Sales', 55000, 55000),
('Brown', 'Sales', 53000, 108000),
], lambda entry: (entry.name, entry.department, entry.salary, entry.department_sum))
def test_rank(self):
"""
Rank the employees based on the year they were hired. Since there
are multiple employees hired in different years, this will contain
gaps.
"""
qs = Employee.objects.annotate(rank=Window(
expression=Rank(),
order_by=ExtractYear(F('hire_date')).asc(),
))
self.assertQuerysetEqual(qs, [
('Jones', 45000, 'Accounting', datetime.date(2005, 11, 1), 1),
('Miller', 100000, 'Management', datetime.date(2005, 6, 1), 1),
('Johnson', 80000, 'Management', datetime.date(2005, 7, 1), 1),
('Smith', 55000, 'Sales', datetime.date(2007, 6, 1), 4),
('Jenson', 45000, 'Accounting', datetime.date(2008, 4, 1), 5),
('Smith', 38000, 'Marketing', datetime.date(2009, 10, 1), 6),
('Brown', 53000, 'Sales', datetime.date(2009, 9, 1), 6),
('Williams', 37000, 'Accounting', datetime.date(2009, 6, 1), 6),
('Wilkinson', 60000, 'IT', datetime.date(2011, 3, 1), 9),
('Johnson', 40000, 'Marketing', datetime.date(2012, 3, 1), 10),
('Moore', 34000, 'IT', datetime.date(2013, 8, 1), 11),
('Adams', 50000, 'Accounting', datetime.date(2013, 7, 1), 11),
], lambda entry: (entry.name, entry.salary, entry.department, entry.hire_date, entry.rank), ordered=False)
def test_row_number(self):
"""
The row number window function computes the number based on the order
in which the tuples were inserted. Some backends, such as Oracle,
require an ordering clause in the Window expression.
"""
qs = Employee.objects.annotate(row_number=Window(
expression=RowNumber(),
order_by=F('pk').asc(),
)).order_by('pk')
self.assertQuerysetEqual(qs, [
('Jones', 'Accounting', 1),
('Williams', 'Accounting', 2),
('Jenson', 'Accounting', 3),
('Adams', 'Accounting', 4),
('Smith', 'Sales', 5),
('Brown', 'Sales', 6),
('Johnson', 'Marketing', 7),
('Smith', 'Marketing', 8),
('Wilkinson', 'IT', 9),
('Moore', 'IT', 10),
('Miller', 'Management', 11),
('Johnson', 'Management', 12),
], lambda entry: (entry.name, entry.department, entry.row_number))
@skipIf(connection.vendor == 'oracle', "Oracle requires ORDER BY in row_number; ANSI SQL doesn't")
def test_row_number_no_ordering(self):
"""
The row number window function computes the number based on the order
in which the tuples were inserted.
"""
# Add a default ordering for consistent results across databases.
qs = Employee.objects.annotate(row_number=Window(
expression=RowNumber(),
)).order_by('pk')
self.assertQuerysetEqual(qs, [
('Jones', 'Accounting', 1),
('Williams', 'Accounting', 2),
('Jenson', 'Accounting', 3),
('Adams', 'Accounting', 4),
('Smith', 'Sales', 5),
('Brown', 'Sales', 6),
('Johnson', 'Marketing', 7),
('Smith', 'Marketing', 8),
('Wilkinson', 'IT', 9),
('Moore', 'IT', 10),
('Miller', 'Management', 11),
('Johnson', 'Management', 12),
], lambda entry: (entry.name, entry.department, entry.row_number))
def test_avg_salary_department(self):
qs = Employee.objects.annotate(avg_salary=Window(
expression=Avg('salary'),
order_by=F('department').asc(),
partition_by='department',
)).order_by('department', '-salary', 'name')
self.assertQuerysetEqual(qs, [
('Adams', 50000, 'Accounting', 44250.00),
('Jenson', 45000, 'Accounting', 44250.00),
('Jones', 45000, 'Accounting', 44250.00),
('Williams', 37000, 'Accounting', 44250.00),
('Wilkinson', 60000, 'IT', 47000.00),
('Moore', 34000, 'IT', 47000.00),
('Miller', 100000, 'Management', 90000.00),
('Johnson', 80000, 'Management', 90000.00),
('Johnson', 40000, 'Marketing', 39000.00),
('Smith', 38000, 'Marketing', 39000.00),
('Smith', 55000, 'Sales', 54000.00),
('Brown', 53000, 'Sales', 54000.00),
], transform=lambda row: (row.name, row.salary, row.department, row.avg_salary))
def test_lag(self):
"""
Annotate each employee with the previous salary in their department,
ordered by ascending salary and name (LAG with offset=1). The
lowest-paid employee in each department gets None.
"""
qs = Employee.objects.annotate(lag=Window(
expression=Lag(expression='salary', offset=1),
partition_by=F('department'),
order_by=[F('salary').asc(), F('name').asc()],
)).order_by('department', F('salary').asc(), F('name').asc())
self.assertQuerysetEqual(qs, [
('Williams', 37000, 'Accounting', None),
('Jenson', 45000, 'Accounting', 37000),
('Jones', 45000, 'Accounting', 45000),
('Adams', 50000, 'Accounting', 45000),
('Moore', 34000, 'IT', None),
('Wilkinson', 60000, 'IT', 34000),
('Johnson', 80000, 'Management', None),
('Miller', 100000, 'Management', 80000),
('Smith', 38000, 'Marketing', None),
('Johnson', 40000, 'Marketing', 38000),
('Brown', 53000, 'Sales', None),
('Smith', 55000, 'Sales', 53000),
], transform=lambda row: (row.name, row.salary, row.department, row.lag))
def test_first_value(self):
qs = Employee.objects.annotate(first_value=Window(
expression=FirstValue('salary'),
partition_by=F('department'),
order_by=F('hire_date').asc(),
)).order_by('department', 'hire_date')
self.assertQuerysetEqual(qs, [
('Jones', 45000, 'Accounting', datetime.date(2005, 11, 1), 45000),
('Jenson', 45000, 'Accounting', datetime.date(2008, 4, 1), 45000),
('Williams', 37000, 'Accounting', datetime.date(2009, 6, 1), 45000),
('Adams', 50000, 'Accounting', datetime.date(2013, 7, 1), 45000),
('Wilkinson', 60000, 'IT', datetime.date(2011, 3, 1), 60000),
('Moore', 34000, 'IT', datetime.date(2013, 8, 1), 60000),
('Miller', 100000, 'Management', datetime.date(2005, 6, 1), 100000),
('Johnson', 80000, 'Management', datetime.date(2005, 7, 1), 100000),
('Smith', 38000, 'Marketing', datetime.date(2009, 10, 1), 38000),
('Johnson', 40000, 'Marketing', datetime.date(2012, 3, 1), 38000),
('Smith', 55000, 'Sales', datetime.date(2007, 6, 1), 55000),
('Brown', 53000, 'Sales', datetime.date(2009, 9, 1), 55000),
], lambda row: (row.name, row.salary, row.department, row.hire_date, row.first_value))
def test_last_value(self):
qs = Employee.objects.annotate(last_value=Window(
expression=LastValue('hire_date'),
partition_by=F('department'),
order_by=F('hire_date').asc(),
))
self.assertQuerysetEqual(qs, [
('Adams', 'Accounting', datetime.date(2013, 7, 1), 50000, datetime.date(2013, 7, 1)),
('Jenson', 'Accounting', datetime.date(2008, 4, 1), 45000, datetime.date(2008, 4, 1)),
('Jones', 'Accounting', datetime.date(2005, 11, 1), 45000, datetime.date(2005, 11, 1)),
('Williams', 'Accounting', datetime.date(2009, 6, 1), 37000, datetime.date(2009, 6, 1)),
('Moore', 'IT', datetime.date(2013, 8, 1), 34000, datetime.date(2013, 8, 1)),
('Wilkinson', 'IT', datetime.date(2011, 3, 1), 60000, datetime.date(2011, 3, 1)),
('Miller', 'Management', datetime.date(2005, 6, 1), 100000, datetime.date(2005, 6, 1)),
('Johnson', 'Management', datetime.date(2005, 7, 1), 80000, datetime.date(2005, 7, 1)),
('Johnson', 'Marketing', datetime.date(2012, 3, 1), 40000, datetime.date(2012, 3, 1)),
('Smith', 'Marketing', datetime.date(2009, 10, 1), 38000, datetime.date(2009, 10, 1)),
('Brown', 'Sales', datetime.date(2009, 9, 1), 53000, datetime.date(2009, 9, 1)),
('Smith', 'Sales', datetime.date(2007, 6, 1), 55000, datetime.date(2007, 6, 1)),
], transform=lambda row: (row.name, row.department, row.hire_date, row.salary, row.last_value), ordered=False)
def test_function_list_of_values(self):
qs = Employee.objects.annotate(lead=Window(
expression=Lead(expression='salary'),
order_by=[F('hire_date').asc(), F('name').desc()],
partition_by='department',
)).values_list('name', 'salary', 'department', 'hire_date', 'lead') \
.order_by('department', F('hire_date').asc(), F('name').desc())
self.assertNotIn('GROUP BY', str(qs.query))
self.assertSequenceEqual(qs, [
('Jones', 45000, 'Accounting', datetime.date(2005, 11, 1), 45000),
('Jenson', 45000, 'Accounting', datetime.date(2008, 4, 1), 37000),
('Williams', 37000, 'Accounting', datetime.date(2009, 6, 1), 50000),
('Adams', 50000, 'Accounting', datetime.date(2013, 7, 1), None),
('Wilkinson', 60000, 'IT', datetime.date(2011, 3, 1), 34000),
('Moore', 34000, 'IT', datetime.date(2013, 8, 1), None),
('Miller', 100000, 'Management', datetime.date(2005, 6, 1), 80000),
('Johnson', 80000, 'Management', datetime.date(2005, 7, 1), None),
('Smith', 38000, 'Marketing', datetime.date(2009, 10, 1), 40000),
('Johnson', 40000, 'Marketing', datetime.date(2012, 3, 1), None),
('Smith', 55000, 'Sales', datetime.date(2007, 6, 1), 53000),
('Brown', 53000, 'Sales', datetime.date(2009, 9, 1), None),
])
def test_min_department(self):
"""An alternative way to specify a query for FirstValue."""
qs = Employee.objects.annotate(min_salary=Window(
expression=Min('salary'),
partition_by=F('department'),
order_by=[F('salary').asc(), F('name').asc()]
)).order_by('department', 'salary', 'name')
self.assertQuerysetEqual(qs, [
('Williams', 'Accounting', 37000, 37000),
('Jenson', 'Accounting', 45000, 37000),
('Jones', 'Accounting', 45000, 37000),
('Adams', 'Accounting', 50000, 37000),
('Moore', 'IT', 34000, 34000),
('Wilkinson', 'IT', 60000, 34000),
('Johnson', 'Management', 80000, 80000),
('Miller', 'Management', 100000, 80000),
('Smith', 'Marketing', 38000, 38000),
('Johnson', 'Marketing', 40000, 38000),
('Brown', 'Sales', 53000, 53000),
('Smith', 'Sales', 55000, 53000),
], lambda row: (row.name, row.department, row.salary, row.min_salary))
def test_max_per_year(self):
"""
Find the maximum salary awarded in the same year as the
employee was hired, regardless of the department.
"""
qs = Employee.objects.annotate(max_salary_year=Window(
expression=Max('salary'),
order_by=ExtractYear('hire_date').asc(),
partition_by=ExtractYear('hire_date')
)).order_by(ExtractYear('hire_date'), 'salary')
self.assertQuerysetEqual(qs, [
('Jones', 'Accounting', 45000, 2005, 100000),
('Johnson', 'Management', 80000, 2005, 100000),
('Miller', 'Management', 100000, 2005, 100000),
('Smith', 'Sales', 55000, 2007, 55000),
('Jenson', 'Accounting', 45000, 2008, 45000),
('Williams', 'Accounting', 37000, 2009, 53000),
('Smith', 'Marketing', 38000, 2009, 53000),
('Brown', 'Sales', 53000, 2009, 53000),
('Wilkinson', 'IT', 60000, 2011, 60000),
('Johnson', 'Marketing', 40000, 2012, 40000),
('Moore', 'IT', 34000, 2013, 50000),
('Adams', 'Accounting', 50000, 2013, 50000),
], lambda row: (row.name, row.department, row.salary, row.hire_date.year, row.max_salary_year))
def test_cume_dist(self):
"""
Compute the cumulative distribution for the employees based on the
salary in increasing order. Equal to rank/total number of rows (12).
"""
qs = Employee.objects.annotate(cume_dist=Window(
expression=CumeDist(),
order_by=F('salary').asc(),
)).order_by('salary', 'name')
# Round result of cume_dist because Oracle uses greater precision.
self.assertQuerysetEqual(qs, [
('Moore', 'IT', 34000, 0.0833333333),
('Williams', 'Accounting', 37000, 0.1666666667),
('Smith', 'Marketing', 38000, 0.25),
('Johnson', 'Marketing', 40000, 0.3333333333),
('Jenson', 'Accounting', 45000, 0.5),
('Jones', 'Accounting', 45000, 0.5),
('Adams', 'Accounting', 50000, 0.5833333333),
('Brown', 'Sales', 53000, 0.6666666667),
('Smith', 'Sales', 55000, 0.75),
('Wilkinson', 'IT', 60000, 0.8333333333),
('Johnson', 'Management', 80000, 0.9166666667),
('Miller', 'Management', 100000, 1),
], lambda row: (row.name, row.department, row.salary, round(row.cume_dist, 10)))
def test_nthvalue(self):
qs = Employee.objects.annotate(
nth_value=Window(expression=NthValue(
expression='salary', nth=2),
order_by=[F('hire_date').asc(), F('name').desc()],
partition_by=F('department'),
)
).order_by('department', 'hire_date', 'name')
self.assertQuerysetEqual(qs, [
('Jones', 'Accounting', datetime.date(2005, 11, 1), 45000, None),
('Jenson', 'Accounting', datetime.date(2008, 4, 1), 45000, 45000),
('Williams', 'Accounting', datetime.date(2009, 6, 1), 37000, 45000),
('Adams', 'Accounting', datetime.date(2013, 7, 1), 50000, 45000),
('Wilkinson', 'IT', datetime.date(2011, 3, 1), 60000, None),
('Moore', 'IT', datetime.date(2013, 8, 1), 34000, 34000),
('Miller', 'Management', datetime.date(2005, 6, 1), 100000, None),
('Johnson', 'Management', datetime.date(2005, 7, 1), 80000, 80000),
('Smith', 'Marketing', datetime.date(2009, 10, 1), 38000, None),
('Johnson', 'Marketing', datetime.date(2012, 3, 1), 40000, 40000),
('Smith', 'Sales', datetime.date(2007, 6, 1), 55000, None),
('Brown', 'Sales', datetime.date(2009, 9, 1), 53000, 53000),
], lambda row: (row.name, row.department, row.hire_date, row.salary, row.nth_value))
def test_lead(self):
"""
Determine what the next person hired in the same department makes.
Because the dataset is ambiguous, the name is also part of the
ordering clause. No default is provided, so None/NULL should be
returned.
"""
qs = Employee.objects.annotate(lead=Window(
expression=Lead(expression='salary'),
order_by=[F('hire_date').asc(), F('name').desc()],
partition_by='department',
)).order_by('department', F('hire_date').asc(), F('name').desc())
self.assertQuerysetEqual(qs, [
('Jones', 45000, 'Accounting', datetime.date(2005, 11, 1), 45000),
('Jenson', 45000, 'Accounting', datetime.date(2008, 4, 1), 37000),
('Williams', 37000, 'Accounting', datetime.date(2009, 6, 1), 50000),
('Adams', 50000, 'Accounting', datetime.date(2013, 7, 1), None),
('Wilkinson', 60000, 'IT', datetime.date(2011, 3, 1), 34000),
('Moore', 34000, 'IT', datetime.date(2013, 8, 1), None),
('Miller', 100000, 'Management', datetime.date(2005, 6, 1), 80000),
('Johnson', 80000, 'Management', datetime.date(2005, 7, 1), None),
('Smith', 38000, 'Marketing', datetime.date(2009, 10, 1), 40000),
('Johnson', 40000, 'Marketing', datetime.date(2012, 3, 1), None),
('Smith', 55000, 'Sales', datetime.date(2007, 6, 1), 53000),
('Brown', 53000, 'Sales', datetime.date(2009, 9, 1), None),
], transform=lambda row: (row.name, row.salary, row.department, row.hire_date, row.lead))
def test_lead_offset(self):
"""
Determine what the person hired two positions later in the same
department makes (LEAD with offset=2). None is returned when no such row exists.
"""
qs = Employee.objects.annotate(lead=Window(
expression=Lead('salary', offset=2),
partition_by='department',
order_by=F('hire_date').asc(),
))
self.assertQuerysetEqual(qs, [
('Jones', 45000, 'Accounting', datetime.date(2005, 11, 1), 37000),
('Jenson', 45000, 'Accounting', datetime.date(2008, 4, 1), 50000),
('Williams', 37000, 'Accounting', datetime.date(2009, 6, 1), None),
('Adams', 50000, 'Accounting', datetime.date(2013, 7, 1), None),
('Wilkinson', 60000, 'IT', datetime.date(2011, 3, 1), None),
('Moore', 34000, 'IT', datetime.date(2013, 8, 1), None),
('Johnson', 80000, 'Management', datetime.date(2005, 7, 1), None),
('Miller', 100000, 'Management', datetime.date(2005, 6, 1), None),
('Smith', 38000, 'Marketing', datetime.date(2009, 10, 1), None),
('Johnson', 40000, 'Marketing', datetime.date(2012, 3, 1), None),
('Smith', 55000, 'Sales', datetime.date(2007, 6, 1), None),
('Brown', 53000, 'Sales', datetime.date(2009, 9, 1), None),
], transform=lambda row: (row.name, row.salary, row.department, row.hire_date, row.lead),
ordered=False
)
@skipUnlessDBFeature('supports_default_in_lead_lag')
def test_lead_default(self):
qs = Employee.objects.annotate(lead_default=Window(
expression=Lead(expression='salary', offset=5, default=60000),
partition_by=F('department'),
order_by=F('department').asc(),
))
self.assertEqual(list(qs.values_list('lead_default', flat=True).distinct()), [60000])
def test_ntile(self):
"""
Compute the group for each of the employees across the entire company,
based on how high the salary is for them. There are twelve employees
so it divides evenly into four groups.
"""
qs = Employee.objects.annotate(ntile=Window(
expression=Ntile(num_buckets=4),
order_by=F('salary').desc(),
)).order_by('ntile', '-salary', 'name')
self.assertQuerysetEqual(qs, [
('Miller', 'Management', 100000, 1),
('Johnson', 'Management', 80000, 1),
('Wilkinson', 'IT', 60000, 1),
('Smith', 'Sales', 55000, 2),
('Brown', 'Sales', 53000, 2),
('Adams', 'Accounting', 50000, 2),
('Jenson', 'Accounting', 45000, 3),
('Jones', 'Accounting', 45000, 3),
('Johnson', 'Marketing', 40000, 3),
('Smith', 'Marketing', 38000, 4),
('Williams', 'Accounting', 37000, 4),
('Moore', 'IT', 34000, 4),
], lambda x: (x.name, x.department, x.salary, x.ntile))
def test_percent_rank(self):
"""
Calculate the percentage rank of the employees across the entire
company based on salary and name (in case of ambiguity).
"""
qs = Employee.objects.annotate(percent_rank=Window(
expression=PercentRank(),
order_by=[F('salary').asc(), F('name').asc()],
)).order_by('percent_rank')
# Round to account for precision differences among databases.
self.assertQuerysetEqual(qs, [
('Moore', 'IT', 34000, 0.0),
('Williams', 'Accounting', 37000, 0.0909090909),
('Smith', 'Marketing', 38000, 0.1818181818),
('Johnson', 'Marketing', 40000, 0.2727272727),
('Jenson', 'Accounting', 45000, 0.3636363636),
('Jones', 'Accounting', 45000, 0.4545454545),
('Adams', 'Accounting', 50000, 0.5454545455),
('Brown', 'Sales', 53000, 0.6363636364),
('Smith', 'Sales', 55000, 0.7272727273),
('Wilkinson', 'IT', 60000, 0.8181818182),
('Johnson', 'Management', 80000, 0.9090909091),
('Miller', 'Management', 100000, 1.0),
], transform=lambda row: (row.name, row.department, row.salary, round(row.percent_rank, 10)))
def test_nth_returns_null(self):
"""
Find the nth row of the data set. None is returned since there are
fewer than 20 rows in the test data.
"""
qs = Employee.objects.annotate(nth_value=Window(
expression=NthValue('salary', nth=20),
order_by=F('salary').asc()
))
self.assertEqual(list(qs.values_list('nth_value', flat=True).distinct()), [None])
def test_multiple_partitioning(self):
"""
Find the maximum salary for each department for people hired in the
same year.
"""
qs = Employee.objects.annotate(max=Window(
expression=Max('salary'),
partition_by=[F('department'), ExtractYear(F('hire_date'))],
)).order_by('department', 'hire_date', 'name')
self.assertQuerysetEqual(qs, [
('Jones', 45000, 'Accounting', datetime.date(2005, 11, 1), 45000),
('Jenson', 45000, 'Accounting', datetime.date(2008, 4, 1), 45000),
('Williams', 37000, 'Accounting', datetime.date(2009, 6, 1), 37000),
('Adams', 50000, 'Accounting', datetime.date(2013, 7, 1), 50000),
('Wilkinson', 60000, 'IT', datetime.date(2011, 3, 1), 60000),
('Moore', 34000, 'IT', datetime.date(2013, 8, 1), 34000),
('Miller', 100000, 'Management', datetime.date(2005, 6, 1), 100000),
('Johnson', 80000, 'Management', datetime.date(2005, 7, 1), 100000),
('Smith', 38000, 'Marketing', datetime.date(2009, 10, 1), 38000),
('Johnson', 40000, 'Marketing', datetime.date(2012, 3, 1), 40000),
('Smith', 55000, 'Sales', datetime.date(2007, 6, 1), 55000),
('Brown', 53000, 'Sales', datetime.date(2009, 9, 1), 53000),
], transform=lambda row: (row.name, row.salary, row.department, row.hire_date, row.max))
def test_multiple_ordering(self):
"""
Accumulate the salaries over the departments based on hire_date.
The name is part of the ordering clause so that, if two people were
hired on the same date in the same department, their running totals still differ deterministically.
"""
qs = Employee.objects.annotate(sum=Window(
expression=Sum('salary'),
partition_by='department',
order_by=[F('hire_date').asc(), F('name').asc()],
)).order_by('department', 'sum')
self.assertQuerysetEqual(qs, [
('Jones', 45000, 'Accounting', datetime.date(2005, 11, 1), 45000),
('Jenson', 45000, 'Accounting', datetime.date(2008, 4, 1), 90000),
('Williams', 37000, 'Accounting', datetime.date(2009, 6, 1), 127000),
('Adams', 50000, 'Accounting', datetime.date(2013, 7, 1), 177000),
('Wilkinson', 60000, 'IT', datetime.date(2011, 3, 1), 60000),
('Moore', 34000, 'IT', datetime.date(2013, 8, 1), 94000),
('Miller', 100000, 'Management', datetime.date(2005, 6, 1), 100000),
('Johnson', 80000, 'Management', datetime.date(2005, 7, 1), 180000),
('Smith', 38000, 'Marketing', datetime.date(2009, 10, 1), 38000),
('Johnson', 40000, 'Marketing', datetime.date(2012, 3, 1), 78000),
('Smith', 55000, 'Sales', datetime.date(2007, 6, 1), 55000),
('Brown', 53000, 'Sales', datetime.date(2009, 9, 1), 108000),
], transform=lambda row: (row.name, row.salary, row.department, row.hire_date, row.sum))
def test_related_ordering_with_count(self):
qs = Employee.objects.annotate(department_sum=Window(
expression=Sum('salary'),
partition_by=F('department'),
order_by=['classification__code'],
))
self.assertEqual(qs.count(), 12)
@skipUnlessDBFeature('supports_frame_range_fixed_distance')
def test_range_n_preceding_and_following(self):
qs = Employee.objects.annotate(sum=Window(
expression=Sum('salary'),
order_by=F('salary').asc(),
partition_by='department',
frame=ValueRange(start=-2, end=2),
))
self.assertIn('RANGE BETWEEN 2 PRECEDING AND 2 FOLLOWING', str(qs.query))
self.assertQuerysetEqual(qs, [
('Williams', 37000, 'Accounting', datetime.date(2009, 6, 1), 37000),
('Jones', 45000, 'Accounting', datetime.date(2005, 11, 1), 90000),
('Jenson', 45000, 'Accounting', datetime.date(2008, 4, 1), 90000),
('Adams', 50000, 'Accounting', datetime.date(2013, 7, 1), 50000),
('Brown', 53000, 'Sales', datetime.date(2009, 9, 1), 53000),
('Smith', 55000, 'Sales', datetime.date(2007, 6, 1), 55000),
('Johnson', 40000, 'Marketing', datetime.date(2012, 3, 1), 40000),
('Smith', 38000, 'Marketing', datetime.date(2009, 10, 1), 38000),
('Wilkinson', 60000, 'IT', datetime.date(2011, 3, 1), 60000),
('Moore', 34000, 'IT', datetime.date(2013, 8, 1), 34000),
('Miller', 100000, 'Management', datetime.date(2005, 6, 1), 100000),
('Johnson', 80000, 'Management', datetime.date(2005, 7, 1), 80000),
], transform=lambda row: (row.name, row.salary, row.department, row.hire_date, row.sum), ordered=False)
def test_range_unbound(self):
"""A query with RANGE BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING."""
qs = Employee.objects.annotate(sum=Window(
expression=Sum('salary'),
partition_by='age',
order_by=[F('age').asc()],
frame=ValueRange(start=None, end=None),
)).order_by('department', 'hire_date', 'name')
self.assertIn('RANGE BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING', str(qs.query))
self.assertQuerysetEqual(qs, [
('Jones', 'Accounting', 45000, datetime.date(2005, 11, 1), 165000),
('Jenson', 'Accounting', 45000, datetime.date(2008, 4, 1), 165000),
('Williams', 'Accounting', 37000, datetime.date(2009, 6, 1), 165000),
('Adams', 'Accounting', 50000, datetime.date(2013, 7, 1), 130000),
('Wilkinson', 'IT', 60000, datetime.date(2011, 3, 1), 194000),
('Moore', 'IT', 34000, datetime.date(2013, 8, 1), 194000),
('Miller', 'Management', 100000, datetime.date(2005, 6, 1), 194000),
('Johnson', 'Management', 80000, datetime.date(2005, 7, 1), 130000),
('Smith', 'Marketing', 38000, datetime.date(2009, 10, 1), 165000),
('Johnson', 'Marketing', 40000, datetime.date(2012, 3, 1), 148000),
('Smith', 'Sales', 55000, datetime.date(2007, 6, 1), 148000),
('Brown', 'Sales', 53000, datetime.date(2009, 9, 1), 148000)
], transform=lambda row: (row.name, row.department, row.salary, row.hire_date, row.sum))
@skipIf(
connection.vendor == 'sqlite' and connection.Database.sqlite_version_info < (3, 27),
'Nondeterministic failure on SQLite < 3.27.'
)
def test_subquery_row_range_rank(self):
qs = Employee.objects.annotate(
highest_avg_salary_date=Subquery(
Employee.objects.filter(
department=OuterRef('department'),
).annotate(
avg_salary=Window(
expression=Avg('salary'),
order_by=[F('hire_date').asc()],
frame=RowRange(start=-1, end=1),
),
).order_by('-avg_salary', 'hire_date').values('hire_date')[:1],
),
).order_by('department', 'name')
self.assertQuerysetEqual(qs, [
('Adams', 'Accounting', datetime.date(2005, 11, 1)),
('Jenson', 'Accounting', datetime.date(2005, 11, 1)),
('Jones', 'Accounting', datetime.date(2005, 11, 1)),
('Williams', 'Accounting', datetime.date(2005, 11, 1)),
('Moore', 'IT', datetime.date(2011, 3, 1)),
('Wilkinson', 'IT', datetime.date(2011, 3, 1)),
('Johnson', 'Management', datetime.date(2005, 6, 1)),
('Miller', 'Management', datetime.date(2005, 6, 1)),
('Johnson', 'Marketing', datetime.date(2009, 10, 1)),
('Smith', 'Marketing', datetime.date(2009, 10, 1)),
('Brown', 'Sales', datetime.date(2007, 6, 1)),
('Smith', 'Sales', datetime.date(2007, 6, 1)),
], transform=lambda row: (row.name, row.department, row.highest_avg_salary_date))
def test_row_range_rank(self):
"""
A query with ROWS BETWEEN UNBOUNDED PRECEDING AND 3 FOLLOWING.
The resulting sum is the sum of the three next (if they exist) and all
previous rows according to the ordering clause.
"""
qs = Employee.objects.annotate(sum=Window(
expression=Sum('salary'),
order_by=[F('hire_date').asc(), F('name').desc()],
frame=RowRange(start=None, end=3),
)).order_by('sum', 'hire_date')
self.assertIn('ROWS BETWEEN UNBOUNDED PRECEDING AND 3 FOLLOWING', str(qs.query))
self.assertQuerysetEqual(qs, [
('Miller', 100000, 'Management', datetime.date(2005, 6, 1), 280000),
('Johnson', 80000, 'Management', datetime.date(2005, 7, 1), 325000),
('Jones', 45000, 'Accounting', datetime.date(2005, 11, 1), 362000),
('Smith', 55000, 'Sales', datetime.date(2007, 6, 1), 415000),
('Jenson', 45000, 'Accounting', datetime.date(2008, 4, 1), 453000),
('Williams', 37000, 'Accounting', datetime.date(2009, 6, 1), 513000),
('Brown', 53000, 'Sales', datetime.date(2009, 9, 1), 553000),
('Smith', 38000, 'Marketing', datetime.date(2009, 10, 1), 603000),
('Wilkinson', 60000, 'IT', datetime.date(2011, 3, 1), 637000),
('Johnson', 40000, 'Marketing', datetime.date(2012, 3, 1), 637000),
('Adams', 50000, 'Accounting', datetime.date(2013, 7, 1), 637000),
('Moore', 34000, 'IT', datetime.date(2013, 8, 1), 637000),
], transform=lambda row: (row.name, row.salary, row.department, row.hire_date, row.sum))
@skipUnlessDBFeature('can_distinct_on_fields')
def test_distinct_window_function(self):
"""
Window functions are not aggregates, and hence a query to filter out
duplicates may be useful.
"""
qs = Employee.objects.annotate(
sum=Window(
expression=Sum('salary'),
partition_by=ExtractYear('hire_date'),
order_by=ExtractYear('hire_date')
),
year=ExtractYear('hire_date'),
).values('year', 'sum').distinct('year').order_by('year')
results = [
{'year': 2005, 'sum': 225000}, {'year': 2007, 'sum': 55000},
{'year': 2008, 'sum': 45000}, {'year': 2009, 'sum': 128000},
{'year': 2011, 'sum': 60000}, {'year': 2012, 'sum': 40000},
{'year': 2013, 'sum': 84000},
]
for idx, val in enumerate(results):
with self.subTest(result=val):
self.assertEqual(qs[idx], val)
def test_fail_update(self):
"""Window expressions can't be used in an UPDATE statement."""
msg = (
'Window expressions are not allowed in this query (salary=<Window: '
'Max(Col(expressions_window_employee, expressions_window.Employee.salary)) '
'OVER (PARTITION BY Col(expressions_window_employee, '
'expressions_window.Employee.department))>).'
)
with self.assertRaisesMessage(FieldError, msg):
Employee.objects.filter(department='Management').update(
salary=Window(expression=Max('salary'), partition_by='department'),
)
def test_fail_insert(self):
"""Window expressions can't be used in an INSERT statement."""
msg = (
'Window expressions are not allowed in this query (salary=<Window: '
'Sum(Value(10000), order_by=OrderBy(F(pk), descending=False)) OVER ()'
)
with self.assertRaisesMessage(FieldError, msg):
Employee.objects.create(
name='Jameson', department='Management', hire_date=datetime.date(2007, 7, 1),
salary=Window(expression=Sum(Value(10000), order_by=F('pk').asc())),
)
def test_window_expression_within_subquery(self):
subquery_qs = Employee.objects.annotate(
highest=Window(FirstValue('id'), partition_by=F('department'), order_by=F('salary').desc())
).values('highest')
highest_salary = Employee.objects.filter(pk__in=subquery_qs)
self.assertCountEqual(highest_salary.values('department', 'salary'), [
{'department': 'Accounting', 'salary': 50000},
{'department': 'Sales', 'salary': 55000},
{'department': 'Marketing', 'salary': 40000},
{'department': 'IT', 'salary': 60000},
{'department': 'Management', 'salary': 100000}
])
def test_invalid_start_value_range(self):
msg = "start argument must be a negative integer, zero, or None, but got '3'."
with self.assertRaisesMessage(ValueError, msg):
list(Employee.objects.annotate(test=Window(
expression=Sum('salary'),
order_by=F('hire_date').asc(),
frame=ValueRange(start=3),
)))
def test_invalid_end_value_range(self):
msg = "end argument must be a positive integer, zero, or None, but got '-3'."
with self.assertRaisesMessage(ValueError, msg):
list(Employee.objects.annotate(test=Window(
expression=Sum('salary'),
order_by=F('hire_date').asc(),
frame=ValueRange(end=-3),
)))
def test_invalid_type_end_value_range(self):
msg = "end argument must be a positive integer, zero, or None, but got 'a'."
with self.assertRaisesMessage(ValueError, msg):
list(Employee.objects.annotate(test=Window(
expression=Sum('salary'),
order_by=F('hire_date').asc(),
frame=ValueRange(end='a'),
)))
def test_invalid_type_start_value_range(self):
msg = "start argument must be a negative integer, zero, or None, but got 'a'."
with self.assertRaisesMessage(ValueError, msg):
list(Employee.objects.annotate(test=Window(
expression=Sum('salary'),
frame=ValueRange(start='a'),
)))
def test_invalid_type_end_row_range(self):
msg = "end argument must be a positive integer, zero, or None, but got 'a'."
with self.assertRaisesMessage(ValueError, msg):
list(Employee.objects.annotate(test=Window(
expression=Sum('salary'),
frame=RowRange(end='a'),
)))
@skipUnlessDBFeature('only_supports_unbounded_with_preceding_and_following')
def test_unsupported_range_frame_start(self):
msg = '%s only supports UNBOUNDED together with PRECEDING and FOLLOWING.' % connection.display_name
with self.assertRaisesMessage(NotSupportedError, msg):
list(Employee.objects.annotate(test=Window(
expression=Sum('salary'),
order_by=F('hire_date').asc(),
frame=ValueRange(start=-1),
)))
@skipUnlessDBFeature('only_supports_unbounded_with_preceding_and_following')
def test_unsupported_range_frame_end(self):
msg = '%s only supports UNBOUNDED together with PRECEDING and FOLLOWING.' % connection.display_name
with self.assertRaisesMessage(NotSupportedError, msg):
list(Employee.objects.annotate(test=Window(
expression=Sum('salary'),
order_by=F('hire_date').asc(),
frame=ValueRange(end=1),
)))
def test_invalid_type_start_row_range(self):
msg = "start argument must be a negative integer, zero, or None, but got 'a'."
with self.assertRaisesMessage(ValueError, msg):
list(Employee.objects.annotate(test=Window(
expression=Sum('salary'),
order_by=F('hire_date').asc(),
frame=RowRange(start='a'),
)))
class WindowUnsupportedTests(TestCase):
def test_unsupported_backend(self):
msg = 'This backend does not support window expressions.'
with mock.patch.object(connection.features, 'supports_over_clause', False):
with self.assertRaisesMessage(NotSupportedError, msg):
Employee.objects.annotate(dense_rank=Window(expression=DenseRank())).get()
class NonQueryWindowTests(SimpleTestCase):
def test_window_repr(self):
self.assertEqual(
repr(Window(expression=Sum('salary'), partition_by='department')),
'<Window: Sum(F(salary)) OVER (PARTITION BY F(department))>'
)
self.assertEqual(
repr(Window(expression=Avg('salary'), order_by=F('department').asc())),
'<Window: Avg(F(salary)) OVER (ORDER BY OrderBy(F(department), descending=False))>'
)
def test_window_frame_repr(self):
self.assertEqual(
repr(RowRange(start=-1)),
'<RowRange: ROWS BETWEEN 1 PRECEDING AND UNBOUNDED FOLLOWING>'
)
self.assertEqual(
repr(ValueRange(start=None, end=1)),
'<ValueRange: RANGE BETWEEN UNBOUNDED PRECEDING AND 1 FOLLOWING>'
)
self.assertEqual(
repr(ValueRange(start=0, end=0)),
'<ValueRange: RANGE BETWEEN CURRENT ROW AND CURRENT ROW>'
)
self.assertEqual(
repr(RowRange(start=0, end=0)),
'<RowRange: ROWS BETWEEN CURRENT ROW AND CURRENT ROW>'
)
def test_empty_group_by_cols(self):
window = Window(expression=Sum('pk'))
self.assertEqual(window.get_group_by_cols(), [])
self.assertFalse(window.contains_aggregate)
def test_frame_empty_group_by_cols(self):
frame = WindowFrame()
self.assertEqual(frame.get_group_by_cols(), [])
def test_frame_window_frame_notimplemented(self):
frame = WindowFrame()
msg = 'Subclasses must implement window_frame_start_end().'
with self.assertRaisesMessage(NotImplementedError, msg):
frame.window_frame_start_end(None, None, None)
def test_invalid_filter(self):
msg = 'Window is disallowed in the filter clause'
qs = Employee.objects.annotate(dense_rank=Window(expression=DenseRank()))
with self.assertRaisesMessage(NotSupportedError, msg):
qs.filter(dense_rank__gte=1)
with self.assertRaisesMessage(NotSupportedError, msg):
qs.annotate(inc_rank=F('dense_rank') + Value(1)).filter(inc_rank__gte=1)
with self.assertRaisesMessage(NotSupportedError, msg):
qs.filter(id=F('dense_rank'))
with self.assertRaisesMessage(NotSupportedError, msg):
qs.filter(id=Func('dense_rank', 2, function='div'))
with self.assertRaisesMessage(NotSupportedError, msg):
qs.annotate(total=Sum('dense_rank', filter=Q(name='Jones'))).filter(total=1)
def test_conditional_annotation(self):
qs = Employee.objects.annotate(
dense_rank=Window(expression=DenseRank()),
).annotate(
equal=Case(
When(id=F('dense_rank'), then=Value(True)),
default=Value(False),
output_field=BooleanField(),
),
)
# The SQL standard disallows referencing window functions in the WHERE
# clause.
msg = 'Window is disallowed in the filter clause'
with self.assertRaisesMessage(NotSupportedError, msg):
qs.filter(equal=True)
def test_invalid_order_by(self):
msg = 'order_by must be either an Expression or a sequence of expressions'
with self.assertRaisesMessage(ValueError, msg):
Window(expression=Sum('power'), order_by='-horse')
def test_invalid_source_expression(self):
msg = "Expression 'Upper' isn't compatible with OVER clauses."
with self.assertRaisesMessage(ValueError, msg):
Window(expression=Upper('name'))
|
|
"""DBF record definition.
"""
"""History (most recent first):
11-feb-2007 [als] __repr__: added special case for invalid field values
10-feb-2007 [als] added .rawFromStream()
30-oct-2006 [als] fix record length in .fromStream()
04-jul-2006 [als] added export declaration
20-dec-2005 [yc] DbfRecord.write() -> DbfRecord._write();
added delete() method.
16-dec-2005 [yc] record definition moved from `dbf`.
"""
__version__ = "$Revision: 1.7 $"[11:-2]
__date__ = "$Date: 2007/02/11 09:05:49 $"[7:-2]
__all__ = ["DbfRecord"]
import sys
from . import utils
class DbfRecord:
"""DBF record.
    Instances of this class shouldn't be created manually;
    use `dbf.Dbf.newRecord` instead.
    The class implements the mapping/sequence interface, so
    fields can be accessed via their names or indexes
    (names are the preferred way to access fields).
    Hint:
        Use the `store` method to save a modified record.
Examples:
Add new record to the database:
db = Dbf(filename)
rec = db.newRecord()
rec["FIELD1"] = value1
rec["FIELD2"] = value2
rec.store()
        Or do the same, but modify an existing
        record (the one at index 2 in this case):
db = Dbf(filename)
rec = db[2]
rec["FIELD1"] = value1
rec["FIELD2"] = value2
rec.store()
"""
__slots__ = "dbf", "index", "deleted", "fieldData"
# creation and initialization
def __init__(self, dbf, index=None, deleted=False, data=None):
"""Instance initialization.
Arguments:
dbf:
A `Dbf.Dbf` instance this record belongs to.
index:
An integer record index or None. If this value is
                None, the record will be appended to the DBF.
deleted:
Boolean flag indicating whether this record
is a deleted record.
data:
                A sequence or None. This is the data for the fields.
If this argument is None, default values will be used.
"""
self.dbf = dbf
# XXX: I'm not sure ``index`` is necessary
self.index = index
self.deleted = deleted
if data is None:
self.fieldData = [_fd.defaultValue for _fd in dbf.header.fields]
else:
self.fieldData = list(data)
# XXX: validate self.index before calculating position?
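    # Byte offset of this record in the stream: records are stored back to
    # back right after the header, each occupying ``recordLength`` bytes.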
position = property(lambda self: self.dbf.header.headerLength + \
self.index * self.dbf.header.recordLength)
def rawFromStream(cls, dbf, index):
"""Return raw record contents read from the stream.
Arguments:
dbf:
A `Dbf.Dbf` instance containing the record.
index:
Index of the record in the records' container.
This argument can't be None in this call.
Return value is a string containing record data in DBF format.
"""
        # XXX: maybe assume the current stream position is already
        #   the required one? That could save the time spent
        #   calculating where to seek in the file.
dbf.stream.seek(dbf.header.headerLength +
index * dbf.header.recordLength)
return dbf.stream.read(dbf.header.recordLength)
rawFromStream = classmethod(rawFromStream)
def fromStream(cls, dbf, index):
"""Return a record read from the stream.
Arguments:
dbf:
A `Dbf.Dbf` instance new record should belong to.
index:
Index of the record in the records' container.
This argument can't be None in this call.
Return value is an instance of the current class.
"""
return cls.fromString(dbf, cls.rawFromStream(dbf, index), index)
fromStream = classmethod(fromStream)
def fromString(cls, dbf, string, index=None):
"""Return record read from the string object.
Arguments:
dbf:
A `Dbf.Dbf` instance new record should belong to.
string:
A string new record should be created from.
index:
Index of the record in the container. If this
argument is None, record will be appended.
Return value is an instance of the current class.
"""
        return cls(dbf, index, string[0:1] == b"*",
            [_fd.decodeFromRecord(string) for _fd in dbf.header.fields])
fromString = classmethod(fromString)
# object representation
def __repr__(self):
_template = "%%%ds: %%s (%%s)" % max([len(_fld)
for _fld in self.dbf.fieldNames])
_rv = []
for _fld in self.dbf.fieldNames:
_val = self[_fld]
if _val is utils.INVALID_VALUE:
_rv.append(_template %
(_fld, "None", "value cannot be decoded"))
else:
_rv.append(_template % (_fld, _val, type(_val)))
return "\n".join(_rv)
# protected methods
def _write(self):
"""Write data to the dbf stream.
Note:
            This isn't a public method; use `store` instead.
            By design, ``_write`` should be called
            only from the owning `Dbf` instance.
"""
self._validateIndex(False)
self.dbf.stream.seek(self.position)
self.dbf.stream.write(bytes(self.toString(),
sys.getfilesystemencoding()))
# FIXME: may be move this write somewhere else?
# why we should check this condition for each record?
if self.index == len(self.dbf):
# this is the last record,
# we should write SUB (ASCII 26)
self.dbf.stream.write(b"\x1A")
# utility methods
def _validateIndex(self, allowUndefined=True, checkRange=False):
"""Valid ``self.index`` value.
If ``allowUndefined`` argument is True functions does nothing
in case of ``self.index`` pointing to None object.
"""
if self.index is None:
if not allowUndefined:
raise ValueError("Index is undefined")
elif self.index < 0:
raise ValueError("Index can't be negative (%s)" % self.index)
        elif checkRange and self.index >= self.dbf.header.recordCount:
raise ValueError("There are only %d records in the DBF" %
self.dbf.header.recordCount)
# interface methods
def store(self):
"""Store current record in the DBF.
If ``self.index`` is None, this record will be appended to the
records of the DBF this records belongs to; or replaced otherwise.
"""
self._validateIndex()
if self.index is None:
self.index = len(self.dbf)
self.dbf.append(self)
else:
self.dbf[self.index] = self
def delete(self):
"""Mark method as deleted."""
self.deleted = True
def toString(self):
"""Return string packed record values."""
        # The first byte is the deletion flag (' ' for active, '*' for deleted),
        # followed by every field value encoded in DBF format.
return "".join([" *"[self.deleted]] + [
_def.encodeValue(_dat)
for (_def, _dat) in zip(self.dbf.header.fields, self.fieldData)
])
def asList(self):
"""Return a flat list of fields.
Note:
Change of the list's values won't change
real values stored in this object.
"""
return self.fieldData[:]
def asDict(self):
"""Return a dictionary of fields.
Note:
Change of the dicts's values won't change
real values stored in this object.
"""
return dict([_i for _i in zip(self.dbf.fieldNames, self.fieldData)])
def __getitem__(self, key):
"""Return value by field name or field index."""
if isinstance(key, int):
# integer index of the field
return self.fieldData[key]
# assuming string field name
return self.fieldData[self.dbf.indexOfFieldName(key)]
def __setitem__(self, key, value):
"""Set field value by integer index of the field or string name."""
        if isinstance(key, int):
            # integer index of the field
            self.fieldData[key] = value
            return
# assuming string field name
self.fieldData[self.dbf.indexOfFieldName(key)] = value
# vim: et sts=4 sw=4 :
|
|
# Copyright (C) 2016 EMC Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests for group API.
"""
import ddt
import mock
from cinder import context
from cinder import exception
import cinder.group
from cinder import objects
from cinder.objects import fields
from cinder.policies import group_snapshots as g_snap_policies
from cinder import quota
from cinder import test
from cinder.tests.unit import fake_constants as fake
from cinder.tests.unit import fake_volume
from cinder.tests.unit import utils
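# Group quota engine; its reserve() method is patched below to simulate an
# over-quota failure.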
GROUP_QUOTAS = quota.GROUP_QUOTAS
@ddt.ddt
class GroupAPITestCase(test.TestCase):
"""Test Case for group API."""
def setUp(self):
super(GroupAPITestCase, self).setUp()
self.group_api = cinder.group.API()
self.ctxt = context.RequestContext(fake.USER_ID, fake.PROJECT_ID,
auth_token=True,
is_admin=True)
self.user_ctxt = context.RequestContext(
fake.USER_ID, fake.PROJECT_ID, auth_token=True)
@mock.patch('cinder.objects.Group.get_by_id')
def test_get(self, mock_group_get):
fake_group = {'name': 'fake_group'}
mock_group_get.return_value = fake_group
grp = self.group_api.get(self.ctxt, fake.GROUP_ID)
self.assertEqual(fake_group, grp)
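    # ddt.data runs the decorated test once per value, binding it to is_admin.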
@ddt.data(True, False)
@mock.patch('cinder.objects.GroupList.get_all')
@mock.patch('cinder.objects.GroupList.get_all_by_project')
def test_get_all(self, is_admin, mock_get_all_by_project,
mock_get_all):
self.group_api.LOG = mock.Mock()
fake_groups = ['fake_group1', 'fake_group2']
fake_groups_by_project = ['fake_group1']
mock_get_all.return_value = fake_groups
mock_get_all_by_project.return_value = fake_groups_by_project
if is_admin:
grps = self.group_api.get_all(self.ctxt,
filters={'all_tenants': True})
self.assertEqual(fake_groups, grps)
else:
grps = self.group_api.get_all(self.user_ctxt)
self.assertEqual(fake_groups_by_project, grps)
@mock.patch('cinder.volume.rpcapi.VolumeAPI.delete_group')
@mock.patch('cinder.db.volume_get_all_by_generic_group')
@mock.patch('cinder.db.volumes_update')
@mock.patch('cinder.group.api.API._cast_create_group')
@mock.patch('cinder.group.api.API.update_quota')
@mock.patch('cinder.objects.Group')
@mock.patch('cinder.db.group_type_get')
@mock.patch('cinder.db.volume_types_get_by_name_or_id')
def test_create_delete(self, mock_volume_types_get,
mock_group_type_get, mock_group,
mock_update_quota, mock_cast_create_group,
mock_volumes_update, mock_volume_get_all,
mock_rpc_delete_group):
mock_volume_types_get.return_value = [{'id': fake.VOLUME_TYPE_ID}]
mock_group_type_get.return_value = {'id': fake.GROUP_TYPE_ID}
name = "test_group"
description = "this is a test group"
grp = utils.create_group(self.ctxt, group_type_id=fake.GROUP_TYPE_ID,
volume_type_ids=[fake.VOLUME_TYPE_ID],
availability_zone='nova', host=None,
name=name, description=description,
status=fields.GroupStatus.CREATING)
mock_group.return_value = grp
ret_group = self.group_api.create(self.ctxt, name, description,
fake.GROUP_TYPE_ID,
[fake.VOLUME_TYPE_ID],
availability_zone='nova')
self.assertEqual(grp.obj_to_primitive(), ret_group.obj_to_primitive())
ret_group.host = "test_host@fakedrv#fakepool"
ret_group.status = fields.GroupStatus.AVAILABLE
ret_group.assert_not_frozen = mock.Mock(return_value=True)
ret_group.group_snapshots = []
self.group_api.delete(self.ctxt, ret_group, delete_volumes=True)
mock_volume_get_all.assert_called_once_with(mock.ANY, ret_group.id)
mock_volumes_update.assert_called_once_with(self.ctxt, [])
mock_rpc_delete_group.assert_called_once_with(self.ctxt, ret_group)
@mock.patch('cinder.group.api.API._cast_create_group')
@mock.patch('cinder.group.api.API.update_quota')
@mock.patch('cinder.objects.Group')
@mock.patch('cinder.db.group_type_get_by_name')
@mock.patch('cinder.db.volume_types_get_by_name_or_id')
def test_create_with_group_name(self, mock_volume_types_get,
mock_group_type_get, mock_group,
mock_update_quota, mock_cast_create_group):
mock_volume_types_get.return_value = [{'id': fake.VOLUME_TYPE_ID}]
mock_group_type_get.return_value = {'id': fake.GROUP_TYPE_ID}
name = "test_group"
description = "this is a test group"
grp = utils.create_group(self.ctxt, group_type_id=fake.GROUP_TYPE_ID,
volume_type_ids=[fake.VOLUME_TYPE_ID],
availability_zone='nova', host=None,
name=name, description=description,
status=fields.GroupStatus.CREATING)
mock_group.return_value = grp
ret_group = self.group_api.create(self.ctxt, name, description,
"fake-grouptype-name",
[fake.VOLUME_TYPE_ID],
availability_zone='nova')
self.assertEqual(grp.obj_to_primitive(), ret_group.obj_to_primitive())
mock_group_type_get.assert_called_once_with(self.ctxt,
"fake-grouptype-name")
@mock.patch('cinder.group.api.API._cast_create_group')
@mock.patch('cinder.group.api.API.update_quota')
@mock.patch('cinder.db.group_type_get_by_name')
@mock.patch('cinder.db.volume_types_get_by_name_or_id')
def test_create_with_multi_types(self, mock_volume_types_get,
mock_group_type_get,
mock_update_quota,
mock_cast_create_group):
volume_types = [{'id': fake.VOLUME_TYPE_ID},
{'id': fake.VOLUME_TYPE2_ID}]
mock_volume_types_get.return_value = volume_types
mock_group_type_get.return_value = {'id': fake.GROUP_TYPE_ID}
volume_type_names = ['fake-volume-type1', 'fake-volume-type2']
name = "test_group"
description = "this is a test group"
group = self.group_api.create(self.ctxt, name, description,
"fake-grouptype-name",
volume_type_names,
availability_zone='nova')
self.assertEqual(group["volume_type_ids"],
[t['id'] for t in volume_types])
self.assertEqual(group["group_type_id"], fake.GROUP_TYPE_ID)
mock_group_type_get.assert_called_once_with(self.ctxt,
"fake-grouptype-name")
mock_volume_types_get.assert_called_once_with(mock.ANY,
volume_type_names)
@mock.patch('oslo_utils.timeutils.utcnow')
@mock.patch('cinder.objects.Group')
def test_reset_status(self, mock_group, mock_time_util):
mock_time_util.return_value = "time_now"
self.group_api.reset_status(self.ctxt, mock_group,
fields.GroupStatus.AVAILABLE)
update_field = {'updated_at': "time_now",
'status': fields.GroupStatus.AVAILABLE}
mock_group.update.assert_called_once_with(update_field)
mock_group.save.assert_called_once_with()
@mock.patch.object(GROUP_QUOTAS, "reserve")
@mock.patch('cinder.objects.Group')
@mock.patch('cinder.db.group_type_get_by_name')
@mock.patch('cinder.db.volume_types_get_by_name_or_id')
def test_create_group_failed_update_quota(self,
mock_volume_types_get,
mock_group_type_get, mock_group,
mock_group_quota_reserve):
mock_volume_types_get.return_value = [{'id': fake.VOLUME_TYPE_ID}]
mock_group_type_get.return_value = {'id': fake.GROUP_TYPE_ID}
fake_overs = ['groups']
fake_quotas = {'groups': 1}
fake_usages = {'groups': {'reserved': 0, 'in_use': 1}}
mock_group_quota_reserve.side_effect = exception.OverQuota(
overs=fake_overs,
quotas=fake_quotas,
usages=fake_usages)
name = "test_group"
description = "this is a test group"
grp = utils.create_group(self.ctxt, group_type_id=fake.GROUP_TYPE_ID,
volume_type_ids=[fake.VOLUME_TYPE_ID],
availability_zone='nova', host=None,
name=name, description=description,
status=fields.GroupStatus.CREATING)
mock_group.return_value = grp
self.assertRaises(exception.GroupLimitExceeded,
self.group_api.create,
self.ctxt, name, description,
"fake-grouptype-name",
[fake.VOLUME_TYPE_ID],
availability_zone='nova')
@mock.patch('cinder.objects.Group')
@mock.patch('cinder.db.volume_get')
def test__validate_add_volumes(self, mock_volume_get, mock_group):
grp = utils.create_group(self.ctxt, group_type_id=fake.GROUP_TYPE_ID,
volume_type_ids=[fake.VOLUME_TYPE_ID],
availability_zone='nova', host=None,
name="name", description="description",
status=fields.GroupStatus.CREATING)
mock_group.return_value = grp
fake_volume_obj = fake_volume.fake_volume_obj(self.ctxt)
mock_volume_get.return_value = fake_volume_obj
self.assertRaises(exception.InvalidVolume,
self.group_api._validate_add_volumes, self.ctxt,
[], ['123456789'], grp)
@mock.patch('cinder.volume.rpcapi.VolumeAPI.update_group')
@mock.patch('cinder.db.volume_get_all_by_generic_group')
@mock.patch('cinder.group.api.API._cast_create_group')
@mock.patch('cinder.group.api.API.update_quota')
@mock.patch('cinder.objects.Group')
@mock.patch('cinder.db.group_type_get')
@mock.patch('cinder.db.volume_types_get_by_name_or_id')
def test_update(self, mock_volume_types_get,
mock_group_type_get, mock_group,
mock_update_quota, mock_cast_create_group,
mock_volume_get_all, mock_rpc_update_group):
vol_type_dict = {'id': fake.VOLUME_TYPE_ID,
'name': 'fake_volume_type'}
vol_type = objects.VolumeType(self.ctxt, **vol_type_dict)
mock_volume_types_get.return_value = [{'id': fake.VOLUME_TYPE_ID}]
mock_group_type_get.return_value = {'id': fake.GROUP_TYPE_ID}
name = "test_group"
description = "this is a test group"
grp = utils.create_group(self.ctxt, group_type_id=fake.GROUP_TYPE_ID,
volume_type_ids=[fake.VOLUME_TYPE_ID],
availability_zone='nova', host=None,
name=name, description=description,
status=fields.GroupStatus.CREATING)
mock_group.return_value = grp
ret_group = self.group_api.create(self.ctxt, name, description,
fake.GROUP_TYPE_ID,
[fake.VOLUME_TYPE_ID],
availability_zone='nova')
self.assertEqual(grp.obj_to_primitive(), ret_group.obj_to_primitive())
ret_group.volume_types = [vol_type]
ret_group.host = "test_host@fakedrv#fakepool"
ret_group.status = fields.GroupStatus.AVAILABLE
ret_group.id = fake.GROUP_ID
vol1 = utils.create_volume(
self.ctxt, host=ret_group.host,
availability_zone=ret_group.availability_zone,
volume_type_id=fake.VOLUME_TYPE_ID)
vol2 = utils.create_volume(
self.ctxt, host=ret_group.host,
availability_zone=ret_group.availability_zone,
volume_type_id=fake.VOLUME_TYPE_ID,
group_id=fake.GROUP_ID)
vol2_dict = {
'id': vol2.id,
'group_id': fake.GROUP_ID,
'volume_type_id': fake.VOLUME_TYPE_ID,
'availability_zone': ret_group.availability_zone,
'host': ret_group.host,
'status': 'available',
}
mock_volume_get_all.return_value = [vol2_dict]
new_name = "new_group_name"
new_desc = "this is a new group"
self.group_api.update(self.ctxt, ret_group, new_name, new_desc,
vol1.id, vol2.id)
mock_volume_get_all.assert_called_once_with(mock.ANY, ret_group.id)
mock_rpc_update_group.assert_called_once_with(self.ctxt, ret_group,
add_volumes=vol1.id,
remove_volumes=vol2.id)
@mock.patch('cinder.objects.GroupSnapshot.get_by_id')
@mock.patch('cinder.context.RequestContext.authorize')
def test_get_group_snapshot(self, mock_authorize, mock_group_snap):
fake_group_snap = 'fake_group_snap'
mock_group_snap.return_value = fake_group_snap
grp_snap = self.group_api.get_group_snapshot(
self.ctxt, fake.GROUP_SNAPSHOT_ID)
self.assertEqual(fake_group_snap, grp_snap)
mock_authorize.assert_called_once_with(
g_snap_policies.GET_POLICY,
target_obj=fake_group_snap)
@ddt.data(True, False)
@mock.patch('cinder.objects.GroupSnapshotList.get_all')
@mock.patch('cinder.objects.GroupSnapshotList.get_all_by_project')
def test_get_all_group_snapshots(self, is_admin,
mock_get_all_by_project,
mock_get_all):
fake_group_snaps = ['fake_group_snap1', 'fake_group_snap2']
fake_group_snaps_by_project = ['fake_group_snap1']
mock_get_all.return_value = fake_group_snaps
mock_get_all_by_project.return_value = fake_group_snaps_by_project
if is_admin:
grp_snaps = self.group_api.get_all_group_snapshots(
self.ctxt, filters={'all_tenants': True})
self.assertEqual(fake_group_snaps, grp_snaps)
else:
grp_snaps = self.group_api.get_all_group_snapshots(
self.user_ctxt)
self.assertEqual(fake_group_snaps_by_project, grp_snaps)
@mock.patch('cinder.objects.GroupSnapshot')
def test_update_group_snapshot(self, mock_group_snap):
grp_snap_update = {"name": "new_name",
"description": "This is a new description"}
self.group_api.update_group_snapshot(self.ctxt, mock_group_snap,
grp_snap_update)
mock_group_snap.update.assert_called_once_with(grp_snap_update)
mock_group_snap.save.assert_called_once_with()
@mock.patch('cinder.volume.rpcapi.VolumeAPI.delete_group_snapshot')
@mock.patch('cinder.volume.rpcapi.VolumeAPI.create_group_snapshot')
@mock.patch('cinder.volume.api.API.create_snapshots_in_db')
@mock.patch('cinder.objects.Group')
@mock.patch('cinder.objects.GroupSnapshot')
@mock.patch('cinder.objects.SnapshotList.get_all_for_group_snapshot')
def test_create_delete_group_snapshot(self,
mock_snap_get_all,
mock_group_snap, mock_group,
mock_create_in_db,
mock_create_api, mock_delete_api):
name = "fake_name"
description = "fake description"
mock_group.id = fake.GROUP_ID
mock_group.group_type_id = fake.GROUP_TYPE_ID
mock_group.assert_not_frozen = mock.Mock(return_value=True)
mock_group.volumes = []
ret_group_snap = self.group_api.create_group_snapshot(
self.ctxt, mock_group, name, description)
mock_snap_get_all.return_value = []
options = {'group_id': fake.GROUP_ID,
'user_id': self.ctxt.user_id,
'project_id': self.ctxt.project_id,
'status': "creating",
'name': name,
'description': description,
'group_type_id': fake.GROUP_TYPE_ID}
mock_group_snap.assert_called_once_with(self.ctxt, **options)
ret_group_snap.create.assert_called_once_with()
mock_create_in_db.assert_called_once_with(self.ctxt, [],
ret_group_snap.name,
ret_group_snap.description,
None,
ret_group_snap.id)
mock_create_api.assert_called_once_with(self.ctxt, ret_group_snap)
ret_group_snap.assert_not_frozen = mock.Mock(return_value=True)
self.group_api.delete_group_snapshot(self.ctxt, ret_group_snap)
mock_delete_api.assert_called_once_with(mock.ANY, ret_group_snap)
@mock.patch('cinder.volume.api.API.delete')
@mock.patch('cinder.objects.VolumeType.get_by_name_or_id')
@mock.patch('cinder.db.group_volume_type_mapping_create')
@mock.patch('cinder.volume.api.API.create')
@mock.patch('cinder.objects.GroupSnapshot.get_by_id')
@mock.patch('cinder.objects.SnapshotList.get_all_for_group_snapshot')
@mock.patch('cinder.volume.rpcapi.VolumeAPI.create_group_from_src')
@mock.patch('cinder.objects.VolumeList.get_all_by_generic_group')
def test_create_group_from_snap_volume_failed(
self, mock_volume_get_all,
mock_rpc_create_group_from_src,
mock_snap_get_all, mock_group_snap_get,
mock_volume_api_create,
mock_mapping_create,
mock_get_volume_type, mock_volume_delete):
mock_volume_api_create.side_effect = [exception.CinderException]
vol_type = fake_volume.fake_volume_type_obj(
self.ctxt,
id=fake.VOLUME_TYPE_ID,
name='fake_volume_type')
mock_get_volume_type.return_value = vol_type
grp_snap = utils.create_group_snapshot(
self.ctxt, fake.GROUP_ID,
group_type_id=fake.GROUP_TYPE_ID,
status=fields.GroupStatus.CREATING)
mock_group_snap_get.return_value = grp_snap
vol1 = utils.create_volume(
self.ctxt,
availability_zone='nova',
volume_type_id=vol_type['id'],
group_id=fake.GROUP_ID)
snap = utils.create_snapshot(self.ctxt, vol1.id,
volume_type_id=vol_type['id'],
status=fields.GroupStatus.CREATING)
mock_snap_get_all.return_value = [snap]
name = "test_group"
description = "this is a test group"
grp = utils.create_group(self.ctxt, group_type_id=fake.GROUP_TYPE_ID,
volume_type_ids=[vol_type['id']],
availability_zone='nova',
name=name, description=description,
group_snapshot_id=grp_snap.id,
status=fields.GroupStatus.CREATING)
vol2 = utils.create_volume(
self.ctxt,
availability_zone=grp.availability_zone,
volume_type_id=vol_type['id'],
group_id=grp.id,
snapshot_id=snap.id)
mock_volume_get_all.return_value = [vol2]
self.assertRaises(
exception.CinderException,
self.group_api._create_group_from_group_snapshot,
self.ctxt, grp, grp_snap.id)
mock_volume_api_create.assert_called_once_with(
self.ctxt, 1, None, None,
availability_zone=grp.availability_zone,
group_snapshot=grp_snap,
group=grp,
snapshot=snap,
volume_type=vol_type)
mock_rpc_create_group_from_src.assert_not_called()
mock_volume_delete.assert_called_once_with(self.ctxt, vol2)
vol2.destroy()
grp.destroy()
snap.destroy()
vol1.destroy()
grp_snap.destroy()
@mock.patch('cinder.objects.VolumeType.get_by_name_or_id')
@mock.patch('cinder.db.group_volume_type_mapping_create')
@mock.patch('cinder.volume.api.API.create')
@mock.patch('cinder.objects.GroupSnapshot.get_by_id')
@mock.patch('cinder.objects.SnapshotList.get_all_for_group_snapshot')
@mock.patch('cinder.volume.rpcapi.VolumeAPI.create_group_from_src')
@mock.patch('cinder.objects.VolumeList.get_all_by_generic_group')
def test_create_group_from_snap(self, mock_volume_get_all,
mock_rpc_create_group_from_src,
mock_snap_get_all, mock_group_snap_get,
mock_volume_api_create,
mock_mapping_create,
mock_get_volume_type):
vol_type = fake_volume.fake_volume_type_obj(
self.ctxt,
id=fake.VOLUME_TYPE_ID,
name='fake_volume_type')
mock_get_volume_type.return_value = vol_type
grp_snap = utils.create_group_snapshot(
self.ctxt, fake.GROUP_ID,
group_type_id=fake.GROUP_TYPE_ID,
status=fields.GroupStatus.CREATING)
mock_group_snap_get.return_value = grp_snap
vol1 = utils.create_volume(
self.ctxt,
availability_zone='nova',
volume_type_id=vol_type['id'],
group_id=fake.GROUP_ID)
snap = utils.create_snapshot(self.ctxt, vol1.id,
volume_type_id=vol_type['id'],
status=fields.GroupStatus.CREATING)
mock_snap_get_all.return_value = [snap]
name = "test_group"
description = "this is a test group"
grp = utils.create_group(self.ctxt, group_type_id=fake.GROUP_TYPE_ID,
volume_type_ids=[vol_type['id']],
availability_zone='nova',
name=name, description=description,
group_snapshot_id=grp_snap.id,
status=fields.GroupStatus.CREATING)
vol2 = utils.create_volume(
self.ctxt,
availability_zone=grp.availability_zone,
volume_type_id=vol_type['id'],
group_id=grp.id,
snapshot_id=snap.id)
mock_volume_get_all.return_value = [vol2]
self.group_api._create_group_from_group_snapshot(self.ctxt, grp,
grp_snap.id)
mock_volume_api_create.assert_called_once_with(
self.ctxt, 1, None, None,
availability_zone=grp.availability_zone,
group_snapshot=grp_snap,
group=grp,
snapshot=snap,
volume_type=vol_type)
mock_rpc_create_group_from_src.assert_called_once_with(
self.ctxt, grp, grp_snap)
vol2.destroy()
grp.destroy()
snap.destroy()
vol1.destroy()
grp_snap.destroy()
@mock.patch('cinder.objects.VolumeType.get_by_name_or_id')
@mock.patch('cinder.db.group_volume_type_mapping_create')
@mock.patch('cinder.volume.api.API.create')
@mock.patch('cinder.objects.Group.get_by_id')
@mock.patch('cinder.volume.rpcapi.VolumeAPI.create_group_from_src')
@mock.patch('cinder.objects.VolumeList.get_all_by_generic_group')
def test_create_group_from_group(self, mock_volume_get_all,
mock_rpc_create_group_from_src,
mock_group_get,
mock_volume_api_create,
mock_mapping_create,
mock_get_volume_type):
vol_type = fake_volume.fake_volume_type_obj(
self.ctxt,
id=fake.VOLUME_TYPE_ID,
name='fake_volume_type')
mock_get_volume_type.return_value = vol_type
grp = utils.create_group(self.ctxt, group_type_id=fake.GROUP_TYPE_ID,
volume_type_ids=[vol_type['id']],
availability_zone='nova',
status=fields.GroupStatus.CREATING)
mock_group_get.return_value = grp
vol = utils.create_volume(
self.ctxt,
availability_zone=grp.availability_zone,
volume_type_id=fake.VOLUME_TYPE_ID,
group_id=grp.id)
mock_volume_get_all.return_value = [vol]
grp2 = utils.create_group(self.ctxt,
group_type_id=fake.GROUP_TYPE_ID,
volume_type_ids=[vol_type['id']],
availability_zone='nova',
source_group_id=grp.id,
status=fields.GroupStatus.CREATING)
vol2 = utils.create_volume(
self.ctxt,
availability_zone=grp.availability_zone,
volume_type_id=vol_type['id'],
group_id=grp2.id,
source_volid=vol.id)
self.group_api._create_group_from_source_group(self.ctxt, grp2,
grp.id)
mock_volume_api_create.assert_called_once_with(
self.ctxt, 1, None, None,
availability_zone=grp.availability_zone,
source_group=grp,
group=grp2,
source_volume=vol,
volume_type=vol_type)
mock_rpc_create_group_from_src.assert_called_once_with(
self.ctxt, grp2, None, grp)
vol2.destroy()
grp2.destroy()
vol.destroy()
grp.destroy()
@mock.patch('cinder.volume.api.API.delete')
@mock.patch('cinder.objects.VolumeType.get_by_name_or_id')
@mock.patch('cinder.db.group_volume_type_mapping_create')
@mock.patch('cinder.volume.api.API.create')
@mock.patch('cinder.objects.Group.get_by_id')
@mock.patch('cinder.volume.rpcapi.VolumeAPI.create_group_from_src')
@mock.patch('cinder.objects.VolumeList.get_all_by_generic_group')
def test_create_group_from_group_create_volume_failed(
self, mock_volume_get_all, mock_rpc_create_group_from_src,
mock_group_get, mock_volume_api_create, mock_mapping_create,
mock_get_volume_type, mock_volume_delete):
vol_type = fake_volume.fake_volume_type_obj(
self.ctxt,
id=fake.VOLUME_TYPE_ID,
name='fake_volume_type')
mock_get_volume_type.return_value = vol_type
grp = utils.create_group(self.ctxt, group_type_id=fake.GROUP_TYPE_ID,
volume_type_ids=[vol_type['id']],
availability_zone='nova',
status=fields.GroupStatus.CREATING)
mock_group_get.return_value = grp
vol1 = utils.create_volume(
self.ctxt,
availability_zone=grp.availability_zone,
volume_type_id=fake.VOLUME_TYPE_ID,
group_id=grp.id)
vol2 = utils.create_volume(
self.ctxt,
availability_zone=grp.availability_zone,
volume_type_id=fake.VOLUME_TYPE_ID,
group_id=grp.id)
mock_volume_get_all.side_effect = [[vol1, vol2], [vol1]]
grp2 = utils.create_group(self.ctxt,
group_type_id=fake.GROUP_TYPE_ID,
volume_type_ids=[vol_type['id']],
availability_zone='nova',
source_group_id=grp.id,
status=fields.GroupStatus.CREATING)
mock_volume_api_create.side_effect = [None, exception.CinderException]
self.assertRaises(
exception.CinderException,
self.group_api._create_group_from_source_group,
self.ctxt, grp2, grp.id)
mock_rpc_create_group_from_src.assert_not_called()
mock_volume_delete.assert_called_once_with(self.ctxt, vol1)
grp2.destroy()
vol2.destroy()
vol1.destroy()
grp.destroy()
@mock.patch('cinder.group.api.API._create_group_from_group_snapshot')
@mock.patch('cinder.group.api.API._create_group_from_source_group')
@mock.patch('cinder.group.api.API.update_quota')
@mock.patch('cinder.objects.GroupSnapshot.get_by_id')
@mock.patch('cinder.objects.SnapshotList.get_all_for_group_snapshot')
@mock.patch('cinder.scheduler.rpcapi.SchedulerAPI.validate_host_capacity')
def test_create_from_src(self, mock_validate_host, mock_snap_get_all,
mock_group_snap_get, mock_update_quota,
mock_create_from_group,
mock_create_from_snap):
name = "test_group"
description = "this is a test group"
grp = utils.create_group(self.ctxt, group_type_id=fake.GROUP_TYPE_ID,
volume_type_ids=[fake.VOLUME_TYPE_ID],
availability_zone='nova',
name=name, description=description,
status=fields.GroupStatus.AVAILABLE,)
vol1 = utils.create_volume(
self.ctxt,
availability_zone='nova',
volume_type_id=fake.VOLUME_TYPE_ID,
group_id=grp.id)
snap = utils.create_snapshot(self.ctxt, vol1.id,
volume_type_id=fake.VOLUME_TYPE_ID,
status=fields.SnapshotStatus.AVAILABLE)
mock_snap_get_all.return_value = [snap]
        mock_validate_host.return_value = True
grp_snap = utils.create_group_snapshot(
self.ctxt, grp.id,
group_type_id=fake.GROUP_TYPE_ID,
status=fields.GroupStatus.AVAILABLE)
mock_group_snap_get.return_value = grp_snap
grp2 = utils.create_group(self.ctxt,
group_type_id=fake.GROUP_TYPE_ID,
volume_type_ids=[fake.VOLUME_TYPE_ID],
availability_zone='nova',
name=name, description=description,
status=fields.GroupStatus.CREATING,
group_snapshot_id=grp_snap.id)
with mock.patch('cinder.objects.Group') as mock_group:
mock_group.return_value = grp2
with mock.patch('cinder.objects.group.Group.create'):
ret_group = self.group_api.create_from_src(
self.ctxt, name, description,
group_snapshot_id=grp_snap.id,
source_group_id=None)
self.assertEqual(grp2.obj_to_primitive(),
ret_group.obj_to_primitive())
mock_create_from_snap.assert_called_once_with(
self.ctxt, grp2, grp_snap.id)
snap.destroy()
grp_snap.destroy()
vol1.destroy()
grp.destroy()
grp2.destroy()
@mock.patch('oslo_utils.timeutils.utcnow')
@mock.patch('cinder.objects.GroupSnapshot')
def test_reset_group_snapshot_status(self,
mock_group_snapshot,
mock_time_util):
mock_time_util.return_value = "time_now"
self.group_api.reset_group_snapshot_status(
self.ctxt, mock_group_snapshot, fields.GroupSnapshotStatus.ERROR)
update_field = {'updated_at': "time_now",
'status': fields.GroupSnapshotStatus.ERROR}
mock_group_snapshot.update.assert_called_once_with(update_field)
mock_group_snapshot.save.assert_called_once_with()
@mock.patch('cinder.scheduler.rpcapi.SchedulerAPI.validate_host_capacity')
def test_create_group_from_src_frozen(self, mock_validate_host):
service = utils.create_service(self.ctxt, {'frozen': True})
group = utils.create_group(self.ctxt, host=service.host,
group_type_id='gt')
mock_validate_host.return_value = True
group_api = cinder.group.api.API()
self.assertRaises(exception.InvalidInput,
group_api.create_from_src,
self.ctxt, 'group', 'desc',
group_snapshot_id=None, source_group_id=group.id)
def test_delete_group_frozen(self):
service = utils.create_service(self.ctxt, {'frozen': True})
group = utils.create_group(self.ctxt, host=service.host,
group_type_id='gt')
group_api = cinder.group.api.API()
self.assertRaises(exception.InvalidInput,
group_api.delete, self.ctxt, group)
def test_create_group_snapshot_frozen(self):
service = utils.create_service(self.ctxt, {'frozen': True})
group = utils.create_group(self.ctxt, host=service.host,
group_type_id='gt')
group_api = cinder.group.api.API()
self.assertRaises(exception.InvalidInput,
group_api.create_group_snapshot,
self.ctxt, group, 'group_snapshot', 'desc')
def test_delete_group_snapshot_frozen(self):
service = utils.create_service(self.ctxt, {'frozen': True})
group = utils.create_group(self.ctxt, host=service.host,
group_type_id='gt')
gsnap = utils.create_group_snapshot(self.ctxt, group.id)
group_api = cinder.group.api.API()
self.assertRaises(exception.InvalidInput,
group_api.delete_group_snapshot,
self.ctxt, gsnap)
|
|
import os
import re
import sys
import datetime
from django.conf import settings
from django.template import Template, Context, TemplateDoesNotExist
from django.utils.html import escape
from django.http import HttpResponse, HttpResponseServerError, HttpResponseNotFound
from django.utils.encoding import smart_unicode
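# Settings whose names match this pattern are masked in get_safe_settings().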
HIDDEN_SETTINGS = re.compile('SECRET|PASSWORD|PROFANITIES_LIST')
def linebreak_iter(template_source):
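    """Yield the offset at which each line of template_source starts,
    followed by a sentinel offset past the end of the source."""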
yield 0
p = template_source.find('\n')
while p >= 0:
yield p+1
p = template_source.find('\n', p+1)
yield len(template_source) + 1
def get_safe_settings():
"Returns a dictionary of the settings module, with sensitive settings blurred out."
settings_dict = {}
for k in dir(settings):
if k.isupper():
if HIDDEN_SETTINGS.search(k):
settings_dict[k] = '********************'
else:
settings_dict[k] = getattr(settings, k)
return settings_dict
def technical_500_response(request, exc_type, exc_value, tb):
"""
Create a technical server error response. The last three arguments are
the values returned from sys.exc_info() and friends.
"""
reporter = ExceptionReporter(request, exc_type, exc_value, tb)
html = reporter.get_traceback_html()
return HttpResponseServerError(html, mimetype='text/html')
class ExceptionReporter:
"""
A class to organize and coordinate reporting on exceptions.
"""
def __init__(self, request, exc_type, exc_value, tb):
self.request = request
self.exc_type = exc_type
self.exc_value = exc_value
self.tb = tb
self.template_info = None
self.template_does_not_exist = False
self.loader_debug_info = None
# Handle deprecated string exceptions
if isinstance(self.exc_type, basestring):
self.exc_value = Exception('Deprecated String Exception: %r' % self.exc_type)
self.exc_type = type(self.exc_value)
def get_traceback_html(self):
"Return HTML code for traceback."
if issubclass(self.exc_type, TemplateDoesNotExist):
from django.template.loader import template_source_loaders
self.template_does_not_exist = True
self.loader_debug_info = []
for loader in template_source_loaders:
try:
source_list_func = getattr(__import__(loader.__module__, {}, {}, ['get_template_sources']), 'get_template_sources')
# NOTE: This assumes exc_value is the name of the template that
# the loader attempted to load.
template_list = [{'name': t, 'exists': os.path.exists(t)} \
for t in source_list_func(str(self.exc_value))]
except (ImportError, AttributeError):
template_list = []
self.loader_debug_info.append({
'loader': loader.__module__ + '.' + loader.__name__,
'templates': template_list,
})
if settings.TEMPLATE_DEBUG and hasattr(self.exc_value, 'source'):
self.get_template_exception_info()
frames = self.get_traceback_frames()
unicode_hint = ''
if issubclass(self.exc_type, UnicodeError):
start = getattr(self.exc_value, 'start', None)
end = getattr(self.exc_value, 'end', None)
if start is not None and end is not None:
unicode_str = self.exc_value.args[1]
unicode_hint = smart_unicode(unicode_str[max(start-5, 0):min(end+5, len(unicode_str))], 'ascii', errors='replace')
from django import get_version
t = Template(TECHNICAL_500_TEMPLATE, name='Technical 500 template')
c = Context({
'exception_type': self.exc_type.__name__,
'exception_value': smart_unicode(self.exc_value, errors='replace'),
'unicode_hint': unicode_hint,
'frames': frames,
'lastframe': frames[-1],
'request': self.request,
'request_protocol': self.request.is_secure() and "https" or "http",
'settings': get_safe_settings(),
'sys_executable': sys.executable,
'sys_version_info': '%d.%d.%d' % sys.version_info[0:3],
'server_time': datetime.datetime.now(),
'django_version_info': get_version(),
'sys_path' : sys.path,
'template_info': self.template_info,
'template_does_not_exist': self.template_does_not_exist,
'loader_debug_info': self.loader_debug_info,
})
return t.render(c)
def get_template_exception_info(self):
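        # ``exc_value.source`` is ``(origin, (start, end))``: the template origin
        # and the character range of the node that raised the error.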
origin, (start, end) = self.exc_value.source
template_source = origin.reload()
context_lines = 10
line = 0
upto = 0
source_lines = []
before = during = after = ""
for num, next in enumerate(linebreak_iter(template_source)):
if start >= upto and end <= next:
line = num
before = escape(template_source[upto:start])
during = escape(template_source[start:end])
after = escape(template_source[end:next])
source_lines.append( (num, escape(template_source[upto:next])) )
upto = next
total = len(source_lines)
top = max(1, line - context_lines)
bottom = min(total, line + 1 + context_lines)
self.template_info = {
'message': self.exc_value.args[0],
'source_lines': source_lines[top:bottom],
'before': before,
'during': during,
'after': after,
'top': top,
'bottom': bottom,
'total': total,
'line': line,
'name': origin.name,
}
def _get_lines_from_file(self, filename, lineno, context_lines, loader=None, module_name=None):
"""
Returns context_lines before and after lineno from file.
Returns (pre_context_lineno, pre_context, context_line, post_context).
"""
source = None
if loader is not None and hasattr(loader, "get_source"):
source = loader.get_source(module_name)
if source is not None:
source = source.splitlines()
if source is None:
try:
f = open(filename)
try:
source = f.readlines()
finally:
f.close()
except (OSError, IOError):
pass
if source is None:
return None, [], None, []
encoding = 'ascii'
for line in source[:2]:
# File coding may be specified. Match pattern from PEP-263
# (http://www.python.org/dev/peps/pep-0263/)
match = re.search(r'coding[:=]\s*([-\w.]+)', line)
if match:
encoding = match.group(1)
break
source = [unicode(sline, encoding, 'replace') for sline in source]
lower_bound = max(0, lineno - context_lines)
upper_bound = lineno + context_lines
pre_context = [line.strip('\n') for line in source[lower_bound:lineno]]
context_line = source[lineno].strip('\n')
post_context = [line.strip('\n') for line in source[lineno+1:upper_bound]]
return lower_bound, pre_context, context_line, post_context
def get_traceback_frames(self):
frames = []
tb = self.tb
while tb is not None:
# support for __traceback_hide__ which is used by a few libraries
# to hide internal frames.
if tb.tb_frame.f_locals.get('__traceback_hide__'):
tb = tb.tb_next
continue
filename = tb.tb_frame.f_code.co_filename
function = tb.tb_frame.f_code.co_name
lineno = tb.tb_lineno - 1
loader = tb.tb_frame.f_globals.get('__loader__')
module_name = tb.tb_frame.f_globals.get('__name__')
pre_context_lineno, pre_context, context_line, post_context = self._get_lines_from_file(filename, lineno, 7, loader, module_name)
if pre_context_lineno is not None:
frames.append({
'tb': tb,
'filename': filename,
'function': function,
'lineno': lineno + 1,
'vars': tb.tb_frame.f_locals.items(),
'id': id(tb),
'pre_context': pre_context,
'context_line': context_line,
'post_context': post_context,
'pre_context_lineno': pre_context_lineno + 1,
})
tb = tb.tb_next
if not frames:
frames = [{
'filename': '<unknown>',
'function': '?',
'lineno': '?',
'context_line': '???',
}]
return frames
def format_exception(self):
"""
Return the same data as from traceback.format_exception.
"""
import traceback
frames = self.get_traceback_frames()
tb = [ (f['filename'], f['lineno'], f['function'], f['context_line']) for f in frames ]
list = ['Traceback (most recent call last):\n']
list += traceback.format_list(tb)
list += traceback.format_exception_only(self.exc_type, self.exc_value)
return list
def technical_404_response(request, exception):
"Create a technical 404 error response. The exception should be the Http404."
try:
tried = exception.args[0]['tried']
except (IndexError, TypeError):
tried = []
else:
if not tried:
# tried exists but is an empty list. The URLconf must've been empty.
return empty_urlconf(request)
t = Template(TECHNICAL_404_TEMPLATE, name='Technical 404 template')
c = Context({
'root_urlconf': settings.ROOT_URLCONF,
'request_path': request.path[1:], # Trim leading slash
'urlpatterns': tried,
'reason': str(exception),
'request': request,
'request_protocol': request.is_secure() and "https" or "http",
'settings': get_safe_settings(),
})
return HttpResponseNotFound(t.render(c), mimetype='text/html')
def empty_urlconf(request):
"Create an empty URLconf 404 error response."
t = Template(EMPTY_URLCONF_TEMPLATE, name='Empty URLConf template')
c = Context({
'project_name': settings.SETTINGS_MODULE.split('.')[0]
})
return HttpResponse(t.render(c), mimetype='text/html')
#
# Templates are embedded in the file so that we know the error handler will
# always work even if the template loader is broken.
#
TECHNICAL_500_TEMPLATE = """
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
<html lang="en">
<head>
<meta http-equiv="content-type" content="text/html; charset=utf-8">
<meta name="robots" content="NONE,NOARCHIVE">
<title>{{ exception_type }} at {{ request.path|escape }}</title>
<style type="text/css">
html * { padding:0; margin:0; }
body * { padding:10px 20px; }
body * * { padding:0; }
body { font:small sans-serif; }
body>div { border-bottom:1px solid #ddd; }
h1 { font-weight:normal; }
h2 { margin-bottom:.8em; }
h2 span { font-size:80%; color:#666; font-weight:normal; }
h3 { margin:1em 0 .5em 0; }
h4 { margin:0 0 .5em 0; font-weight: normal; }
table { border:1px solid #ccc; border-collapse: collapse; width:100%; background:white; }
tbody td, tbody th { vertical-align:top; padding:2px 3px; }
thead th { padding:1px 6px 1px 3px; background:#fefefe; text-align:left; font-weight:normal; font-size:11px; border:1px solid #ddd; }
tbody th { width:12em; text-align:right; color:#666; padding-right:.5em; }
table.vars { margin:5px 0 2px 40px; }
table.vars td, table.req td { font-family:monospace; }
table td.code { width:100%; }
table td.code div { overflow:hidden; }
table.source th { color:#666; }
table.source td { font-family:monospace; white-space:pre; border-bottom:1px solid #eee; }
ul.traceback { list-style-type:none; }
ul.traceback li.frame { margin-bottom:1em; }
div.context { margin: 10px 0; }
div.context ol { padding-left:30px; margin:0 10px; list-style-position: inside; }
div.context ol li { font-family:monospace; white-space:pre; color:#666; cursor:pointer; }
div.context ol.context-line li { color:black; background-color:#ccc; }
div.context ol.context-line li span { float: right; }
div.commands { margin-left: 40px; }
div.commands a { color:black; text-decoration:none; }
#summary { background: #ffc; }
#summary h2 { font-weight: normal; color: #666; }
#explanation { background:#eee; }
#template, #template-not-exist { background:#f6f6f6; }
#template-not-exist ul { margin: 0 0 0 20px; }
#unicode-hint { background:#eee; }
#traceback { background:#eee; }
#requestinfo { background:#f6f6f6; padding-left:120px; }
#summary table { border:none; background:transparent; }
#requestinfo h2, #requestinfo h3 { position:relative; margin-left:-100px; }
#requestinfo h3 { margin-bottom:-1em; }
.error { background: #ffc; }
.specific { color:#cc3300; font-weight:bold; }
h2 span.commands { font-size:.7em;}
span.commands a:link {color:#5E5694;}
pre.exception_value { font-family: sans-serif; color: #666; font-size: 1.5em; margin: 10px 0 10px 0; }
</style>
<script type="text/javascript">
//<!--
function getElementsByClassName(oElm, strTagName, strClassName){
// Written by Jonathan Snook, http://www.snook.ca/jon; Add-ons by Robert Nyman, http://www.robertnyman.com
var arrElements = (strTagName == "*" && document.all)? document.all :
oElm.getElementsByTagName(strTagName);
var arrReturnElements = new Array();
strClassName = strClassName.replace(/\-/g, "\\-");
var oRegExp = new RegExp("(^|\\s)" + strClassName + "(\\s|$)");
var oElement;
for(var i=0; i<arrElements.length; i++){
oElement = arrElements[i];
if(oRegExp.test(oElement.className)){
arrReturnElements.push(oElement);
}
}
return (arrReturnElements)
}
function hideAll(elems) {
for (var e = 0; e < elems.length; e++) {
elems[e].style.display = 'none';
}
}
window.onload = function() {
hideAll(getElementsByClassName(document, 'table', 'vars'));
hideAll(getElementsByClassName(document, 'ol', 'pre-context'));
hideAll(getElementsByClassName(document, 'ol', 'post-context'));
hideAll(getElementsByClassName(document, 'div', 'pastebin'));
}
function toggle() {
for (var i = 0; i < arguments.length; i++) {
var e = document.getElementById(arguments[i]);
if (e) {
e.style.display = e.style.display == 'none' ? 'block' : 'none';
}
}
return false;
}
function varToggle(link, id) {
toggle('v' + id);
var s = link.getElementsByTagName('span')[0];
var uarr = String.fromCharCode(0x25b6);
var darr = String.fromCharCode(0x25bc);
s.innerHTML = s.innerHTML == uarr ? darr : uarr;
return false;
}
function switchPastebinFriendly(link) {
s1 = "Switch to copy-and-paste view";
s2 = "Switch back to interactive view";
link.innerHTML = link.innerHTML == s1 ? s2 : s1;
toggle('browserTraceback', 'pastebinTraceback');
return false;
}
//-->
</script>
</head>
<body>
<div id="summary">
<h1>{{ exception_type }} at {{ request.path|escape }}</h1>
<pre class="exception_value">{{ exception_value|escape }}</pre>
<table class="meta">
<tr>
<th>Request Method:</th>
<td>{{ request.META.REQUEST_METHOD }}</td>
</tr>
<tr>
<th>Request URL:</th>
<td>{{ request_protocol }}://{{ request.META.HTTP_HOST }}{{ request.path|escape }}</td>
</tr>
<tr>
<th>Exception Type:</th>
<td>{{ exception_type }}</td>
</tr>
<tr>
<th>Exception Value:</th>
      <td><pre>{{ exception_value|escape }}</pre></td>
</tr>
<tr>
<th>Exception Location:</th>
<td>{{ lastframe.filename|escape }} in {{ lastframe.function|escape }}, line {{ lastframe.lineno }}</td>
</tr>
<tr>
<th>Python Executable:</th>
<td>{{ sys_executable|escape }}</td>
</tr>
<tr>
<th>Python Version:</th>
<td>{{ sys_version_info }}</td>
</tr>
<tr>
<th>Python Path:</th>
<td>{{ sys_path }}</td>
</tr>
<tr>
<th>Server time:</th>
<td>{{server_time|date:"r"}}</td>
</tr>
</table>
</div>
{% if unicode_hint %}
<div id="unicode-hint">
<h2>Unicode error hint</h2>
<p>The string that could not be encoded/decoded was: <strong>{{ unicode_hint|escape }}</strong></p>
</div>
{% endif %}
{% if template_does_not_exist %}
<div id="template-not-exist">
<h2>Template-loader postmortem</h2>
{% if loader_debug_info %}
<p>Django tried loading these templates, in this order:</p>
<ul>
{% for loader in loader_debug_info %}
<li>Using loader <code>{{ loader.loader }}</code>:
<ul>{% for t in loader.templates %}<li><code>{{ t.name }}</code> (File {% if t.exists %}exists{% else %}does not exist{% endif %})</li>{% endfor %}</ul>
</li>
{% endfor %}
</ul>
{% else %}
<p>Django couldn't find any templates because your <code>TEMPLATE_LOADERS</code> setting is empty!</p>
{% endif %}
</div>
{% endif %}
{% if template_info %}
<div id="template">
<h2>Template error</h2>
<p>In template <code>{{ template_info.name }}</code>, error at line <strong>{{ template_info.line }}</strong></p>
<h3>{{ template_info.message }}</h3>
<table class="source{% if template_info.top %} cut-top{% endif %}{% ifnotequal template_info.bottom template_info.total %} cut-bottom{% endifnotequal %}">
{% for source_line in template_info.source_lines %}
{% ifequal source_line.0 template_info.line %}
<tr class="error"><th>{{ source_line.0 }}</th>
<td>{{ template_info.before }}<span class="specific">{{ template_info.during }}</span>{{ template_info.after }}</td></tr>
{% else %}
<tr><th>{{ source_line.0 }}</th>
<td>{{ source_line.1 }}</td></tr>
{% endifequal %}
{% endfor %}
</table>
</div>
{% endif %}
<div id="traceback">
<h2>Traceback <span class="commands"><a href="#" onclick="return switchPastebinFriendly(this);">Switch to copy-and-paste view</a></span></h2>
{% autoescape off %}
<div id="browserTraceback">
<ul class="traceback">
{% for frame in frames %}
<li class="frame">
<code>{{ frame.filename|escape }}</code> in <code>{{ frame.function|escape }}</code>
{% if frame.context_line %}
<div class="context" id="c{{ frame.id }}">
{% if frame.pre_context %}
<ol start="{{ frame.pre_context_lineno }}" class="pre-context" id="pre{{ frame.id }}">{% for line in frame.pre_context %}<li onclick="toggle('pre{{ frame.id }}', 'post{{ frame.id }}')">{{ line|escape }}</li>{% endfor %}</ol>
{% endif %}
<ol start="{{ frame.lineno }}" class="context-line"><li onclick="toggle('pre{{ frame.id }}', 'post{{ frame.id }}')">{{ frame.context_line|escape }} <span>...</span></li></ol>
{% if frame.post_context %}
<ol start='{{ frame.lineno|add:"1" }}' class="post-context" id="post{{ frame.id }}">{% for line in frame.post_context %}<li onclick="toggle('pre{{ frame.id }}', 'post{{ frame.id }}')">{{ line|escape }}</li>{% endfor %}</ol>
{% endif %}
</div>
{% endif %}
{% if frame.vars %}
<div class="commands">
<a href="#" onclick="return varToggle(this, '{{ frame.id }}')"><span>▶</span> Local vars</a>
</div>
<table class="vars" id="v{{ frame.id }}">
<thead>
<tr>
<th>Variable</th>
<th>Value</th>
</tr>
</thead>
<tbody>
{% for var in frame.vars|dictsort:"0" %}
<tr>
<td>{{ var.0|escape }}</td>
<td class="code"><div>{{ var.1|pprint|escape }}</div></td>
</tr>
{% endfor %}
</tbody>
</table>
{% endif %}
</li>
{% endfor %}
</ul>
</div>
{% endautoescape %}
<form action="http://dpaste.com/" name="pasteform" id="pasteform" method="post">
<div id="pastebinTraceback" class="pastebin">
<input type="hidden" name="language" value="PythonConsole">
<input type="hidden" name="title" value="{{ exception_type|escape }} at {{ request.path|escape }}">
<input type="hidden" name="source" value="Django Dpaste Agent">
<input type="hidden" name="poster" value="Django">
<textarea name="content" id="traceback_area" cols="140" rows="25">
Environment:
Request Method: {{ request.META.REQUEST_METHOD }}
Request URL: {{ request_protocol }}://{{ request.META.HTTP_HOST }}{{ request.path|escape }}
Django Version: {{ django_version_info }}
Python Version: {{ sys_version_info }}
Installed Applications:
{{ settings.INSTALLED_APPS|pprint }}
Installed Middleware:
{{ settings.MIDDLEWARE_CLASSES|pprint }}
{% if template_does_not_exist %}Template Loader Error:
{% if loader_debug_info %}Django tried loading these templates, in this order:
{% for loader in loader_debug_info %}Using loader {{ loader.loader }}:
{% for t in loader.templates %}{{ t.name }} (File {% if t.exists %}exists{% else %}does not exist{% endif %})
{% endfor %}{% endfor %}
{% else %}Django couldn't find any templates because your TEMPLATE_LOADERS setting is empty!
{% endif %}
{% endif %}{% if template_info %}
Template error:
In template {{ template_info.name }}, error at line {{ template_info.line }}
{{ template_info.message }}{% for source_line in template_info.source_lines %}{% ifequal source_line.0 template_info.line %}
{{ source_line.0 }} : {{ template_info.before }} {{ template_info.during }} {{ template_info.after }}
{% else %}
{{ source_line.0 }} : {{ source_line.1 }}
{% endifequal %}{% endfor %}{% endif %}
Traceback:
{% for frame in frames %}File "{{ frame.filename|escape }}" in {{ frame.function|escape }}
{% if frame.context_line %} {{ frame.lineno }}. {{ frame.context_line|escape }}{% endif %}
{% endfor %}
Exception Type: {{ exception_type|escape }} at {{ request.path|escape }}
Exception Value: {{ exception_value|escape }}
</textarea>
<br><br>
<input type="submit" value="Share this traceback on a public Web site">
</div>
</form>
</div>
<div id="requestinfo">
<h2>Request information</h2>
<h3 id="get-info">GET</h3>
{% if request.GET %}
<table class="req">
<thead>
<tr>
<th>Variable</th>
<th>Value</th>
</tr>
</thead>
<tbody>
{% for var in request.GET.items %}
<tr>
<td>{{ var.0 }}</td>
<td class="code"><div>{{ var.1|pprint }}</div></td>
</tr>
{% endfor %}
</tbody>
</table>
{% else %}
<p>No GET data</p>
{% endif %}
<h3 id="post-info">POST</h3>
{% if request.POST %}
<table class="req">
<thead>
<tr>
<th>Variable</th>
<th>Value</th>
</tr>
</thead>
<tbody>
{% for var in request.POST.items %}
<tr>
<td>{{ var.0 }}</td>
<td class="code"><div>{{ var.1|pprint }}</div></td>
</tr>
{% endfor %}
</tbody>
</table>
{% else %}
<p>No POST data</p>
{% endif %}
<h3 id="cookie-info">COOKIES</h3>
{% if request.COOKIES %}
<table class="req">
<thead>
<tr>
<th>Variable</th>
<th>Value</th>
</tr>
</thead>
<tbody>
{% for var in request.COOKIES.items %}
<tr>
<td>{{ var.0 }}</td>
<td class="code"><div>{{ var.1|pprint }}</div></td>
</tr>
{% endfor %}
</tbody>
</table>
{% else %}
<p>No cookie data</p>
{% endif %}
<h3 id="meta-info">META</h3>
<table class="req">
<thead>
<tr>
<th>Variable</th>
<th>Value</th>
</tr>
</thead>
<tbody>
{% for var in request.META.items|dictsort:"0" %}
<tr>
<td>{{ var.0 }}</td>
<td class="code"><div>{{ var.1|pprint }}</div></td>
</tr>
{% endfor %}
</tbody>
</table>
<h3 id="settings-info">Settings</h3>
<h4>Using settings module <code>{{ settings.SETTINGS_MODULE }}</code></h4>
<table class="req">
<thead>
<tr>
<th>Setting</th>
<th>Value</th>
</tr>
</thead>
<tbody>
{% for var in settings.items|dictsort:"0" %}
<tr>
<td>{{ var.0 }}</td>
<td class="code"><div>{{ var.1|pprint }}</div></td>
</tr>
{% endfor %}
</tbody>
</table>
</div>
<div id="explanation">
<p>
You're seeing this error because you have <code>DEBUG = True</code> in your
Django settings file. Change that to <code>False</code>, and Django will
display a standard 500 page.
</p>
</div>
</body>
</html>
"""
TECHNICAL_404_TEMPLATE = """
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
<html lang="en">
<head>
<meta http-equiv="content-type" content="text/html; charset=utf-8">
<title>Page not found at {{ request.path|escape }}</title>
<meta name="robots" content="NONE,NOARCHIVE">
<style type="text/css">
html * { padding:0; margin:0; }
body * { padding:10px 20px; }
body * * { padding:0; }
body { font:small sans-serif; background:#eee; }
body>div { border-bottom:1px solid #ddd; }
h1 { font-weight:normal; margin-bottom:.4em; }
h1 span { font-size:60%; color:#666; font-weight:normal; }
table { border:none; border-collapse: collapse; width:100%; }
td, th { vertical-align:top; padding:2px 3px; }
th { width:12em; text-align:right; color:#666; padding-right:.5em; }
#info { background:#f6f6f6; }
#info ol { margin: 0.5em 4em; }
#info ol li { font-family: monospace; }
#summary { background: #ffc; }
#explanation { background:#eee; border-bottom: 0px none; }
</style>
</head>
<body>
<div id="summary">
<h1>Page not found <span>(404)</span></h1>
<table class="meta">
<tr>
<th>Request Method:</th>
<td>{{ request.META.REQUEST_METHOD }}</td>
</tr>
<tr>
<th>Request URL:</th>
<td>{{ request_protocol }}://{{ request.META.HTTP_HOST }}{{ request.path|escape }}</td>
</tr>
</table>
</div>
<div id="info">
{% if urlpatterns %}
<p>
Using the URLconf defined in <code>{{ settings.ROOT_URLCONF }}</code>,
Django tried these URL patterns, in this order:
</p>
<ol>
{% for pattern in urlpatterns %}
<li>{{ pattern }}</li>
{% endfor %}
</ol>
<p>The current URL, <code>{{ request_path|escape }}</code>, didn't match any of these.</p>
{% else %}
<p>{{ reason }}</p>
{% endif %}
</div>
<div id="explanation">
<p>
You're seeing this error because you have <code>DEBUG = True</code> in
your Django settings file. Change that to <code>False</code>, and Django
will display a standard 404 page.
</p>
</div>
</body>
</html>
"""
EMPTY_URLCONF_TEMPLATE = """
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
<html lang="en"><head>
<meta http-equiv="content-type" content="text/html; charset=utf-8">
<meta name="robots" content="NONE,NOARCHIVE"><title>Welcome to Django</title>
<style type="text/css">
html * { padding:0; margin:0; }
body * { padding:10px 20px; }
body * * { padding:0; }
body { font:small sans-serif; }
body>div { border-bottom:1px solid #ddd; }
h1 { font-weight:normal; }
h2 { margin-bottom:.8em; }
h2 span { font-size:80%; color:#666; font-weight:normal; }
h3 { margin:1em 0 .5em 0; }
h4 { margin:0 0 .5em 0; font-weight: normal; }
table { border:1px solid #ccc; border-collapse: collapse; width:100%; background:white; }
tbody td, tbody th { vertical-align:top; padding:2px 3px; }
thead th { padding:1px 6px 1px 3px; background:#fefefe; text-align:left; font-weight:normal; font-size:11px; border:1px solid #ddd; }
tbody th { width:12em; text-align:right; color:#666; padding-right:.5em; }
ul { margin-left: 2em; margin-top: 1em; }
#summary { background: #e0ebff; }
#summary h2 { font-weight: normal; color: #666; }
#explanation { background:#eee; }
#instructions { background:#f6f6f6; }
#summary table { border:none; background:transparent; }
</style>
</head>
<body>
<div id="summary">
<h1>It worked!</h1>
<h2>Congratulations on your first Django-powered page.</h2>
</div>
<div id="instructions">
<p>Of course, you haven't actually done any work yet. Here's what to do next:</p>
<ul>
<li>If you plan to use a database, edit the <code>DATABASE_*</code> settings in <code>{{ project_name }}/settings.py</code>.</li>
<li>Start your first app by running <code>python {{ project_name }}/manage.py startapp [appname]</code>.</li>
</ul>
</div>
<div id="explanation">
<p>
You're seeing this message because you have <code>DEBUG = True</code> in your
Django settings file and you haven't configured any URLs. Get to work!
</p>
</div>
</body></html>
"""
|
|
from __future__ import unicode_literals
import datetime
import json
import re
import sys
import time
from email.header import Header
from django.conf import settings
from django.core import signals, signing
from django.core.exceptions import DisallowedRedirect
from django.core.serializers.json import DjangoJSONEncoder
from django.http.cookie import SimpleCookie
from django.utils import six, timezone
from django.utils.encoding import (
force_bytes, force_str, force_text, iri_to_uri,
)
from django.utils.http import cookie_date
from django.utils.six.moves import map
from django.utils.six.moves.http_client import responses
from django.utils.six.moves.urllib.parse import urlparse
_charset_from_content_type_re = re.compile(r';\s*charset=(?P<charset>[^\s;]+)', re.I)
class BadHeaderError(ValueError):
pass
class HttpResponseBase(six.Iterator):
"""
An HTTP response base class with dictionary-accessed headers.
This class doesn't handle content. It should not be used directly.
Use the HttpResponse and StreamingHttpResponse subclasses instead.
"""
status_code = 200
reason_phrase = None # Use default reason phrase for status code.
def __init__(self, content_type=None, status=None, reason=None, charset=None):
# _headers is a mapping of the lower-case name to the original case of
# the header (required for working with legacy systems) and the header
# value. Both the name of the header and its value are ASCII strings.
self._headers = {}
self._closable_objects = []
# This parameter is set by the handler. It's necessary to preserve the
# historical behavior of request_finished.
self._handler_class = None
self.cookies = SimpleCookie()
self.closed = False
if status is not None:
self.status_code = status
if reason is not None:
self.reason_phrase = reason
elif self.reason_phrase is None:
self.reason_phrase = responses.get(self.status_code, 'Unknown Status Code')
self._charset = charset
if content_type is None:
content_type = '%s; charset=%s' % (settings.DEFAULT_CONTENT_TYPE,
self.charset)
self['Content-Type'] = content_type
@property
def charset(self):
if self._charset is not None:
return self._charset
content_type = self.get('Content-Type', '')
matched = _charset_from_content_type_re.search(content_type)
if matched:
# Extract the charset and strip its double quotes
return matched.group('charset').replace('"', '')
return settings.DEFAULT_CHARSET
@charset.setter
def charset(self, value):
self._charset = value
def serialize_headers(self):
"""HTTP headers as a bytestring."""
def to_bytes(val, encoding):
return val if isinstance(val, bytes) else val.encode(encoding)
headers = [
(b': '.join([to_bytes(key, 'ascii'), to_bytes(value, 'latin-1')]))
for key, value in self._headers.values()
]
return b'\r\n'.join(headers)
if six.PY3:
__bytes__ = serialize_headers
else:
__str__ = serialize_headers
def _convert_to_charset(self, value, charset, mime_encode=False):
"""Converts headers key/value to ascii/latin-1 native strings.
`charset` must be 'ascii' or 'latin-1'. If `mime_encode` is True and
`value` can't be represented in the given charset, MIME-encoding
is applied.
"""
if not isinstance(value, (bytes, six.text_type)):
value = str(value)
try:
if six.PY3:
if isinstance(value, str):
# Ensure string is valid in given charset
value.encode(charset)
else:
# Convert bytestring using given charset
value = value.decode(charset)
else:
if isinstance(value, str):
# Ensure string is valid in given charset
value.decode(charset)
else:
# Convert unicode string to given charset
value = value.encode(charset)
except UnicodeError as e:
if mime_encode:
# Wrapping in str() is a workaround for #12422 under Python 2.
value = str(Header(value, 'utf-8', maxlinelen=sys.maxsize).encode())
else:
e.reason += ', HTTP response headers must be in %s format' % charset
raise
if str('\n') in value or str('\r') in value:
raise BadHeaderError("Header values can't contain newlines (got %r)" % value)
return value
def __setitem__(self, header, value):
header = self._convert_to_charset(header, 'ascii')
value = self._convert_to_charset(value, 'latin-1', mime_encode=True)
self._headers[header.lower()] = (header, value)
def __delitem__(self, header):
try:
del self._headers[header.lower()]
except KeyError:
pass
def __getitem__(self, header):
return self._headers[header.lower()][1]
def has_header(self, header):
"""Case-insensitive check for a header."""
return header.lower() in self._headers
__contains__ = has_header
def items(self):
return self._headers.values()
def get(self, header, alternate=None):
return self._headers.get(header.lower(), (None, alternate))[1]
def set_cookie(self, key, value='', max_age=None, expires=None, path='/',
domain=None, secure=False, httponly=False):
"""
Sets a cookie.
``expires`` can be:
- a string in the correct format,
- a naive ``datetime.datetime`` object in UTC,
- an aware ``datetime.datetime`` object in any time zone.
If it is a ``datetime.datetime`` object then ``max_age`` will be calculated.
"""
value = force_str(value)
self.cookies[key] = value
if expires is not None:
if isinstance(expires, datetime.datetime):
if timezone.is_aware(expires):
expires = timezone.make_naive(expires, timezone.utc)
delta = expires - expires.utcnow()
# Add one second so the date matches exactly (a fraction of
# time gets lost between converting to a timedelta and
# then the date string).
delta = delta + datetime.timedelta(seconds=1)
# Just set max_age - the max_age logic will set expires.
expires = None
max_age = max(0, delta.days * 86400 + delta.seconds)
else:
self.cookies[key]['expires'] = expires
if max_age is not None:
self.cookies[key]['max-age'] = max_age
# IE requires expires, so set it if hasn't been already.
if not expires:
self.cookies[key]['expires'] = cookie_date(time.time() +
max_age)
if path is not None:
self.cookies[key]['path'] = path
if domain is not None:
self.cookies[key]['domain'] = domain
if secure:
self.cookies[key]['secure'] = True
if httponly:
self.cookies[key]['httponly'] = True
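# Illustrative sketch (not part of Django) of how the expires/max_age handling
# in set_cookie() above plays out; the variable names are hypothetical.
#
#   response = HttpResponse()
#   response.set_cookie('session', 'abc', max_age=3600)
#   # -> Max-Age=3600, and (because IE requires it) an Expires attribute one hour out.
#   response.set_cookie('flag', '1',
#                       expires=datetime.datetime.utcnow() + datetime.timedelta(days=1))
#   # -> the datetime is converted to a max_age of roughly 86400 seconds, and the
#   #    Expires attribute is then recomputed from that max_age.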
def setdefault(self, key, value):
"""Sets a header unless it has already been set."""
if key not in self:
self[key] = value
def set_signed_cookie(self, key, value, salt='', **kwargs):
value = signing.get_cookie_signer(salt=key + salt).sign(value)
return self.set_cookie(key, value, **kwargs)
def delete_cookie(self, key, path='/', domain=None):
self.set_cookie(key, max_age=0, path=path, domain=domain,
expires='Thu, 01-Jan-1970 00:00:00 GMT')
# Common methods used by subclasses
def make_bytes(self, value):
"""Turn a value into a bytestring encoded in the output charset."""
# Per PEP 3333, this response body must be bytes. To avoid returning
# an instance of a subclass, this function returns `bytes(value)`.
# This doesn't make a copy when `value` already contains bytes.
# Handle string types -- we can't rely on force_bytes here because:
# - under Python 3 it attempts str conversion first
# - when self._charset != 'utf-8' it re-encodes the content
if isinstance(value, bytes):
return bytes(value)
if isinstance(value, six.text_type):
return bytes(value.encode(self.charset))
# Handle non-string types (#16494)
return force_bytes(value, self.charset)
# These methods partially implement the file-like object interface.
# See http://docs.python.org/lib/bltin-file-objects.html
# The WSGI server must call this method upon completion of the request.
# See http://blog.dscpl.com.au/2012/10/obligations-for-calling-close-on.html
def close(self):
for closable in self._closable_objects:
try:
closable.close()
except Exception:
pass
self.closed = True
signals.request_finished.send(sender=self._handler_class)
def write(self, content):
raise IOError("This %s instance is not writable" % self.__class__.__name__)
def flush(self):
pass
def tell(self):
raise IOError("This %s instance cannot tell its position" % self.__class__.__name__)
# These methods partially implement a stream-like object interface.
# See https://docs.python.org/library/io.html#io.IOBase
def writable(self):
return False
def writelines(self, lines):
raise IOError("This %s instance is not writable" % self.__class__.__name__)
class HttpResponse(HttpResponseBase):
"""
An HTTP response class with a string as content.
This content can be read, appended to, or replaced.
"""
streaming = False
def __init__(self, content=b'', *args, **kwargs):
super(HttpResponse, self).__init__(*args, **kwargs)
# Content is a bytestring. See the `content` property methods.
self.content = content
def serialize(self):
"""Full HTTP message, including headers, as a bytestring."""
return self.serialize_headers() + b'\r\n\r\n' + self.content
if six.PY3:
__bytes__ = serialize
else:
__str__ = serialize
@property
def content(self):
return b''.join(self._container)
@content.setter
def content(self, value):
# Consume iterators upon assignment to allow repeated iteration.
if hasattr(value, '__iter__') and not isinstance(value, (bytes, six.string_types)):
if hasattr(value, 'close'):
self._closable_objects.append(value)
value = b''.join(self.make_bytes(chunk) for chunk in value)
else:
value = self.make_bytes(value)
# Create a list of properly encoded bytestrings to support write().
self._container = [value]
def __iter__(self):
return iter(self._container)
def write(self, content):
self._container.append(self.make_bytes(content))
def tell(self):
return len(self.content)
def getvalue(self):
return self.content
def writable(self):
return True
def writelines(self, lines):
for line in lines:
self.write(line)
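# --- Illustrative usage sketch (not part of Django) ---------------------------
# A minimal demonstration of the dictionary-style header access described on
# HttpResponseBase and of HttpResponse's writable content. It assumes Django
# settings are already configured; the function name is hypothetical and it is
# never called from this module.
def _demo_http_response_headers():
    response = HttpResponse(b'hello', content_type='text/plain; charset=utf-8')
    response['X-Frame-Options'] = 'DENY'       # stored case-insensitively
    assert 'x-frame-options' in response       # has_header() / __contains__
    assert response['X-FRAME-OPTIONS'] == 'DENY'
    del response['X-Frame-Options']            # deleting again would be a silent no-op
    response.write(b', world')                 # HttpResponse supports write()
    assert response.content == b'hello, world'
    return response.serialize_headers()        # headers as a single bytestring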
class StreamingHttpResponse(HttpResponseBase):
"""
A streaming HTTP response class with an iterator as content.
This should only be iterated once, when the response is streamed to the
client. However, it can be appended to or replaced with a new iterator
that wraps the original content (or yields entirely new content).
"""
streaming = True
def __init__(self, streaming_content=(), *args, **kwargs):
super(StreamingHttpResponse, self).__init__(*args, **kwargs)
# `streaming_content` should be an iterable of bytestrings.
# See the `streaming_content` property methods.
self.streaming_content = streaming_content
@property
def content(self):
raise AttributeError("This %s instance has no `content` attribute. "
"Use `streaming_content` instead." % self.__class__.__name__)
@property
def streaming_content(self):
return map(self.make_bytes, self._iterator)
@streaming_content.setter
def streaming_content(self, value):
self._set_streaming_content(value)
def _set_streaming_content(self, value):
# Ensure we can never iterate on "value" more than once.
self._iterator = iter(value)
if hasattr(value, 'close'):
self._closable_objects.append(value)
def __iter__(self):
return self.streaming_content
def getvalue(self):
return b''.join(self.streaming_content)
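# --- Illustrative usage sketch (not part of Django) ---------------------------
# Shows the single-pass nature of streaming_content described above: the
# assigned iterable is wrapped in iter(), so it can only be consumed once.
# Assumes configured Django settings; the function name is hypothetical.
def _demo_streaming_response():
    def chunks():
        yield 'stream'
        yield 'ing'
    response = StreamingHttpResponse(chunks())
    body = b''.join(response)                  # first (and only) full iteration
    assert body == b'streaming'
    assert b''.join(response) == b''           # the underlying iterator is now exhausted
    return body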
class FileResponse(StreamingHttpResponse):
"""
A streaming HTTP response class optimized for files.
"""
block_size = 4096
def _set_streaming_content(self, value):
if hasattr(value, 'read'):
self.file_to_stream = value
filelike = value
if hasattr(filelike, 'close'):
self._closable_objects.append(filelike)
value = iter(lambda: filelike.read(self.block_size), b'')
else:
self.file_to_stream = None
super(FileResponse, self)._set_streaming_content(value)
class HttpResponseRedirectBase(HttpResponse):
allowed_schemes = ['http', 'https', 'ftp']
def __init__(self, redirect_to, *args, **kwargs):
parsed = urlparse(force_text(redirect_to))
if parsed.scheme and parsed.scheme not in self.allowed_schemes:
raise DisallowedRedirect("Unsafe redirect to URL with protocol '%s'" % parsed.scheme)
super(HttpResponseRedirectBase, self).__init__(*args, **kwargs)
self['Location'] = iri_to_uri(redirect_to)
url = property(lambda self: self['Location'])
class HttpResponseRedirect(HttpResponseRedirectBase):
status_code = 302
class HttpResponsePermanentRedirect(HttpResponseRedirectBase):
status_code = 301
class HttpResponseNotModified(HttpResponse):
status_code = 304
def __init__(self, *args, **kwargs):
super(HttpResponseNotModified, self).__init__(*args, **kwargs)
del self['content-type']
@HttpResponse.content.setter
def content(self, value):
if value:
raise AttributeError("You cannot set content to a 304 (Not Modified) response")
self._container = []
class HttpResponseBadRequest(HttpResponse):
status_code = 400
class HttpResponseNotFound(HttpResponse):
status_code = 404
class HttpResponseForbidden(HttpResponse):
status_code = 403
class HttpResponseNotAllowed(HttpResponse):
status_code = 405
def __init__(self, permitted_methods, *args, **kwargs):
super(HttpResponseNotAllowed, self).__init__(*args, **kwargs)
self['Allow'] = ', '.join(permitted_methods)
class HttpResponseGone(HttpResponse):
status_code = 410
class HttpResponseServerError(HttpResponse):
status_code = 500
class Http404(Exception):
pass
class JsonResponse(HttpResponse):
"""
An HTTP response class that consumes data to be serialized to JSON.
:param data: Data to be dumped into JSON. By default only ``dict`` objects
are allowed to be passed due to a security flaw before ECMAScript 5. See
the ``safe`` parameter for more information.
:param encoder: Should be a JSON encoder class. Defaults to
``django.core.serializers.json.DjangoJSONEncoder``.
:param safe: Controls if only ``dict`` objects may be serialized. Defaults
to ``True``.
"""
def __init__(self, data, encoder=DjangoJSONEncoder, safe=True, **kwargs):
if safe and not isinstance(data, dict):
raise TypeError('In order to allow non-dict objects to be '
'serialized set the safe parameter to False')
kwargs.setdefault('content_type', 'application/json')
data = json.dumps(data, cls=encoder)
super(JsonResponse, self).__init__(content=data, **kwargs)
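# --- Illustrative usage sketch (not part of Django) ---------------------------
# Demonstrates the ``safe`` guard and the pluggable encoder documented on
# JsonResponse. Assumes configured Django settings; the function name is
# hypothetical and never called from this module.
def _demo_json_response():
    ok = JsonResponse({'status': 'ok'})                    # dicts are always allowed
    assert ok['Content-Type'] == 'application/json'
    listing = JsonResponse([1, 2, 3], safe=False)          # non-dicts need safe=False
    stamped = JsonResponse({'when': datetime.datetime(2015, 1, 1)})  # DjangoJSONEncoder handles datetimes
    try:
        JsonResponse([1, 2, 3])                            # safe=True (the default) rejects lists
    except TypeError:
        pass
    return ok, listing, stamped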
|
|
#!/usr/bin/env python
#
# SPDX-FileCopyrightText: 2021-2022 Espressif Systems (Shanghai) CO LTD
# SPDX-License-Identifier: Apache-2.0
#
import collections
import fnmatch
import os
import sys
import tempfile
import unittest
from io import StringIO
try:
from ldgen.entity import Entity, EntityDB
from ldgen.fragments import parse_fragment_file
from ldgen.generation import Generation, GenerationException
from ldgen.linker_script import LinkerScript
from ldgen.output_commands import AlignAtAddress, InputSectionDesc, SymbolAtAddress
from ldgen.sdkconfig import SDKConfig
except ImportError:
sys.path.append(os.path.dirname(os.path.dirname(__file__)))
from ldgen.entity import Entity, EntityDB
from ldgen.fragments import parse_fragment_file
from ldgen.generation import Generation, GenerationException
from ldgen.linker_script import LinkerScript
from ldgen.output_commands import AlignAtAddress, InputSectionDesc, SymbolAtAddress
from ldgen.sdkconfig import SDKConfig
ROOT = Entity('*')
FREERTOS = Entity('libfreertos.a')
CROUTINE = Entity('libfreertos.a', 'croutine')
TIMERS = Entity('libfreertos.a', 'timers')
FREERTOS2 = Entity('libfreertos2.a')
class GenerationTest(unittest.TestCase):
def setUp(self):
self.generation = Generation()
self.entities = None
self.linker_script = None
with tempfile.NamedTemporaryFile(delete=False) as f:
self.kconfigs_source_file = os.path.join(tempfile.gettempdir(), f.name)
self.addCleanup(os.remove, self.kconfigs_source_file)
with tempfile.NamedTemporaryFile(delete=False) as f:
self.kconfig_projbuilds_source_file = os.path.join(tempfile.gettempdir(), f.name)
self.addCleanup(os.remove, self.kconfig_projbuilds_source_file)
os.environ['COMPONENT_KCONFIGS_SOURCE_FILE'] = self.kconfigs_source_file
os.environ['COMPONENT_KCONFIGS_PROJBUILD_SOURCE_FILE'] = self.kconfig_projbuilds_source_file
os.environ['COMPONENT_KCONFIGS'] = ''
os.environ['COMPONENT_KCONFIGS_PROJBUILD'] = ''
# prepare_kconfig_files.py doesn't have to be called because COMPONENT_KCONFIGS and
# COMPONENT_KCONFIGS_PROJBUILD are empty
self.sdkconfig = SDKConfig('data/Kconfig', 'data/sdkconfig')
fragment_file = parse_fragment_file('data/base.lf', self.sdkconfig)
self.generation.add_fragments_from_file(fragment_file)
self.entities = EntityDB()
with open('data/libfreertos.a.txt') as objdump:
self.entities.add_sections_info(objdump)
with open('data/linker_script.ld') as linker_script:
self.linker_script = LinkerScript(linker_script)
@staticmethod
def create_fragment_file(contents, name='test_fragment.lf'):
f = StringIO(contents)
f.name = name
return f
def add_fragments(self, text):
fragment_file = self.create_fragment_file(text)
fragment_file = parse_fragment_file(fragment_file, self.sdkconfig)
self.generation.add_fragments_from_file(fragment_file)
def write(self, expected, actual):
self.linker_script.fill(expected)
self.linker_script.write(open('expected.ld', 'w'))
self.linker_script.fill(actual)
self.linker_script.write(open('actual.ld', 'w'))
def generate_default_rules(self):
rules = collections.defaultdict(list)
rules['dram0_bss'].append(InputSectionDesc(ROOT, ['.bss', '.bss.*'], []))
rules['dram0_bss'].append(InputSectionDesc(ROOT, ['COMMON'], []))
rules['dram0_data'].append(InputSectionDesc(ROOT, ['.data', '.data.*'], []))
rules['dram0_data'].append(InputSectionDesc(ROOT, ['.dram', '.dram.*'], []))
rules['flash_text'].append(InputSectionDesc(ROOT, ['.literal', '.literal.*', '.text', '.text.*'], []))
rules['flash_rodata'].append(InputSectionDesc(ROOT, ['.rodata', '.rodata.*'], []))
rules['iram0_text'].append(InputSectionDesc(ROOT, ['.iram', '.iram.*'], []))
rules['rtc_bss'].append(InputSectionDesc(ROOT, ['.rtc.bss'], []))
rules['rtc_data'].append(InputSectionDesc(ROOT, ['.rtc.data'], []))
rules['rtc_data'].append(InputSectionDesc(ROOT, ['.rtc.rodata'], []))
rules['rtc_text'].append(InputSectionDesc(ROOT, ['.rtc.text', '.rtc.literal'], []))
return rules
def compare_rules(self, expected, actual):
self.assertEqual(expected, actual)
def get_default(self, target, rules):
return rules[target][0]
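# A minimal sketch (not part of the original tests) of the pattern the cases
# below follow when building their expected output: a non-default mapping entry
# adds an exclusion to the matching default command and appends a placement
# command for the same sections under the new target. For "croutine (noflash)"
# this looks roughly like:
#
#   expected = self.generate_default_rules()
#   expected['flash_text'][0].exclusions.add(CROUTINE)
#   expected['iram0_text'].append(
#       InputSectionDesc(CROUTINE, expected['flash_text'][0].sections, []))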
class DefaultMappingTest(GenerationTest):
def test_rule_generation_default(self):
# Checks that default rules are generated from
# the default scheme properly and even if no mappings
# are defined.
actual = self.generation.generate(self.entities)
expected = self.generate_default_rules()
self.compare_rules(expected, actual)
def test_default_mapping_lib(self):
# Mapping a library with default mapping. This should not emit additional rules,
# other than the default ones.
mapping = u"""
[mapping:test]
archive: libfreertos.a
entries:
* (default)
"""
self.add_fragments(mapping)
self.test_rule_generation_default()
def test_default_mapping_obj(self):
# Mapping an object with default mapping. This should not emit additional rules,
# other than the default ones.
mapping = u"""
[mapping:test]
archive: libfreertos.a
entries:
croutine (default)
"""
self.add_fragments(mapping)
self.test_rule_generation_default()
def test_default_mapping_symbol(self):
# Mapping a symbol with default mapping. This should not emit additional rules,
# other than the default ones.
mapping = u"""
[mapping:test]
archive: libfreertos.a
entries:
croutine:prvCheckPendingReadyList (default) #1
"""
self.add_fragments(mapping)
self.test_rule_generation_default()
def test_default_mapping_all(self):
# Mapping a library, object, and symbol with default mapping. This should not emit additional rules,
# other than the default ones.
mapping = u"""
[mapping:test]
archive: libfreertos.a
entries:
* (default) #1
croutine (default) #2
croutine:prvCheckPendingReadyList (default) #3
"""
self.add_fragments(mapping)
self.test_rule_generation_default()
def test_default_mapping_lib_symbol(self):
# Mapping a library, and symbol with default mapping. This should not emit additional rules,
# other than the default ones.
#
# This is a check needed to make sure generation does not generate
# intermediate commands due to presence of symbol mapping.
mapping = u"""
[mapping:test]
archive: libfreertos.a
entries:
* (default) #1
croutine:prvCheckPendingReadyList (default) #2
"""
self.add_fragments(mapping)
self.test_rule_generation_default()
def test_default_mapping_obj_symbol(self):
# Mapping a library, and symbol with default mapping. This should not emit additional rules,
# other than the default ones.
#
# This is a check needed to make sure generation does not generate
# intermediate commands due to presence of symbol mapping.
mapping = u"""
[mapping:test]
archive: libfreertos.a
entries:
croutine (default) #1
croutine:prvCheckPendingReadyList (default) #2
"""
self.add_fragments(mapping)
self.test_rule_generation_default()
class BasicTest(GenerationTest):
# Test basic and fundamental interactions between typical
# entries.
def test_nondefault_mapping_lib(self, alt=None):
# Test mapping entry different from default for a library.
# There should be exclusions in the default commands for flash_text and flash_rodata:
#
# flash_text
# *((EXCLUDE_FILE(libfreertos.a)) .literal ...) A
#
# Commands placing the entire library in iram, dram should be generated:
#
# iram0_text
# *(.iram ...)
# *libfreertos.a(.literal ...) B
mapping = u"""
[mapping:test]
archive: libfreertos.a
entries:
* (noflash) #1
"""
self.add_fragments(alt if alt else mapping)
actual = self.generation.generate(self.entities)
expected = self.generate_default_rules()
flash_text = expected['flash_text']
flash_rodata = expected['flash_rodata']
iram0_text = expected['iram0_text']
dram0_data = expected['dram0_data']
# Generate exclusions in flash_text and flash_rodata A
flash_text[0].exclusions.add(FREERTOS)
flash_rodata[0].exclusions.add(FREERTOS)
# Input section commands in iram_text and dram0_data for #1 B
iram0_text.append(InputSectionDesc(FREERTOS, flash_text[0].sections, []))
dram0_data.append(InputSectionDesc(FREERTOS, flash_rodata[0].sections, []))
self.compare_rules(expected, actual)
def test_nondefault_mapping_obj(self, alt=None):
# Test mapping entry different from default for an object.
# There should be exclusions in the default commands for flash_text and flash_rodata:
#
# flash_text
# *((EXCLUDE_FILE(libfreertos.a:croutine)) .literal ...) A
#
# Commands placing the entire library in iram, dram should be generated:
#
# iram0_text
# *(.iram ...)
# *libfreertos.a:croutine(.literal ...) B
mapping = u"""
[mapping:test]
archive: libfreertos.a
entries:
croutine (noflash) #1
"""
self.add_fragments(alt if alt else mapping)
actual = self.generation.generate(self.entities)
expected = self.generate_default_rules()
flash_text = expected['flash_text']
flash_rodata = expected['flash_rodata']
iram0_text = expected['iram0_text']
dram0_data = expected['dram0_data']
# Generate exclusions in flash_text and flash_rodata A
flash_text[0].exclusions.add(CROUTINE)
flash_rodata[0].exclusions.add(CROUTINE)
# Input section commands in iram_text and dram0_data for #1 B
iram0_text.append(InputSectionDesc(CROUTINE, flash_text[0].sections, []))
dram0_data.append(InputSectionDesc(CROUTINE, flash_rodata[0].sections, []))
self.compare_rules(expected, actual)
def test_nondefault_mapping_symbol(self):
# Test mapping entry different from default for symbol.
# There should be exclusions in the default commands for flash_text, as well as the implicit intermediate object command
# with an exclusion from default:
#
# flash_text
# *((EXCLUDE_FILE(libfreertos.a:croutine)) .literal ...) A
# *libfreertos.a:croutine(.literal .literal.prvCheckDelayedList ...) B
#
# Commands placing the entire library in iram should be generated:
#
# iram0_text
# *(.iram ...)
# *libfreertos.a:croutine(.text.prvCheckPendingReadyList .literal.prvCheckPendingReadyList) C
mapping = u"""
[mapping:test]
archive: libfreertos.a
entries:
croutine:prvCheckPendingReadyList (noflash) #1
"""
self.add_fragments(mapping)
actual = self.generation.generate(self.entities)
expected = self.generate_default_rules()
flash_text = expected['flash_text']
iram0_text = expected['iram0_text']
# Generate exclusion in flash_text A
flash_text[0].exclusions.add(CROUTINE)
# Generate intermediate command B
# List all relevant sections except the symbol
# being mapped
croutine_sections = self.entities.get_sections('libfreertos.a', 'croutine')
filtered_sections = fnmatch.filter(croutine_sections, '.literal.*')
filtered_sections.extend(fnmatch.filter(croutine_sections, '.text.*'))
filtered_sections = [s for s in filtered_sections if not s.endswith('prvCheckPendingReadyList')]
filtered_sections.append('.text')
flash_text.append(InputSectionDesc(CROUTINE, set(filtered_sections), []))
# Input section commands in iram_text for #1 C
iram0_text.append(InputSectionDesc(CROUTINE, set(['.text.prvCheckPendingReadyList', '.literal.prvCheckPendingReadyList']), []))
self.compare_rules(expected, actual)
def test_default_symbol_nondefault_lib(self):
# Test default symbol mapping with different lib mapping. This should create an implicit intermediate object command.
# The significant targets are flash_text, flash_rodata, iram0_text, dram0_data.
#
# flash_text
# *(EXCLUDE_FILE(libfreertos.a) .text ...) A
# libfreertos.a:croutine (.text.prvCheckPendingReadyList .literal.prvCheckPendingReadyList) B
#
# flash_rodata
# *(EXCLUDE_FILE(libfreertos.a) .rodata ...) A
#
# iram0_text
# * ( .iram ...)
# libfreertos.a (EXCLUDE_FILE(libfreertos:croutine) .text ...) C.1
# *libfreertos.a:croutine(.literal .literal.prvCheckDelayedList ...) D
#
# dram0_data
# * ( .dram ...)
# libfreertos.a ( .rodata ...) C.2
#
# Only default commands are in the other targets.
mapping = u"""
[mapping:test]
archive: libfreertos.a
entries:
* (noflash) #1
croutine:prvCheckPendingReadyList (default) #2
"""
self.add_fragments(mapping)
actual = self.generation.generate(self.entities)
expected = self.generate_default_rules()
flash_text = expected['flash_text']
flash_rodata = expected['flash_rodata']
iram0_text = expected['iram0_text']
dram0_data = expected['dram0_data']
# Exclusions for #1 A
flash_text[0].exclusions.add(FREERTOS)
flash_rodata[0].exclusions.add(FREERTOS)
# Commands for #1 C.1 & C.2
# C.1 excludes intermediate command for #2
iram0_text.append(InputSectionDesc(FREERTOS, flash_text[0].sections, [CROUTINE]))
dram0_data.append(InputSectionDesc(FREERTOS, flash_rodata[0].sections, []))
# Intermediate command for excluding #2 D
croutine_sections = self.entities.get_sections('libfreertos.a', 'croutine')
filtered_sections = fnmatch.filter(croutine_sections, '.literal.*')
filtered_sections.extend(fnmatch.filter(croutine_sections, '.text.*'))
filtered_sections = [s for s in filtered_sections if not s.endswith('prvCheckPendingReadyList')]
filtered_sections.append('.text')
iram0_text.append(InputSectionDesc(CROUTINE, set(filtered_sections), []))
# Command for #2 B
flash_text.append(InputSectionDesc(CROUTINE, set(['.text.prvCheckPendingReadyList', '.literal.prvCheckPendingReadyList']), []))
self.compare_rules(expected, actual)
def test_default_symbol_nondefault_obj(self):
# Test default symbol mapping with different obj mapping. Since there is an explicit entry for the object,
# the sections for that object should just be expanded and the symbol section subtracted, to be placed
# using another command.
#
# flash_text
# *(EXCLUDE_FILE(libfreertos.a:croutine) .text ...) A
# libfreertos.a:croutine (.text.prvCheckPendingReadyList .literal.prvCheckPendingReadyList) B
#
# flash_rodata
# *(EXCLUDE_FILE(libfreertos.a:croutine) .rodata ...) A
#
# iram0_text
# *( .iram ...)
# *libfreertos.a:croutine(.literal .literal.prvCheckDelayedList ...) C.1
#
# dram0_data
# *(.data ..)
# *libfreertos.a:croutine(.rodata ....) C.2
#
# Only default commands are in the other targets
mapping = u"""
[mapping:test]
archive: libfreertos.a
entries:
croutine (noflash) #1
croutine:prvCheckPendingReadyList (default) #2
"""
self.add_fragments(mapping)
actual = self.generation.generate(self.entities)
expected = self.generate_default_rules()
flash_text = expected['flash_text']
flash_rodata = expected['flash_rodata']
iram0_text = expected['iram0_text']
dram0_data = expected['dram0_data']
# Exclusions for #1 A
flash_text[0].exclusions.add(CROUTINE)
flash_rodata[0].exclusions.add(CROUTINE)
# Commands for #1 C.1 & C.2
# C.1 list relevant sections for libfreertos.a:croutine to
# exclude symbol to map
croutine_sections = self.entities.get_sections('libfreertos.a', 'croutine')
filtered_sections = fnmatch.filter(croutine_sections, '.literal.*')
filtered_sections.extend(fnmatch.filter(croutine_sections, '.text.*'))
filtered_sections = [s for s in filtered_sections if not s.endswith('prvCheckPendingReadyList')]
filtered_sections.append('.text')
iram0_text.append(InputSectionDesc(CROUTINE, set(filtered_sections), []))
dram0_data.append(InputSectionDesc(CROUTINE, flash_rodata[0].sections, []))
# Command for #2 B
flash_text.append(InputSectionDesc(CROUTINE, set(['.text.prvCheckPendingReadyList', '.literal.prvCheckPendingReadyList']), []))
self.compare_rules(expected, actual)
def test_default_nondefault_alternating(self):
# Here, each of the entries map sections to something different
# than its one-level-up entry.
#
# * text -> flash, rodata -> flash
# libfreertos.a text -> iram, rodata -> dram
# libfreertos.a:croutine text -> flash, rodata -> flash
# croutine:prvCheckPendingReadyList text -> iram
#
# The significant targets are flash_text, flash_rodata, iram0_text, and dram0_data.
#
# flash_text
# *(EXCLUDE_FILE(libfreertos.a) .text ...) A
# *libfreertos.a:croutine(.literal .literal.prvCheckDelayedList ...) B.1
#
# flash_rodata
# *(EXCLUDE_FILE(libfreertos.a) .rodata ...) A
# *libfreertos.a:croutine(.rodata .rodata.*) B.2
#
# iram0_text
# * ( .iram ...)
# libfreertos.a (EXCLUDE_FILE(libfreertos:croutine) .text ...) C
# libfreertos.a:croutine (.text.prvCheckPendingReadyList .literal.prvCheckPendingReadyList) D
#
# dram0_data
# * ( .dram ...)
# libfreertos.a (EXCLUDE_FILE(libfreertos:croutine) .rodata ...) C
#
# For the other targets only the default commands should be present.
mapping = u"""
[mapping:test]
archive: libfreertos.a
entries:
* (noflash) #1
croutine (default) #2
croutine:prvCheckPendingReadyList (noflash) #3
"""
self.add_fragments(mapping)
actual = self.generation.generate(self.entities)
expected = self.generate_default_rules()
flash_text = expected['flash_text']
flash_rodata = expected['flash_rodata']
iram0_text = expected['iram0_text']
dram0_data = expected['dram0_data']
# Exclusions for #1 A
# Only for flash_text and flash_rodata
flash_text[0].exclusions.add(FREERTOS)
flash_rodata[0].exclusions.add(FREERTOS)
# Commands for #1 C
# with exclusions for #2
iram0_text.append(InputSectionDesc(FREERTOS, flash_text[0].sections, [CROUTINE]))
dram0_data.append(InputSectionDesc(FREERTOS, flash_rodata[0].sections, [CROUTINE]))
# Commands for #2 B.1
flash_rodata.append(InputSectionDesc(CROUTINE, flash_rodata[0].sections, []))
# List all relevant sections in case of flash_text B.2
# as exclusion for #3
croutine_sections = self.entities.get_sections('libfreertos.a', 'croutine')
filtered_sections = fnmatch.filter(croutine_sections, '.literal.*')
filtered_sections.extend(fnmatch.filter(croutine_sections, '.text.*'))
filtered_sections = [s for s in filtered_sections if not s.endswith('prvCheckPendingReadyList')]
filtered_sections.append('.text')
flash_text.append(InputSectionDesc(CROUTINE, set(filtered_sections), []))
# Command for #3 D
iram0_text.append(InputSectionDesc(CROUTINE, set(['.text.prvCheckPendingReadyList', '.literal.prvCheckPendingReadyList']), []))
self.compare_rules(expected, actual)
def test_nondefault_but_same_lib_and_obj(self):
# Extension of DefaultMappingTest. Commands should not be generated for #2, since it does similar mapping
# to #1. Output is similar to test_nondefault_mapping_lib.
mapping = u"""
[mapping:test]
archive: libfreertos.a
entries:
* (noflash) #1
croutine (noflash) #2
"""
self.test_nondefault_mapping_lib(mapping)
def test_nondefault_but_same_lib_and_sym(self):
# Extension of DefaultMappingTest. Commands should not be generated for #2, since it does similar mapping
# to #1. Output is similar to test_nondefault_mapping_lib.
mapping = u"""
[mapping:test]
archive: libfreertos.a
entries:
* (noflash) #1
croutine:prvCheckPendingReadyList (noflash) #2
"""
self.test_nondefault_mapping_lib(mapping)
def test_nondefault_but_same_obj_and_sym(self):
# Commands should not be generated for #2, since it does similar mapping
# to #1. Output is similar to test_nondefault_mapping_obj.
mapping = u"""
[mapping:test]
archive: libfreertos.a
entries:
croutine (noflash) #1
croutine:prvCheckPendingReadyList (noflash) #2
"""
self.test_nondefault_mapping_obj(mapping)
def test_multiple_symbols_excluded_from_intermediate_command(self):
# Test mapping multiple symbols from the same object.
# All these symbols must be successfully excluded from
# the intermediate command.
#
# flash_text
# * (EXCLUDE_FILE(libfreertos.a:croutine) .text ...) A
# libfreertos:croutine(.text ...) B
#
# iram0_text
# *(.iram ...)
# libfreertos.a:croutine(.text.prvCheckDelayedList .literal.prvCheckDelayedList) C
# libfreertos.a:croutine(.text.prvCheckPendingReadyList .literal.prvCheckPendingReadyList) C
#
mapping = u"""
[mapping:test]
archive: libfreertos.a
entries:
croutine:prvCheckPendingReadyList (noflash) #1
croutine:prvCheckDelayedList (noflash) #2
"""
self.add_fragments(mapping)
actual = self.generation.generate(self.entities)
expected = self.generate_default_rules()
flash_text = expected['flash_text']
iram0_text = expected['iram0_text']
# Exclusions for #1 & #2 intermediate command A
flash_text[0].exclusions.add(CROUTINE)
# Intermediate command for #1 & #2 which lists B
# all relevant sections in croutine except prvCheckPendingReadyList
# and prvCheckDelayedList
croutine_sections = self.entities.get_sections('libfreertos.a', 'croutine')
filtered_sections = fnmatch.filter(croutine_sections, '.literal.*')
filtered_sections.extend(fnmatch.filter(croutine_sections, '.text.*'))
filtered_sections = [s for s in filtered_sections if not s.endswith('prvCheckPendingReadyList')]
filtered_sections = [s for s in filtered_sections if not s.endswith('prvCheckDelayedList')]
filtered_sections.append('.text')
flash_text.append(InputSectionDesc(CROUTINE, set(filtered_sections), []))
# Commands for #1 & 2
iram0_text.append(InputSectionDesc(CROUTINE, set(['.text.prvCheckDelayedList', '.literal.prvCheckDelayedList']), []))
iram0_text.append(InputSectionDesc(CROUTINE, set(['.text.prvCheckPendingReadyList', '.literal.prvCheckPendingReadyList']), []))
self.compare_rules(expected, actual)
def test_root_mapping_fragment(self):
# Test creation of a mapping fragment that maps '*'.
# This should generate another default command in iram0_text:
#
# iram0_text
# * (.custom_section) A
# * (.iram .iram.*)
mapping = u"""
[sections:custom_section]
entries:
.custom_section
[scheme:custom_scheme]
entries:
custom_section -> iram0_text
[mapping:default2]
archive: *
entries:
* (custom_scheme) #1
"""
self.add_fragments(mapping)
actual = self.generation.generate(self.entities)
expected = self.generate_default_rules()
# Generate default command A
# Since these are the same 'specificity', the commands
# are arranged alphabetically.
expected['iram0_text'].append(expected['iram0_text'][0])
expected['iram0_text'][0] = InputSectionDesc(ROOT, ['.custom_section'], [])
self.compare_rules(expected, actual)
class AdvancedTest(GenerationTest):
# Test valid but quirky cases, corner cases, failure cases, and
# cases involving interactions between schemes and other mapping
# fragments.
def test_same_entity_no_scheme_common(self):
# Test same entity being mapped by schemes that have nothing in common.
#
# noflash_data: rodata -> dram0_data
# noflash_text: text -> iram0_text
#
# This operation should succeed with the following commands:
#
# flash_text
# *(EXCLUDE_FILE(libfreertos.a:croutine) .text ...) A
#
# flash_rodata
# *(EXCLUDE_FILE(libfreertos.a:croutine) .rodata ...) B
#
# iram0_text
# *(.iram ...)
# *libfreertos.a:croutine(.text .text.* ...) C
#
# dram0_data
# *(.data ..)
# *(.dram ...)
# *libfreertos.a:croutine(.rodata .rodata.*) D
mapping = u"""
[mapping:test]
archive: libfreertos.a
entries:
croutine (noflash_text) #1
croutine (noflash_data) #2
"""
self.add_fragments(mapping)
actual = self.generation.generate(self.entities)
expected = self.generate_default_rules()
flash_text = expected['flash_text']
flash_rodata = expected['flash_rodata']
iram0_text = expected['iram0_text']
dram0_data = expected['dram0_data']
# Exclusions for #1 A
flash_text[0].exclusions.add(CROUTINE)
# Exclusions for #2 B
flash_rodata[0].exclusions.add(CROUTINE)
# Command for #1 C
iram0_text.append(InputSectionDesc(CROUTINE, flash_text[0].sections, []))
# Command for #2 D
dram0_data.append(InputSectionDesc(CROUTINE, flash_rodata[0].sections, []))
self.compare_rules(expected, actual)
def test_same_entity_sub_scheme(self):
# Test same entity being mapped by scheme that is a subset of the other.
#
# noflash: text -> iram0_text, rodata -> dram0_data
# noflash_data: rodata -> dram0_data
#
# `rodata -> dram0_data` is common between the two schemes.
#
# This operation should succeed with the following commands:
#
# flash_text
# *(EXCLUDE_FILE(libfreertos.a:croutine) .text ...) A
#
# flash_rodata
# *(EXCLUDE_FILE(libfreertos.a:croutine) .rodata ...) B
#
# iram0_text
# *(.iram ...)
# *libfreertos.a:croutine(.text .text.* ...) C
#
# dram0_data
# *(.data ..)
# *(.dram ...)
# *libfreertos.a:croutine(.rodata .rodata.*) D
mapping = u"""
[mapping:test]
archive: libfreertos.a
entries:
croutine (noflash) #1
croutine (noflash_data) #2
"""
self.add_fragments(mapping)
actual = self.generation.generate(self.entities)
expected = self.generate_default_rules()
flash_text = expected['flash_text']
flash_rodata = expected['flash_rodata']
iram0_text = expected['iram0_text']
dram0_data = expected['dram0_data']
# Exclusions for #1 A
flash_text[0].exclusions.add(CROUTINE)
# Exclusions for #1 & #2 B
flash_rodata[0].exclusions.add(CROUTINE)
# Command for #1 C
iram0_text.append(InputSectionDesc(CROUTINE, flash_text[0].sections, []))
# Command for #1 & #2 D
dram0_data.append(InputSectionDesc(CROUTINE, flash_rodata[0].sections, []))
self.compare_rules(expected, actual)
def test_same_entity_conflicting_scheme(self, alt=None):
# Test same entity being mapped by scheme conflicting with another.
#
# rtc = text -> rtc_text, rodata -> rtc_data
# noflash = text -> iram0_text, rodata -> dram0_data
#
# This operation should fail.
mapping = u"""
[mapping:test]
archive: libfreertos.a
entries:
croutine (noflash) #1
croutine (rtc) #2
"""
self.add_fragments(alt if alt else mapping)
with self.assertRaises(GenerationException):
self.generation.generate(self.entities)
def test_complex_mapping_case(self, alt=None):
# Test a complex case where an object is mapped using
# one scheme, but a specific symbol in that object is mapped
# using another. Another object and symbol is mapped the other way around.
#
# flash_text
# *(EXCLUDE_FILE(libfreertos.a:croutine libfreertos.a:timers) .text ...) A, B
#
# flash_rodata
# *(EXCLUDE_FILE(libfreertos.a:croutine libfreertos.a:timers) .rodata ...) A, B
#
# dram0_data
# *(EXCLUDE_FILES(libfreertos.a:timers) .data ..) B
# *(.dram ...)
# *libfreertos.a:croutine(.rodata .rodata.*) C
# *libfreertos.a:timers(.rodata.prvProcessReceivedCommands ...) E
#
# dram0_bss
# *(EXCLUDE_FILE(libfreertos.a:timers) .bss .bss.* ...) B
# *(EXCLUDE_FILE(libfreertos.a:timers) COMMON) B
#
# iram0_text
# *(.iram ...)
# *libfreertos.a:croutine(.literal .literal.prvCheckDelayedList ...) C
# *libfreertos.a:timers(.literal .literal.prvProcessReceivedCommands ...) E
#
# rtc_text
# *(rtc.text .rtc.literal)
# libfreertos.a:croutine (.text.prvCheckPendingReadyList .literal.prvCheckPendingReadyList) F
# libfreertos.a:timers (.text .text.prvCheckForValidListAndQueue ...) D.2
#
# rtc_data
# *(rtc.data)
# *(rtc.rodata)
# libfreertos.a:timers (.data .data.*) D
# libfreertos.a:timers (.rodata ...) D.2
#
# rtc_bss
# *(rtc.bss .rtc.bss)
# libfreertos.a:timers (.bss .bss.*) D
# libfreertos.a:timers (COMMON) D
mapping = u"""
[mapping:test]
archive: libfreertos.a
entries:
croutine (noflash) #1
timers (rtc) #2
timers:prvProcessReceivedCommands (noflash) #3
croutine:prvCheckPendingReadyList (rtc) #4
"""
self.add_fragments(alt if alt else mapping)
actual = self.generation.generate(self.entities)
expected = self.generate_default_rules()
flash_text = expected['flash_text']
flash_rodata = expected['flash_rodata']
dram0_data = expected['dram0_data']
iram0_text = expected['iram0_text']
dram0_bss = expected['dram0_bss']
rtc_text = expected['rtc_text']
rtc_data = expected['rtc_data']
rtc_bss = expected['rtc_bss']
# Exclusions for #1 A
flash_text[0].exclusions.add(CROUTINE)
flash_rodata[0].exclusions.add(CROUTINE)
# Exclusions for #2 B
flash_text[0].exclusions.add(TIMERS)
flash_rodata[0].exclusions.add(TIMERS)
dram0_data[0].exclusions.add(TIMERS)
dram0_bss[0].exclusions.add(TIMERS)
dram0_bss[1].exclusions.add(TIMERS)
# Commands for #1 C
# List all relevant sections excluding #4 for text -> iram0_text
croutine_sections = self.entities.get_sections('libfreertos.a', 'croutine')
filtered_sections = fnmatch.filter(croutine_sections, '.literal.*')
filtered_sections.extend(fnmatch.filter(croutine_sections, '.text.*'))
filtered_sections = [s for s in filtered_sections if not s.endswith('prvCheckPendingReadyList')]
filtered_sections.append('.text')
iram0_text.append(InputSectionDesc(CROUTINE, set(filtered_sections), []))
dram0_data.append(InputSectionDesc(CROUTINE, flash_rodata[0].sections, []))
# Commands for #4 F
# Processed first due to alphabetical ordering
rtc_text.append(InputSectionDesc(CROUTINE, set(['.text.prvCheckPendingReadyList', '.literal.prvCheckPendingReadyList']), []))
# Commands for #2 D
# List all relevant sections excluding #3 for text -> rtc_text and D.2
# rodata -> rtc_data
timers_sections = self.entities.get_sections('libfreertos.a', 'timers')
filtered_sections = fnmatch.filter(timers_sections, '.literal.*')
filtered_sections.extend(fnmatch.filter(timers_sections, '.text.*'))
filtered_sections = [s for s in filtered_sections if not s.endswith('prvProcessReceivedCommands')]
filtered_sections.append('.text')
rtc_text.append(InputSectionDesc(TIMERS, set(filtered_sections), []))
rtc_data.append(InputSectionDesc(TIMERS, dram0_data[0].sections, []))
filtered_sections = fnmatch.filter(timers_sections, '.rodata.*')
filtered_sections = [s for s in filtered_sections if not s.endswith('prvProcessReceivedCommands')]
rtc_data.append(InputSectionDesc(TIMERS, set(filtered_sections), []))
rtc_bss.append(InputSectionDesc(TIMERS, dram0_bss[0].sections, []))
rtc_bss.append(InputSectionDesc(TIMERS, dram0_bss[1].sections, []))
# Commands for #3 E
iram0_text.append(InputSectionDesc(TIMERS, set(['.text.prvProcessReceivedCommands', '.literal.prvProcessReceivedCommands']), []))
dram0_data.append(InputSectionDesc(TIMERS, set(['.rodata.prvProcessReceivedCommands']), []))
self.compare_rules(expected, actual)
def test_multiple_mapping_fragments(self):
# Test mapping multiple fragments succeeds, particularly
# generating exclusions from the default command of archive
# and object specificity.
#
# flash_text
# * (EXCLUDE_FILE(libfreertos2.a libfreertos.a:croutine) .text ...)
#
# flash_rodata
# * (EXCLUDE_FILE(libfreertos2.a libfreertos.a:croutine) .rodata ...)
#
# iram0_text
mapping = u"""
[mapping:test_1]
archive: libfreertos.a
entries:
croutine (noflash) #1
[mapping:test_2]
archive: libfreertos2.a
entries:
* (noflash) #2
"""
self.add_fragments(mapping)
actual = self.generation.generate(self.entities)
expected = self.generate_default_rules()
flash_text = expected['flash_text']
flash_rodata = expected['flash_rodata']
iram0_text = expected['iram0_text']
dram0_data = expected['dram0_data']
# Exclusions for #1 A
flash_text[0].exclusions.add(CROUTINE)
flash_rodata[0].exclusions.add(CROUTINE)
# Exclusions for #1 & #2 B
flash_text[0].exclusions.add(FREERTOS2)
flash_rodata[0].exclusions.add(FREERTOS2)
# Command for #1 C
iram0_text.append(InputSectionDesc(CROUTINE, flash_text[0].sections, []))
dram0_data.append(InputSectionDesc(CROUTINE, flash_rodata[0].sections, []))
# Command for #1 & #2 D
iram0_text.append(InputSectionDesc(FREERTOS2, flash_text[0].sections, []))
dram0_data.append(InputSectionDesc(FREERTOS2, flash_rodata[0].sections, []))
self.compare_rules(expected, actual)
def test_mapping_same_lib_in_multiple_fragments_no_conflict(self):
# Test mapping fragments operating on the same archive.
# In these cases, the entries are taken together.
#
# Uses the same entries as test_complex_mapping_case but spreads them across
# two fragments. The output should still be the same.
mapping = u"""
[mapping:test_1]
archive: libfreertos.a
entries:
croutine (noflash) #1
timers:prvProcessReceivedCommands (noflash) #3
[mapping:test_2]
archive: libfreertos.a
entries:
timers (rtc) #2
croutine:prvCheckPendingReadyList (rtc) #4
"""
self.test_complex_mapping_case(mapping)
def test_mapping_same_lib_in_multiple_fragments_conflict(self):
# Test mapping fragments operating on the same archive
# with conflicting mappings.
mapping = u"""
[mapping:test_1]
archive: libfreertos.a
entries:
croutine (noflash) #1
[mapping:test_2]
archive: libfreertos.a
entries:
croutine (rtc) #2
"""
self.test_same_entity_conflicting_scheme(mapping)
def test_command_order(self):
# Test command order sorting: the commands should be sorted by specificity, then
# alphabetically. This contributes to deterministic output given
# the same input mapping entries.
#
# This ordering is also tested in other tests as a side-effect.
#
# flash_text
# * (EXCLUDE_FILE(libfreertos.a:croutine libfreertos.a:croutine2)) A
# libfreertos.a:croutine(.text ....) B
#
# iram0_text
#
# * (.iram .iram.*)
# libfreertos:croutine(.text .literal ...) C
# libfreertos:croutine(.text.prvCheckDelayedList .literal.prvCheckDelayedList) F
# libfreertos:croutine(.text.prvCheckPendingReadyList .literal.prvCheckPendingReadyList) G
# libfreertos2:croutine(.text .literal ...) D
# libfreertos2:croutine2(.text .literal ...) E
mapping = u"""
[mapping:freertos2]
archive: libfreertos2.a
entries:
croutine2 (noflash_text) #1
croutine (noflash_text) #2
[mapping:freertos]
archive: libfreertos.a
entries:
croutine:prvCheckPendingReadyList (noflash_text) #3
croutine:prvCheckDelayedList (noflash_text) #4
"""
self.add_fragments(mapping)
actual = self.generation.generate(self.entities)
expected = self.generate_default_rules()
flash_text = expected['flash_text']
iram0_text = expected['iram0_text']
# Exclusions for #1 A
flash_text[0].exclusions.add(CROUTINE)
flash_text[0].exclusions.add(Entity(FREERTOS2.archive, 'croutine2'))
flash_text[0].exclusions.add(Entity(FREERTOS2.archive, 'croutine'))
# Intermediate command for #3 and #4 B
croutine_sections = self.entities.get_sections('libfreertos.a', 'croutine')
filtered_sections = fnmatch.filter(croutine_sections, '.literal.*')
filtered_sections.extend(fnmatch.filter(croutine_sections, '.text.*'))
filtered_sections = [s for s in filtered_sections if not s.endswith('prvCheckPendingReadyList')]
filtered_sections = [s for s in filtered_sections if not s.endswith('prvCheckDelayedList')]
filtered_sections.append('.text')
flash_text.append(InputSectionDesc(CROUTINE, set(filtered_sections), []))
# Command for
iram0_text.append(InputSectionDesc(CROUTINE, set(['.text.prvCheckDelayedList', '.literal.prvCheckDelayedList']), []))
iram0_text.append(InputSectionDesc(CROUTINE, set(['.text.prvCheckPendingReadyList', '.literal.prvCheckPendingReadyList']), []))
iram0_text.append(InputSectionDesc(Entity(FREERTOS2.archive, 'croutine'), flash_text[0].sections, []))
iram0_text.append(InputSectionDesc(Entity(FREERTOS2.archive, 'croutine2'), flash_text[0].sections, []))
self.compare_rules(expected, actual)
def test_ambigious_obj(self):
# Command generation for ambiguous entry should fail.
mapping = u"""
[mapping:test]
archive: libfreertos.a
entries:
port:xPortGetTickRateHz (noflash) #1
"""
self.add_fragments(mapping)
with self.assertRaises(GenerationException):
self.generation.generate(self.entities)
def test_root_mapping_fragment_conflict(self):
# Test that root mapping fragments are also checked for
# conflicts.
#
# 'custom_scheme' entries conflict the 'default' scheme
# entries.
mapping = u"""
[scheme:custom_scheme]
entries:
flash_text -> iram0_text
[mapping:default2]
archive: *
entries:
* (custom_scheme)
"""
self.add_fragments(mapping)
with self.assertRaises(GenerationException):
self.generation.generate(self.entities)
def test_root_mapping_fragment_duplicate(self):
# Same root mappings have no effect.
#
# custom_scheme has the 'iram -> iram0_text' in common with
# default scheme
mapping = u"""
[sections:custom_section]
entries:
.custom_section
[scheme:custom_scheme]
entries:
iram -> iram0_text
custom_section -> iram0_text
[mapping:default2]
archive: *
entries:
* (custom_scheme)
"""
self.add_fragments(mapping)
actual = self.generation.generate(self.entities)
expected = self.generate_default_rules()
# Generate default command A
# Since these are the same 'specificity', the commands
# are arranged alphabetically.
expected['iram0_text'].append(expected['iram0_text'][0])
expected['iram0_text'][0] = InputSectionDesc(ROOT, ['.custom_section'], [])
self.compare_rules(expected, actual)
class ConfigTest(GenerationTest):
# Test command generation with conditions
def _test_conditional_on_scheme(self, perf, alt=None):
# Test that proper commands are generated if using
# schemes with conditional entries.
scheme = u"""
[sections:cond_text_data]
entries:
if PERFORMANCE_LEVEL >= 1:
.text+
.literal+
else:
.rodata+
[scheme:cond_noflash]
entries:
if PERFORMANCE_LEVEL >= 1:
cond_text_data -> iram0_text
else:
cond_text_data -> dram0_data
"""
mapping = u"""
[mapping:test]
archive: lib.a
entries:
* (cond_noflash)
"""
self.sdkconfig.config.syms['PERFORMANCE_LEVEL'].set_value(str(perf))
self.add_fragments(scheme)
self.add_fragments(alt if alt else mapping)
actual = self.generation.generate(self.entities)
expected = self.generate_default_rules()
if perf >= 1:
flash_text = expected['flash_text']
iram0_text = expected['iram0_text']
flash_text[0].exclusions.add(Entity('lib.a'))
iram0_text.append(InputSectionDesc(Entity('lib.a'), flash_text[0].sections, []))
else:
flash_rodata = expected['flash_rodata']
dram0_data = expected['dram0_data']
flash_rodata[0].exclusions.add(Entity('lib.a'))
dram0_data.append(InputSectionDesc(Entity('lib.a'), flash_rodata[0].sections, []))
self.compare_rules(expected, actual)
def test_conditional_on_scheme_00(self):
self._test_conditional_on_scheme(0)
def test_conditional_on_scheme_01(self):
self._test_conditional_on_scheme(1)
def test_conditional_mapping(self, alt=None):
# Test that proper commands are generated
# in conditional mapping entries.
mapping = u"""
[mapping:default]
archive: *
entries:
* (default)
[mapping:test]
archive: lib.a
entries:
if PERFORMANCE_LEVEL = 1:
obj1 (noflash)
elif PERFORMANCE_LEVEL = 2:
obj1 (noflash)
obj2 (noflash)
elif PERFORMANCE_LEVEL = 3:
obj1 (noflash)
obj2 (noflash)
obj3 (noflash)
"""
for perf_level in range(0, 4):
self.sdkconfig.config.syms['PERFORMANCE_LEVEL'].set_value(str(perf_level))
self.generation.mappings = {}
self.add_fragments(alt if alt else mapping)
actual = self.generation.generate(self.entities)
expected = self.generate_default_rules()
if perf_level < 4 and perf_level > 0:
for append_no in range(1, perf_level + 1):
flash_text = expected['flash_text']
flash_rodata = expected['flash_rodata']
iram0_text = expected['iram0_text']
dram0_data = expected['dram0_data']
obj_str = 'obj' + str(append_no)
flash_text[0].exclusions.add(Entity('lib.a', obj_str))
flash_rodata[0].exclusions.add(Entity('lib.a', obj_str))
iram0_text.append(InputSectionDesc(Entity('lib.a', obj_str), flash_text[0].sections, []))
dram0_data.append(InputSectionDesc(Entity('lib.a', obj_str), flash_rodata[0].sections, []))
self.compare_rules(expected, actual)
def test_multiple_fragment_same_lib_conditional(self):
# Test conditional entries in the new mapping fragment grammar
# across multiple fragments.
mapping = u"""
[mapping:default]
archive: *
entries:
* (default)
[mapping:base]
archive: lib.a
entries:
if PERFORMANCE_LEVEL = 1:
obj1 (noflash)
elif PERFORMANCE_LEVEL = 2:
obj1 (noflash)
elif PERFORMANCE_LEVEL = 3:
obj1 (noflash)
[mapping:extra]
archive: lib.a
entries:
if PERFORMANCE_LEVEL = 1:
obj1 (noflash) # ignore duplicate definition
elif PERFORMANCE_LEVEL = 2:
obj2 (noflash)
elif PERFORMANCE_LEVEL = 3:
obj2 (noflash)
obj3 (noflash)
"""
self.test_conditional_mapping(mapping)
class FlagTest(GenerationTest):
# Test correct generation of mapping fragment entries
# with flags.
def test_flags_basics(self):
# Test that input section command additions are done (KEEP, SORT).
# Test that order-dependent commands are properly generated (ALIGN, SURROUND).
# Normally, if an entry has the same mapping as its parent, commands
# are not emitted for it. However, if there are flags, they should be -
# only for the scheme entries that have flags, though.
# Flag entries split across multiple entries work.
#
# flash_text
# *((EXCLUDE_FILE(libfreertos:timers libfreertos:croutine).text ...) A
# KEEP(* (SORT_BY_NAME(EXCLUDE_FILE(libfreertos:timers).text) ...) B
#
# flash_rodata
# *((EXCLUDE_FILE(libfreertos:timers) .rodata ...) C
# _sym2_start D.1
# . = ALIGN(4) E.1
# KEEP(* (EXCLUDE_FILE(libfreertos:timers) .rodata ...) F
# _sym2_end D.2
# . = ALIGN(4) E.2
#
# iram0_text
# *(.iram .iram.*)
# . = ALIGN(4) G.1
# _sym1_start H.1
# libfreertos.a:croutine(.text .literal ...) I
# . = ALIGN(4) G.2
# _sym1_end H.2
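# Sketch of how each flag surfaces in the expected commands built below
# (grounded in this test's assertions, not additional behavior): KEEP() and
# SORT(name) become the keep/sort arguments of InputSectionDesc, while
# SURROUND(sym) and ALIGN(n, pre, post) are emitted as separate
# SymbolAtAddress('_sym_start'/'_sym_end') and AlignAtAddress(n) commands
# placed around the corresponding input section command.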
mapping = u"""
[mapping:test]
archive: libfreertos.a
entries:
croutine (noflash_text);
text->iram0_text ALIGN(4, pre, post) SURROUND(sym1) #1
timers (default);
text->flash_text KEEP() SORT(name) #2
timers (default);
rodata->flash_rodata SURROUND(sym2) ALIGN(4, pre, post) #3
"""
self.add_fragments(mapping)
actual = self.generation.generate(self.entities)
expected = self.generate_default_rules()
flash_text = expected['flash_text']
iram0_text = expected['iram0_text']
flash_rodata = expected['flash_rodata']
# Exclusions in flash_text for timers and croutine A
flash_text[0].exclusions.add(CROUTINE)
flash_text[0].exclusions.add(TIMERS)
# Command for #2 B
flash_text.append(InputSectionDesc(TIMERS, flash_text[0].sections, [], keep=True, sort=('name', None)))
# Exclusions in flash_rodata for timers C
flash_rodata[0].exclusions.add(TIMERS)
# Commands for #3 D.1, E.1, F, D.2, E.2
flash_rodata.append(SymbolAtAddress('_sym2_start'))
flash_rodata.append(AlignAtAddress(4))
flash_rodata.append(InputSectionDesc(TIMERS, flash_rodata[0].sections, []))
flash_rodata.append(SymbolAtAddress('_sym2_end'))
flash_rodata.append(AlignAtAddress(4))
# Commands for #1 G.1, H.1, I, G.2, H.2
iram0_text.append(AlignAtAddress(4))
iram0_text.append(SymbolAtAddress('_sym1_start'))
iram0_text.append(InputSectionDesc(CROUTINE, flash_text[0].sections, []))
iram0_text.append(AlignAtAddress(4))
iram0_text.append(SymbolAtAddress('_sym1_end'))
self.compare_rules(expected, actual)
def test_flags_intermediate_exclusion_command_root(self):
# Test that intermediate exclusion commands from root-level commands
# are included in the flags.
#
# flash_text
# _sym1_start A.1
# KEEP(* (EXCLUDE_FILE(libfreertos:croutine).text ...) B
# KEEP(libfreertos.a:croutine(...))) C
# _sym1_end A.2
#
# iram0_text
# *(.iram .iram.*)
# libfreertos.a:croutine(.text.prvCheckPendingReadyList ...) D
mapping = u"""
[mapping:default]
archive: *
entries:
# 1
* (default);
text->flash_text SURROUND(sym1) KEEP() #2
[mapping:test]
archive: libfreertos.a
entries:
croutine:prvCheckPendingReadyList (noflash_text) #3
"""
self.generation.mappings = {}
self.add_fragments(mapping)
actual = self.generation.generate(self.entities)
expected = self.generate_default_rules()
flash_text = expected['flash_text']
iram0_text = expected['iram0_text']
# Command for #2, pre A.1
flash_text.insert(0, SymbolAtAddress('_sym1_start'))
# Command for #1 with KEEP() B
# and exclusion for #3
flash_text[1].keep = True
flash_text[1].exclusions.add(CROUTINE)
# Implicit exclusion command for #3 C
croutine_sections = self.entities.get_sections('libfreertos.a', 'croutine')
filtered_sections = fnmatch.filter(croutine_sections, '.literal.*')
filtered_sections.extend(fnmatch.filter(croutine_sections, '.text.*'))
filtered_sections = [s for s in filtered_sections if not s.endswith('prvCheckPendingReadyList')]
filtered_sections.append('.text')
flash_text.append(InputSectionDesc(CROUTINE, set(filtered_sections), [], keep=True))
# Command for #2, post A.2
flash_text.append(SymbolAtAddress('_sym1_end'))
# Command for #3 D
iram0_text.append(InputSectionDesc(CROUTINE, set(['.text.prvCheckPendingReadyList', '.literal.prvCheckPendingReadyList']), []))
self.compare_rules(expected, actual)
def test_flags_intermediate_exclusion_command_lib(self):
# Test that intermediate exclusion commands from lib-level commands
# are included in the flags.
#
# flash_text
# *(EXCLUDE_FILE(libfreertos.a).text ...)
# _sym1_start A.1
# KEEP(libfreertos.a(EXCLUDE_FILE(libfreertos:croutine).text.* ...)) B
# KEEP(libfreertos.a:croutine(...))) C
# _sym1_end A.2
#
# iram0_text
# *(.iram .iram.*)
# libfreertos.a:croutine(.text.prvCheckPendingReadyList ...) D
mapping = u"""
[mapping:test]
archive: libfreertos.a
entries:
# 1
* (default);
text->flash_text SURROUND(sym1) KEEP() #2
croutine:prvCheckPendingReadyList (noflash_text) #3
"""
self.add_fragments(mapping)
actual = self.generation.generate(self.entities)
expected = self.generate_default_rules()
flash_text = expected['flash_text']
iram0_text = expected['iram0_text']
# Command for #2, pre A.1
flash_text.append(SymbolAtAddress('_sym1_start'))
flash_text[0].exclusions.add(FREERTOS)
# Command for #1 with KEEP() B
# and exclusion for #3
flash_text.append(InputSectionDesc(FREERTOS, flash_text[0].sections, [CROUTINE], keep=True))
# Implicit exclusion command for #3 C
croutine_sections = self.entities.get_sections('libfreertos.a', 'croutine')
filtered_sections = fnmatch.filter(croutine_sections, '.literal.*')
filtered_sections.extend(fnmatch.filter(croutine_sections, '.text.*'))
filtered_sections = [s for s in filtered_sections if not s.endswith('prvCheckPendingReadyList')]
filtered_sections.append('.text')
flash_text.append(InputSectionDesc(CROUTINE, set(filtered_sections), [], keep=True))
# Command for #2, post A.2
flash_text.append(SymbolAtAddress('_sym1_end'))
        # Command for #3                                                                        D
iram0_text.append(InputSectionDesc(CROUTINE, set(['.text.prvCheckPendingReadyList', '.literal.prvCheckPendingReadyList']), []))
self.compare_rules(expected, actual)
def test_flags_intermediate_exclusion_command_obj(self):
# Test that intermediate exclusion commands from obj-level commands
# are included in the flags.
#
# flash_text
# *(EXCLUDE_FILE(libfreertos.a).text ...)
# _sym1_start A.1
# KEEP(libfreertos.a:croutine(...))) B
# _sym1_end A.2
#
# iram0_text
# *(.iram .iram.*)
# libfreertos.a:croutine(.text.prvCheckPendingReadyList ...) C
mapping = u"""
[mapping:test]
archive: libfreertos.a
entries:
# 1
croutine (default);
text->flash_text SURROUND(sym1) KEEP() #2
croutine:prvCheckPendingReadyList (noflash_text) #3
"""
self.add_fragments(mapping)
actual = self.generation.generate(self.entities)
expected = self.generate_default_rules()
flash_text = expected['flash_text']
iram0_text = expected['iram0_text']
# Command for #2, pre A.1
flash_text.append(SymbolAtAddress('_sym1_start'))
flash_text[0].exclusions.add(CROUTINE)
# Implicit exclusion command for #3 B
croutine_sections = self.entities.get_sections('libfreertos.a', 'croutine')
filtered_sections = fnmatch.filter(croutine_sections, '.literal.*')
filtered_sections.extend(fnmatch.filter(croutine_sections, '.text.*'))
filtered_sections = [s for s in filtered_sections if not s.endswith('prvCheckPendingReadyList')]
filtered_sections.append('.text')
flash_text.append(InputSectionDesc(CROUTINE, set(filtered_sections), [], keep=True))
# Command for #2, post A.2
flash_text.append(SymbolAtAddress('_sym1_end'))
# Command for #3 C
iram0_text.append(InputSectionDesc(CROUTINE, set(['.text.prvCheckPendingReadyList', '.literal.prvCheckPendingReadyList']), []))
self.compare_rules(expected, actual)
def test_flags_separate_exclusion_command_if_explicit_root(self):
# Explicit commands are separated from the parent's flags.
#
# flash_text
# _sym1_start A.1
# KEEP(* (EXCLUDE_FILE(libfreertos:croutine).text ...) B
# _sym1_end A.2
# KEEP(libfreertos.a:croutine(...))) C
#
# iram0_text
# *(.iram .iram.*)
# libfreertos.a:croutine(.text.prvCheckPendingReadyList ...) D
mapping = u"""
[mapping:default]
archive: *
entries:
# 1
* (default);
text->flash_text SURROUND(sym1) KEEP() #2
[mapping:test]
archive: libfreertos.a
entries:
croutine (default) #3
croutine:prvCheckPendingReadyList (noflash_text) #4
"""
self.generation.mappings = {}
self.add_fragments(mapping)
actual = self.generation.generate(self.entities)
expected = self.generate_default_rules()
flash_text = expected['flash_text']
iram0_text = expected['iram0_text']
# Command for #2, pre A.1
flash_text.insert(0, SymbolAtAddress('_sym1_start'))
# Command for #1 with KEEP() B
# and exclusion for #3
flash_text[1].keep = True
flash_text[1].exclusions.add(CROUTINE)
# Command for #2, post A.2
flash_text.append(SymbolAtAddress('_sym1_end'))
# Command for #3 C
croutine_sections = self.entities.get_sections('libfreertos.a', 'croutine')
filtered_sections = fnmatch.filter(croutine_sections, '.literal.*')
filtered_sections.extend(fnmatch.filter(croutine_sections, '.text.*'))
filtered_sections = [s for s in filtered_sections if not s.endswith('prvCheckPendingReadyList')]
filtered_sections.append('.text')
flash_text.append(InputSectionDesc(CROUTINE, set(filtered_sections), []))
# Command for #4 D
iram0_text.append(InputSectionDesc(CROUTINE, set(['.text.prvCheckPendingReadyList', '.literal.prvCheckPendingReadyList']), []))
self.compare_rules(expected, actual)
def test_flags_separate_exclusion_command_if_explicit_lib(self):
# Explicit commands are separated from the parent's flags.
#
# flash_text
# *(EXCLUDE_FILE(libfreertos.a).text ...)
# _sym1_start A.1
# KEEP(libfreertos.a(EXCLUDE_FILE(libfreertos:croutine).text.* ...)) B
# _sym1_end A.2
# KEEP(libfreertos.a:croutine(...))) C
#
# iram0_text
# *(.iram .iram.*)
# libfreertos.a:croutine(.text.prvCheckPendingReadyList ...) D
mapping = u"""
[mapping:test]
archive: libfreertos.a
entries:
# 1
* (default);
text->flash_text SURROUND(sym1) KEEP()
croutine (default) #2
croutine:prvCheckPendingReadyList (noflash_text) #3
"""
self.add_fragments(mapping)
actual = self.generation.generate(self.entities)
expected = self.generate_default_rules()
flash_text = expected['flash_text']
iram0_text = expected['iram0_text']
# Command for #2, pre A.1
flash_text.append(SymbolAtAddress('_sym1_start'))
flash_text[0].exclusions.add(FREERTOS)
# Command for #1 with KEEP() B
# and exclusion for #3
flash_text.append(InputSectionDesc(FREERTOS, flash_text[0].sections, [CROUTINE], keep=True))
# Command for #2, post A.2
flash_text.append(SymbolAtAddress('_sym1_end'))
# Implicit exclusion command for #3 C
croutine_sections = self.entities.get_sections('libfreertos.a', 'croutine')
filtered_sections = fnmatch.filter(croutine_sections, '.literal.*')
filtered_sections.extend(fnmatch.filter(croutine_sections, '.text.*'))
filtered_sections = [s for s in filtered_sections if not s.endswith('prvCheckPendingReadyList')]
filtered_sections.append('.text')
flash_text.append(InputSectionDesc(CROUTINE, set(filtered_sections), []))
        # Command for #3                                                                        D
iram0_text.append(InputSectionDesc(CROUTINE, set(['.text.prvCheckPendingReadyList', '.literal.prvCheckPendingReadyList']), []))
self.compare_rules(expected, actual)
def test_flag_additions(self):
        # Test the ability to add flags as long as no other mapping fragment
        # does the same thing.
mapping = u"""
[mapping:default_add_flag]
archive: *
entries:
* (default);
text->flash_text KEEP()
"""
self.add_fragments(mapping)
actual = self.generation.generate(self.entities)
expected = self.generate_default_rules()
flash_text = expected['flash_text']
flash_text[0].keep = True
self.compare_rules(expected, actual)
def test_flags_flag_additions_duplicate(self):
        # Test that the same flags added to the same entity
        # are ignored.
mapping = u"""
[mapping:default_add_flag_1]
archive: *
entries:
* (default);
text->flash_text KEEP()
[mapping:default_add_flag_2]
archive: *
entries:
* (default);
text->flash_text KEEP()
"""
self.add_fragments(mapping)
actual = self.generation.generate(self.entities)
expected = self.generate_default_rules()
flash_text = expected['flash_text']
flash_text[0].keep = True
self.compare_rules(expected, actual)
def test_flags_flag_additions_conflict(self):
        # Test the condition where multiple fragments specify flags
        # for the same entity - this should raise an exception.
mapping = u"""
[mapping:default_add_flag_1]
archive: *
entries:
* (default);
text->flash_text ALIGN(2)
[mapping:default_add_flag_2]
archive: *
entries:
* (default);
text->flash_text SURROUND(sym1)
"""
self.add_fragments(mapping)
with self.assertRaises(GenerationException):
self.generation.generate(self.entities)
if __name__ == '__main__':
unittest.main()
|
|
#!/usr/bin/env python
"""Renderers that render RDFValues into JSON compatible data structures."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import base64
import inspect
import logging
import numbers
from future.builtins import str
from future.utils import iteritems
from future.utils import itervalues
from future.utils import with_metaclass
from typing import Text
from grr_response_core.lib import rdfvalue
from grr_response_core.lib import registry
from grr_response_core.lib.rdfvalues import flows as rdf_flows
from grr_response_core.lib.rdfvalues import protodict as rdf_protodict
from grr_response_core.lib.rdfvalues import stats as rdf_stats
from grr_response_core.lib.rdfvalues import structs as rdf_structs
from grr_response_proto.api import reflection_pb2
from grr_response_server.gui.api_plugins import output_plugin as api_output_plugin
from grr_response_server.gui.api_plugins import stats as api_stats
class Error(Exception):
pass
class DefaultValueError(Error):
pass
class ApiRDFAllowedEnumValueDescriptor(rdf_structs.RDFProtoStruct):
protobuf = reflection_pb2.ApiRDFAllowedEnumValueDescriptor
class ApiRDFValueFieldDescriptor(rdf_structs.RDFProtoStruct):
"""Descriptor for an RDFValue Field."""
protobuf = reflection_pb2.ApiRDFValueFieldDescriptor
rdf_deps = [
ApiRDFAllowedEnumValueDescriptor,
]
def GetDefaultValueClass(self):
if self.type == "bool":
return bool
else:
return rdfvalue.RDFValue.classes.get(self.type)
class ApiRDFValueDescriptor(rdf_structs.RDFProtoStruct):
protobuf = reflection_pb2.ApiRDFValueDescriptor
rdf_deps = [
ApiRDFValueFieldDescriptor,
]
def GetDefaultValueClass(self):
return rdfvalue.RDFValue.classes.get(self.name)
def StripTypeInfo(rendered_data):
"""Strips type information from rendered data. Useful for debugging."""
if isinstance(rendered_data, (list, tuple)):
return [StripTypeInfo(d) for d in rendered_data]
elif isinstance(rendered_data, dict):
if "value" in rendered_data and "type" in rendered_data:
return StripTypeInfo(rendered_data["value"])
else:
result = {}
for k, v in iteritems(rendered_data):
result[k] = StripTypeInfo(v)
return result
else:
return rendered_data
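# A quick illustration of the stripping behaviour above (hypothetical rendered
# data; typed entries are {"type": ..., "value": ...} dicts):
#   StripTypeInfo({"type": "RDFString", "value": "foo"})       -> "foo"
#   StripTypeInfo({"a": {"type": "long", "value": 42}})        -> {"a": 42}
#   StripTypeInfo([{"type": "bool", "value": True}, "plain"])  -> [True, "plain"]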
class ApiValueRenderer(with_metaclass(registry.MetaclassRegistry, object)):
"""Baseclass for API renderers that render RDFValues."""
value_class = object
_type_list_cache = {}
_renderers_cache = {}
@classmethod
def GetRendererForValueOrClass(cls, value, limit_lists=-1):
"""Returns renderer corresponding to a given value and rendering args."""
if inspect.isclass(value):
value_cls = value
else:
value_cls = value.__class__
cache_key = "%s_%d" % (value_cls.__name__, limit_lists)
try:
renderer_cls = cls._renderers_cache[cache_key]
except KeyError:
candidates = []
for candidate in itervalues(ApiValueRenderer.classes):
if candidate.value_class:
candidate_class = candidate.value_class
else:
continue
if inspect.isclass(value):
if issubclass(value_cls, candidate_class):
candidates.append((candidate, candidate_class))
else:
if isinstance(value, candidate_class):
candidates.append((candidate, candidate_class))
if not candidates:
raise RuntimeError(
"No renderer found for value %s." % value.__class__.__name__)
candidates = sorted(
candidates, key=lambda candidate: len(candidate[1].mro()))
renderer_cls = candidates[-1][0]
cls._renderers_cache[cache_key] = renderer_cls
return renderer_cls(limit_lists=limit_lists)
def __init__(self, limit_lists=-1):
super(ApiValueRenderer, self).__init__()
self.limit_lists = limit_lists
def _PassThrough(self, value):
renderer = ApiValueRenderer.GetRendererForValueOrClass(
value, limit_lists=self.limit_lists)
return renderer.RenderValue(value)
def _IncludeTypeInfo(self, result, original_value):
return dict(type=original_value.__class__.__name__, value=result)
def RenderValue(self, value):
"""Renders given value into plain old python objects."""
return self._IncludeTypeInfo(str(value), value)
def BuildDefaultValue(self, value_cls):
"""Renders default value of a given class.
Args:
      value_cls: Default value of this class will be rendered. This class has
        to be (or be a subclass of) self.value_class (i.e. a class that this
        renderer is capable of rendering).
Returns:
An initialized default value.
Raises:
DefaultValueError: if something goes wrong.
"""
try:
return value_cls()
except Exception as e: # pylint: disable=broad-except
logging.exception(e)
raise DefaultValueError(
"Can't create default for value %s: %s" % (value_cls.__name__, e))
def BuildTypeDescriptor(self, value_cls):
"""Renders metadata of a given value class.
Args:
      value_cls: Metadata of this class will be rendered. This class has to be
        (or be a subclass of) self.value_class (i.e. a class that this
        renderer is capable of rendering).
Returns:
Dictionary with class metadata.
"""
result = ApiRDFValueDescriptor(
name=value_cls.__name__,
parents=[klass.__name__ for klass in value_cls.__mro__],
doc=value_cls.__doc__ or "",
kind="PRIMITIVE")
result.default = self.BuildDefaultValue(value_cls)
return result
class ApiNumberRenderer(ApiValueRenderer):
"""Renderer for numbers."""
value_class = numbers.Number
def RenderValue(self, value):
if isinstance(value, numbers.Integral):
return dict(type="long", value=value)
else:
return dict(type="float", value=value)
class ApiStringRenderer(ApiValueRenderer):
"""Renderer for strings."""
value_class = Text
def RenderValue(self, value):
return dict(type="unicode", value=value)
class ApiEnumNamedValueRenderer(ApiValueRenderer):
"""Renderer for new-style enums."""
value_class = rdf_structs.EnumNamedValue
def RenderValue(self, value):
return self._IncludeTypeInfo(value.name, value)
class ApiDictRenderer(ApiValueRenderer):
"""Renderer for dicts."""
value_class = dict
def RenderValue(self, value):
result = {}
for k, v in iteritems(value):
result[str(k)] = self._PassThrough(v)
return self._IncludeTypeInfo(result, value)
class ApiRDFDictRenderer(ApiDictRenderer):
"""Renderer for RDF Dict instances."""
value_class = rdf_protodict.Dict
class FetchMoreLink(rdfvalue.RDFValue):
"""Stub used to display 'More data available...' link."""
@classmethod
def FromSerializedBytes(cls, value):
del value # Unused.
return cls()
def SerializeToBytes(self):
return b""
class ApiListRenderer(ApiValueRenderer):
"""Renderer for lists."""
value_class = list
def RenderValue(self, value):
if self.limit_lists == 0:
return "<lists are omitted>"
elif self.limit_lists == -1:
return [self._PassThrough(v) for v in value]
else:
result = [self._PassThrough(v) for v in list(value)[:self.limit_lists]]
if len(value) > self.limit_lists:
result.append(
dict(type=FetchMoreLink.__name__, url="to/be/implemented"))
return result
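# Sketch of the list-limiting behaviour above, with hypothetical input values
# (each item would itself be passed through the matching renderer):
#   ApiListRenderer(limit_lists=-1).RenderValue([1, 2, 3])  -> all three rendered items
#   ApiListRenderer(limit_lists=0).RenderValue([1, 2, 3])   -> "<lists are omitted>"
#   ApiListRenderer(limit_lists=2).RenderValue([1, 2, 3])   -> first two rendered items
#       followed by a {"type": "FetchMoreLink", "url": "to/be/implemented"} marker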
class ApiTupleRenderer(ApiListRenderer):
"""Renderer for tuples."""
value_class = tuple
class ApiSetRenderer(ApiListRenderer):
"""Renderer for sets."""
value_class = set
class ApiRepeatedFieldHelperRenderer(ApiListRenderer):
"""Renderer for repeated fields helpers."""
value_class = rdf_structs.RepeatedFieldHelper
class ApiRDFValueArrayRenderer(ApiListRenderer):
"""Renderer for RDFValueArray."""
value_class = rdf_protodict.RDFValueArray
class ApiBoolRenderer(ApiValueRenderer):
"""Renderer for bool."""
value_class = bool
def RenderValue(self, value):
return dict(type="bool", value=bool(value))
class ApiBytesRenderer(ApiValueRenderer):
"""Renderer for bytes."""
# ApiStringRenderer renders unicode objects. We assume that
# non-unicode strings are effectively bytes and render them
# as base64-encoded values.
value_class = bytes
def RenderValue(self, value):
result = base64.b64encode(value).decode("ascii")
return dict(type="bytes", value=result)
class ApiRDFBytesRenderer(ApiValueRenderer):
"""Renderer for RDFBytes."""
value_class = rdfvalue.RDFBytes
def RenderValue(self, value):
result = base64.b64encode(value.SerializeToBytes()).decode("ascii")
return self._IncludeTypeInfo(result, value)
class ApiRDFZippedBytesRenderer(ApiValueRenderer):
"""Renderer for RDFZippedBytes."""
value_class = rdfvalue.RDFZippedBytes
def RenderValue(self, value):
    result = base64.b64encode(value.Uncompress()).decode("ascii")
return self._IncludeTypeInfo(result, value)
class ApiRDFStringRenderer(ApiValueRenderer):
"""Renderer for RDFString."""
value_class = rdfvalue.RDFString
def RenderValue(self, value):
result = str(value)
return self._IncludeTypeInfo(result, value)
class ApiRDFIntegerRenderer(ApiValueRenderer):
"""Renderer for RDFInteger."""
value_class = rdfvalue.RDFInteger
def RenderValue(self, value):
result = int(value)
return self._IncludeTypeInfo(result, value)
class ApiDurationRenderer(ApiValueRenderer):
"""Renderer for Duration."""
value_class = rdfvalue.Duration
def RenderValue(self, value):
if isinstance(value, rdfvalue.DurationSeconds):
raw = value.ToInt(rdfvalue.SECONDS)
else:
raw = value.microseconds
return self._IncludeTypeInfo(raw, value)
class ApiDateTimeRenderer(ApiValueRenderer):
"""Renderer for RDFDateTime."""
value_class = rdfvalue.RDFDatetime
def RenderValue(self, value):
return self._IncludeTypeInfo(int(value), value)
class ApiDataBlobRenderer(ApiValueRenderer):
"""Renderer for DataBlob."""
value_class = rdf_protodict.DataBlob
def RenderValue(self, value):
return self._PassThrough(value.GetValue())
class ApiStatsStoreMetricDataPointRenderer(ApiValueRenderer):
"""Renderer for ApiStatsStoreMetricDataPoint."""
value_class = api_stats.ApiStatsStoreMetricDataPoint
def RenderValue(self, value):
if value.timestamp:
timestamp = value.timestamp.AsMicrosecondsSinceEpoch() / 1000.0
else:
timestamp = 0
return [timestamp, value.value]
class SampleFloatRenderer(ApiValueRenderer):
"""Renderer for SampleFloat."""
value_class = rdf_stats.SampleFloat
def RenderValue(self, value):
return dict(x_value=value.x_value, y_value=value.y_value)
class ApiOutputPluginDescriptorRenderer(ApiValueRenderer):
"""Renderer for ApiOutputPlugingDescriptor."""
value_class = api_output_plugin.ApiOutputPluginDescriptor
def RenderValue(self, value):
return StripTypeInfo(ApiRDFProtoStructRenderer().RenderValue(value))
class ApiRDFValueDescriptorRenderer(ApiValueRenderer):
"""Renderer for ApiRDFValueDescriptor."""
value_class = ApiRDFValueDescriptor
def RenderValue(self, value):
result = dict(
name=value.name,
mro=list(value.parents),
doc=value.doc,
kind=value.kind.name.lower())
if value.fields:
result["fields"] = []
for field in value.fields:
rendered_field = StripTypeInfo(self._PassThrough(field))
if field.HasField("default"):
rendered_field["default"] = self._PassThrough(field.default)
result["fields"].append(rendered_field)
if value.HasField("default"):
result["default"] = self._PassThrough(value.default)
if value.HasField("union_field_name"):
result["union_field"] = value.union_field_name
return result
class ApiEmbeddedRDFValueRenderer(ApiValueRenderer):
"""Renderer for EmbeddedRDFValue."""
value_class = rdf_protodict.EmbeddedRDFValue
def RenderValue(self, value):
return self._PassThrough(value.payload)
class ApiRDFProtoStructRenderer(ApiValueRenderer):
"""Renderer for RDFProtoStructs."""
value_class = rdf_structs.RDFProtoStruct
value_processors = []
descriptor_processors = []
def RenderValue(self, value):
result = value.AsDict()
for k, v in iteritems(result):
result[k] = self._PassThrough(v)
for processor in self.value_processors:
result = processor(self, result, value)
result = self._IncludeTypeInfo(result, value)
return result
def BuildTypeDescriptor(self, value_cls):
result = ApiRDFValueDescriptor(
name=value_cls.__name__,
parents=[klass.__name__ for klass in value_cls.__mro__],
doc=value_cls.__doc__ or "",
kind="STRUCT")
for field_desc in value_cls.type_infos:
repeated = isinstance(field_desc, rdf_structs.ProtoList)
if hasattr(field_desc, "delegate"):
field_desc = field_desc.delegate
field = ApiRDFValueFieldDescriptor(
name=field_desc.name,
index=field_desc.field_number,
repeated=repeated,
dynamic=isinstance(field_desc, rdf_structs.ProtoDynamicEmbedded))
field_type = field_desc.type
if field_type is not None:
field.type = field_type.__name__
if getattr(field_type, "context_help_url", None) is not None:
# Class attribute context_help_url masks similarly named protobuf
# attribute. Using the Set method to set the right attribute.
field.Set("context_help_url", field_type.context_help_url)
if field_type == rdf_structs.EnumNamedValue:
for enum_label in sorted(field_desc.enum, key=field_desc.enum.get):
enum_value = field_desc.enum[enum_label]
labels = [
rdf_structs.SemanticDescriptor.Labels.reverse_enum[x]
for x in enum_value.labels or []
]
field.allowed_values.append(
ApiRDFAllowedEnumValueDescriptor(
name=enum_label,
value=int(enum_value),
labels=labels,
doc=enum_value.description))
if (field_desc.default is not None and
not issubclass(field_type, rdf_structs.RDFStruct) and
hasattr(field_desc, "GetDefault")):
default_val = field_desc.GetDefault()
field.default = field.GetDefaultValueClass()(default_val)
if field_desc.description:
field.doc = field_desc.description
if field_desc.friendly_name:
field.friendly_name = field_desc.friendly_name
if field_desc.labels:
field.labels = [
rdf_structs.SemanticDescriptor.Labels.reverse_enum[x]
for x in field_desc.labels
]
result.fields.append(field)
for processor in self.descriptor_processors:
result.fields = processor(self, result.fields)
if getattr(value_cls, "union_field", None):
result.union_field_name = value_cls.union_field
try:
result.default = value_cls()
except Exception as e: # pylint: disable=broad-except
# TODO(user): Some RDFStruct classes can't be constructed using
# default constructor (without arguments). Fix the code so that
# we can either construct all the RDFStruct classes with default
# constructors or know exactly which classes can't be constructed
# with default constructors.
logging.debug("Can't create default for struct %s: %s",
field_type.__name__, e)
return result
class ApiGrrMessageRenderer(ApiRDFProtoStructRenderer):
"""Renderer for GrrMessage objects."""
value_class = rdf_flows.GrrMessage
def RenderPayload(self, result, value):
"""Renders GrrMessage payload and renames args_rdf_name field."""
if "args_rdf_name" in result:
result["payload_type"] = result["args_rdf_name"]
del result["args_rdf_name"]
if "args" in result:
result["payload"] = self._PassThrough(value.payload)
del result["args"]
return result
def AdjustDescriptor(self, fields):
"""Payload-aware metadata processor."""
for f in fields:
if f.name == "args_rdf_name":
f.name = "payload_type"
if f.name == "args":
f.name = "payload"
return fields
value_processors = [RenderPayload]
descriptor_processors = [AdjustDescriptor]
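# Sketch of the payload renaming performed by ApiGrrMessageRenderer above
# (field values are hypothetical):
#   {"args_rdf_name": "ClientSummary", "args": <raw args>, ...}
#   becomes
#   {"payload_type": "ClientSummary", "payload": <rendered payload>, ...}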
def RenderValue(value, limit_lists=-1):
"""Render given RDFValue as plain old python objects."""
if value is None:
return None
renderer = ApiValueRenderer.GetRendererForValueOrClass(
value, limit_lists=limit_lists)
return renderer.RenderValue(value)
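# Example usage sketch for the module-level helper above (the output shape
# follows _IncludeTypeInfo; key order is irrelevant):
#   RenderValue(rdfvalue.RDFString("hello"))
#   -> {"type": "RDFString", "value": "hello"}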
def BuildTypeDescriptor(value_cls):
renderer = ApiValueRenderer.GetRendererForValueOrClass(value_cls)
return renderer.BuildTypeDescriptor(value_cls)
|
|
#!/usr/bin/env python
from __future__ import absolute_import
import locale
import logging
import os
import optparse
import warnings
import sys
import re
# 2016-06-17 barry@debian.org: urllib3 1.14 added optional support for socks,
# but if invoked (i.e. imported), it will issue a warning to stderr if socks
# isn't available. requests unconditionally imports urllib3's socks contrib
# module, triggering this warning. The warning breaks DEP-8 tests (because of
# the stderr output) and is just plain annoying in normal usage. I don't want
# to add socks as yet another dependency for pip, nor do I want to allow-stderr
# in the DEP-8 tests, so just suppress the warning. pdb tells me this has to
# be done before the import of pip.vcs.
from pip._vendor.requests.packages.urllib3.exceptions import DependencyWarning
warnings.filterwarnings("ignore", category=DependencyWarning) # noqa
from pip.exceptions import InstallationError, CommandError, PipError
from pip.utils import get_installed_distributions, get_prog
from pip.utils import deprecation, dist_is_editable
from pip.vcs import git, mercurial, subversion, bazaar # noqa
from pip.baseparser import ConfigOptionParser, UpdatingDefaultsHelpFormatter
from pip.commands import get_summaries, get_similar_commands
from pip.commands import commands_dict
from pip._vendor.requests.packages.urllib3.exceptions import (
InsecureRequestWarning,
)
# assignment for flake8 to be happy
# This fixes a peculiarity when importing via __import__ - as we are
# initialising the pip module, "from pip import cmdoptions" is recursive
# and appears not to work properly in that situation.
import pip.cmdoptions
cmdoptions = pip.cmdoptions
# The version as used in the setup.py and the docs conf.py
__version__ = "9.1.0.dev0"
logger = logging.getLogger(__name__)
# Hide the InsecureRequestWarning from urllib3
warnings.filterwarnings("ignore", category=InsecureRequestWarning)
def autocomplete():
"""Command and option completion for the main option parser (and options)
and its subcommands (and options).
Enable by sourcing one of the completion shell scripts (bash, zsh or fish).
"""
# Don't complete if user hasn't sourced bash_completion file.
if 'PIP_AUTO_COMPLETE' not in os.environ:
return
cwords = os.environ['COMP_WORDS'].split()[1:]
cword = int(os.environ['COMP_CWORD'])
try:
current = cwords[cword - 1]
except IndexError:
current = ''
subcommands = [cmd for cmd, summary in get_summaries()]
options = []
# subcommand
try:
subcommand_name = [w for w in cwords if w in subcommands][0]
except IndexError:
subcommand_name = None
parser = create_main_parser()
# subcommand options
if subcommand_name:
# special case: 'help' subcommand has no options
if subcommand_name == 'help':
sys.exit(1)
# special case: list locally installed dists for uninstall command
if subcommand_name == 'uninstall' and not current.startswith('-'):
installed = []
lc = current.lower()
for dist in get_installed_distributions(local_only=True):
if dist.key.startswith(lc) and dist.key not in cwords[1:]:
installed.append(dist.key)
# if there are no dists installed, fall back to option completion
if installed:
for dist in installed:
print(dist)
sys.exit(1)
subcommand = commands_dict[subcommand_name]()
options += [(opt.get_opt_string(), opt.nargs)
for opt in subcommand.parser.option_list_all
if opt.help != optparse.SUPPRESS_HELP]
# filter out previously specified options from available options
prev_opts = [x.split('=')[0] for x in cwords[1:cword - 1]]
options = [(x, v) for (x, v) in options if x not in prev_opts]
# filter options by current input
options = [(k, v) for k, v in options if k.startswith(current)]
for option in options:
opt_label = option[0]
# append '=' to options which require args
if option[1]:
opt_label += '='
print(opt_label)
else:
# show main parser options only when necessary
if current.startswith('-') or current.startswith('--'):
opts = [i.option_list for i in parser.option_groups]
opts.append(parser.option_list)
opts = (o for it in opts for o in it)
subcommands += [i.get_opt_string() for i in opts
if i.help != optparse.SUPPRESS_HELP]
print(' '.join([x for x in subcommands if x.startswith(current)]))
sys.exit(1)
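# Rough illustration of how the sourced completion script drives autocomplete()
# (values are examples only): the shell exports PIP_AUTO_COMPLETE=1,
# COMP_WORDS="pip ins" and COMP_CWORD=1, then re-runs pip; the code above
# prints the matching subcommands (here "install") and exits.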
def create_main_parser():
parser_kw = {
'usage': '\n%prog <command> [options]',
'add_help_option': False,
'formatter': UpdatingDefaultsHelpFormatter(),
'name': 'global',
'prog': get_prog(),
}
parser = ConfigOptionParser(**parser_kw)
parser.disable_interspersed_args()
pip_pkg_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
parser.version = 'pip %s from %s (python %s)' % (
__version__, pip_pkg_dir, sys.version[:3])
# add the general options
gen_opts = cmdoptions.make_option_group(cmdoptions.general_group, parser)
parser.add_option_group(gen_opts)
parser.main = True # so the help formatter knows
# create command listing for description
command_summaries = get_summaries()
description = [''] + ['%-27s %s' % (i, j) for i, j in command_summaries]
parser.description = '\n'.join(description)
return parser
def parseopts(args):
parser = create_main_parser()
# Note: parser calls disable_interspersed_args(), so the result of this
# call is to split the initial args into the general options before the
# subcommand and everything else.
# For example:
# args: ['--timeout=5', 'install', '--user', 'INITools']
    # general_options: ['--timeout=5']
# args_else: ['install', '--user', 'INITools']
general_options, args_else = parser.parse_args(args)
# --version
if general_options.version:
sys.stdout.write(parser.version)
sys.stdout.write(os.linesep)
sys.exit()
# pip || pip help -> print_help()
if not args_else or (args_else[0] == 'help' and len(args_else) == 1):
parser.print_help()
sys.exit()
# the subcommand name
cmd_name = args_else[0]
if cmd_name not in commands_dict:
guess = get_similar_commands(cmd_name)
msg = ['unknown command "%s"' % cmd_name]
if guess:
msg.append('maybe you meant "%s"' % guess)
raise CommandError(' - '.join(msg))
# all the args without the subcommand
cmd_args = args[:]
cmd_args.remove(cmd_name)
return cmd_name, cmd_args
def check_isolated(args):
isolated = False
if "--isolated" in args:
isolated = True
return isolated
def main(args=None):
if args is None:
args = sys.argv[1:]
# Configure our deprecation warnings to be sent through loggers
deprecation.install_warning_logger()
autocomplete()
try:
cmd_name, cmd_args = parseopts(args)
except PipError as exc:
sys.stderr.write("ERROR: %s" % exc)
sys.stderr.write(os.linesep)
sys.exit(1)
# Needed for locale.getpreferredencoding(False) to work
# in pip.utils.encoding.auto_decode
try:
locale.setlocale(locale.LC_ALL, '')
except locale.Error as e:
# setlocale can apparently crash if locale are uninitialized
logger.debug("Ignoring error %s when setting locale", e)
command = commands_dict[cmd_name](isolated=check_isolated(cmd_args))
return command.main(cmd_args)
# ###########################################################
# # Writing freeze files
class FrozenRequirement(object):
def __init__(self, name, req, editable, comments=()):
self.name = name
self.req = req
self.editable = editable
self.comments = comments
_rev_re = re.compile(r'-r(\d+)$')
_date_re = re.compile(r'-(20\d\d\d\d\d\d)$')
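    # Illustrative version strings these patterns are meant to catch (values
    # are examples only): an svn dev version such as "0.3.1dev-r12345" matches
    # _rev_re, while "0.3.1dev-20130125" matches _date_re.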
@classmethod
def from_dist(cls, dist, dependency_links):
location = os.path.normcase(os.path.abspath(dist.location))
comments = []
from pip.vcs import vcs, get_src_requirement
if dist_is_editable(dist) and vcs.get_backend_name(location):
editable = True
try:
req = get_src_requirement(dist, location)
except InstallationError as exc:
logger.warning(
"Error when trying to get requirement for VCS system %s, "
"falling back to uneditable format", exc
)
req = None
if req is None:
logger.warning(
'Could not determine repository location of %s', location
)
comments.append(
'## !! Could not determine repository location'
)
req = dist.as_requirement()
editable = False
else:
editable = False
req = dist.as_requirement()
specs = req.specs
assert len(specs) == 1 and specs[0][0] in ["==", "==="], \
'Expected 1 spec with == or ===; specs = %r; dist = %r' % \
(specs, dist)
version = specs[0][1]
ver_match = cls._rev_re.search(version)
date_match = cls._date_re.search(version)
if ver_match or date_match:
svn_backend = vcs.get_backend('svn')
if svn_backend:
svn_location = svn_backend().get_location(
dist,
dependency_links,
)
if not svn_location:
logger.warning(
'Warning: cannot find svn location for %s', req)
comments.append(
'## FIXME: could not find svn URL in dependency_links '
'for this package:'
)
else:
comments.append(
'# Installing as editable to satisfy requirement %s:' %
req
)
if ver_match:
rev = ver_match.group(1)
else:
rev = '{%s}' % date_match.group(1)
editable = True
req = '%s@%s#egg=%s' % (
svn_location,
rev,
cls.egg_name(dist)
)
return cls(dist.project_name, req, editable, comments)
@staticmethod
def egg_name(dist):
name = dist.egg_name()
match = re.search(r'-py\d\.\d$', name)
if match:
name = name[:match.start()]
return name
def __str__(self):
req = self.req
if self.editable:
req = '-e %s' % req
return '\n'.join(list(self.comments) + [str(req)]) + '\n'
if __name__ == '__main__':
sys.exit(main())
|
|
#-*-coding: utf-8 -*-
'''
Given a dataset of movies and their ratings by different
users, how can we compute the similarity between pairs of
movies?
This module computes similarities between movies
by representing each movie as a vector of ratings and
computing similarity scores over these vectors.
Copied from:
https://github.com/marcelcaraciolo/recsys-mapreduce-mrjob/blob/master/moviesSimilarities.py
'''
__author__ = 'Marcel Caraciolo <caraciol@gmail.com>'
from mrjob.job import MRJob
#from metrics import correlation
#from metrics import cosine, regularized_correlation
from math import sqrt
try:
from itertools import combinations
except ImportError:
from metrics import combinations
PRIOR_COUNT = 10
PRIOR_CORRELATION = 0
def correlation(size, dot_product, rating_sum, \
        rating2sum, rating_norm_squared, rating2_norm_squared):
    numerator = size * dot_product - rating_sum * rating2sum
    denominator = sqrt(size * rating_norm_squared - rating_sum * rating_sum) * \
        sqrt(size * rating2_norm_squared - rating2sum * rating2sum)
    return (numerator / float(denominator)) if denominator else 0.0
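# Worked example, assuming perfectly co-varying ratings A=(1, 2), B=(2, 4):
#   n=2, dot=1*2+2*4=10, sum_a=3, sum_b=6, norm_a_sq=5, norm_b_sq=20
#   correlation(2, 10, 3, 6, 5, 20) -> 1.0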
def cosine(dot_product, rating_norm_squared, rating2_norm_squared):
    '''
    The cosine between two vectors A, B
      dotProduct(A, B) / (norm(A) * norm(B))
    Note: despite the parameter names, callers pass the vector norms
    (sqrt of the squared sums), so the denominator is norm(A) * norm(B).
    '''
    numerator = dot_product
    denominator = rating_norm_squared * rating2_norm_squared
    return (numerator / (float(denominator))) if denominator else 0.0
def regularized_correlation(size, dot_product, rating_sum, \
rating2sum, rating_norm_squared, rating2_norm_squared,
virtual_cont, prior_correlation):
'''
The Regularized Correlation between two vectors A, B
RegularizedCorrelation = w * ActualCorrelation + (1 - w) * PriorCorrelation
where w = # actualPairs / (# actualPairs + # virtualPairs).
'''
unregularizedCorrelation = correlation(size, dot_product, rating_sum, \
rating2sum, rating_norm_squared, rating2_norm_squared)
w = size / float(size + virtual_cont)
return w * unregularizedCorrelation + (1.0 - w) * prior_correlation
def jaccard(users_in_common, total_users1, total_users2):
'''
The Jaccard Similarity between 2 two vectors
|Intersection(A, B)| / |Union(A, B)|
'''
union = total_users1 + total_users2 - users_in_common
return (users_in_common / (float(union))) if union else 0.0
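# Worked example (made-up counts): 2 users rated both items, 3 rated the
# first and 4 rated the second, so the union is 3 + 4 - 2 = 5 and
# jaccard(2, 3, 4) -> 0.4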
class SemicolonValueProtocol(object):
# don't need to implement read() since we aren't using it
def write(self, key, values):
return ';'.join(str(v) for v in values)
class MoviesSimilarities(MRJob):
OUTPUT_PROTOCOL = SemicolonValueProtocol
def steps(self):
return [
self.mr(mapper=self.group_by_user_rating,
reducer=self.count_ratings_users_freq),
self.mr(mapper=self.pairwise_items,
reducer=self.calculate_similarity),
self.mr(mapper=self.calculate_ranking,
reducer=self.top_similar_items)]
def group_by_user_rating(self, key, line):
"""
Emit the user_id and group by their ratings (item and rating)
17 70,3
35 21,1
49 19,2
49 21,1
49 70,4
87 19,1
87 21,2
98 19,2
"""
user_id, item_id, rating, timestamp = line.split('\t')
#yield (item_id, int(rating)), user_id
#yield item_id, (user_id, int(rating))
yield user_id, (item_id, float(rating))
#yield (user_id, item_id), int(rating)
def count_ratings_users_freq(self, user_id, values):
"""
For each user, emit a row containing their "postings"
(item,rating pairs)
Also emit user rating sum and count for use later steps.
17 1,3,(70,3)
35 1,1,(21,1)
49 3,7,(19,2 21,1 70,4)
87 2,3,(19,1 21,2)
98 1,2,(19,2)
"""
item_count = 0
item_sum = 0
final = []
for item_id, rating in values:
item_count += 1
item_sum += rating
final.append((item_id, rating))
yield user_id, (item_count, item_sum, final)
def pairwise_items(self, user_id, values):
'''
The output drops the user from the key entirely, instead it emits
the pair of items as the key:
19,21 2,1
19,70 2,4
21,70 1,4
19,21 1,2
This mapper is the main performance bottleneck. One improvement
would be to create a java Combiner to aggregate the
outputs by key before writing to hdfs, another would be to use
a vector format and SequenceFiles instead of streaming text
for the matrix data.
'''
item_count, item_sum, ratings = values
#print item_count, item_sum, [r for r in combinations(ratings, 2)]
#bottleneck at combinations
for item1, item2 in combinations(ratings, 2):
yield (item1[0], item2[0]), \
(item1[1], item2[1])
def calculate_similarity(self, pair_key, lines):
'''
Sum components of each corating pair across all users who rated both
item x and item y, then calculate pairwise pearson similarity and
corating counts. The similarities are normalized to the [0,1] scale
because we do a numerical sort.
19,21 0.4,2
21,19 0.4,2
19,70 0.6,1
70,19 0.6,1
21,70 0.1,1
70,21 0.1,1
'''
sum_xx, sum_xy, sum_yy, sum_x, sum_y, n = (0.0, 0.0, 0.0, 0.0, 0.0, 0)
item_pair, co_ratings = pair_key, lines
item_xname, item_yname = item_pair
items_x = []
items_y = []
for item_x, item_y in lines:
sum_xx += item_x * item_x
sum_yy += item_y * item_y
sum_xy += item_x * item_y
sum_y += item_y
sum_x += item_x
n += 1
# items_x.append(item_x)
# items_y.append(item_y)
corr_sim = correlation(n, sum_xy, sum_x, \
sum_y, sum_xx, sum_yy)
#corr_sim = correlation(items_x, items_y)
reg_corr_sim = regularized_correlation(n, sum_xy, sum_x, \
sum_y, sum_xx, sum_yy, PRIOR_COUNT, PRIOR_CORRELATION)
cos_sim = cosine(sum_xy, sqrt(sum_xx), sqrt(sum_yy))
jaccard_sim = 0.0
yield (item_xname, item_yname), (corr_sim, \
cos_sim, reg_corr_sim, jaccard_sim, n)
def calculate_ranking(self, item_keys, values):
'''
Emit items with similarity in key for ranking:
19,0.4 70,1
19,0.6 21,2
21,0.6 19,2
21,0.9 70,1
70,0.4 19,1
70,0.9 21,1
'''
corr_sim, cos_sim, reg_corr_sim, jaccard_sim, n = values
item_x, item_y = item_keys
if int(n) > 0:
yield (item_x, corr_sim, cos_sim, reg_corr_sim, jaccard_sim), \
(item_y, n)
def top_similar_items(self, key_sim, similar_ns):
'''
For each item emit K closest items in comma separated file:
De La Soul;A Tribe Called Quest;0.6;1
De La Soul;2Pac;0.4;2
'''
item_x, corr_sim, cos_sim, reg_corr_sim, jaccard_sim = key_sim
for item_y, n in similar_ns:
yield None, (item_x, item_y, corr_sim, cos_sim, reg_corr_sim,
jaccard_sim, n)
if __name__ == '__main__':
MoviesSimilarities.run()
|
|
import os
from sys import argv
from json import load, dump, JSONEncoder
from shutil import disk_usage, copy2, copytree, rmtree
j = os.path.join
# Logging file name
LOGFILE = "backupbuffet.json"
# Source & Destination directories
SRC = os.path.abspath(argv[1])
DEST = os.path.abspath(argv[2])
# Max free space to leave in bytes
# 1gb
MAX_FREE = 1024 ** 3
# Drive ID
DRIVE = 0
# Simulation mode stuff
if "--sim" in argv:
voider = lambda x: True
voider_2arg = lambda x, y: True
os.remove = voider
copy2 = voider_2arg
rmtree = voider
copytree = voider_2arg
class File(object):
def __init__(self, size, mtime, action=0, drive=-1):
self.size = size
self.mtime = mtime
self.action = action
self.drive = drive
class Tree(object):
def __init__(self, files, folders, size=0, action=0):
self.files = files
self.folders = folders
if size:
self.size = size
else:
self.calc_size()
# Folders
# Actions: 0 = None
# 1 = Recusively back up (when dest files & folders equal source)
# 2 = Delete
# Note: Action 0 still means the folder should be iterated for sub changes
self.action = action
# Files
# Actions: 0 = None
# 1 = Back up
# 2 = Delete
# 3 = Modify
# Files = {name: File(size, mtime, action, drive)}
# Folders = {name: Tree(files, folders, size, action)}
def calc_size(self):
self.size = sum([f.size for f in self.files.values()] + [f.size for f in self.folders.values()])
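# Illustrative sketch (made-up names and sizes) of how these classes nest:
#   tree = Tree(files={"a.txt": File(10, 1000.0)}, folders={"docs": Tree({}, {})})
#   tree.size -> 10 (calc_size sums file sizes plus subtree sizes)
# The structure is serialised to backupbuffet.json via customJSONEncoder below.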
# Encodes the trees and files in a JSON encodable way
class customJSONEncoder(JSONEncoder):
def default(self, obj):
return obj.__dict__ if isinstance(obj, Tree) or isinstance(obj, File) else JSONEncoder.default(self, obj)
# Decodes trees and files from JSON objects (dictionaries)
def customJSONDecoder(obj):
if "files" in obj and "folders" in obj:
return Tree(obj["files"], obj["folders"], obj["size"], obj["action"])
if "mtime" in obj and "drive" in obj:
return File(obj["size"], obj["mtime"], obj["action"], obj["drive"])
return obj
# Source directory tree builder
# Gets the size and mtime in one run
def build_fs_tree(path):
# Absolute(-ish) path
abs_path = j(SRC, path)
# Listdir returns relative file names
contents = os.listdir(abs_path)
dirs = [d for d in contents if os.path.isdir(j(abs_path, d))]
files = {f: File(os.path.getsize(j(abs_path, f)), os.path.getmtime(j(abs_path, f))) for f in list(set(contents) - set(dirs))}
folders = {d: build_fs_tree(j(path, d)) for d in dirs}
return Tree(files, folders)
# Sets backup dir of sub files to this drive
def recurse_action(tree, set_drive=True, action=1):
for file in tree.files.values():
file.action = action
file.drive = DRIVE if set_drive else file.drive
for folder in tree.folders.values():
recurse_action(folder, set_drive, action)
# Get a list of files to back up (Add/Modify/Delete)
def get_files(main_tree, backup_tree, free_space):
# If none of the folder is backed up, back up the whole thing
if (not backup_tree.size) and free_space - main_tree.size > 0:
main_tree.action = 1
# Assign drive letter to all sub files
recurse_action(main_tree)
return (main_tree.size, main_tree)
orig_free_space = free_space
# Deletions
for fname in backup_tree.files.keys():
file = backup_tree.files[fname]
if fname not in main_tree.files and file.drive == DRIVE:
file.action = 2
free_space += file.size
# There's no way to tell if the folder is on this drive or not
# So we check on the fs before we delete in perform_fs_tasks
for fname in backup_tree.folders.keys():
folder = backup_tree.folders[fname]
if fname not in main_tree.folders:
folder.action = 2
recurse_action(folder, False, 2)
free_space += folder.size
# Files
for fname in sorted(main_tree.files.keys()):
if fname == LOGFILE or "backupbuffet.nextid" in fname:
continue
file = main_tree.files[fname]
# Addition
# It's either not in the backup or the drive ID is -1
if fname not in backup_tree.files or backup_tree.files[fname].drive == -1:
if free_space - file.size > 0:
file.action = 1
file.drive = DRIVE
backup_tree.files[fname] = file
free_space -= file.size
# Modification
elif file.mtime != backup_tree.files[fname].mtime and backup_tree.files[fname].drive == DRIVE:
file.action = 3
file.drive = DRIVE
backup_tree.files[fname] = file
free_space -= file.size - backup_tree.files[fname].size
# Folders
for fname in sorted(main_tree.folders.keys()):
folder = main_tree.folders[fname]
# Stop if there's no more free space
if free_space < MAX_FREE:
break
# Addition
backup_subtree = backup_tree.folders.setdefault(fname, Tree({}, {}))
size_diff, backup_subtree = get_files(folder, backup_subtree, free_space)
# Recalculate size of backup subtree
backup_subtree.calc_size()
if size_diff:
backup_tree.folders[fname] = backup_subtree
free_space -= size_diff
# Recalculate size of backup tree
backup_tree.calc_size()
return (orig_free_space - free_space, backup_tree)
def perform_fs_tasks(backup_tree, src_path=SRC, dest_path=DEST):
empty_folder = False
if not os.path.exists(dest_path):
print("Create folder " + dest_path)
os.mkdir(dest_path)
empty_folder = True
# Files
for fname in sorted(backup_tree.files.keys()):
file = backup_tree.files[fname]
if file.action >= 2:
print("Delete " + j(dest_path, fname))
os.remove(j(dest_path, fname))
if (file.action == 1 and file.drive == DRIVE) or file.action == 3:
print("Copy %s -> %s" % (j(src_path, fname), j(dest_path, fname)))
if not os.path.exists(j(dest_path, fname)):
copy2(j(src_path, fname), j(dest_path, fname))
empty_folder = False
if file.action == 2:
del backup_tree.files[fname]
else:
file.action = 0
# Folders
for fname in sorted(backup_tree.folders.keys()):
folder = backup_tree.folders[fname]
if folder.action == 2:
if os.path.exists(j(dest_path, fname)):
print("Delete " + j(dest_path, fname))
rmtree(j(dest_path, fname))
del backup_tree.folders[fname]
elif folder.action == 1:
print("Copy %s -> %s" % (j(src_path, fname), j(dest_path, fname)))
if "--continue" in argv:
perform_fs_tasks(folder, j(src_path, fname), j(dest_path, fname))
else:
copytree(j(src_path, fname), j(dest_path, fname))
folder.action = 0
recurse_action(folder, False, 0)
empty_folder = False
else:
perform_fs_tasks(folder, j(src_path, fname), j(dest_path, fname))
empty_folder = False
if empty_folder:
print("Delete folder " + dest_path)
os.rmdir(dest_path)
def get_summary(backup_tree):
add, mod, rm = 0, 0, 0
for fname in sorted(backup_tree.files.keys()):
file = backup_tree.files[fname]
if file.action != 0 and file.drive == DRIVE:
add += file.action == 1
mod += file.action == 3
rm += file.action == 2
for fname in sorted(backup_tree.folders.keys()):
folder = backup_tree.folders[fname]
add_branch, mod_branch, rm_branch = get_summary(folder)
add += add_branch
mod += mod_branch
rm += rm_branch
return (add, mod, rm)
def main():
global DRIVE
# Build a directory tree for the source
print("Building source directory tree")
src_tree = build_fs_tree(".")
# Get free space
# We have to figure out what the root directory is though
# Weird list comp is to avoid empty strings/paths
free_space = disk_usage(os.sep + [d for d in DEST.split(os.sep) if d][0]).free
# Use the destination if it exists (Because linux root path might not be on the destination drive)
if os.path.exists(DEST):
free_space = disk_usage(DEST).free
# Load the backup tree
backup_tree = Tree({}, {})
if os.path.exists(j(SRC, LOGFILE)):
print("Reading backup log")
# TODO sync the logs
with open(j(SRC, LOGFILE), "r") as handle: backup_tree = load(handle, object_hook=customJSONDecoder)
# Load drive IDs
next_id = 0
print("Loading Drive ID")
if os.path.exists(j(SRC, "backupbuffet.nextid")):
with open(j(SRC, "backupbuffet.nextid"), "r") as saved_id:
next_id = int(saved_id.read())
if os.path.exists(j(DEST, "backupbuffet.id")):
with open(j(DEST, "backupbuffet.id"), "r") as saved_id:
DRIVE = int(saved_id.read())
else:
DRIVE = next_id
next_id += 1
print("Drive ID is %d" % DRIVE)
# Get a list of files to back up, in a tree
size_diff = 0
if "--continue" not in argv:
print("Getting list of files to Add/Delete/Modify")
size_diff, backup_tree = get_files(src_tree, backup_tree, free_space)
add, mod, rm = get_summary(backup_tree)
print("About to backup %d bytes (~%.1f GB)" % (
size_diff,
size_diff / (1024 ** 3)
))
print("Added: %d\nModified: %d\nDeleted: %d" % (add, mod, rm))
print("Continue [y/n]?")
choice = input().lower()
while not choice or choice[0] not in ["y", "n"]:
print("Yes or no?")
choice = input().lower()
if choice != "y":
print("Cancelling")
return
print("Running backup!")
if not os.path.exists(DEST):
os.makedirs(DEST)
# Save before and after incase it crashes
with open(j(SRC, LOGFILE), "w") as output: dump(backup_tree, output, cls=customJSONEncoder)
with open(j(DEST, LOGFILE), "w") as output: dump(backup_tree, output, cls=customJSONEncoder)
with open(j(DEST, "backupbuffet.id"), "w") as saved_id: saved_id.write(str(DRIVE))
with open(j(SRC, "backupbuffet.nextid"), "w") as saved_id: saved_id.write(str(next_id))
perform_fs_tasks(backup_tree)
with open(j(SRC, LOGFILE), "w") as output: dump(backup_tree, output, cls=customJSONEncoder)
with open(j(DEST, LOGFILE), "w") as output: dump(backup_tree, output, cls=customJSONEncoder)
main()
|
|
from builtins import str, isinstance
import pytest
from tests import mock
import time
from pyglet.media.sources.procedural import Silence
try:
from pyglet.media.drivers import openal
import pyglet.media.drivers.openal.adaptation
except ImportError:
openal = None
pytestmark = pytest.mark.skipif(openal is None, reason='No OpenAL available.')
def almost_equal(f1, f2, eps=0.0001):
return abs(f1 - f2) < eps
def almost_equal_coords(c1, c2, eps=0.0001):
return all(almost_equal(f1, f2, eps) for f1, f2 in zip(c1, c2))
@pytest.fixture
def device():
device = openal.interface.OpenALDevice()
yield device
device.delete()
def test_device_create_delete(device):
assert device.is_ready
device.delete()
assert not device.is_ready
def test_device_version(device):
major, minor = device.get_version()
assert major > 0
assert minor > 0
def test_device_extensions(device):
extensions = device.get_extensions()
assert len(extensions) > 0
for ext in extensions:
assert isinstance(ext, str)
def test_context_create_delete(device):
context = device.create_context()
assert context is not None
context.delete()
@pytest.fixture
def context(device):
context = device.create_context()
yield context
context.delete()
def test_context_make_current(context):
context.make_current()
@pytest.fixture
def buffer_pool(context):
pool = openal.interface.OpenALBufferPool(context)
yield pool
pool.clear()
@pytest.fixture
def buf(buffer_pool):
buf = buffer_pool.get_buffer()
yield buf
buf.delete()
def test_buffer_create_delete(buf):
assert buf.is_valid
assert buf.al_buffer is not None
assert buf.name > 0
buf.delete()
assert not buf.is_valid
def test_buffer_data(buf):
assert buf.is_valid
audio_source = Silence(1.)
buf.data(audio_source.get_audio_data(audio_source.audio_format.bytes_per_second),
audio_source.audio_format)
assert buf.is_valid
buf.delete()
assert not buf.is_valid
def test_bufferpool_get_single_buffer(buffer_pool):
assert len(buffer_pool) == 0
buf = buffer_pool.get_buffer()
assert buf is not None
assert buf.is_valid
assert len(buffer_pool) == 0
def test_bufferpool_return_valid_buffer(buffer_pool):
buf = buffer_pool.get_buffer()
assert buf is not None
assert buf.is_valid
assert len(buffer_pool) == 0
buffer_pool.unqueue_buffer(buf)
assert len(buffer_pool) == 1
buf = buffer_pool.get_buffer()
assert buf is not None
assert buf.is_valid
assert len(buffer_pool) == 0
def test_bufferpool_get_multiple_buffers(buffer_pool):
bufs = buffer_pool.get_buffers(3)
assert bufs is not None
assert len(bufs) == 3
for buf in bufs:
assert buf.is_valid
assert len(buffer_pool) == 0
def test_bufferpool_return_multiple_valid_buffers(buffer_pool):
bufs = buffer_pool.get_buffers(3)
assert bufs is not None
assert len(bufs) == 3
for buf in bufs:
assert buf.is_valid
assert len(buffer_pool) == 0
return_count = 0
for buf in bufs:
buffer_pool.unqueue_buffer(buf)
return_count += 1
assert len(buffer_pool) == return_count
buf = buffer_pool.get_buffer()
assert buf is not None
assert buf.is_valid
assert len(buffer_pool) == 2
def test_bufferpool_return_invalid_buffer(buffer_pool):
buf = buffer_pool.get_buffer()
assert buf is not None
assert buf.is_valid
assert len(buffer_pool) == 0
buf.delete()
assert not buf.is_valid
buffer_pool.unqueue_buffer(buf)
assert len(buffer_pool) == 0
buf = buffer_pool.get_buffer()
assert buf is not None
assert buf.is_valid
assert len(buffer_pool) == 0
def test_bufferpool_invalidate_buffer_in_pool(buffer_pool):
buf = buffer_pool.get_buffer()
assert buf is not None
assert buf.is_valid
assert len(buffer_pool) == 0
buffer_pool.unqueue_buffer(buf)
assert len(buffer_pool) == 1
buf.delete()
assert not buf.is_valid
buf = buffer_pool.get_buffer()
assert buf is not None
assert buf.is_valid
assert len(buffer_pool) == 0
def test_source_create_delete(context):
source = context.create_source()
assert source.is_initial
assert not source.is_playing
assert not source.is_paused
assert not source.is_stopped
assert source.buffers_processed == 0
assert source.byte_offset == 0
source.delete()
@pytest.fixture
def source(context):
source = context.create_source()
yield source
source.delete()
@pytest.fixture
def filled_buffer(source):
buf = source.get_buffer()
assert buf.is_valid
audio_source = Silence(1.)
buf.data(audio_source.get_audio_data(audio_source.audio_format.bytes_per_second),
audio_source.audio_format)
yield buf
source.buffer_pool.unqueue_buffer(buf)
def test_source_queue_play_unqueue(context, filled_buffer):
source = context.create_source()
source.queue_buffer(filled_buffer)
assert source.is_initial
assert not source.is_playing
assert not source.is_paused
assert not source.is_stopped
assert source.buffers_processed == 0
assert source.buffers_queued == 1
assert source.byte_offset == 0
source.play()
assert not source.is_initial
assert source.is_playing
assert not source.is_paused
assert not source.is_stopped
assert source.byte_offset == 0
end_time = time.time() + 1.5
while time.time() < end_time:
if source.byte_offset > 0:
break
time.sleep(.1)
assert source.byte_offset > 0
end_time = time.time() + 1.5
while time.time() < end_time:
if source.buffers_processed > 0:
break
time.sleep(.1)
assert source.buffers_processed == 1
processed = source.unqueue_buffers()
assert processed == 1
assert source.buffers_processed == 0
assert source.buffers_queued == 0
assert not source.is_initial
assert not source.is_playing
assert not source.is_paused
assert source.is_stopped
@pytest.fixture
def filled_source(source, filled_buffer):
source.queue_buffer(filled_buffer)
return source
def test_source_pause_stop(filled_source):
assert filled_source.is_initial
assert not filled_source.is_playing
assert not filled_source.is_paused
assert not filled_source.is_stopped
filled_source.play()
assert not filled_source.is_initial
assert filled_source.is_playing
assert not filled_source.is_paused
assert not filled_source.is_stopped
filled_source.pause()
assert not filled_source.is_initial
assert not filled_source.is_playing
assert filled_source.is_paused
assert not filled_source.is_stopped
filled_source.play()
assert not filled_source.is_initial
assert filled_source.is_playing
assert not filled_source.is_paused
assert not filled_source.is_stopped
filled_source.stop()
assert not filled_source.is_initial
assert not filled_source.is_playing
assert not filled_source.is_paused
assert filled_source.is_stopped
def test_source_prop_position(filled_source):
assert almost_equal_coords(filled_source.position, (0., 0., 0.))
filled_source.position = 1., 2., 3.
assert almost_equal_coords(filled_source.position, (1., 2., 3.))
def test_source_prop_velocity(filled_source):
assert almost_equal_coords(filled_source.velocity, (0., 0., 0.))
filled_source.velocity = 1., 2., 3.
assert almost_equal_coords(filled_source.velocity, (1., 2., 3.))
def test_source_prop_gain(filled_source):
assert almost_equal(filled_source.gain, 1.)
filled_source.gain = 8.5
assert almost_equal(filled_source.gain, 8.5)
def test_source_prop_min_gain(filled_source):
assert almost_equal(filled_source.min_gain, 0.)
filled_source.min_gain = .5
assert almost_equal(filled_source.min_gain, .5)
def test_source_prop_max_gain(filled_source):
assert almost_equal(filled_source.max_gain, 1.)
filled_source.max_gain = .8
assert almost_equal(filled_source.max_gain, .8)
def test_source_prop_reference_distance(filled_source):
assert almost_equal(filled_source.reference_distance, 1.)
filled_source.reference_distance = 10.3
assert almost_equal(filled_source.reference_distance, 10.3)
def test_source_prop_rolloff_factor(filled_source):
assert almost_equal(filled_source.rolloff_factor, 1.)
filled_source.rolloff_factor = 4.5
assert almost_equal(filled_source.rolloff_factor, 4.5)
def test_source_prop_max_distance(filled_source):
assert filled_source.max_distance > 500.0 # No definition of MAX_FLOAT available, 1000.0 on OSX
filled_source.max_distance = 500.
assert almost_equal(filled_source.max_distance, 500.)
def test_source_prop_pitch(filled_source):
assert almost_equal(filled_source.pitch, 1.)
filled_source.pitch = 3.14
assert almost_equal(filled_source.pitch, 3.14)
def test_source_prop_direction(filled_source):
assert almost_equal_coords(filled_source.direction, (0., 0., 0.))
filled_source.direction = 1., 2., 3.
assert almost_equal_coords(filled_source.direction, (1., 2., 3.))
def test_source_prop_cone_inner_angle(filled_source):
assert almost_equal(filled_source.cone_inner_angle, 360.)
filled_source.cone_inner_angle = 180.
assert almost_equal(filled_source.cone_inner_angle, 180.)
def test_source_prop_cone_outer_angle(filled_source):
assert almost_equal(filled_source.cone_outer_angle, 360.)
filled_source.cone_outer_angle = 90.
assert almost_equal(filled_source.cone_outer_angle, 90.)
def test_source_prop_cone_outer_gain(filled_source):
assert almost_equal(filled_source.cone_outer_gain, 0.)
filled_source.cone_outer_gain = .6
assert almost_equal(filled_source.cone_outer_gain, .6)
def test_source_prop_sec_offset(filled_source):
assert almost_equal(filled_source.sec_offset, 0.)
filled_source.play()
filled_source.pause()
filled_source.sec_offset = .1
# Not stable: assert almost_equal(filled_source.sec_offset, .1)
def test_source_prop_sample_offset(filled_source):
assert almost_equal(filled_source.sample_offset, 0.)
filled_source.play()
filled_source.pause()
filled_source.sample_offset = 5.
# Not stable: assert almost_equal(filled_source.sample_offset, 5.)
def test_source_prop_byte_offset(filled_source):
assert almost_equal(filled_source.byte_offset, 0.)
filled_source.play()
filled_source.pause()
filled_source.byte_offset = 8.
# Not stable: assert almost_equal(filled_source.byte_offset, 8.)
def test_listener_prop_position(context):
listener = openal.interface.OpenALListener()
assert almost_equal_coords(listener.position, (0., 0., 0.))
listener.position = 1., 2., 3.
#TODO assert almost_equal_coords(listener.position, (1., 2., 3.))
def test_listener_prop_velocity(context):
listener = openal.interface.OpenALListener()
assert almost_equal_coords(listener.velocity, (0., 0., 0.))
listener.velocity = 1., 2., 3.
#TODO assert almost_equal_coords(listener.velocity, (1., 2., 3.))
def test_listener_prop_gain(context):
listener = openal.interface.OpenALListener()
assert almost_equal(listener.gain, 1.)
listener.gain = 8.5
#TODO assert almost_equal(listener.gain, 8.5)
def test_listener_prop_orientation(context):
listener = openal.interface.OpenALListener()
orientation = listener.orientation
assert almost_equal_coords(orientation.at, (0., 0., -1.))
assert almost_equal_coords(orientation.up, (0., 1., 0.))
listener.orientation = ((1., 2., 3.), (4., 5., 6.))
orientation = listener.orientation
assert almost_equal_coords(orientation.at, (1., 2., 3.))
assert almost_equal_coords(orientation.up, (4., 5., 6.))
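# A minimal sketch, assuming the approximate-comparison helpers used above are
# defined earlier in this test module; these stand-ins are only registered if
# they are missing, and the real tolerances may differ.
if 'almost_equal' not in globals():
    def almost_equal(a, b, epsilon=1e-6):
        # Scalar float comparison with an absolute tolerance.
        return abs(a - b) < epsilon
if 'almost_equal_coords' not in globals():
    def almost_equal_coords(a, b, epsilon=1e-6):
        # Component-wise comparison for (x, y, z) coordinate triples.
        return all(abs(x - y) < epsilon for x, y in zip(a, b))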
|
|
"""
Starts a service that scans at regular intervals for new devices.
Emits EVENT_PLATFORM_DISCOVERED whenever a new service is discovered, and
knows which components handle each service type so that they are loaded
before the event is fired.
"""
import json
from datetime import timedelta
import logging
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.core import callback
from homeassistant.const import EVENT_HOMEASSISTANT_START
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.event import async_track_point_in_utc_time
from homeassistant.helpers.discovery import async_load_platform, async_discover
import homeassistant.util.dt as dt_util
REQUIREMENTS = ['netdisco==2.5.0']
DOMAIN = 'discovery'
SCAN_INTERVAL = timedelta(seconds=300)
SERVICE_APPLE_TV = 'apple_tv'
SERVICE_AXIS = 'axis'
SERVICE_DAIKIN = 'daikin'
SERVICE_DECONZ = 'deconz'
SERVICE_DLNA_DMR = 'dlna_dmr'
SERVICE_ENIGMA2 = 'enigma2'
SERVICE_FREEBOX = 'freebox'
SERVICE_HASS_IOS_APP = 'hass_ios'
SERVICE_HASSIO = 'hassio'
SERVICE_HOMEKIT = 'homekit'
SERVICE_HUE = 'philips_hue'
SERVICE_IGD = 'igd'
SERVICE_IKEA_TRADFRI = 'ikea_tradfri'
SERVICE_KONNECTED = 'konnected'
SERVICE_MOBILE_APP = 'hass_mobile_app'
SERVICE_NETGEAR = 'netgear_router'
SERVICE_OCTOPRINT = 'octoprint'
SERVICE_ROKU = 'roku'
SERVICE_SABNZBD = 'sabnzbd'
SERVICE_SAMSUNG_PRINTER = 'samsung_printer'
SERVICE_TELLDUSLIVE = 'tellstick'
SERVICE_YEELIGHT = 'yeelight'
SERVICE_WEMO = 'belkin_wemo'
SERVICE_WINK = 'wink'
SERVICE_XIAOMI_GW = 'xiaomi_gw'
CONFIG_ENTRY_HANDLERS = {
SERVICE_AXIS: 'axis',
SERVICE_DAIKIN: 'daikin',
SERVICE_DECONZ: 'deconz',
'esphome': 'esphome',
'google_cast': 'cast',
SERVICE_HUE: 'hue',
SERVICE_TELLDUSLIVE: 'tellduslive',
SERVICE_IKEA_TRADFRI: 'tradfri',
'sonos': 'sonos',
SERVICE_IGD: 'upnp',
}
SERVICE_HANDLERS = {
SERVICE_MOBILE_APP: ('mobile_app', None),
SERVICE_HASS_IOS_APP: ('ios', None),
SERVICE_NETGEAR: ('device_tracker', None),
SERVICE_WEMO: ('wemo', None),
SERVICE_HASSIO: ('hassio', None),
SERVICE_APPLE_TV: ('apple_tv', None),
SERVICE_ENIGMA2: ('media_player', 'enigma2'),
SERVICE_ROKU: ('roku', None),
SERVICE_WINK: ('wink', None),
SERVICE_XIAOMI_GW: ('xiaomi_aqara', None),
SERVICE_SABNZBD: ('sabnzbd', None),
SERVICE_SAMSUNG_PRINTER: ('sensor', 'syncthru'),
SERVICE_KONNECTED: ('konnected', None),
SERVICE_OCTOPRINT: ('octoprint', None),
SERVICE_FREEBOX: ('freebox', None),
SERVICE_YEELIGHT: ('yeelight', None),
'panasonic_viera': ('media_player', 'panasonic_viera'),
'plex_mediaserver': ('media_player', 'plex'),
'yamaha': ('media_player', 'yamaha'),
'logitech_mediaserver': ('media_player', 'squeezebox'),
'directv': ('media_player', 'directv'),
'denonavr': ('media_player', 'denonavr'),
'samsung_tv': ('media_player', 'samsungtv'),
'frontier_silicon': ('media_player', 'frontier_silicon'),
'openhome': ('media_player', 'openhome'),
'harmony': ('remote', 'harmony'),
'bose_soundtouch': ('media_player', 'soundtouch'),
'bluesound': ('media_player', 'bluesound'),
'songpal': ('media_player', 'songpal'),
'kodi': ('media_player', 'kodi'),
'volumio': ('media_player', 'volumio'),
'lg_smart_device': ('media_player', 'lg_soundbar'),
'nanoleaf_aurora': ('light', 'nanoleaf'),
}
OPTIONAL_SERVICE_HANDLERS = {
SERVICE_HOMEKIT: ('homekit_controller', None),
SERVICE_DLNA_DMR: ('media_player', 'dlna_dmr'),
}
CONF_IGNORE = 'ignore'
CONF_ENABLE = 'enable'
CONFIG_SCHEMA = vol.Schema({
vol.Optional(DOMAIN): vol.Schema({
vol.Optional(CONF_IGNORE, default=[]):
vol.All(cv.ensure_list, [
vol.In(list(CONFIG_ENTRY_HANDLERS) + list(SERVICE_HANDLERS))]),
vol.Optional(CONF_ENABLE, default=[]):
vol.All(cv.ensure_list, [vol.In(OPTIONAL_SERVICE_HANDLERS)])
}),
}, extra=vol.ALLOW_EXTRA)
async def async_setup(hass, config):
"""Start a discovery service."""
from netdisco.discovery import NetworkDiscovery
logger = logging.getLogger(__name__)
netdisco = NetworkDiscovery()
already_discovered = set()
# Disable zeroconf logging, it spams
logging.getLogger('zeroconf').setLevel(logging.CRITICAL)
if DOMAIN in config:
# Platforms ignored by config
ignored_platforms = config[DOMAIN][CONF_IGNORE]
# Optional platforms enabled by config
enabled_platforms = config[DOMAIN][CONF_ENABLE]
else:
ignored_platforms = []
enabled_platforms = []
async def new_service_found(service, info):
"""Handle a new service if one is found."""
if service in ignored_platforms:
logger.info("Ignoring service: %s %s", service, info)
return
discovery_hash = json.dumps([service, info], sort_keys=True)
if discovery_hash in already_discovered:
logger.debug("Already discovered service %s %s.", service, info)
return
already_discovered.add(discovery_hash)
if service in CONFIG_ENTRY_HANDLERS:
await hass.config_entries.flow.async_init(
CONFIG_ENTRY_HANDLERS[service],
context={'source': config_entries.SOURCE_DISCOVERY},
data=info
)
return
comp_plat = SERVICE_HANDLERS.get(service)
if not comp_plat and service in enabled_platforms:
comp_plat = OPTIONAL_SERVICE_HANDLERS[service]
# We do not know how to handle this service.
if not comp_plat:
logger.info("Unknown service discovered: %s %s", service, info)
return
logger.info("Found new service: %s %s", service, info)
component, platform = comp_plat
if platform is None:
await async_discover(hass, service, info, component, config)
else:
await async_load_platform(
hass, component, platform, info, config)
async def scan_devices(now):
"""Scan for devices."""
try:
results = await hass.async_add_job(_discover, netdisco)
for result in results:
hass.async_create_task(new_service_found(*result))
except OSError:
logger.error("Network is unreachable")
async_track_point_in_utc_time(
hass, scan_devices, dt_util.utcnow() + SCAN_INTERVAL)
@callback
def schedule_first(event):
"""Schedule the first discovery when Home Assistant starts up."""
async_track_point_in_utc_time(hass, scan_devices, dt_util.utcnow())
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_START, schedule_first)
return True
def _discover(netdisco):
"""Discover devices."""
results = []
try:
netdisco.scan()
for disc in netdisco.discover():
for service in netdisco.get_info(disc):
results.append((disc, service))
finally:
netdisco.stop()
return results
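# Illustrative sketch only: how CONFIG_SCHEMA normalizes a user-supplied
# ``discovery:`` section. The platform names below are arbitrary examples.
if __name__ == '__main__':
    validated = CONFIG_SCHEMA({
        DOMAIN: {
            CONF_IGNORE: [SERVICE_HUE],
            CONF_ENABLE: [SERVICE_HOMEKIT],
        },
    })
    # -> {'ignore': ['philips_hue'], 'enable': ['homekit']}
    print(validated[DOMAIN])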
|
|
# Copyright 2010-present Basho Technologies, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from six import *
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: riak_yokozuna.proto
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
DESCRIPTOR = _descriptor.FileDescriptor(
name='riak_yokozuna.proto',
package='',
serialized_pb='\n\x13riak_yokozuna.proto\"?\n\x10RpbYokozunaIndex\x12\x0c\n\x04name\x18\x01 \x02(\x0c\x12\x0e\n\x06schema\x18\x02 \x01(\x0c\x12\r\n\x05n_val\x18\x03 \x01(\r\"&\n\x16RpbYokozunaIndexGetReq\x12\x0c\n\x04name\x18\x01 \x01(\x0c\";\n\x17RpbYokozunaIndexGetResp\x12 \n\x05index\x18\x01 \x03(\x0b\x32\x11.RpbYokozunaIndex\"K\n\x16RpbYokozunaIndexPutReq\x12 \n\x05index\x18\x01 \x02(\x0b\x32\x11.RpbYokozunaIndex\x12\x0f\n\x07timeout\x18\x02 \x01(\r\")\n\x19RpbYokozunaIndexDeleteReq\x12\x0c\n\x04name\x18\x01 \x02(\x0c\"2\n\x11RpbYokozunaSchema\x12\x0c\n\x04name\x18\x01 \x02(\x0c\x12\x0f\n\x07\x63ontent\x18\x02 \x01(\x0c\"=\n\x17RpbYokozunaSchemaPutReq\x12\"\n\x06schema\x18\x01 \x02(\x0b\x32\x12.RpbYokozunaSchema\"\'\n\x17RpbYokozunaSchemaGetReq\x12\x0c\n\x04name\x18\x01 \x02(\x0c\">\n\x18RpbYokozunaSchemaGetResp\x12\"\n\x06schema\x18\x01 \x02(\x0b\x32\x12.RpbYokozunaSchemaB)\n\x17\x63om.basho.riak.protobufB\x0eRiakYokozunaPB')
_RPBYOKOZUNAINDEX = _descriptor.Descriptor(
name='RpbYokozunaIndex',
full_name='RpbYokozunaIndex',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='RpbYokozunaIndex.name', index=0,
number=1, type=12, cpp_type=9, label=2,
has_default_value=False, default_value="",
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='schema', full_name='RpbYokozunaIndex.schema', index=1,
number=2, type=12, cpp_type=9, label=1,
has_default_value=False, default_value="",
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='n_val', full_name='RpbYokozunaIndex.n_val', index=2,
number=3, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=23,
serialized_end=86,
)
_RPBYOKOZUNAINDEXGETREQ = _descriptor.Descriptor(
name='RpbYokozunaIndexGetReq',
full_name='RpbYokozunaIndexGetReq',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='RpbYokozunaIndexGetReq.name', index=0,
number=1, type=12, cpp_type=9, label=1,
has_default_value=False, default_value="",
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=88,
serialized_end=126,
)
_RPBYOKOZUNAINDEXGETRESP = _descriptor.Descriptor(
name='RpbYokozunaIndexGetResp',
full_name='RpbYokozunaIndexGetResp',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='index', full_name='RpbYokozunaIndexGetResp.index', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=128,
serialized_end=187,
)
_RPBYOKOZUNAINDEXPUTREQ = _descriptor.Descriptor(
name='RpbYokozunaIndexPutReq',
full_name='RpbYokozunaIndexPutReq',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='index', full_name='RpbYokozunaIndexPutReq.index', index=0,
number=1, type=11, cpp_type=10, label=2,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='timeout', full_name='RpbYokozunaIndexPutReq.timeout', index=1,
number=2, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=189,
serialized_end=264,
)
_RPBYOKOZUNAINDEXDELETEREQ = _descriptor.Descriptor(
name='RpbYokozunaIndexDeleteReq',
full_name='RpbYokozunaIndexDeleteReq',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='RpbYokozunaIndexDeleteReq.name', index=0,
number=1, type=12, cpp_type=9, label=2,
has_default_value=False, default_value="",
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=266,
serialized_end=307,
)
_RPBYOKOZUNASCHEMA = _descriptor.Descriptor(
name='RpbYokozunaSchema',
full_name='RpbYokozunaSchema',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='RpbYokozunaSchema.name', index=0,
number=1, type=12, cpp_type=9, label=2,
has_default_value=False, default_value="",
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='content', full_name='RpbYokozunaSchema.content', index=1,
number=2, type=12, cpp_type=9, label=1,
has_default_value=False, default_value="",
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=309,
serialized_end=359,
)
_RPBYOKOZUNASCHEMAPUTREQ = _descriptor.Descriptor(
name='RpbYokozunaSchemaPutReq',
full_name='RpbYokozunaSchemaPutReq',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='schema', full_name='RpbYokozunaSchemaPutReq.schema', index=0,
number=1, type=11, cpp_type=10, label=2,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=361,
serialized_end=422,
)
_RPBYOKOZUNASCHEMAGETREQ = _descriptor.Descriptor(
name='RpbYokozunaSchemaGetReq',
full_name='RpbYokozunaSchemaGetReq',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='RpbYokozunaSchemaGetReq.name', index=0,
number=1, type=12, cpp_type=9, label=2,
has_default_value=False, default_value="",
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=424,
serialized_end=463,
)
_RPBYOKOZUNASCHEMAGETRESP = _descriptor.Descriptor(
name='RpbYokozunaSchemaGetResp',
full_name='RpbYokozunaSchemaGetResp',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='schema', full_name='RpbYokozunaSchemaGetResp.schema', index=0,
number=1, type=11, cpp_type=10, label=2,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=465,
serialized_end=527,
)
_RPBYOKOZUNAINDEXGETRESP.fields_by_name['index'].message_type = _RPBYOKOZUNAINDEX
_RPBYOKOZUNAINDEXPUTREQ.fields_by_name['index'].message_type = _RPBYOKOZUNAINDEX
_RPBYOKOZUNASCHEMAPUTREQ.fields_by_name['schema'].message_type = _RPBYOKOZUNASCHEMA
_RPBYOKOZUNASCHEMAGETRESP.fields_by_name['schema'].message_type = _RPBYOKOZUNASCHEMA
DESCRIPTOR.message_types_by_name['RpbYokozunaIndex'] = _RPBYOKOZUNAINDEX
DESCRIPTOR.message_types_by_name['RpbYokozunaIndexGetReq'] = _RPBYOKOZUNAINDEXGETREQ
DESCRIPTOR.message_types_by_name['RpbYokozunaIndexGetResp'] = _RPBYOKOZUNAINDEXGETRESP
DESCRIPTOR.message_types_by_name['RpbYokozunaIndexPutReq'] = _RPBYOKOZUNAINDEXPUTREQ
DESCRIPTOR.message_types_by_name['RpbYokozunaIndexDeleteReq'] = _RPBYOKOZUNAINDEXDELETEREQ
DESCRIPTOR.message_types_by_name['RpbYokozunaSchema'] = _RPBYOKOZUNASCHEMA
DESCRIPTOR.message_types_by_name['RpbYokozunaSchemaPutReq'] = _RPBYOKOZUNASCHEMAPUTREQ
DESCRIPTOR.message_types_by_name['RpbYokozunaSchemaGetReq'] = _RPBYOKOZUNASCHEMAGETREQ
DESCRIPTOR.message_types_by_name['RpbYokozunaSchemaGetResp'] = _RPBYOKOZUNASCHEMAGETRESP
@add_metaclass(_reflection.GeneratedProtocolMessageType)
class RpbYokozunaIndex(_message.Message):
DESCRIPTOR = _RPBYOKOZUNAINDEX
# @@protoc_insertion_point(class_scope:RpbYokozunaIndex)
@add_metaclass(_reflection.GeneratedProtocolMessageType)
class RpbYokozunaIndexGetReq(_message.Message):
DESCRIPTOR = _RPBYOKOZUNAINDEXGETREQ
# @@protoc_insertion_point(class_scope:RpbYokozunaIndexGetReq)
@add_metaclass(_reflection.GeneratedProtocolMessageType)
class RpbYokozunaIndexGetResp(_message.Message):
DESCRIPTOR = _RPBYOKOZUNAINDEXGETRESP
# @@protoc_insertion_point(class_scope:RpbYokozunaIndexGetResp)
@add_metaclass(_reflection.GeneratedProtocolMessageType)
class RpbYokozunaIndexPutReq(_message.Message):
DESCRIPTOR = _RPBYOKOZUNAINDEXPUTREQ
# @@protoc_insertion_point(class_scope:RpbYokozunaIndexPutReq)
@add_metaclass(_reflection.GeneratedProtocolMessageType)
class RpbYokozunaIndexDeleteReq(_message.Message):
DESCRIPTOR = _RPBYOKOZUNAINDEXDELETEREQ
# @@protoc_insertion_point(class_scope:RpbYokozunaIndexDeleteReq)
@add_metaclass(_reflection.GeneratedProtocolMessageType)
class RpbYokozunaSchema(_message.Message):
DESCRIPTOR = _RPBYOKOZUNASCHEMA
# @@protoc_insertion_point(class_scope:RpbYokozunaSchema)
@add_metaclass(_reflection.GeneratedProtocolMessageType)
class RpbYokozunaSchemaPutReq(_message.Message):
DESCRIPTOR = _RPBYOKOZUNASCHEMAPUTREQ
# @@protoc_insertion_point(class_scope:RpbYokozunaSchemaPutReq)
@add_metaclass(_reflection.GeneratedProtocolMessageType)
class RpbYokozunaSchemaGetReq(_message.Message):
DESCRIPTOR = _RPBYOKOZUNASCHEMAGETREQ
# @@protoc_insertion_point(class_scope:RpbYokozunaSchemaGetReq)
@add_metaclass(_reflection.GeneratedProtocolMessageType)
class RpbYokozunaSchemaGetResp(_message.Message):
DESCRIPTOR = _RPBYOKOZUNASCHEMAGETRESP
# @@protoc_insertion_point(class_scope:RpbYokozunaSchemaGetResp)
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), '\n\027com.basho.riak.protobufB\016RiakYokozunaPB')
# @@protoc_insertion_point(module_scope)
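# Illustrative usage sketch (not part of the generated file): building one of
# the generated messages and round-tripping it through serialization. The
# field values here are made up.
if __name__ == '__main__':
    index = RpbYokozunaIndex(name=b'famous', schema=b'_yz_default', n_val=3)
    payload = index.SerializeToString()
    parsed = RpbYokozunaIndex()
    parsed.ParseFromString(payload)
    assert parsed.name == b'famous' and parsed.n_val == 3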
|
|
"""
Swaggy Jenkins
Jenkins API clients generated from Swagger / Open API specification # noqa: E501
The version of the OpenAPI document: 1.1.2-pre.0
Contact: blah@cliffano.com
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from swaggyjenkins.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
OpenApiModel
)
from swaggyjenkins.exceptions import ApiAttributeError
def lazy_import():
from swaggyjenkins.model.cause_action import CauseAction
from swaggyjenkins.model.free_style_project import FreeStyleProject
globals()['CauseAction'] = CauseAction
globals()['FreeStyleProject'] = FreeStyleProject
class QueueBlockedItem(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
"""
lazy_import()
return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
lazy_import()
return {
'_class': (str,), # noqa: E501
'actions': ([CauseAction],), # noqa: E501
'blocked': (bool,), # noqa: E501
'buildable': (bool,), # noqa: E501
'id': (int,), # noqa: E501
'in_queue_since': (int,), # noqa: E501
'params': (str,), # noqa: E501
'stuck': (bool,), # noqa: E501
'task': (FreeStyleProject,), # noqa: E501
'url': (str,), # noqa: E501
'why': (str,), # noqa: E501
'buildable_start_milliseconds': (int,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'_class': '_class', # noqa: E501
'actions': 'actions', # noqa: E501
'blocked': 'blocked', # noqa: E501
'buildable': 'buildable', # noqa: E501
'id': 'id', # noqa: E501
'in_queue_since': 'inQueueSince', # noqa: E501
'params': 'params', # noqa: E501
'stuck': 'stuck', # noqa: E501
'task': 'task', # noqa: E501
'url': 'url', # noqa: E501
'why': 'why', # noqa: E501
'buildable_start_milliseconds': 'buildableStartMilliseconds', # noqa: E501
}
read_only_vars = {
}
_composed_schemas = {}
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, *args, **kwargs): # noqa: E501
"""QueueBlockedItem - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
_class (str): [optional] # noqa: E501
actions ([CauseAction]): [optional] # noqa: E501
blocked (bool): [optional] # noqa: E501
buildable (bool): [optional] # noqa: E501
id (int): [optional] # noqa: E501
in_queue_since (int): [optional] # noqa: E501
params (str): [optional] # noqa: E501
stuck (bool): [optional] # noqa: E501
task (FreeStyleProject): [optional] # noqa: E501
url (str): [optional] # noqa: E501
why (str): [optional] # noqa: E501
buildable_start_milliseconds (int): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
self = super(OpenApiModel, cls).__new__(cls)
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
return self
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs): # noqa: E501
"""QueueBlockedItem - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
_class (str): [optional] # noqa: E501
actions ([CauseAction]): [optional] # noqa: E501
blocked (bool): [optional] # noqa: E501
buildable (bool): [optional] # noqa: E501
id (int): [optional] # noqa: E501
in_queue_since (int): [optional] # noqa: E501
params (str): [optional] # noqa: E501
stuck (bool): [optional] # noqa: E501
task (FreeStyleProject): [optional] # noqa: E501
url (str): [optional] # noqa: E501
why (str): [optional] # noqa: E501
buildable_start_milliseconds (int): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
if var_name in self.read_only_vars:
raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
f"class with read only attributes.")
|
|
# coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: release-1.23
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from kubernetes.client.configuration import Configuration
class V1NodeList(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'api_version': 'str',
'items': 'list[V1Node]',
'kind': 'str',
'metadata': 'V1ListMeta'
}
attribute_map = {
'api_version': 'apiVersion',
'items': 'items',
'kind': 'kind',
'metadata': 'metadata'
}
def __init__(self, api_version=None, items=None, kind=None, metadata=None, local_vars_configuration=None): # noqa: E501
"""V1NodeList - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._api_version = None
self._items = None
self._kind = None
self._metadata = None
self.discriminator = None
if api_version is not None:
self.api_version = api_version
self.items = items
if kind is not None:
self.kind = kind
if metadata is not None:
self.metadata = metadata
@property
def api_version(self):
"""Gets the api_version of this V1NodeList. # noqa: E501
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
:return: The api_version of this V1NodeList. # noqa: E501
:rtype: str
"""
return self._api_version
@api_version.setter
def api_version(self, api_version):
"""Sets the api_version of this V1NodeList.
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
:param api_version: The api_version of this V1NodeList. # noqa: E501
:type: str
"""
self._api_version = api_version
@property
def items(self):
"""Gets the items of this V1NodeList. # noqa: E501
List of nodes # noqa: E501
:return: The items of this V1NodeList. # noqa: E501
:rtype: list[V1Node]
"""
return self._items
@items.setter
def items(self, items):
"""Sets the items of this V1NodeList.
List of nodes # noqa: E501
:param items: The items of this V1NodeList. # noqa: E501
:type: list[V1Node]
"""
if self.local_vars_configuration.client_side_validation and items is None: # noqa: E501
raise ValueError("Invalid value for `items`, must not be `None`") # noqa: E501
self._items = items
@property
def kind(self):
"""Gets the kind of this V1NodeList. # noqa: E501
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
:return: The kind of this V1NodeList. # noqa: E501
:rtype: str
"""
return self._kind
@kind.setter
def kind(self, kind):
"""Sets the kind of this V1NodeList.
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
:param kind: The kind of this V1NodeList. # noqa: E501
:type: str
"""
self._kind = kind
@property
def metadata(self):
"""Gets the metadata of this V1NodeList. # noqa: E501
:return: The metadata of this V1NodeList. # noqa: E501
:rtype: V1ListMeta
"""
return self._metadata
@metadata.setter
def metadata(self, metadata):
"""Sets the metadata of this V1NodeList.
:param metadata: The metadata of this V1NodeList. # noqa: E501
:type: V1ListMeta
"""
self._metadata = metadata
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1NodeList):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1NodeList):
return True
return self.to_dict() != other.to_dict()
|
|
import os
import fileinput
import re
import distutils.core
import distutils.dir_util
from markup_formatter import *
from javadoc_parser import *
import markup_formatter
from xml.dom import HierarchyRequestErr
'''
Class to create and customize the Wiki
'''
class Wiki:
'''
Initialize wiki with working directory and call create
'''
def __init__(self, appname, repodir, graph, wikidir):
self.appname = appname
self.repodir = repodir
self.wikidir = wikidir
self.graph = graph
self.create()
self.pageName = None
self.text = None
'''
Copy template files to wiki directory
'''
def create(self):
src = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'wiki-template')
dest = self.wikidir
if not os.path.exists(dest):
os.makedirs(dest)
# Remove existing files
for file in os.listdir(dest):
if file[0] != '.':
path = os.path.join(dest, file)
try:
if os.path.isfile(path):
os.unlink(path)
except Exception, e:
print e
# Copy template directory
distutils.dir_util.copy_tree(src, dest)
self.setTemplate('title', self.appname)
self.setTemplate('readme', self.getReadme())
#self.createPage('_SIDEBAR', '> [Home](Home)\n\n\n')
'''
Create a new wiki page
'''
def createPage(self):
file = open(os.path.join(self.wikidir, self.pageName), 'w')
file.write(self.text)
file.close()
self.pageName = None
self.text = None
'''
Append title item to wiki page
'''
def appendTitlePage(self, title, text):
titlemod = title + '.md'
file = open(os.path.join(self.wikidir, titlemod), 'a')
file.write(text)
file.close()
'''
Add a class to the wiki
'''
def buildClass(self, javadoc):
self.pageName = javadoc.getContext().getFullName() + '.md'
self.text = ''
self.text += self.formatJavadoc(javadoc)
self.text += '\n'
def buildInnerClass(self, javadoc):
self.text += '---\n'
self.text += "#{}\n".format(javadoc.getSourceLine().getDisplay())
self.text += self.formatJavadoc(javadoc)
'''
Add a method to the wiki
'''
def buildMethod(self, javadoc):
self.text += "##{}\n".format(javadoc.getSourceLine().getName())
self.text += "```java\n{}\n```\n".format(javadoc.getSourceLine().getText())
self.text += self.formatJavadoc(javadoc)
'''
Add a field to the wiki
'''
def buildField(self, javadoc):
self.text += "##{}\n".format(javadoc.getSourceLine().getName())
self.text += "```java\n{}\n```\n".format(javadoc.getSourceLine().getText())
self.text += self.formatJavadoc(javadoc)
'''
Create home page hierarchy
'''
def addToHomePage(self, javadoc, hierarchy):
name = javadoc.sourceLine.name
if hierarchy == 0:
type = 'Class'
elif hierarchy == 1:
type = 'Method'
else:
type = 'Field'
text = '\n{}* {}'.format(' ' * hierarchy, link(name, name))
self.appendTitlePage('HOME', text)
self.appendTitlePage('_SIDEBAR', text)
'''
Return project readme file
'''
def getReadme(self):
path = os.path.join(self.repodir, 'README.md')
if os.path.isfile(path):
readmeFile = open(path, 'r')
readme = readmeFile.read()
readmeFile.close()
return readme
else:
return None
def formatJavadoc(self, javadoc):
text = ''
if javadoc:
if javadoc.getMainDesc():
text += "{}\n\n".format(self.formatText(javadoc.getMainDesc()))
if javadoc.getBlockTags():
text += self.formatTagSection(javadoc.getBlockTags())
return "{}\n".format(text)
'''
Format description
'''
def formatText(self, mainDesc):
text = ''
for item in mainDesc.getContent():
if isinstance(item, InlineTag):
text += self.formatLink(item)
else:
item = re.sub(r'(<p>)+', ' ', item)
text += str(item)
return text
'''
Format links
'''
def formatLink(self, link):
if isinstance(link, StringLink):
return "\"{}\"".format(link.getStr())
elif isinstance(link, HtmlLink):
return "[{}]({})".format(link.getLabel(), link.getHref())
elif isinstance(link, JavadocLink):
clsName = self.graph.resolveLink(link)
if clsName:
return "[{}]({})".format(clsName, clsName)
else:
return link.text
else:
return ""
'''
Format tags
'''
def formatTagSection(self, blocktags, italic=False):
text = ""
def formatTextContent(content):
text = ""
for something in content:
if isinstance(something, basestring):
text += something
elif something.getLink():
text += self.formatLink(something.getLink())
return text
authorTag = [blocktag for blocktag in blocktags if blocktag.getName() == "author"]
if len(authorTag) > 0:
text += "###### Authored by {}\n\n".format(self.formatText(authorTag[0].getText()))
versionTag = [blocktag for blocktag in blocktags if blocktag.getName() == "version"]
if len(versionTag) > 0:
text += "Version {}\n\n".format(self.formatText(versionTag[0].getText()))
paramTags = [blocktag for blocktag in blocktags if blocktag.getName() == "param"]
if len(paramTags) > 0:
text += "**params**\n\n"
for paramTag in paramTags:
content = paramTag.getText().getContent()
if len(content) > 0:
first = re.split('\s+', content[0])
var = first[0]
text += "* `{}` - {}{}\n".format(var, ' '.join(first[1:]), formatTextContent(content[1:]))
returnTag = [blocktag for blocktag in blocktags if blocktag.getName() == "return"]
if len(returnTag) > 0:
content = returnTag[0].getText().getContent()
if len(content) > 0:
text += "\n**returns** {}\n\n".format(formatTextContent(content))
throwsTags = [blocktag for blocktag in blocktags if blocktag.getName() == "throws"]
if len(throwsTags) > 0:
text += "**throws**\n\n"
for throwsTag in throwsTags:
content = throwsTag.getText().getContent()
if len(content) > 0:
first = re.split('\s+', content[0])
typ = first[0]
text += "* `{}` {}{}\n".format(typ, ' '.join(first[1:]), formatTextContent(content[1:]))
seeTags = [blocktag for blocktag in blocktags if blocktag.getName() == "see"]
if len(seeTags) > 0:
text += "**see**\n\n"
for seeTag in seeTags:
content = seeTag.getText().getContent()
if len(content) > 0:
text += "* {}\n".format(formatTextContent(content))
sinceTag = [blocktag for blocktag in blocktags if blocktag.getName() == "since"]
if len(sinceTag) > 0:
text += "\n**since** {}\n\n".format(self.formatText(sinceTag[0].getText()))
deprecatedTag = [blocktag for blocktag in blocktags if blocktag.getName() == "deprecated"]
if len(deprecatedTag) > 0:
content = deprecatedTag[0].getText().getContent()
if len(content) > 0:
text += "~~deprecated~~ {}\n\n".format(formatTextContent(content))
return text
'''
Modify a template tag to be a custom value
'''
def setTemplate(self, tag, text):
if isinstance(text, list):
text = '\n'.join(text)
for title in ["HOME.md", "README.md"]:
f = os.path.join(self.wikidir, title)
if os.path.isfile(f):
strbuild = '{{ %s }}' % tag
h = open(f, 'r')
contents = h.read()
contents = contents.replace(strbuild, text)
h.close()
h = open(f, 'w')
h.write(contents)
h.close()
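'''
Illustrative sketch only: the placeholder substitution performed by
setTemplate() above, shown on an in-memory string instead of HOME.md.
'''
if __name__ == '__main__':
    contents = '# {{ title }}\n\n{{ readme }}\n'
    contents = contents.replace('{{ %s }}' % 'title', 'MyApp')
    contents = contents.replace('{{ %s }}' % 'readme', 'Generated by the wiki builder.')
    print contents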
|
|
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for TPU Embeddings mid level API on TPU."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import os
from absl import flags
from absl.testing import parameterized
import numpy as np
from tensorflow.python.compat import v2_compat
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.distribute import distribute_lib
from tensorflow.python.distribute import tpu_strategy
from tensorflow.python.distribute.cluster_resolver import tpu_cluster_resolver
from tensorflow.python.eager import backprop
from tensorflow.python.eager import def_function
from tensorflow.python.eager import remote
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.ops import init_ops_v2
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.ragged import ragged_tensor
from tensorflow.python.platform import test
from tensorflow.python.tpu import tpu_embedding_v2
from tensorflow.python.tpu import tpu_embedding_v2_utils
from tensorflow.python.tpu import tpu_strategy_util
from tensorflow.python.util import nest
FLAGS = flags.FLAGS
flags.DEFINE_string('tpu', '', 'Name of TPU to connect to.')
flags.DEFINE_string('project', None, 'Name of GCP project with TPU.')
flags.DEFINE_string('zone', None, 'Name of GCP zone with TPU.')
flags.DEFINE_string('model_dir', os.environ.get('TEST_TMPDIR'),
'A temporary directory.')
class TPUEmbeddingCorrectness(parameterized.TestCase, test.TestCase):
def setUp(self):
super(TPUEmbeddingCorrectness, self).setUp()
self.embedding_values = np.array(list(range(32)), dtype=np.float64)
self.initializer = init_ops_v2.Constant(self.embedding_values)
# Embedding for video initialized to
# 0 1 2 3
# 4 5 6 7
# ...
self.table_video = tpu_embedding_v2_utils.TableConfig(
vocabulary_size=8,
dim=4,
initializer=self.initializer,
combiner='sum',
name='video')
# Embedding for user initialized to
# 0 1
# 2 3
# 4 5
# 6 7
# ...
self.table_user = tpu_embedding_v2_utils.TableConfig(
vocabulary_size=16,
dim=2,
initializer=self.initializer,
combiner='mean',
name='user')
self.feature_config = (
tpu_embedding_v2_utils.FeatureConfig(
table=self.table_video, name='watched'),
tpu_embedding_v2_utils.FeatureConfig(
table=self.table_video, name='favorited'),
tpu_embedding_v2_utils.FeatureConfig(
table=self.table_user, name='friends'))
self.batch_size = 2
self.data_batch_size = 4
# One (global) batch of inputs
# sparse tensor for watched:
# row 0: 0
# row 1: 0, 1
# row 2: 0, 1
# row 3: 1
self.feature_watched_indices = [[0, 0], [1, 0], [1, 1],
[2, 0], [2, 1], [3, 0]]
self.feature_watched_values = [0, 0, 1, 0, 1, 1]
self.feature_watched_row_lengths = [1, 2, 2, 1]
# sparse tensor for favorited:
# row 0: 0, 1
# row 1: 1
# row 2: 0
# row 3: 0, 1
self.feature_favorited_indices = [[0, 0], [0, 1], [1, 0],
[2, 0], [3, 0], [3, 1]]
self.feature_favorited_values = [0, 1, 1, 0, 0, 1]
self.feature_favorited_row_lengths = [2, 1, 1, 2]
# sparse tensor for friends:
# row 0: 3
# row 1: 0, 1, 2
# row 2: 3
# row 3: 0, 1, 2
self.feature_friends_indices = [[0, 0], [1, 0], [1, 1], [1, 2],
[2, 0], [3, 0], [3, 1], [3, 2]]
self.feature_friends_values = [3, 0, 1, 2, 3, 0, 1, 2]
self.feature_friends_row_lengths = [1, 3, 1, 3]
self.resolver = None
def _get_strategy(self):
self.resolver = tpu_cluster_resolver.TPUClusterResolver(
tpu=FLAGS.tpu, zone=FLAGS.zone, project=FLAGS.project)
remote.connect_to_cluster(self.resolver)
tpu_strategy_util.initialize_tpu_system(self.resolver)
return tpu_strategy.TPUStrategy(self.resolver)
def _create_strategy_and_mid_level(self, optimizer_name):
strategy = self._get_strategy()
with strategy.scope():
if optimizer_name == 'sgd':
optimizer = tpu_embedding_v2_utils.SGD(learning_rate=0.1)
elif optimizer_name == 'adagrad':
optimizer = tpu_embedding_v2_utils.Adagrad(learning_rate=0.1)
elif optimizer_name == 'adam':
optimizer = tpu_embedding_v2_utils.Adam(learning_rate=0.1)
else:
raise ValueError('optimizer is not recognized: ', optimizer_name)
mid_level_api = self._create_mid_level(optimizer=optimizer)
return strategy, mid_level_api, optimizer
@parameterized.parameters(
*itertools.product(
['sgd', 'adagrad', 'adam'],
[True, False],
[True, False]))
def test_embedding(self, optimizer_name, training, sparse):
strategy, mid_level_api, optimizer = (
self._create_strategy_and_mid_level(optimizer_name))
if sparse:
dataset = self._create_sparse_dataset(strategy)
else:
dataset = self._create_ragged_dataset(strategy)
dist = strategy.experimental_distribute_dataset(
dataset,
options=distribute_lib.InputOptions(experimental_fetch_to_device=False))
dist_iter = iter(dist)
@def_function.function
def test_fn():
def step():
"""Create and run computation that returns the embedding activations."""
if not training:
activations = mid_level_api.dequeue()
total_loss = _get_total_loss_tensor(activations)
ret_val = [total_loss] + list(activations)
return ret_val
else:
with backprop.GradientTape() as tape:
activations = mid_level_api.dequeue()
tape.watch(activations)
total_loss = _get_total_loss_tensor(activations)
loss_per_replica = total_loss / strategy.num_replicas_in_sync
gradients = tape.gradient(loss_per_replica, activations)
mid_level_api.apply_gradients(gradients)
ret_val = [total_loss] + list(activations)
return ret_val
mid_level_api.enqueue(next(dist_iter), training=training)
result = strategy.run(step)
return result
# Run model.
shard_out_val = test_fn()
# Retrieve TPU weights to CPU.
mid_level_api._retrieve_variables()
# Compute sparse tensors for global batch.
input_data = next(iter(self._create_sparse_dataset(strategy)))
# Check results.
self._check_results(strategy, shard_out_val, training, input_data,
mid_level_api._variables,
optimizer)
def _create_mid_level(self, optimizer=None):
# Create `TPUEmbedding` object.
if optimizer is None:
optimizer = tpu_embedding_v2_utils.SGD(learning_rate=0.1)
return tpu_embedding_v2.TPUEmbedding(
feature_config=self.feature_config,
optimizer=optimizer)
def _create_sparse_data(self, include_weights, weight=0.5):
sparse_features = (
sparse_tensor.SparseTensor(
indices=self.feature_watched_indices,
values=self.feature_watched_values,
dense_shape=[self.data_batch_size, 2]),
sparse_tensor.SparseTensor(
indices=self.feature_favorited_indices,
values=self.feature_favorited_values,
dense_shape=[self.data_batch_size, 2]),
sparse_tensor.SparseTensor(
indices=self.feature_friends_indices,
values=self.feature_friends_values,
dense_shape=[self.data_batch_size, 3]))
if include_weights:
weights = []
for sparse in sparse_features:
values = (
array_ops.ones_like(sparse.values, dtype=dtypes.float32) * weight)
weights.append(sparse_tensor.SparseTensor(
indices=sparse.indices,
values=values,
dense_shape=sparse.dense_shape))
sparse_features = (sparse_features, tuple(weights))
return sparse_features
def _create_sparse_dataset(self, strategy, include_weights=False, weight=0.5):
# Create dataset for enqueue operation
sparse_features = self._create_sparse_data(include_weights, weight)
dataset = dataset_ops.DatasetV2.from_tensors(sparse_features)
# Data is batched to self.data_batch_size, rebatch to global batch size.
return dataset.unbatch().repeat().batch(
self.batch_size * strategy.num_replicas_in_sync, drop_remainder=True)
def _create_ragged_dataset(self, strategy, include_weights=False, weight=0.5):
# Create dataset for enqueue operation
sparse_features = self._create_sparse_data(include_weights, weight)
ragged_features = nest.map_structure(ragged_tensor.RaggedTensor.from_sparse,
sparse_features)
dataset = dataset_ops.DatasetV2.from_tensors(ragged_features)
# Data is batched to self.data_batch_size, rebatch to global batch size.
return dataset.unbatch().repeat().batch(
self.batch_size * strategy.num_replicas_in_sync, drop_remainder=True)
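# For example, with batch_size=2 and 4 replicas the two dataset builders above
# yield global batches of 8 rows (2 per replica); unbatch()/repeat() cycles
# through the 4 example rows defined in setUp to fill each global batch.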
def _create_dense_input_fn(self, strategy, include_weights=False, weight=0.5):
def input_fn(ctx):
del ctx
features = (
constant_op.constant(self.feature_watched_values[-2:],
dtype=dtypes.int32),
constant_op.constant(self.feature_favorited_values[-2:],
dtype=dtypes.int32),
constant_op.constant(self.feature_friends_values[-2:],
dtype=dtypes.int32))
if include_weights:
weights = [array_ops.ones_like(t, dtype=dtypes.float32) * weight
for t in features]
features = (features, tuple(weights))
return dataset_ops.DatasetV2.from_tensors(features).repeat()
return input_fn
def _check_results(self, strategy, shard_out_val, training, input_data,
table_to_variable, optimizer):
num_replicas = strategy.num_replicas_in_sync
# Unpack the values `strategy.run()` returns.
loss = _unpack(strategy, shard_out_val[0])
activation_watched = _unpack(strategy, shard_out_val[1])
activation_favorited = _unpack(strategy, shard_out_val[2])
activation_friends = _unpack(strategy, shard_out_val[3])
# Core 0:
# Calculate the values of embedding activations.
activation_watched_gold0 = np.array([[0, 1, 2, 3], [4, 6, 8, 10]])
activation_favorited_gold0 = np.array([[4, 6, 8, 10], [4, 5, 6, 7]])
# Second row of `activation_friends_gold0` is the mean of the following.
# row 0: 0 1
# row 1: 2 3
# row 2: 4 5
activation_friends_gold0 = np.array([[6, 7], [2, 3]])
loss_gold0 = _compute_loss(activation_watched_gold0,
activation_favorited_gold0,
activation_friends_gold0)
# Add on values from other cores:
# Activations for watched are an alternating sequence of
# activation_watched_gold0 and activation_favorited_gold0.
# For favorited it is the same but in the opposite order.
activation_watched_gold = np.concatenate(
(np.concatenate((np.expand_dims(activation_watched_gold0, axis=0),) *
(num_replicas // 2)),
np.concatenate((np.expand_dims(activation_favorited_gold0, axis=0),) *
(num_replicas // 2))),
axis=1).reshape([self.batch_size * num_replicas, 4])
activation_favorited_gold = np.concatenate(
(activation_watched_gold[self.batch_size:,],
activation_watched_gold[0:self.batch_size,]))
activation_friends_gold = np.concatenate(
(activation_friends_gold0,) * num_replicas)
loss_gold = [loss_gold0] * num_replicas
# Test values.
self.assertAllClose(activation_watched_gold, activation_watched)
self.assertAllClose(activation_favorited_gold, activation_favorited)
self.assertAllClose(activation_friends_gold, activation_friends)
self.assertAllClose(loss_gold, loss)
embedding_table_video_before = np.copy(
np.reshape(self.embedding_values, [8, 4]))
embedding_table_user_before = np.copy(
np.reshape(self.embedding_values, [16, 2]))
global_batch_size = self.batch_size * num_replicas
if training:
gradient_wrt_watched_gold = (2 * activation_watched_gold /
global_batch_size)
gradient_wrt_favorited_gold = (2 * activation_favorited_gold /
global_batch_size)
gradient_wrt_friends_gold = (2 * activation_friends_gold /
global_batch_size)
# Calculate gradients wrt embedding tables.
gradients_wrt_user = (
_compute_gradients_wrt_embedding_table(
global_batch_size, gradient_wrt_friends_gold,
embedding_table_user_before, input_data[2].indices.numpy(),
input_data[2].values.numpy(), self.table_user.combiner))
gradients_wrt_video = (
_compute_gradients_wrt_embedding_table(
global_batch_size, gradient_wrt_favorited_gold,
embedding_table_video_before, input_data[1].indices.numpy(),
input_data[1].values.numpy(), self.table_video.combiner) +
_compute_gradients_wrt_embedding_table(
global_batch_size, gradient_wrt_watched_gold,
embedding_table_video_before, input_data[0].indices.numpy(),
input_data[0].values.numpy(), self.table_video.combiner))
self._check_embedding_and_slot_variables(embedding_table_user_before,
gradients_wrt_user,
embedding_table_video_before,
gradients_wrt_video,
optimizer,
table_to_variable)
def _check_embedding_and_slot_variables(self, embedding_table_user_before,
gradients_wrt_user,
embedding_table_video_before,
gradients_wrt_video,
optimizer,
table_to_variable):
if isinstance(optimizer, tpu_embedding_v2_utils.SGD):
check_fn = self._check_embedding_and_slot_variables_for_sgd
elif isinstance(optimizer, tpu_embedding_v2_utils.Adagrad):
check_fn = self._check_embedding_and_slot_variables_for_adagrad
elif isinstance(optimizer, tpu_embedding_v2_utils.Adam):
check_fn = self._check_embedding_and_slot_variables_for_adam
else:
raise ValueError('optimizer is not recognized: ', type(optimizer))
check_fn(embedding_table_user_before, gradients_wrt_user,
optimizer, table_to_variable[self.table_user.name])
check_fn(embedding_table_video_before, gradients_wrt_video,
optimizer, table_to_variable[self.table_video.name])
def _check_embedding_and_slot_variables_for_sgd(self, embedding_table_before,
gradients,
optimizer,
variables):
embedding_table = np.copy(embedding_table_before)
embedding_table -= optimizer.learning_rate * np.sum(gradients, axis=0)
self.assertAllClose(_get_variable(variables['parameters']).numpy(),
embedding_table)
def _check_embedding_and_slot_variables_for_adagrad(self,
embedding_table_before,
gradients,
optimizer,
variable):
embedding_table = np.copy(embedding_table_before)
accumulator = (
optimizer.initial_accumulator_value + np.sum(gradients, axis=0)**2)
embedding_table -= (
optimizer.learning_rate * np.sum(gradients, axis=0) /
np.sqrt(accumulator))
self.assertAllClose(_get_variable(variable['parameters']).numpy(),
embedding_table)
self.assertAllClose(_get_variable(variable['accumulators']).numpy(),
accumulator)
def _check_embedding_and_slot_variables_for_adam(self, embedding_table_before,
gradients,
optimizer,
variable):
embedding_table = np.copy(embedding_table_before)
g = np.sum(gradients, axis=0)
v = g**2 * (1 - optimizer.beta_2)
m = g * (1 - optimizer.beta_1)
epsilon = optimizer.epsilon
# TPU Embeddings don't have the LR decay factor for Adam.
lr_modifier = 1
embedding_table -= (
m * optimizer.learning_rate * lr_modifier / (np.sqrt(v) + epsilon))
self.assertAllClose(_get_variable(variable['parameters']).numpy(),
embedding_table, rtol=1e-4)
self.assertAllClose(_get_variable(variable['momenta']).numpy(),
m, rtol=1e-4)
self.assertAllClose(_get_variable(variable['velocities']).numpy(),
v, rtol=1e-4)
def _get_replica_numpy(self, structured, strategy, replica_id):
def select_replica(x):
x = strategy.experimental_local_results(x)
if len(x) == 1:
return x[0].numpy()
return x[replica_id].numpy()
return nest.map_structure(select_replica, structured)
def test_dense_lookup(self):
strategy, mid_level_api, _ = self._create_strategy_and_mid_level('sgd')
input_fn = self._create_dense_input_fn(strategy)
dist = strategy.distribute_datasets_from_function(
input_fn,
options=distribute_lib.InputOptions(experimental_fetch_to_device=False))
dist_iter = iter(dist)
@def_function.function
def test_fn():
def step():
return mid_level_api.dequeue()
mid_level_api.enqueue(next(dist_iter), training=False)
return strategy.run(step)
# Run model.
shard0 = self._get_replica_numpy(test_fn(), strategy, 0)
# embedding_values is a linear list, so we reshape to match the correct
# shape of the corresponding table before performing the lookup.
numpy_videos = np.reshape(self.embedding_values, (8, 4))
numpy_users = np.reshape(self.embedding_values, (16, 2))
golden = ((numpy_videos[self.feature_watched_values[-2:]],
numpy_videos[self.feature_favorited_values[-2:]],
numpy_users[self.feature_friends_values[-2:]]))
self.assertAllClose(shard0, golden)
@parameterized.parameters([True, False])
def test_sequence_embeddings(self, sparse):
feature_config = (
tpu_embedding_v2_utils.FeatureConfig(
table=self.table_video, name='watched',
max_sequence_length=2),
tpu_embedding_v2_utils.FeatureConfig(
table=self.table_video, name='favorited',
max_sequence_length=2),
tpu_embedding_v2_utils.FeatureConfig(
table=self.table_user, name='friends',
max_sequence_length=3))
optimizer = tpu_embedding_v2_utils.SGD(learning_rate=0.1)
strategy = self._get_strategy()
num_replicas = strategy.num_replicas_in_sync
with strategy.scope():
mid_level = tpu_embedding_v2.TPUEmbedding(
feature_config=feature_config,
optimizer=optimizer)
# Call build here with an explicit batch size. We call 'next' outside of the
# tf.function, so the sparse tensor's shape arrives as a tensor whose value we
# can't determine at tracing time.
mid_level.build(self.batch_size)
if sparse:
dataset = self._create_sparse_dataset(strategy)
else:
dataset = self._create_ragged_dataset(strategy)
data = next(
iter(
strategy.experimental_distribute_dataset(
dataset,
options=distribute_lib.InputOptions(
experimental_fetch_to_device=False))))
@def_function.function
def embedding_and_set_gradients(data):
def tpu_fn():
activations = mid_level.dequeue()
mid_level.apply_gradients(nest.map_structure(array_ops.ones_like,
activations))
return activations
mid_level.enqueue(data)
return strategy.run(tpu_fn)
@def_function.function
def embedding_only(data):
def tpu_fn():
return mid_level.dequeue()
mid_level.enqueue(data)
return strategy.run(tpu_fn)
# Only check core 0.
before_update = self._get_replica_numpy(
embedding_and_set_gradients(data), strategy, 0)
after_update = self._get_replica_numpy(embedding_only(data), strategy, 0)
# For videos table, row 0 and row 1 are looked up 3*num_replicas times as
# they occur 3 times per replica (considering the features 0 and 1 which are
# both looked up in the videos table).
# Feature 0 has ids [0, 0, 1], [0, 1, 1], ... repeated over num_replicas
# Feature 1 has ids [0, 1, 1], [0, 0, 1], ... repeated over num_replicas
# This means that both rows 0 and 1 get a -0.1*3*num_replicas update
# For users table, each row is looked up twice:
# Feature 2 has ids [3, 0, 1, 2], .. repeated over num_replicas
# This means that we get a -0.1*num_replicas update to the third feature.
# In general this means that after the update, if we lookup feature 0 and 1
# the values will be 0.3*num_replicas lower per entry and for feature 2 they
# will be 0.1*num_replicas lower.
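# (For example, with num_replicas = 2 the video-table rows drop by
# 0.1 * 3 * 2 = 0.6 and the user-table rows by 0.1 * 2 = 0.2.)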
# The one issue is that these lookups contain padding values.
# For core 0, we get the first 2 elements of the 4 element batch.
# For feature 0, the indices are [[0, 0], [1, 0], [1, 1]] with max sequence
# length of 2, which means that [0, 1] will be 0s.
# For feature 1, the indices are [[0, 0], [0, 1], [1, 0]] with max sequence
# length of 2, which means that [1, 1] will be 0s.
# For feature 2, the indices are [[0, 0], [1, 0], [1, 1], [1, 2]] with max
# sequence length of 3, which means that [0, 1], [0, 2] will be 0s.
# The following masks represent that so that we only apply the above updates
# to the non-padding rows:
masks = (
np.array([[[1], [0]], [[1], [1]]]),
np.array([[[1], [1]], [[1], [0]]]),
np.array([[[1], [0], [0]], [[1], [1], [1]]]))
per_row_update = (0.3 * num_replicas,
0.3 * num_replicas,
0.1 * num_replicas)
golden = tuple([before - update * mask for before, update, mask in
zip(before_update, per_row_update, masks)])
self.assertAllClose(golden, after_update)
def _compute_gradients_wrt_embedding_table(batch_size,
gradient_wrt_activation,
embedding_table,
feature_indices,
feature_values,
combiner,
max_sequence_length=0):
"""Compute gradients wrt embedding_table.
Args:
batch_size: `int`, batch size.
gradient_wrt_activation: `np.array` with shape `batch_size` by
embedding `dimension`.
embedding_table: `np.array` with shape `vocabulary_size` by embedding
`dimension`.
feature_indices: `indices` as used to construct `SparseTensor`.
feature_values: `values` as used to construct `SparseTensor`.
combiner: `String`, 'mean' or 'sum'.
max_sequence_length: If non-zero, a sequence feature with the given length.
Returns:
Gradients wrt `embedding_table`, an `np.array` with shape `batch_size` by
`vocabulary_size` by embedding `dimension`.
Raises:
ValueError: if `combiner` is not one of 'mean' or 'sum'.
"""
if combiner not in ('mean', 'sum'):
raise ValueError('`combiner` must be mean or sum; got {}.'.format(combiner))
grads = []
for i in range(batch_size):
grad = np.zeros_like(embedding_table)
count = 0
for (batch_i, seq_index), vocabulary_id in zip(feature_indices,
feature_values):
if batch_i == i:
count += 1
if max_sequence_length > 0:
if seq_index < max_sequence_length:
grad[vocabulary_id, :] += gradient_wrt_activation[i, seq_index, :]
else:
grad[vocabulary_id, :] += gradient_wrt_activation[i, :]
if combiner == 'mean' and not max_sequence_length:
grad = grad / count
grads.append(grad)
return np.stack(grads)
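# Illustrative sketch (not part of the original test): a tiny hand-checkable
# call to _compute_gradients_wrt_embedding_table. The helper below and its
# values are hypothetical and exist only to show the shapes involved.
def _example_gradients_wrt_embedding_table():
  # Batch of 2; element 0 looks up rows 0 and 2, element 1 looks up row 1.
  indices = np.array([[0, 0], [0, 1], [1, 0]])
  values = np.array([0, 2, 1])
  table = np.zeros((3, 2))
  grad_wrt_activation = np.array([[1.0, 1.0], [0.5, 0.5]])
  grads = _compute_gradients_wrt_embedding_table(
      2, grad_wrt_activation, table, indices, values, 'sum')
  # grads has shape (2, 3, 2): rows 0 and 2 receive element 0's gradient and
  # row 1 receives element 1's gradient.
  return grads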
def _unpack(strategy, per_replica_output):
per_replica_output = strategy.experimental_local_results(per_replica_output)
per_replica_output = array_ops.concat(per_replica_output, axis=0).numpy()
return per_replica_output
def _get_total_loss_tensor(activations):
losses = []
for activation in activations:
losses.append(
math_ops.reduce_mean(
math_ops.reduce_sum(
gen_math_ops.squared_difference(activation, 0), 1)))
total_loss = array_ops.expand_dims_v2(sum(losses), 0)
return total_loss
def _compute_loss(activation_watched, activation_favorited, activation_friends):
watched_loss = np.mean(np.sum(activation_watched**2, axis=1))
if len(activation_favorited.shape) == 2:
favorited_loss = np.mean(np.sum(activation_favorited**2, axis=1))
else:
favorited_loss = np.mean(np.sum(activation_favorited**2, axis=(1, 2)))
if len(activation_friends.shape) == 2:
friends_loss = np.mean(np.sum(activation_friends**2, axis=1))
else:
friends_loss = np.mean(np.sum(activation_friends**2, axis=(1, 2)))
loss = watched_loss + favorited_loss + friends_loss
return loss
def _get_variable(variable):
if isinstance(variable, tpu_embedding_v2.TPUShardedVariable):
return variable.variables[0]
return variable
if __name__ == '__main__':
v2_compat.enable_v2_behavior()
test.main()
|
|
#!/usr/bin/env python
#
# Copyright 2012 Dominic Rout
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Provides empty classes for the twitter data models"""
import urllib, anyjson, sys, pprint
from .twitter_exception import StreamDisconnectException
from functools import partial
from future.utils import iteritems
import oauth2 as oauth
try:
import urlparse
except ImportError:
import urllib.parse as urlparse
class _ApiModel(object):
"""Base class for a model which can be used to contain twitter data.
Models are not generally specified in full but can instead be instantiated with
whatever data is available.
"""
def __init__(self, data=None, api=None, **params):
# Copy the input so the shared default (or the caller's dict) is never mutated.
data = dict(data) if data else dict()
data.update(params)
if api:
api._bless(self)
self.api = api
custom_attrs = self.custom_attrs()
self.creation_attrs = set()
for attribute, value in iteritems(data):
if attribute in custom_attrs:
constructor = custom_attrs[attribute]
setattr(self, attribute, constructor(value))
else:
setattr(self, attribute, value)
self.creation_attrs.add(attribute)
def custom_attrs(self):
"""A function to return a dictionary of 'custom attributes'.
These attributes are handled specially and are usually needed to instantiate
inner models.
"""
return dict()
def to_dict(self):
"""Returns a version of the object containing the same attrs as
were used to create it, suitable for outputting to JSON."""
result = dict()
for k in self.creation_attrs:
v = getattr(self, k)
def transform(v):
if hasattr(v, "to_dict"):
v = v.to_dict()
if hasattr(v, "items"):
v = dict((a, transform(b)) for (a, b) in iteritems(v))
# Strings are iterable in Python 3 but should pass through unchanged.
elif hasattr(v, "__iter__") and not isinstance(v, str):
v = [transform(w) for w in v]
return v
result[k] = transform(v)
return result
def __repr__(self):
attr_str = [a + ":" + repr(v) for a, v in iteritems(self.to_dict())]
attr_str = [a for a in attr_str if len(a) < 140]
attr_str = ", ".join(attr_str)
return "<%s %s>" % (self.__class__.__name__, attr_str)
class User(_ApiModel):
def custom_attrs(self):
return {"status": Status}
class SearchResult(_ApiModel):
def custom_attrs(self):
return {"statuses": partial(map, partial(Status, api=self.api))}
@property
def results(self):
# For version 1.1 compatibility
return self.statuses
def __iter__(self):
return self.statuses.__iter__()
def __repr__(self):
return "<Search with %d results>" % len(self.statuses)
class Status(_ApiModel):
def __init__(self, data = dict(), api = None, **params):
self.delete = None
super(Status, self).__init__(data, api, **params)
def custom_attrs(self):
return {"user": User,
"entities": Entities,
"delete": lambda x: Status(x["status"])
}
def __repr__(self):
if self.delete:
result = "<Status DELETION %s>" % (self.delete.id_str)
elif hasattr(self, "user"):
result = "<Status @%s '%s'>" % (self.user.screen_name, self.text[:50] + "..." if len(self.text) > 50 else self.text)
elif hasattr(self, "from_user"):
result = "<Status @%s '%s'>" % (self.from_user, self.text[:50] + "..." if len(self.text) > 50 else self.text)
else:
result = "<Status '%s'>" % (self.text[:50] + "..." if len(self.text) > 50 else self.text)
return result.encode("ascii", errors="replace").decode("ascii")
class Entities(_ApiModel):
class Media(_ApiModel):
pass
class Url(_ApiModel):
pass
class UserMention(_ApiModel):
pass
class HashTag(_ApiModel):
pass
def custom_attrs(self):
return {
"media": lambda xs: [self.Media(x, self.api) for x in xs],
"urls": lambda xs: [self.Url(x, self.api) for x in xs],
"user_mentions": lambda xs: [self.UserMention(x, self.api) for x in xs],
"hashtags": lambda xs: [self.HashTag(x, self.api) for x in xs]
}
class SuggestionCategory(_ApiModel):
pass
class List(_ApiModel):
def custom_attrs(self):
return {"user": User}
class ResultsPage(_ApiModel):
def custom_attrs(self):
return {
"users": partial(map, partial(User, api=self.api)),
"lists": partial(map, partial(List, api=self.api))}
class LimitStatus(_ApiModel):
pass
class DirectMessage(_ApiModel):
def __repr__(self):
result = "<DirectMessage @%s -> @%s '%s'>" % (self.sender.screen_name,
self.recipient.screen_name,
self.text[:50] + "..." if len(self.text) > 50 else self.text)
return result.encode("ascii", errors="replace").decode("ascii")
def custom_attrs(self):
return {
"sender": User,
"recipient": User}
class StreamingCommands(_ApiModel):
"""Superclass for streaming commands.
Purely exists to make life easier for the consumer"""
pass
class StatusDeletion(StreamingCommands):
def __init__(self, data = dict(), api = None, **params):
super(StatusDeletion, self).__init__(data["status"], api, **params)
class GeoDeletion(StreamingCommands):
pass
class Limit(StreamingCommands):
pass
class StatusWithheld(StreamingCommands):
pass
class UserWithheld(StreamingCommands):
pass
class SteamWarning(StreamingCommands):
pass
class Event(StreamingCommands):
pass
def streaming_selector(result, api = None, **params):
"""Selects from the possible models that can be generated by a stream"""
classes = {"delete": StatusDeletion,
"scrub_geo": GeoDeletion,
"limit": Limit,
"status_withheld": StatusWithheld,
"user_withheld": UserWithheld,
"disconnect": StreamDisconnectException.raise_for_response,
"warning": SteamWarning,
"friends": lambda x, y: x,
"friends_str": lambda x, y: x,
"event": Event,
"sender": DirectMessage
}
matched_classes = set(result.keys()) & set(classes.keys())
for key in matched_classes:
return classes[key](result[key], api, **params)
return Status(result, api, **params)
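# Illustrative sketch (not part of the library): a stream message containing a
# "delete" key is mapped to StatusDeletion, while an unmatched payload falls
# through to a plain Status. The payloads below are hypothetical.
def _example_streaming_selector():
    deletion = streaming_selector({"delete": {"status": {"id_str": "1"}}})
    status = streaming_selector({"text": "hello"})
    return deletion, status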
|
|
# -*- encoding: utf-8 -*-
# Copyright (c) 2015 b<>com
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
from unittest import mock
from oslo_config import cfg
from oslo_utils import uuidutils
from apscheduler import job
from watcher.applier import rpcapi
from watcher.common import exception
from watcher.common import scheduling
from watcher.db.sqlalchemy import api as sq_api
from watcher.decision_engine.audit import continuous
from watcher.decision_engine.audit import oneshot
from watcher.decision_engine.model.collector import manager
from watcher.decision_engine.strategy.strategies import base as base_strategy
from watcher.decision_engine.strategy.strategies import dummy_strategy
from watcher import notifications
from watcher import objects
from watcher.tests.db import base
from watcher.tests.decision_engine.model import faker_cluster_state as faker
from watcher.tests.objects import utils as obj_utils
class TestOneShotAuditHandler(base.DbTestCase):
def setUp(self):
super(TestOneShotAuditHandler, self).setUp()
p_audit_notifications = mock.patch.object(
notifications, 'audit', autospec=True)
self.m_audit_notifications = p_audit_notifications.start()
self.addCleanup(p_audit_notifications.stop)
self.goal = obj_utils.create_test_goal(
self.context, id=1, name=dummy_strategy.DummyStrategy.get_name())
self.strategy = obj_utils.create_test_strategy(
self.context, name=dummy_strategy.DummyStrategy.get_name(),
goal_id=self.goal.id)
audit_template = obj_utils.create_test_audit_template(
self.context, strategy_id=self.strategy.id)
self.audit = obj_utils.create_test_audit(
self.context,
uuid=uuidutils.generate_uuid(),
goal_id=self.goal.id,
strategy_id=self.strategy.id,
audit_template_id=audit_template.id,
goal=self.goal)
@mock.patch.object(manager.CollectorManager, "get_cluster_model_collector")
@mock.patch.object(base_strategy.BaseStrategy, "compute_model",
mock.Mock(stale=False))
def test_trigger_audit_without_errors(self, m_collector):
m_collector.return_value = faker.FakerModelCollector()
audit_handler = oneshot.OneShotAuditHandler()
audit_handler.execute(self.audit, self.context)
expected_calls = [
mock.call(self.context, self.audit,
action=objects.fields.NotificationAction.STRATEGY,
phase=objects.fields.NotificationPhase.START),
mock.call(self.context, self.audit,
action=objects.fields.NotificationAction.STRATEGY,
phase=objects.fields.NotificationPhase.END),
mock.call(self.context, self.audit,
action=objects.fields.NotificationAction.PLANNER,
phase=objects.fields.NotificationPhase.START),
mock.call(self.context, self.audit,
action=objects.fields.NotificationAction.PLANNER,
phase=objects.fields.NotificationPhase.END)]
self.assertEqual(
expected_calls,
self.m_audit_notifications.send_action_notification.call_args_list)
@mock.patch.object(base_strategy.BaseStrategy, "do_execute")
@mock.patch.object(manager.CollectorManager, "get_cluster_model_collector")
def test_trigger_audit_with_error(self, m_collector, m_do_execute):
m_collector.return_value = faker.FakerModelCollector()
m_do_execute.side_effect = Exception
audit_handler = oneshot.OneShotAuditHandler()
audit_handler.execute(self.audit, self.context)
expected_calls = [
mock.call(self.context, self.audit,
action=objects.fields.NotificationAction.STRATEGY,
phase=objects.fields.NotificationPhase.START),
mock.call(self.context, self.audit,
action=objects.fields.NotificationAction.STRATEGY,
priority=objects.fields.NotificationPriority.ERROR,
phase=objects.fields.NotificationPhase.ERROR)]
self.assertEqual(
expected_calls,
self.m_audit_notifications.send_action_notification.call_args_list)
@mock.patch.object(manager.CollectorManager, "get_cluster_model_collector")
@mock.patch.object(base_strategy.BaseStrategy, "compute_model",
mock.Mock(stale=False))
def test_trigger_audit_state_succeeded(self, m_collector):
m_collector.return_value = faker.FakerModelCollector()
audit_handler = oneshot.OneShotAuditHandler()
audit_handler.execute(self.audit, self.context)
audit = objects.audit.Audit.get_by_uuid(self.context, self.audit.uuid)
self.assertEqual(objects.audit.State.SUCCEEDED, audit.state)
expected_calls = [
mock.call(self.context, self.audit,
action=objects.fields.NotificationAction.STRATEGY,
phase=objects.fields.NotificationPhase.START),
mock.call(self.context, self.audit,
action=objects.fields.NotificationAction.STRATEGY,
phase=objects.fields.NotificationPhase.END),
mock.call(self.context, self.audit,
action=objects.fields.NotificationAction.PLANNER,
phase=objects.fields.NotificationPhase.START),
mock.call(self.context, self.audit,
action=objects.fields.NotificationAction.PLANNER,
phase=objects.fields.NotificationPhase.END)]
self.assertEqual(
expected_calls,
self.m_audit_notifications.send_action_notification.call_args_list)
@mock.patch.object(manager.CollectorManager, "get_cluster_model_collector")
@mock.patch.object(base_strategy.BaseStrategy, "compute_model",
mock.Mock(stale=False))
def test_trigger_audit_send_notification(self, m_collector):
m_collector.return_value = faker.FakerModelCollector()
audit_handler = oneshot.OneShotAuditHandler()
audit_handler.execute(self.audit, self.context)
expected_calls = [
mock.call(self.context, self.audit,
action=objects.fields.NotificationAction.STRATEGY,
phase=objects.fields.NotificationPhase.START),
mock.call(self.context, self.audit,
action=objects.fields.NotificationAction.STRATEGY,
phase=objects.fields.NotificationPhase.END),
mock.call(self.context, self.audit,
action=objects.fields.NotificationAction.PLANNER,
phase=objects.fields.NotificationPhase.START),
mock.call(self.context, self.audit,
action=objects.fields.NotificationAction.PLANNER,
phase=objects.fields.NotificationPhase.END)]
self.assertEqual(
expected_calls,
self.m_audit_notifications.send_action_notification.call_args_list)
class TestAutoTriggerActionPlan(base.DbTestCase):
def setUp(self):
super(TestAutoTriggerActionPlan, self).setUp()
self.goal = obj_utils.create_test_goal(
self.context, id=1, name=dummy_strategy.DummyStrategy.get_name())
self.strategy = obj_utils.create_test_strategy(
self.context, name=dummy_strategy.DummyStrategy.get_name(),
goal_id=self.goal.id)
audit_template = obj_utils.create_test_audit_template(
self.context)
self.audit = obj_utils.create_test_audit(
self.context,
id=0,
uuid=uuidutils.generate_uuid(),
audit_template_id=audit_template.id,
goal_id=self.goal.id,
audit_type=objects.audit.AuditType.CONTINUOUS.value,
goal=self.goal,
auto_trigger=True)
self.ongoing_action_plan = obj_utils.create_test_action_plan(
self.context,
uuid=uuidutils.generate_uuid(),
audit_id=self.audit.id,
strategy_id=self.strategy.id,
audit=self.audit,
strategy=self.strategy,
)
self.recommended_action_plan = obj_utils.create_test_action_plan(
self.context,
uuid=uuidutils.generate_uuid(),
state=objects.action_plan.State.ONGOING,
audit_id=self.audit.id,
strategy_id=self.strategy.id,
audit=self.audit,
strategy=self.strategy,
)
@mock.patch.object(oneshot.OneShotAuditHandler, 'do_execute')
@mock.patch.object(objects.action_plan.ActionPlan, 'list')
def test_trigger_audit_with_actionplan_ongoing(self, mock_list,
mock_do_execute):
mock_list.return_value = [self.ongoing_action_plan]
audit_handler = oneshot.OneShotAuditHandler()
audit_handler.execute(self.audit, self.context)
self.assertFalse(mock_do_execute.called)
@mock.patch.object(rpcapi.ApplierAPI, 'launch_action_plan')
@mock.patch.object(objects.action_plan.ActionPlan, 'list')
@mock.patch.object(objects.audit.Audit, 'get_by_id')
def test_trigger_action_plan_without_ongoing(self, mock_get_by_id,
mock_list, mock_applier):
mock_get_by_id.return_value = self.audit
mock_list.return_value = []
auto_trigger_handler = oneshot.OneShotAuditHandler()
with mock.patch.object(auto_trigger_handler,
'do_schedule') as m_schedule:
m_schedule().uuid = self.recommended_action_plan.uuid
auto_trigger_handler.post_execute(self.audit, mock.MagicMock(),
self.context)
mock_applier.assert_called_once_with(self.context,
self.recommended_action_plan.uuid)
@mock.patch.object(oneshot.OneShotAuditHandler, 'do_execute')
def test_trigger_audit_with_force(self, mock_do_execute):
audit_handler = oneshot.OneShotAuditHandler()
self.audit.force = True
audit_handler.execute(self.audit, self.context)
self.assertTrue(mock_do_execute.called)
class TestContinuousAuditHandler(base.DbTestCase):
def setUp(self):
super(TestContinuousAuditHandler, self).setUp()
p_audit_notifications = mock.patch.object(
notifications, 'audit', autospec=True)
self.m_audit_notifications = p_audit_notifications.start()
self.addCleanup(p_audit_notifications.stop)
self.goal = obj_utils.create_test_goal(
self.context, id=1, name=dummy_strategy.DummyStrategy.get_name())
audit_template = obj_utils.create_test_audit_template(
self.context)
self.audits = [
obj_utils.create_test_audit(
self.context,
id=id_,
name='My Audit {0}'.format(id_),
uuid=uuidutils.generate_uuid(),
audit_template_id=audit_template.id,
goal_id=self.goal.id,
audit_type=objects.audit.AuditType.CONTINUOUS.value,
goal=self.goal,
hostname='hostname1')
for id_ in range(2, 4)]
cfg.CONF.set_override("host", "hostname1")
@mock.patch.object(objects.service.Service, 'list')
@mock.patch.object(sq_api, 'get_engine')
@mock.patch.object(scheduling.BackgroundSchedulerService, 'add_job')
@mock.patch.object(scheduling.BackgroundSchedulerService, 'get_jobs')
@mock.patch.object(objects.audit.Audit, 'list')
def test_launch_audits_periodically_with_interval(
self, mock_list, mock_jobs, m_add_job, m_engine, m_service):
audit_handler = continuous.ContinuousAuditHandler()
mock_list.return_value = self.audits
self.audits[0].next_run_time = (datetime.datetime.now() -
datetime.timedelta(seconds=1800))
mock_jobs.return_value = mock.MagicMock()
m_engine.return_value = mock.MagicMock()
m_add_job.return_value = mock.MagicMock()
audit_handler.launch_audits_periodically()
m_service.assert_called()
m_engine.assert_called()
m_add_job.assert_called()
mock_jobs.assert_called()
self.assertIsNotNone(self.audits[0].next_run_time)
self.assertIsNone(self.audits[1].next_run_time)
@mock.patch.object(objects.service.Service, 'list')
@mock.patch.object(sq_api, 'get_engine')
@mock.patch.object(scheduling.BackgroundSchedulerService, 'add_job')
@mock.patch.object(scheduling.BackgroundSchedulerService, 'get_jobs')
@mock.patch.object(objects.audit.Audit, 'list')
def test_launch_audits_periodically_with_cron(
self, mock_list, mock_jobs, m_add_job, m_engine, m_service):
audit_handler = continuous.ContinuousAuditHandler()
mock_list.return_value = self.audits
self.audits[0].interval = "*/5 * * * *"
mock_jobs.return_value = mock.MagicMock()
m_engine.return_value = mock.MagicMock()
m_add_job.return_value = mock.MagicMock()
audit_handler.launch_audits_periodically()
m_service.assert_called()
m_engine.assert_called()
m_add_job.assert_called()
mock_jobs.assert_called()
self.assertIsNotNone(self.audits[0].next_run_time)
self.assertIsNone(self.audits[1].next_run_time)
@mock.patch.object(continuous.ContinuousAuditHandler, '_next_cron_time')
@mock.patch.object(objects.service.Service, 'list')
@mock.patch.object(sq_api, 'get_engine')
@mock.patch.object(scheduling.BackgroundSchedulerService, 'add_job')
@mock.patch.object(scheduling.BackgroundSchedulerService, 'get_jobs')
@mock.patch.object(objects.audit.Audit, 'list')
def test_launch_audits_periodically_with_invalid_cron(
self, mock_list, mock_jobs, m_add_job, m_engine, m_service,
mock_cron):
audit_handler = continuous.ContinuousAuditHandler()
mock_list.return_value = self.audits
self.audits[0].interval = "*/5* * * *"
mock_cron.side_effect = exception.CronFormatIsInvalid
mock_jobs.return_value = mock.MagicMock()
m_engine.return_value = mock.MagicMock()
m_add_job.return_value = mock.MagicMock()
self.assertRaises(exception.CronFormatIsInvalid,
audit_handler.launch_audits_periodically)
@mock.patch.object(objects.service.Service, 'list')
@mock.patch.object(sq_api, 'get_engine')
@mock.patch.object(scheduling.BackgroundSchedulerService, 'add_job')
@mock.patch.object(scheduling.BackgroundSchedulerService, 'get_jobs')
@mock.patch.object(objects.audit.Audit, 'list')
def test_launch_multiply_audits_periodically(self, mock_list,
mock_jobs, m_add_job,
m_engine, m_service):
audit_handler = continuous.ContinuousAuditHandler()
mock_list.return_value = self.audits
mock_jobs.return_value = mock.MagicMock()
m_engine.return_value = mock.MagicMock()
m_service.return_value = mock.MagicMock()
calls = [mock.call(audit_handler.execute_audit, 'interval',
args=[mock.ANY, mock.ANY],
seconds=3600,
name='execute_audit',
next_run_time=mock.ANY) for _ in self.audits]
audit_handler.launch_audits_periodically()
m_add_job.assert_has_calls(calls)
@mock.patch.object(objects.service.Service, 'list')
@mock.patch.object(sq_api, 'get_engine')
@mock.patch.object(scheduling.BackgroundSchedulerService, 'add_job')
@mock.patch.object(scheduling.BackgroundSchedulerService, 'get_jobs')
@mock.patch.object(objects.audit.Audit, 'list')
def test_period_audit_not_called_when_deleted(self, mock_list,
mock_jobs, m_add_job,
m_engine, m_service):
audit_handler = continuous.ContinuousAuditHandler()
mock_list.return_value = self.audits
mock_jobs.return_value = mock.MagicMock()
m_service.return_value = mock.MagicMock()
m_engine.return_value = mock.MagicMock()
ap_jobs = [job.Job(mock.MagicMock(), name='execute_audit',
func=audit_handler.execute_audit,
args=(self.audits[0], mock.MagicMock()),
kwargs={}),
job.Job(mock.MagicMock(), name='execute_audit',
func=audit_handler.execute_audit,
args=(self.audits[1], mock.MagicMock()),
kwargs={})
]
mock_jobs.return_value = ap_jobs
audit_handler.launch_audits_periodically()
audit_handler.update_audit_state(self.audits[1],
objects.audit.State.CANCELLED)
audit_handler.update_audit_state(self.audits[0],
objects.audit.State.SUSPENDED)
is_inactive = audit_handler._is_audit_inactive(self.audits[1])
self.assertTrue(is_inactive)
is_inactive = audit_handler._is_audit_inactive(self.audits[0])
self.assertTrue(is_inactive)
@mock.patch.object(objects.service.Service, 'list')
@mock.patch.object(sq_api, 'get_engine')
@mock.patch.object(scheduling.BackgroundSchedulerService, 'get_jobs')
@mock.patch.object(objects.audit.AuditStateTransitionManager,
'is_inactive')
@mock.patch.object(continuous.ContinuousAuditHandler, 'execute')
def test_execute_audit_with_interval_no_job(
self,
m_execute,
m_is_inactive,
m_get_jobs,
m_get_engine,
m_service):
audit_handler = continuous.ContinuousAuditHandler()
self.audits[0].next_run_time = (datetime.datetime.now() -
datetime.timedelta(seconds=1800))
m_is_inactive.return_value = True
m_get_jobs.return_value = []
audit_handler.execute_audit(self.audits[0], self.context)
self.assertIsNotNone(self.audits[0].next_run_time)
@mock.patch.object(objects.service.Service, 'list')
@mock.patch.object(sq_api, 'get_engine')
@mock.patch.object(scheduling.BackgroundSchedulerService, 'remove_job')
@mock.patch.object(scheduling.BackgroundSchedulerService, 'add_job')
@mock.patch.object(scheduling.BackgroundSchedulerService, 'get_jobs')
@mock.patch.object(objects.audit.Audit, 'list')
def test_launch_audits_periodically_with_diff_interval(
self, mock_list, mock_jobs, m_add_job, m_remove_job,
m_engine, m_service):
audit_handler = continuous.ContinuousAuditHandler()
mock_list.return_value = self.audits
self.audits[0].next_run_time = (datetime.datetime.now() -
datetime.timedelta(seconds=1800))
m_job1 = mock.MagicMock()
m_job1.name = 'execute_audit'
m_audit = mock.MagicMock()
m_audit.uuid = self.audits[0].uuid
m_audit.interval = 60
m_job1.args = [m_audit]
mock_jobs.return_value = [m_job1]
m_engine.return_value = mock.MagicMock()
m_add_job.return_value = mock.MagicMock()
audit_handler.launch_audits_periodically()
m_service.assert_called()
m_engine.assert_called()
m_add_job.assert_called()
mock_jobs.assert_called()
self.assertIsNotNone(self.audits[0].next_run_time)
self.assertIsNone(self.audits[1].next_run_time)
audit_handler.launch_audits_periodically()
m_remove_job.assert_called()
@mock.patch.object(continuous.ContinuousAuditHandler, 'get_planner',
mock.Mock())
@mock.patch.object(base_strategy.BaseStrategy, "compute_model",
mock.Mock(stale=False))
def test_execute_audit(self):
audit_handler = continuous.ContinuousAuditHandler()
audit_handler.execute_audit(self.audits[0], self.context)
expected_calls = [
mock.call(self.context, self.audits[0],
action=objects.fields.NotificationAction.STRATEGY,
phase=objects.fields.NotificationPhase.START),
mock.call(self.context, self.audits[0],
action=objects.fields.NotificationAction.STRATEGY,
phase=objects.fields.NotificationPhase.END),
mock.call(self.context, self.audits[0],
action=objects.fields.NotificationAction.PLANNER,
phase=objects.fields.NotificationPhase.START),
mock.call(self.context, self.audits[0],
action=objects.fields.NotificationAction.PLANNER,
phase=objects.fields.NotificationPhase.END)]
self.assertEqual(
expected_calls,
self.m_audit_notifications.send_action_notification.call_args_list)
@mock.patch.object(scheduling.BackgroundSchedulerService, 'get_jobs')
def test_is_audit_inactive(self, mock_jobs):
audit_handler = continuous.ContinuousAuditHandler()
mock_jobs.return_value = mock.MagicMock()
audit_handler._audit_scheduler = mock.MagicMock()
ap_jobs = [job.Job(mock.MagicMock(), name='execute_audit',
func=audit_handler.execute_audit,
args=(self.audits[0], mock.MagicMock()),
kwargs={}),
]
audit_handler.update_audit_state(self.audits[1],
objects.audit.State.CANCELLED)
mock_jobs.return_value = ap_jobs
is_inactive = audit_handler._is_audit_inactive(self.audits[1])
self.assertTrue(is_inactive)
is_inactive = audit_handler._is_audit_inactive(self.audits[0])
self.assertFalse(is_inactive)
def test_check_audit_expired(self):
current = datetime.datetime.utcnow()
# start_time and end_time are None
audit_handler = continuous.ContinuousAuditHandler()
result = audit_handler.check_audit_expired(self.audits[0])
self.assertFalse(result)
self.assertIsNone(self.audits[0].start_time)
self.assertIsNone(self.audits[0].end_time)
# current time < start_time and end_time is None
self.audits[0].start_time = current+datetime.timedelta(days=1)
result = audit_handler.check_audit_expired(self.audits[0])
self.assertTrue(result)
self.assertIsNone(self.audits[0].end_time)
# current time is between start_time and end_time
self.audits[0].start_time = current-datetime.timedelta(days=1)
self.audits[0].end_time = current+datetime.timedelta(days=1)
result = audit_handler.check_audit_expired(self.audits[0])
self.assertFalse(result)
# current time > end_time
self.audits[0].end_time = current-datetime.timedelta(days=1)
result = audit_handler.check_audit_expired(self.audits[0])
self.assertTrue(result)
self.assertEqual(objects.audit.State.SUCCEEDED, self.audits[0].state)
|
|
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.core.urlresolvers import reverse
from django.forms import ValidationError # noqa
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon import messages
from horizon.utils import validators as utils_validators
from openstack_dashboard.api import cinder
from openstack_dashboard.dashboards.project.volumes.volumes \
import forms as project_forms
class ManageVolume(forms.SelfHandlingForm):
identifier = forms.CharField(
max_length=255,
label=_("Identifier"),
help_text=_("Name or other identifier for existing volume"))
id_type = forms.ChoiceField(
label=_("Identifier Type"),
help_text=_("Type of backend device identifier provided"))
host = forms.CharField(
max_length=255,
label=_("Host"),
help_text=_("Cinder host on which the existing volume resides; "
"takes the form: host@backend-name#pool"))
name = forms.CharField(
max_length=255,
label=_("Volume Name"),
required=False,
help_text=_("Volume name to be assigned"))
description = forms.CharField(max_length=255, widget=forms.Textarea(
attrs={'class': 'modal-body-fixed-width', 'rows': 4}),
label=_("Description"), required=False)
metadata = forms.CharField(max_length=255, widget=forms.Textarea(
attrs={'class': 'modal-body-fixed-width', 'rows': 2}),
label=_("Metadata"), required=False,
help_text=_("Comma-separated key=value pairs"),
validators=[utils_validators.validate_metadata])
volume_type = forms.ChoiceField(
label=_("Volume Type"),
required=False)
availability_zone = forms.ChoiceField(
label=_("Availability Zone"),
required=False)
bootable = forms.BooleanField(
label=_("Bootable"),
required=False,
help_text=_("Specifies that the newly created volume "
"should be marked as bootable"))
def __init__(self, request, *args, **kwargs):
super(ManageVolume, self).__init__(request, *args, **kwargs)
self.fields['id_type'].choices = [("source-name", _("Name"))] + \
[("source-id", _("ID"))]
volume_types = cinder.volume_type_list(request)
self.fields['volume_type'].choices = [("", _("No volume type"))] + \
[(type.name, type.name)
for type in volume_types]
self.fields['availability_zone'].choices = \
project_forms.availability_zones(request)
def handle(self, request, data):
try:
az = data.get('availability_zone')
# Assume the user enters metadata as "key1=val1,key2=val2" and
# convert it to a dictionary.
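# e.g. "foo=bar, tier=gold" -> {"foo": "bar", "tier": "gold"}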
metadataDict = {}
metadata = data.get('metadata')
if metadata:
# str.replace returns a new string, so assign it back to actually strip spaces.
metadata = metadata.replace(" ", "")
for item in metadata.split(','):
key, value = item.split('=')
metadataDict[key] = value
cinder.volume_manage(request,
host=data['host'],
identifier=data['identifier'],
id_type=data['id_type'],
name=data['name'],
description=data['description'],
volume_type=data['volume_type'],
availability_zone=az,
metadata=metadataDict,
bootable=data['bootable'])
# for success message, use identifier if user does not
# provide a volume name
volume_name = data['name']
if not volume_name:
volume_name = data['identifier']
messages.success(
request,
_('Successfully sent the request to manage volume: %s')
% volume_name)
return True
except Exception:
redirect = reverse("horizon:admin:volumes:index")
exceptions.handle(request, _("Unable to manage volume."),
redirect=redirect)
class UnmanageVolume(forms.SelfHandlingForm):
name = forms.CharField(label=_("Volume Name"),
required=False,
widget=forms.TextInput(
attrs={'readonly': 'readonly'}))
host = forms.CharField(label=_("Host"),
required=False,
widget=forms.TextInput(
attrs={'readonly': 'readonly'}))
volume_id = forms.CharField(label=_("ID"),
required=False,
widget=forms.TextInput(
attrs={'readonly': 'readonly'}))
def handle(self, request, data):
try:
cinder.volume_unmanage(request, self.initial['volume_id'])
messages.success(
request,
_('Successfully sent the request to unmanage volume: %s')
% data['name'])
return True
except Exception:
redirect = reverse("horizon:admin:volumes:index")
exceptions.handle(request, _("Unable to unmanage volume."),
redirect=redirect)
class CreateVolumeType(forms.SelfHandlingForm):
name = forms.CharField(max_length=255, label=_("Name"))
def clean_name(self):
cleaned_name = self.cleaned_data['name']
if len(cleaned_name.strip()) == 0:
raise ValidationError(_('Volume type name can not be empty.'))
return cleaned_name
def handle(self, request, data):
try:
volume_type = cinder.volume_type_create(request,
data['name'])
messages.success(request, _('Successfully created volume type: %s')
% data['name'])
return volume_type
except Exception:
redirect = reverse("horizon:admin:volumes:index")
exceptions.handle(request,
_('Unable to create volume type.'),
redirect=redirect)
class UpdateStatus(forms.SelfHandlingForm):
status = forms.ChoiceField(label=_("Status"))
def __init__(self, request, *args, **kwargs):
super(UpdateStatus, self).__init__(request, *args, **kwargs)
# This set of states was culled from cinder's admin_actions.py
self.fields['status'].choices = (
('attaching', _('Attaching')),
('available', _('Available')),
('creating', _('Creating')),
('deleting', _('Deleting')),
('detaching', _('Detaching')),
('error', _('Error')),
('error_deleting', _('Error Deleting')),
('in-use', _('In Use')),
)
def handle(self, request, data):
# Obtain the localized status for including in the message
for choice in self.fields['status'].choices:
if choice[0] == data['status']:
new_status = choice[1]
break
else:
new_status = data['status']
try:
cinder.volume_reset_state(request,
self.initial['volume_id'],
data['status'])
messages.success(request,
_('Successfully updated volume status to "%s".') %
new_status)
return True
except Exception:
redirect = reverse("horizon:admin:volumes:index")
exceptions.handle(request,
_('Unable to update volume status to "%s".') %
new_status, redirect=redirect)
class CreateQosSpec(forms.SelfHandlingForm):
name = forms.CharField(max_length=255, label=_("Name"))
consumer = forms.ChoiceField(label=_("Consumer"),
choices=cinder.CONSUMER_CHOICES)
def handle(self, request, data):
try:
qos_spec = cinder.qos_spec_create(request,
data['name'],
{'consumer': data['consumer']})
messages.success(request,
_('Successfully created QoS Spec: %s')
% data['name'])
return qos_spec
except Exception:
redirect = reverse("horizon:admin:volumes:index")
exceptions.handle(request,
_('Unable to create QoS Spec.'),
redirect=redirect)
|
|
# Author: Eric Larson <larson.eric.d@gmail.com>
#
# License: BSD (3-clause)
import os.path as op
import numpy as np
from numpy.testing import (assert_allclose, assert_array_less,
assert_array_equal)
from scipy.interpolate import interp1d
from scipy.spatial.distance import cdist
import pytest
from mne import pick_types, pick_info
from mne.forward._compute_forward import _MAG_FACTOR
from mne.io import (read_raw_fif, read_raw_artemis123, read_raw_ctf, read_info,
RawArray, read_raw_kit)
from mne.io.constants import FIFF
from mne.chpi import (compute_chpi_amplitudes, compute_chpi_locs,
compute_head_pos, _setup_ext_proj,
_chpi_locs_to_times_dig, _compute_good_distances,
extract_chpi_locs_ctf, head_pos_to_trans_rot_t,
read_head_pos, write_head_pos, filter_chpi,
get_chpi_info, _get_hpi_initial_fit,
extract_chpi_locs_kit)
from mne.datasets import testing
from mne.simulation import add_chpi
from mne.transforms import rot_to_quat, _angle_between_quats
from mne.utils import catch_logging, assert_meg_snr, verbose
from mne.viz import plot_head_positions
base_dir = op.join(op.dirname(__file__), '..', 'io', 'tests', 'data')
ctf_fname = op.join(base_dir, 'test_ctf_raw.fif')
hp_fif_fname = op.join(base_dir, 'test_chpi_raw_sss.fif')
hp_fname = op.join(base_dir, 'test_chpi_raw_hp.txt')
raw_fname = op.join(base_dir, 'test_raw.fif')
data_path = testing.data_path(download=False)
sample_fname = op.join(
data_path, 'MEG', 'sample', 'sample_audvis_trunc_raw.fif')
chpi_fif_fname = op.join(data_path, 'SSS', 'test_move_anon_raw.fif')
pos_fname = op.join(data_path, 'SSS', 'test_move_anon_raw.pos')
sss_fif_fname = op.join(data_path, 'SSS', 'test_move_anon_raw_sss.fif')
sss_hpisubt_fname = op.join(data_path, 'SSS', 'test_move_anon_hpisubt_raw.fif')
chpi5_fif_fname = op.join(data_path, 'SSS', 'chpi5_raw.fif')
chpi5_pos_fname = op.join(data_path, 'SSS', 'chpi5_raw_mc.pos')
ctf_chpi_fname = op.join(data_path, 'CTF', 'testdata_ctf_mc.ds')
ctf_chpi_pos_fname = op.join(data_path, 'CTF', 'testdata_ctf_mc.pos')
art_fname = op.join(data_path, 'ARTEMIS123', 'Artemis_Data_2017-04-04' +
'-15h-44m-22s_Motion_Translation-z.bin')
art_mc_fname = op.join(data_path, 'ARTEMIS123', 'Artemis_Data_2017-04-04' +
'-15h-44m-22s_Motion_Translation-z_mc.pos')
con_fname = op.join(data_path, 'KIT', 'MQKIT_125_2sec.con')
mrk_fname = op.join(data_path, 'KIT', 'MQKIT_125.mrk')
elp_fname = op.join(data_path, 'KIT', 'MQKIT_125.elp')
hsp_fname = op.join(data_path, 'KIT', 'MQKIT_125.hsp')
berlin_fname = op.join(data_path, 'KIT', 'data_berlin.con')
@testing.requires_testing_data
def test_chpi_adjust():
"""Test cHPI logging and adjustment."""
raw = read_raw_fif(chpi_fif_fname, allow_maxshield='yes')
with catch_logging() as log:
_get_hpi_initial_fit(raw.info, adjust=True, verbose='debug')
get_chpi_info(raw.info, on_missing='raise', verbose='debug')
# Ran MaxFilter (with -list, -v, -movecomp, etc.), and got:
msg = ['HPIFIT: 5 coils digitized in order 5 1 4 3 2',
'HPIFIT: 3 coils accepted: 1 2 4',
'Hpi coil moments (3 5):',
'2.08542e-15 -1.52486e-15 -1.53484e-15',
'2.14516e-15 2.09608e-15 7.30303e-16',
'-3.2318e-16 -4.25666e-16 2.69997e-15',
'5.21717e-16 1.28406e-15 1.95335e-15',
'1.21199e-15 -1.25801e-19 1.18321e-15',
'HPIFIT errors: 0.3, 0.3, 5.3, 0.4, 3.2 mm.',
'HPI consistency of isotrak and hpifit is OK.',
'HP fitting limits: err = 5.0 mm, gval = 0.980.',
'Using 5 HPI coils: 83 143 203 263 323 Hz', # actually came earlier
]
log = log.getvalue().splitlines()
assert set(log) == set(msg), '\n' + '\n'.join(set(msg) - set(log))
# Then took the raw file, did this:
raw.info['dig'][5]['r'][2] += 1.
# And checked the result in MaxFilter, which changed the logging as:
msg = msg[:8] + [
'HPIFIT errors: 0.3, 0.3, 5.3, 999.7, 3.2 mm.',
'Note: HPI coil 3 isotrak is adjusted by 5.3 mm!',
'Note: HPI coil 5 isotrak is adjusted by 3.2 mm!'] + msg[-2:]
with catch_logging() as log:
_get_hpi_initial_fit(raw.info, adjust=True, verbose='debug')
get_chpi_info(raw.info, on_missing='raise', verbose='debug')
log = log.getvalue().splitlines()
assert set(log) == set(msg), '\n' + '\n'.join(set(msg) - set(log))
@testing.requires_testing_data
def test_read_write_head_pos(tmpdir):
"""Test reading and writing head position quaternion parameters."""
temp_name = op.join(str(tmpdir), 'temp.pos')
# This isn't a 100% valid quat matrix but it should be okay for tests
head_pos_rand = np.random.RandomState(0).randn(20, 10)
# This one is valid
head_pos_read = read_head_pos(pos_fname)
for head_pos_orig in (head_pos_rand, head_pos_read):
write_head_pos(temp_name, head_pos_orig)
head_pos = read_head_pos(temp_name)
assert_allclose(head_pos_orig, head_pos, atol=1e-3)
# Degenerate cases
pytest.raises(TypeError, write_head_pos, 0, head_pos_read) # not filename
pytest.raises(ValueError, write_head_pos, temp_name, 'foo') # not array
pytest.raises(ValueError, write_head_pos, temp_name, head_pos_read[:, :9])
pytest.raises(TypeError, read_head_pos, 0)
pytest.raises(IOError, read_head_pos, temp_name + 'foo')
@testing.requires_testing_data
def test_hpi_info(tmpdir):
"""Test getting HPI info."""
temp_name = op.join(str(tmpdir), 'temp_raw.fif')
for fname in (chpi_fif_fname, sss_fif_fname):
raw = read_raw_fif(fname, allow_maxshield='yes').crop(0, 0.1)
assert len(raw.info['hpi_subsystem']) > 0
raw.save(temp_name, overwrite=True)
info = read_info(temp_name)
assert len(info['hpi_subsystem']) == len(raw.info['hpi_subsystem'])
# test get_chpi_info()
info = read_info(chpi_fif_fname)
hpi_freqs, stim_ch_idx, hpi_on_codes = get_chpi_info(info)
assert_allclose(hpi_freqs, np.array([83., 143., 203., 263., 323.]))
assert stim_ch_idx == 378
assert_allclose(hpi_on_codes, np.array([256, 512, 1024, 2048, 4096]))
# test get_chpi_info() if no proper cHPI info is available
info['hpi_subsystem'] = None
info['hpi_meas'] = []
info['hpi_results'] = []
with pytest.raises(ValueError, match='No appropriate cHPI information'):
get_chpi_info(info)
with pytest.warns(RuntimeWarning, match='No appropriate cHPI information'):
get_chpi_info(info, on_missing='warn')
hpi_freqs, stim_ch_idx, hpi_on_codes = get_chpi_info(info,
on_missing='ignore')
assert_array_equal([], hpi_freqs)
assert stim_ch_idx is None
assert_array_equal([], hpi_on_codes)
def _assert_quats(actual, desired, dist_tol=0.003, angle_tol=5., err_rtol=0.5,
gof_rtol=0.001, vel_atol=2e-3): # 2 mm/s
"""Compare estimated cHPI positions."""
__tracebackhide__ = True
trans_est, rot_est, t_est = head_pos_to_trans_rot_t(actual)
trans, rot, t = head_pos_to_trans_rot_t(desired)
quats_est = rot_to_quat(rot_est)
gofs, errs, vels = desired[:, 7:].T
gofs_est, errs_est, vels_est = actual[:, 7:].T
del actual, desired
# maxfilter produces some times that are implausibly large (weird)
if not np.isclose(t[0], t_est[0], atol=1e-1): # within 100 ms
raise AssertionError('Start times not within 100 ms: %0.3f != %0.3f'
% (t[0], t_est[0]))
use_mask = (t >= t_est[0]) & (t <= t_est[-1])
t = t[use_mask]
trans = trans[use_mask]
quats = rot_to_quat(rot)
quats = quats[use_mask]
gofs, errs, vels = gofs[use_mask], errs[use_mask], vels[use_mask]
# double-check our angle function
for q in (quats, quats_est):
angles = _angle_between_quats(q, q)
assert_allclose(angles, 0., atol=1e-5)
# limit translation difference between MF and our estimation
trans_est_interp = interp1d(t_est, trans_est, axis=0)(t)
distances = np.sqrt(np.sum((trans - trans_est_interp) ** 2, axis=1))
assert np.isfinite(distances).all()
arg_worst = np.argmax(distances)
assert distances[arg_worst] <= dist_tol, (
'@ %0.3f seconds: %0.3f > %0.3f mm'
% (t[arg_worst], 1000 * distances[arg_worst], 1000 * dist_tol))
# limit rotation difference between MF and our estimation
# (note that the interpolation will make this slightly worse)
quats_est_interp = interp1d(t_est, quats_est, axis=0)(t)
angles = 180 * _angle_between_quats(quats_est_interp, quats) / np.pi
arg_worst = np.argmax(angles)
assert angles[arg_worst] <= angle_tol, (
'@ %0.3f seconds: %0.3f > %0.3f deg'
% (t[arg_worst], angles[arg_worst], angle_tol))
# error calculation difference
errs_est_interp = interp1d(t_est, errs_est)(t)
assert_allclose(errs_est_interp, errs, rtol=err_rtol, atol=1e-3,
err_msg='err') # 1 mm
# gof calculation difference
gof_est_interp = interp1d(t_est, gofs_est)(t)
assert_allclose(gof_est_interp, gofs, rtol=gof_rtol, atol=1e-7,
err_msg='gof')
# velocity calculation difference
vel_est_interp = interp1d(t_est, vels_est)(t)
assert_allclose(vel_est_interp, vels, atol=vel_atol,
err_msg='velocity')
def _decimate_chpi(raw, decim=4):
"""Decimate raw data (with aliasing) in cHPI-fitting compatible way."""
raw_dec = RawArray(
raw._data[:, ::decim], raw.info, first_samp=raw.first_samp // decim)
raw_dec.info['sfreq'] /= decim
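# Fold each cHPI coil frequency to the alias it lands on at the reduced
# sampling rate, e.g. a 293 Hz coil sampled at 250 Hz aliases to
# mod(293, 250) = 43 Hz, which is already below the new Nyquist frequency.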
for coil in raw_dec.info['hpi_meas'][0]['hpi_coils']:
if coil['coil_freq'] > raw_dec.info['sfreq']:
coil['coil_freq'] = np.mod(coil['coil_freq'],
raw_dec.info['sfreq'])
if coil['coil_freq'] > raw_dec.info['sfreq'] / 2.:
coil['coil_freq'] = raw_dec.info['sfreq'] - coil['coil_freq']
return raw_dec
# A shortcut method for testing that chains all the steps
@verbose
def _calculate_chpi_positions(raw, t_step_min=0.01, t_step_max=1.,
t_window='auto', too_close='raise',
dist_limit=0.005, gof_limit=0.98,
ext_order=1, verbose=None):
chpi_amplitudes = compute_chpi_amplitudes(
raw, t_step_min=t_step_min, t_window=t_window,
ext_order=ext_order, verbose=verbose)
chpi_locs = compute_chpi_locs(
raw.info, chpi_amplitudes, t_step_max=t_step_max,
too_close=too_close, verbose=verbose)
head_pos = compute_head_pos(
raw.info, chpi_locs, dist_limit=dist_limit, gof_limit=gof_limit,
verbose=verbose)
return head_pos
@pytest.mark.slowtest
@testing.requires_testing_data
def test_calculate_chpi_positions_vv():
"""Test calculation of cHPI positions."""
# Check to make sure our fits match MF decently
mf_quats = read_head_pos(pos_fname)
raw = read_raw_fif(chpi_fif_fname, allow_maxshield='yes')
raw.crop(0, 5).load_data()
# check "auto" t_window estimation at full sampling rate
with catch_logging() as log:
compute_chpi_amplitudes(raw, t_step_min=0.1, t_window='auto',
tmin=0, tmax=2, verbose=True)
assert '83.3 ms' in log.getvalue()
# This is a little hack (aliasing while decimating) to make it much faster
# for testing purposes only. We can relax this later if we find it breaks
# something.
raw_dec = _decimate_chpi(raw, 15)
with catch_logging() as log:
with pytest.warns(RuntimeWarning, match='cannot determine'):
py_quats = _calculate_chpi_positions(raw_dec, t_window=0.2,
verbose='debug')
log = log.getvalue()
assert '\nHPIFIT' in log
assert 'Computing 4385 HPI location guesses' in log
_assert_quats(py_quats, mf_quats, dist_tol=0.001, angle_tol=0.7)
# degenerate conditions
raw_no_chpi = read_raw_fif(sample_fname)
with pytest.raises(ValueError, match='No appropriate cHPI information'):
_calculate_chpi_positions(raw_no_chpi)
raw_bad = raw.copy()
del raw_bad.info['hpi_meas'][0]['hpi_coils'][0]['coil_freq']
with pytest.raises(ValueError, match='No appropriate cHPI information'):
_calculate_chpi_positions(raw_bad)
raw_bad = raw.copy()
for d in raw_bad.info['dig']:
if d['kind'] == FIFF.FIFFV_POINT_HPI:
d['coord_frame'] = FIFF.FIFFV_COORD_UNKNOWN
break
with pytest.raises(RuntimeError, match='coordinate frame incorrect'):
_calculate_chpi_positions(raw_bad)
for d in raw_bad.info['dig']:
if d['kind'] == FIFF.FIFFV_POINT_HPI:
d['coord_frame'] = FIFF.FIFFV_COORD_HEAD
d['r'] = np.ones(3)
raw_bad.crop(0, 1.)
picks = np.concatenate([np.arange(306, len(raw_bad.ch_names)),
pick_types(raw_bad.info, meg=True)[::16]])
raw_bad.pick_channels([raw_bad.ch_names[pick] for pick in picks])
with pytest.warns(RuntimeWarning, match='Discrepancy'):
with catch_logging() as log_file:
_calculate_chpi_positions(raw_bad, t_step_min=1., verbose=True)
# ignore HPI info header and [done] footer
assert '0/5 good HPI fits' in log_file.getvalue()
# half the rate cuts off cHPI coils
raw.info['lowpass'] /= 2.
with pytest.raises(RuntimeError, match='above the'):
_calculate_chpi_positions(raw)
@testing.requires_testing_data
@pytest.mark.slowtest
def test_calculate_chpi_positions_artemis():
"""Test on 5k artemis data."""
raw = read_raw_artemis123(art_fname, preload=True)
mf_quats = read_head_pos(art_mc_fname)
mf_quats[:, 8:] /= 100 # old code errantly had this factor
py_quats = _calculate_chpi_positions(raw, t_step_min=2., verbose='debug')
_assert_quats(
py_quats, mf_quats,
dist_tol=0.001, angle_tol=1., err_rtol=0.7, vel_atol=1e-2)
@testing.requires_testing_data
def test_initial_fit_redo():
"""Test that initial fits can be redone based on moments."""
raw = read_raw_fif(chpi_fif_fname, allow_maxshield='yes')
slopes = np.array(
[[c['slopes'] for c in raw.info['hpi_meas'][0]['hpi_coils']]])
amps = np.linalg.norm(slopes, axis=-1)
amps /= slopes.shape[-1]
assert_array_less(amps, 5e-11)
assert_array_less(1e-12, amps)
proj, _, _ = _setup_ext_proj(raw.info, ext_order=1)
chpi_amplitudes = dict(times=np.zeros(1), slopes=slopes, proj=proj)
chpi_locs = compute_chpi_locs(raw.info, chpi_amplitudes)
# check GOF
coil_gof = raw.info['hpi_results'][0]['goodness']
assert_allclose(chpi_locs['gofs'][0], coil_gof, atol=0.3) # XXX not good
# check moment
# XXX our forward and theirs differ by an extra mult by _MAG_FACTOR
coil_moment = raw.info['hpi_results'][0]['moments'] / _MAG_FACTOR
py_moment = chpi_locs['moments'][0]
coil_amp = np.linalg.norm(coil_moment, axis=-1, keepdims=True)
py_amp = np.linalg.norm(py_moment, axis=-1, keepdims=True)
assert_allclose(coil_amp, py_amp, rtol=0.2)
coil_ori = coil_moment / coil_amp
py_ori = py_moment / py_amp
angles = np.rad2deg(np.arccos(np.abs(np.sum(coil_ori * py_ori, axis=1))))
assert_array_less(angles, 20)
# check resulting dev_head_t
head_pos = compute_head_pos(raw.info, chpi_locs)
assert head_pos.shape == (1, 10)
nm_pos = raw.info['dev_head_t']['trans']
dist = 1000 * np.linalg.norm(nm_pos[:3, 3] - head_pos[0, 4:7])
assert 0.1 < dist < 2
angle = np.rad2deg(_angle_between_quats(
rot_to_quat(nm_pos[:3, :3]), head_pos[0, 1:4]))
assert 0.1 < angle < 2
gof = head_pos[0, 7]
assert_allclose(gof, 0.9999, atol=1e-4)
@testing.requires_testing_data
def test_calculate_head_pos_chpi_on_chpi5_in_one_second_steps():
"""Comparing estimated cHPI positions with MF results (one second)."""
# Check to make sure our fits match MF decently
mf_quats = read_head_pos(chpi5_pos_fname)
raw = read_raw_fif(chpi5_fif_fname, allow_maxshield='yes')
# the last two seconds contain a maxfilter problem!
# fiff file timing: 26. to 43. seconds
# maxfilter estimates a wrong head position for interval 16: 41.-42. sec
raw = _decimate_chpi(raw.crop(0., 10.).load_data(), decim=8)
# needs no interpolation, because maxfilter pos files come with 1 s steps
py_quats = _calculate_chpi_positions(
raw, t_step_min=1.0, t_step_max=1.0, t_window=1.0, verbose='debug')
_assert_quats(py_quats, mf_quats, dist_tol=0.002, angle_tol=1.2,
vel_atol=3e-3) # 3 mm/s
@pytest.mark.slowtest
@testing.requires_testing_data
def test_calculate_head_pos_chpi_on_chpi5_in_shorter_steps():
"""Comparing estimated cHPI positions with MF results (smaller steps)."""
# Check to make sure our fits match MF decently
mf_quats = read_head_pos(chpi5_pos_fname)
raw = read_raw_fif(chpi5_fif_fname, allow_maxshield='yes')
raw = _decimate_chpi(raw.crop(0., 5.).load_data(), decim=8)
with pytest.warns(RuntimeWarning, match='cannot determine'):
py_quats = _calculate_chpi_positions(
raw, t_step_min=0.1, t_step_max=0.1, t_window=0.1, verbose='debug')
# needs interpolation, tolerance must be increased
_assert_quats(py_quats, mf_quats, dist_tol=0.002, angle_tol=1.2,
vel_atol=0.02) # 2 cm/s is not great but probably fine
def test_simulate_calculate_head_pos_chpi():
"""Test calculation of cHPI positions with simulated data."""
# Read info dict from raw FIF file
info = read_info(raw_fname)
# Tune the info structure
chpi_channel = u'STI201'
ncoil = len(info['hpi_results'][0]['order'])
coil_freq = 10 + np.arange(ncoil) * 5
hpi_subsystem = {'event_channel': chpi_channel,
'hpi_coils': [{'event_bits': np.array([256, 0, 256, 256],
dtype=np.int32)},
{'event_bits': np.array([512, 0, 512, 512],
dtype=np.int32)},
{'event_bits':
np.array([1024, 0, 1024, 1024],
dtype=np.int32)},
{'event_bits':
np.array([2048, 0, 2048, 2048],
dtype=np.int32)}],
'ncoil': ncoil}
info['hpi_subsystem'] = hpi_subsystem
for fi, freq in enumerate(coil_freq):
info['hpi_meas'][0]['hpi_coils'][fi]['coil_freq'] = freq
picks = pick_types(info, meg=True, stim=True, eeg=False, exclude=[])
info['sfreq'] = 100. # this will speed it up a lot
info = pick_info(info, picks)
info['chs'][info['ch_names'].index('STI 001')]['ch_name'] = 'STI201'
info._update_redundant()
info['projs'] = []
info_trans = info['dev_head_t']['trans'].copy()
dev_head_pos_ini = np.concatenate([rot_to_quat(info_trans[:3, :3]),
info_trans[:3, 3]])
ez = np.array([0, 0, 1]) # Unit vector in z-direction of head coordinates
# Define some constants
duration = 10 # Time / s
# Quotient of head position sampling frequency
# and raw sampling frequency
head_pos_sfreq_quotient = 0.01
# Round number of head positions to the next integer
S = int(duration * info['sfreq'] * head_pos_sfreq_quotient)
assert S == 10
dz = 0.001  # Shift in z-direction is 1 mm for each step
dev_head_pos = np.zeros((S, 10))
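# Columns follow the head position convention used elsewhere in these tests:
# [t, q1, q2, q3, x, y, z, gof, err, velocity].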
dev_head_pos[:, 0] = np.arange(S) * info['sfreq'] * head_pos_sfreq_quotient
dev_head_pos[:, 1:4] = dev_head_pos_ini[:3]
dev_head_pos[:, 4:7] = dev_head_pos_ini[3:] + \
np.outer(np.arange(S) * dz, ez)
dev_head_pos[:, 7] = 1.0
# m/s
dev_head_pos[:, 9] = dz / (info['sfreq'] * head_pos_sfreq_quotient)
# Round number of samples to the next integer
raw_data = np.zeros((len(picks), int(duration * info['sfreq'] + 0.5)))
raw = RawArray(raw_data, info)
add_chpi(raw, dev_head_pos)
quats = _calculate_chpi_positions(
raw, t_step_min=raw.info['sfreq'] * head_pos_sfreq_quotient,
t_step_max=raw.info['sfreq'] * head_pos_sfreq_quotient, t_window=1.0)
_assert_quats(quats, dev_head_pos, dist_tol=0.001, angle_tol=1.,
vel_atol=4e-3) # 4 mm/s
def _calculate_chpi_coil_locs(raw, verbose):
"""Wrap to facilitate change diff."""
chpi_amplitudes = compute_chpi_amplitudes(raw, verbose=verbose)
chpi_locs = compute_chpi_locs(raw.info, chpi_amplitudes, verbose=verbose)
return _chpi_locs_to_times_dig(chpi_locs)
def _check_dists(info, cHPI_digs, n_bad=0, bad_low=0.02, bad_high=0.04):
__tracebackhide__ = True
orig = _get_hpi_initial_fit(info)
hpi_coil_distances = cdist(orig, orig)
new_pos = np.array([d['r'] for d in cHPI_digs])
mask, distances = _compute_good_distances(hpi_coil_distances, new_pos)
good_idx = np.where(mask)[0]
assert len(good_idx) >= 3
meds = np.empty(len(orig))
for ii in range(len(orig)):
idx = np.setdiff1d(good_idx, ii)
meds[ii] = np.median(distances[ii][idx])
meds = np.array(meds)
assert_array_less(meds[good_idx], 0.003)
bad_idx = np.where(~mask)[0]
if len(bad_idx):
bads = meds[bad_idx]
assert_array_less(bad_low, bads)
assert_array_less(bads, bad_high)
@pytest.mark.slowtest
@testing.requires_testing_data
def test_calculate_chpi_coil_locs_artemis():
"""Test computing just cHPI locations."""
raw = read_raw_fif(chpi_fif_fname, allow_maxshield='yes', preload=True)
# This is a little hack (aliasing while decimating) to make it much faster
# for testing purposes only. We can relax this later if we find it breaks
# something.
raw_dec = _decimate_chpi(raw, 15)
times, cHPI_digs = _calculate_chpi_coil_locs(raw_dec, verbose='debug')
# spot check
assert_allclose(times[0], 9., atol=1e-2)
assert_allclose(cHPI_digs[0][2]['r'],
[-0.01937833, 0.00346804, 0.06331209], atol=1e-3)
assert_allclose(cHPI_digs[0][2]['gof'], 0.9957, atol=1e-3)
assert_allclose(cHPI_digs[0][4]['r'],
[-0.0655, 0.0755, 0.0004], atol=3e-3)
assert_allclose(cHPI_digs[0][4]['gof'], 0.9323, atol=1e-3)
_check_dists(raw.info, cHPI_digs[0], n_bad=1)
# test on 5k artemis data
raw = read_raw_artemis123(art_fname, preload=True)
times, cHPI_digs = _calculate_chpi_coil_locs(raw, verbose='debug')
assert len(np.setdiff1d(times, raw.times + raw.first_time)) == 0
assert_allclose(times[5], 1.5, atol=1e-3)
assert_allclose(cHPI_digs[5][0]['gof'], 0.995, atol=5e-3)
assert_allclose(cHPI_digs[5][0]['r'],
[-0.0157, 0.0655, 0.0018], atol=1e-3)
_check_dists(raw.info, cHPI_digs[5])
coil_amplitudes = compute_chpi_amplitudes(raw)
with pytest.raises(ValueError, match='too_close'):
compute_chpi_locs(raw.info, coil_amplitudes, too_close='foo')
# ensure values are in a reasonable range
amps = np.linalg.norm(coil_amplitudes['slopes'], axis=-1)
amps /= coil_amplitudes['slopes'].shape[-1]
assert amps.shape == (len(coil_amplitudes['times']), 3)
assert_array_less(amps, 1e-11)
assert_array_less(1e-13, amps)
# with nan amplitudes (i.e., cHPI off) it should return an empty array,
# but still one that is 3D
coil_amplitudes['slopes'].fill(np.nan)
chpi_locs = compute_chpi_locs(raw.info, coil_amplitudes)
assert chpi_locs['rrs'].shape == (0, 3, 3)
pos = compute_head_pos(raw.info, chpi_locs)
assert pos.shape == (0, 10)
def assert_suppressed(new, old, suppressed, retained):
"""Assert that some frequencies are suppressed and others aren't."""
__tracebackhide__ = True
from scipy.signal import welch
picks = pick_types(new.info, meg='grad')
sfreq = new.info['sfreq']
new = new.get_data(picks)
old = old.get_data(picks)
f, new = welch(new, sfreq, 'hann', nperseg=1024)
_, old = welch(old, sfreq, 'hann', nperseg=1024)
new = np.median(new, axis=0)
old = np.median(old, axis=0)
for freqs, lim in ((suppressed, (10, 60)), (retained, (-3, 3))):
for freq in freqs:
fidx = np.argmin(np.abs(f - freq))
this_new = np.median(new[fidx])
this_old = np.median(old[fidx])
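# Express the power ratio as attenuation in dB: positive values mean the
# filtered data has less power at this frequency than the original.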
suppression = -10 * np.log10(this_new / this_old)
assert lim[0] < suppression < lim[1], freq
@testing.requires_testing_data
def test_chpi_subtraction_filter_chpi():
"""Test subtraction of cHPI signals."""
raw = read_raw_fif(chpi_fif_fname, allow_maxshield='yes', preload=True)
raw.info['bads'] = ['MEG0111']
raw.del_proj()
raw_orig = raw.copy().crop(0, 16)
with catch_logging() as log:
filter_chpi(raw, include_line=False, t_window=0.2, verbose=True)
log = log.getvalue()
assert 'No average EEG' not in log
assert '5 cHPI' in log
# MaxFilter doesn't do quite as well as our algorithm with the last bit
raw.crop(0, 16)
# remove cHPI status chans
raw_c = read_raw_fif(sss_hpisubt_fname).crop(0, 16).load_data()
raw_c.pick_types(
meg=True, eeg=True, eog=True, ecg=True, stim=True, misc=True)
assert_meg_snr(raw, raw_c, 143, 624)
# cHPI suppressed but not line freqs (or others)
assert_suppressed(raw, raw_orig, np.arange(83, 324, 60), [30, 60, 150])
raw = raw_orig.copy()
with catch_logging() as log:
filter_chpi(raw, include_line=True, t_window=0.2, verbose=True)
log = log.getvalue()
assert '5 cHPI' in log
assert '6 line' in log
# cHPI and line freqs suppressed
suppressed = np.sort(np.concatenate([
np.arange(83, 324, 60), np.arange(60, 301, 60),
]))
assert_suppressed(raw, raw_orig, suppressed, [30, 150])
# No HPI information
raw = read_raw_fif(sample_fname, preload=True)
raw_orig = raw.copy()
assert raw.info['line_freq'] is None
with pytest.raises(RuntimeError, match='line_freq.*consider setting it'):
filter_chpi(raw, t_window=0.2)
raw.info['line_freq'] = 60.
with pytest.raises(ValueError, match='No appropriate cHPI information'):
filter_chpi(raw, t_window=0.2)
# but this is allowed
with catch_logging() as log:
filter_chpi(raw, t_window='auto', allow_line_only=True, verbose=True)
log = log.getvalue()
assert '0 cHPI' in log
assert '1 line' in log
# Our one line freq suppressed but not others
assert_suppressed(raw, raw_orig, [60], [30, 45, 75])
# When MaxFilter downsamples, like::
# $ maxfilter -nosss -ds 2 -f test_move_anon_raw.fif \
# -o test_move_anon_ds2_raw.fif
# it can strip out some values of info, which we emulate here:
raw = read_raw_fif(chpi_fif_fname, allow_maxshield='yes')
raw = raw.crop(0, 1).load_data().resample(600., npad='auto')
raw.info['lowpass'] = 200.
del raw.info['maxshield']
del raw.info['hpi_results'][0]['moments']
del raw.info['hpi_subsystem']['event_channel']
with catch_logging() as log:
filter_chpi(raw, t_window='auto', verbose=True)
with pytest.raises(ValueError, match='must be > 0'):
filter_chpi(raw, t_window=-1)
assert '2 cHPI' in log.getvalue()
@testing.requires_testing_data
def test_calculate_head_pos_ctf():
"""Test extracting of cHPI positions from CTF data."""
raw = read_raw_ctf(ctf_chpi_fname)
chpi_locs = extract_chpi_locs_ctf(raw)
quats = compute_head_pos(raw.info, chpi_locs)
mc_quats = read_head_pos(ctf_chpi_pos_fname)
mc_quats[:, 9] /= 10000 # had old factor in there twice somehow...
_assert_quats(quats, mc_quats, dist_tol=0.004, angle_tol=2.5, err_rtol=1.,
vel_atol=7e-3) # 7 mm/s
plot_head_positions(quats, info=raw.info)
raw = read_raw_fif(ctf_fname)
with pytest.raises(RuntimeError, match='Could not find'):
extract_chpi_locs_ctf(raw)
@testing.requires_testing_data
def test_calculate_head_pos_kit():
"""Test calculation of head position using KIT data."""
raw = read_raw_kit(con_fname, mrk_fname, elp_fname, hsp_fname)
assert len(raw.info['hpi_results']) == 1
chpi_locs = extract_chpi_locs_kit(raw)
assert chpi_locs['rrs'].shape == (2, 5, 3)
assert_array_less(chpi_locs['gofs'], 1.)
assert_array_less(0.98, chpi_locs['gofs'])
quats = compute_head_pos(raw.info, chpi_locs)
assert quats.shape == (2, 10)
# plotting works
plot_head_positions(quats, info=raw.info)
raw_berlin = read_raw_kit(berlin_fname)
assert_allclose(raw_berlin.info['dev_head_t']['trans'], np.eye(4))
assert len(raw_berlin.info['hpi_results']) == 0
with pytest.raises(ValueError, match='Invalid value'):
extract_chpi_locs_kit(raw_berlin)
with pytest.raises(RuntimeError, match='not find appropriate'):
extract_chpi_locs_kit(raw_berlin, 'STI 014')
with pytest.raises(RuntimeError, match='no initial cHPI'):
compute_head_pos(raw_berlin.info, chpi_locs)
|
|
from libsbml import *
from peitho.errors_and_parsers.abc_sysbio.abcsysbio.relations import *
import os
import re
from peitho.errors_and_parsers.abc_sysbio.abcsysbio_parser.Writer import Writer
class OdeCUDAWriter(Writer):
def __init__(self, sbmlFileName, modelName="", inputPath="", outputPath=""):
Writer.__init__(self, sbmlFileName, modelName, inputPath, outputPath)
self.out_file=open(os.path.join(outputPath,self.parsedModel.name+".cu"),"w")
def mathMLConditionParserCuda(self, mathMLstring):
"""
Replaces 'and' and 'or' with 'and_' and 'or_' in a MathML string.
Returns the string with 'and' and 'or' replaced by 'and_' and 'or_'.
***** args *****
mathMLstring:
A MathML string.
"""
andString = re.compile("and")
orString = re.compile("or")
mathMLstring = andString.sub("and_", mathMLstring)
mathMLstring = orString.sub("or_", mathMLstring)
return mathMLstring
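# Illustrative example (hypothetical input): the plain substring substitution
# above turns "and(gt(S1,5),or(lt(S2,2),eq(S3,0)))" into
# "and_(gt(S1,5),or_(lt(S2,2),eq(S3,0)))", matching the and_/or_ macros that
# write() emits when events or rules are present.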
def write(self):
p = re.compile(r'\s')  # used later to strip whitespace from kinetic law strings
#Write number of parameters and species
self.out_file.write("#define NSPECIES " + str(self.parsedModel.numSpecies) + "\n")
self.out_file.write("#define NPARAM " + str(self.parsedModel.numGlobalParameters) + "\n")
self.out_file.write("#define NREACT " + str(self.parsedModel.numReactions) + "\n")
self.out_file.write("\n")
#The user-defined functions used in the model must be written in the file
numEvents = len(self.parsedModel.listOfEvents)
numRules = len(self.parsedModel.listOfRules)
num = numEvents+numRules
if num>0:
self.out_file.write("#define leq(a,b) a<=b\n")
self.out_file.write("#define neq(a,b) a!=b\n")
self.out_file.write("#define geq(a,b) a>=b\n")
self.out_file.write("#define lt(a,b) a<b\n")
self.out_file.write("#define gt(a,b) a>b\n")
self.out_file.write("#define eq(a,b) a==b\n")
self.out_file.write("#define and_(a,b) a&&b\n")
self.out_file.write("#define or_(a,b) a||b\n")
for i in range(0,len(self.parsedModel.listOfFunctions)):
self.out_file.write("__device__ float "+self.parsedModel.listOfFunctions[i].getId()+"(")
for j in range(0, self.parsedModel.listOfFunctions[i].getNumArguments()):
self.out_file.write("float "+self.parsedModel.functionArgument[i][j])
if(j<( self.parsedModel.listOfFunctions[i].getNumArguments()-1)):
self.out_file.write(",")
self.out_file.write("){\n return ")
self.out_file.write(self.parsedModel.functionBody[i])
self.out_file.write(";\n}\n")
self.out_file.write("\n")
self.out_file.write("struct myFex{\n __device__ void operator()(int *neq, double *t, double *y, double *ydot/*, void *otherData*/)\n {\n int tid = blockDim.x * blockIdx.x + threadIdx.x;\n")
numSpecies = len(self.parsedModel.species)
#write rules and events
for i in range(0,len(self.parsedModel.listOfRules)):
if self.parsedModel.listOfRules[i].isRate() == True:
self.out_file.write(" ")
if not(self.parsedModel.ruleVariable[i] in self.parsedModel.speciesId):
self.out_file.write(self.parsedModel.ruleVariable[i])
else:
string = "y["+repr(self.parsedModel.speciesId.index(self.parsedModel.ruleVariable[i]))+"]"
self.out_file.write(string)
self.out_file.write("=")
string = self.parsedModel.ruleFormula[i]
for q in range(0,len(self.parsedModel.speciesId)):
pq = re.compile(self.parsedModel.speciesId[q])
string=pq.sub('y['+repr(q)+']' ,string)
for q in range(0,len(self.parsedModel.parameterId)):
if (not(self.parsedModel.parameterId[q] in self.parsedModel.ruleVariable)):
flag = False
for r in range(0,len(self.parsedModel.eventVariable)):
if (self.parsedModel.parameterId[q] in self.parsedModel.eventVariable[r]):
flag = True
if flag==False:
pq = re.compile(self.parsedModel.parameterId[q])
string=pq.sub('tex2D(param_tex,'+repr(q)+',tid)' ,string)
self.out_file.write(string)
self.out_file.write(";\n")
for i in range(0,len(self.parsedModel.listOfEvents)):
self.out_file.write(" if( ")
#print EventCondition[i]
self.out_file.write(self.mathMLConditionParserCuda(self.parsedModel.eventCondition[i]))
self.out_file.write("){\n")
listOfAssignmentRules = self.parsedModel.listOfEvents[i].getListOfEventAssignments()
for j in range(0, len(listOfAssignmentRules)):
self.out_file.write(" ")
#self.out_file.write("float ")
if not(self.parsedModel.eventVariable[i][j] in self.parsedModel.speciesId):
self.out_file.write(self.parsedModel.eventVariable[i][j])
else:
string = "y["+repr(self.parsedModel.speciesId.index(self.parsedModel.eventVariable[i][j]))+"]"
self.out_file.write(string)
self.out_file.write("=")
string = self.parsedModel.eventFormula[i][j]
for q in range(0,len(self.parsedModel.speciesId)):
pq = re.compile(self.parsedModel.speciesId[q])
string=pq.sub('y['+repr(q)+']' ,string)
for q in range(0,len(self.parsedModel.parameterId)):
if (not(self.parsedModel.parameterId[q] in self.parsedModel.ruleVariable)):
flag = False
for r in range(0,len(self.parsedModel.eventVariable)):
if (self.parsedModel.parameterId[q] in self.parsedModel.eventVariable[r]):
flag = True
if flag==False:
pq = re.compile(self.parsedModel.parameterId[q])
string=pq.sub('tex2D(param_tex,'+repr(q)+',tid)' ,string)
self.out_file.write(string)
self.out_file.write(";\n")
self.out_file.write("}\n")
self.out_file.write("\n")
for i in range(0, len(self.parsedModel.listOfRules)):
if self.parsedModel.listOfRules[i].isAssignment():
self.out_file.write(" ")
if not(self.parsedModel.ruleVariable[i] in self.parsedModel.speciesId):
self.out_file.write("float ")
self.out_file.write(self.parsedModel.ruleVariable[i])
else:
string = "y["+repr(self.parsedModel.speciesId.index(self.parsedModel.ruleVariable[i]))+"]"
self.out_file.write(string)
self.out_file.write("=")
string = self.mathMLConditionParserCuda(self.parsedModel.ruleFormula[i])
for q in range(0,len(self.parsedModel.speciesId)):
pq = re.compile(self.parsedModel.speciesId[q])
string=pq.sub("y["+repr(q)+"]" ,string)
for q in range(0,len(self.parsedModel.parameterId)):
if (not(self.parsedModel.parameterId[q] in self.parsedModel.ruleVariable)):
flag = False
for r in range(0,len(self.parsedModel.eventVariable)):
if (self.parsedModel.parameterId[q] in self.parsedModel.eventVariable[r]):
flag = True
if flag==False:
pq = re.compile(self.parsedModel.parameterId[q])
x = "tex2D(param_tex,"+repr(q)+",tid)"
string=pq.sub(x,string)
self.out_file.write(string)
self.out_file.write(";\n")
self.out_file.write("\n\n")
#Write the derivatives
for i in range(self.parsedModel.numSpecies-1,-1, -1):
if (self.parsedModel.species[i].getConstant() == False and self.parsedModel.species[i].getBoundaryCondition() == False):
self.out_file.write(" ydot["+repr(i)+"]=")
if (self.parsedModel.species[i].isSetCompartment() == True):
self.out_file.write("(")
reactionWritten = False
for k in range(0,self.parsedModel.numReactions):
if(not self.parsedModel.stoichiometricMatrix[i][k]==0.0):
if(reactionWritten and self.parsedModel.stoichiometricMatrix[i][k]>0.0):
self.out_file.write("+")
reactionWritten = True
self.out_file.write(repr(self.parsedModel.stoichiometricMatrix[i][k]))
self.out_file.write("*(")
#test if reaction has a positive sign
#if(reactionWritten):
# if(stoichiometricMatrix[i][k]>0.0):
# self.out_file.write("+")
# else:
# self.out_file.write("-")
#reactionWritten = True
#test if reaction is 1.0; then omit multiplication term
#if(abs(stoichiometricMatrix[i][k]) == 1.0):
# self.out_file.write("(")
#else:
# self.out_file.write(repr(abs(stoichiometricMatrix[i][k])))
# self.out_file.write("*(")
string = self.parsedModel.kineticLaw[k]
for q in range(len(self.parsedModel.speciesId)-1,-1,-1):
pq = re.compile(self.parsedModel.speciesId[q])
string=pq.sub('y['+repr(q)+']' ,string)
for q in range(0,len(self.parsedModel.parameterId)):
if (not(self.parsedModel.parameterId[q] in self.parsedModel.ruleVariable)):
flag = False
for r in range(0,len(self.parsedModel.eventVariable)):
if (self.parsedModel.parameterId[q] in self.parsedModel.eventVariable[r]):
flag = True
if flag==False:
pq = re.compile(self.parsedModel.parameterId[q])
string=pq.sub('tex2D(param_tex,'+repr(q)+',tid)' ,string)
string=p.sub('',string)
self.out_file.write(string)
self.out_file.write(")")
if (self.parsedModel.species[i].isSetCompartment() == True):
self.out_file.write(")/")
mySpeciesCompartment = self.parsedModel.species[i].getCompartment()
for j in range(0, len(self.parsedModel.listOfParameter)):
if (self.parsedModel.listOfParameter[j].getId() == mySpeciesCompartment):
if (not(self.parsedModel.parameterId[j] in self.parsedModel.ruleVariable)):
flag = False
for r in range(0,len(self.parsedModel.eventVariable)):
if (self.parsedModel.parameterId[j] in self.parsedModel.eventVariable[r]):
flag = True
if flag==False:
self.out_file.write("tex2D(param_tex,"+repr(j)+",tid)"+";")
break
else:
self.out_file.write(self.parsedModel.parameterId[j]+";")
break
else:
self.out_file.write(";")
self.out_file.write("\n")
self.out_file.write("\n }")
self.out_file.write("\n};\n\n\n struct myJex{\n __device__ void operator()(int *neq, double *t, double *y, int ml, int mu, double *pd, int nrowpd/*, void *otherData*/){\n return; \n }\n};")
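# For reference, the .cu file generated above has roughly this shape (a
# sketch, not verbatim output): NSPECIES/NPARAM/NREACT #defines, optional
# comparison/logical macros and __device__ helper functions, a myFex struct
# whose operator() evaluates rules, events and the ydot[] derivatives using
# tex2D(param_tex, ...) parameter lookups, and an empty myJex struct.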
|
|
#! /usr/bin/env python
##################################################################################################################
# attention.py
#
# Jackie Lee
# jackylee@media.mit.edu
#
# Affective Computing Group, MIT Media Laboratory
# Special Thanks to Heymian Wong, Jon Wetzel
# Last modified on Aug. 9, 2011
#
# Requirement:
# MacOSX 10.6.4
# Using OpenCV 2.1
# (you could make your life easier by installing this -
# http://www.cs.colostate.edu/facerec/algorithms/support/OpenCV2.1_rev3291_MacOS10.6.pkg)
#
##################################################################################################################
import sys
import time
import os
import cv #try this first to see if your OpenCV is ok to go.
import socket
import serial
IPADDR = '10.10.100.254'
PORTNUM = 8899
PACKETDATA='220055'.decode('hex')
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, 0)
s.connect((IPADDR, PORTNUM))
s.send(PACKETDATA)
s.close()
ser = serial.Serial('/dev/tty.usbmodem1421', 9600) # Establish the connection on a specific port on Arduino
### Face detection constants
#Face movement constants
CAPTURING = 0 ## set 1 to enable saving JPGs into img/
FACE_MIN_SIZE = 70 ## the bigger, the more fps
FACE_MAX_MOVEMENT = 40
FACE_MAX_LIFE = 1
FACE_LR_MOVE_THRESH = 2
FACE_UD_MOVE_THRESH = 1
FACE_LR_STATE_CHANGE_THRESH = 1
FACE_UD_STATE_CHANGE_THRESH = 1
FACE_ALTERNATION_THRESH = 2
FACE_ONE_DIMENSION_THRESH = 2
FACE_STILL_THRESHOLD = 2
FACE_ALTERNATIONS_EXPIRE = 2
globt = 1
#light patch sales things
arrive = 10;
early = 150;
medium = 250;
late = 300;
mood = 0
#Face movement enumeration
OTHER = 0
STILL = 1
LEFT = 2
RIGHT = 3
UP = 4
DOWN = 5
i = 0
m = 0
#Color constant definitions
RED = cv.RGB(255,0,0)
GREEN = cv.RGB (0,220,0)
BLUE = cv.RGB (0,0,255)
YELLOW = cv.RGB(255,255,0);
ORANGE = cv.RGB(255,127,0);
MAGENTA = cv.RGB(255,0,255);
# other constants
scale = 1
cascade = None
storage = cv.CreateMemStorage(0)
cascade_name = "xml/haarcascade_frontalface_alt.xml"
min_size = (FACE_MIN_SIZE,FACE_MIN_SIZE)
image_scale = 1.3
haar_scale = 1.2
min_neighbors = 2
haar_flags = cv.CV_HAAR_DO_CANNY_PRUNING
age = 0
age1 = 0
age2 = 0
age3 = 0
age4 = 0
age5 = 0
ageavg = 0
agerate = 0
metric = 0
cmet = ""
att = 5
trackedFaces = []
IPL_DEPTH_8U = 8
gray = 0
small_img = 0
osName = os.name
fname_temp=""
### end of Face detection constants
### save as JPG for every 2 seconds
def saveAsJPG(img):
global fname_temp
lt = time.localtime(time.time())
if ((lt[5] %2) == 0):
fname = "%04d%02d%02d%02d%02d%02d" % (lt[0], lt[1], lt[2], lt[3], lt[4], lt[5])
if (fname != fname_temp):
print "frame saved at " + fname
cv.SaveImage("img/"+fname+".jpg",img)
fname_temp = fname
### end save as JPG
########## Face Class #############
class Face:
def __init__(self,age,width,height,xpt, ypt,life, att):
self.age = age;
self.width = width;
self.height = height;
self.xpt = xpt;
self.ypt = ypt;
self.life = life;
self.att = att;
#self.printFace();
self.updateEyes();
self.updateMouth();
self.state = OTHER;
self.lastState = self.state;
self.alternations = 0;
self.faceStill = 0;
self.stills = 0;
self.lefts = 0;
self.rights = 0
self.ups = 0;
self.downs = 0;
def updateFace(self, width, height, xpt, ypt,att):
turnDir = self.getTurnDir(self.xpt, xpt, self.ypt, ypt, self.width, width, self.height, height)
self.updateMoveState(turnDir)
#print turnDir
self.age = self.age + 1;
global age1
age1 = self.age;
self.width = width;
self.height = height;
self.xpt = xpt;
self.ypt = ypt;
self.life = 0;
self.updateEyes();
self.updateMouth();
#self.att=self.age;
def updateEyes(self):
self.eyeTopline = self.ypt + ((self.height*1)/3);
self.eyeBotline = self.ypt + ((self.height*1)/2);
self.eyeLeft1 = (self.xpt + (self.width/5),self.eyeTopline);
self.eyeLeft2 = (self.xpt + ((self.width*3)/8), self.eyeBotline);
self.eyeRight1 = (self.xpt + ((self.width*5)/8),self.eyeTopline);
self.eyeRight2 = (self.xpt + ((self.width*4)/5),self.eyeBotline);
def updateMouth(self):
self.mouthTopline = self.ypt + ((self.height*2)/3);
self.mouthBotline = self.ypt + self.height;
self.mouthTopLeft = (self.xpt + self.width/5, self.mouthTopline);
self.mouthBotRight = (self.xpt + (self.width*4)/5, self.mouthBotline);
def isShaking(self):
if (self.alternations < FACE_ALTERNATION_THRESH):
return False
else:
self.att-=1 #saying no
global mood
mood = self.att
if ((self.state == LEFT) or (self.state == RIGHT)):
return True
else:
return False
def isNodding(self):
if (self.alternations < FACE_ALTERNATION_THRESH):
return False
else:
self.att+=1 #saying yes
global mood
mood = self.att
if ((self.state == UP) or (self.state ==DOWN)):
return True
else:
return False
def isStill(self):
return (self.faceStill < FACE_STILL_THRESHOLD)
def updateMoveState(self, turnDir):
if (turnDir == OTHER):
self.faceStill += 1
self.state = OTHER
elif (turnDir == STILL):
if (self.state != STILL):
self.lastState = self.state
else:
self.faceStill = 0
self.state = STILL
self.stills += 1
if (self.stills > FACE_ALTERNATIONS_EXPIRE):
self.alternations = 0
self.stills = 0
elif (turnDir == RIGHT):
self.faceStill += 1
if (self.state == OTHER):
self.rights += 1
if (self.rights > FACE_LR_STATE_CHANGE_THRESH):
self.state = RIGHT
elif (self.state == RIGHT):
self.rights += 1
elif (self.state == LEFT):
self.rights += 1
if (self.rights > FACE_LR_STATE_CHANGE_THRESH):
self.state = RIGHT;
self.resetNonAltCounts()
self.alternations += 1
elif ((self.state == UP) or (self.state == DOWN)):
self.state = OTHER
self.resetCounts()
elif(self.state == STILL):
if (self.lastState == LEFT):
self.alternations += 1
self.state = RIGHT
elif (turnDir ==LEFT):
self.faceStill += 1
if (self.state == OTHER):
self.lefts += 1
if (self.lefts > FACE_LR_STATE_CHANGE_THRESH):
self.state = LEFT;
elif (self.state == RIGHT):
self.lefts += 1
if(self.lefts > FACE_LR_STATE_CHANGE_THRESH):
self.state = LEFT
self.resetNonAltCounts()
self.alternations += 1
elif (self.state == LEFT):
self.lefts += 1
elif ((self.state ==UP) or (self.state == DOWN)):
self.state = OTHER
self.resetCounts()
elif (self.state == STILL):
if (self.lastState == RIGHT):
self.alternations += 1
self.state = LEFT
elif (turnDir == UP):
self.faceStill += 1
if (self.state == OTHER):
self.ups += 1
if (self.ups > FACE_UD_STATE_CHANGE_THRESH):
self.state = UP
elif (self.state == DOWN):
self.ups += 1
if (self.ups > FACE_UD_STATE_CHANGE_THRESH):
self.state = UP
self.resetNonAltCounts()
self.alternations += 1
elif (self.state == UP):
self.ups += 1
elif ((self.state == LEFT) or (self.state == RIGHT)):
self.state = OTHER
self.resetCounts()
elif (self.state == STILL):
if (self.lastState == DOWN):
self.alternations += 1
self.state = UP
elif (turnDir == DOWN):
self.faceStill += 1
if (self.state == OTHER):
self.downs += 1
if (self.downs > FACE_UD_STATE_CHANGE_THRESH):
self.state = DOWN
elif (self.state == UP):
self.downs += 1
if (self.downs > FACE_UD_STATE_CHANGE_THRESH):
self.state = DOWN
self.resetNonAltCounts()
self.alternations += 1
elif (self.state == DOWN):
self.downs += 1
elif ((self.state == LEFT) or (self.state == RIGHT)):
self.state = OTHER
self.resetCounts()
elif (self.state == STILL):
if (self.lastState == UP):
self.alternations += 1
self.state = DOWN
def resetCounts(self):
self.others = 0
self.stills = 0
self.rights = 0
self.lefts = 0
self.ups = 0
self.downs = 0
self.alternations = 0
def resetNonAltCounts(self):
self.others = 0
self.stills = 0
self.rights = 0
self.lefts = 0
self.ups = 0
self.downs = 0
def getTurnDir(self, old_xpt, new_xpt, old_ypt, new_ypt, old_width, new_width, old_height, new_height):
old_x = (int (old_xpt + (old_width/2)))
new_x = (int (new_xpt + (new_width/2)))
old_y = (int (old_ypt + (old_height/2)))
new_y = (int (new_ypt + (new_height/2)))
xdir = STILL
ydir = STILL
if (new_x - old_x > FACE_LR_MOVE_THRESH):
xdir = RIGHT
if (new_x - old_x < -FACE_LR_MOVE_THRESH):
xdir = LEFT
if (new_y - old_y > FACE_UD_MOVE_THRESH):
ydir = DOWN
if (new_y - old_y < -FACE_UD_MOVE_THRESH):
ydir = UP
if (ydir == xdir):
return STILL
else:
if ((ydir != STILL) and (xdir !=STILL)):
if ((abs(new_x - old_x)) > (abs(new_y - old_y)/2)):
return xdir
else:
if (((abs(new_y - old_y)) - (abs(new_x - old_x))) > FACE_ONE_DIMENSION_THRESH):
return ydir
else:
return OTHER;
else:
if (xdir == STILL):
return ydir
else:
return xdir
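# Example (hypothetical numbers): if the face centre moves 5 px right and 0 px
# vertically between frames, only FACE_LR_MOVE_THRESH is exceeded, so
# getTurnDir returns RIGHT; if neither threshold is exceeded, both directions
# stay STILL and STILL is returned.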
def isTooOld(self):
if (self.life > FACE_MAX_LIFE):
return True;
else:
return False;
def updateLife(self):
self.life = self.life+1;
return self.life;
########## end of Face Class #############
#### Detect faces ######################
def detect_and_draw(img ,cascade):
t = cv.GetTickCount() ## start counter
cv.CvtColor( img, gray, cv.CV_BGR2GRAY )
cv.Resize( gray, small_img, cv.CV_INTER_LINEAR )
#Ages all trackedFaces
for f in trackedFaces:
f.updateLife()
#f.printFace();
#Remove expired faces
for f in trackedFaces:
if (f.isTooOld()):
trackedFaces.remove(f)
faces = cv.HaarDetectObjects( small_img, cascade, storage, haar_scale, min_neighbors, haar_flags, min_size )
drawline = 0
if faces:
#found a face
for ((x, y, w, h), n) in faces:
matchedFace = False;
pt1 = ( int(x*image_scale), int(y*image_scale))
pt2 = ( int((x+w)*image_scale), int((y+h)*image_scale) )
pt3 = ( int(x*image_scale)+int(((x+w)*image_scale-x*image_scale)/3), int(y*image_scale))
pt4 = ( int((x+w)*image_scale)-int(((x+w)*image_scale-x*image_scale)/3), int((y*image_scale)+int(((y+h)*image_scale)-int(y*image_scale))/3) )
#check if there are trackedFaces
if (len(trackedFaces) > 0):
#each face being tracked
for f in trackedFaces:
#the face is found (small movement) RIGHT ON THE MONEY!!!
if ((abs(f.xpt - pt1[0]) < FACE_MAX_MOVEMENT) and (abs(f.ypt - pt1[1]) < FACE_MAX_MOVEMENT)):
matchedFace = True;
f.updateFace(int(w*image_scale), int(h*image_scale), pt1[0], pt1[1], att);
mf = f;
break;
#if face not found, add a new face
if (matchedFace == False):
f = Face(0,int(w*image_scale), int(h*image_scale), pt1[0], pt1[1],0, att);
trackedFaces.append(f);
mf = f;
#No tracked faces: adding one
else:
f = Face(0,int (w*image_scale), int (h*image_scale), pt1[0], pt1[1],0, att);
trackedFaces.append(f);
mf = f;
#where to draw face and properties
if (mf.age > 5):
#draw attention line
lnpt1 = (int (mf.xpt*scale), int(mf.ypt*scale-5)-5)
if (mf.age > mf.width):
lnpt2 = (int (mf.xpt*scale+mf.width), int(mf.ypt*scale-5))
else:
lnpt2 = (int (mf.xpt*scale+mf.age), int(mf.ypt*scale-5))
cv.Rectangle(img, lnpt1, lnpt2, RED, 4, 8, 0) ## drawing bolded attention line
### draw eyes
cv.Rectangle(img, mf.eyeLeft1, mf.eyeLeft2, MAGENTA, 3,8,0)
cv.Rectangle(img, mf.eyeRight1, mf.eyeRight2, MAGENTA, 3,8,0)
#
### draw mouth
cv.Rectangle(img, mf.mouthTopLeft, mf.mouthBotRight, ORANGE, 3, 8, 0)
#
### draw face
cv.Rectangle( img, pt1, pt2, getColor(mf), 3, 8, 0 )
#cv.Rectangle( img, pt3, pt4, MAGENTA, 1, 8, 0 ) #forehead
drawline = mf.age
if(CAPTURING): saveAsJPG(img)
if (osName == "nt"): cv.Flip(img, img, 0)
cv.ShowImage ('Camera', img)
t = cv.GetTickCount() - t ## counter for FPS
#print "%i fps." % (cv.GetTickFrequency()*1000000./t) ## print FPS
global globt
globt= t;
#### end of Detect faces ######################
def getColor(mf):
if (mf.isNodding()): return GREEN
elif (mf.isShaking()): return RED
elif (mf.isStill()): return BLUE
else: return YELLOW
######### main program ############
if __name__ == '__main__':
#create window and move to screen position
cv.NamedWindow ('Camera', cv.CV_WINDOW_AUTOSIZE)
if len (sys.argv) == 1:
# no argument on the command line, try to use the camera
capture = cv.CreateCameraCapture (0)
#
### check that capture device is OK
if not capture:
print "Error opening capture device"
sys.exit (1)
#
### capture the 1st frame to get some properties of it
frame = cv.QueryFrame (capture)
#
### get size of the frame
frame_size = cv.GetSize (frame)
gray = cv.CreateImage( frame_size, 8, 1 )
small_img = cv.CreateImage( ( int( frame_size[0]/image_scale),int( frame_size[1]/image_scale)), 8, 1 )
cascade = cv.Load( cascade_name)
#
while 1: # do forever
i += 1
# capture the current image
frame = cv.QueryFrame (capture)
if frame is None:
# no image captured... end the processing
break
#
### check OS
if (osName == "nt"):
cv.Flip(frame, frame, 0)
else:
cv.Flip(frame, None, 1)
#
### detecting faces here
detect_and_draw(frame, cascade)
if i%30 == 0:
if (age1>age2): m+=1
else: m-=1
agerate = (age1 - 60 - age5)/4
age5 = age4
age4 = age3
age3 = age2
age2 = age1
ageavg = (age1+age2+age3+age4+age5)/5
metric = ageavg + agerate
print str(age1) + '\t' + str(ageavg) + '\t' + str(agerate) + '\t' + str(metric) + '\t' + str(mood)
if (mood<0): cmet = "negative"
elif (mood>0): cmet = "positive"
# addressing information of target
IPADDR = '10.10.100.254'
PORTNUM = 8899
# enter the data content of the UDP packet as hex
if (age1 < arrive): color = "purple"
elif ((age1 > arrive) & (age1 <= early)):
if (cmet == "positive"):
color = "blue"
age1 = age1+30
elif (cmet== "negative"):
color = "red"
age1 = age1-10
else: color = "yellow"
elif ((age1 > early) & (age1 <= medium)):
if (cmet == "positive"):
color = "green"
age1 = age1+30
elif (cmet == "negative"):
color = "red"
age1 = age1-10
else: color = "blue"
elif ((age1 > medium) & (age1 <= late)):
if (cmet == "positive"):
color = "green"
age1 = age1+30
elif (cmet == "negative"):
color = "red"
age1 = age1-10
else: color = "green"
else: color = "purple"
if (color=="purple"):PACKETDATA='20bf55'.decode('hex')
elif (color=="blue"):PACKETDATA='20ff55'.decode('hex')
elif (color=="yellow"):PACKETDATA='207f55'.decode('hex')
elif (color=="green"):PACKETDATA='206f55'.decode('hex')
elif (color== "orange"):PACKETDATA='208f55'.decode('hex')
elif (color== "red"):PACKETDATA='209f55'.decode('hex')
else: PACKETDATA='209f55'.decode('hex')
# initialize a socket, think of it as a cable
# SOCK_DGRAM specifies that this is UDP
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, 0)
# connect the socket, think of it as connecting the cable to the address location
s.connect((IPADDR, PORTNUM))
# send the command
s.send(PACKETDATA)
# close the socket
s.close()
time.sleep(1.0)
### handle key events
k = cv.WaitKey (5)
if k % 0x100 == 27:
# user has pressed the ESC key, so exit
IPADDR = '10.10.100.254'
PORTNUM = 8899
PACKETDATA='210055'.decode('hex')
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, 0)
s.connect((IPADDR, PORTNUM))
s.send(PACKETDATA)
s.close()
cv.DestroyWindow('Camera');
break
|
|
# Copyright 2012 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import webob.exc
from cinder.api.contrib import services
from cinder.api import extensions
from cinder import context
from cinder import db
from cinder import exception
from cinder.openstack.common import timeutils
from cinder import policy
from cinder import test
from cinder.tests.api import fakes
from datetime import datetime
fake_services_list = [{'binary': 'cinder-scheduler',
'host': 'host1',
'availability_zone': 'cinder',
'id': 1,
'disabled': True,
'updated_at': datetime(2012, 10, 29, 13, 42, 2),
'created_at': datetime(2012, 9, 18, 2, 46, 27),
'disabled_reason': 'test1'},
{'binary': 'cinder-volume',
'host': 'host1',
'availability_zone': 'cinder',
'id': 2,
'disabled': True,
'updated_at': datetime(2012, 10, 29, 13, 42, 5),
'created_at': datetime(2012, 9, 18, 2, 46, 27),
'disabled_reason': 'test2'},
{'binary': 'cinder-scheduler',
'host': 'host2',
'availability_zone': 'cinder',
'id': 3,
'disabled': False,
'updated_at': datetime(2012, 9, 19, 6, 55, 34),
'created_at': datetime(2012, 9, 18, 2, 46, 28),
'disabled_reason': ''},
{'binary': 'cinder-volume',
'host': 'host2',
'availability_zone': 'cinder',
'id': 4,
'disabled': True,
'updated_at': datetime(2012, 9, 18, 8, 3, 38),
'created_at': datetime(2012, 9, 18, 2, 46, 28),
'disabled_reason': 'test4'},
]
class FakeRequest(object):
environ = {"cinder.context": context.get_admin_context()}
GET = {}
# NOTE(uni): deprecating service request key, binary takes precedence
# Still keeping service key here for API compatibility sake.
class FakeRequestWithService(object):
environ = {"cinder.context": context.get_admin_context()}
GET = {"service": "cinder-volume"}
class FakeRequestWithBinary(object):
environ = {"cinder.context": context.get_admin_context()}
GET = {"binary": "cinder-volume"}
class FakeRequestWithHost(object):
environ = {"cinder.context": context.get_admin_context()}
GET = {"host": "host1"}
# NOTE(uni): deprecating service request key, binary takes precedence
# Still keeping service key here for API compatibility sake.
class FakeRequestWithHostService(object):
environ = {"cinder.context": context.get_admin_context()}
GET = {"host": "host1", "service": "cinder-volume"}
class FakeRequestWithHostBinary(object):
environ = {"cinder.context": context.get_admin_context()}
GET = {"host": "host1", "binary": "cinder-volume"}
def fake_service_get_all(context):
return fake_services_list
def fake_service_get_by_host_binary(context, host, binary):
for service in fake_services_list:
if service['host'] == host and service['binary'] == binary:
return service
return None
def fake_service_get_by_id(value):
for service in fake_services_list:
if service['id'] == value:
return service
return None
def fake_service_update(context, service_id, values):
service = fake_service_get_by_id(service_id)
if service is None:
raise exception.ServiceNotFound(service_id=service_id)
else:
return {'host': 'host1', 'service': 'cinder-volume',
'disabled': values['disabled']}
def fake_policy_enforce(context, action, target):
pass
def fake_utcnow():
return datetime(2012, 10, 29, 13, 42, 11)
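# Note: the services controller reports a service as 'up' or 'down' by
# comparing its updated_at timestamp against the stubbed utcnow() above, which
# is why the host1 entries (updated at 13:42) are expected 'up' and the host2
# entries (last updated in September) are expected 'down' below.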
class ServicesTest(test.TestCase):
def setUp(self):
super(ServicesTest, self).setUp()
self.stubs.Set(db, "service_get_all", fake_service_get_all)
self.stubs.Set(timeutils, "utcnow", fake_utcnow)
self.stubs.Set(db, "service_get_by_args",
fake_service_get_by_host_binary)
self.stubs.Set(db, "service_update", fake_service_update)
self.stubs.Set(policy, "enforce", fake_policy_enforce)
self.context = context.get_admin_context()
self.ext_mgr = extensions.ExtensionManager()
self.ext_mgr.extensions = {}
self.controller = services.ServiceController(self.ext_mgr)
def test_services_list(self):
req = FakeRequest()
res_dict = self.controller.index(req)
response = {'services': [{'binary': 'cinder-scheduler',
'host': 'host1', 'zone': 'cinder',
'status': 'disabled', 'state': 'up',
'updated_at': datetime(
2012, 10, 29, 13, 42, 2)},
{'binary': 'cinder-volume',
'host': 'host1', 'zone': 'cinder',
'status': 'disabled', 'state': 'up',
'updated_at': datetime(
2012, 10, 29, 13, 42, 5)},
{'binary': 'cinder-scheduler',
'host': 'host2',
'zone': 'cinder',
'status': 'enabled', 'state': 'down',
'updated_at': datetime(
2012, 9, 19, 6, 55, 34)},
{'binary': 'cinder-volume',
'host': 'host2',
'zone': 'cinder',
'status': 'disabled', 'state': 'down',
'updated_at': datetime(
2012, 9, 18, 8, 3, 38)}]}
self.assertEqual(res_dict, response)
def test_services_detail(self):
self.ext_mgr.extensions['os-extended-services'] = True
self.controller = services.ServiceController(self.ext_mgr)
req = FakeRequest()
res_dict = self.controller.index(req)
response = {'services': [{'binary': 'cinder-scheduler',
'host': 'host1', 'zone': 'cinder',
'status': 'disabled', 'state': 'up',
'updated_at': datetime(
2012, 10, 29, 13, 42, 2),
'disabled_reason': 'test1'},
{'binary': 'cinder-volume',
'host': 'host1', 'zone': 'cinder',
'status': 'disabled', 'state': 'up',
'updated_at': datetime(
2012, 10, 29, 13, 42, 5),
'disabled_reason': 'test2'},
{'binary': 'cinder-scheduler',
'host': 'host2',
'zone': 'cinder',
'status': 'enabled', 'state': 'down',
'updated_at': datetime(
2012, 9, 19, 6, 55, 34),
'disabled_reason': ''},
{'binary': 'cinder-volume',
'host': 'host2',
'zone': 'cinder',
'status': 'disabled', 'state': 'down',
'updated_at': datetime(
2012, 9, 18, 8, 3, 38),
'disabled_reason': 'test4'}]}
self.assertEqual(res_dict, response)
def test_services_list_with_host(self):
req = FakeRequestWithHost()
res_dict = self.controller.index(req)
response = {'services': [{'binary': 'cinder-scheduler',
'host': 'host1',
'zone': 'cinder',
'status': 'disabled', 'state': 'up',
'updated_at': datetime(2012, 10,
29, 13, 42, 2)},
{'binary': 'cinder-volume',
'host': 'host1',
'zone': 'cinder',
'status': 'disabled', 'state': 'up',
'updated_at': datetime(2012, 10, 29,
13, 42, 5)}]}
self.assertEqual(res_dict, response)
def test_services_detail_with_host(self):
self.ext_mgr.extensions['os-extended-services'] = True
self.controller = services.ServiceController(self.ext_mgr)
req = FakeRequestWithHost()
res_dict = self.controller.index(req)
response = {'services': [{'binary': 'cinder-scheduler',
'host': 'host1',
'zone': 'cinder',
'status': 'disabled', 'state': 'up',
'updated_at': datetime(2012, 10,
29, 13, 42, 2),
'disabled_reason': 'test1'},
{'binary': 'cinder-volume',
'host': 'host1',
'zone': 'cinder',
'status': 'disabled', 'state': 'up',
'updated_at': datetime(2012, 10, 29,
13, 42, 5),
'disabled_reason': 'test2'}]}
self.assertEqual(res_dict, response)
def test_services_list_with_service(self):
req = FakeRequestWithService()
res_dict = self.controller.index(req)
response = {'services': [{'binary': 'cinder-volume',
'host': 'host1',
'zone': 'cinder',
'status': 'disabled',
'state': 'up',
'updated_at': datetime(2012, 10, 29,
13, 42, 5)},
{'binary': 'cinder-volume',
'host': 'host2',
'zone': 'cinder',
'status': 'disabled',
'state': 'down',
'updated_at': datetime(2012, 9, 18,
8, 3, 38)}]}
self.assertEqual(res_dict, response)
def test_services_detail_with_service(self):
self.ext_mgr.extensions['os-extended-services'] = True
self.controller = services.ServiceController(self.ext_mgr)
req = FakeRequestWithService()
res_dict = self.controller.index(req)
response = {'services': [{'binary': 'cinder-volume',
'host': 'host1',
'zone': 'cinder',
'status': 'disabled',
'state': 'up',
'updated_at': datetime(2012, 10, 29,
13, 42, 5),
'disabled_reason': 'test2'},
{'binary': 'cinder-volume',
'host': 'host2',
'zone': 'cinder',
'status': 'disabled',
'state': 'down',
'updated_at': datetime(2012, 9, 18,
8, 3, 38),
'disabled_reason': 'test4'}]}
self.assertEqual(res_dict, response)
def test_services_list_with_binary(self):
req = FakeRequestWithBinary()
res_dict = self.controller.index(req)
response = {'services': [{'binary': 'cinder-volume',
'host': 'host1',
'zone': 'cinder',
'status': 'disabled',
'state': 'up',
'updated_at': datetime(2012, 10, 29,
13, 42, 5)},
{'binary': 'cinder-volume',
'host': 'host2',
'zone': 'cinder',
'status': 'disabled',
'state': 'down',
'updated_at': datetime(2012, 9, 18,
8, 3, 38)}]}
self.assertEqual(res_dict, response)
def test_services_detail_with_binary(self):
self.ext_mgr.extensions['os-extended-services'] = True
self.controller = services.ServiceController(self.ext_mgr)
req = FakeRequestWithBinary()
res_dict = self.controller.index(req)
response = {'services': [{'binary': 'cinder-volume',
'host': 'host1',
'zone': 'cinder',
'status': 'disabled',
'state': 'up',
'updated_at': datetime(2012, 10, 29,
13, 42, 5),
'disabled_reason': 'test2'},
{'binary': 'cinder-volume',
'host': 'host2',
'zone': 'cinder',
'status': 'disabled',
'state': 'down',
'updated_at': datetime(2012, 9, 18,
8, 3, 38),
'disabled_reason': 'test4'}]}
self.assertEqual(res_dict, response)
def test_services_list_with_host_service(self):
req = FakeRequestWithHostService()
res_dict = self.controller.index(req)
response = {'services': [{'binary': 'cinder-volume',
'host': 'host1',
'zone': 'cinder',
'status': 'disabled',
'state': 'up',
'updated_at': datetime(2012, 10, 29,
13, 42, 5)}]}
self.assertEqual(res_dict, response)
def test_services_detail_with_host_service(self):
self.ext_mgr.extensions['os-extended-services'] = True
self.controller = services.ServiceController(self.ext_mgr)
req = FakeRequestWithHostService()
res_dict = self.controller.index(req)
response = {'services': [{'binary': 'cinder-volume',
'host': 'host1',
'zone': 'cinder',
'status': 'disabled',
'state': 'up',
'updated_at': datetime(2012, 10, 29,
13, 42, 5),
'disabled_reason': 'test2'}]}
self.assertEqual(res_dict, response)
def test_services_list_with_host_binary(self):
req = FakeRequestWithHostBinary()
res_dict = self.controller.index(req)
response = {'services': [{'binary': 'cinder-volume',
'host': 'host1',
'zone': 'cinder',
'status': 'disabled',
'state': 'up',
'updated_at': datetime(2012, 10, 29,
13, 42, 5)}]}
self.assertEqual(res_dict, response)
def test_services_detail_with_host_binary(self):
self.ext_mgr.extensions['os-extended-services'] = True
self.controller = services.ServiceController(self.ext_mgr)
req = FakeRequestWithHostBinary()
res_dict = self.controller.index(req)
response = {'services': [{'binary': 'cinder-volume',
'host': 'host1',
'zone': 'cinder',
'status': 'disabled',
'state': 'up',
'updated_at': datetime(2012, 10, 29,
13, 42, 5),
'disabled_reason': 'test2'}]}
self.assertEqual(res_dict, response)
def test_services_enable_with_service_key(self):
body = {'host': 'host1', 'service': 'cinder-volume'}
req = fakes.HTTPRequest.blank('/v1/fake/os-services/enable')
res_dict = self.controller.update(req, "enable", body)
self.assertEqual(res_dict['status'], 'enabled')
def test_services_enable_with_binary_key(self):
body = {'host': 'host1', 'binary': 'cinder-volume'}
req = fakes.HTTPRequest.blank('/v1/fake/os-services/enable')
res_dict = self.controller.update(req, "enable", body)
self.assertEqual(res_dict['status'], 'enabled')
def test_services_disable_with_service_key(self):
req = fakes.HTTPRequest.blank('/v1/fake/os-services/disable')
body = {'host': 'host1', 'service': 'cinder-volume'}
res_dict = self.controller.update(req, "disable", body)
self.assertEqual(res_dict['status'], 'disabled')
def test_services_disable_with_binary_key(self):
req = fakes.HTTPRequest.blank('/v1/fake/os-services/disable')
body = {'host': 'host1', 'binary': 'cinder-volume'}
res_dict = self.controller.update(req, "disable", body)
self.assertEqual(res_dict['status'], 'disabled')
def test_services_disable_log_reason(self):
self.ext_mgr.extensions['os-extended-services'] = True
self.controller = services.ServiceController(self.ext_mgr)
req = (
fakes.HTTPRequest.blank('v1/fake/os-services/disable-log-reason'))
body = {'host': 'host1',
'binary': 'cinder-scheduler',
'disabled_reason': 'test-reason',
}
res_dict = self.controller.update(req, "disable-log-reason", body)
self.assertEqual(res_dict['status'], 'disabled')
self.assertEqual(res_dict['disabled_reason'], 'test-reason')
def test_services_disable_log_reason_none(self):
self.ext_mgr.extensions['os-extended-services'] = True
self.controller = services.ServiceController(self.ext_mgr)
req = (
fakes.HTTPRequest.blank('v1/fake/os-services/disable-log-reason'))
body = {'host': 'host1',
'binary': 'cinder-scheduler',
'disabled_reason': None,
}
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.update,
req, "disable-log-reason", body)
def test_invalid_reason_field(self):
reason = ' '
self.assertFalse(self.controller._is_valid_as_reason(reason))
reason = 'a' * 256
self.assertFalse(self.controller._is_valid_as_reason(reason))
reason = 'it\'s a valid reason.'
self.assertTrue(self.controller._is_valid_as_reason(reason))
reason = None
self.assertFalse(self.controller._is_valid_as_reason(reason))
|
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Registry for layers and their parameters/variables.
This represents the collection of all layers in the approximate Fisher
information matrix to which a particular FisherBlock may belong. That is, we
might have several layer collections for one TF graph (if we have multiple K-FAC
optimizers being used, for example).
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import defaultdict
from collections import OrderedDict
from functools import partial
import math
import six
from tensorflow.contrib.kfac.python.ops import fisher_blocks as fb
from tensorflow.contrib.kfac.python.ops import loss_functions as lf
from tensorflow.contrib.kfac.python.ops import utils
from tensorflow.python.framework import ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.util import nest
# Names for various approximations that can be requested for Fisher blocks.
APPROX_KRONECKER_NAME = "kron"
APPROX_DIAGONAL_NAME = "diagonal"
APPROX_FULL_NAME = "full"
_GENERIC_APPROX_TO_BLOCK_TYPES = {
APPROX_FULL_NAME: fb.FullFB,
APPROX_DIAGONAL_NAME: fb.NaiveDiagonalFB,
}
_FULLY_CONNECTED_APPROX_TO_BLOCK_TYPES = {
APPROX_KRONECKER_NAME: fb.FullyConnectedKFACBasicFB,
APPROX_DIAGONAL_NAME: fb.FullyConnectedDiagonalFB,
}
_CONV2D_APPROX_TO_BLOCK_TYPES = {
APPROX_KRONECKER_NAME: fb.ConvKFCBasicFB,
APPROX_DIAGONAL_NAME: fb.ConvDiagonalFB,
}
APPROX_KRONECKER_INDEP_NAME = "kron_indep"
APPROX_KRONECKER_SERIES_1_NAME = "kron_series_1"
APPROX_KRONECKER_SERIES_2_NAME = "kron_series_2"
_FULLY_CONNECTED_MULTI_APPROX_TO_BLOCK_TYPES = {
APPROX_KRONECKER_INDEP_NAME: fb.FullyConnectedMultiIndepFB,
APPROX_KRONECKER_SERIES_1_NAME: partial(fb.FullyConnectedSeriesFB,
option=1),
APPROX_KRONECKER_SERIES_2_NAME: partial(fb.FullyConnectedSeriesFB,
option=2)
}
# Possible value for 'reuse' keyword argument. Sets 'reuse' to
# tf.get_variable_scope().reuse.
VARIABLE_SCOPE = "VARIABLE_SCOPE"
def ensure_sequence(obj):
"""If `obj` isn't a tuple or list, return a tuple containing `obj`."""
if isinstance(obj, (tuple, list)):
return obj
else:
return (obj,)
class LayerParametersDict(OrderedDict):
"""An OrderedDict where keys are Tensors or tuples of Tensors.
Ensures that no Tensor is associated with two different keys.
"""
def __init__(self, *args, **kwargs):
self._tensors = set()
super(LayerParametersDict, self).__init__(*args, **kwargs)
def __setitem__(self, key, value):
key = self._canonicalize_key(key)
tensors = key if isinstance(key, (tuple, list)) else (key,)
key_collisions = self._tensors.intersection(tensors)
if key_collisions:
raise ValueError("Key(s) already present: {}".format(key_collisions))
self._tensors.update(tensors)
super(LayerParametersDict, self).__setitem__(key, value)
def __delitem__(self, key):
key = self._canonicalize_key(key)
self._tensors.remove(key)
super(LayerParametersDict, self).__delitem__(key)
def __getitem__(self, key):
key = self._canonicalize_key(key)
return super(LayerParametersDict, self).__getitem__(key)
def __contains__(self, key):
key = self._canonicalize_key(key)
return super(LayerParametersDict, self).__contains__(key)
def _canonicalize_key(self, key):
if isinstance(key, (list, tuple)):
return tuple(key)
return key
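# Sketch of the collision guarantee documented above (w and b are hypothetical
# tensors): after `d[(w, b)] = block_a`, a later `d[w] = block_b` raises
# ValueError because `w` is already associated with an existing key.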
# TODO(b/68034464): add capability for LayerCollection to be "finalized"
# and do this when it gets used by FisherEstimator / KfacOptimizer.
class LayerCollection(object):
"""Registry of information about layers and losses.
Note that you need to create a new one of these for each MatrixEstimator or
KfacOptimizer.
Attributes:
fisher_blocks: a LayersParamsDict (subclass of OrderedDict) mapping layer
parameters (Tensors or tuples of Tensors) to FisherBlock instances.
fisher_factors: an OrderedDict mapping tuples to FisherFactor instances.
losses: a list of LossFunction objects. The loss to be optimized is their
sum.
"""
def __init__(self,
graph=None,
colocate_cov_ops_with_inputs=False,
name="LayerCollection"):
self.fisher_blocks = LayerParametersDict()
self.fisher_factors = OrderedDict()
self._linked_parameters = dict(
) # dict mapping sets of variables to optionally specified approximations.
self._graph = graph or ops.get_default_graph()
self._loss_dict = {} # {str: LossFunction}
self._subgraph = None
self._default_generic_approximation = APPROX_FULL_NAME
self._default_fully_connected_approximation = APPROX_KRONECKER_NAME
self._default_convolution_2d_approximation = APPROX_KRONECKER_NAME
self._default_fully_connected_multi_approximation = (
APPROX_KRONECKER_SERIES_2_NAME)
self._colocate_cov_ops_with_inputs = colocate_cov_ops_with_inputs
with variable_scope.variable_scope(None, default_name=name) as scope:
self._var_scope = scope.name
@property
def losses(self):
"""LossFunctions registered with this LayerCollection."""
return list(self._loss_dict.values())
@property
def registered_variables(self):
"""A tuple of all of the variables currently registered."""
tuple_of_tuples = (ensure_sequence(key) for key, block
in six.iteritems(self.fisher_blocks))
flat_tuple = tuple(item for tuple_ in tuple_of_tuples for item in tuple_)
return flat_tuple
@property
def linked_parameters(self):
"""Groups of parameters with an optionally specified approximation.
Linked parameters can be added using `define_linked_parameters`.
If an approximation is specified, then this approximation will be used
when registering a layer with exactly these parameters, unless an
approximation is specified when calling the registration function.
Returns:
A `dict` mapping tuples of parameters to an optional string.
"""
return self._linked_parameters
@property
def default_generic_approximation(self):
return self._default_generic_approximation
def set_default_generic_approximation(self, value):
if value not in _GENERIC_APPROX_TO_BLOCK_TYPES:
raise ValueError(
"{} is not a valid approximation for generic variables.".format(
value))
self._default_generic_approximation = value
@property
def default_fully_connected_approximation(self):
return self._default_fully_connected_approximation
def set_default_fully_connected_approximation(self, value):
if value not in _FULLY_CONNECTED_APPROX_TO_BLOCK_TYPES:
raise ValueError(
"{} is not a valid approximation for fully connected layers.".format(
value))
self._default_fully_connected_approximation = value
@property
def default_conv2d_approximation(self):
return self._default_convolution_2d_approximation
def set_default_conv2d_approximation(self, value):
if value not in _CONV2D_APPROX_TO_BLOCK_TYPES:
raise ValueError(
"{} is not a valid approximation for 2d convolutional layers.".format(
value))
self._default_convolution_2d_approximation = value
@property
def default_fully_connected_multi_approximation(self):
return self._default_fully_connected_multi_approximation
def set_default_fully_connected_multi_approximation(self, value):
if value not in _FULLY_CONNECTED_MULTI_APPROX_TO_BLOCK_TYPES:
raise ValueError("{} is not a valid approximation for a fully-connected "
"multi layer.".format(value))
self._default_fully_connected_multi_approximation = value
def register_block(self, layer_key, fisher_block, reuse=VARIABLE_SCOPE):
"""Validates and registers the layer_key associated with the fisher_block.
Args:
layer_key: A variable or tuple of variables. The key to check for in
existing registrations and to register if valid.
fisher_block: The associated `FisherBlock`.
reuse: Method to use for inserting new `FisherBlock`s. One of True, False,
or 'VARIABLE_SCOPE'.
Raises:
ValueError: If `layer_key` was already registered and reuse is `False`,
if `layer_key` was registered with a different block type, or if
`layer_key` shares any variables with but is not equal to a previously
registered key.
KeyError: If `reuse` is `True` but `layer_key` was not previously
registered.
Returns:
The `FisherBlock` registered under `layer_key`. If `layer_key` was already
registered, this will be the previously registered `FisherBlock`.
"""
if reuse is VARIABLE_SCOPE:
reuse = variable_scope.get_variable_scope().reuse
if reuse is True or (reuse is variable_scope.AUTO_REUSE and
layer_key in self.fisher_blocks):
result = self.fisher_blocks[layer_key]
if type(result) != type(fisher_block): # pylint: disable=unidiomatic-typecheck
raise ValueError(
"Attempted to register FisherBlock of type %s when existing "
"FisherBlock has type %s." % (type(fisher_block), type(result)))
return result
if reuse is False and layer_key in self.fisher_blocks:
raise ValueError("FisherBlock for %s is already in LayerCollection." %
(layer_key,))
# Insert fisher_block into self.fisher_blocks.
if layer_key in self.fisher_blocks:
raise ValueError("Duplicate registration: {}".format(layer_key))
# Raise an error if any variable in layer_key has been registered in any
# other blocks.
variable_to_block = {
var: (params, block)
for (params, block) in self.fisher_blocks.items()
for var in ensure_sequence(params)
}
for variable in ensure_sequence(layer_key):
if variable in variable_to_block:
prev_key, prev_block = variable_to_block[variable]
raise ValueError(
"Attempted to register layer_key {} with block {}, but variable {}"
" was already registered in key {} with block {}.".format(
layer_key, fisher_block, variable, prev_key, prev_block))
self.fisher_blocks[layer_key] = fisher_block
return fisher_block
def get_use_count_map(self):
"""Returns a dict of variables to their number of registrations."""
# TODO(b/70283403): Reimplement this in the old way, where each
# registration function would be responsible for incrementing the count.
# Also, this version has a bug: it won't do the right thing for generic
# registration for parameters that are shared. i.e. it won't set the use
# count to infinity.
vars_to_uses = defaultdict(int)
for key, block in six.iteritems(self.fisher_blocks):
n = (
block.num_inputs()*block.num_registered_minibatches if isinstance(
block, (fb.FullyConnectedSeriesFB, fb.FullyConnectedMultiIndepFB))
else block.num_registered_minibatches)
key = ensure_sequence(key)
for k in key:
vars_to_uses[k] += n
return vars_to_uses
def check_registration(self, variables):
"""Checks that all variable uses have been registered properly.
Args:
variables: List of variables.
Raises:
ValueError: If any registered variables are not included in the list.
ValueError: If any variable in the list is not registered.
ValueError: If any variable in the list is registered with the wrong
number of "uses" in the subgraph recorded (vs the number of times that
variable is actually used in the subgraph).
"""
# Note that overlapping parameters (i.e. those that share variables) will
# be caught by layer_collection.LayerParametersDict during registration.
reg_use_map = self.get_use_count_map()
error_messages = []
for var in variables:
total_uses = self.subgraph.variable_uses(var)
reg_uses = reg_use_map[var]
if reg_uses == 0:
error_messages.append("Variable {} not registered.".format(var))
elif (not math.isinf(reg_uses)) and reg_uses != total_uses:
error_messages.append(
"Variable {} registered with wrong number of uses ({} "
"registrations vs {} uses).".format(var, reg_uses, total_uses))
num_get_vars = len(reg_use_map)
if num_get_vars > len(variables):
error_messages.append("{} registered variables were not included in list."
.format(num_get_vars - len(variables)))
if error_messages:
error_messages = [
"Found the following errors with variable registration:"
] + error_messages
raise ValueError("\n\t".join(error_messages))
def get_blocks(self):
return self.fisher_blocks.values()
def get_factors(self):
return self.fisher_factors.values()
@property
def graph(self):
return self._graph
@property
def subgraph(self):
return self._subgraph
def define_linked_parameters(self, params, approximation=None):
"""Identify a set of parameters that should be grouped together.
During automatic graph scanning, any matches containing variables that have
been identified as part of a linked group will be filtered out unless
the match parameters are exactly equal to the ones specified in the linked
group.
Args:
params: A variable, or a tuple or list of variables. The variables
to be linked.
approximation: Optional string specifying the type of approximation to use
for these variables. If unspecified, this layer collection's default
approximation for the layer type will be used.
Raises:
ValueError: If the parameters were already registered in a layer or
identified as part of an incompatible group.
"""
params = frozenset(ensure_sequence(params))
# Check if any of the variables in 'params' is already in
# 'self.fisher_blocks.keys()'.
for registered_params, fisher_block in self.fisher_blocks.items():
registered_params_set = set(ensure_sequence(registered_params))
for variable in params:
if (variable in registered_params_set and
params != registered_params_set):
raise ValueError(
"Can't link parameters {}, variable {} was already registered in "
"group {} with layer {}".format(params, variable,
registered_params, fisher_block))
# Check if any of the variables in 'params' is already in
# 'self.linked_parameters'.
for variable in params:
for other_linked_params in self.linked_parameters:
if variable in other_linked_params:
raise ValueError("Can't link parameters {}, variable {} was already "
"linked in group {}.".format(params, variable,
other_linked_params))
self._linked_parameters[params] = approximation
def create_subgraph(self):
if not self.losses:
raise ValueError("Must have at least one registered loss.")
inputs_to_losses = nest.flatten(tuple(loss.inputs for loss in self.losses))
self._subgraph = utils.SubGraph(inputs_to_losses)
def total_loss(self):
return math_ops.add_n(tuple(loss.evaluate() for loss in self.losses))
def total_sampled_loss(self):
return math_ops.add_n(
tuple(loss.evaluate_on_sample() for loss in self.losses))
def _get_linked_approx(self, params):
"""If params were linked, return their specified approximation."""
params_set = frozenset(ensure_sequence(params))
if params_set in self.linked_parameters:
return self.linked_parameters[params_set]
else:
return None
def register_fully_connected(self,
params,
inputs,
outputs,
approx=None,
reuse=VARIABLE_SCOPE):
"""Registers a fully connnected layer.
Args:
params: Tensor or 2-tuple of Tensors corresponding to weight and bias of
this layer. Weight matrix should have shape [input_size, output_size].
Bias should have shape [output_size].
inputs: Tensor of shape [batch_size, input_size]. Inputs to layer.
outputs: Tensor of shape [batch_size, output_size]. Outputs
produced by layer.
approx: str. One of "kron" or "diagonal".
reuse: bool or str. If True, reuse an existing FisherBlock. If False,
create a new FisherBlock. If "VARIABLE_SCOPE", use
tf.get_variable_scope().reuse.
Raises:
ValueError: For improper value to 'approx'.
KeyError: If reuse == True but no FisherBlock found for 'params'.
ValueError: If reuse == True and FisherBlock found but of the wrong type.
"""
if approx is None:
approx = self._get_linked_approx(params)
if approx is None:
approx = self.default_fully_connected_approximation
if approx not in _FULLY_CONNECTED_APPROX_TO_BLOCK_TYPES:
raise ValueError("Bad value {} for approx.".format(approx))
block_type = _FULLY_CONNECTED_APPROX_TO_BLOCK_TYPES[approx]
has_bias = isinstance(params, (tuple, list))
block = self.register_block(params, block_type(self, has_bias), reuse=reuse)
block.register_additional_minibatch(inputs, outputs)
def register_conv2d(self,
params,
strides,
padding,
inputs,
outputs,
approx=None,
reuse=VARIABLE_SCOPE):
"""Registers a convolutional layer.
Args:
params: Tensor or 2-tuple of Tensors corresponding to weight and bias of
this layer. Weight matrix should have shape [kernel_height,
kernel_width, in_channels, out_channels]. Bias should have shape
[out_channels].
strides: 1-D Tensor of length 4. Strides for convolution kernel.
padding: string. see tf.nn.conv2d for valid values.
inputs: Tensor of shape [batch_size, height, width, in_channels]. Inputs
to layer.
outputs: Tensor of shape [batch_size, height, width, out_channels].
Output produced by layer.
approx: str. One of "kron" or "diagonal".
reuse: bool or str. If True, reuse an existing FisherBlock. If False,
create a new FisherBlock. If "VARIABLE_SCOPE", use
tf.get_variable_scope().reuse.
Raises:
ValueError: For improper value to 'approx'.
KeyError: If reuse == True but no FisherBlock found for 'params'.
ValueError: If reuse == True and FisherBlock found but of the wrong type.
"""
if approx is None:
approx = self._get_linked_approx(params)
if approx is None:
approx = self.default_conv2d_approximation
if approx not in _CONV2D_APPROX_TO_BLOCK_TYPES:
raise ValueError("Bad value {} for approx.".format(approx))
block_type = _CONV2D_APPROX_TO_BLOCK_TYPES[approx]
block = self.register_block(
params, block_type(self, params, strides, padding), reuse=reuse)
block.register_additional_minibatch(inputs, outputs)
def register_generic(self,
params,
batch_size,
approx=None,
reuse=VARIABLE_SCOPE):
"""Registers a generic layer.
Args:
params: Tensor or tuple of Tensors corresponding to the parameters.
batch_size: 0-D Tensor. Size of the minibatch.
approx: str. One of "full" or "diagonal".
reuse: bool or str. If True, reuse an existing FisherBlock. If False,
create a new FisherBlock. If "VARIABLE_SCOPE", use
tf.get_variable_scope().reuse.
Raises:
ValueError: For improper value to 'approx'.
KeyError: If reuse == True but no FisherBlock found for 'params'.
ValueError: If reuse == True and FisherBlock found but of the wrong type.
"""
if approx is None:
approx = self._get_linked_approx(params)
if approx is None:
approx = self.default_generic_approximation
if approx not in _GENERIC_APPROX_TO_BLOCK_TYPES:
raise ValueError("Bad value {} for approx.".format(approx))
block_type = _GENERIC_APPROX_TO_BLOCK_TYPES[approx]
block = self.register_block(params, block_type(self, params), reuse=reuse)
block.register_additional_minibatch(batch_size)
def register_fully_connected_multi(self, params, inputs, outputs,
approx=None):
"""Register fully connected layers with shared parameters.
This can handle general fully-connected layers with shared parameters, but
has specialized approximations to deal with the case where there is a
meaningful linear order to the share instances (such as in an RNN).
Args:
params: Tensor or 2-tuple of Tensors corresponding to weight and bias of
this layer. Weight matrix should have shape [input_size, output_size].
Bias should have shape [output_size].
inputs: A list of tensors, each of shape [batch_size, input_size]. Inputs
to layer. In the case of RNNs, one Tensor per time step.
outputs: A list of tensors, the same length as 'inputs', each of shape
[batch_size, output_size]. Outputs produced by layer. In the case of
RNNs, one Tensor per time step.
approx: str. One of "kron_indep", "kron_series_1", or "kron_series_2".
Raises:
ValueError: For improper value to 'approx'.
"""
if approx is None:
approx = self._get_linked_approx(params)
if approx is None:
approx = self.default_fully_connected_multi_approximation
has_bias = isinstance(params, (tuple, list))
# TODO(b/70283649): something along the lines of find_canonical_output
# should be added back in here (and for the other block types, arguably).
if approx not in _FULLY_CONNECTED_MULTI_APPROX_TO_BLOCK_TYPES:
raise ValueError("Bad value {} for approx.".format(approx))
block_type = _FULLY_CONNECTED_MULTI_APPROX_TO_BLOCK_TYPES[approx]
# For now we don't support multiple minibatches for this type of layer, so
# we set reuse=False
self.register_block(params,
block_type(self, inputs, outputs, has_bias=has_bias),
reuse=False)
def register_categorical_predictive_distribution(self,
logits,
seed=None,
targets=None,
name=None,
reuse=VARIABLE_SCOPE):
"""Registers a categorical predictive distribution.
Args:
logits: The logits of the distribution (i.e. its parameters).
seed: The seed for the RNG (for debugging) (Default: None)
targets: (OPTIONAL) The targets for the loss function. Only required if
one wants to call total_loss() instead of total_sampled_loss().
total_loss() is required, for example, to estimate the
"empirical Fisher" (instead of the true Fisher).
(Default: None)
name: (OPTIONAL) str or None. Unique name for this loss function. If None,
a new name is generated. (Default: None)
reuse: (OPTIONAL) bool or str. If True, reuse an existing FisherBlock.
If False, create a new FisherBlock. If VARIABLE_SCOPE, use
tf.get_variable_scope().reuse.
Raises:
ValueError: If reuse == True and name == None.
ValueError: If reuse == True and seed != None.
KeyError: If reuse == True and no existing LossFunction with 'name' found.
KeyError: If reuse == False and existing LossFunction with 'name' found.
"""
name = name or self._graph.unique_name(
"register_categorical_predictive_distribution")
if reuse == VARIABLE_SCOPE:
reuse = variable_scope.get_variable_scope().reuse
if reuse:
if name is None:
raise ValueError(
"If reuse is enabled, loss function's name must be set.")
if seed is not None:
raise ValueError(
"Seed can only be specified at LossFunction instantiation.")
loss = self._loss_dict.get(name, None)
if loss is None:
raise KeyError(
"Unable to find loss function named {}. Create a new LossFunction "
"with reuse=False.".format(name))
loss.register_additional_minibatch(logits, targets=targets)
else:
if name in self._loss_dict:
raise KeyError(
"Loss function named {} already exists. Set reuse=True to append "
"another minibatch.".format(name))
loss = lf.CategoricalLogitsNegativeLogProbLoss(
logits, targets=targets, seed=seed)
self._loss_dict[name] = loss
def register_normal_predictive_distribution(self,
mean,
var=0.5,
seed=None,
targets=None,
name=None):
"""Registers a normal predictive distribution.
Args:
mean: The mean vector defining the distribution.
var: The variance (must be a scalar). Note that the default value of
0.5 corresponds to a standard squared error loss (target -
prediction)**2. If your squared error loss is of the form
0.5*(target - prediction)**2 you should use var=1.0. (Default: 0.5)
seed: The seed for the RNG (for debugging) (Default: None)
targets: (OPTIONAL) The targets for the loss function. Only required if
one wants to call total_loss() instead of total_sampled_loss().
total_loss() is required, for example, to estimate the
"empirical Fisher" (instead of the true Fisher).
(Default: None)
name: (OPTIONAL) str or None. Unique name for this loss function. If None,
a new name is generated. (Default: None)
"""
name = name or self._graph.unique_name(
"register_normal_predictive_distribution")
if name in self._loss_dict:
raise NotImplementedError(
"Adding logits to an existing LossFunction not yet supported.")
loss = lf.NormalMeanNegativeLogProbLoss(
mean, var, targets=targets, seed=seed)
self._loss_dict[name] = loss
def register_multi_bernoulli_predictive_distribution(self,
logits,
seed=None,
targets=None,
name=None):
"""Registers a multi-Bernoulli predictive distribution.
Args:
logits: The logits of the distribution (i.e. its parameters).
seed: The seed for the RNG (for debugging) (Default: None)
targets: (OPTIONAL) The targets for the loss function. Only required if
one wants to call total_loss() instead of total_sampled_loss().
total_loss() is required, for example, to estimate the
"empirical Fisher" (instead of the true Fisher).
(Default: None)
name: (OPTIONAL) str or None. Unique name for this loss function. If None,
a new name is generated. (Default: None)
"""
name = name or self._graph.unique_name(
"register_multi_bernoulli_predictive_distribution")
if name in self._loss_dict:
raise NotImplementedError(
"Adding logits to an existing LossFunction not yet supported.")
loss = lf.MultiBernoulliNegativeLogProbLoss(
logits, targets=targets, seed=seed)
self._loss_dict[name] = loss
def make_or_get_factor(self, cls, args):
"""Insert 'cls(args)' into 'self.fisher_factors' if not already present.
Wraps constructor in 'tf.variable_scope()' to ensure variables constructed
in 'cls.__init__' are placed under this LayerCollection's scope.
Args:
cls: Class that implements FisherFactor.
args: Tuple of arguments to pass into 'cls's constructor. Must be
hashable.
Returns:
Instance of 'cls' found in self.fisher_factors.
"""
try:
hash(args)
except TypeError:
raise TypeError(
("Unable to use (cls, args) = ({}, {}) as a key in "
"LayerCollection.fisher_factors. The pair cannot be hashed.").format(
cls, args))
key = cls, args
if key not in self.fisher_factors:
colo = self._colocate_cov_ops_with_inputs
with variable_scope.variable_scope(self._var_scope):
self.fisher_factors[key] = cls(*args, colocate_cov_ops_with_inputs=colo)
return self.fisher_factors[key]
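# --- Hedged usage sketch (added for illustration, not part of the original file) ---
# A minimal example of how the registration methods above might be combined for
# a single fully connected softmax layer. All tensors (weights, bias, inputs,
# preactivations, logits) are hypothetical placeholders assumed to already exist
# in the graph, and LayerCollection is assumed to be constructible with default
# arguments.
def _example_layer_collection_usage(weights, bias, inputs, preactivations,
                                    logits):
  lc = LayerCollection()
  # Register the layer's parameters together with its inputs/outputs.
  lc.register_fully_connected((weights, bias), inputs, preactivations)
  # Register the model's predictive distribution so losses can be sampled.
  lc.register_categorical_predictive_distribution(logits)
  # Build the subgraph between registered inputs and losses, then verify that
  # every variable use was accounted for.
  lc.create_subgraph()
  lc.check_registration([weights, bias])
  return lc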
|
|
from basic import *
import score_trees_devel
import svg_basic
import numpy as np
import util
import html_colors
import scipy.stats
import copy
import random
from operator import add
import tcr_sampler
#from mannwhitneyu import mannwhitneyu as mannwhitneyu_exact #too slow
with Parser(locals()) as p:
p.str('clones_file').required()
p.str('organism').required()
p.float('distance_scale_factor').default(0.01)
p.float('extra_color_schemes_none_score').shorthand('none_score')
p.flag('dont_trim_labels')
p.flag('constant_seed')
p.multiword('only_epitopes').cast(lambda x:x.split())
p.multiword('extra_color_schemes').shorthand('colors').cast(lambda x:x.split())
if constant_seed: random.seed(1)
fake_chains = util.detect_fake_chains( clones_file )
probs_cs = 'probs'
sharing_cs = 'sharing'
cross_reactivity_cs = 'cross_reactivity'
clonality_cs = 'clonality'
min_other_nbrdist_cs = 'min_other_nbrdist'
color_scheme_explanations = {}
color_schemes = [ probs_cs, sharing_cs, cross_reactivity_cs, clonality_cs, min_other_nbrdist_cs ]
extra_color_scheme_prefix = 'FC_'
if extra_color_schemes:
for tsvtag in extra_color_schemes:
color_schemes.append( extra_color_scheme_prefix + tsvtag )
gap_character = '-' ## different from some other places
min_cluster_size = 1
#cluster_radius = {'AB':1.0, 'A':0.5, 'B':0.5}
distance_threshold_25_scaled = distance_scale_factor * pipeline_params[ 'distance_threshold_25' ]
cluster_radius = {
'A' : distance_threshold_25_scaled*2, ## was 0.5
'B' : distance_threshold_25_scaled*2, ## was 0.5
'AB': distance_threshold_25_scaled*4 ## was 1.0
}
tree_width = 750
ymargin = 30 ## right now we don't want top text to get cut off
xmargin = 10
text_column_separation = 4
labels_tree_separation = 10
#labels_width = 700##approx
#tree_x0 = labels_width + xmargin + xpad
#total_svg_width = tree_x0 + tree_width + xmargin
branch_width_fraction = 0.1
log10_of_zero = -100
def get_safe_log10(f ):
if f==0: return log10_of_zero
else: return math.log10(f)
def pad_to_middle( s, num ): ## with spaces
if len(s) >= num:return s
extra = num-len(s)
before = extra/2
after = extra-before
return ' '*before + s + ' '*after
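## worked example (added for illustration): pad_to_middle('CASS',8) -> '  CASS  '
## and pad_to_middle('CAS',8) -> '  CAS   ' (the extra space goes after the string)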
def get_primary_number( gene_name ):
tmp = gene_name[:]
while tmp and not tmp[0].isdigit():
tmp = tmp[1:]
if not tmp:
## for example, if gene_name=='TRGJP'
return 0
assert tmp[0].isdigit()
if tmp.isdigit():
return int(tmp)
else:
tmp2 = ''
while tmp[0].isdigit():
tmp2 += tmp[0]
tmp = tmp[1:]
return int(tmp2)
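## worked examples (added for illustration): get_primary_number('TRAV12-3') -> 12,
## get_primary_number('TRBJ2') -> 2, get_primary_number('TRGJP') -> 0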
## the next two functions are based on ones from ../distances.new.py
# def tree_leaves( tree ):
# if tree[0] == tree[1]:
# return [ tree[0] ]
# else:
# return tree_leaves( tree[0] ) + tree_leaves( tree[1] )
def tree_splits_ttest( edge_pvals, tree, other_leaves, leaf_scores, leaf_names, info='', pvalue_threshold = 1e-2 ):
if tree[0] == tree[1]:
return
for i in range(2):
a_leaves = score_trees_devel.Node_members( tree[i] )
b_leaves = score_trees_devel.Node_members( tree[(i+1)%2] ) + other_leaves
a_scores = reduce( add, [ leaf_scores[x] for x in a_leaves ] )
b_scores = reduce( add, [ leaf_scores[x] for x in b_leaves ] )
if a_scores and b_scores:
amean = sum(a_scores)/len(a_scores)
bmean = sum(b_scores)/len(b_scores)
if amean != bmean:
t, t_pvalue1 = scipy.stats.ttest_ind( a_scores, b_scores, equal_var = True )
t, t_pvalue2 = scipy.stats.ttest_ind( a_scores, b_scores, equal_var = False )
u, u_pvalue = scipy.stats.mannwhitneyu( a_scores, b_scores )
maxp = max( [t_pvalue1, t_pvalue2, u_pvalue] )
if maxp < pvalue_threshold:
# if len(a_scores)<5 or len(b_scores)<5: ## try the exact calculation
# print 'calc exact:',len(a_scores),len(b_scores)
# u2, u_pvalue_exact = mannwhitneyu_exact( a_scores, b_scores )
# print 'exact MWU:',u,u2,u_pvalue, u_pvalue_exact
print 'pvalue: {:.3e} {:.3e} {:.3e} {:.3e} {} a: {} {} {:.2f} b: {} {} {:.2f}'\
.format( maxp, t_pvalue1, t_pvalue2, u_pvalue, info,
len(a_leaves), leaf_names[a_leaves[0]], amean,
len(b_leaves), leaf_names[b_leaves[0]], bmean )
k = tuple( sorted( a_leaves ) )
assert k not in edge_pvals
symbol = '-' if amean < bmean else '+'
edge_pvals[ k ] = [ maxp, t_pvalue1, t_pvalue2, u_pvalue, symbol ]
## recurse
tree_splits_ttest( edge_pvals, tree[i], b_leaves, leaf_scores, leaf_names, info )
def label_pval_edges( cmds, edge_pvals, subtree, plotting_info ):
sizes, node_position, Transform, canvas_tree_w_factor, canvas_tree_min_rmsd = plotting_info
if score_trees_devel.IsALeaf( subtree ):
return
else:
big_rmsd = subtree[2]
for ii in range(2):
iitree = subtree[ii]
little_rmsd = iitree[2]
#assert little_rmsd<=big_rmsd
assert little_rmsd <= big_rmsd+1e-3
if little_rmsd > big_rmsd:
print 'WHOAH:',little_rmsd,big_rmsd
leaves = tuple( sorted( score_trees_devel.Node_members(iitree) ) )
if leaves in edge_pvals:
pvals = edge_pvals[leaves]
symbol = pvals[-1]
center= score_trees_devel.Center( iitree, node_position, sizes, use_sizes_as_weights=True )
size = score_trees_devel.Size( iitree, sizes )
line_width = max(1,int(floor(0.5+ size*canvas_tree_w_factor )))
box_x0 = Transform(max(canvas_tree_min_rmsd,little_rmsd)) ; box_x1 = Transform(big_rmsd)
sep = 3
cmds.append( svg_basic.make_text( '{:.0E} {}'.format( pvals[0], symbol ),
( box_x0+sep, center-line_width/2-sep),
10, font_family="Droid Sans Mono" ) )
label_pval_edges( cmds, edge_pvals, iitree, plotting_info )
## little class to store some info
class TCR:
def __init__( self, l ):
self.subject = l['subject']
self.epitope = l['epitope']
self.cdr3a = l['cdr3a']
self.cdr3b = l['cdr3b']
if 'cdr3a_protseq_masked' not in l or 'cdr3b_protseq_masked' not in l:
tcr_sampler.add_masked_CDR3_sequences_to_tcr_dict( organism, l )
self.cdr3a_masked = l['cdr3a_protseq_masked']
self.cdr3b_masked = l['cdr3b_protseq_masked']
self.a_indels = l['a_indels']
self.b_indels = l['b_indels']
self.clone_id = l['clone_id']
self.clone_size = int( l['clone_size'] )
self.info = copy.deepcopy(l)
#genes= []
self.reps_for_sharing = []
self.reps_for_counting = []
for ab in 'ab':
for vj in 'vj':
#hits = l['{}{}_blast_hits'.format(vj,ab)]
genes = set( l['{}{}_genes'.format(vj,ab)].split(';') )
self.reps_for_sharing.append( util.reps_from_genes( genes, organism=organism, mm1=False ) )
#self.reps_for_counting.append( util.countreps_from_genes( genes, organism=organism ) )
return
## parse the clones_file ############################################################################################
all_tcrs = {}
#all_color_scores = {}
infields = []
clones_file_with_nbrdists = '{}_nbrdists.tsv'.format(clones_file[:-4])
assert exists( clones_file_with_nbrdists )
Log('parsing {}'.format(clones_file_with_nbrdists))
for line in open( clones_file_with_nbrdists,'r'):
if not infields:
if line[0] == '#':
infields = line[1:-1].split('\t')
else:
infields = line[:-1].split('\t')
continue
assert infields
l = parse_tsv_line( line[:-1], infields )
epitope = l['epitope']
tcr = TCR( l )
## figure out the color scores, one for each chain
## color by probs
tcr.color_scores = {}
tcr.color_scores[probs_cs] = { 'A': get_safe_log10( float(l['a_protseq_prob']) ),
'B': get_safe_log10( float(l['b_protseq_prob']) ),
'AB': get_safe_log10( float(l['a_protseq_prob']) * float( l['b_protseq_prob'] ) ) }
#clone_size = float(l['clone_size'])
tcr.color_scores[clonality_cs] = { 'A': tcr.clone_size, 'B': tcr.clone_size, 'AB': tcr.clone_size }
## look at rank scores for other epitopes
#suffix = '_rank25'
suffix = '_wtd_nbrdist10rank'
tcr.color_scores[ min_other_nbrdist_cs ] = {}
for ab in ['A','B','AB']:
other_ranks = []
for tag,val in l.iteritems():
suf = '_{}{}'.format(ab,suffix)
if tag.endswith(suf):
ep = tag[:-1*len(suf)]
if ep != epitope:
other_ranks.append( ( float(val), ep ) )
if other_ranks:
tcr.color_scores[ min_other_nbrdist_cs ][ ab ] = min( other_ranks ) ## (val,other_ep)
else:
tcr.color_scores[ min_other_nbrdist_cs ][ ab ] = (0.0,'NA')
## extra color scores
if extra_color_schemes:
for tsvtag in extra_color_schemes:
scheme = extra_color_scheme_prefix + tsvtag
tcr.color_scores[scheme] = {}
for ab in ['A','B','AB']:
tcr.color_scores[scheme][ab] = float( l[tsvtag] )
if epitope not in all_tcrs:
all_tcrs[epitope] = []
all_tcrs[ epitope ].append( tcr )
epitopes = sorted( all_tcrs.keys()[:] )
# color_score_range = {'A':None, 'B':None, 'AB':None }
color_score_range_probs_cs = {}
color_score_range_probs_cs[ 'A'] = ( -9.0, -5.0 )
color_score_range_probs_cs[ 'B'] = ( -9.5, -5.5 )
color_score_range_probs_cs['AB'] = ( -16.0, -12.0 )
def same_tcr( t1, t2, chains ): ## t1 and t2 are TCR objects; chains is 'A', 'B', or 'AB'
#t1_reps = t1[2] ## [va,ja,vb,jb]
#t2_reps = t2[2]
if 'A' in chains:
if ( t1.reps_for_sharing[0].isdisjoint( t2.reps_for_sharing[0] ) or
t1.reps_for_sharing[1].isdisjoint( t2.reps_for_sharing[1] ) or
t1.cdr3a != t2.cdr3a ):
return False
if 'B' in chains:
if ( t1.reps_for_sharing[2].isdisjoint( t2.reps_for_sharing[2] ) or
t1.reps_for_sharing[3].isdisjoint( t2.reps_for_sharing[3] ) or
t1.cdr3b != t2.cdr3b ):
return False
return True
for epitope in epitopes:
if only_epitopes and epitope not in only_epitopes: continue
tcrs = all_tcrs[epitope]
infos = [x.info for x in tcrs]
## this fills xx_label_rep and xx_label_rep_color in each dict in the infos list
util.assign_label_reps_and_colors_based_on_most_common_genes_in_repertoire( infos, organism )
rep_colors = {}
for tcr, info in zip(tcrs,infos):
tcr.single_reps = []
for ab in 'ab':
for vj in 'vj':
rep = info[ '{}{}_label_rep'.format(vj,ab) ]
color = info[ '{}{}_label_rep_color'.format(vj,ab) ]
tcr.single_reps.append( rep )
rep_colors[ rep ] = color
epitope_mice = list( set( [ x.subject for x in tcrs ] ) )
epitope_mice.sort() ## will use these to label the tree
## let's compute sharing
Log('compute sharing '+epitope)
for tcr in tcrs:
tcr.color_scores[ cross_reactivity_cs ] = {}
tcr.color_scores[ sharing_cs ] = {}
for ab in ['A','B','AB']:
## look for sharing with other epitopes or other mice (same epitope)
counts_xr = {}
mice = [[], [] ]
for ep2 in epitopes:
for tcr2 in all_tcrs[ ep2 ]:
if same_tcr( tcr, tcr2, ab ):
if epitope != ep2:
counts_xr[ ep2 ] = counts_xr.get( ep2,0)+1
mice[ epitope == ep2 ].append( tcr2.subject )
if counts_xr:
other_epitopes = counts_xr.keys()[:]
other_epitopes.sort()
else:
other_epitopes = []
tcr.color_scores[ cross_reactivity_cs ][ab] = ( 1+len(other_epitopes), other_epitopes )#(Nepitopes,other-eps)
tcr.color_scores[ sharing_cs ][ab] = [ len(set(mice[1])), len(set(mice[0]+mice[1])) ]
for ab in ['A','B','AB']:
if ab in fake_chains: continue
radius = cluster_radius[ab]
distfile = '{}_{}_{}.dist'.format( clones_file[:-4], ab, epitope )
assert exists(distfile)
Log('reading '+distfile)
N=0
all_nbrs = []
all_dists = []
for line in open( distfile,'r'):
l = line.split()
clone_id = l[0]
assert tcrs[ len(all_nbrs) ].clone_id == clone_id
dists = [ distance_scale_factor*float(x) for x in l[1:] ]
if not N:
N = len(dists)
else:
assert N == len(dists)
nbrs = []
for ii,d in enumerate(dists):
if d <= radius:
nbrs.append( ii )
all_dists.append( dists )
all_nbrs.append( nbrs )
deleted = [False]*N
centers = []
all_members = []
Log('clustering {} tcrs'.format(len(tcrs)))
while True:
clusterno = len(centers)
best_nbr_count =0
for i in range(N):
if deleted[i]: continue
nbr_count = 0
for nbr in all_nbrs[i]:
if not deleted[nbr]:
nbr_count+=1
if nbr_count > best_nbr_count:
best_nbr_count = nbr_count
center = i
if best_nbr_count < min_cluster_size:
break
centers.append( center )
members = [center]
deleted[center] = True
for nbr in all_nbrs[center]:
if not deleted[nbr]:
deleted[nbr] = True
members.append( nbr )
assert len(members) == best_nbr_count
all_members.append( frozenset(members) )
num_clusters = len(centers)
num_tcrs = len(tcrs)
## I think this will give a better ordering of the TCRs along the tree
## order from 1....N by going through the members of the clusters, largest to smallest
old2new_index = {}
new2old_index = {}
last_index=-1
for members in all_members:
for member in members:
last_index += 1
old2new_index[ member ] = last_index
new2old_index[ last_index ] = member
assert len(old2new_index) == num_tcrs
## how much vertical space will we need?
##
label_fontsize = 10
tree_height = label_fontsize * len(tcrs)
total_svg_height = tree_height + 2*ymargin
max_clone_count=max( ( x.clone_size for x in tcrs ) )
def clonality_fraction( clone_size ):
global max_clone_count
if max_clone_count==1: return 0.0
exponent = 1.0/3
mx = max_clone_count**exponent
cs = clone_size**exponent
return ( cs-1.0)/(mx-1.0)
def clonality_color( clone_size ):
return svg_basic.rgb_from_fraction( clonality_fraction( clone_size ) )
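## worked example (added for illustration): with max_clone_count == 27 the cube-root
## scaling gives clonality_fraction(1) == 0.0, clonality_fraction(8) ~= 0.5 and
## clonality_fraction(27) ~= 1.0, so a few huge clones don't wash out the color scale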
#max_num_mice_same_epitope = max( ( x.color_scores[ sharing_cs ][ab][0] for x in tcrs ) )
max_num_mice_all_epitopes = max( ( x.color_scores[ sharing_cs ][ab][1] for x in tcrs ) )
max_num_epitopes = max( ( x.color_scores[ cross_reactivity_cs ][ab][0] for x in tcrs ) )
def num_mice_all_epitopes_color( num_mice_all_epitopes ):
if max_num_mice_all_epitopes==1:
return svg_basic.rgb_from_fraction( 0.0 )
else:
return svg_basic.rgb_from_fraction( float(num_mice_all_epitopes-1)/(max_num_mice_all_epitopes-1))
def num_epitopes_color( num_epitopes ):
if max_num_epitopes == 1:
return svg_basic.rgb_from_fraction( 0.0 )
else:
return svg_basic.rgb_from_fraction( float(num_epitopes-1)/(max_num_epitopes-1) )
tree = None
for color_scheme in color_schemes:
## let's fill out an array of color scores
my_color_scores = [ x.color_scores[ color_scheme ][ab] for x in tcrs ]
my_color_scores_labels = ['']*num_tcrs ## will go into the label text, at the end
color_score_range = None
if color_scheme == probs_cs:
## adjust scores of low-prob guys
min_good_score = min( [x for x in my_color_scores if x!=log10_of_zero] )
my_color_scores_floats = [ max(min_good_score,x) for x in my_color_scores ]
color_score_range = color_score_range_probs_cs[ab]
elif color_scheme == clonality_cs:
my_color_scores_floats = [ clonality_fraction(float(x)) for x in my_color_scores ]
my_color_scores_labels = [ '{:2d}'.format(x) for x in my_color_scores ]
elif color_scheme == cross_reactivity_cs:
my_color_scores_floats = [float(x[0]) for x in my_color_scores ]
my_color_scores_labels = [ ' '.join(x[1]) for x in my_color_scores ]
elif color_scheme == sharing_cs:
my_color_scores_floats = [ float(x[0]) for x in my_color_scores ] # N-mice this epitope
my_color_scores_labels = [ '{}'.format(x[0]) for x in my_color_scores ]
elif color_scheme == min_other_nbrdist_cs:
my_color_scores_floats = [-1*float(x[0]) for x in my_color_scores ]
my_color_scores_labels = [ '{:3d} {}'.format( int(floor(0.5+x[0])), x[1] ) for x in my_color_scores ]
color_score_range = (-100,0)
elif color_scheme.startswith(extra_color_scheme_prefix):
my_color_scores_floats = [ None if x == extra_color_schemes_none_score else x for x in my_color_scores ]
my_color_scores_labels = ['NA' if x == extra_color_schemes_none_score else '{:.1f}'.format(x)
for x in my_color_scores ]
if my_color_scores_floats.count(None) == len(my_color_scores_floats):
print 'skipping empty color scheme:',color_scheme,epitope
continue ## no scores for this guy
mn_score_color = min( [ x for x in my_color_scores_floats if x!=None ] )
mx_score_color = max( [ x for x in my_color_scores_floats if x!=None ] )
def get_tcr_score_color( index ):
score = my_color_scores_floats[index]
if score==None:return 'black'
if color_score_range:
mn,mx = color_score_range
else:
mn,mx = mn_score_color, mx_score_color
if mx==mn: mx=mn+1
return svg_basic.rgb_from_fraction( max(0.0,min(1.0, ( score-mn )/(mx-mn ) ) ) )
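## worked example (added for illustration): for the 'A' chain probs color scheme the
## range is (-9.0, -5.0), so a score of -7.0 maps to (-7.0 - -9.0)/(-5.0 - -9.0) == 0.5
## before being passed to rgb_from_fraction; out-of-range scores are clamped to [0,1]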
## now get some info together for plotting
all_center_dists = {}
all_scores = []
sizes = []
names = []
infos = [] ## for the color score correlations
for new_index in range(num_tcrs):
old_index = new2old_index[ new_index ]
names.append( '' )
sizes.append( 1 )
infos.append( '{} {}'.format( tcrs[old_index].cdr3a[3:-2], tcrs[old_index].cdr3b[3:-2] ))
color_score = my_color_scores_floats[ old_index ]
if color_score==None:
all_scores.append( [] )
else:
all_scores.append( [color_score] )
for other_new_index in range(num_tcrs):
other_old_index = new2old_index[ other_new_index ]
dist = all_dists[ old_index ][ other_old_index ]
all_center_dists[ (new_index,other_new_index) ] = dist
all_center_dists[ (other_new_index,new_index) ] = dist
percentile = -1
Log('Make_tree')
if not tree:
tree = score_trees_devel.Make_tree_new( all_center_dists, len(names),
score_trees_devel.Update_distance_matrix_AL,
all_scores, score_trees_devel.CallAverageScore(percentile) )
else:
tree = score_trees_devel.Copy_tree_update_scores( tree, all_scores,
score_trees_devel.CallAverageScore(percentile))
## look for branches with high/low scores
edge_pvals = {}
tree_splits_ttest( edge_pvals, tree, [], all_scores, infos, epitope+'_'+ab+"_"+color_scheme )
## the x-values don't matter here
## but the y-values do
tree_p0 = [10, ymargin ]
tree_p1 = [1000, tree_height+ymargin ]
## node_position tells us where the different clusters are located, vertically
##
Log('Canvas_tree 1st time')
tmp_plotter = svg_basic.SVG_tree_plotter()
node_position,Transform,canvas_tree_min_rmsd, canvas_tree_w_factor \
= score_trees_devel.Canvas_tree( tree, names, sizes, tree_p0, tree_p1, branch_width_fraction,
tmp_plotter, label_internal_nodes = False,
score_range_for_coloring = color_score_range )
cmds = []
cmds.append( svg_basic.make_text( '{} {} #tcrs: {} {} colors'.format(epitope,ab,len(tcrs),color_scheme),
( 10, ymargin ),
30, font_family="Droid Sans Mono" ) ) ## label the epitope
## now let's add some text for each tcr
num_columns = 6 + 2*len(ab)
text_columns = []
for i in range( num_columns ):
text_columns.append( [] )
header = ['']*num_columns
for old_index, tcr in enumerate( tcrs ):
## 1. have some text on the left with the cdr3s, masked cdr3s and indels
## 2. clonality text
## 3. sharing text -- yeah
## 4. gene segments text, actually each gene segment separate, maybe, since they are different colors
## 5. color label
##
## which mouse/subject
icol = 0
text_columns[ icol ].append( ( '{:2d}{:2s}'.format(epitope_mice.index(tcr.subject)+1,tcr.subject[:2]),
'black' ) )
header[icol] = 'M#'
icol += 1
text0 = ''
if 'A' in ab:
text0 += '{} {} {}'\
.format( pad_to_middle( tcr.cdr3a if dont_trim_labels else tcr.cdr3a[3:-2], 15 ),
pad_to_middle( tcr.cdr3a_masked if dont_trim_labels else tcr.cdr3a_masked[3:-2], 15 ),
pad_to_middle( tcr.a_indels, 6 ) )
if 'B' in ab:
if text0: text0 += ' '
text0 += '{} {} {}'\
.format( pad_to_middle( tcr.cdr3b if dont_trim_labels else tcr.cdr3b[3:-2], 15 ),
pad_to_middle( tcr.cdr3b_masked if dont_trim_labels else tcr.cdr3b_masked[3:-2], 15 ),
pad_to_middle( tcr.b_indels, 6 ) )
text_columns[ icol ].append( ( text0, 'black' ) )
icol += 1
## clonality
header[icol] = ' C'
text_columns[ icol ].append( ( '{:2d}'.format(tcr.clone_size), clonality_color( tcr.clone_size ) ) )
icol += 1
## sharing
header[icol] = ' M '
num_mice_this_epitope, num_mice_all_epitopes = tcr.color_scores[ sharing_cs ][ab]
text_columns[ icol ].append( ( '{},{}'.format( num_mice_this_epitope, num_mice_all_epitopes ),
num_mice_all_epitopes_color( num_mice_all_epitopes ) ) )
icol += 1
## cross-reactivity:
header[icol] = 'E'
num_epitopes = tcr.color_scores[ cross_reactivity_cs ][ab][0] ## includes this one
text_columns[ icol ].append( ( str(num_epitopes), num_epitopes_color( num_epitopes ) ) )
icol += 1
## gene segments
reps = tcr.single_reps
if 'A' in ab:
assert reps[0].startswith('TR') and reps[1].startswith('TR')
text_columns[ icol ].append( ( '{}{:02d}'.format( reps[0][2:4], get_primary_number( reps[0] ) ),
rep_colors[reps[0]] ) )
icol += 1
text_columns[ icol ].append( ( '{}{:02d}'.format( reps[1][2:4], get_primary_number( reps[1] ) ),
rep_colors[reps[1]] ) )
icol += 1
# text_columns[ icol ].append( ( 'AV{:02d}'.format( get_primary_number( reps[0] ) ), rep_colors[reps[0]] ) ) ; icol += 1
# text_columns[ icol ].append( ( 'AJ{:02d}'.format( get_primary_number( reps[1] ) ), rep_colors[reps[1]] ) ) ; icol += 1
if 'B' in ab:
text_columns[ icol ].append( ( '{}{:02d}'.format( reps[2][2:4], get_primary_number( reps[2] ) ),
rep_colors[reps[2]] ) )
icol += 1
text_columns[ icol ].append( ( '{}{:02d}'.format( reps[3][2:4], get_primary_number( reps[3] ) ),
rep_colors[reps[3]] ) )
icol += 1
# text_columns[ icol ].append( ( 'BV{:02d}'.format( get_primary_number( reps[2] ) ), rep_colors[reps[2]] ) ) ; icol += 1
# text_columns[ icol ].append( ( 'BJ{}'.format( reps[3][4:]) , rep_colors[reps[3]] ) ) ; icol += 1
text_columns[ icol ].append( ( my_color_scores_labels[old_index], get_tcr_score_color( old_index ) ) ) ; icol += 1
assert icol == num_columns
## now go through and figure out how wide each of the text columns is
x_offset = xmargin
for col,header_tag in zip( text_columns, header ):
assert len(col) == num_tcrs
maxlen = max((len(x[0]) for x in col ))
if not maxlen: continue
for old_index, ( text,color ) in enumerate(col):
new_index = old2new_index[ old_index ]
ypos = node_position[ new_index ]
lower_left = [ x_offset, ypos+0.5*label_fontsize*0.75 ]
cmds.append( svg_basic.make_text( text, lower_left, label_fontsize, color=color ) )
if header_tag:
max_ypos = max( node_position.values() )
lower_left = [ x_offset, max_ypos+2.0*label_fontsize*0.75 ]
cmds.append( svg_basic.make_text( header_tag, lower_left, label_fontsize, color='black' ) )
x_offset += text_column_separation + label_fontsize * 0.6 * maxlen
## how wide should the tree be?
tree_p0 = [x_offset + labels_tree_separation, ymargin ]
tree_p1 = [tree_p0[0] + tree_width, tree_height+ymargin ]
total_svg_width = tree_p1[0] + xmargin
## node_position tells us where the different clusters are located, vertically
##
plotter = svg_basic.SVG_tree_plotter()
Log('Canvas_tree 2nd time')
node_position,Transform,canvas_tree_min_rmsd, canvas_tree_w_factor = \
score_trees_devel.Canvas_tree( tree, names, sizes, tree_p0, tree_p1, branch_width_fraction,
plotter, label_internal_nodes = False,
score_range_for_coloring = color_score_range )
cmds.extend( plotter.cmds )
## label pvals
if edge_pvals:
plotting_info=(sizes, node_position, Transform, canvas_tree_w_factor, canvas_tree_min_rmsd)
label_pval_edges( cmds, edge_pvals, tree, plotting_info )
## now we make an svg file
prefix = '{}_tree_{}_{}_{}'.format(clones_file[:-4],ab,epitope,color_scheme)
print 'create: {}.png'.format(prefix)
svg_basic.create_file( cmds, total_svg_width, total_svg_height, prefix+'.svg', create_png=True)
|
|
##
## ODB2 - Pure Python object database
## Copyright (C) 2004 Sami Hangaslammi (shang@jyu.fi)
##
import weakref
import gc
import time
import copy
import cPickle
import filestr
class ODBError(Exception): pass
class NotInitialised(ODBError): pass
class AlreadyInitialised(ODBError): pass
class NoSuchRoot(ODBError): pass
class InvalidObject(ODBError): pass
class RestoreError(ODBError): pass
class NoTransaction(ODBError): pass
error = ODBError ## so you can catch odb.error
from trdict import TrackingDict
## --- option flags ---
_auto_wake = True
_auto_transaction = False
_use_data_cache = True
_use_object_cache = False
_storage = None
_storage_cache = TrackingDict()
_object_cache = TrackingDict()
_roots = {}
_transaction = None ## current transaction
_transactions = weakref.WeakKeyDictionary()
_pickle_proto = -1
_debug_info = 0
_allocblock = 256
def open(filename, pickle_proto=None):
global _storage, _roots, _pickle_proto
if pickle_proto is not None:
_pickle_proto = pickle_proto
if _storage is not None:
raise AlreadyInitialised
_storage = filestr.ObjectStorage(filename, _allocblock)
_storage.pickle_proto = _pickle_proto
if not _storage.has_id(0):
_roots = {}
id = _storage.new(_roots)
assert id == 0
else:
_roots = _storage[0]
def set_debug(flag):
global _debug_info
_debug_info = flag
def set_auto_wake(flag):
global _auto_wake
_auto_wake = flag
def set_auto_transaction(flag):
global _auto_transaction
_auto_transaction = flag
def set_data_cache(flag):
global _use_data_cache
_use_data_cache = flag
def set_object_cache(flag):
global _use_object_cache
_use_object_cache = flag
def set_flags(debug=None, auto_wake=None, auto_transaction=None, data_cache=None, object_cache=None):
if debug is not None: set_debug(debug)
if auto_wake is not None: set_auto_wake(auto_wake)
if auto_transaction is not None: set_auto_transaction(auto_transaction)
if data_cache is not None: set_data_cache(data_cache)
if object_cache is not None: set_object_cache(object_cache)
def transaction():
return _transaction
def new_transaction():
global _transaction
t = _transaction = ODBTransaction()
return t
def clear_transaction():
global _transaction
_transaction = None
def clear_cache(timelimit=0):
if _use_data_cache:
_storage_cache.del_old(timelimit)
if _use_object_cache:
_object_cache.del_old(timelimit)
def close():
global _storage, _roots
if _storage is None:
raise NotInitialised
_roots = {}
_storage.close()
_storage = None
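## --- Hedged usage sketch (added for illustration, not part of the original module) ---
## Typical lifecycle of the module-level API defined above; the filename is a
## hypothetical example and the function is never called by the module itself.
def _example_open_and_close():
    open("example.odb")                      # create or open the object storage
    set_flags(debug=0, auto_transaction=True)
    t = new_transaction()                    # ODBTransaction, defined below
    # ... create/modify ODBObject instances and register roots on `t` here ...
    t.commit()
    close()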
class TransactionError(ODBError): pass
class CommitConflict(TransactionError): pass
class ODBTransaction(object):
def __new__(cls):
o = object.__new__(cls)
_transactions[o] = None
if _debug_info:
print "new transaction %s" % o
return o
def __init__(self):
self._cache = {} ## id -> proxy/object
self._commiting = False
self._count = 0
self._aborted = False
self._new_roots = {}
self._read_roots = {}
self._write_buffer = {}
## actions made by other transactions during this transaction
self._other_reads = {}
self._other_writes = {}
self._other_roots = {}
def __getattribute__(self, key):
## EXPERIMENTAL: for automatically setting self as the current transaction when methods are accessed
global _transaction
_transaction = self
return object.__getattribute__(self, key)
def activate(self):
"""transaction.activate() -> None
Activate this transaction.
"""
global _transaction
if self._aborted:
raise TransactionError, "Cannot activate a finished transaction"
if self._commiting:
raise TransactionError, "Cannot activate a transaction that is commiting data"
_transaction = self
def commit(self):
"""transaction.commit() -> int
Returns the number of objects updated in the commit. The method will
throw CommitConflict if another transaction has modified the same
objects.
"""
global _transaction
if self._aborted:
raise TransactionError, "Cannot commit a finished transaction"
if self._commiting:
raise TransactionError, "Already commiting this transaction"
if _storage is None:
raise NotInitialised
if _debug_info:
print "committing transaction %s" % self
try:
self._commiting = True
_transaction = self
deletions = {}
reads = {}
writes = {}
## Check for conflicts
for k,v in self._cache.items():
if not isinstance(v, ODBProxy) and k in self._other_writes:
raise CommitConflict
for k,v in self._new_roots.items():
if k in self._other_roots:
raise CommitConflict
for k in self._read_roots:
if k in self._other_roots:
raise CommitConflict
## Write updated objects and mark read objects
for k,v in self._cache.items():
if not isinstance(v, ODBProxy):
id = v._p_id
if v._p_deleted:
writes[id] = None
deletions[id] = None
elif v._p_changed:
writes[id] = None
self._store_object(v)
else:
reads[id] = None
new_roots = {}
## Validate new roots
for k,v in self._new_roots.iteritems():
id = self._flatten_object(v)
new_roots[k] = id
except:
self._commiting = False
raise
## Everything has been pickled successfully. Now do the real thing.
## Update the status of other transactions
for t in _transactions:
if t is not self:
t._other_reads.update(reads)
t._other_writes.update(writes)
t._other_roots.update(new_roots)
## Remove deleted items
for id in deletions:
if id in _storage:
del _storage[id]
self._count += 1
if _use_data_cache and id in _storage_cache: ## TODO: faster to try-catch
del _storage_cache[id]
if _use_object_cache and id in _object_cache:
del _object_cache[id]
## Write updated objects
for id, pckl in self._write_buffer.iteritems():
if id in _storage:
if _debug_info:
print "writing object %i to file" % id
_storage.set_pickle(id, pckl)
if _use_data_cache: _storage_cache[id] = pckl
self._count += 1
self._flatten_object(self._cache[id])
## Write new roots
if new_roots:
_roots.update(new_roots)
_storage[0] = _roots
_storage.flush()
count = self._count
self._aborted = True
self._commiting = False
del _transactions[self]
if _transaction is self:
_transaction = None
return count
def restart(self):
"""transaction.restart() -> None
Cancel all changes and restart the transaction. All objects woken
by this transaction are put back to hibernation and must be reawakened.
"""
if self._aborted:
raise TransactionError, "Cannot restart a finished transaction"
self.__init__() ## TODO
def abort(self):
"""transaction.abort() -> None
Cancel all changes and invalidate this transaction.
"""
global _transaction
self.__init__() ## TODO
del _transactions[self]
self._aborted = True
if _transaction is self:
_transaction = None
### --- ###
def get_roots(self):
"""transaction.get_roots() -> [list of root keys]"""
return _roots.keys() ## TODO
def get_root(self, name=""):
"""transaction.get_root([name]) -> root
Return a root object of the database and awaken it.
"""
if name in self._new_roots:
return self._new_roots[name]
else:
if name not in _roots:
raise NoSuchRoot, name
id = _roots[name]
self._read_roots[name] = None
if name in self._other_roots:
del self._other_roots[name]
proxy = self._restore_object(id)
return self.wake(proxy)
def set_root(self, name, obj=None):
"""transaction.set_root([name,] object) -> None
Assign a root object for the database. Effective after commit.
"""
if obj is None:
obj = name
name = ""
self.wake(obj)
if not isinstance(obj, (ODBObject, ODBProxy)):
raise TypeError, "root objects must be instances of ODBObject"
if _storage is None:
raise NotInitialised
self._new_roots[name] = obj
def mark_changed(self, obj):
"""transaction.mark_changed(object) -> None
Mark an object as "changed" so that it will be written to the
database when committing the transaction. Usually ODBObjects can
keep track of this automatically, but this method might be needed
if an object has non-odb-aware mutable attributes.
"""
global _transaction
_transaction = self
obj.__odb_changed__()
def wake(self, *objs):
"""transaction.wake(object[,object2][,object3]...) -> object
Awaken a proxy object so that it can be used in operations.
"""
global _transaction
_transaction = self
for obj in objs:
if not isinstance(obj, ODBProxy):
continue
self._restore_proxy(obj)
if len(objs) == 1:
return objs[0]
return objs
def delete(self, obj):
"""transaction.delete(object) -> None
Cause an object to be deleted from database when this
transaction is committed.
"""
global _transaction
_transaction = self
if not isinstance(obj, (ODBProxy, ODBObject)):
raise TypeError, "cannot remove a non-odb object"
self.wake(obj)
if obj._p_deleted:
return
obj.__odb_changed__()
obj._p_deleted = True
def _reduce_object(self, obj):
if not isinstance(obj, ODBObject):
raise TypeError, "can only reduce isntances of ODBObject"
if obj._p_id not in self._cache:
raise TransactionError, "object %r not owned by transaction %r" % (obj, self)
dct = obj.__get_odb_state__()
cls = obj.__class__
if _use_object_cache and obj._p_id in _object_cache:
_object_cache[obj._p_id] = (cls,dct)
return (cls,dct)
def _pickle(self, obj):
reduced = self._reduce_object(obj)
return cPickle.dumps(reduced, _pickle_proto)
def _new_object(self, obj):
assert self._commiting
if not isinstance(obj, ODBObject):
raise TypeError, "can only register isntances of ODBObject with the ODB"
if _storage is None:
raise NotInitialised
if obj._p_deleted:
return
if "_p_id" in obj.__dict__:
return obj._p_id
id = _storage.new_id()
if _debug_info:
print "new object %r gets id %i" % (obj, id)
obj._p_id = id
obj._p_changed = True
self._cache[id] = obj
self._write_buffer[id] = self._pickle(obj)
return id
def _store_object(self, obj):
assert self._commiting
if isinstance(obj, ODBProxy):
return obj._p_id
if not isinstance(obj, ODBObject):
raise InvalidObject, "Cannot store object of type %s" % type(obj)
if obj._p_deleted:
return
if "_p_id" in obj.__dict__:
id = obj._p_id
if '_p_changed' not in obj.__dict__ or obj._p_changed:
if _debug_info:
print "putting object %i to write buffer" % id
self._write_buffer[id] = self._pickle(obj)
else:
if _debug_info:
print "object %i hasn't changed, not saving" % id
obj._p_changed = False
return id
return self._new_object(obj)
def _flatten_object(self, obj):
assert self._commiting
if isinstance(obj, ODBProxy):
return obj._p_id
if obj._p_deleted:
return -1
if not isinstance(obj, ODBObject):
raise InvalidObject, "Cannot flatten object of type %s" % type(obj)
self._store_object(obj)
id = obj._p_id
if _debug_info:
print "flatting object %i to a proxy" % id
obj.__clear_odb_state__()
obj.__dict__.clear()
obj.__dict__.update({'_p_id':id})
obj.__class__ = ODBProxy
return id
def _load_state(self, id):
if _use_data_cache:
if id in _storage_cache: ## TODO: probably faster with try-catch
pckl = _storage_cache[id]
else:
pckl = _storage.get_pickle(id)
_storage_cache[id] = pckl
else:
pckl = _storage.get_pickle(id)
##cls,dct = _storage[id]
return cPickle.loads(pckl)
def _restore_proxy(self, proxy):
if not isinstance(proxy, ODBProxy):
raise TypeError, "tried to restore a non-proxy object"
id = proxy._p_id
if id in self._cache and not isinstance(self._cache[id], ODBProxy):
if _debug_info:
print "getting object %i from transaction cache" % id
return self._cache[id]
if _debug_info:
print "restoring object %i" % id
if _use_object_cache and id in _object_cache:
if _debug_info:
print "...from object cache"
cls,dct = _object_cache[id]
else:
cls,dct = self._load_state(id)
obj = cls.__new__(cls)
obj.__set_odb_state__(dct)
obj.__dict__['_p_changed'] = 0
obj.__dict__['_p_id'] = id
obj.__dict__['_p_deleted'] = False
self._cache[id] = obj
if _use_object_cache:
_object_cache[id] = cls,dct
if _debug_info:
print "...got object %r" % obj
if id in self._other_writes:
## object read after another transaction committed it, so it is up-to-date
del self._other_writes[id]
return obj
def _restore_object(self, id):
global _transaction
_transaction = self
return _restore_object(id)
def _r(id):
if _storage is None:
raise NotInitialised
if _transaction is None:
if not _auto_transaction:
raise NoTransaction
new_transaction()
if id not in _storage:
if _debug_info:
print "Object %i has been deleted!" % id
return None
## try:
## obj = _transaction._cache[id]
## if _debug_info:
## print "restoring object %i from cache" % id
## except KeyError:
if _debug_info:
print "creating proxy for object %i" % id
obj = ODBProxy()
obj._p_id = id
if id not in _transaction._cache:
_transaction._cache[id] = obj
return obj
_restore_object = _r
_r.__safe_for_unpickling__ = 1
def _none():
return None
_none.__safe_for_unpickling__ = 1
class ODBProxy(object):
_members = '_p_id', '__getstate__','__setstate__','__reduce__','_p_restore','__reduce_ex__','__class__'
def _p_restore(self):
if _transaction is None:
if not _auto_transaction:
raise NoTransaction
new_transaction()
if not _auto_wake and (self._p_id not in _transaction._cache \
or isinstance(_transaction._cache[self._p_id], ODBProxy)):
raise ODBError, "Accessing an hibernating object. Call wake first"
return _transaction._restore_proxy(self)
def __reduce__(self):
return (_r, (self._p_id,))
def __getattribute__(self, name):
if name in ODBProxy._members:
return object.__getattribute__(self, name)
if _debug_info:
print "getting attribute %s from proxy" % name
state = self._p_restore()
return getattr(state, name)
def __setattr__(self, name, value):
if name in ODBProxy._members:
object.__setattr__(self,name,value)
return
state = self._p_restore()
setattr(state, name, value)
def __delattr__(self, name):
state = self._p_restore()
delattr(state, name)
def __hasattr__(self, name):
state = self._p_restore()
return hasattr(state, name)
def __getitem__(self, name):
state = self._p_restore()
return state.__getitem__(name)
def __setitem__(self, name, value):
state = self._p_restore()
state.__setitem__(name, value)
def __contains__(self, name):
state = self._p_restore()
return state.__contains__(name)
def __delitem__(self, name):
state = self._p_restore()
state.__delitem__(name)
def __iter__(self):
state = self._p_restore()
return iter(state)
def __hash__(self):
state = self._p_restore()
return hash(state)
def __str__(self):
state = self._p_restore()
return str(state)
def __repr__(self):
state = self._p_restore()
return repr(state)
def __cmp__(self, other):
state = self._p_restore()
return cmp(state, other)
def __len__(self):
state = self._p_restore()
return len(state)
class ODBObject(object):
_p_deleted = False
_p_changed = False
def __reduce__(self):
#print "reducing %r" % self
if '_p_deleted' in self.__dict__ and self._p_deleted:
return (_none, ())
if _transaction is None:
raise NoTransaction
if not '_p_id' in self.__dict__:
_transaction._new_object(self)
#print "new object:%s" % self._p_id
else:
#print "old object:%s" % self._p_id
if self._p_id not in _transaction._cache:
raise TransactionError, "object in wrong transaction"
return (_r, (self._p_id,))
def __get_odb_state__(self):
d = {}
d.update(self.__dict__)
if '_p_id' in d: del d['_p_id']
if '_p_changed' in d: del d['_p_changed']
if '_p_deleted' in d: del d['_p_deleted']
return d
def __set_odb_state__(self, state):
self.__dict__.clear()
self.__dict__.update(state)
def __clear_odb_state__(self):
pass
def __odb_changed__(self):
## TODO: copy-on-write
if self._p_changed: return
if _use_object_cache and "_p_id" in self.__dict__ and self._p_id in _object_cache:
if _transaction is None:
if not _auto_transaction:
raise NoTransaction
new_transaction()
_transaction._cache[self._p_id] = self
cls,dct = _transaction._load_state(self._p_id)
id = self._p_id
deleted = self._p_deleted
self.__set_odb_state__(dct)
self.__dict__['_p_id'] = id
self.__dict__['_p_deleted'] = deleted
object.__setattr__(self, "_p_changed", True)
def __setitem__(self, key, value):
self.__odb_changed__()
object.__setitem__(self, key, value)
def __setattr__(self, key, value):
if key.startswith("_p_") or key.startswith("__"):
pass
else:
self.__odb_changed__()
object.__setattr__(self, key, value)
def __delitem__(self, key):
self.__odb_changed__()
object.__delitem__(self, key)
def __delattr__(self, key):
self.__odb_changed__()
object.__delattr__(self, key)
def type(self):
return type(self)
def isinstance(self, cls):
return isinstance(self, cls)
class _NoParam(object): pass
class ODBList(ODBObject):
def __init__(self, items=None):
if items is None:
self.list = []
else:
self.list = list(items)
def __setitem__(self, index, value):
self.__odb_changed__()
self.list[index] = value
def __delitem__(self, index):
self.__odb_changed__()
del self.list[index]
def insert(self, index, obj):
self.__odb_changed__()
self.list.insert(index, obj)
def append(self, obj):
self.__odb_changed__()
self.list.append(obj)
def pop(self, index=None):
self.__odb_changed__()
if index is None:
return self.list.pop()
return self.list.pop(index)
def sort(self):
self.__odb_changed__()
self.list.sort()
def __getitem__(self, index):
return self.list[index]
def __iter__(self):
return iter(self.list)
def __contains__(self, item):
return item in self.list
def __len__(self):
return len(self.list)
class ODBDict(ODBObject):
def __init__(self, *arg):
self.dict = dict(*arg)
def __setitem__(self, key, value):
self.__odb_changed__()
self.dict[key] = value
def __delitem__(self, key):
self.__odb_changed__()
del self.dict[key]
def setdefault(self, key, value):
self.__odb_changed__()
return self.dict.setdefault(key, value)
def clear(self):
self.__odb_changed__()
self.dict.clear()
def update(self, src):
self.__odb_changed__()
self.dict.update(src)
def get(self, key, default=_NoParam):
if default is _NoParam:
return self.dict.get(key)
return self.dict.get(key, default)
def pop(self, key, default=_NoParam):
self.__odb_changed__()
if default is _NoParam:
return self.dict.pop(key)
return self.dict.pop(key, default)
def __getitem__(self, key):
return self.dict[key]
def __iter__(self):
return iter(self.dict)
def __contains__(self, key):
return key in self.dict
def __len__(self):
return len(self.dict)
def items(self):
return self.dict.items()
def keys(self):
return self.dict.keys()
def values(self):
return self.dict.values()
def iteritems(self):
return self.dict.iteritems()
def iterkeys(self):
return self.dict.iterkeys()
def itervalues(self):
return self.dict.itervalues()
class ODBObjectVersioned(ODBObject):
persistenceVersion = 0
def __get_odb_state__(self):
d=ODBObject.__get_odb_state__(self)
d['persistenceVersion']=self.__class__.persistenceVersion
print '__get_odb_state__', d
return d
def __set_odb_state__(self, state):
ODBObject.__set_odb_state__(self, state)
curVer=state.get('persistenceVersion', 0)
## print 'preVer', curVer, self.__class__.persistenceVersion
if curVer < self.__class__.persistenceVersion:
while curVer < self.__class__.persistenceVersion:
curVer += 1
method=self.__class__.__dict__.get('upgradeToVersion%d' % curVer, None)
if method:
method(self)
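## --- Hedged usage sketch (added for illustration, not part of the original module) ---
## Persisting a custom ODBObject subclass and reading it back in a second
## transaction. The class, root name and attribute are hypothetical, and open()
## is assumed to have been called already.
class _ExamplePerson(ODBObject):
    def __init__(self, name):
        self.name = name

def _example_roundtrip():
    t = new_transaction()
    t.set_root("alice", _ExamplePerson("Alice"))
    t.commit()
    t2 = new_transaction()
    alice = t2.get_root("alice")             # woken object (or proxy) for the root
    return alice.name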
|
|
# Copyright 2013: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import argparse
import inspect
import json
import os
import sys
import textwrap
import warnings
import jsonschema
import prettytable
import sqlalchemy.exc
from rally import api
from rally.common import cfg
from rally.common import logging
from rally.common.plugin import info
from rally import exceptions
from rally.utils import encodeutils
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
# Some CLI-specific constants
MARGIN = 3
class MissingArgs(Exception):
"""Supplied arguments are not sufficient for calling a function."""
def __init__(self, missing):
self.missing = missing
msg = "Missing arguments: %s" % ", ".join(missing)
super(MissingArgs, self).__init__(msg)
def validate_args(fn, *args, **kwargs):
"""Check that the supplied args are sufficient for calling a function.
>>> validate_args(lambda a: None)
Traceback (most recent call last):
...
MissingArgs: Missing arguments: a
>>> validate_args(lambda a, b, c, d: None, 0, c=1)
Traceback (most recent call last):
...
MissingArgs: Missing arguments: b, d
:param fn: the function to check
:param args: the positional arguments supplied
:param kwargs: the keyword arguments supplied
"""
required_args = [
p.name for p in inspect.signature(fn).parameters.values()
if p.default == inspect.Parameter.empty
and p.kind == inspect.Parameter.POSITIONAL_OR_KEYWORD]
missing_required_args = required_args[len(args):]
missing = [arg for arg in missing_required_args if arg not in kwargs]
if missing:
raise MissingArgs(missing)
def print_list(objs, fields, formatters=None, sortby_index=0,
mixed_case_fields=None, field_labels=None,
normalize_field_names=False,
table_label=None, print_header=True, print_border=True,
print_row_border=False,
out=sys.stdout):
"""Print a list or objects as a table, one row per object.
:param objs: iterable of :class:`Resource`
:param fields: attributes that correspond to columns, in order
:param formatters: `dict` of callables for field formatting
:param sortby_index: index of the field for sorting table rows
:param mixed_case_fields: fields corresponding to object attributes that
have mixed case names (e.g., 'serverId')
:param field_labels: Labels to use in the heading of the table, default to
fields.
:param normalize_field_names: If True, field names will be transformed,
e.g. "Field Name" -> "field_name", otherwise they will be used
unchanged.
:param table_label: Label to use as header for the whole table.
:param print_header: print table header.
:param print_border: print table border.
:param print_row_border: use border between rows
:param out: stream to write output to.
"""
formatters = formatters or {}
mixed_case_fields = mixed_case_fields or []
field_labels = field_labels or fields
if len(field_labels) != len(fields):
raise ValueError("Field labels list %(labels)s has different number of"
" elements than fields list %(fields)s"
% {"labels": field_labels, "fields": fields})
kwargs = {}
if sortby_index is not None:
kwargs = {"sortby": field_labels[sortby_index]}
if print_border and print_row_border:
headers_horizontal_char = "="
kwargs["hrules"] = prettytable.ALL
else:
headers_horizontal_char = "-"
pt = prettytable.PrettyTable(field_labels)
pt.align = "l"
for o in objs:
row = []
for field in fields:
if field in formatters:
row.append(formatters[field](o))
else:
field_name = field
if normalize_field_names:
if field_name not in mixed_case_fields:
field_name = field_name.lower()
field_name = field_name.replace(" ", "_").replace("-", "_")
if isinstance(o, dict):
data = o.get(field_name, "")
else:
data = getattr(o, field_name, "")
row.append(data)
pt.add_row(row)
if not print_border or not print_header:
pt.set_style(prettytable.PLAIN_COLUMNS)
pt.left_padding_width = 0
pt.right_padding_width = 1
table_body = pt.get_string(header=print_header,
border=print_border,
**kwargs) + "\n"
if print_border and print_row_border:
table_body = table_body.split("\n", 3)
table_body[2] = table_body[2].replace("-", headers_horizontal_char)
table_body = "\n".join(table_body)
table_header = ""
if table_label:
table_width = table_body.index("\n")
table_header = make_table_header(
table_label, table_width, horizontal_char=headers_horizontal_char)
table_header += "\n"
if table_header:
out.write(encodeutils.safe_encode(table_header).decode())
out.write(encodeutils.safe_encode(table_body).decode())
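# Illustrative usage (not part of the upstream Rally module): a minimal
# sketch of print_list() rendering hypothetical rows into an in-memory
# stream instead of stdout.
def _example_print_list():
    import io
    buf = io.StringIO()
    rows = [{"name": "boot_server", "status": "finished"},
            {"name": "list_servers", "status": "crashed"}]
    print_list(rows, fields=["name", "status"],
               field_labels=["Name", "Status"], out=buf)
    return buf.getvalue()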
def print_dict(obj, fields=None, formatters=None, mixed_case_fields=False,
normalize_field_names=False, property_label="Property",
value_label="Value", table_label=None, print_header=True,
print_border=True, wrap=0, out=sys.stdout):
"""Print dict as a table.
:param obj: dict to print
    :param fields: keys of obj to print. Defaults to all keys
:param formatters: `dict` of callables for field formatting
:param mixed_case_fields: fields corresponding to object attributes that
have mixed case names (e.g., 'serverId')
:param normalize_field_names: If True, field names will be transformed,
e.g. "Field Name" -> "field_name", otherwise they will be used
unchanged.
:param property_label: label of "property" column
:param value_label: label of "value" column
:param table_label: Label to use as header for the whole table.
:param print_header: print table header.
:param print_border: print table border.
:param out: stream to write output to.
"""
formatters = formatters or {}
mixed_case_fields = mixed_case_fields or []
if not fields:
if isinstance(obj, dict):
fields = sorted(obj.keys())
else:
fields = [name for name in dir(obj)
if (not name.startswith("_")
and not callable(getattr(obj, name)))]
pt = prettytable.PrettyTable([property_label, value_label], caching=False)
pt.align = "l"
for field_name in fields:
if field_name in formatters:
data = formatters[field_name](obj)
else:
field = field_name
if normalize_field_names:
if field not in mixed_case_fields:
field = field_name.lower()
field = field.replace(" ", "_").replace("-", "_")
if isinstance(obj, dict):
data = obj.get(field, "")
else:
data = getattr(obj, field, "")
# convert dict to str to check length
if isinstance(data, (dict, list)):
data = json.dumps(data)
if wrap > 0:
data = textwrap.fill(str(data), wrap)
# if value has a newline, add in multiple rows
# e.g. fault with stacktrace
if (data and isinstance(data, str)
and (r"\n" in data or "\r" in data)):
# "\r" would break the table, so remove it.
if "\r" in data:
data = data.replace("\r", "")
lines = data.strip().split(r"\n")
col1 = field_name
for line in lines:
pt.add_row([col1, line])
col1 = ""
else:
if data is None:
data = "-"
pt.add_row([field_name, data])
table_body = pt.get_string(header=print_header,
border=print_border) + "\n"
table_header = ""
if table_label:
table_width = table_body.index("\n")
table_header = make_table_header(table_label, table_width)
table_header += "\n"
if table_header:
out.write(encodeutils.safe_encode(table_header).decode())
out.write(encodeutils.safe_encode(table_body).decode())
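# Illustrative usage (not part of the upstream Rally module): print_dict()
# shown on a hypothetical record, wrapping long values at 40 characters.
def _example_print_dict():
    import io
    buf = io.StringIO()
    record = {"uuid": "6fd9b19d-0bf0-4fd8-a2b4-1fbd01a67c4c",
              "status": "finished",
              "description": "An illustrative, made-up record."}
    print_dict(record, wrap=40, out=buf)
    return buf.getvalue()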
def make_table_header(table_label, table_width,
junction_char="+", horizontal_char="-",
vertical_char="|"):
"""Generalized way make a table header string.
:param table_label: label to print on header
:param table_width: total width of table
:param junction_char: character used where vertical and
horizontal lines meet.
:param horizontal_char: character used for horizontal lines.
:param vertical_char: character used for vertical lines.
:returns: string
"""
    if len(table_label) >= (table_width - 2):
        raise ValueError(
            "Table header %r is longer than total width of the table."
            % table_label)
label_and_space_width = table_width - len(table_label) - 2
padding = 0 if label_and_space_width % 2 == 0 else 1
half_table_width = label_and_space_width // 2
left_spacing = (" " * half_table_width)
right_spacing = (" " * (half_table_width + padding))
border_line = "".join((junction_char,
(horizontal_char * (table_width - 2)),
junction_char,))
label_line = "".join((vertical_char,
left_spacing,
table_label,
right_spacing,
vertical_char,))
return "\n".join((border_line, label_line,))
def make_header(text, size=80, symbol="-"):
"""Unified way to make header message to CLI.
:param text: what text to write
:param size: Length of header decorative line
:param symbol: What symbol to use to create header
"""
header = symbol * size + "\n"
header += "%s\n" % text
header += symbol * size + "\n"
return header
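# Illustrative usage (not part of the upstream Rally module): make_header()
# frames a message between two decorative lines of the requested width.
def _example_make_header():
    return make_header("Verification results", size=40, symbol="=")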
def suppress_warnings(f):
f._suppress_warnings = True
return f
class CategoryParser(argparse.ArgumentParser):
"""Customized arguments parser
We need this one to override hardcoded behavior.
So, we want to print item's help instead of 'error: too few arguments'.
Also, we want not to print positional arguments in help message.
"""
def format_help(self):
formatter = self._get_formatter()
# usage
formatter.add_usage(self.usage, self._actions,
self._mutually_exclusive_groups)
# description
formatter.add_text(self.description)
# positionals, optionals and user-defined groups
# INFO(oanufriev) _action_groups[0] contains positional arguments.
for action_group in self._action_groups[1:]:
formatter.start_section(action_group.title)
formatter.add_text(action_group.description)
formatter.add_arguments(action_group._group_actions)
formatter.end_section()
# epilog
formatter.add_text(self.epilog)
# determine help from format above
return formatter.format_help()
def error(self, message):
self.print_help(sys.stderr)
if message.startswith("argument") and message.endswith("is required"):
            # NOTE(pirsriva) argparse currently reports only one missing
            # argument at a time, i.e. the error message will not list all
            # the missing arguments at once.
missing_arg = message.split()[1]
print("Missing argument:\n%s" % missing_arg)
sys.exit(2)
def pretty_float_formatter(field, ndigits=None):
"""Create a float value formatter function for the given field.
    :param field: name of the attribute or key whose value is formatted
    :param ndigits: number of digits to keep after the decimal point;
        the default of None disables rounding
:returns: field formatter function
"""
def _formatter(obj):
value = obj[field] if isinstance(obj, dict) else getattr(obj, field)
if type(value) in (int, float):
if ndigits:
return round(value, ndigits)
return value
return "n/a"
return _formatter
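# Illustrative usage (not part of the upstream Rally module): pairing
# pretty_float_formatter() with print_list() so a hypothetical "duration"
# column is rounded to two decimal places and missing values show as "n/a".
def _example_pretty_float_formatter():
    import io
    buf = io.StringIO()
    rows = [{"action": "nova.boot_server", "duration": 12.3456},
            {"action": "nova.delete_server", "duration": None}]
    print_list(rows, fields=["action", "duration"],
               formatters={"duration": pretty_float_formatter("duration", 2)},
               out=buf)
    return buf.getvalue()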
def args(*args, **kwargs):
def _decorator(func):
func.__dict__.setdefault("args", []).insert(0, (args, kwargs))
if "metavar" not in kwargs and "action" not in kwargs:
# NOTE(andreykurilin): argparse constructs awful metavars...
kwargs["metavar"] = "<%s>" % args[0].replace(
"--", "").replace("-", "_")
return func
return _decorator
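# Illustrative usage (not part of the upstream Rally module): stacking the
# args() decorator on a hypothetical CLI method.  Each decorator prepends an
# (args, kwargs) pair to func.args, which _add_command_parsers() later feeds
# to parser.add_argument(); dest defaults to the flag name without leading
# hyphens.
class _ExampleCommands(object):
    @args("--uuid", type=str, help="UUID of the task.")
    @args("--detailed", action="store_true", help="Show detailed output.")
    def show(self, api, uuid=None, detailed=False):
        """Show a single record (illustrative only)."""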
def alias(command_name):
"""Allow cli to use alias command name instead of function name.
:param command_name: desired command name
"""
def decorator(func):
func.alias = command_name
return func
return decorator
def deprecated_args(*args, **kwargs):
def _decorator(func):
if "release" not in kwargs:
raise ValueError("'release' is required keyword argument of "
"'deprecated_args' decorator.")
release = kwargs.pop("release")
alternative = kwargs.pop("alternative", None)
help_msg = "[Deprecated since Rally %s] " % release
if alternative:
help_msg += "Use '%s' instead. " % alternative
if "help" in kwargs:
help_msg += kwargs["help"]
kwargs["help"] = help_msg
func.__dict__.setdefault("args", []).insert(0, (args, kwargs))
func.__dict__.setdefault("deprecated_args", {})
func.deprecated_args[args[0]] = (release, alternative)
return func
return _decorator
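# Illustrative usage (not part of the upstream Rally module):
# deprecated_args() keeps an old flag working while recording the release it
# was deprecated in, so validate_deprecated_args() can warn when it is used.
# The flag names and release number below are hypothetical.
class _ExampleDeprecation(object):
    @args("--uuid", type=str, help="UUID of the task.")
    @deprecated_args("--task-id", dest="uuid", type=str,
                     release="3.0.0", alternative="--uuid")
    def status(self, api, uuid=None):
        """Show the status of a record (illustrative only)."""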
def help_group(uuid):
"""Label cli method with specific group.
Joining methods by groups allows to compose more user-friendly help
messages in CLI.
:param uuid: Name of group to find common methods. It will be used for
sorting groups in help message, so you can start uuid with
some number (i.e "1_launcher", "2_management") to put groups in proper
order. Note: default group had "0" uuid.
"""
def wrapper(func):
func.help_group = uuid
return func
return wrapper
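# Illustrative usage (not part of the upstream Rally module): help_group()
# tags methods so _methods_of() can group and order commands in the help
# output; the group names below are hypothetical.
class _ExampleGrouping(object):
    @help_group("1_operations")
    def start(self, api):
        """Start something (illustrative only)."""
    @help_group("2_maintenance")
    def cleanup(self, api):
        """Clean up leftovers (illustrative only)."""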
def _methods_of(cls):
"""Get all callable methods of a class that don't start with underscore.
:returns: a list of tuples of the form (method_name, method)
"""
    # Unbound methods existed in Python 2 but were removed in Python 3, so
    # "inspect.ismethod" is used here for Python 2 and "inspect.isfunction"
    # for Python 3.
all_methods = inspect.getmembers(
cls, predicate=lambda x: inspect.ismethod(x) or inspect.isfunction(x))
methods = [m for m in all_methods if not m[0].startswith("_")]
help_groups = {}
for m in methods:
group = getattr(m[1], "help_group", "0")
help_groups.setdefault(group, []).append(m)
if len(help_groups) > 1:
# we should sort methods by groups
methods = []
for group in sorted(help_groups.items(), key=lambda x: x[0]):
if methods:
# None -> empty line between groups
methods.append((None, None))
methods.extend(group[1])
return methods
def _compose_category_description(category):
descr_pairs = _methods_of(category)
description = ""
doc = category.__doc__
if doc:
description = doc.strip()
if descr_pairs:
description += "\n\nCommands:\n"
sublen = lambda item: len(item[0]) if item[0] else 0
first_column_len = max(map(sublen, descr_pairs)) + MARGIN
for item in descr_pairs:
if item[0] is None:
description += "\n"
continue
name = getattr(item[1], "alias", item[0].replace("_", "-"))
if item[1].__doc__:
doc = info.parse_docstring(
item[1].__doc__)["short_description"]
else:
doc = ""
name += " " * (first_column_len - len(name))
description += " %s%s\n" % (name, doc)
return description
def _compose_action_description(action_fn):
description = ""
if action_fn.__doc__:
parsed_doc = info.parse_docstring(action_fn.__doc__)
short = parsed_doc.get("short_description")
long = parsed_doc.get("long_description")
description = "%s\n\n%s" % (short, long) if long else short
return description
def _print_version():
from rally.common import version
print("Rally version: %s" % version.version_string())
packages = version.plugins_versions()
if packages:
print("\nInstalled Plugins:")
print("\n".join("\t%s: %s" % p for p in sorted(packages.items())))
def _add_command_parsers(categories, subparsers):
    # INFO(oanufriev) This monkey patching makes our custom parser class be
    # used instead of the native one. It affects all subparsers created below
    # the 'subparsers' parameter of this function (categories and actions).
subparsers._parser_class = CategoryParser
parser = subparsers.add_parser("bash-completion")
parser.add_argument("query_category", nargs="?")
for category in categories:
command_object = categories[category]()
descr = _compose_category_description(categories[category])
parser = subparsers.add_parser(
category, description=descr,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.set_defaults(command_object=command_object)
category_subparsers = parser.add_subparsers(dest="action")
for method_name, method in _methods_of(command_object):
if method is None:
continue
method_name = method_name.replace("_", "-")
descr = _compose_action_description(method)
parser = category_subparsers.add_parser(
getattr(method, "alias", method_name),
formatter_class=argparse.RawDescriptionHelpFormatter,
description=descr, help=descr)
action_kwargs = []
for args, kwargs in getattr(method, "args", []):
# FIXME(markmc): hack to assume dest is the arg name without
# the leading hyphens if no dest is supplied
kwargs.setdefault("dest", args[0][2:])
action_kwargs.append(kwargs["dest"])
kwargs["dest"] = "action_kwarg_" + kwargs["dest"]
parser.add_argument(*args, **kwargs)
parser.set_defaults(action_fn=method)
parser.set_defaults(action_kwargs=action_kwargs)
parser.add_argument("action_args", nargs="*")
def validate_deprecated_args(argv, fn):
if (len(argv) > 3
and (argv[2] == fn.__name__)
and getattr(fn, "deprecated_args", None)):
for item, details in fn.deprecated_args.items():
if item in argv[3:]:
msg = ("The argument `%s` is deprecated since Rally %s." %
(item, details[0]))
if details[1]:
msg += " Use `%s` instead." % details[1]
LOG.warning(msg)
def run(argv, categories):
if len(argv) > 1 and argv[1] in ["version", "--version"]:
_print_version()
return 0
parser = lambda subparsers: _add_command_parsers(categories, subparsers)
category_opt = cfg.SubCommandOpt("category",
title="Command categories",
help="Available categories",
handler=parser)
CONF.register_cli_opt(category_opt)
help_msg = ("Additional custom plugin locations. Multiple files or "
"directories may be specified. All plugins in the specified"
" directories and subdirectories will be imported. Plugins in"
" /opt/rally/plugins and ~/.rally/plugins will always be "
"imported.")
CONF.register_cli_opt(cfg.ListOpt("plugin-paths",
default=os.environ.get(
"RALLY_PLUGIN_PATHS"),
help=help_msg))
# NOTE(andreykurilin): this dirty hack is done to unblock the gates.
# Currently, we are using oslo.config for CLI purpose (don't do this!)
# and it makes the things too complicated.
# To discover which CLI method can be affected by warnings and which not
# (based on suppress_warnings decorator) we need to obtain a desired
# CLI method. It can be done only after initialization of oslo_config
# which is located in rally.api.API init method.
# Initialization of rally.api.API can produce a warning (for example,
# from pymysql), so suppressing of warnings later will not work in such
# case (it is what actually had happened now in our CI with the latest
# release of PyMySQL).
#
# https://bitbucket.org/zzzeek/sqlalchemy/issues/4120/mysql-5720-warns-on-tx_isolation
try:
import pymysql
warnings.filterwarnings("ignore", category=pymysql.Warning)
except ImportError:
pass
try:
rapi = api.API(config_args=argv[1:], skip_db_check=True)
except exceptions.RallyException as e:
print(e)
        return 2
if CONF.category.name == "bash-completion":
print(_generate_bash_completion_script())
        return 0
fn = CONF.category.action_fn
fn_args = [encodeutils.safe_decode(arg)
for arg in CONF.category.action_args]
    # the api instance is always the first argument
fn_args.insert(0, rapi)
fn_kwargs = {}
for k in CONF.category.action_kwargs:
v = getattr(CONF.category, "action_kwarg_" + k)
if v is None:
continue
if isinstance(v, str):
v = encodeutils.safe_decode(v)
fn_kwargs[k] = v
# call the action with the remaining arguments
# check arguments
try:
validate_args(fn, *fn_args, **fn_kwargs)
except MissingArgs as e:
# NOTE(mikal): this isn't the most helpful error message ever. It is
# long, and tells you a lot of things you probably don't want to know
# if you just got a single arg wrong.
print(fn.__doc__)
CONF.print_help()
print("Missing arguments:")
for missing in e.missing:
for arg in fn.args:
if arg[1].get("dest", "").endswith(missing):
print(" " + arg[0][0])
break
        return 1
try:
validate_deprecated_args(argv, fn)
# skip db check for db and plugin commands
if CONF.category.name not in ("db", "plugin"):
rapi.check_db_revision()
if getattr(fn, "_suppress_warnings", False):
with warnings.catch_warnings():
warnings.simplefilter("ignore")
ret = fn(*fn_args, **fn_kwargs)
else:
ret = fn(*fn_args, **fn_kwargs)
return ret
except (IOError, TypeError, ValueError,
exceptions.RallyException, jsonschema.ValidationError) as e:
known_errors = (exceptions.InvalidTaskConfig, )
if logging.is_debug() and not isinstance(e, known_errors):
LOG.exception("Unexpected exception in CLI")
else:
print(e)
return getattr(e, "error_code", 1)
except sqlalchemy.exc.OperationalError as e:
if logging.is_debug():
LOG.exception("Something went wrong with database")
print(e)
print("Looks like Rally can't connect to its DB.")
print("Make sure that connection string in rally.conf is proper:")
print(CONF.database.connection)
return 1
except Exception:
print("Command failed, please check log for more info")
raise
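# Illustrative sketch (not part of the upstream Rally module): how a console
# entry point could wire command categories into run().  The category class
# below is hypothetical; the real mapping lives in rally.cli.main, and
# running this requires a working Rally installation and configuration.
def _example_main():
    class Task(object):
        """Task management (illustrative only)."""
        @args("--uuid", type=str, help="UUID of the task.")
        def status(self, api, uuid=None):
            """Print the status of a task (illustrative only)."""
            print("status of %s" % uuid)
    return run(sys.argv, {"task": Task})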
def _generate_bash_completion_script():
from rally.cli import main
bash_data = """#!/bin/bash
# Standalone _filedir() alternative.
# This exempts from dependence of bash completion routines
function _rally_filedir()
{
test "${1}" \\
&& COMPREPLY=( \\
$(compgen -f -- "${cur}" | grep -E "${1}") \\
$(compgen -o plusdirs -- "${cur}") ) \\
|| COMPREPLY=( \\
$(compgen -o plusdirs -f -- "${cur}") \\
$(compgen -d -- "${cur}") )
}
_rally()
{
declare -A SUBCOMMANDS
declare -A OPTS
%(data)s
for OPT in ${!OPTS[*]} ; do
CMD=${OPT%%%%_*}
CMDSUB=${OPT#*_}
SUBCOMMANDS[${CMD}]+="${CMDSUB} "
done
COMMANDS="${!SUBCOMMANDS[*]}"
COMPREPLY=()
local cur="${COMP_WORDS[COMP_CWORD]}"
local prev="${COMP_WORDS[COMP_CWORD-1]}"
if [[ $cur =~ ^(\\.|\\~|\\/) ]] || [[ $prev =~ ^--out(|put-file)$ ]] ; then
_rally_filedir
elif [[ $prev =~ ^--(task|filename)$ ]] ; then
_rally_filedir "\\.json|\\.yaml|\\.yml"
elif [ $COMP_CWORD == "1" ] ; then
COMPREPLY=($(compgen -W "$COMMANDS" -- ${cur}))
elif [ $COMP_CWORD == "2" ] ; then
COMPREPLY=($(compgen -W "${SUBCOMMANDS[${prev}]}" -- ${cur}))
else
COMMAND="${COMP_WORDS[1]}_${COMP_WORDS[2]}"
COMPREPLY=($(compgen -W "${OPTS[$COMMAND]}" -- ${cur}))
fi
return 0
}
complete -o filenames -F _rally rally
"""
completion = []
for category, cmds in main.categories.items():
for name, command in _methods_of(cmds):
if name is None:
continue
command_name = getattr(command, "alias", name.replace("_", "-"))
args_list = []
for arg in getattr(command, "args", []):
if getattr(command, "deprecated_args", []):
if arg[0][0] not in command.deprecated_args:
args_list.append(arg[0][0])
else:
args_list.append(arg[0][0])
args = " ".join(args_list)
completion.append(""" OPTS["{cat}_{cmd}"]="{args}"\n""".format(
cat=category, cmd=command_name, args=args))
return bash_data % {"data": "".join(sorted(completion))}
|