"""
Copyright (c) 2017 Red Hat, Inc
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the LICENSE file for details.
"""
from jsonschema import ValidationError
import io
import logging
import os
import pkg_resources
import pytest
from textwrap import dedent
import re
import yaml
from atomic_reactor.core import DockerTasker
from atomic_reactor.inner import DockerBuildWorkflow
from atomic_reactor.plugins.pre_reactor_config import (ReactorConfig,
ReactorConfigPlugin,
get_config)
from tests.constants import TEST_IMAGE
from tests.docker_mock import mock_docker
from flexmock import flexmock
class TestReactorConfigPlugin(object):
def prepare(self):
mock_docker()
tasker = DockerTasker()
workflow = DockerBuildWorkflow({'provider': 'git', 'uri': 'asd'},
TEST_IMAGE)
return tasker, workflow
def test_no_config(self):
tasker, workflow = self.prepare()
conf = get_config(workflow)
assert isinstance(conf, ReactorConfig)
same_conf = get_config(workflow)
assert conf is same_conf
@pytest.mark.parametrize('basename', ['reactor-config.yaml', None])
def test_filename(self, tmpdir, basename):
filename = os.path.join(str(tmpdir), basename or 'config.yaml')
with open(filename, 'w'):
pass
tasker, workflow = self.prepare()
plugin = ReactorConfigPlugin(tasker, workflow,
config_path=str(tmpdir),
basename=filename)
assert plugin.run() is None
def test_filename_not_found(self):
tasker, workflow = self.prepare()
plugin = ReactorConfigPlugin(tasker, workflow, config_path='/not-found')
with pytest.raises(Exception):
plugin.run()
def test_no_schema_resource(self, tmpdir, caplog):
class FakeProvider(object):
def get_resource_stream(self, pkg, rsc):
raise IOError
# pkg_resources.resource_stream() cannot be mocked directly
# Instead mock the module-level function it calls.
(flexmock(pkg_resources)
.should_receive('get_provider')
.and_return(FakeProvider()))
filename = os.path.join(str(tmpdir), 'config.yaml')
with open(filename, 'w'):
pass
tasker, workflow = self.prepare()
plugin = ReactorConfigPlugin(tasker, workflow, config_path=str(tmpdir))
with caplog.atLevel(logging.ERROR), pytest.raises(Exception):
plugin.run()
captured_errs = [x.message for x in caplog.records()]
assert "unable to extract JSON schema, cannot validate" in captured_errs
@pytest.mark.parametrize('schema', [
# Invalid JSON
'{',
# Invalid schema
'{"properties": {"any": null}}',
])
def test_invalid_schema_resource(self, tmpdir, caplog, schema):
class FakeProvider(object):
def get_resource_stream(self, pkg, rsc):
                return io.BufferedReader(io.BytesIO(schema.encode('utf-8')))
# pkg_resources.resource_stream() cannot be mocked directly
# Instead mock the module-level function it calls.
(flexmock(pkg_resources)
.should_receive('get_provider')
.and_return(FakeProvider()))
filename = os.path.join(str(tmpdir), 'config.yaml')
with open(filename, 'w'):
pass
tasker, workflow = self.prepare()
plugin = ReactorConfigPlugin(tasker, workflow, config_path=str(tmpdir))
with caplog.atLevel(logging.ERROR), pytest.raises(Exception):
plugin.run()
captured_errs = [x.message for x in caplog.records()]
assert any("cannot validate" in x for x in captured_errs)
@pytest.mark.parametrize(('config', 'errors'), [
("""\
clusters:
foo:
- name: bar
max_concurrent_builds: 1
""", [
"validation error (at top level): "
"%r is a required property" % u'version',
]),
("""\
version: 1
clusters:
foo:
bar: 1
plat/form:
- name: foo
max_concurrent_builds: 1
""", [
"validation error (clusters.foo): None is not of type %r" % u'array',
"validation error (clusters.bar): 1 is not of type %r" % u'array',
re.compile(r"validation error \(clusters\): .*'plat/form'"),
]),
("""\
version: 1
clusters:
foo:
- name: 1
max_concurrent_builds: 1
- name: blah
max_concurrent_builds: one
- name: "2" # quoting prevents error
max_concurrent_builds: 2
- name: negative
max_concurrent_builds: -1
""", [
"validation error (clusters.foo[0].name): "
"1 is not of type %r" % u'string',
"validation error (clusters.foo[1].max_concurrent_builds): "
"'one' is not of type %r" % u'integer',
"validation error (clusters.foo[3].max_concurrent_builds): "
"-1 is less than the minimum of 0",
]),
("""\
version: 1
clusters:
foo:
- name: blah
max_concurrent_builds: 1
enabled: never
""", [
"validation error (clusters.foo[0].enabled): "
"'never' is not of type %r" % u'boolean',
]),
("""\
version: 1
clusters:
foo:
# missing name
- nam: bar
max_concurrent_builds: 1
# missing max_concurrent_builds
- name: baz
max_concurrrent_builds: 2
- name: bar
max_concurrent_builds: 4
extra: false
""", [
"validation error (clusters.foo[0]): "
"%r is a required property" % u'name',
"validation error (clusters.foo[1]): "
"%r is a required property" % u'max_concurrent_builds',
"validation error (clusters.foo[2]): "
"Additional properties are not allowed ('extra' was unexpected)",
])
])
def test_bad_cluster_config(self, tmpdir, caplog, config, errors):
filename = os.path.join(str(tmpdir), 'config.yaml')
with open(filename, 'w') as fp:
fp.write(dedent(config))
tasker, workflow = self.prepare()
plugin = ReactorConfigPlugin(tasker, workflow, config_path=str(tmpdir))
with caplog.atLevel(logging.ERROR), pytest.raises(ValidationError):
plugin.run()
captured_errs = [x.message for x in caplog.records()]
for error in errors:
try:
# Match regexp
assert any(filter(error.match, captured_errs))
except AttributeError:
# String comparison
assert error in captured_errs
def test_bad_version(self, tmpdir):
filename = os.path.join(str(tmpdir), 'config.yaml')
with open(filename, 'w') as fp:
fp.write("version: 2")
tasker, workflow = self.prepare()
plugin = ReactorConfigPlugin(tasker, workflow, config_path=str(tmpdir))
with pytest.raises(ValueError):
plugin.run()
@pytest.mark.parametrize(('config', 'clusters'), [
# Empty config
("", []),
# Built-in default config
(yaml.dump(ReactorConfig.DEFAULT_CONFIG), []),
# Unknown key
("""\
version: 1
special: foo
""", []),
("""\
version: 1
clusters:
ignored:
- name: foo
max_concurrent_builds: 2
platform:
- name: one
max_concurrent_builds: 4
- name: two
max_concurrent_builds: 8
enabled: true
- name: three
max_concurrent_builds: 16
enabled: false
""", [
('one', 4),
('two', 8),
]),
])
def test_good_cluster_config(self, tmpdir, config, clusters):
filename = os.path.join(str(tmpdir), 'config.yaml')
with open(filename, 'w') as fp:
fp.write(dedent(config))
tasker, workflow = self.prepare()
plugin = ReactorConfigPlugin(tasker, workflow, config_path=str(tmpdir))
assert plugin.run() is None
conf = get_config(workflow)
enabled = conf.get_enabled_clusters_for_platform('platform')
assert set([(x.name, x.max_concurrent_builds)
for x in enabled]) == set(clusters)
__all__ = ['imread', 'imsave']
import numpy as np
from six import string_types
from PIL import Image
from ...util import img_as_ubyte, img_as_uint
from ...external.tifffile import imread as tif_imread, imsave as tif_imsave
def imread(fname, dtype=None, img_num=None, **kwargs):
"""Load an image from file.
Parameters
----------
fname : str
File name.
dtype : numpy dtype object or string specifier
Specifies data type of array elements.
img_num : int, optional
Specifies which image to read in a file with multiple images
(zero-indexed).
kwargs : keyword pairs, optional
        Additional keyword arguments to pass through (currently only
        applicable to TIFF files; see `tifffile`'s `imread` function).
Notes
-----
    Tiff files are handled by Christoph Gohlke's tifffile.py [1]_, and support
    many advanced image types including multi-page and floating-point images.
    All other files are read using the Python Imaging Library.
See PIL docs [2]_ for a list of supported formats.
References
----------
.. [1] http://www.lfd.uci.edu/~gohlke/code/tifffile.py.html
.. [2] http://pillow.readthedocs.org/en/latest/handbook/image-file-formats.html
"""
if hasattr(fname, 'lower') and dtype is None:
kwargs.setdefault('key', img_num)
if fname.lower().endswith(('.tiff', '.tif')):
return tif_imread(fname, **kwargs)
im = Image.open(fname)
try:
# this will raise an IOError if the file is not readable
im.getdata()[0]
except IOError as e:
site = "http://pillow.readthedocs.org/en/latest/installation.html#external-libraries"
pillow_error_message = str(e)
error_message = ('Could not load "%s" \n'
'Reason: "%s"\n'
'Please see documentation at: %s') % (fname, pillow_error_message, site)
raise ValueError(error_message)
else:
return pil_to_ndarray(im, dtype=dtype, img_num=img_num)
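# A minimal usage sketch (file names are hypothetical): TIFFs are dispatched
# to tifffile and everything else goes through PIL.
#
#   img = imread('photo.png')              # whole image as an ndarray
#   page = imread('stack.tif', img_num=2)  # third page of a multi-page TIFF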
def pil_to_ndarray(im, dtype=None, img_num=None):
"""Import a PIL Image object to an ndarray, in memory.
Parameters
----------
Refer to ``imread``.
"""
frames = []
grayscale = None
i = 0
    while True:
try:
im.seek(i)
except EOFError:
break
frame = im
if img_num is not None and img_num != i:
im.getdata()[0]
i += 1
continue
if im.mode == 'P':
if grayscale is None:
grayscale = _palette_is_grayscale(im)
if grayscale:
frame = im.convert('L')
else:
frame = im.convert('RGB')
elif im.mode == '1':
frame = im.convert('L')
elif 'A' in im.mode:
frame = im.convert('RGBA')
elif im.mode == 'CMYK':
frame = im.convert('RGB')
if im.mode.startswith('I;16'):
shape = im.size
dtype = '>u2' if im.mode.endswith('B') else '<u2'
if 'S' in im.mode:
dtype = dtype.replace('u', 'i')
frame = np.fromstring(frame.tobytes(), dtype)
frame.shape = shape[::-1]
else:
frame = np.array(frame, dtype=dtype)
frames.append(frame)
i += 1
if img_num is not None:
break
if hasattr(im, 'fp') and im.fp:
im.fp.close()
if img_num is None and len(frames) > 1:
return np.array(frames)
elif frames:
return frames[0]
    elif img_num is not None:
raise IndexError('Could not find image #%s' % img_num)
def _palette_is_grayscale(pil_image):
"""Return True if PIL image in palette mode is grayscale.
Parameters
----------
pil_image : PIL image
PIL Image that is in Palette mode.
Returns
-------
is_grayscale : bool
True if all colors in image palette are gray.
"""
assert pil_image.mode == 'P'
# get palette as an array with R, G, B columns
palette = np.asarray(pil_image.getpalette()).reshape((256, 3))
# Not all palette colors are used; unused colors have junk values.
start, stop = pil_image.getextrema()
    valid_palette = palette[start:stop + 1]  # getextrema() bounds are inclusive
# Image is grayscale if channel differences (R - G and G - B)
# are all zero.
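    # For example, a gray palette row [100, 100, 100] gives
    # np.diff([100, 100, 100]) == [0, 0].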
return np.allclose(np.diff(valid_palette), 0)
def ndarray_to_pil(arr, format_str=None):
"""Export an ndarray to a PIL object.
Parameters
----------
Refer to ``imsave``.
"""
if arr.ndim == 3:
arr = img_as_ubyte(arr)
mode = {3: 'RGB', 4: 'RGBA'}[arr.shape[2]]
elif format_str in ['png', 'PNG']:
mode = 'I;16'
mode_base = 'I'
if arr.dtype.kind == 'f':
arr = img_as_uint(arr)
elif arr.max() < 256 and arr.min() >= 0:
arr = arr.astype(np.uint8)
mode = mode_base = 'L'
else:
arr = img_as_uint(arr)
else:
arr = img_as_ubyte(arr)
mode = 'L'
mode_base = 'L'
try:
array_buffer = arr.tobytes()
except AttributeError:
array_buffer = arr.tostring() # Numpy < 1.9
if arr.ndim == 2:
im = Image.new(mode_base, arr.T.shape)
try:
im.frombytes(array_buffer, 'raw', mode)
except AttributeError:
im.fromstring(array_buffer, 'raw', mode) # PIL 1.1.7
else:
image_shape = (arr.shape[1], arr.shape[0])
try:
im = Image.frombytes(mode, image_shape, array_buffer)
except AttributeError:
im = Image.fromstring(mode, image_shape, array_buffer) # PIL 1.1.7
return im
def imsave(fname, arr, format_str=None, **kwargs):
"""Save an image to disk.
Parameters
----------
fname : str or file-like object
Name of destination file.
arr : ndarray of uint8 or float
Array (image) to save. Arrays of data-type uint8 should have
values in [0, 255], whereas floating-point arrays must be
in [0, 1].
    format_str : str
        Format to save as. Defaults to PNG when ``fname`` is a file-like
        object; otherwise it is derived from the file extension.
kwargs: dict
Keyword arguments to the Pillow save function (or tifffile save
function, for Tiff files). These are format dependent. For example,
Pillow's JPEG save function supports an integer ``quality`` argument
with values in [1, 95], while TIFFFile supports a ``compress``
integer argument with values in [0, 9].
Notes
-----
    Tiff files are handled by Christoph Gohlke's tifffile.py [1]_,
    and support many advanced image types including multi-page and
    floating point.
    All other image formats use the Python Imaging Library.
    See PIL docs [2]_ for a list of other supported formats.
    All images besides single-channel PNGs are converted using `img_as_ubyte`.
    Single-channel PNGs have the following behavior:
    - Integer values in [0, 255] and Boolean types -> `img_as_ubyte`
    - Floating point and other integers -> `img_as_uint`
References
----------
.. [1] http://www.lfd.uci.edu/~gohlke/code/tifffile.py.html
.. [2] http://pillow.readthedocs.org/en/latest/handbook/image-file-formats.html
"""
# default to PNG if file-like object
if not isinstance(fname, string_types) and format_str is None:
format_str = "PNG"
# Check for png in filename
if (isinstance(fname, string_types)
and fname.lower().endswith(".png")):
format_str = "PNG"
arr = np.asanyarray(arr).squeeze()
if arr.dtype.kind == 'b':
arr = arr.astype(np.uint8)
use_tif = False
if hasattr(fname, 'lower'):
if fname.lower().endswith(('.tiff', '.tif')):
use_tif = True
    if format_str is not None:
if format_str.lower() in ['tiff', 'tif']:
use_tif = True
if use_tif:
tif_imsave(fname, arr, **kwargs)
return
if arr.ndim not in (2, 3):
raise ValueError("Invalid shape for image array: %s" % arr.shape)
if arr.ndim == 3:
if arr.shape[2] not in (3, 4):
raise ValueError("Invalid number of channels in image array.")
img = ndarray_to_pil(arr, format_str=format_str)
img.save(fname, format=format_str, **kwargs)
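# A minimal usage sketch (the array is hypothetical): a single-channel float
# image in [0, 1] saved as PNG takes the 16-bit path via img_as_uint above.
#
#   ramp = np.linspace(0, 1, 256).reshape(16, 16)
#   imsave('ramp.png', ramp)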
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""TensorFlow Lite flatbuffer generation from saved_models.
Example:
bazel run third_party/tensorflow/contrib/lite/python:convert_savedmodel -- \
--saved_model_dir=/tmp/test_saved_model/1519865537 \
--output_tflite=/tmp/test.lite
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.lite.python import lite
from tensorflow.contrib.saved_model.python.saved_model import reader
from tensorflow.contrib.saved_model.python.saved_model import signature_def_utils
from tensorflow.core.framework import types_pb2
from tensorflow.python.client import session
from tensorflow.python.framework import graph_util as tf_graph_util
from tensorflow.python.framework import ops
from tensorflow.python.platform import app
from tensorflow.python.platform import flags
from tensorflow.python.platform import gfile
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.saved_model import loader
from tensorflow.python.saved_model import signature_constants
from tensorflow.python.saved_model import tag_constants
flags.DEFINE_string("saved_model_dir", "", "Saved model directory to convert.")
flags.DEFINE_string("output_tflite", None, "File path to write flatbuffer.")
flags.DEFINE_string("output_arrays", None,
"List of output tensor names, the default value is None, "
"which means the conversion will keep all outputs.")
flags.DEFINE_integer("batch_size", 1,
"If input tensor shape has None at first dimension, "
"e.g. (None,224,224,3), replace None with batch_size.")
flags.DEFINE_string("tag_set", tag_constants.SERVING,
"Group of tag(s) of the MetaGraphDef in the saved_model, "
"in string format, separated by ','. For tag-set contains "
"multiple tags, all tags must be passed in.")
flags.DEFINE_string("signature_key",
signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY,
"This is signature key to extract inputs, outputs.")
def log_tensor_details(tensor_info):
"""Log tensor details: name, shape, and type."""
for key in tensor_info:
val = tensor_info[key]
dtype = types_pb2.DataType.Name(val.dtype)
if val.tensor_shape.unknown_rank:
shape = "unknown_rank"
else:
dims = [str(dim.size) for dim in val.tensor_shape.dim]
shape = "({})".format(", ".join(dims))
logging.info("Tensor's key in savedmodel's tensor_map: %s", key)
logging.info(" tensor name: %s, shape: %s, type: %s", val.name, shape,
dtype)
def get_meta_graph_def(saved_model_dir, tag_set):
"""Validate savedmodel and extract MetaGraphDef.
Args:
saved_model_dir: Savedmodel path to convert.
tag_set: Set of tag(s) of the MetaGraphDef to load.
Returns:
The meta_graph_def used for tflite conversion.
Raises:
ValueError: No valid MetaGraphDef for given tag_set.
"""
saved_model = reader.read_saved_model(saved_model_dir)
tag_sets = []
result_meta_graph_def = None
for meta_graph_def in saved_model.meta_graphs:
meta_graph_tag_set = set(meta_graph_def.meta_info_def.tags)
tag_sets.append(meta_graph_tag_set)
if meta_graph_tag_set == tag_set:
result_meta_graph_def = meta_graph_def
logging.info("The given SavedModel contains the following tags: %s", tag_sets)
if result_meta_graph_def is not None:
return result_meta_graph_def
else:
raise ValueError("No valid MetaGraphDef for this tag_set '{}'. Possible "
"values are '{}'. ".format(tag_set, tag_sets))
def get_signature_def(meta_graph, signature_key):
"""Get the signature def from meta_graph with given signature_key.
Args:
meta_graph: meta_graph_def.
signature_key: signature_def in the meta_graph_def.
Returns:
The signature_def used for tflite conversion.
Raises:
ValueError: Given signature_key is not valid for this meta_graph.
"""
signature_def_map = meta_graph.signature_def
signature_def_keys = set(signature_def_map.keys())
logging.info(
"The given SavedModel MetaGraphDef contains SignatureDefs with the "
"following keys: %s", signature_def_keys)
if signature_key not in signature_def_keys:
raise ValueError("No '{}' in the saved_model\'s SignatureDefs. Possible "
"values are '{}'. ".format(signature_key,
signature_def_keys))
signature_def = signature_def_utils.get_signature_def_by_key(
meta_graph, signature_key)
return signature_def
def get_inputs_outputs(signature_def):
"""Get inputs and outputs from signature def.
Args:
    signature_def: signature def in the meta_graph_def for conversion.
Returns:
The inputs and outputs in the graph for conversion.
"""
inputs_tensor_info = signature_def.inputs
outputs_tensor_info = signature_def.outputs
logging.info("input tensors info: ")
log_tensor_details(inputs_tensor_info)
logging.info("output tensors info: ")
log_tensor_details(outputs_tensor_info)
def gather_names(tensor_info):
return [tensor_info[key].name for key in tensor_info]
inputs = gather_names(inputs_tensor_info)
outputs = gather_names(outputs_tensor_info)
return inputs, outputs
def convert(saved_model_dir,
output_tflite=None,
output_arrays=None,
tag_set=None,
signature_key=signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY,
batch_size=1):
"""Convert a savedmodel to tflite flatbuffer.
Args:
saved_model_dir: Saved model directory to convert.
output_tflite: File path to write result flatbuffer.
    output_arrays: List of output tensor names; the default value is None, which
      means conversion keeps all output tensors. This can also be used to filter
      out tensors produced by ops not yet supported in tflite (e.g., ArgMax).
    tag_set: The set of tags used to select the meta_graph_def in saved_model.
    signature_key: The signature key used to extract inputs and outputs.
batch_size: If input tensor shape has None at first dimension,
e.g. (None,224,224,3), replace None with batch_size.
Returns:
The converted data. For example if tflite was the destination, then
this will be a tflite flatbuffer in a bytes array.
Raises:
ValueError: If tag_set does not indicate any meta_graph_def in saved_model,
or signature_key is not in relevant meta_graph_def,
or input shape has None beyond 1st dimension, e.g., (1,None, None, 3),
or given output_arrays are not valid causing empty outputs.
"""
if tag_set is None:
tag_set = set([tag_constants.SERVING])
meta_graph = get_meta_graph_def(saved_model_dir, tag_set)
signature_def = get_signature_def(meta_graph, signature_key)
inputs, outputs = get_inputs_outputs(signature_def)
graph = ops.Graph()
with session.Session(graph=graph) as sess:
loader.load(sess, meta_graph.meta_info_def.tags, saved_model_dir)
in_tensors = [graph.get_tensor_by_name(input_) for input_ in inputs]
# Users can use output_arrays to filter output tensors for conversion.
# If output_arrays is None, we keep all output tensors. In future, we may
# use tflite supported Op list and check whether op is custom Op to
# automatically filter output arrays.
# TODO(zhixianyan): Use tflite supported Op list to filter outputs.
if output_arrays is not None:
output_arrays = output_arrays.split(",")
out_tensors = [
graph.get_tensor_by_name(output)
for output in outputs
if output.split(":")[0] in output_arrays
]
else:
out_tensors = [graph.get_tensor_by_name(output) for output in outputs]
output_names = [node.split(":")[0] for node in outputs]
if not out_tensors:
raise ValueError(
"No valid output tensors for '{}', possible values are '{}'".format(
output_arrays, output_names))
frozen_graph_def = tf_graph_util.convert_variables_to_constants(
sess, graph.as_graph_def(), output_names)
    # Toco requires fully defined tensor shapes. For an input tensor with None
    # in its shape, e.g. (None, 224, 224, 3), we replace the first None with a
    # given batch size. Shapes with more Nones, e.g. (None, None, None, 3),
    # might still be replaceable and convertible, but that requires further
    # investigation.
    # TODO(zhixianyan): Add support for input tensors with more Nones in shape.
for i in range(len(in_tensors)):
shape = in_tensors[i].get_shape().as_list()
if shape[0] is None:
shape[0] = batch_size
if None in shape[1:]:
        raise ValueError(
            "Only a None at the 1st dimension (batch_size) is supported, but "
            "tensor '{}' with shape '{}' has None at another dimension.".format(
                inputs[i], shape))
in_tensors[i].set_shape(shape)
result = lite.toco_convert(frozen_graph_def, in_tensors, out_tensors)
if output_tflite is not None:
with gfile.Open(output_tflite, "wb") as f:
f.write(result)
logging.info("Successfully converted to: %s", output_tflite)
return result
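# A minimal usage sketch (paths are hypothetical, taken from the module
# docstring), mirroring the flag defaults above:
#
#   tflite_bytes = convert(
#       saved_model_dir='/tmp/test_saved_model/1519865537',
#       output_tflite='/tmp/test.lite',
#       tag_set=set([tag_constants.SERVING]))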
def main(_):
convert(
saved_model_dir=flags.FLAGS.saved_model_dir,
output_tflite=flags.FLAGS.output_tflite,
output_arrays=flags.FLAGS.output_arrays,
batch_size=flags.FLAGS.batch_size,
tag_set=set(flags.FLAGS.tag_set.split(",")),
signature_key=flags.FLAGS.signature_key)
if __name__ == "__main__":
app.run(main)
# -*- coding: utf-8 -*-
from datetime import datetime
import json
from bson import ObjectId
from flask import Blueprint, current_app, jsonify, request, Response, \
make_response
from flask.ext.login import current_user, login_required
from flask.ext.mail import Message
from pymongo.errors import DuplicateKeyError
import requests
from ..extensions import mongo, mail, gh_issues
from ..utils import get_current_time, PLURAL_METHODS, SINGLE_METHODS, \
jsonify_mongo, HTTP_METHODS
api = Blueprint('api', __name__, url_prefix='/api/v1')
@api.route('/remote/static/<path:path>', methods=['GET'])
def remote_static(path):
"""Route incoming API calls to the Tornado backend and sends JSON response"""
# Check if GET 206
range_header = request.headers.get('Range', None)
if range_header:
headers = {'Range': range_header}
else:
headers = {}
# Route request to Tornado
TORNADO_PORT = current_app.config.get('TORNADO_PORT')
url = 'http://clinical-db.scilifelab.se:{port}/static/{path}'\
.format(port=TORNADO_PORT, path=path)
cookie = {'institute': ','.join(['cmms'])}
if request.method == 'GET':
resp = requests.get(url, cookies=cookie, headers=headers)
new_resp = make_response(resp.content)
else:
# HEAD request incoming
resp = requests.head(url, cookies=cookie, headers=headers)
new_resp = make_response(u'')
new_resp.status_code = resp.status_code
new_resp.headers['content-length'] = resp.headers.get('content-length')
new_resp.headers['accept-ranges'] = resp.headers.get('accept-ranges')
new_resp.headers['Content-Type'] = 'application/octet-stream'
return new_resp
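# A minimal usage sketch (host and path are hypothetical): a ranged request is
# forwarded to Tornado with its Range header intact, so partial-content (206)
# responses pass through unchanged.
#
#   resp = requests.get('http://localhost:5000/api/v1/remote/static/sample.bam',
#                       headers={'Range': 'bytes=0-1023'})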
@api.route('/activities', methods=PLURAL_METHODS)
@api.route('/activities/<document_id>', methods=SINGLE_METHODS)
@login_required
def activities(document_id=None):
# Store the submitted query options
query_args = request.args.to_dict()
data = request.json
if document_id:
# Well, at least we know we should try to fetch a document
document = mongo.db.activity.find_one({'_id': ObjectId(document_id)})
if request.method == 'POST':
# Update data before inserting a new document
data['created_at'] = get_current_time()
data['user_id'] = ObjectId(data['user_id'])
# Add a new activity to the collection
try:
document_id = mongo.db.activity.insert(data)
document = data
document['_id'] = document_id
except DuplicateKeyError:
return Response('Document already exists'), 500
elif request.method == 'GET':
if document_id is None:
activities = list(mongo.db.activity.find(
query_args).sort('created_at', -1))
users = mongo.db.user.find({'_id':
{'$in': [activity['user_id'] for activity in activities]}})
return jsonify_mongo(activities=activities, users=list(users))
else:
user = mongo.db.user.find_one({'_id': document['user_id']})
document['user'] = user
elif request.method == 'PUT':
# Update a specific document
# Start by updating the changed fields
for key, value in data.items():
if key == 'user_id':
value = ObjectId(value)
elif key in ('created_at', 'updated_at'):
value = datetime(value)
document[key] = value
mongo.db.activity.save(document)
elif request.method == 'DELETE':
# Remove a document from the collection
effect = mongo.db.activity.remove(ObjectId(document_id))
if effect['err']:
# There was an error while deleting the record
return Response(effect['err']), 500
return jsonify_mongo(document)
@api.route('/users/<user_id>', methods=['OPTIONS', 'GET'])
@login_required
def users(user_id=None):
if user_id:
if user_id == 'current':
# Get the user id from the logged in user
user_id = current_user['user_id']
# Fetch document from the store
user = mongo.db.user.find_one({'_id': ObjectId(user_id)})
if user is None:
return Response('User document with id: %s not found' % user_id), 404
# Return json object for the logged in user
return jsonify_mongo(**user)
# +--------------------------------------------------------------------+
# | Sanger Sequencing Order Mail
# +--------------------------------------------------------------------+
@api.route('/sanger', methods=['POST'])
@login_required
def sanger_order():
sender_email = current_user.get('email', 'robin.andeer@gmail.com')
# Send an email with Sanger sequencing order
msg = Message(
'Sanger sequencing of ' + request.form['hgnc_symbol'],
sender=current_app.config.get('MAIL_USERNAME'),
recipients=current_app.config.get('MAIL_RECEPIENTS'),
cc=[sender_email],
bcc=['robin.andeer@scilifelab.se']
)
msg.html = request.form['message']
mail.send(msg)
return jsonify(message=msg.html)
# +--------------------------------------------------------------------+
# | GitHub Issues
# +--------------------------------------------------------------------+
@api.route('/issues', methods=PLURAL_METHODS)
@api.route('/issues/<issue_id>', methods=SINGLE_METHODS)
@login_required
def issues(issue_id=None):
if request.method == 'POST':
# Submit an issue to the Scout repo at GitHub
# Build the body content
body = """{body}
submitted by **{author}**.
""".format(body=request.json.get('body', '').encode('utf-8'),
author=current_user.get('name').encode('utf-8'))
        # Create the new issue
issue = gh_issues.create(request.json['title'], body)
return jsonify(id=issue.number, body=issue.body, title=issue.title,
html=issue.body_html, url=issue.html_url)
elif request.method == 'GET':
if issue_id:
# Find an existing issue
issue = gh_issues.find(issue_id)
payload = dict(
id=issue.number,
title=issue.title,
body=issue.body,
html=issue.body_html,
created_at=issue.created_at.date().isoformat(),
url=issue.html_url
)
return jsonify(**payload)
else:
# Get all existing issues
issues = [{
'id': issue.number,
'title': issue.title,
'body': issue.body,
'html': issue.body_html,
'created_at': issue.created_at.date().isoformat(),
'url': issue.html_url
} for issue in gh_issues.find()]
return jsonify(issues=issues)
# Route incoming API calls to the Tornado backend and send back the JSON response
@api.route('/<path:path>', methods=HTTP_METHODS)
@login_required
def remote_api(path):
# Route incoming request to Tornado
try:
cookie = {'institute': ','.join(current_user.get('institutes', []))}
except AttributeError:
return jsonify(error='You are not logged in!'), 403
# Double check that user has access to the institute
institute = request.args.get('institute')
unauthorized = institute not in cookie['institute'].split(',')
if request.args.get('institute') and unauthorized:
return Response('Error'), 401
TORNADO_PORT = current_app.config.get('TORNADO_PORT')
url = 'http://clinical-db.scilifelab.se:{port}/{path}?{query}'\
.format(port=TORNADO_PORT, path=path, query=request.query_string)
mimetype = 'application/json'
if request.method == 'GET':
r = requests.get(url, cookies=cookie)
elif request.method == 'POST':
# POST request
headers = {'Content-type': 'application/json'}
r = requests.post(url, data=json.dumps(request.form.to_dict()),
cookies=cookie, headers=headers)
elif request.method == 'DELETE':
r = requests.delete(url, cookies=cookie)
else:
return jsonify(error='Not a valid REST method.')
# Send JSON response
return Response(r.text, mimetype=mimetype), r.status_code, dict(r.headers)
#
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class bridgetable(base_resource) :
""" Configuration for bridge table entry resource. """
def __init__(self) :
self._bridgeage = 0
self._vlan = 0
self._ifnum = ""
self._vxlan = 0
self._mac = ""
self._vtep = ""
self._flags = 0
self._channel = 0
self.___count = 0
@property
def bridgeage(self) :
"""Time-out value for the bridge table entries, in seconds. The new value applies only to the entries that are dynamically learned after the new value is set. Previously existing bridge table entries expire after the previously configured time-out value.<br/>Default value: 300<br/>Minimum length = 60<br/>Maximum length = 300.
"""
try :
return self._bridgeage
except Exception as e:
raise e
@bridgeage.setter
def bridgeage(self, bridgeage) :
"""Time-out value for the bridge table entries, in seconds. The new value applies only to the entries that are dynamically learned after the new value is set. Previously existing bridge table entries expire after the previously configured time-out value.<br/>Default value: 300<br/>Minimum length = 60<br/>Maximum length = 300
"""
try :
self._bridgeage = bridgeage
except Exception as e:
raise e
@property
def vlan(self) :
"""VLAN whose entries are to be removed.<br/>Minimum length = 1<br/>Maximum length = 4094.
"""
try :
return self._vlan
except Exception as e:
raise e
@vlan.setter
def vlan(self, vlan) :
"""VLAN whose entries are to be removed.<br/>Minimum length = 1<br/>Maximum length = 4094
"""
try :
self._vlan = vlan
except Exception as e:
raise e
@property
def ifnum(self) :
"""INTERFACE whose entries are to be removed.
"""
try :
return self._ifnum
except Exception as e:
raise e
@ifnum.setter
def ifnum(self, ifnum) :
"""INTERFACE whose entries are to be removed.
"""
try :
self._ifnum = ifnum
except Exception as e:
raise e
@property
def vxlan(self) :
"""VXLAN whose entries are to be removed.<br/>Minimum length = 1<br/>Maximum length = 16777215.
"""
try :
return self._vxlan
except Exception as e:
raise e
@vxlan.setter
def vxlan(self, vxlan) :
"""VXLAN whose entries are to be removed.<br/>Minimum length = 1<br/>Maximum length = 16777215
"""
try :
self._vxlan = vxlan
except Exception as e:
raise e
@property
def mac(self) :
"""The MAC address of the target.
"""
try :
return self._mac
except Exception as e:
raise e
@property
def vtep(self) :
"""The IP address of the VTEP.
"""
try :
return self._vtep
except Exception as e:
raise e
@property
def flags(self) :
"""Display flags,.
"""
try :
return self._flags
except Exception as e:
raise e
@property
def channel(self) :
"""The Tunnel through which bridge entry is learned.
"""
try :
return self._channel
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
""" converts nitro response into object and returns the object array in case of get request.
"""
try :
result = service.payload_formatter.string_to_resource(bridgetable_response, response, self.__class__.__name__)
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.bridgetable
except Exception as e :
raise e
def _get_object_name(self) :
""" Returns the value of object identifier argument
"""
try :
return None
except Exception as e :
raise e
@classmethod
def update(cls, client, resource) :
""" Use this API to update bridgetable.
"""
try :
if type(resource) is not list :
updateresource = bridgetable()
updateresource.bridgeage = resource.bridgeage
return updateresource.update_resource(client)
else :
if (resource and len(resource) > 0) :
updateresources = [ bridgetable() for _ in range(len(resource))]
for i in range(len(resource)) :
updateresources[i].bridgeage = resource[i].bridgeage
result = cls.update_bulk_request(client, updateresources)
return result
except Exception as e :
raise e
@classmethod
def unset(cls, client, resource, args) :
""" Use this API to unset the properties of bridgetable resource.
Properties that need to be unset are specified in args array.
"""
try :
if type(resource) is not list :
unsetresource = bridgetable()
return unsetresource.unset_resource(client, args)
else :
if (resource and len(resource) > 0) :
unsetresources = [ bridgetable() for _ in range(len(resource))]
result = cls.unset_bulk_request(client, unsetresources, args)
return result
except Exception as e :
raise e
@classmethod
def clear(cls, client, resource) :
""" Use this API to clear bridgetable.
"""
try :
if type(resource) is not list :
clearresource = bridgetable()
clearresource.vlan = resource.vlan
clearresource.ifnum = resource.ifnum
clearresource.vxlan = resource.vxlan
return clearresource.perform_operation(client,"clear")
else :
if (resource and len(resource) > 0) :
clearresources = [ bridgetable() for _ in range(len(resource))]
for i in range(len(resource)) :
clearresources[i].vlan = resource[i].vlan
clearresources[i].ifnum = resource[i].ifnum
clearresources[i].vxlan = resource[i].vxlan
result = cls.perform_operation_bulk_request(client, clearresources,"clear")
return result
except Exception as e :
raise e
@classmethod
def get(cls, client, name="", option_="") :
""" Use this API to fetch all the bridgetable resources that are configured on netscaler.
"""
try :
if not name :
obj = bridgetable()
response = obj.get_resources(client, option_)
return response
except Exception as e :
raise e
@classmethod
def get_filtered(cls, client, filter_) :
""" Use this API to fetch filtered set of bridgetable resources.
        Filter string should be in JSON format, e.g., "port:80,servicetype:HTTP".
"""
try :
obj = bridgetable()
option_ = options()
option_.filter = filter_
response = obj.getfiltered(client, option_)
return response
except Exception as e :
raise e
@classmethod
def count(cls, client) :
""" Use this API to count the bridgetable resources configured on NetScaler.
"""
try :
obj = bridgetable()
option_ = options()
option_.count = True
response = obj.get_resources(client, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e :
raise e
@classmethod
def count_filtered(cls, client, filter_) :
""" Use this API to count filtered the set of bridgetable resources.
Filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
"""
try :
obj = bridgetable()
option_ = options()
option_.count = True
option_.filter = filter_
response = obj.getfiltered(client, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e :
raise e
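# A minimal usage sketch (address and credentials are hypothetical), assuming a
# reachable NITRO endpoint and the library's nitro_service client from
# nssrc.com.citrix.netscaler.nitro.service.nitro_service:
#
#   client = nitro_service("10.0.0.1", "http")
#   client.set_credential("nsroot", "nsroot")
#   client.login()
#   entries = bridgetable.get(client)
#   total = bridgetable.count(client)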
class bridgetable_response(base_response) :
def __init__(self, length=1) :
self.bridgetable = []
self.errorcode = 0
self.message = ""
self.severity = ""
self.sessionid = ""
self.bridgetable = [bridgetable() for _ in range(length)]
# Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import collections
import itertools
import logging
from dashboard.common import math_utils
from dashboard.pinpoint.models import change as change_module
from dashboard.pinpoint.models import compare
from dashboard.pinpoint.models import evaluators
from dashboard.pinpoint.models import exploration
from dashboard.pinpoint.models import task as task_module
from dashboard.pinpoint.models.tasks import find_isolate
from dashboard.pinpoint.models.tasks import read_value
from dashboard.pinpoint.models.tasks import run_test
from dashboard.pinpoint.models.quest import run_telemetry_test
from dashboard.pinpoint.models.quest import run_vr_telemetry_test
from dashboard.pinpoint.models.quest import run_gtest
from dashboard.pinpoint.models.quest import run_webrtc_test
from dashboard.services import gitiles_service
_DEFAULT_SPECULATION_LEVELS = 2
AnalysisOptions = collections.namedtuple('AnalysisOptions', (
'comparison_magnitude',
'min_attempts',
'max_attempts',
))
BuildOptionTemplate = collections.namedtuple('BuildOptionTemplate',
('builder', 'target', 'bucket'))
TestOptionTemplate = collections.namedtuple(
'TestOptionTemplate', ('swarming_server', 'dimensions', 'extra_args'))
ReadOptionTemplate = collections.namedtuple(
'ReadOptionTemplate',
('benchmark', 'histogram_options', 'graph_json_options', 'mode'))
TaskOptions = collections.namedtuple(
'TaskOptions',
('build_option_template', 'test_option_template', 'read_option_template',
'analysis_options', 'start_change', 'end_change', 'pinned_change'))
def _CreateRunTestTaskTemplate(test_option_template, change, arguments):
# Because we're calling "legacy" functions that deal with dictionaries of
# options, we reconstitute the ones we know of and re-create the 'arguments'
# dictionary that would have been passed to Pinpoint.
# TODO(dberris): Fix this up so that when we delete the quests, we end up with
# a simpler extra argument generation mechanism.
return TestOptionTemplate(test_option_template.swarming_server,
test_option_template.dimensions,
ComputeExtraArgs(arguments, change))._asdict()
def _CreateReadTaskOptions(build_option_template, test_option_template,
read_option_template, analysis_options, change,
arguments):
return read_value.TaskOptions(
test_options=run_test.TaskOptions(
build_options=find_isolate.TaskOptions(
change=change, **build_option_template._asdict()),
attempts=analysis_options.min_attempts,
**_CreateRunTestTaskTemplate(test_option_template, change,
arguments)),
**read_option_template._asdict())
def CreateGraph(options, arguments=None):
if not isinstance(options, TaskOptions):
raise ValueError(
'options must be an instance of performance_bisection.TaskOptions')
start_change = options.start_change
end_change = options.end_change
if options.pinned_change:
start_change.Update(options.pinned_change)
end_change.Update(options.pinned_change)
start_change_read_options = _CreateReadTaskOptions(
options.build_option_template, options.test_option_template,
options.read_option_template, options.analysis_options, start_change,
arguments if arguments else {})
end_change_read_options = _CreateReadTaskOptions(
options.build_option_template, options.test_option_template,
options.read_option_template, options.analysis_options, end_change,
arguments if arguments else {})
# Given the start_change and end_change, we create two subgraphs that we
# depend on from the 'find_culprit' task. This means we'll need to create
# independent test options and build options from the template provided by the
# caller.
start_subgraph = read_value.CreateGraph(start_change_read_options)
end_subgraph = read_value.CreateGraph(end_change_read_options)
# Then we add a dependency from the 'FindCulprit' task with the payload
# describing the options set for the performance bisection.
find_culprit_task = task_module.TaskVertex(
id='performance_bisection',
vertex_type='find_culprit',
payload={
'start_change':
options.start_change.AsDict(),
'end_change':
options.end_change.AsDict(),
'pinned_change':
options.pinned_change.AsDict() if options.pinned_change else None,
# We still persist the templates, because we'll need that data in case
# we are going to extend the graph with the same build/test templates
# in subgraphs.
'analysis_options':
options.analysis_options._asdict(),
'build_option_template':
options.build_option_template._asdict(),
'test_option_template':
options.test_option_template._asdict(),
'read_option_template': {
'histogram_options':
options.read_option_template.histogram_options._asdict(),
'graph_json_options':
options.read_option_template.graph_json_options._asdict(),
'benchmark':
options.read_option_template.benchmark,
'mode':
options.read_option_template.mode,
},
# Because this is a performance bisection, we'll hard-code the
# comparison mode as 'performance'.
'comparison_mode':
'performance',
'arguments':
arguments if arguments else {},
})
return task_module.TaskGraph(
vertices=list(
itertools.chain(start_subgraph.vertices, end_subgraph.vertices)) +
[find_culprit_task],
edges=list(itertools.chain(start_subgraph.edges, end_subgraph.edges)) + [
task_module.Dependency(from_=find_culprit_task.id, to=v.id)
for v in itertools.chain(start_subgraph.vertices,
end_subgraph.vertices)
if v.vertex_type == 'read_value'
])
class PrepareCommits(collections.namedtuple('PrepareCommits', ('job', 'task'))):
# Save memory and avoid unnecessarily adding more attributes to objects of
# this type.
__slots__ = ()
@task_module.LogStateTransitionFailures
def __call__(self, _):
start_change = change_module.ReconstituteChange(
self.task.payload['start_change'])
end_change = change_module.ReconstituteChange(
self.task.payload['end_change'])
try:
# We're storing this once, so that we don't need to always get this when
# working with the individual commits. This reduces our reliance on
# datastore operations throughout the course of handling the culprit
# finding process.
#
# TODO(dberris): Expand the commits into the full table of dependencies?
# Because every commit in the chromium repository is likely to be building
# against different versions of the dependencies (v8, skia, etc.)
# we'd need to expand the concept of a changelist (CL, or Change in the
# Pinpoint codebase) so that we know which versions of the dependencies to
# use in specific CLs. Once we have this, we might be able to operate
# cleanly on just Change instances instead of just raw commits.
#
# TODO(dberris): Model the "merge-commit" like nature of auto-roll CLs by
# allowing the preparation action to model the non-linearity of the
# history. This means we'll need a concept of levels, where changes in a
# single repository history (the main one) operates at a higher level
# linearly, and if we're descending into rolls that we're exploring a
# lower level in the linear history. This is similar to the following
# diagram:
#
# main -> m0 -> m1 -> m2 -> roll0 -> m3 -> ...
# |
# dependency .............. +-> d0 -> d1
#
# Ideally we'll already have this expanded before we go ahead and perform
# a bisection, to amortise the cost of making requests to back-end
# services for this kind of information in tight loops.
commits = change_module.Commit.CommitRange(start_change.base_commit,
end_change.base_commit)
self.task.payload.update({
'commits': [
collections.OrderedDict(
[('repository', start_change.base_commit.repository),
('git_hash', start_change.base_commit.git_hash)])
] + [
collections.OrderedDict(
[('repository', start_change.base_commit.repository),
('git_hash', commit['commit'])])
for commit in reversed(commits)
]
})
task_module.UpdateTask(
self.job,
self.task.id,
new_state='ongoing',
payload=self.task.payload)
except gitiles_service.NotFoundError as e:
# TODO(dberris): We need to be more resilient to intermittent failures
# from the Gitiles service here.
self.task.payload.update({
'errors':
self.task.payload.get('errors', []) + [{
'reason': 'GitilesFetchError',
'message': e.message
}]
})
task_module.UpdateTask(
self.job, self.task.id, new_state='failed', payload=self.task.payload)
def __str__(self):
return 'PrepareCommits( job = %s, task = %s )' % (self.job.job_id,
self.task.id)
class RefineExplorationAction(
collections.namedtuple('RefineExplorationAction',
('job', 'task', 'change', 'new_size'))):
__slots__ = ()
def __str__(self):
return ('RefineExplorationAction(job = %s, task = %s, change = %s, +%s '
'attempts)') % (self.job.job_id, self.task.id,
self.change.id_string, self.new_size)
def __call__(self, accumulator):
# Outline:
# - Given the job and task, extend the TaskGraph to add new tasks and
# dependencies, being careful to filter the IDs from what we already see
# in the accumulator to avoid graph amendment errors.
# - If we do encounter graph amendment errors, we should log those and not
# block progress because that can only happen if there's concurrent
# updates being performed with the same actions.
build_option_template = BuildOptionTemplate(
**self.task.payload.get('build_option_template'))
test_option_template = TestOptionTemplate(
**self.task.payload.get('test_option_template'))
# The ReadOptionTemplate is special because it has nested structures, so
# we'll have to reconstitute those accordingly.
read_option_template_map = self.task.payload.get('read_option_template')
read_option_template = ReadOptionTemplate(
benchmark=self.task.payload.get('read_option_template').get(
'benchmark'),
histogram_options=read_value.HistogramOptions(
**read_option_template_map.get('histogram_options')),
graph_json_options=read_value.GraphJsonOptions(
**read_option_template_map.get('graph_json_options')),
mode=read_option_template_map.get('mode'))
analysis_options_dict = self.task.payload.get('analysis_options')
if self.new_size:
analysis_options_dict['min_attempts'] = min(
self.new_size, analysis_options_dict.get('max_attempts', 100))
analysis_options = AnalysisOptions(**analysis_options_dict)
new_subgraph = read_value.CreateGraph(
_CreateReadTaskOptions(build_option_template, test_option_template,
read_option_template, analysis_options,
self.change,
self.task.payload.get('arguments', {})))
try:
# Add all of the new vertices we do not have in the graph yet.
additional_vertices = [
v for v in new_subgraph.vertices if v.id not in accumulator
]
      # Add all of the new edges that aren't in the graph yet, and the
# dependencies from the find_culprit task to the new read_value tasks if
# there are any.
additional_dependencies = [
new_edge for new_edge in new_subgraph.edges
if new_edge.from_ not in accumulator
] + [
task_module.Dependency(from_=self.task.id, to=v.id)
for v in new_subgraph.vertices
if v.id not in accumulator and v.vertex_type == 'read_value'
]
logging.debug(
'Extending the graph with %s new vertices and %s new edges.',
len(additional_vertices), len(additional_dependencies))
task_module.ExtendTaskGraph(
self.job,
vertices=additional_vertices,
dependencies=additional_dependencies)
except task_module.InvalidAmendment as e:
logging.error('Failed to amend graph: %s', e)
class UpdateTaskPayloadAction(
collections.namedtuple('UpdateTaskPayloadAction', ('job', 'task'))):
__slots__ = ()
def __str__(self):
return 'UpdateTaskPayloadAction(job = %s, task = %s)' % (self.job.job_id,
self.task.id)
@task_module.LogStateTransitionFailures
def __call__(self, _):
task_module.UpdateTask(self.job, self.task.id, payload=self.task.payload)
class CompleteExplorationAction(
collections.namedtuple('CompleteExplorationAction',
('job', 'task', 'state'))):
__slots__ = ()
def __str__(self):
return 'CompleteExplorationAction(job = %s, task = %s, state = %s)' % (
self.job.job_id, self.task.id, self.state)
@task_module.LogStateTransitionFailures
def __call__(self, accumulator):
# TODO(dberris): Maybe consider cancelling outstanding actions? Here we'll
# need a way of synthesising actions if we want to force the continuation of
# a task graph's evaluation.
task_module.UpdateTask(
self.job, self.task.id, new_state=self.state, payload=self.task.payload)
class FindCulprit(collections.namedtuple('FindCulprit', ('job',))):
__slots__ = ()
def __call__(self, task, _, accumulator):
# Outline:
# - If the task is still pending, this means this is the first time we're
# encountering the task in an evaluation. Set up the payload data to
# include the full range of commits, so that we load it once and have it
# ready, and emit an action to mark the task ongoing.
#
# - If the task is ongoing, gather all the dependency data (both results
# and status) and see whether we have enough data to determine the next
# action. We have three main cases:
#
# 1. We cannot detect a significant difference between the results from
# two different CLs. We call this the NoReproduction case.
#
# 2. We do not have enough confidence that there's a difference. We call
# this the Indeterminate case.
#
# 3. We have enough confidence that there's a difference between any two
# ordered changes. We call this the SignificantChange case.
#
# - Delegate the implementation to handle the independent cases for each
# change point we find in the CL continuum.
if task.status == 'pending':
return [PrepareCommits(self.job, task)]
all_changes = None
actions = []
if 'changes' not in task.payload:
all_changes = [
change_module.Change(
commits=[
change_module.Commit(
repository=commit.get('repository'),
git_hash=commit.get('git_hash'))
],
patch=task.payload.get('pinned_change'))
for commit in task.payload.get('commits', [])
]
task.payload.update({
'changes': [change.AsDict() for change in all_changes],
})
actions.append(UpdateTaskPayloadAction(self.job, task))
else:
# We need to reconstitute the Change instances from the dicts we've stored
# in the payload.
all_changes = [
change_module.ReconstituteChange(change)
for change in task.payload.get('changes')
]
if task.status == 'ongoing':
# TODO(dberris): Validate and fail gracefully instead of asserting?
assert 'commits' in task.payload, ('Programming error, need commits to '
'proceed!')
# Collect all the dependency task data and analyse the results.
# Group them by change.
# Order them by appearance in the CL range.
# Also count the status per CL (failed, ongoing, etc.)
deps = set(task.dependencies)
results_by_change = collections.defaultdict(list)
status_by_change = collections.defaultdict(dict)
changes_with_data = set()
changes_by_status = collections.defaultdict(set)
associated_results = [(change_module.ReconstituteChange(t.get('change')),
t.get('status'), t.get('result_values'))
for dep, t in accumulator.items()
if dep in deps]
for change, status, result_values in associated_results:
if result_values:
filtered_results = [r for r in result_values if r is not None]
if filtered_results:
results_by_change[change].append(filtered_results)
status_by_change[change].update({
status: status_by_change[change].get(status, 0) + 1,
})
changes_by_status[status].add(change)
changes_with_data.add(change)
# If the dependencies have converged into a single status, we can make
# decisions on the terminal state of the bisection.
if len(changes_by_status) == 1 and changes_with_data:
        # Check whether all dependencies completed, and whether any of them
        # produced no usable data.
if changes_by_status.get('completed') == changes_with_data:
changes_with_empty_results = [
change for change in changes_with_data
if not results_by_change.get(change)
]
if changes_with_empty_results:
task.payload.update({
'errors':
task.payload.get('errors', []) + [{
'reason':
'BisectionFailed',
'message': ('We did not find any results from '
'successful test runs.')
}]
})
return [CompleteExplorationAction(self.job, task, 'failed')]
# Check whether all the dependencies had the tests fail consistently.
elif changes_by_status.get('failed') == changes_with_data:
task.payload.update({
'errors':
task.payload.get('errors', []) + [{
'reason': 'BisectionFailed',
'message': 'All attempts in all dependencies failed.'
}]
})
return [CompleteExplorationAction(self.job, task, 'failed')]
# If they're all pending or ongoing, then we don't do anything yet.
else:
return actions
# We want to reduce the list of ordered changes to only the ones that have
# data available.
change_index = {change: index for index, change in enumerate(all_changes)}
ordered_changes = [c for c in all_changes if c in changes_with_data]
# From here we can then do the analysis on a pairwise basis, as we're
# going through the list of Change instances we have data for.
# NOTE: A lot of this algorithm is already in pinpoint/models/job_state.py
# which we're adapting.
def Compare(a, b):
# This is the comparison function which determines whether the samples
# we have from the two changes (a and b) are statistically significant.
if a is None or b is None:
return None
if 'pending' in status_by_change[a] or 'pending' in status_by_change[b]:
return compare.PENDING
# NOTE: Here we're attempting to scale the provided comparison magnitude
# threshold by the larger inter-quartile range (a measure of dispersion,
# simply computed as the 75th percentile minus the 25th percentile). The
# reason we're doing this is so that we can scale the tolerance
# according to the noise inherent in the measurements -- i.e. more noisy
# measurements will require a larger difference for us to consider
# statistically significant.
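      # For example (numbers are hypothetical): with comparison_magnitude = 1.0
      # and a larger IQR of 4.0, the magnitude handed to compare.Compare below
      # is 1.0 / 4.0 = 0.25, so noisier samples need a proportionally larger
      # difference to be called significant.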
values_for_a = tuple(itertools.chain(*results_by_change[a]))
values_for_b = tuple(itertools.chain(*results_by_change[b]))
if not values_for_a:
return None
if not values_for_b:
return None
max_iqr = max(
math_utils.Iqr(values_for_a), math_utils.Iqr(values_for_b), 0.001)
comparison_magnitude = task.payload.get('comparison_magnitude',
1.0) / max_iqr
attempts = (len(values_for_a) + len(values_for_b)) // 2
result = compare.Compare(values_for_a, values_for_b, attempts,
'performance', comparison_magnitude)
return result.result
def DetectChange(change_a, change_b):
# We return None if the comparison determines that the result is
# inconclusive. This is required by the exploration.Speculate contract.
comparison = Compare(change_a, change_b)
if comparison == compare.UNKNOWN:
return None
return comparison == compare.DIFFERENT
changes_to_refine = []
def CollectChangesToRefine(a, b):
# Here we're collecting changes that need refinement, which happens when
# two changes when compared yield the "unknown" result.
attempts_for_a = sum(status_by_change[a].values())
attempts_for_b = sum(status_by_change[b].values())
      # Grow the attempts for both changes by 50% each time we increase the
      # attempt counts. This number is arbitrary, and we should probably use
# something like a Fibonacci sequence when scaling attempt counts.
new_attempts_size_a = min(
attempts_for_a + (attempts_for_a // 2),
task.payload.get('analysis_options', {}).get('max_attempts', 100))
new_attempts_size_b = min(
attempts_for_b + (attempts_for_b // 2),
task.payload.get('analysis_options', {}).get('max_attempts', 100))
      # Only refine if the attempt count actually grows, i.e., we have not yet
      # hit max_attempts.
if new_attempts_size_a > attempts_for_a:
changes_to_refine.append((a, new_attempts_size_a))
if new_attempts_size_b > attempts_for_b:
changes_to_refine.append((b, new_attempts_size_b))
def FindMidpoint(a, b):
# Here we use the (very simple) midpoint finding algorithm given that we
# already have the full range of commits to bisect through.
a_index = change_index[a]
b_index = change_index[b]
subrange = all_changes[a_index:b_index + 1]
return None if len(subrange) <= 2 else subrange[len(subrange) // 2]
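    # Worked example for FindMidpoint: with indices 3 and 7 the subrange has
    # five elements and we return subrange[2], the middle commit; for adjacent
    # changes the subrange has only two elements, so we return None and the
    # interval cannot be split any further.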
    # A sliding window over the iterable which gives us the previous, current,
    # and next elements for any given index.
def SlidingTriple(iterable):
"""s -> (None, s0, s1), (s0, s1, s2), (s1, s2, s3), ..."""
p, c, n = itertools.tee(iterable, 3)
p = itertools.chain([None], p)
n = itertools.chain(itertools.islice(n, 1, None), [None])
return itertools.izip(p, c, n)
# This is a comparison between values at a change and the values at
# the previous change and the next change.
comparisons = [{
'prev': Compare(p, c),
'next': Compare(c, n),
} for (p, c, n) in SlidingTriple(ordered_changes)]
# Collect the result values for each change with values.
result_values = [
list(itertools.chain(*results_by_change.get(change, [])))
for change in ordered_changes
]
if task.payload.get('comparisons') != comparisons or task.payload.get(
'result_values') != result_values:
task.payload.update({
'comparisons': comparisons,
'result_values': result_values,
})
actions.append(UpdateTaskPayloadAction(self.job, task))
if len(ordered_changes) < 2:
# We do not have enough data yet to determine whether we should do
# anything.
return actions
additional_changes = exploration.Speculate(
ordered_changes,
change_detected=DetectChange,
on_unknown=CollectChangesToRefine,
midpoint=FindMidpoint,
levels=_DEFAULT_SPECULATION_LEVELS)
# At this point we can collect the actions to extend the task graph based
# on the results of the speculation, only if the changes don't have any
# more associated pending/ongoing work.
min_attempts = task.payload.get('analysis_options',
{}).get('min_attempts', 10)
actions += [
RefineExplorationAction(self.job, task, change, new_size)
for change, new_size in itertools.chain(
[(c, min_attempts) for _, c in additional_changes],
[(c, a) for c, a in changes_to_refine],
)
if not bool({'pending', 'ongoing'} & set(status_by_change[change]))
]
# Here we collect the points where we've found the changes.
def Pairwise(iterable):
"""s -> (s0, s1), (s1, s2), (s2, s3), ..."""
a, b = itertools.tee(iterable)
next(b, None)
return itertools.izip(a, b)
task.payload.update({
'culprits': [(a.AsDict(), b.AsDict())
for a, b in Pairwise(ordered_changes)
if DetectChange(a, b)],
})
can_complete = not bool(set(changes_by_status) - {'failed', 'completed'})
if not actions and can_complete:
# Mark this operation complete, storing the differences we can compute.
actions = [CompleteExplorationAction(self.job, task, 'completed')]
return actions
class Evaluator(evaluators.FilteringEvaluator):
def __init__(self, job):
super(Evaluator, self).__init__(
predicate=evaluators.All(
evaluators.TaskTypeEq('find_culprit'),
evaluators.Not(evaluators.TaskStatusIn({'completed', 'failed'}))),
delegate=FindCulprit(job))
def AnalysisSerializer(task, _, accumulator):
analysis_results = accumulator.setdefault(task.id, {})
  read_option_template = task.payload.get('read_option_template', {})
graph_json_options = read_option_template.get('graph_json_options', {})
metric = None
if read_option_template.get('mode') == 'histogram_sets':
metric = read_option_template.get('benchmark')
if read_option_template.get('mode') == 'graph_json':
metric = graph_json_options.get('chart')
analysis_results.update({
'changes': [
change_module.ReconstituteChange(change)
for change in task.payload.get('changes', [])
],
'comparison_mode': task.payload.get('comparison_mode'),
'comparisons': task.payload.get('comparisons', []),
'culprits': task.payload.get('culprits', []),
'metric': metric,
'result_values': task.payload.get('result_values', [])
})
class Serializer(evaluators.FilteringEvaluator):
def __init__(self):
super(Serializer, self).__init__(
predicate=evaluators.All(
evaluators.TaskTypeEq('find_culprit'),
evaluators.TaskStatusIn(
{'ongoing', 'failed', 'completed', 'cancelled'}),
),
delegate=AnalysisSerializer)
EXPERIMENTAL_TELEMETRY_BENCHMARKS = {
'performance_test_suite_eve',
'performance_webview_test_suite',
'performance_web_engine_test_suite',
'telemetry_perf_webview_tests',
}
SUFFIXED_EXPERIMENTAL_TELEMETRY_BENCHMARKS = {
'performance_test_suite',
'telemetry_perf_tests',
}
SUFFIXES = {
'',
'_android_chrome',
'_android_monochrome',
'_android_monochrome_bundle',
'_android_weblayer',
'_android_webview',
'_android_clank_chrome',
'_android_clank_monochrome',
'_android_clank_monochrome_64_32_bundle',
'_android_clank_monochrome_bundle',
'_android_clank_trichrome_bundle',
'_android_clank_trichrome_webview',
'_android_clank_trichrome_webview_bundle',
'_android_clank_webview',
'_android_clank_webview_bundle',
}
for test in SUFFIXED_EXPERIMENTAL_TELEMETRY_BENCHMARKS:
for suffix in SUFFIXES:
EXPERIMENTAL_TELEMETRY_BENCHMARKS.add(test + suffix)
EXPERIMENTAL_VR_BENCHMARKS = {'vr_perf_tests'}
EXPERIMENTAL_TARGET_SUPPORT = (
EXPERIMENTAL_TELEMETRY_BENCHMARKS | EXPERIMENTAL_VR_BENCHMARKS)
def ComputeExtraArgs(args, change):
"""Returns a list of extra arugments based on inputs to a Pinpoint Job.
This function consolidates all the special arguments required for running
tests dependent on the "target" of a Pinpoint job. This allows us to
consolidate the supported targets and how we invoke the targets in a
performance bisection.
"""
target = args.get('target')
if not target:
return None
# All the targets in the experimental target set generate histogram sets and
# are Telemetry-driven benchmarks. We'll apply the logic used in the
# RunTelemetryTest quest here.
if target in EXPERIMENTAL_TELEMETRY_BENCHMARKS:
return _ComputeTelemetryArgs(args, change)
if target in EXPERIMENTAL_VR_BENCHMARKS:
return _ComputeVrArgs(args, change)
  if target == 'webrtc_perf_tests':
return _ComputeWebRtcArgs(args)
return _ComputeGTestArgs(args)
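# Illustrative dispatch: a target such as 'performance_test_suite_android_chrome'
# is added to EXPERIMENTAL_TELEMETRY_BENCHMARKS by the suffix expansion above and
# routed to _ComputeTelemetryArgs, while 'vr_perf_tests' goes to _ComputeVrArgs.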
def _ComputeTelemetryArgs(args, change):
return run_telemetry_test.ChangeDependentArgs(
run_telemetry_test.RunTelemetryTest._ExtraTestArgs(args), change)
def _ComputeVrArgs(args, change):
return run_telemetry_test.ChangeDependentArgs(
run_vr_telemetry_test.RunVrTelemetryTest._ExtraTestArgs(args), change)
def _ComputeWebRtcArgs(args):
return run_webrtc_test.RunWebRtcTest._ExtraTestArgs(args)
def _ComputeGTestArgs(args):
return run_gtest.RunGTest._ExtraTestArgs(args)
|
|
import time
import pymongo
import urllib2
import simplejson
import praw
import pytumblr
import twython
from songkick import *
#setting up mongodb connection
def connect_to_mongo():
client = pymongo.MongoClient()
db = client.newonspotify
global collection
collection = db.albums
#calling spotify API
def get_api_data(page):
if page == 1:
url = "http://ws.spotify.com/search/1/album.json?q=tag:new"
else:
url = "http://ws.spotify.com/search/1/album.json?q=tag:new&page=" + str(page)
request = urllib2.urlopen(url)
json_response = simplejson.loads(request.read())
album_data = json_response["albums"]
print "Getting data for page " + str(page)
return album_data
#search for an album in the database
def search_for_album(album, artist):
search = collection.find_one({"artist":artist, "album": album})
return search
#using tiny.cc API to shorten "open.spotify" url / rate limited to 500 per day / for use with twitter only
def shorten_url(url):
method = 'shorten'
login = 'username'
apiKey = 'apiKey'
version = '2.0.3'
format = 'json'
urlCall = 'http://tiny.cc/?c=rest_api&m=%s&login=%s&apiKey=%s&version=%s&format=%s&longUrl=%s' % (method, login, apiKey, version, format, url)
api_request = urllib2.urlopen(urlCall)
json_res = simplejson.loads(api_request.read())
if json_res["errorCode"] == '0':
print "Short URL created for Twitter submission."
return json_res["results"]["short_url"]
else:
print "Error - " + str(json_res["errorCode"]) + " - " + str(json_res["errorMessage"])
return url
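# Illustrative response shape implied by the parsing above (hypothetical values):
#   {"errorCode": "0", "errorMessage": "", "results": {"short_url": "http://tiny.cc/abc123"}}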
#get the songkick concerts for the artist to add to the comments
def songkick_concerts(artist_name, platform):
songkick = Songkick(api_key='api_key')
songkick_events = songkick.events.query(artist_name=artist_name, per_page=5)
    counter = 0
    response = ""  # ensures a defined return value when Songkick has no events
try:
for event in songkick_events:
event_url = event.uri
venue_url = event.venue.uri
event_date = event.event_start.date.strftime('%d %B, %Y')
if len(event.artists) > 1:
others = len(event.artists) - 1
artist_string = "%s and %s others" % (artist_name, others)
else:
artist_string = str(artist_name)
if platform == "tumblr":
if counter == 0:
response = "<b>Upcoming Concerts/Gigs</b> (powered by <a href='https://www.songkick.com/info/about'>Songkick</a>):<br><ul>"
counter += 1
response += "<li><a href='%s'>%s at %s, %s on %s</a></li>" % (event_url.decode('utf-8'), artist_string.decode('utf-8'), str(event.venue).decode('utf-8'), str(event.location.city).decode('utf-8'), event_date)
counter += 1
elif platform == "reddit":
if counter == 0:
response = "\n\n**Upcoming Concerts/Gigs** (powered by [Songkick](https://www.songkick.com/info/about)):"
counter += 1
response += "\n\n* [%s at %s, %s on %s](%s)" % (artist_string.decode('utf-8'), str(event.venue).decode('utf-8'), str(event.location.city).decode('utf-8'), event_date, event_url)
counter += 1
else:
return ""
except:
print "No upcoming concerts/gigs on Songkick for %s" % artist_name
return ""
else:
if platform == "tumblr":
response += "</ul>"
print str(counter) + " concerts posted on " + str(platform) + " for " + str(artist_name) + " via Songkick."
return response
def submit_new_twitter_link(post):
if len(post["album"].split(" ")) == 1:
album = "#" + post["album"]
else:
album = post["album"]
if len(post["artist"].split(" ")) == 1:
artist = "#" + post["artist"]
else:
artist = post["artist"]
link = shorten_url(post["album_link"])
status = album + " by " + artist
    if len(status) + len(link) + 3 > 140:
        # truncate so that status + " - " + link fits within the 140-char limit
        status_character_limit = 140 - (len(link) + 3)
        final_status = status[:status_character_limit] + " - " + link
    else:
        final_status = status + " - " + link
try:
twitter_bot.update_status(status=final_status.encode('utf-8'))
except twython.exceptions.TwythonError as error:
print "Submission for twitter status went wrong:", error
time.sleep(10)
return False
else:
print post["album"] + " submitted to @_newonspotify."
return True
def submit_new_tumblr_link(post):
artist = post["artist"]
album = post["album"]
concerts = songkick_concerts(artist, "tumblr")
embed_html = "<b>Album Information</b><br><ul><li><i>Album</i> - <a href=%s>%s</a><li><i>Album Popularity</i> - %s<li><i>Artist</i> - <a href=%s>%s</a><li><i>Available Territories</i> - %s</ul><p><p>%s" % (post["album_link"], post["album"], post["popularity"], post["artist_link"], post["artist"], post["availableterritories"], concerts)
#submit new link for album
try:
submission = tumblr_bot.create_audio('newonspotify', caption=embed_html.encode('utf-8'), external_url=post["album_link"], tags=[artist.encode('utf-8'), album.encode('utf-8'), "spotify", "music", "newonspotify"])
except:
print "Submission for tumblr went wrong."
return False
else:
print post["album"] + " submitted to newonspotify.tumblr.com."
return True
def submit_new_reddit_link(post):
concerts = songkick_concerts(post["artist"], "reddit")
if concerts != "":
title = post["artist"] + " - " + post["album"] + " - ON TOUR!"
else:
title = post["artist"] + " - " + post["album"]
#submit new link for album
try:
submission = reddit_bot.submit('newonspotify', title, url=post["album_link"], raise_captcha_exception=True)
print "Submitted link for " + title
except:
print "Album already submitted on sub-reddit."
return "already_submitted"
try:
#creating links for comment
album = "[" + post["album"] + "](" + post["album_link"] + ")"
artist = "[" + post["artist"] + "](" + post["artist_link"] + ")"
#submit comment with additional information
comment = '**Additional Information**\n\n* Album Name - ' + album + '\n\n* Album Popularity - ' + post["popularity"] + '\n\n* Artist Name - ' + artist + '\n\n* Available Territories - ' + post["availableterritories"] + concerts
#add additional information as a comment to submission
submission.add_comment(comment)
print "submitted comment for " + title
return True
except:
print "Comment could not be submitted."
return False
def update_album_status(platform, album, artist):
search_request = search_for_album(album, artist)
if search_request is None:
print "I'm not submitting this album, there's no record in the db."
else:
try:
collection.update({'_id':search_request["_id"]}, {'$set':{platform:"submitted"}})
except:
print "Database record NOT updated for " + album + ". Something went wrong?"
else:
print "Database record updated for " + album + " on platform " + platform
def convert_spotify_link(href):
artist_check = "artist"
if artist_check in href:
new_href = href.replace("spotify:artist:", "http://open.spotify.com/artist/")
else:
new_href = href.replace("spotify:album:", "http://open.spotify.com/album/")
return new_href
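# Example (hypothetical ID): "spotify:album:4aawyAB9vmqN3uQ7FjRGTy" becomes
# "http://open.spotify.com/album/4aawyAB9vmqN3uQ7FjRGTy".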
#insert new albums into database
def insert_into_database(album_data):
counter = 0
for albums in album_data:
search_attempt = search_for_album(albums["name"], albums["artists"][0]["name"])
if search_attempt is None:
try:
album_link = convert_spotify_link(albums["href"])
artist_link = convert_spotify_link(albums["artists"][0]["href"])
mongo_album = { "album" : albums["name"],
"album_link" : album_link,
"popularity" : albums["popularity"],
"artist" : albums["artists"][0]["name"],
"artist_link": artist_link,
"availableterritories" : albums["availability"]["territories"],
"tumblr" : "to be submitted",
"reddit" : "to be submitted",
"twitter" : "to be submitted"
}
collection.insert(mongo_album)
print albums["name"] + " by " + albums["artists"][0]["name"] + " added to database."
counter = counter + 1
except:
print albums["name"] + " not added for some reason - check it out"
else:
print albums["name"] + " already in database"
print str(counter) + " albums added to Database."
def post_to_reddit():
#posting to /r/newonspotify
counter = 0
global reddit_bot
    reddit_bot = praw.Reddit('Link Submitter for /r/newonspotify using PRAW '
                             'Created by: /u/GreasyBacon '
                             'Contact: dilutedthoughts@outlook.com')
reddit_bot.login("username", "password")
posts_to_submit = collection.find({"reddit" : "to be submitted"})
for post in posts_to_submit:
response = submit_new_reddit_link(post)
        if (response is True) or (response == "already_submitted"):
update_album_status("reddit", post["album"], post["artist"])
counter = counter + 1
time.sleep(10)
print str(counter) + " submissions of new albums to /r/newonspotify."
def post_to_tumblr():
#posting to newonspotify.tumblr.com
counter = 0
global tumblr_bot
tumblr_bot = pytumblr.TumblrRestClient(
'<consumer_key>',
'<consumer_secret>',
'<oauth_token>',
'<oauth_secret>'
)
try:
tumblr_bot.info()
except:
print "Issue with tumblr log-in credentials."
else:
posts_to_submit = collection.find({"tumblr" : "to be submitted"})
for post in posts_to_submit:
response = submit_new_tumblr_link(post)
if response is True:
update_album_status("tumblr", post["album"], post["artist"])
counter = counter + 1
time.sleep(10)
print str(counter) + " submissions of new albums to newonspotify.tumblr.com"
def post_to_twitter():
#posting to @_newonspotify
counter = 0
app_key = 'app_key'
app_secret = 'app_secret'
oauth_token = 'oauth_token'
oauth_token_secret = 'oauth_token_secret'
try:
global twitter_bot
twitter_bot = twython.Twython(app_key, app_secret, oauth_token, oauth_token_secret)
twitter_bot.verify_credentials()
except:
print "Issue with twitter log in credentials."
else:
posts_to_submit = collection.find({"twitter" : "to be submitted"})
for post in posts_to_submit:
response = submit_new_twitter_link(post)
if response is True:
update_album_status("twitter", post["album"], post["artist"])
counter = counter + 1
time.sleep(10)
if response is False:
print "403 Error - Limit reached."
break
print str(counter) + " submissions of new albums to @_newonspotify twitter"
if __name__ == "__main__":
pages = [12,11,10,9,8,7,6,5,4,3,2,1]
connect_to_mongo()
for page in pages:
albums = get_api_data(page)
insert_into_database(albums)
post_to_reddit()
post_to_tumblr()
post_to_twitter()
|
|
#!/usr/bin/env python
# All the CFC-related MATLAB code converted to Python
# Translated from matlab code by Mark Kramer (math.bu.edu/people/mak/MA666/)
import numpy as np
from scipy.signal import hilbert
import matplotlib.pyplot as pl
import mne
from mne.filter import band_pass_filter
def filter_and_make_analytic_signal(data, sfreq, l_phase_freq, h_phase_freq,
l_amp_freq, h_amp_freq, method='fft',
n_jobs=1):
""" Filter data to required range and compute analytic signal from it.
Parameters
----------
data : ndarray
The signal to be analysed.
l_phase_freq, h_phase_freq : float
Low and high phase modulating frequencies.
l_amp_freq, h_amp_freq : float
Low and high amplitude modulated frequencies.
method : 'fft' or 'iir'
Filter method to be used. (mne.filter.band_pass_filter)
n_jobs : int
Number of parallel jobs to run.
Returns
-------
theta : ndarray
Low frequency filtered signal (modulating)
gamma : ndarray
High frequency filtered signal (modulated)
phase : ndarray
Phase of low frequency signal above.
amp : ndarray
Amplitude envelope of the high freq. signal above.
"""
    # filter theta and gamma signals using the frequency bands passed in above
theta = band_pass_filter(data, sfreq, l_phase_freq,
h_phase_freq, method=method, n_jobs=n_jobs)
gamma = band_pass_filter(data, sfreq, l_amp_freq, h_amp_freq,
method=method, n_jobs=n_jobs)
# phase of the low freq modulating signal
phase = np.angle(hilbert(theta))
# amplitude envelope of the high freq modulated signal
amp = np.abs(hilbert(gamma))
return theta, gamma, phase, amp
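# Illustrative usage (assumes `raw` is a 1-D ndarray sampled at 1000 Hz):
#     theta, gamma, phase, amp = filter_and_make_analytic_signal(
#         raw, sfreq=1000.0, l_phase_freq=6, h_phase_freq=10,
#         l_amp_freq=60, h_amp_freq=150)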
def compute_and_plot_psd(data, sfreq, NFFT=512, show=True):
""" Computes the power spectral density and produces a plot.
Parameters
----------
data : ndarray (n_times)
The signal.
sfreq : float
Sampling frequency.
NFFT : int (power of 2)
Number of bins for each block of FFT.
show : bool
Display or hide plot. (Default is True)
Returns
-------
power : ndarray
The power spectral density of the signal.
freqs : ndarray
Frequencies.
"""
if show is False:
pl.ioff()
power, freqs = pl.psd(data, Fs=sfreq, NFFT=NFFT)
return power, freqs
def correlate_envelope_signal(signal, amp_envelope, n_surrogates=100,
random_state=None):
""" Correlate the amplitude envelope with the signal.
Parameters
----------
    signal : ndarray (n_times)
Low freq filtered signal.
amp_envelope: ndarray (n_times)
Amplitude envelope of high freq signal.
n_surrogates : int
Number of surrogates to be computed.
random_state : int
Seed value for random generator.
Returns
-------
xcorr : ndarray (n_times)
Cross correlation of the two signals.
xcorr_surrogates : ndarray (n_surrogates)
Cross correlation of the surrogate signals.
max_surr : ndarray (n_surrogates)
Maximum value of surrogate cross correlation.
z_threshold : float
Threshold value after z-scoring.
"""
from sklearn.utils import check_random_state
xcorr = np.correlate(signal, amp_envelope, 'full')
xcorr_surrogates = np.zeros((n_surrogates, xcorr.size))
max_surr = np.zeros((n_surrogates, 1))
rng = check_random_state(random_state) # initialize random generator
for i in range(0, n_surrogates):
order = np.argsort(rng.randn(len(amp_envelope)))
xcorr_surrogates[i, :] = np.correlate(signal, amp_envelope[order], 'full')
max_surr[i] = np.max(np.abs(xcorr_surrogates[i, :]))
# compute some statistics
    # NOTE Needs to be rechecked. I want to check whether the xcorr values
    # could have come from the surrogate values (i.e. reject the null
    # hypothesis that the computed xcorr is random).
max_surr_mean = np.mean(max_surr)
max_surr_std = np.std(max_surr)
# compute zscores
zscores = (xcorr - max_surr_mean) / max_surr_std
    from scipy import stats
    # the survival function gives one-sided p-values for the z-scores
    p_values = stats.norm.sf(zscores)
# perform fdr correction and compute threshold
accept, _ = mne.stats.fdr_correction(p_values, alpha=0.001)
z_threshold = np.abs(zscores[accept]).min()
return xcorr, xcorr_surrogates, max_surr, z_threshold
def average_envelope_versus_phase(amplitude_envelope, phase):
""" Computes the average amplitude envelope versus phase and plots it.
(Buzsaki et al)
Parameters
----------
amplitude_envelope : ndarray
Amplitude envelope signal.
phase : ndarray
Phase signal (in radians)
"""
phase = phase * 360.0 / (2.0 * np.pi) # use phase in degrees
a_mean = np.zeros((36)) # divide phase into 36 bins
a_std = np.zeros((36)) # each of width 10 degrees
angle = np.zeros((36)) # label the phase with center phase
for l, k in enumerate(range(-180, 180, 10)):
indices = [i for (i, val) in enumerate(phase) if np.logical_and(val >= k, val < k + 10)]
a_mean[l] = np.mean(amplitude_envelope[indices])
a_std[l] = np.std(amplitude_envelope[indices])
angle[l] = k + 5
pl.figure('Average envelope versus phase')
pl.plot(angle, a_mean, 'b')
pl.fill_between(angle, a_mean + a_std, a_mean - a_std, color='gray')
pl.xlabel('Phase (degrees)')
pl.ylabel('Amplitude envelope')
pl.xlim([-180, 180])
pl.show()
return
def event_related_average(gamma, data, win=100, show=True):
""" Compute and plot event related averages triggered by peaks in high
freq signal.
Parameters
----------
gamma : ndarray
The high freq gamma signal.
data : ndarray
The original data signal.
win : int
Window length.
show : bool
Shows matplotlib figure. (Default is True)
Returns
-------
average : ndarray
Event related averages.
References
----------
[1] Bragin et al, J Neurosci, 1995.
"""
    # find the indices and magnitudes of peaks in the gamma signal
    locs, peaks = mne.preprocessing.peak_finder.peak_finder(gamma)
locs = locs[np.logical_and(locs > win, locs < len(data) - win)]
avg = np.zeros((len(locs), 2 * win))
for i in range(0, len(locs)):
avg[i, :] = data[locs[i] - win: locs[i] + win]
average = np.mean(avg, axis=0)
pl.plot(range(-win, win), average)
return average
# Compute the 1d modulation index
def modulation_index1d(amplitude_envelope, phase, sfreq, random_state=42):
""" Computes the one dimensional modulation index.
From Canolty et al, Science, 2006.
Parameters
----------
amplitude_envelope : ndarray
Amplitude envelope signal
phase: ndarray
Phases. (phase values of the low freq signal)
sfreq : float
Sampling frequency.
random_state : int or None
        Seed for random state generators.
Returns
-------
mi : float
Modulation index
"""
n_samp = amplitude_envelope.size
n_surr = 200 # Number of surrogates
from mne.utils import check_random_state
rng = check_random_state(random_state)
shifts = np.ceil(n_samp * rng.rand(2 * n_surr))
# reduce shifts to within one epoch of occurence
# NOTE use sfreq for 1 second, n_samp / n_epochs for one epoch
n_epochs = 10
shifts = shifts[shifts > n_samp / n_epochs]
shifts = shifts[shifts < n_samp - (n_samp / n_epochs)]
    shifts = shifts.astype(int)
# construct the composite signal
z = amplitude_envelope * np.exp(1j * phase)
# mean of composite signal
mean_raw = np.mean(z)
surr_means = np.zeros((n_surr, 1))
for i in range(0, n_surr):
# shift the amplitude for each surrogate
surr_amp = np.roll(amplitude_envelope, shifts[i])
# compute the mean length
surr_means[i] = np.abs(np.mean(surr_amp * np.exp(1j * phase)))
    # compare the true mean to the surrogates
surr_fits_mean = np.mean(surr_means, axis=0)
surr_fits_std = np.std(surr_means, axis=0)
# create a z score (m_norm_length)
# which I hope is the modulation index?
modulation_index = (np.abs(mean_raw) - surr_fits_mean) / surr_fits_std
return modulation_index
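# Interpretation note: the value above is effectively a z-score of the raw mean
# vector length against the surrogate distribution, so (roughly) values well
# above ~2 suggest phase-amplitude coupling beyond what chance alignment of the
# envelope and phase would produce.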
def modulation_index2d(data, sfreq):
""" Compute the two dimensional modulation index.
Parameters
----------
data : ndarray
The signal data
sfreq: float
Sampling frequency
Returns
-------
mod2d : ndarray
2 dimensional modulation index
"""
from mne.filter import band_pass_filter
from scipy.signal import hilbert
flow = np.arange(2, 40, 1)
flow_step = 1.0
fhigh = np.arange(5, 205, 5)
fhigh_step = 5.0
mod2d = np.zeros((flow.size, fhigh.size))
method = 'fft'
n_jobs = 2
for i in range(0, flow.size):
theta = band_pass_filter(data, sfreq, flow[i], flow[i] + flow_step,
method=method, n_jobs=n_jobs)
        theta = theta[int(sfreq): data.size - int(sfreq)]  # drop filter edge artifacts
phase = np.angle(hilbert(theta))
for j in range(0, fhigh.size):
gamma = band_pass_filter(data, sfreq, fhigh[j], fhigh[j] + fhigh_step,
method=method, n_jobs=n_jobs)
            gamma = gamma[int(sfreq): data.size - int(sfreq)]
amp = np.abs(hilbert(gamma))
# compute the modulation index
m_norm_length = modulation_index1d(amp, phase, sfreq)
mod2d[i, j] = m_norm_length
return mod2d
def bicoherence(data, sfreq, fmax=50):
"""Compute the bicoherence (or bispectrum) of data.
Bicoherence wiki entry - http://en.wikipedia.org/wiki/Bicoherence
Parameters
----------
data : ndarray (number of trials x times))
Data array
sfreq : float
Sampling frequency
fmax: float
Maximum frequency of interest
Returns
-------
bicoh :
Bicoherence
freqs : ndarray
list of frequencies
"""
    data = np.atleast_2d(data)       # ensure shape (n_trials, n_times) per the docstring
    n_trials, n_samp = data.shape    # number of trials, samples per trial
# frequency axis in Hz
faxis = np.arange(0, n_samp/2, dtype=np.float32) / n_samp * sfreq
faxis = faxis[faxis < fmax]
b = np.zeros((faxis.size, faxis.size))
numerator = np.zeros((faxis.size, faxis.size), dtype=np.complex64)
power = np.zeros((n_samp))
from scipy.signal import hann
for k in range(0, n_trials):
        x = np.fft.fft(hann(n_samp) * data[k, :].T)  # FFT of the Hann-windowed segment
num_temp = np.zeros((faxis.size, faxis.size), dtype=np.complex64)
for i in range(0, faxis.size):
for j in range(0, faxis.size):
num_temp[i, j] = x[i] * x[j] * np.conj(x[i + j])
numerator += num_temp / n_trials # compute trial average of numerator
power += np.abs(x * np.conj(x)) / n_trials # compute FFT squared and avg over trials
# Compute bicoherence
for m in range(0, faxis.size):
for n in range(0, faxis.size):
b[m, n] = np.abs(numerator[m, n]) / np.sqrt(power[m] * power[n] * power[m + n])
# return the normalized computed bicoherence and frequencies
return b, faxis
|
|
"""Utility functions that build upon SQL and Schema constructs."""
from sqlalchemy import util, schema, topological
from sqlalchemy.sql import expression, visitors
class ClauseParameters(object):
"""Represent a dictionary/iterator of bind parameter key names/values.
Tracks the original [sqlalchemy.sql#_BindParamClause] objects as well as the
keys/position of each parameter, and can return parameters as a
dictionary or a list. Will process parameter values according to
the ``TypeEngine`` objects present in the ``_BindParamClause`` instances.
"""
__slots__ = 'dialect', '_binds', 'positional'
def __init__(self, dialect, positional=None):
self.dialect = dialect
self._binds = {}
if positional is None:
self.positional = []
else:
self.positional = positional
def get_parameter(self, key):
return self._binds[key]
def set_parameter(self, bindparam, value, name):
self._binds[name] = [bindparam, name, value]
def get_original(self, key):
return self._binds[key][2]
def get_type(self, key):
return self._binds[key][0].type
def get_processors(self):
"""return a dictionary of bind 'processing' functions"""
return dict([
(key, value) for key, value in
[(
key,
self._binds[key][0].bind_processor(self.dialect)
) for key in self._binds]
if value is not None
])
def get_processed(self, key, processors):
if key in processors:
return processors[key](self._binds[key][2])
else:
return self._binds[key][2]
def keys(self):
return self._binds.keys()
def __iter__(self):
return iter(self.keys())
def __getitem__(self, key):
(bind, name, value) = self._binds[key]
processor = bind.bind_processor(self.dialect)
if processor is not None:
return processor(value)
else:
return value
def __contains__(self, key):
return key in self._binds
def set_value(self, key, value):
self._binds[key][2] = value
def get_original_dict(self):
return dict([(name, value) for (b, name, value) in self._binds.values()])
def get_raw_list(self, processors):
binds, res = self._binds, []
for key in self.positional:
if key in processors:
res.append(processors[key](binds[key][2]))
else:
res.append(binds[key][2])
return res
def get_raw_dict(self, processors, encode_keys=False):
binds, res = self._binds, {}
if encode_keys:
encoding = self.dialect.encoding
for key in self.keys():
if key in processors:
res[key.encode(encoding)] = processors[key](binds[key][2])
else:
res[key.encode(encoding)] = binds[key][2]
else:
for key in self.keys():
if key in processors:
res[key] = processors[key](binds[key][2])
else:
res[key] = binds[key][2]
return res
def __repr__(self):
return self.__class__.__name__ + ":" + repr(self.get_original_dict())
class TableCollection(object):
def __init__(self, tables=None):
self.tables = tables or []
def __len__(self):
return len(self.tables)
def __getitem__(self, i):
return self.tables[i]
def __iter__(self):
return iter(self.tables)
def __contains__(self, obj):
return obj in self.tables
def __add__(self, obj):
return self.tables + list(obj)
def add(self, table):
self.tables.append(table)
if hasattr(self, '_sorted'):
del self._sorted
    def sort(self, reverse=False):
        try:
            sorted_tables = self._sorted
        except AttributeError:
            self._sorted = sorted_tables = self._do_sort()
        if reverse:
            x = sorted_tables[:]
            x.reverse()
            return x
        else:
            return sorted_tables
def _do_sort(self):
tuples = []
class TVisitor(schema.SchemaVisitor):
def visit_foreign_key(_self, fkey):
if fkey.use_alter:
return
parent_table = fkey.column.table
if parent_table in self:
child_table = fkey.parent.table
                    tuples.append((parent_table, child_table))
vis = TVisitor()
for table in self.tables:
vis.traverse(table)
        sorter = topological.QueueDependencySorter(tuples, self.tables)
        head = sorter.sort()
        sequence = []
        def to_sequence(node, seq=sequence):
            seq.append(node.item)
            for child in node.children:
                to_sequence(child)
        if head is not None:
            to_sequence(head)
return sequence
class TableFinder(TableCollection, visitors.NoColumnVisitor):
"""locate all Tables within a clause."""
def __init__(self, clause, check_columns=False, include_aliases=False):
TableCollection.__init__(self)
self.check_columns = check_columns
self.include_aliases = include_aliases
for clause in util.to_list(clause):
self.traverse(clause)
def visit_alias(self, alias):
if self.include_aliases:
self.tables.append(alias)
def visit_table(self, table):
self.tables.append(table)
def visit_column(self, column):
if self.check_columns:
self.tables.append(column.table)
class ColumnFinder(visitors.ClauseVisitor):
def __init__(self):
self.columns = util.Set()
def visit_column(self, c):
self.columns.add(c)
def __iter__(self):
return iter(self.columns)
class ColumnsInClause(visitors.ClauseVisitor):
"""Given a selectable, visit clauses and determine if any columns
from the clause are in the selectable.
"""
def __init__(self, selectable):
self.selectable = selectable
self.result = False
def visit_column(self, column):
if self.selectable.c.get(column.key) is column:
self.result = True
class AbstractClauseProcessor(visitors.NoColumnVisitor):
"""Traverse a clause and attempt to convert the contents of container elements
to a converted element.
The conversion operation is defined by subclasses.
"""
def convert_element(self, elem):
"""Define the *conversion* method for this ``AbstractClauseProcessor``."""
raise NotImplementedError()
def copy_and_process(self, list_):
"""Copy the container elements in the given list to a new list and
process the new list.
"""
list_ = list(list_)
self.process_list(list_)
return list_
def process_list(self, list_):
"""Process all elements of the given list in-place."""
for i in range(0, len(list_)):
elem = self.convert_element(list_[i])
if elem is not None:
list_[i] = elem
else:
list_[i] = self.traverse(list_[i], clone=True)
def visit_grouping(self, grouping):
elem = self.convert_element(grouping.elem)
if elem is not None:
grouping.elem = elem
def visit_clauselist(self, clist):
for i in range(0, len(clist.clauses)):
n = self.convert_element(clist.clauses[i])
if n is not None:
clist.clauses[i] = n
def visit_unary(self, unary):
elem = self.convert_element(unary.element)
if elem is not None:
unary.element = elem
def visit_binary(self, binary):
elem = self.convert_element(binary.left)
if elem is not None:
binary.left = elem
elem = self.convert_element(binary.right)
if elem is not None:
binary.right = elem
def visit_join(self, join):
elem = self.convert_element(join.left)
if elem is not None:
join.left = elem
elem = self.convert_element(join.right)
if elem is not None:
join.right = elem
join._init_primary_key()
def visit_select(self, select):
fr = util.OrderedSet()
for elem in select._froms:
n = self.convert_element(elem)
if n is not None:
fr.add((elem, n))
select._recorrelate_froms(fr)
col = []
for elem in select._raw_columns:
n = self.convert_element(elem)
if n is None:
col.append(elem)
else:
col.append(n)
select._raw_columns = col
class ClauseAdapter(AbstractClauseProcessor):
"""Given a clause (like as in a WHERE criterion), locate columns
which are embedded within a given selectable, and changes those
columns to be that of the selectable.
E.g.::
table1 = Table('sometable', metadata,
Column('col1', Integer),
Column('col2', Integer)
)
table2 = Table('someothertable', metadata,
Column('col1', Integer),
Column('col2', Integer)
)
condition = table1.c.col1 == table2.c.col1
and make an alias of table1::
s = table1.alias('foo')
calling ``ClauseAdapter(s).traverse(condition)`` converts
condition to read::
s.c.col1 == table2.c.col1
"""
def __init__(self, selectable, include=None, exclude=None, equivalents=None):
self.selectable = selectable
self.include = include
self.exclude = exclude
self.equivalents = equivalents
def convert_element(self, col):
if isinstance(col, expression.FromClause):
if self.selectable.is_derived_from(col):
return self.selectable
if not isinstance(col, expression.ColumnElement):
return None
if self.include is not None:
if col not in self.include:
return None
if self.exclude is not None:
if col in self.exclude:
return None
newcol = self.selectable.corresponding_column(col, raiseerr=False, require_embedded=True, keys_ok=False)
if newcol is None and self.equivalents is not None and col in self.equivalents:
for equiv in self.equivalents[col]:
newcol = self.selectable.corresponding_column(equiv, raiseerr=False, require_embedded=True, keys_ok=False)
if newcol:
return newcol
#if newcol is None:
# self.traverse(col)
# return col
return newcol
|
|
from nose.tools import assert_true, assert_equal, assert_raises
from unittest import TestCase
import numpy as np
import numpy.linalg as npla
import numbers
from CrKr.crkr import CrKr
class TestCrKr(TestCase):
def setUp(self):
self.S_2x3 = np.array([[1, 2, 3], [4, 5, 6]])
self.C_2x2 = np.array([[1, 0], [0, 5]])
self.D_2x3 = np.array([[1, 2, 3], [4, 5, 6]])
self.ridge_factor_05 = 0.5
self.sigma_05 = 0.5
self.a_1 = 1
        self.C_3x2 = np.array([[0.1, 0.0], [0.3, 0.4], [0.5, 0.6]])  # non-square (3 x 2)
self.C_3x3 = np.array([[0.1, 0.0, 0.0],
[0.0, 0.4, 0.0],
[0.0, 0.0, 0.6]])
self.D_3x3 = np.array([[7, 8, 9], [10, 11, 12], [14, 15, 16]])
def test_init_default(self):
"""Tests if default variables exists and their data types.
"""
crkr = CrKr(self.S_2x3, self.C_2x2, self.D_2x3)
ridge_factor = crkr.ridge_factor
sigma = crkr.sigma
a = crkr.a
assert_true(isinstance(ridge_factor, numbers.Number))
assert_true(isinstance(sigma, numbers.Number))
assert_true(isinstance(a, numbers.Number))
def test_init_custom(self):
"""Tests if custom variables are assigned.
"""
crkr = CrKr(self.S_2x3, self.C_2x2, self.D_2x3,
self.ridge_factor_05, self.sigma_05, self.a_1)
assert_true(np.array_equal(self.S_2x3, crkr.S))
assert_true(np.array_equal(self.C_2x2, crkr.C))
assert_true(np.array_equal(self.D_2x3, crkr.D))
assert_equal(self.ridge_factor_05, crkr.ridge_factor)
assert_equal(self.sigma_05, crkr.sigma)
assert_equal(self.a_1, crkr.a)
def test_init_inconsistent_shape_c_matrix(self):
"""Tests if an exception is raised, for wrongly-shaped C parameter.
"""
assert_raises(ValueError, CrKr, self.S_2x3, self.C_3x3, self.D_2x3,
self.ridge_factor_05, self.sigma_05, self.a_1)
def test_init_non_square_c_matrix(self):
"""Tests if an exception is raised, for wrongly-shaped C parameter.
"""
        assert_raises(ValueError, CrKr, self.S_2x3, self.C_3x2, self.D_2x3,
self.ridge_factor_05, self.sigma_05, self.a_1)
def test_init_inconsistent_shape_d_matrix(self):
"""Tests if an exception is raised, for wrongly-shaped D parameter.
"""
assert_raises(ValueError, CrKr, self.S_2x3, self.C_2x2, self.D_3x3,
self.ridge_factor_05, self.sigma_05, self.a_1)
def test_gaussian_kernel(self):
"""Tests if the gaussian kernel is correctly computed.
"""
crkr = CrKr(self.S_2x3, self.C_2x2, self.D_2x3,
self.ridge_factor_05, self.sigma_05, self.a_1)
s1 = np.array([[1, 2, 3]])
s2 = np.array([[4, 5, 6]])
expected_gk = np.exp(-(self.a_1 * np.power(npla.norm(s1 - s2), 2) /
(2 * (self.sigma_05 ** 2))))
assert_equal(expected_gk, crkr._gaussian_kernel(s1, s2))
def test_gaussian_kernel_same_state(self):
"""Tests if the gaussian kernel is 1 for two equal states.
"""
crkr = CrKr(self.S_2x3, self.C_2x2, self.D_2x3)
s = np.array([[1, 2, 3]])
assert_equal(1, crkr._gaussian_kernel(s, s))
def test_compute_k(self):
"""Tests if k is correctly computed.
"""
S = self.S_2x3
new_s = np.array([[0, 1, 2]])
exponent = (-self.a_1 * np.power(npla.norm(new_s - S, axis=1), 2) /
(2 * (self.sigma_05 ** 2)))
expected_k = np.exp(exponent)
expected_k = np.array([expected_k]).T
crkr = CrKr(S, self.C_2x2, self.D_2x3,
self.ridge_factor_05, self.sigma_05, self.a_1)
result_k = crkr._compute_k(new_s)
assert_equal(expected_k.shape, result_k.shape)
assert_true(np.allclose(expected_k, result_k))
def test_compute_K(self):
"""Tests if K is correctly computed.
"""
S = self.S_2x3
expected_K = np.zeros((S.shape[0], S.shape[0]))
for i in range(0, S.shape[0]):
for j in range(0, S.shape[0]):
s1 = np.array([S[i, :]])
s2 = np.array([S[j, :]])
exponent = (-self.a_1 * np.power(npla.norm(s1 - s2), 2) /
(2 * (self.sigma_05 ** 2)))
expected_K[i, j] = np.exp(exponent)
crkr = CrKr(S, self.C_2x2, self.D_2x3,
self.ridge_factor_05, self.sigma_05, self.a_1)
assert_true(np.allclose(expected_K, crkr._compute_K()))
def test_delta_mean(self):
"""Tests if delta mean is correctly computed.
"""
S = self.S_2x3
C = self.C_2x2
D = self.D_2x3
ridge_factor = self.ridge_factor_05
sigma = self.sigma_05
a = self.a_1
crkr = CrKr(S, C, D, ridge_factor, sigma, a)
new_s = np.array([[0, 0, 0]])
k = crkr._compute_k(new_s)
K = crkr._compute_K()
expected_dm = np.dot(k.T,
np.dot(np.linalg.inv(K + ridge_factor * C), D))
assert_true(np.allclose(expected_dm, crkr._delta_mean(k, K)))
def test_delta_variance(self):
"""Tests if delta variance is correctly computed.
"""
S = self.S_2x3
C = self.C_2x2
D = self.D_2x3
ridge_factor = self.ridge_factor_05
sigma = self.sigma_05
a = self.a_1
crkr = CrKr(S, C, D, ridge_factor, sigma, a)
new_s = np.array([[1, 1, 1]])
k = crkr._compute_k(new_s)
K = crkr._compute_K()
expected_dv = (a +
ridge_factor -
np.dot(k.T, np.dot(npla.inv(K + ridge_factor * C), k)))
assert_true(np.allclose(expected_dv, crkr._delta_variance(k, K)))
def test_delta_estimate(self):
"""Test if a 2-dimensional numpy array is returned.
"""
crkr = CrKr(self.S_2x3, self.C_2x2, self.D_2x3)
new_s = np.array([[1, 2, 5]])
delta_estimate = crkr.delta_estimate(new_s)
assert_equal(delta_estimate.shape, (1, self.S_2x3.shape[1]))
def test_delta_estimate_diff_shape_state(self):
"""Tests if an exception is raised, for wrongly-shaped state
"""
crkr = CrKr(self.S_2x3, self.C_2x2, self.D_2x3)
new_s = np.array([[1, 2, 3, 4]])
assert_raises(ValueError, crkr.delta_estimate, new_s)
def test_update_matrices(self):
"""Tests if the matrices are correctly updated.
"""
crkr = CrKr(self.S_2x3, self.C_2x2, self.D_2x3)
new_s = np.array([[7, 8, 9]])
new_reward = 20
new_d = np.array([[13, 14, 15]])
expected_S = np.vstack((self.S_2x3, new_s))
expected_C_diag = np.append(np.diagonal(self.C_2x2), 1.0 / new_reward)
expected_C = np.diag(expected_C_diag)
expected_D = np.vstack((self.D_2x3, new_d))
crkr.update_matrices(new_s, new_reward, new_d)
assert_true(np.allclose(expected_S, crkr.S))
assert_true(np.allclose(expected_C, crkr.C))
assert_true(np.allclose(expected_D, crkr.D))
def test_update_matrices_diff_shape_state(self):
"""Tests if an exception is raises, for wrongly-shaped state.
"""
crkr = CrKr(self.S_2x3, self.C_2x2, self.D_2x3)
new_s = np.array([[7, 8, 9, 10]])
new_reward = 20
new_d = np.array([[13, 14, 15]])
assert_raises(ValueError,
crkr.update_matrices,
new_s,
new_reward,
new_d)
def test_update_matrices_zero_reward(self):
"""Tests if an exception is raises, for reward of 0.
"""
crkr = CrKr(self.S_2x3, self.C_2x2, self.D_2x3)
new_s = np.array([[7, 8, 9]])
new_reward = 0
new_d = np.array([[13, 14, 15]])
assert_raises(ValueError,
crkr.update_matrices,
new_s,
new_reward,
new_d)
def test_update_matrices_diff_shape_delta(self):
"""Tests if an exception is raises, for reward of 0.
"""
crkr = CrKr(self.S_2x3, self.C_2x2, self.D_2x3)
new_s = np.array([[7, 8, 9]])
new_reward = 0.2
new_d = np.array([[13, 14, 15, 16]])
assert_raises(ValueError,
crkr.update_matrices,
new_s,
new_reward,
new_d)
|
|
#!/usr/bin/env python
import numpy as np
from pandas.io.data import get_quote_yahoo
import json
import locale
locale.setlocale(locale.LC_ALL, '')
class Portfolio(object):
def __init__(self):
self.ideal_allocation = {}
self.stocks_owned = {}
self.class_total = {}
self.cash = 0.0
self.classification = {}
self.current_asset_percentages = []
self.core_total = 0.0
self.total = 0.0
self.tolerance = 3.5 # percentage off ideal before recommended action
def get_ideal_allocation(self, infile):
"""Reads in file of ideal portfolio allocation.
Use 1-word (no spaces) for asset class.
"tolerance" is a special word which gives the tolerance level
before a rebalance is recommended."""
with open(infile, 'r') as file_handle:
for line in file_handle:
if line.split()[0] == "tolerance":
self.tolerance = float(line.split()[1])
else:
self.ideal_allocation[line.split()[0]] = float(line.split()[1])
self.class_total[line.split()[0]] = 0.0
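    # Illustrative allocation file for get_ideal_allocation (hypothetical
    # asset classes; percentages should sum to 100):
    #     tolerance 3.0
    #     us_stock 50
    #     intl_stock 30
    #     bond 20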
def parse_ideal_allocation(self, infile):
"""Reads in json formatted file of the ideal portfolio
allocation."""
with open(infile, 'r') as file_handle:
allocation_dict = json.load(file_handle)
for key in allocation_dict:
if key == "tolerance":
self.tolerance = allocation_dict[key]
else:
self.ideal_allocation[key] = float(allocation_dict[key])
self.class_total[key] = 0.0
def get_account_details(self, infiles):
for infile in infiles:
with open(infile, 'r') as file_handle:
for line in file_handle:
name = line.split()[0]
if name == 'CASH':
self.cash += float(line.split()[1].strip("$"))
else:
                        if name not in self.stocks_owned:
                            self.stocks_owned[name] = {'shares': 0.0}
                        self.stocks_owned[name]['shares'] += float(line.split()[1])
                        self.stocks_owned[name]['asset_class'] = line.split()[2]
def parse_account_details(self, webdict):
for name in webdict:
if name == 'CASH':
self.cash += webdict[name]
else:
                if name not in self.stocks_owned:
                    self.stocks_owned[name] = {'shares': 0.0}
                self.stocks_owned[name]['shares'] += webdict[name]['shares']
                self.stocks_owned[name]['asset_class'] = webdict[name]['asset_class']
def get_stock_prices(self):
dataframe = get_quote_yahoo([stock for stock in self.stocks_owned])
for stock in self.stocks_owned:
self.stocks_owned[stock]['price'] = dataframe.ix[stock]['last']
def get_core_total(self):
self.core_total = 0.0
self.total = 0.0
self.core_total += self.cash
self.total += self.cash
for stock in self.stocks_owned:
temp_amount = self.stocks_owned[stock]['price'] * self.stocks_owned[stock]['shares']
if self.stocks_owned[stock]['asset_class'] in self.ideal_allocation:
self.core_total += temp_amount
self.class_total[self.stocks_owned[stock]['asset_class']] += temp_amount
self.total += temp_amount
else:
self.total += temp_amount
def get_current_allocation(self):
"""Remember same stock can't have two asset_classes."""
for stock in self.stocks_owned:
if self.stocks_owned[stock]['asset_class'] in self.ideal_allocation:
temp_asset = self.stocks_owned[stock]['asset_class']
self.current_asset_percentages.append(
(stock,
self.class_total[temp_asset] / self.core_total * 100. - self.ideal_allocation[temp_asset],
temp_asset))
def get_recommendations(self):
"""Print recommendations."""
print "Recommended actions:"
for st, perc, asset in sorted(self.current_asset_percentages, key=lambda x: np.abs(x[1]), reverse=True):
shares = round(self.core_total * perc / 100. / self.stocks_owned[st]['price'], 0)
if np.abs(perc) >= self.tolerance:
if shares > 0:
print "Sell:", int(np.abs(shares)), st, asset, round(perc, 1)
if shares < 0:
print "Buy:", int(np.abs(shares)), st, asset, round(perc, 1)
else:
print "W/in tol:",
if shares > 0.0:
print "Sell", int(np.abs(shares)), st, asset, round(perc, 1)
else:
print "Buy", int(np.abs(shares)), st, asset, round(perc, 1)
def push_recommendations(self, return_string=""):
"""Pushover recommendations."""
priority = 0
return_string = '\n'.join([return_string, "Recommended actions:", '\n'])
        for st, perc, asset in sorted(self.current_asset_percentages, key=lambda x: np.abs(x[1]), reverse=True):
shares = round(self.core_total * perc / 100. / self.stocks_owned[st]['price'], 0)
if np.abs(perc) >= self.tolerance:
priority = 1
if shares > 0:
return_string = ' '.join([return_string,
"Sell:",
str(int(np.abs(shares))),
str(st), str(asset),
str(round(perc, 1)),
'\n'])
if shares < 0:
return_string = ' '.join([return_string,
"Buy:",
str(int(np.abs(shares))),
str(st),
str(asset),
str(round(perc, 1)),
'\n'])
else:
return_string = ' '.join([return_string, "W/in tol:", ])
if shares > 0.0:
return_string = ' '.join([return_string,
"Sell",
str(int(np.abs(shares))),
str(st),
str(asset),
str(round(perc, 1)),
'\n'])
else:
return_string = ' '.join([return_string,
"Buy",
str(int(np.abs(shares))),
str(st),
str(asset),
str(round(perc, 1)),
'\n'])
return return_string, priority
def get_summary(self):
print "Cash:", locale.currency(self.cash, grouping=True)
print "Core Total:", locale.currency(self.core_total, grouping=True)
print "Total:", locale.currency(self.total, grouping=True)
def push_summary(self):
"""Pushover summary."""
return_string = ""
return_string = ' '.join([return_string,
"Cash:",
locale.currency(self.cash, grouping=True),
"\n"])
return_string = ' '.join([return_string,
"Core Total:",
locale.currency(self.core_total, grouping=True),
"\n"])
return_string = ' '.join([return_string,
"Total:",
locale.currency(self.total, grouping=True),
"\n"])
return return_string
def push_full_recommendations(self):
"""Both overall summary and recommendations."""
summary = self.push_summary()
return self.push_recommendations(summary)
def detailed_summary(self):
for stock in self.stocks_owned:
print stock, locale.currency(self.stocks_owned[stock]['price'] * self.stocks_owned[stock]['shares'],
grouping=True)
|
|
#!/usr/bin/env python3
# Copyright (c) 2010 ArtForz -- public domain half-a-node
# Copyright (c) 2012 Jeff Garzik
# Copyright (c) 2010-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Bitcoin test framework primitive and message strcutures
CBlock, CTransaction, CBlockHeader, CTxIn, CTxOut, etc....:
data structures that should map to corresponding structures in
bitcoin/primitives
msg_block, msg_tx, msg_headers, etc.:
data structures that represent network messages
ser_*, deser_*: functions that handle serialization/deserialization."""
from codecs import encode
import copy
import hashlib
from io import BytesIO
import random
import socket
import struct
import time
from test_framework.siphash import siphash256
from test_framework.util import hex_str_to_bytes, bytes_to_hex_str
MIN_VERSION_SUPPORTED = 95700
MY_VERSION = 95704 # past bip-31 for ping/pong
MY_SUBVERSION = b"/python-mininode-tester:0.0.3/"
MY_RELAY = 1 # from version 70001 onwards, fRelay should be appended to version messages (BIP37)
MAX_INV_SZ = 50000
MAX_BLOCK_BASE_SIZE = 1000000
COIN = 100000000 # 1 btc in satoshis
NODE_NETWORK = (1 << 0)
# NODE_GETUTXO = (1 << 1)
NODE_BLOOM = (1 << 2)
# Serialization/deserialization tools
def sha256(s):
return hashlib.new('sha256', s).digest()
def ripemd160(s):
return hashlib.new('ripemd160', s).digest()
def hash256(s):
return sha256(sha256(s))
def ser_compact_size(l):
r = b""
if l < 253:
r = struct.pack("B", l)
elif l < 0x10000:
r = struct.pack("<BH", 253, l)
elif l < 0x100000000:
r = struct.pack("<BI", 254, l)
else:
r = struct.pack("<BQ", 255, l)
return r
def deser_compact_size(f):
nit = struct.unpack("<B", f.read(1))[0]
if nit == 253:
nit = struct.unpack("<H", f.read(2))[0]
elif nit == 254:
nit = struct.unpack("<I", f.read(4))[0]
elif nit == 255:
nit = struct.unpack("<Q", f.read(8))[0]
return nit
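# Illustrative encodings: ser_compact_size(100) == b'\x64';
# ser_compact_size(1000) == b'\xfd\xe8\x03' (253 marker + little-endian uint16);
# ser_compact_size(100000) == b'\xfe\xa0\x86\x01\x00' (254 marker + uint32).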
def deser_string(f):
nit = deser_compact_size(f)
return f.read(nit)
def ser_string(s):
return ser_compact_size(len(s)) + s
def deser_uint256(f):
r = 0
for i in range(8):
t = struct.unpack("<I", f.read(4))[0]
r += t << (i * 32)
return r
def ser_uint256(u):
rs = b""
for i in range(8):
rs += struct.pack("<I", u & 0xFFFFFFFF)
u >>= 32
return rs
def ser_uint64(u):
rs = b""
for i in range(2):
rs += struct.pack("<I", u & 0xFFFFFFFF)
u >>= 32
return rs
def uint256_from_str(s):
r = 0
t = struct.unpack("<IIIIIIII", s[:32])
for i in range(8):
r += t[i] << (i * 32)
return r
def uint256_from_compact(c):
nbytes = (c >> 24) & 0xFF
v = (c & 0xFFFFFF) << (8 * (nbytes - 3))
return v
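# Worked example: the compact value 0x1d00ffff (Bitcoin's original difficulty-1
# target) has nbytes == 0x1d, so it expands to 0x00ffff << (8 * (0x1d - 3)),
# i.e. 0xffff * 2**208.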
def deser_vector(f, c):
nit = deser_compact_size(f)
r = []
for i in range(nit):
t = c()
t.deserialize(f)
r.append(t)
return r
# ser_function_name: Allow for an alternate serialization function on the
# entries in the vector (we use this for serializing the vector of transactions
# for a witness block).
def ser_vector(l, ser_function_name=None):
r = ser_compact_size(len(l))
for i in l:
if ser_function_name:
r += getattr(i, ser_function_name)()
else:
r += i.serialize()
return r
def deser_uint256_vector(f):
nit = deser_compact_size(f)
r = []
for i in range(nit):
t = deser_uint256(f)
r.append(t)
return r
def ser_uint256_vector(l):
r = ser_compact_size(len(l))
for i in l:
r += ser_uint256(i)
return r
def deser_string_vector(f):
nit = deser_compact_size(f)
r = []
for i in range(nit):
t = deser_string(f)
r.append(t)
return r
def ser_string_vector(l):
r = ser_compact_size(len(l))
for sv in l:
r += ser_string(sv)
return r
# Deserialize from a hex string representation (eg from RPC)
def FromHex(obj, hex_string):
obj.deserialize(BytesIO(hex_str_to_bytes(hex_string)))
return obj
# Convert a binary-serializable object to hex (eg for submission via RPC)
def ToHex(obj):
return bytes_to_hex_str(obj.serialize())
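# Illustrative round trip, assuming hex_string is a canonically encoded
# transaction:
#     tx = FromHex(CTransaction(), hex_string)
#     assert ToHex(tx) == hex_string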
# Objects that map to bitcoind objects, which can be serialized/deserialized
class CAddress():
def __init__(self):
self.nServices = 1
self.pchReserved = b"\x00" * 10 + b"\xff" * 2
self.ip = "0.0.0.0"
self.port = 0
def deserialize(self, f):
self.nServices = struct.unpack("<Q", f.read(8))[0]
self.pchReserved = f.read(12)
self.ip = socket.inet_ntoa(f.read(4))
self.port = struct.unpack(">H", f.read(2))[0]
def serialize(self):
r = b""
r += struct.pack("<Q", self.nServices)
r += self.pchReserved
r += socket.inet_aton(self.ip)
r += struct.pack(">H", self.port)
return r
def __repr__(self):
return "CAddress(nServices=%i ip=%s port=%i)" % (self.nServices,
self.ip, self.port)
class CInv():
typemap = {
0: "Error",
1: "TX",
2: "Block",
}
def __init__(self, t=0, h=0):
self.type = t
self.hash = h
def deserialize(self, f):
self.type = struct.unpack("<i", f.read(4))[0]
self.hash = deser_uint256(f)
def serialize(self):
r = b""
r += struct.pack("<i", self.type)
r += ser_uint256(self.hash)
return r
def __repr__(self):
return "CInv(type=%s hash=%064x)" \
% (self.typemap[self.type], self.hash)
class CBlockLocator():
def __init__(self):
self.nVersion = MY_VERSION
self.vHave = []
def deserialize(self, f):
self.nVersion = struct.unpack("<i", f.read(4))[0]
self.vHave = deser_uint256_vector(f)
def serialize(self):
r = b""
r += struct.pack("<i", self.nVersion)
r += ser_uint256_vector(self.vHave)
return r
def __repr__(self):
return "CBlockLocator(nVersion=%i vHave=%s)" \
% (self.nVersion, repr(self.vHave))
class COutPoint():
def __init__(self, hash=0, n=0):
self.hash = hash
self.n = n
def deserialize(self, f):
self.hash = deser_uint256(f)
self.n = struct.unpack("<I", f.read(4))[0]
def serialize(self):
r = b""
r += ser_uint256(self.hash)
r += struct.pack("<I", self.n)
return r
def __repr__(self):
return "COutPoint(hash=%064x n=%i)" % (self.hash, self.n)
class CTxIn():
def __init__(self, outpoint=None, scriptSig=b"", nSequence=0):
if outpoint is None:
self.prevout = COutPoint()
else:
self.prevout = outpoint
self.scriptSig = scriptSig
self.nSequence = nSequence
def deserialize(self, f):
self.prevout = COutPoint()
self.prevout.deserialize(f)
self.scriptSig = deser_string(f)
self.nSequence = struct.unpack("<I", f.read(4))[0]
def serialize(self):
r = b""
r += self.prevout.serialize()
r += ser_string(self.scriptSig)
r += struct.pack("<I", self.nSequence)
return r
def __repr__(self):
return "CTxIn(prevout=%s scriptSig=%s nSequence=%i)" \
% (repr(self.prevout), bytes_to_hex_str(self.scriptSig),
self.nSequence)
class CTxOut():
def __init__(self, nValue=0, scriptPubKey=b""):
self.nValue = nValue
self.scriptPubKey = scriptPubKey
def deserialize(self, f):
self.nValue = struct.unpack("<q", f.read(8))[0]
self.scriptPubKey = deser_string(f)
def serialize(self):
r = b""
r += struct.pack("<q", self.nValue)
r += ser_string(self.scriptPubKey)
return r
def __repr__(self):
return "CTxOut(nValue=%i.%08i scriptPubKey=%s)" \
% (self.nValue // COIN, self.nValue % COIN,
bytes_to_hex_str(self.scriptPubKey))
class CTransaction():
def __init__(self, tx=None):
if tx is None:
self.nVersion = 1
self.nTime = 0
self.vin = []
self.vout = []
self.nLockTime = 0
self.sha256 = None
self.hash = None
else:
            self.nVersion = tx.nVersion
            self.nTime = tx.nTime
self.vin = copy.deepcopy(tx.vin)
self.vout = copy.deepcopy(tx.vout)
self.nLockTime = tx.nLockTime
self.sha256 = tx.sha256
self.hash = tx.hash
def deserialize(self, f):
self.nVersion = struct.unpack("<i", f.read(4))[0]
self.nTime = struct.unpack("<Q", f.read(8))[0]
self.vin = deser_vector(f, CTxIn)
flags = 0
if len(self.vin) == 0:
flags = struct.unpack("<B", f.read(1))[0]
# Not sure why flags can't be zero, but this
# matches the implementation in bitcoind
if (flags != 0):
self.vin = deser_vector(f, CTxIn)
self.vout = deser_vector(f, CTxOut)
else:
self.vout = deser_vector(f, CTxOut)
self.nLockTime = struct.unpack("<I", f.read(4))[0]
self.sha256 = None
self.hash = None
def serialize_without_witness(self):
r = b""
r += struct.pack("<i", self.nVersion)
r += struct.pack("<Q", self.nTime)
r += ser_vector(self.vin)
r += ser_vector(self.vout)
r += struct.pack("<I", self.nLockTime)
return r
# Regular serialization is with witness -- must explicitly
# call serialize_without_witness to exclude witness data.
def serialize(self):
return self.serialize_without_witness()
# Recalculate the txid (transaction hash without witness)
def rehash(self):
self.sha256 = None
self.calc_sha256()
# We will only cache the serialization without witness in
# self.sha256 and self.hash -- those are expected to be the txid.
    def calc_sha256(self, with_witness=False):
        if self.sha256 is None:
            self.sha256 = uint256_from_str(hash256(self.serialize_without_witness()))
            self.hash = encode(hash256(self.serialize_without_witness())[::-1], 'hex_codec').decode('ascii')
        # Return the txid so callers (e.g. calc_witness_merkle_root) can use it.
        return self.sha256
def is_valid(self):
self.calc_sha256()
for tout in self.vout:
if tout.nValue < 0 or tout.nValue > 21000000 * COIN:
return False
return True
def __repr__(self):
return "CTransaction(nVersion=%i nTime=%i vin=%s vout=%s nLockTime=%i)" \
% (self.nVersion, self.nTime, repr(self.vin), repr(self.vout), self.nLockTime)
class CBlockHeader():
def __init__(self, header=None):
if header is None:
self.set_null()
else:
self.nVersion = header.nVersion
self.hashPrevBlock = header.hashPrevBlock
self.hashMerkleRoot = header.hashMerkleRoot
self.nTime = header.nTime
self.nBits = header.nBits
self.nNonce = header.nNonce
self.nAccumulatorCheckpoint = header.nAccumulatorCheckpoint
self.sha256 = header.sha256
self.hash = header.hash
self.calc_sha256()
def set_null(self):
self.nVersion = 4
self.hashPrevBlock = 0
self.hashMerkleRoot = 0
self.nTime = 0
self.nBits = 0
self.nNonce = 0
self.nAccumulatorCheckpoint = 0
self.sha256 = None
self.hash = None
def deserialize(self, f):
self.nVersion = struct.unpack("<i", f.read(4))[0]
self.hashPrevBlock = deser_uint256(f)
self.hashMerkleRoot = deser_uint256(f)
self.nTime = struct.unpack("<I", f.read(4))[0]
self.nBits = struct.unpack("<I", f.read(4))[0]
self.nNonce = struct.unpack("<I", f.read(4))[0]
self.nAccumulatorCheckpoint = deser_uint256(f)
self.sha256 = None
self.hash = None
def serialize(self):
r = b""
r += struct.pack("<i", self.nVersion)
r += ser_uint256(self.hashPrevBlock)
r += ser_uint256(self.hashMerkleRoot)
r += struct.pack("<I", self.nTime)
r += struct.pack("<I", self.nBits)
r += struct.pack("<I", self.nNonce)
r += ser_uint256(self.nAccumulatorCheckpoint)
return r
def calc_sha256(self):
if self.sha256 is None:
r = b""
r += struct.pack("<i", self.nVersion)
r += ser_uint256(self.hashPrevBlock)
r += ser_uint256(self.hashMerkleRoot)
r += struct.pack("<I", self.nTime)
r += struct.pack("<I", self.nBits)
r += struct.pack("<I", self.nNonce)
r += ser_uint256(self.nAccumulatorCheckpoint)
self.sha256 = uint256_from_str(hash256(r))
self.hash = encode(hash256(r)[::-1], 'hex_codec').decode('ascii')
def rehash(self):
self.sha256 = None
self.calc_sha256()
return self.sha256
# ION Uniqueness
def get_uniqueness(self, prevout):
r = b""
r += struct.pack("<I", prevout.n)
r += ser_uint256(prevout.hash)
return r
def solve_stake(self, prevouts):
target0 = uint256_from_compact(self.nBits)
loop = True
while loop:
for prevout in prevouts:
nvalue, txBlockTime, stakeModifier, hashStake = prevouts[prevout]
target = int(target0 * nvalue / 100) % 2**256
data = b""
data += ser_uint64(stakeModifier)
data += struct.pack("<I", txBlockTime)
# prevout for zPoS is serial hashes hex strings
if isinstance(prevout, COutPoint):
data += self.get_uniqueness(prevout)
else:
data += ser_uint256(uint256_from_str(bytes.fromhex(hashStake)[::-1]))
data += struct.pack("<I", self.nTime)
posHash = uint256_from_str(hash256(data))
if posHash <= target:
self.prevoutStake = prevout
loop = False
break
if loop:
self.nTime += 1
return True
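    # Note on solve_stake() above (illustrative numbers): the per-UTXO target
    # is target0 * nvalue / 100, so a prevout with nvalue=200 hashes against
    # twice the base target and is proportionally more likely to satisfy
    # posHash <= target at any given nTime -- the stake weighting PoS relies on.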
def __repr__(self):
return "CBlockHeader(nVersion=%i hashPrevBlock=%064x hashMerkleRoot=%064x nTime=%s nBits=%08x nNonce=%08x)" \
% (self.nVersion, self.hashPrevBlock, self.hashMerkleRoot,
time.ctime(self.nTime), self.nBits, self.nNonce)
class CBlock(CBlockHeader):
def __init__(self, header=None):
super(CBlock, self).__init__(header)
self.vtx = []
def deserialize(self, f):
super(CBlock, self).deserialize(f)
self.vtx = deser_vector(f, CTransaction)
def serialize(self, with_witness=False):
r = b""
r += super(CBlock, self).serialize()
if with_witness:
r += ser_vector(self.vtx, "serialize_with_witness")
else:
r += ser_vector(self.vtx, "serialize_without_witness")
if hasattr(self, 'vchBlockSig'):
r += ser_string(self.vchBlockSig)
return r
# Calculate the merkle root given a vector of transaction hashes
@classmethod
def get_merkle_root(cls, hashes):
while len(hashes) > 1:
newhashes = []
for i in range(0, len(hashes), 2):
i2 = min(i+1, len(hashes)-1)
newhashes.append(hash256(hashes[i] + hashes[i2]))
hashes = newhashes
return uint256_from_str(hashes[0])
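    # Worked example for get_merkle_root() (illustrative): with three leaves
    # h0, h1, h2, the odd leaf is paired with itself (i2 clamps to the last
    # index), so the root is hash256(hash256(h0 + h1) + hash256(h2 + h2)) --
    # Bitcoin's duplicate-the-last-entry rule.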
def calc_merkle_root(self):
hashes = []
for tx in self.vtx:
tx.calc_sha256()
hashes.append(ser_uint256(tx.sha256))
return self.get_merkle_root(hashes)
def calc_witness_merkle_root(self):
# For witness root purposes, the hash of the
# coinbase, with witness, is defined to be 0...0
hashes = [ser_uint256(0)]
for tx in self.vtx[1:]:
# Calculate the hashes with witness data
hashes.append(ser_uint256(tx.calc_sha256(True)))
return self.get_merkle_root(hashes)
def is_valid(self):
self.calc_sha256()
target = uint256_from_compact(self.nBits)
if self.sha256 > target:
return False
for tx in self.vtx:
if not tx.is_valid():
return False
if self.calc_merkle_root() != self.hashMerkleRoot:
return False
return True
def solve(self):
self.rehash()
target = uint256_from_compact(self.nBits)
while self.sha256 > target:
self.nNonce += 1
self.rehash()
def sign_block(self, key, low_s=True):
data = b""
data += struct.pack("<i", self.nVersion)
data += ser_uint256(self.hashPrevBlock)
data += ser_uint256(self.hashMerkleRoot)
data += struct.pack("<I", self.nTime)
data += struct.pack("<I", self.nBits)
data += struct.pack("<I", self.nNonce)
data += ser_uint256(self.nAccumulatorCheckpoint)
sha256NoSig = hash256(data)
self.vchBlockSig = key.sign(sha256NoSig, low_s=low_s)
def __repr__(self):
return "CBlock(nVersion=%i hashPrevBlock=%064x hashMerkleRoot=%064x nTime=%s nBits=%08x nNonce=%08x vtx=%s)" \
% (self.nVersion, self.hashPrevBlock, self.hashMerkleRoot,
time.ctime(self.nTime), self.nBits, self.nNonce, repr(self.vtx))
class PrefilledTransaction():
def __init__(self, index=0, tx = None):
self.index = index
self.tx = tx
def deserialize(self, f):
self.index = deser_compact_size(f)
self.tx = CTransaction()
self.tx.deserialize(f)
def serialize(self, with_witness=True):
r = b""
r += ser_compact_size(self.index)
if with_witness:
r += self.tx.serialize_with_witness()
else:
r += self.tx.serialize_without_witness()
return r
def serialize_without_witness(self):
return self.serialize(with_witness=False)
def serialize_with_witness(self):
return self.serialize(with_witness=True)
def __repr__(self):
return "PrefilledTransaction(index=%d, tx=%s)" % (self.index, repr(self.tx))
# This is what we send on the wire, in a cmpctblock message.
class P2PHeaderAndShortIDs():
def __init__(self):
self.header = CBlockHeader()
self.nonce = 0
self.shortids_length = 0
self.shortids = []
self.prefilled_txn_length = 0
self.prefilled_txn = []
def deserialize(self, f):
self.header.deserialize(f)
self.nonce = struct.unpack("<Q", f.read(8))[0]
self.shortids_length = deser_compact_size(f)
for i in range(self.shortids_length):
# shortids are defined to be 6 bytes in the spec, so append
# two zero bytes and read it in as an 8-byte number
self.shortids.append(struct.unpack("<Q", f.read(6) + b'\x00\x00')[0])
self.prefilled_txn = deser_vector(f, PrefilledTransaction)
self.prefilled_txn_length = len(self.prefilled_txn)
# When using version 2 compact blocks, we must serialize with_witness.
def serialize(self, with_witness=False):
r = b""
r += self.header.serialize()
r += struct.pack("<Q", self.nonce)
r += ser_compact_size(self.shortids_length)
for x in self.shortids:
# We only want the first 6 bytes
r += struct.pack("<Q", x)[0:6]
if with_witness:
r += ser_vector(self.prefilled_txn, "serialize_with_witness")
else:
r += ser_vector(self.prefilled_txn, "serialize_without_witness")
return r
def __repr__(self):
return "P2PHeaderAndShortIDs(header=%s, nonce=%d, shortids_length=%d, shortids=%s, prefilled_txn_length=%d, prefilledtxn=%s" % (repr(self.header), self.nonce, self.shortids_length, repr(self.shortids), self.prefilled_txn_length, repr(self.prefilled_txn))
# P2P version of the above that will use witness serialization (for compact
# block version 2)
class P2PHeaderAndShortWitnessIDs(P2PHeaderAndShortIDs):
def serialize(self):
return super(P2PHeaderAndShortWitnessIDs, self).serialize(with_witness=True)
# Calculate the BIP 152-compact blocks shortid for a given transaction hash
def calculate_shortid(k0, k1, tx_hash):
expected_shortid = siphash256(k0, k1, tx_hash)
expected_shortid &= 0x0000ffffffffffff
return expected_shortid
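# Illustrative helper (not part of the original framework): derive the two
# little-endian SipHash keys from header+nonce exactly as
# HeaderAndShortIDs.get_siphash_keys() does below, then shorten one tx hash.
def _example_shortid(header, nonce, tx_hash):
    digest = sha256(header.serialize() + struct.pack("<Q", nonce))
    k0 = struct.unpack("<Q", digest[0:8])[0]
    k1 = struct.unpack("<Q", digest[8:16])[0]
    return calculate_shortid(k0, k1, tx_hash)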
# This version gets rid of the array lengths, and reinterprets the differential
# encoding into indices that can be used for lookup.
class HeaderAndShortIDs():
def __init__(self, p2pheaders_and_shortids = None):
self.header = CBlockHeader()
self.nonce = 0
self.shortids = []
self.prefilled_txn = []
self.use_witness = False
        if p2pheaders_and_shortids is not None:
self.header = p2pheaders_and_shortids.header
self.nonce = p2pheaders_and_shortids.nonce
self.shortids = p2pheaders_and_shortids.shortids
last_index = -1
for x in p2pheaders_and_shortids.prefilled_txn:
self.prefilled_txn.append(PrefilledTransaction(x.index + last_index + 1, x.tx))
last_index = self.prefilled_txn[-1].index
def to_p2p(self):
if self.use_witness:
ret = P2PHeaderAndShortWitnessIDs()
else:
ret = P2PHeaderAndShortIDs()
ret.header = self.header
ret.nonce = self.nonce
ret.shortids_length = len(self.shortids)
ret.shortids = self.shortids
ret.prefilled_txn_length = len(self.prefilled_txn)
ret.prefilled_txn = []
last_index = -1
for x in self.prefilled_txn:
ret.prefilled_txn.append(PrefilledTransaction(x.index - last_index - 1, x.tx))
last_index = x.index
return ret
def get_siphash_keys(self):
header_nonce = self.header.serialize()
header_nonce += struct.pack("<Q", self.nonce)
hash_header_nonce_as_str = sha256(header_nonce)
key0 = struct.unpack("<Q", hash_header_nonce_as_str[0:8])[0]
key1 = struct.unpack("<Q", hash_header_nonce_as_str[8:16])[0]
return [ key0, key1 ]
# Version 2 compact blocks use wtxid in shortids (rather than txid)
    def initialize_from_block(self, block, nonce=0, prefill_list=None, use_witness=False):
        if prefill_list is None:
            prefill_list = [0]
self.header = CBlockHeader(block)
self.nonce = nonce
self.prefilled_txn = [ PrefilledTransaction(i, block.vtx[i]) for i in prefill_list ]
self.shortids = []
self.use_witness = use_witness
[k0, k1] = self.get_siphash_keys()
for i in range(len(block.vtx)):
if i not in prefill_list:
tx_hash = block.vtx[i].sha256
if use_witness:
tx_hash = block.vtx[i].calc_sha256(with_witness=True)
self.shortids.append(calculate_shortid(k0, k1, tx_hash))
def __repr__(self):
return "HeaderAndShortIDs(header=%s, nonce=%d, shortids=%s, prefilledtxn=%s" % (repr(self.header), self.nonce, repr(self.shortids), repr(self.prefilled_txn))
class BlockTransactionsRequest():
def __init__(self, blockhash=0, indexes = None):
self.blockhash = blockhash
        self.indexes = indexes if indexes is not None else []
def deserialize(self, f):
self.blockhash = deser_uint256(f)
indexes_length = deser_compact_size(f)
for i in range(indexes_length):
self.indexes.append(deser_compact_size(f))
def serialize(self):
r = b""
r += ser_uint256(self.blockhash)
r += ser_compact_size(len(self.indexes))
for x in self.indexes:
r += ser_compact_size(x)
return r
# helper to set the differentially encoded indexes from absolute ones
def from_absolute(self, absolute_indexes):
self.indexes = []
last_index = -1
for x in absolute_indexes:
self.indexes.append(x-last_index-1)
last_index = x
def to_absolute(self):
absolute_indexes = []
last_index = -1
for x in self.indexes:
absolute_indexes.append(x+last_index+1)
last_index = absolute_indexes[-1]
return absolute_indexes
def __repr__(self):
return "BlockTransactionsRequest(hash=%064x indexes=%s)" % (self.blockhash, repr(self.indexes))
class BlockTransactions():
def __init__(self, blockhash=0, transactions = None):
self.blockhash = blockhash
        self.transactions = transactions if transactions is not None else []
def deserialize(self, f):
self.blockhash = deser_uint256(f)
self.transactions = deser_vector(f, CTransaction)
def serialize(self, with_witness=True):
r = b""
r += ser_uint256(self.blockhash)
if with_witness:
r += ser_vector(self.transactions, "serialize_with_witness")
else:
r += ser_vector(self.transactions, "serialize_without_witness")
return r
def __repr__(self):
return "BlockTransactions(hash=%064x transactions=%s)" % (self.blockhash, repr(self.transactions))
class CPartialMerkleTree():
def __init__(self):
self.nTransactions = 0
self.vHash = []
self.vBits = []
self.fBad = False
def deserialize(self, f):
self.nTransactions = struct.unpack("<i", f.read(4))[0]
self.vHash = deser_uint256_vector(f)
vBytes = deser_string(f)
self.vBits = []
for i in range(len(vBytes) * 8):
self.vBits.append(vBytes[i//8] & (1 << (i % 8)) != 0)
def serialize(self):
r = b""
r += struct.pack("<i", self.nTransactions)
r += ser_uint256_vector(self.vHash)
vBytesArray = bytearray([0x00] * ((len(self.vBits) + 7)//8))
for i in range(len(self.vBits)):
vBytesArray[i // 8] |= self.vBits[i] << (i % 8)
r += ser_string(bytes(vBytesArray))
return r
def __repr__(self):
return "CPartialMerkleTree(nTransactions=%d, vHash=%s, vBits=%s)" % (self.nTransactions, repr(self.vHash), repr(self.vBits))
class CMerkleBlock():
def __init__(self):
self.header = CBlockHeader()
self.txn = CPartialMerkleTree()
def deserialize(self, f):
self.header.deserialize(f)
self.txn.deserialize(f)
def serialize(self):
r = b""
r += self.header.serialize()
r += self.txn.serialize()
return r
def __repr__(self):
return "CMerkleBlock(header=%s, txn=%s)" % (repr(self.header), repr(self.txn))
# Objects that correspond to messages on the wire
class msg_version():
command = b"version"
def __init__(self):
self.nVersion = MY_VERSION
self.nServices = NODE_NETWORK
self.nTime = int(time.time())
self.addrTo = CAddress()
self.addrFrom = CAddress()
self.nNonce = random.getrandbits(64)
self.strSubVer = MY_SUBVERSION
self.nStartingHeight = -1
self.nRelay = MY_RELAY
def deserialize(self, f):
self.nVersion = struct.unpack("<i", f.read(4))[0]
if self.nVersion == 10300:
self.nVersion = 300
self.nServices = struct.unpack("<Q", f.read(8))[0]
self.nTime = struct.unpack("<q", f.read(8))[0]
self.addrTo = CAddress()
self.addrTo.deserialize(f)
if self.nVersion >= 106:
self.addrFrom = CAddress()
self.addrFrom.deserialize(f)
self.nNonce = struct.unpack("<Q", f.read(8))[0]
self.strSubVer = deser_string(f)
else:
self.addrFrom = None
self.nNonce = None
self.strSubVer = None
self.nStartingHeight = None
if self.nVersion >= 209:
self.nStartingHeight = struct.unpack("<i", f.read(4))[0]
else:
self.nStartingHeight = None
if self.nVersion >= 70001:
# Relay field is optional for version 70001 onwards
try:
self.nRelay = struct.unpack("<b", f.read(1))[0]
except:
self.nRelay = 0
else:
self.nRelay = 0
def serialize(self):
r = b""
r += struct.pack("<i", self.nVersion)
r += struct.pack("<Q", self.nServices)
r += struct.pack("<q", self.nTime)
r += self.addrTo.serialize()
r += self.addrFrom.serialize()
r += struct.pack("<Q", self.nNonce)
r += ser_string(self.strSubVer)
r += struct.pack("<i", self.nStartingHeight)
r += struct.pack("<b", self.nRelay)
return r
def __repr__(self):
return 'msg_version(nVersion=%i nServices=%i nTime=%s addrTo=%s addrFrom=%s nNonce=0x%016X strSubVer=%s nStartingHeight=%i nRelay=%i)' \
% (self.nVersion, self.nServices, time.ctime(self.nTime),
repr(self.addrTo), repr(self.addrFrom), self.nNonce,
self.strSubVer, self.nStartingHeight, self.nRelay)
class msg_verack():
command = b"verack"
def __init__(self):
pass
def deserialize(self, f):
pass
def serialize(self):
return b""
def __repr__(self):
return "msg_verack()"
class msg_addr():
command = b"addr"
def __init__(self):
self.addrs = []
def deserialize(self, f):
self.addrs = deser_vector(f, CAddress)
def serialize(self):
return ser_vector(self.addrs)
def __repr__(self):
return "msg_addr(addrs=%s)" % (repr(self.addrs))
class msg_inv():
command = b"inv"
def __init__(self, inv=None):
if inv is None:
self.inv = []
else:
self.inv = inv
def deserialize(self, f):
self.inv = deser_vector(f, CInv)
def serialize(self):
return ser_vector(self.inv)
def __repr__(self):
return "msg_inv(inv=%s)" % (repr(self.inv))
class msg_getdata():
command = b"getdata"
def __init__(self, inv=None):
        self.inv = inv if inv is not None else []
def deserialize(self, f):
self.inv = deser_vector(f, CInv)
def serialize(self):
return ser_vector(self.inv)
def __repr__(self):
return "msg_getdata(inv=%s)" % (repr(self.inv))
class msg_getblocks():
command = b"getblocks"
def __init__(self):
self.locator = CBlockLocator()
self.hashstop = 0
def deserialize(self, f):
self.locator = CBlockLocator()
self.locator.deserialize(f)
self.hashstop = deser_uint256(f)
def serialize(self):
r = b""
r += self.locator.serialize()
r += ser_uint256(self.hashstop)
return r
def __repr__(self):
return "msg_getblocks(locator=%s hashstop=%064x)" \
% (repr(self.locator), self.hashstop)
class msg_tx():
command = b"tx"
    def __init__(self, tx=None):
        # A shared CTransaction() default would be mutated by deserialize();
        # build a fresh instance per message instead.
        self.tx = tx if tx is not None else CTransaction()
def deserialize(self, f):
self.tx.deserialize(f)
def serialize(self):
return self.tx.serialize_without_witness()
def __repr__(self):
return "msg_tx(tx=%s)" % (repr(self.tx))
class msg_witness_tx(msg_tx):
def serialize(self):
return self.tx.serialize_with_witness()
class msg_block():
command = b"block"
def __init__(self, block=None):
if block is None:
self.block = CBlock()
else:
self.block = block
def deserialize(self, f):
self.block.deserialize(f)
def serialize(self):
return self.block.serialize(with_witness=False)
def __repr__(self):
return "msg_block(block=%s)" % (repr(self.block))
# for cases where a user needs tighter control over what is sent over the wire
# note that the user must supply the name of the command, and the data
class msg_generic():
def __init__(self, command, data=None):
self.command = command
self.data = data
def serialize(self):
return self.data
def __repr__(self):
return "msg_generic()"
class msg_witness_block(msg_block):
def serialize(self):
r = self.block.serialize(with_witness=True)
return r
class msg_getaddr():
command = b"getaddr"
def __init__(self):
pass
def deserialize(self, f):
pass
def serialize(self):
return b""
def __repr__(self):
return "msg_getaddr()"
class msg_ping():
command = b"ping"
def __init__(self, nonce=0):
self.nonce = nonce
def deserialize(self, f):
self.nonce = struct.unpack("<Q", f.read(8))[0]
def serialize(self):
r = b""
r += struct.pack("<Q", self.nonce)
return r
def __repr__(self):
return "msg_ping(nonce=%08x)" % self.nonce
class msg_pong():
command = b"pong"
def __init__(self, nonce=0):
self.nonce = nonce
def deserialize(self, f):
self.nonce = struct.unpack("<Q", f.read(8))[0]
def serialize(self):
r = b""
r += struct.pack("<Q", self.nonce)
return r
def __repr__(self):
return "msg_pong(nonce=%08x)" % self.nonce
class msg_mempool():
command = b"mempool"
def __init__(self):
pass
def deserialize(self, f):
pass
def serialize(self):
return b""
def __repr__(self):
return "msg_mempool()"
class msg_sendheaders():
command = b"sendheaders"
def __init__(self):
pass
def deserialize(self, f):
pass
def serialize(self):
return b""
def __repr__(self):
return "msg_sendheaders()"
# getheaders message has
# number of entries
# vector of hashes
# hash_stop (hash of last desired block header, 0 to get as many as possible)
class msg_getheaders():
command = b"getheaders"
def __init__(self):
self.locator = CBlockLocator()
self.hashstop = 0
def deserialize(self, f):
self.locator = CBlockLocator()
self.locator.deserialize(f)
self.hashstop = deser_uint256(f)
def serialize(self):
r = b""
r += self.locator.serialize()
r += ser_uint256(self.hashstop)
return r
def __repr__(self):
return "msg_getheaders(locator=%s, stop=%064x)" \
% (repr(self.locator), self.hashstop)
# headers message has
# <count> <vector of block headers>
class msg_headers():
command = b"headers"
def __init__(self, headers=None):
self.headers = headers if headers is not None else []
def deserialize(self, f):
# comment in bitcoind indicates these should be deserialized as blocks
blocks = deser_vector(f, CBlock)
for x in blocks:
self.headers.append(CBlockHeader(x))
def serialize(self):
blocks = [CBlock(x) for x in self.headers]
return ser_vector(blocks)
def __repr__(self):
return "msg_headers(headers=%s)" % repr(self.headers)
class msg_reject():
command = b"reject"
REJECT_MALFORMED = 1
def __init__(self):
self.message = b""
self.code = 0
self.reason = b""
self.data = 0
def deserialize(self, f):
self.message = deser_string(f)
self.code = struct.unpack("<B", f.read(1))[0]
self.reason = deser_string(f)
if (self.code != self.REJECT_MALFORMED and
(self.message == b"block" or self.message == b"tx")):
self.data = deser_uint256(f)
def serialize(self):
r = ser_string(self.message)
r += struct.pack("<B", self.code)
r += ser_string(self.reason)
if (self.code != self.REJECT_MALFORMED and
(self.message == b"block" or self.message == b"tx")):
r += ser_uint256(self.data)
return r
def __repr__(self):
return "msg_reject: %s %d %s [%064x]" \
% (self.message, self.code, self.reason, self.data)
class msg_feefilter():
command = b"feefilter"
def __init__(self, feerate=0):
self.feerate = feerate
def deserialize(self, f):
self.feerate = struct.unpack("<Q", f.read(8))[0]
def serialize(self):
r = b""
r += struct.pack("<Q", self.feerate)
return r
def __repr__(self):
return "msg_feefilter(feerate=%08x)" % self.feerate
class msg_sendcmpct():
command = b"sendcmpct"
def __init__(self):
self.announce = False
self.version = 1
def deserialize(self, f):
self.announce = struct.unpack("<?", f.read(1))[0]
self.version = struct.unpack("<Q", f.read(8))[0]
def serialize(self):
r = b""
r += struct.pack("<?", self.announce)
r += struct.pack("<Q", self.version)
return r
def __repr__(self):
return "msg_sendcmpct(announce=%s, version=%lu)" % (self.announce, self.version)
class msg_cmpctblock():
command = b"cmpctblock"
def __init__(self, header_and_shortids = None):
self.header_and_shortids = header_and_shortids
def deserialize(self, f):
self.header_and_shortids = P2PHeaderAndShortIDs()
self.header_and_shortids.deserialize(f)
def serialize(self):
r = b""
r += self.header_and_shortids.serialize()
return r
def __repr__(self):
return "msg_cmpctblock(HeaderAndShortIDs=%s)" % repr(self.header_and_shortids)
class msg_getblocktxn():
command = b"getblocktxn"
def __init__(self):
self.block_txn_request = None
def deserialize(self, f):
self.block_txn_request = BlockTransactionsRequest()
self.block_txn_request.deserialize(f)
def serialize(self):
r = b""
r += self.block_txn_request.serialize()
return r
def __repr__(self):
return "msg_getblocktxn(block_txn_request=%s)" % (repr(self.block_txn_request))
class msg_blocktxn():
command = b"blocktxn"
def __init__(self):
self.block_transactions = BlockTransactions()
def deserialize(self, f):
self.block_transactions.deserialize(f)
def serialize(self):
r = b""
r += self.block_transactions.serialize(with_witness=False)
return r
def __repr__(self):
return "msg_blocktxn(block_transactions=%s)" % (repr(self.block_transactions))
class msg_witness_blocktxn(msg_blocktxn):
def serialize(self):
r = b""
r += self.block_transactions.serialize(with_witness=True)
return r
|
|
# Copyright (c) 2012-2013 ARM Limited
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Copyright (c) 2006-2008 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Steve Reinhardt
# Simple test script
#
# "m5 test.py"
import optparse
import sys
import os
import time
import shutil
import filecmp
from difflib import unified_diff
import m5
from m5.defines import buildEnv
from m5.objects import *
from m5.util import addToPath, fatal
addToPath('./common')
addToPath('./ruby')
addToPath('./topologies')
import Options
import Ruby
import Simulation
import CacheConfig
import MemConfig
from Caches import *
from cpu2006 import *
def get_processes(options):
"""Interprets provided options and returns a list of processes"""
multiprocesses = []
inputs = []
outputs = []
errouts = []
pargs = []
workloads = options.cmd.split(';')
if options.input != "":
inputs = options.input.split(';')
if options.output != "":
outputs = options.output.split(';')
if options.errout != "":
errouts = options.errout.split(';')
if options.options != "":
pargs = options.options.split(';')
idx = 0
for wrkld in workloads:
process = LiveProcess()
process.executable = wrkld
process.cwd = os.getcwd()
if len(pargs) > idx:
process.cmd = [wrkld] + pargs[idx].split()
else:
process.cmd = [wrkld]
if len(inputs) > idx:
process.input = inputs[idx]
if len(outputs) > idx:
process.output = outputs[idx]
if len(errouts) > idx:
process.errout = errouts[idx]
multiprocesses.append(process)
idx += 1
if options.smt:
assert(options.cpu_type == "detailed" or options.cpu_type == "inorder")
return multiprocesses, idx
else:
return multiprocesses, 1
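# Example (illustrative): --cmd 'a.out;b.out' --options 'x 1;y 2' yields two
# LiveProcess objects with cmd ['a.out','x','1'] and ['b.out','y','2']; the
# second return value is the workload count only when --smt is given,
# otherwise 1 (one thread per CPU).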
parser = optparse.OptionParser()
Options.addCommonOptions(parser)
Options.addSEOptions(parser)
parser.add_option("--dram-size", type="string", default="0B",
help="Size of DRAM")
parser.add_option("--att-length", type="int", default=0,
help="Number of Addr Translation Table entries (for NVM)")
parser.add_option("--block-bits", type="int", default=6,
help="Number of bits of cache line/block")
parser.add_option("--page-bits", type="int", default=12,
help="Number of bits of page in the secondary page table")
parser.add_option("--reserved-writes", type="int",
help="Number of reserved writeback buffers in caches")
parser.add_option("--disable-timing", action="store_true", default=False,
help="Whether to avoid timing THNVM")
parser.add_option("--cpu-2006", default="", type="string",
help="The CPU 2006 benchmark to be loaded.")
parser.add_option("--check-cpu-2006", default="", type="string",
help="The CPU 2006 benchmark whose output to be checked.")
parser.add_option("--cpu-2006-root", default="", type="string",
help="The CPU 2006 root dir that contains benchmark subdirs.")
parser.add_option("--cpu-2006-build-name", default="", type="string",
help="The dir that contains the runnable in benchmark's build dir.")
if '--ruby' in sys.argv:
Ruby.define_options(parser)
(options, args) = parser.parse_args()
if args:
print "Error: script doesn't take any positional arguments"
sys.exit(1)
multiprocesses = []
numThreads = 1
def config_fingerprint(options):
fp = 'a' + str(options.att_length)
fp += '-d' + options.dram_size
if options.disable_timing:
fp += '-dsbl'
fp += '-' + str(int(time.time()))
return fp
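# Example: --att-length=128 --dram-size=64MB --disable-timing yields a
# fingerprint like 'a128-d64MB-dsbl-1500000000' (trailing unix timestamp),
# which is handed to make_process() below, presumably to keep benchmark
# output names distinct per configuration and run.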
fp = config_fingerprint(options)
if options.check_cpu_2006:
bench_process, bench_output, ref_output = make_process(
options.check_cpu_2006,
options.cpu_2006_root, options.cpu_2006_build_name, fp)
if ref_output is None or not os.path.isfile(ref_output):
print 'SPEC CPU 2006 ' + options.check_cpu_2006 + ' has no output!'
elif not os.path.isfile(bench_output):
print 'SPEC CPU 2006 ' + options.check_cpu_2006 + \
' outputs check: FAILED! No output file:'
print bench_output
elif filecmp.cmp(bench_output, ref_output):
print 'SPEC CPU 2006 ' + options.check_cpu_2006 + ' outputs check: OK!'
else:
print 'SPEC CPU 2006 outputs ' + options.check_cpu_2006 + \
' check: FAILED!'
for line in unified_diff(open(bench_output).readlines(),
open(ref_output).readlines(),
bench_output, ref_output):
sys.stderr.write(line)
sys.exit(0)
if options.cpu_2006:
bench_process, bench_out, ref_output = make_process(options.cpu_2006,
options.cpu_2006_root, options.cpu_2006_build_name, fp)
    if bench_out is not None and os.path.isfile(bench_out):
shutil.move(bench_out, bench_out + '.old')
if bench_process is not None:
multiprocesses = [ bench_process ]
else:
print "SPEC CPU 2006 process is not established!"
sys.exit(1)
elif options.bench:
apps = options.bench.split("-")
if len(apps) != options.num_cpus:
print "number of benchmarks not equal to set num_cpus!"
sys.exit(1)
for app in apps:
try:
if buildEnv['TARGET_ISA'] == 'alpha':
exec("workload = %s('alpha', 'tru64', 'ref')" % app)
else:
exec("workload = %s(buildEnv['TARGET_ISA'], 'linux', 'ref')" % app)
multiprocesses.append(workload.makeLiveProcess())
except:
print >>sys.stderr, "Unable to find workload for %s: %s" % (buildEnv['TARGET_ISA'], app)
sys.exit(1)
elif options.cmd:
multiprocesses, numThreads = get_processes(options)
else:
print >> sys.stderr, "No workload specified. Exiting!\n"
sys.exit(1)
(CPUClass, test_mem_mode, FutureClass) = Simulation.setCPUClass(options)
CPUClass.numThreads = numThreads
MemClass = Simulation.setMemClass(options)
# Check -- do not allow SMT with multiple CPUs
if options.smt and options.num_cpus > 1:
fatal("You cannot use SMT with multiple CPUs!")
np = options.num_cpus
system = System(cpu = [CPUClass(cpu_id=i) for i in xrange(np)],
mem_mode = test_mem_mode,
mem_ranges = [AddrRange(options.mem_size)],
cache_line_size = options.cacheline_size)
# Create a top-level voltage domain
system.voltage_domain = VoltageDomain(voltage = options.sys_voltage)
# Create a source clock for the system and set the clock period
system.clk_domain = SrcClockDomain(clock = options.sys_clock,
voltage_domain = system.voltage_domain)
# Create a CPU voltage domain
system.cpu_voltage_domain = VoltageDomain()
# Create a separate clock domain for the CPUs
system.cpu_clk_domain = SrcClockDomain(clock = options.cpu_clock,
voltage_domain =
system.cpu_voltage_domain)
# All cpus belong to a common cpu_clk_domain, therefore running at a common
# frequency.
for cpu in system.cpu:
cpu.clk_domain = system.cpu_clk_domain
# Sanity check
if options.fastmem:
if CPUClass != AtomicSimpleCPU:
fatal("Fastmem can only be used with atomic CPU!")
if (options.caches or options.l2cache):
fatal("You cannot use fastmem in combination with caches!")
if options.simpoint_profile:
if not options.fastmem:
# Atomic CPU checked with fastmem option already
fatal("SimPoint generation should be done with atomic cpu and fastmem")
if np > 1:
fatal("SimPoint generation not supported with more than one CPUs")
for i in xrange(np):
if options.smt:
system.cpu[i].workload = multiprocesses
elif len(multiprocesses) == 1:
system.cpu[i].workload = multiprocesses[0]
else:
system.cpu[i].workload = multiprocesses[i]
if options.fastmem:
system.cpu[i].fastmem = True
if options.simpoint_profile:
system.cpu[i].simpoint_profile = True
system.cpu[i].simpoint_interval = options.simpoint_interval
if options.checker:
system.cpu[i].addCheckerCpu()
system.cpu[i].createThreads()
if options.ruby:
if not (options.cpu_type == "detailed" or options.cpu_type == "timing"):
print >> sys.stderr, "Ruby requires TimingSimpleCPU or O3CPU!!"
sys.exit(1)
# Set the option for physmem so that it is not allocated any space
system.physmem = MemClass(range=AddrRange(options.mem_size),
null = True)
options.use_map = True
Ruby.create_system(options, system)
assert(options.num_cpus == len(system.ruby._cpu_ruby_ports))
for i in xrange(np):
ruby_port = system.ruby._cpu_ruby_ports[i]
# Create the interrupt controller and connect its ports to Ruby
# Note that the interrupt controller is always present but only
# in x86 does it have message ports that need to be connected
system.cpu[i].createInterruptController()
# Connect the cpu's cache ports to Ruby
system.cpu[i].icache_port = ruby_port.slave
system.cpu[i].dcache_port = ruby_port.slave
if buildEnv['TARGET_ISA'] == 'x86':
system.cpu[i].interrupts.pio = ruby_port.master
system.cpu[i].interrupts.int_master = ruby_port.slave
system.cpu[i].interrupts.int_slave = ruby_port.master
system.cpu[i].itb.walker.port = ruby_port.slave
system.cpu[i].dtb.walker.port = ruby_port.slave
else:
system.membus = CoherentBus()
system.system_port = system.membus.slave
MemConfig.config_mem(options, system)
CacheConfig.config_cache(options, system)
root = Root(full_system = False, system = system)
Simulation.run(options, root, system, FutureClass)
|
|
'''
Modular Input Script
Copyright (C) 2012 Splunk, Inc.
All Rights Reserved
'''
import os,sys,logging
import xml.dom.minidom, xml.sax.saxutils
import time
SPLUNK_HOME = os.environ.get("SPLUNK_HOME")
sys.path.append(SPLUNK_HOME + "/etc/apps/snmp_ta/bin/pyasn1-0.1.6-py2.7.egg")
sys.path.append(SPLUNK_HOME + "/etc/apps/snmp_ta/bin/pysnmp-4.2.4-py2.7.egg")
sys.path.append(SPLUNK_HOME + "/etc/apps/snmp_ta/bin/pysnmp_mibs-0.1.4-py2.7.egg")
from pysnmp.entity.rfc3413.oneliner import cmdgen
# set up logging
logging.root.setLevel(logging.ERROR)
formatter = logging.Formatter('%(levelname)s %(message)s')
#with zero args , should go to STD ERR
handler = logging.StreamHandler()
handler.setFormatter(formatter)
logging.root.addHandler(handler)
SCHEME = """<scheme>
<title>SNMP</title>
<description>Poll attributes from a device's SNMP interface</description>
<use_external_validation>true</use_external_validation>
<streaming_mode>xml</streaming_mode>
<use_single_instance>false</use_single_instance>
<endpoint>
<args>
<arg name="name">
<title>SNMP Input Name</title>
<description>Name of this SNMP input</description>
</arg>
<arg name="destination">
<title>Destination</title>
<description>IP or hostname of the device you would like to query</description>
<required_on_edit>false</required_on_edit>
<required_on_create>true</required_on_create>
</arg>
<arg name="port">
<title>Port</title>
<description>The SNMP port. Defaults to 161</description>
<validation>is_port('port'), "value for port must be a positive integer"</validation>
<required_on_edit>false</required_on_edit>
<required_on_create>false</required_on_create>
</arg>
<arg name="mib">
<title>MIB</title>
<description>The MIB that contains the OID to query. Defaults to "SNMPv2-MIB"</description>
<required_on_edit>false</required_on_edit>
<required_on_create>false</required_on_create>
</arg>
<arg name="oid">
<title>OID</title>
<description>The OID that you want to query. Defaults to "sysDescr"</description>
<required_on_edit>false</required_on_edit>
<required_on_create>false</required_on_create>
</arg>
<arg name="snmpindex">
<title>SNMP Index</title>
<description>The index of the OID to query. Defaults to 0</description>
<required_on_edit>false</required_on_edit>
<required_on_create>false</required_on_create>
</arg>
<arg name="communitystring">
<title>Community String</title>
<description>Community String used for authentication</description>
<required_on_edit>false</required_on_edit>
<required_on_create>true</required_on_create>
</arg>
<arg name="snmpinterval">
<title>Interval</title>
<description>How often to run the SNMP query (in seconds). Defaults to 60 seconds</description>
<required_on_edit>false</required_on_edit>
<required_on_create>false</required_on_create>
</arg>
</args>
</endpoint>
</scheme>
"""
def do_validate():
config = get_validation_config()
#TODO
#if error , print_validation_error & sys.exit(2)
def do_run():
config = get_input_config()
#parameters with defaults
    destination = config.get("destination")
    # splunkd passes all stanza params as strings; coerce the numeric ones
    port = int(config.get("port", 161))
    mib = config.get("mib", "SNMPv2-MIB")
    oid = config.get("oid", "sysDescr")
    snmpindex = int(config.get("snmpindex", 0))
    communitystring = config.get("communitystring", "public")
    snmpinterval = float(config.get("snmpinterval", 60))
while True:
try:
cmdGen = cmdgen.CommandGenerator()
errorIndication, errorStatus, errorIndex, varBinds = cmdGen.getCmd(
cmdgen.CommunityData(communitystring),
cmdgen.UdpTransportTarget((destination, port)),
cmdgen.MibVariable(mib, oid, snmpindex),
'.1.3.6.1.2.1.1.3.0',
lookupNames=True, lookupValues=True
)
            if errorIndication:
                logging.error(errorIndication)
                raise RuntimeError(errorIndication)
            elif errorStatus:
                logging.error(errorStatus)
                raise RuntimeError(errorStatus)
else:
splunkevent =""
for name, val in varBinds:
splunkevent += '%s = "%s", ' % (name.prettyPrint(), val.prettyPrint())
print_xml_single_instance_mode(splunkevent)
sys.stdout.flush()
        except RuntimeError, e:
            logging.error("Looks like an error: %s" % str(e))
            sys.exit(1)
time.sleep(float(snmpinterval))
# prints validation error data to be consumed by Splunk
def print_validation_error(s):
print "<error><message>%s</message></error>" % xml.sax.saxutils.escape(s)
# prints XML stream
def print_xml_single_instance_mode(s):
print "<stream><event><data>%s</data></event></stream>" % xml.sax.saxutils.escape(s)
# prints XML stream
def print_xml_multi_instance_mode(s, stanza):
    print "<stream><event stanza=\"%s\"><data>%s</data></event></stream>" % (stanza, xml.sax.saxutils.escape(s))
# prints simple stream
def print_simple(s):
print "%s\n" % s
def usage():
print "usage: %s [--scheme|--validate-arguments]"
logging.error("Incorrect Program Usage")
sys.exit(2)
def do_scheme():
print SCHEME
#read XML configuration passed from splunkd, need to refactor to support single instance mode
def get_input_config():
config = {}
try:
# read everything from stdin
config_str = sys.stdin.read()
# parse the config XML
doc = xml.dom.minidom.parseString(config_str)
root = doc.documentElement
conf_node = root.getElementsByTagName("configuration")[0]
if conf_node:
logging.debug("XML: found configuration")
stanza = conf_node.getElementsByTagName("stanza")[0]
if stanza:
stanza_name = stanza.getAttribute("name")
if stanza_name:
logging.debug("XML: found stanza " + stanza_name)
config["name"] = stanza_name
params = stanza.getElementsByTagName("param")
for param in params:
param_name = param.getAttribute("name")
logging.debug("XML: found param '%s'" % param_name)
if param_name and param.firstChild and \
param.firstChild.nodeType == param.firstChild.TEXT_NODE:
data = param.firstChild.data
config[param_name] = data
logging.debug("XML: '%s' -> '%s'" % (param_name, data))
checkpnt_node = root.getElementsByTagName("checkpoint_dir")[0]
if checkpnt_node and checkpnt_node.firstChild and \
checkpnt_node.firstChild.nodeType == checkpnt_node.firstChild.TEXT_NODE:
config["checkpoint_dir"] = checkpnt_node.firstChild.data
if not config:
raise Exception, "Invalid configuration received from Splunk."
except Exception, e:
raise Exception, "Error getting Splunk configuration via STDIN: %s" % str(e)
return config
#read XML configuration passed from splunkd, need to refactor to support single instance mode
def get_validation_config():
val_data = {}
# read everything from stdin
val_str = sys.stdin.read()
# parse the validation XML
doc = xml.dom.minidom.parseString(val_str)
root = doc.documentElement
logging.debug("XML: found items")
item_node = root.getElementsByTagName("item")[0]
if item_node:
logging.debug("XML: found item")
name = item_node.getAttribute("name")
val_data["stanza"] = name
params_node = item_node.getElementsByTagName("param")
for param in params_node:
name = param.getAttribute("name")
logging.debug("Found param %s" % name)
if name and param.firstChild and \
param.firstChild.nodeType == param.firstChild.TEXT_NODE:
val_data[name] = param.firstChild.data
return val_data
if __name__ == '__main__':
if len(sys.argv) > 1:
if sys.argv[1] == "--scheme":
do_scheme()
elif sys.argv[1] == "--validate-arguments":
do_validate()
else:
usage()
else:
do_run()
sys.exit(0)
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import functools
import mxnet.ndarray as nd
from mxnet.ndarray import zeros_like
from mxnet.autograd import *
from mxnet.test_utils import *
from common import setup_module, with_seed, teardown
def grad_and_loss(func, argnum=None):
"""Return function that computes both gradient of arguments and loss value.
Parameters
----------
func: a python function
The forward (loss) function.
argnum: an int or a list of int
The index of argument to calculate gradient for.
Returns
-------
grad_and_loss_func: a python function
A function that would compute both the gradient of arguments and loss value.
"""
@functools.wraps(func)
def wrapped(*args):
"""Wrapped function."""
variables = args
if argnum is not None:
argnum_ = argnum if isinstance(argnum, list) else [argnum]
variables = [args[i] for i in argnum_]
for x in variables:
            assert isinstance(x, NDArray), "type of autograd input should be NDArray."
grads = [zeros_like(x) for x in variables]
mark_variables(variables, grads)
with record():
outputs = func(*args)
backward([outputs] if isinstance(outputs, NDArray) else outputs)
return grads, outputs
return wrapped
def grad(func, argnum=None):
"""Return function that computes gradient of arguments.
Parameters
----------
func: a python function
The forward (loss) function.
argnum: an int or a list of int
The index of argument to calculate gradient for.
Returns
-------
grad_func: a python function
A function that would compute the gradient of arguments.
Examples
--------
>>> # autograd supports dynamic graph which is changed
>>> # every instance
    >>> def func(x):
    ...     r = random.randint(0, 1)
    ...     if r % 2:
    ...         return x**2
    ...     else:
    ...         return x/3
    >>> # use `grad(func)` to get the gradient function
    >>> for x in range(10):
    ...     grad_func = grad(func)
    ...     inputs = nd.array([[1, 2, 3], [4, 5, 6]])
    ...     grad_vals = grad_func(inputs)
"""
grad_with_loss_func = grad_and_loss(func, argnum)
@functools.wraps(grad_with_loss_func)
def wrapped(*args):
return grad_with_loss_func(*args)[0]
return wrapped
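# Quick sanity sketch for grad() (not one of the original tests): d/dx x**2
# at [1, 2, 3] is [2, 4, 6]; grad() returns the list of per-argument grads.
def _example_grad_square():
    x = nd.array([1, 2, 3])
    grads = grad(lambda a: a ** 2)(x)
    assert same(grads[0].asnumpy(), np.array([2., 4., 6.]))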
def autograd_assert(*args, **kwargs):
func = kwargs["func"]
grad_f = kwargs["grad_func"]
argnum = kwargs["argnum"] if 'argnum' in kwargs else None
grad_func = grad_and_loss(func, argnum)
grad_vals, output = grad_func(*args)
res = func(*args)
assert same(output.asnumpy(), res.asnumpy())
grad_res = grad_f(*args)
assert len(grad_vals) == len(grad_res)
for a, b in zip(grad_vals, grad_res):
assert same(a.asnumpy(), b.asnumpy())
@with_seed()
def test_unary_func():
def check_unary_func(x):
f_exp = lambda x: nd.exp(x)
f_exp_grad = lambda x: [nd.exp(x)]
autograd_assert(x, func=f_exp, grad_func=f_exp_grad)
f_half = lambda x: x/2
f_half_grad = lambda x: [nd.ones(x.shape) * 0.5]
autograd_assert(x, func=f_half, grad_func=f_half_grad)
f_square = lambda x: x**2
f_square_grad = lambda x: [2*x]
autograd_assert(x, func=f_square, grad_func=f_square_grad)
uniform = nd.uniform(shape=(4, 5))
stypes = ['default', 'row_sparse', 'csr']
for stype in stypes:
check_unary_func(uniform.tostype(stype))
@with_seed()
def test_binary_func():
def check_binary_func(x, y):
f_add = lambda x, y: x+y
f_add_grad = lambda x, y: [nd.ones(x.shape), nd.ones(y.shape)]
autograd_assert(x, y, func=f_add, grad_func=f_add_grad)
f_mul = lambda x, y: x*y
f_mul_grad = lambda x, y: [y, x]
autograd_assert(x, y, func=f_mul, grad_func=f_mul_grad)
f_compose = lambda x, y: x+x*y
f_compose_grad = lambda x, y: [nd.ones(x.shape) + y, x]
autograd_assert(x, y, func=f_compose, grad_func=f_compose_grad)
uniform_x = nd.uniform(shape=(4, 5))
uniform_y = nd.uniform(shape=(4, 5))
stypes = ['default', 'row_sparse', 'csr']
for stype_x in stypes:
for stype_y in stypes:
x = uniform_x.tostype(stype_x)
y = uniform_y.tostype(stype_y)
check_binary_func(x, y)
@with_seed()
def test_operator_with_state():
def f_fc(a, b, weight, bias):
x = a*b
fc = nd.FullyConnected(
x, weight, bias, num_hidden=32)
return fc
a = nd.uniform(shape=(64, 50))
b = nd.uniform(shape=(64, 50))
weight = nd.uniform(shape=(32, 50))
bias = nd.uniform(shape=(32, ))
grad_func = grad_and_loss(f_fc)
grad_vals, outputs = grad_func(a, b, weight, bias)
# (TODO) assert
@with_seed()
def test_argnum():
def f_with_mode(a, b, mode):
if mode:
return a+b
else:
return a*b
a = nd.uniform(shape=(3, 2))
b = nd.uniform(shape=(3, 2))
f_add_grad = lambda x, y, mode: [nd.ones(x.shape), nd.ones(y.shape)]
f_mul_grad = lambda x, y, mode: [y, x]
autograd_assert(a, b, True,
argnum=[0, 1], func=f_with_mode, grad_func=f_add_grad)
autograd_assert(a, b, False,
argnum=[0, 1], func=f_with_mode, grad_func=f_mul_grad)
@with_seed()
def test_training():
x = nd.ones((10, 10))
with record():
y = nd.Dropout(x, p=0.5)
assert not (y.asnumpy() == x.asnumpy()).all()
with pause():
y = nd.Dropout(x, p=0.5)
assert (y.asnumpy() == x.asnumpy()).all()
@with_seed()
def test_out_grads():
x = nd.ones((3, 5))
dx = nd.zeros_like(x)
mark_variables([x], [dx])
da = None
db = nd.array([1,2,3,4,5])
dc = nd.array([5,4,3,2,1])
with record():
a, b, c = nd.split(x, axis=0, num_outputs=3, squeeze_axis=True)
backward([a, b, c], [da, db, dc])
assert (dx.asnumpy() == np.array(
[[1,1,1,1,1],
[1,2,3,4,5],
[5,4,3,2,1]])).all()
@with_seed()
def test_detach_updated_grad():
x = nd.ones((2, 2))
dx = nd.zeros_like(x)
y = nd.ones_like(x)
dy = nd.zeros_like(x)
mark_variables([x, y], [dx, dy])
assert x._fresh_grad == False
assert y._fresh_grad == False
with record():
x2 = x + 2
y2 = x2 + y
y2.backward()
assert (dx.asnumpy() == 1).all()
assert x._fresh_grad == True
assert y._fresh_grad == True
dx[:] = 0
x._fresh_grad = False
y._fresh_grad = False
assert x._fresh_grad == False
assert y._fresh_grad == False
with record():
x2 = x + 2
x2 = x2.detach()
y2 = x2 + y
y2.backward()
assert (dx.asnumpy() == 0).all()
assert y._fresh_grad == True
assert x._fresh_grad == False
@with_seed()
def test_retain_grad():
x = mx.nd.ones((2, 2))
dx = mx.nd.zeros((2, 2))
mark_variables([x], [dx], grad_reqs='add')
with record():
y = x + 1
y.backward(retain_graph=False)
assert (dx.asnumpy() == 1).all()
dx[:] = 0
with record():
y = x + 1
y.backward(retain_graph=True)
y.backward(retain_graph=False)
assert (dx.asnumpy() == 2).all()
# The following sequence should throw an exception. We discard the expected
# stderr stack trace output for this operation to keep the test logs clean.
with discard_stderr():
try:
with record():
y = x + 1
y.backward()
y.backward()
except Exception:
return
raise AssertionError(
"differentiating the same graph twice without retain_graph should fail")
@with_seed()
def test_attach_grad():
def check_attach_grad(x):
assert x.grad is None
x.attach_grad()
with record():
y = x * 2
assert y.grad is None
y.backward(out_grad=mx.nd.ones_like(y).tostype(x.stype))
assert (x.grad.asnumpy() == 2).all()
zeros = mx.nd.zeros((10, 10))
stypes = ['default', 'row_sparse', 'csr']
for stype in stypes:
x = zeros.tostype(stype)
check_attach_grad(x)
@with_seed()
def test_is_train():
x = mx.nd.ones((10, 10))
x.attach_grad()
with record(train_mode=True):
assert is_recording()
assert is_training()
y = mx.nd.Dropout(x, p=0.5)
assert y.asnumpy().max() == 2 and y.asnumpy().min() == 0
y.backward()
assert (x.grad.asnumpy() == y.asnumpy()).all()
with predict_mode():
assert is_recording()
assert not is_training()
y = mx.nd.Dropout(x, p=0.5)
assert (y.asnumpy() == x.asnumpy()).all()
y.backward(train_mode=False)
assert (x.grad.asnumpy() == x.asnumpy()).all()
with record(train_mode=False):
assert is_recording()
assert not is_training()
y = mx.nd.Dropout(x, p=0.5)
assert (y.asnumpy() == x.asnumpy()).all()
y.backward(train_mode=False)
assert (x.grad.asnumpy() == x.asnumpy()).all()
with train_mode():
assert is_recording()
assert is_training()
y = mx.nd.Dropout(x, p=0.5)
assert y.asnumpy().max() == 2 and y.asnumpy().min() == 0
y.backward()
assert (x.grad.asnumpy() == y.asnumpy()).all()
assert not is_recording()
assert not is_training()
y = mx.nd.Dropout(x, p=0.5)
assert (y.asnumpy() == x.asnumpy()).all()
with train_mode():
assert not is_recording()
assert is_training()
y = mx.nd.Dropout(x, p=0.5)
assert y.asnumpy().max() == 2 and y.asnumpy().min() == 0
@with_seed()
def test_function():
class func(Function):
def forward(self, x, y):
m = x / y
n = x * y
self.save_for_backward(x, y)
return m, n
def backward(self, dm, dn):
x, y = self.saved_tensors
dx = dm/y + dn*y
dy = dn*x - dm * x / y / y
return dx, dy
f = func()
x = mx.nd.random.uniform(shape=(10,))
x.attach_grad()
y = mx.nd.random.uniform(shape=(10,))
y.attach_grad()
with record():
m, n = f(x, y)
backward([m, n])
dx1 = x.grad.asnumpy()
dy1 = y.grad.asnumpy()
with record():
backward([x/y, x*y])
# Non-zero atol required, as exposed by seed 630179191
atol = 1e-6
assert_almost_equal(x.grad.asnumpy(), dx1, atol=atol)
assert_almost_equal(y.grad.asnumpy(), dy1, atol=atol)
@with_seed()
def test_get_symbol():
x = mx.nd.ones((1,))
x.attach_grad()
with record():
y = x*x + 2*x - 1
assert len(get_symbol(y).list_arguments()) == 1
z = mx.nd.ones((1,))
z.attach_grad()
with record():
y = x*x + 2*z - 1
assert len(get_symbol(y).list_arguments()) == 2
@with_seed()
def test_grad_with_stype():
def check_grad_with_stype(array_stype, grad_stype, expected_stype):
x = mx.nd.zeros((1, 1), stype=array_stype)
x.attach_grad(stype=grad_stype)
# check grad attached
assert x.grad.stype == expected_stype
y = x.detach()
# check array detached
assert y.stype == array_stype
stypes = ['default', 'csr', 'row_sparse']
for stype in stypes:
# check the default stype of the gradient (same as the array stype)
check_grad_with_stype(stype, None, stype)
for grad_stype in stypes:
# check the stype of the gradient when provided
check_grad_with_stype(stype, grad_stype, grad_stype)
@with_seed()
def test_sparse_dot_grad():
def check_sparse_dot_grad(rhs):
lhs = rand_ndarray((2, 8), 'csr')
with mx.autograd.record():
y = mx.nd.dot(lhs, rhs)
y.backward()
grad = rhs.grad
grad_np = np.dot(lhs.asnumpy().T, np.ones((lhs.shape[0], rhs.shape[1])))
assert grad.stype == 'row_sparse'
assert_almost_equal(grad.asnumpy(), grad_np)
# check grad with row_sparse weight
shape = (8, 3)
rsp = mx.nd.ones(shape).tostype('row_sparse')
rsp.attach_grad()
check_sparse_dot_grad(rsp)
# check grad with dense weight
dns = mx.nd.ones(shape)
dns.attach_grad(stype='row_sparse')
check_sparse_dot_grad(dns)
@with_seed()
def test_gradient():
x = mx.nd.ones((1,))
x.attach_grad()
with mx.autograd.record():
z = mx.nd.elemwise_add(mx.nd.exp(x), x)
dx, = mx.autograd.grad(z, [x], create_graph=True)
assert abs(dx.asscalar() - 3.71828175) < 1e-7
dx.backward()
assert abs(x.grad.asscalar() - 2.71828175) < 1e-7
if __name__ == "__main__":
import nose
nose.runmodule()
|
|
"""
pygments.lexers.int_fiction
~~~~~~~~~~~~~~~~~~~~~~~~~~~
Lexers for interactive fiction languages.
:copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, include, bygroups, using, \
this, default, words
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation, Error, Generic
__all__ = ['Inform6Lexer', 'Inform6TemplateLexer', 'Inform7Lexer',
'Tads3Lexer']
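# Usage sketch (illustrative): these lexers plug into the standard Pygments
# pipeline, e.g.
#   from pygments import highlight
#   from pygments.formatters import HtmlFormatter
#   print(highlight('Global score = 0;', Inform6Lexer(), HtmlFormatter()))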
class Inform6Lexer(RegexLexer):
"""
For `Inform 6 <http://inform-fiction.org/>`_ source code.
.. versionadded:: 2.0
"""
name = 'Inform 6'
aliases = ['inform6', 'i6']
filenames = ['*.inf']
flags = re.MULTILINE | re.DOTALL | re.UNICODE
_name = r'[a-zA-Z_]\w*'
# Inform 7 maps these four character classes to their ASCII
# equivalents. To support Inform 6 inclusions within Inform 7,
# Inform6Lexer maps them too.
_dash = '\\-\u2010-\u2014'
_dquote = '"\u201c\u201d'
_squote = "'\u2018\u2019"
_newline = '\\n\u0085\u2028\u2029'
tokens = {
'root': [
(r'\A(!%%[^%s]*[%s])+' % (_newline, _newline), Comment.Preproc,
'directive'),
default('directive')
],
'_whitespace': [
(r'\s+', Text),
(r'![^%s]*' % _newline, Comment.Single)
],
'default': [
include('_whitespace'),
(r'\[', Punctuation, 'many-values'), # Array initialization
(r':|(?=;)', Punctuation, '#pop'),
(r'<', Punctuation), # Second angle bracket in an action statement
default(('expression', '_expression'))
],
# Expressions
'_expression': [
include('_whitespace'),
(r'(?=sp\b)', Text, '#pop'),
(r'(?=[%s%s$0-9#a-zA-Z_])' % (_dquote, _squote), Text,
('#pop', 'value')),
(r'\+\+|[%s]{1,2}(?!>)|~~?' % _dash, Operator),
(r'(?=[()\[%s,?@{:;])' % _dash, Text, '#pop')
],
'expression': [
include('_whitespace'),
(r'\(', Punctuation, ('expression', '_expression')),
(r'\)', Punctuation, '#pop'),
(r'\[', Punctuation, ('#pop', 'statements', 'locals')),
(r'>(?=(\s+|(![^%s]*))*[>;])' % _newline, Punctuation),
(r'\+\+|[%s]{2}(?!>)' % _dash, Operator),
(r',', Punctuation, '_expression'),
(r'&&?|\|\|?|[=~><]?=|[%s]{1,2}>?|\.\.?[&#]?|::|[<>+*/%%]' % _dash,
Operator, '_expression'),
(r'(has|hasnt|in|notin|ofclass|or|provides)\b', Operator.Word,
'_expression'),
(r'sp\b', Name),
(r'\?~?', Name.Label, 'label?'),
(r'[@{]', Error),
default('#pop')
],
'_assembly-expression': [
(r'\(', Punctuation, ('#push', '_expression')),
(r'[\[\]]', Punctuation),
(r'[%s]>' % _dash, Punctuation, '_expression'),
(r'sp\b', Keyword.Pseudo),
(r';', Punctuation, '#pop:3'),
include('expression')
],
'_for-expression': [
(r'\)', Punctuation, '#pop:2'),
(r':', Punctuation, '#pop'),
include('expression')
],
'_keyword-expression': [
(r'(from|near|to)\b', Keyword, '_expression'),
include('expression')
],
'_list-expression': [
(r',', Punctuation, '#pop'),
include('expression')
],
'_object-expression': [
(r'has\b', Keyword.Declaration, '#pop'),
include('_list-expression')
],
# Values
'value': [
include('_whitespace'),
# Strings
(r'[%s][^@][%s]' % (_squote, _squote), String.Char, '#pop'),
(r'([%s])(@\{[0-9a-fA-F]*\})([%s])' % (_squote, _squote),
bygroups(String.Char, String.Escape, String.Char), '#pop'),
(r'([%s])(@.{2})([%s])' % (_squote, _squote),
bygroups(String.Char, String.Escape, String.Char), '#pop'),
(r'[%s]' % _squote, String.Single, ('#pop', 'dictionary-word')),
(r'[%s]' % _dquote, String.Double, ('#pop', 'string')),
# Numbers
(r'\$[+%s][0-9]*\.?[0-9]*([eE][+%s]?[0-9]+)?' % (_dash, _dash),
Number.Float, '#pop'),
(r'\$[0-9a-fA-F]+', Number.Hex, '#pop'),
(r'\$\$[01]+', Number.Bin, '#pop'),
(r'[0-9]+', Number.Integer, '#pop'),
# Values prefixed by hashes
(r'(##|#a\$)(%s)' % _name, bygroups(Operator, Name), '#pop'),
(r'(#g\$)(%s)' % _name,
bygroups(Operator, Name.Variable.Global), '#pop'),
(r'#[nw]\$', Operator, ('#pop', 'obsolete-dictionary-word')),
(r'(#r\$)(%s)' % _name, bygroups(Operator, Name.Function), '#pop'),
(r'#', Name.Builtin, ('#pop', 'system-constant')),
# System functions
(words((
'child', 'children', 'elder', 'eldest', 'glk', 'indirect', 'metaclass',
'parent', 'random', 'sibling', 'younger', 'youngest'), suffix=r'\b'),
Name.Builtin, '#pop'),
# Metaclasses
(r'(?i)(Class|Object|Routine|String)\b', Name.Builtin, '#pop'),
# Veneer routines
(words((
'Box__Routine', 'CA__Pr', 'CDefArt', 'CInDefArt', 'Cl__Ms',
'Copy__Primitive', 'CP__Tab', 'DA__Pr', 'DB__Pr', 'DefArt', 'Dynam__String',
'EnglishNumber', 'Glk__Wrap', 'IA__Pr', 'IB__Pr', 'InDefArt', 'Main__',
'Meta__class', 'OB__Move', 'OB__Remove', 'OC__Cl', 'OP__Pr', 'Print__Addr',
'Print__PName', 'PrintShortName', 'RA__Pr', 'RA__Sc', 'RL__Pr', 'R_Process',
'RT__ChG', 'RT__ChGt', 'RT__ChLDB', 'RT__ChLDW', 'RT__ChPR', 'RT__ChPrintA',
'RT__ChPrintC', 'RT__ChPrintO', 'RT__ChPrintS', 'RT__ChPS', 'RT__ChR',
'RT__ChSTB', 'RT__ChSTW', 'RT__ChT', 'RT__Err', 'RT__TrPS', 'RV__Pr',
'Symb__Tab', 'Unsigned__Compare', 'WV__Pr', 'Z__Region'),
prefix='(?i)', suffix=r'\b'),
Name.Builtin, '#pop'),
# Other built-in symbols
(words((
'call', 'copy', 'create', 'DEBUG', 'destroy', 'DICT_CHAR_SIZE',
'DICT_ENTRY_BYTES', 'DICT_IS_UNICODE', 'DICT_WORD_SIZE', 'false',
'FLOAT_INFINITY', 'FLOAT_NAN', 'FLOAT_NINFINITY', 'GOBJFIELD_CHAIN',
'GOBJFIELD_CHILD', 'GOBJFIELD_NAME', 'GOBJFIELD_PARENT',
'GOBJFIELD_PROPTAB', 'GOBJFIELD_SIBLING', 'GOBJ_EXT_START',
'GOBJ_TOTAL_LENGTH', 'Grammar__Version', 'INDIV_PROP_START', 'INFIX',
'infix__watching', 'MODULE_MODE', 'name', 'nothing', 'NUM_ATTR_BYTES', 'print',
'print_to_array', 'recreate', 'remaining', 'self', 'sender', 'STRICT_MODE',
'sw__var', 'sys__glob0', 'sys__glob1', 'sys__glob2', 'sys_statusline_flag',
'TARGET_GLULX', 'TARGET_ZCODE', 'temp__global2', 'temp__global3',
'temp__global4', 'temp_global', 'true', 'USE_MODULES', 'WORDSIZE'),
prefix='(?i)', suffix=r'\b'),
Name.Builtin, '#pop'),
# Other values
(_name, Name, '#pop')
],
# Strings
'dictionary-word': [
(r'[~^]+', String.Escape),
(r'[^~^\\@({%s]+' % _squote, String.Single),
(r'[({]', String.Single),
(r'@\{[0-9a-fA-F]*\}', String.Escape),
(r'@.{2}', String.Escape),
(r'[%s]' % _squote, String.Single, '#pop')
],
'string': [
(r'[~^]+', String.Escape),
(r'[^~^\\@({%s]+' % _dquote, String.Double),
(r'[({]', String.Double),
(r'\\', String.Escape),
(r'@(\\\s*[%s]\s*)*@((\\\s*[%s]\s*)*[0-9])*' %
(_newline, _newline), String.Escape),
(r'@(\\\s*[%s]\s*)*\{((\\\s*[%s]\s*)*[0-9a-fA-F])*'
r'(\\\s*[%s]\s*)*\}' % (_newline, _newline, _newline),
String.Escape),
(r'@(\\\s*[%s]\s*)*.(\\\s*[%s]\s*)*.' % (_newline, _newline),
String.Escape),
(r'[%s]' % _dquote, String.Double, '#pop')
],
'plain-string': [
(r'[^~^\\({\[\]%s]+' % _dquote, String.Double),
(r'[~^({\[\]]', String.Double),
(r'\\', String.Escape),
(r'[%s]' % _dquote, String.Double, '#pop')
],
# Names
'_constant': [
include('_whitespace'),
(_name, Name.Constant, '#pop'),
include('value')
],
'_global': [
include('_whitespace'),
(_name, Name.Variable.Global, '#pop'),
include('value')
],
'label?': [
include('_whitespace'),
(_name, Name.Label, '#pop'),
default('#pop')
],
'variable?': [
include('_whitespace'),
(_name, Name.Variable, '#pop'),
default('#pop')
],
# Values after hashes
'obsolete-dictionary-word': [
(r'\S\w*', String.Other, '#pop')
],
'system-constant': [
include('_whitespace'),
(_name, Name.Builtin, '#pop')
],
# Directives
'directive': [
include('_whitespace'),
(r'#', Punctuation),
(r';', Punctuation, '#pop'),
(r'\[', Punctuation,
('default', 'statements', 'locals', 'routine-name?')),
(words((
'abbreviate', 'endif', 'dictionary', 'ifdef', 'iffalse', 'ifndef', 'ifnot',
'iftrue', 'ifv3', 'ifv5', 'release', 'serial', 'switches', 'system_file',
'version'), prefix='(?i)', suffix=r'\b'),
Keyword, 'default'),
(r'(?i)(array|global)\b', Keyword,
('default', 'directive-keyword?', '_global')),
(r'(?i)attribute\b', Keyword, ('default', 'alias?', '_constant')),
(r'(?i)class\b', Keyword,
('object-body', 'duplicates', 'class-name')),
(r'(?i)(constant|default)\b', Keyword,
('default', 'expression', '_constant')),
(r'(?i)(end\b)(.*)', bygroups(Keyword, Text)),
(r'(?i)(extend|verb)\b', Keyword, 'grammar'),
(r'(?i)fake_action\b', Keyword, ('default', '_constant')),
(r'(?i)import\b', Keyword, 'manifest'),
(r'(?i)(include|link|origsource)\b', Keyword,
('default', 'before-plain-string?')),
(r'(?i)(lowstring|undef)\b', Keyword, ('default', '_constant')),
(r'(?i)message\b', Keyword, ('default', 'diagnostic')),
(r'(?i)(nearby|object)\b', Keyword,
('object-body', '_object-head')),
(r'(?i)property\b', Keyword,
('default', 'alias?', '_constant', 'property-keyword*')),
(r'(?i)replace\b', Keyword,
('default', 'routine-name?', 'routine-name?')),
(r'(?i)statusline\b', Keyword, ('default', 'directive-keyword?')),
(r'(?i)stub\b', Keyword, ('default', 'routine-name?')),
(r'(?i)trace\b', Keyword,
('default', 'trace-keyword?', 'trace-keyword?')),
(r'(?i)zcharacter\b', Keyword,
('default', 'directive-keyword?', 'directive-keyword?')),
(_name, Name.Class, ('object-body', '_object-head'))
],
# [, Replace, Stub
'routine-name?': [
include('_whitespace'),
(_name, Name.Function, '#pop'),
default('#pop')
],
'locals': [
include('_whitespace'),
(r';', Punctuation, '#pop'),
(r'\*', Punctuation),
(r'"', String.Double, 'plain-string'),
(_name, Name.Variable)
],
# Array
'many-values': [
include('_whitespace'),
(r';', Punctuation),
(r'\]', Punctuation, '#pop'),
(r':', Error),
default(('expression', '_expression'))
],
# Attribute, Property
'alias?': [
include('_whitespace'),
(r'alias\b', Keyword, ('#pop', '_constant')),
default('#pop')
],
# Class, Object, Nearby
'class-name': [
include('_whitespace'),
(r'(?=[,;]|(class|has|private|with)\b)', Text, '#pop'),
(_name, Name.Class, '#pop')
],
'duplicates': [
include('_whitespace'),
(r'\(', Punctuation, ('#pop', 'expression', '_expression')),
default('#pop')
],
'_object-head': [
(r'[%s]>' % _dash, Punctuation),
(r'(class|has|private|with)\b', Keyword.Declaration, '#pop'),
include('_global')
],
'object-body': [
include('_whitespace'),
(r';', Punctuation, '#pop:2'),
(r',', Punctuation),
(r'class\b', Keyword.Declaration, 'class-segment'),
(r'(has|private|with)\b', Keyword.Declaration),
(r':', Error),
default(('_object-expression', '_expression'))
],
'class-segment': [
include('_whitespace'),
(r'(?=[,;]|(class|has|private|with)\b)', Text, '#pop'),
(_name, Name.Class),
default('value')
],
# Extend, Verb
'grammar': [
include('_whitespace'),
(r'=', Punctuation, ('#pop', 'default')),
(r'\*', Punctuation, ('#pop', 'grammar-line')),
default('_directive-keyword')
],
'grammar-line': [
include('_whitespace'),
(r';', Punctuation, '#pop'),
(r'[/*]', Punctuation),
(r'[%s]>' % _dash, Punctuation, 'value'),
(r'(noun|scope)\b', Keyword, '=routine'),
default('_directive-keyword')
],
'=routine': [
include('_whitespace'),
(r'=', Punctuation, 'routine-name?'),
default('#pop')
],
# Import
'manifest': [
include('_whitespace'),
(r';', Punctuation, '#pop'),
(r',', Punctuation),
(r'(?i)global\b', Keyword, '_global'),
default('_global')
],
# Include, Link, Message
'diagnostic': [
include('_whitespace'),
(r'[%s]' % _dquote, String.Double, ('#pop', 'message-string')),
default(('#pop', 'before-plain-string?', 'directive-keyword?'))
],
'before-plain-string?': [
include('_whitespace'),
(r'[%s]' % _dquote, String.Double, ('#pop', 'plain-string')),
default('#pop')
],
'message-string': [
(r'[~^]+', String.Escape),
include('plain-string')
],
# Keywords used in directives
'_directive-keyword!': [
include('_whitespace'),
(words((
'additive', 'alias', 'buffer', 'class', 'creature', 'data', 'error', 'fatalerror',
'first', 'has', 'held', 'initial', 'initstr', 'last', 'long', 'meta', 'multi',
'multiexcept', 'multiheld', 'multiinside', 'noun', 'number', 'only', 'private',
'replace', 'reverse', 'scope', 'score', 'special', 'string', 'table', 'terminating',
'time', 'topic', 'warning', 'with'), suffix=r'\b'),
Keyword, '#pop'),
(r'static\b', Keyword),
(r'[%s]{1,2}>|[+=]' % _dash, Punctuation, '#pop')
],
'_directive-keyword': [
include('_directive-keyword!'),
include('value')
],
'directive-keyword?': [
include('_directive-keyword!'),
default('#pop')
],
'property-keyword*': [
include('_whitespace'),
(r'(additive|long)\b', Keyword),
default('#pop')
],
'trace-keyword?': [
include('_whitespace'),
(words((
'assembly', 'dictionary', 'expressions', 'lines', 'linker',
'objects', 'off', 'on', 'symbols', 'tokens', 'verbs'), suffix=r'\b'),
Keyword, '#pop'),
default('#pop')
],
# Statements
'statements': [
include('_whitespace'),
(r'\]', Punctuation, '#pop'),
(r'[;{}]', Punctuation),
(words((
'box', 'break', 'continue', 'default', 'give', 'inversion',
'new_line', 'quit', 'read', 'remove', 'return', 'rfalse', 'rtrue',
'spaces', 'string', 'until'), suffix=r'\b'),
Keyword, 'default'),
(r'(do|else)\b', Keyword),
(r'(font|style)\b', Keyword,
('default', 'miscellaneous-keyword?')),
(r'for\b', Keyword, ('for', '(?')),
(r'(if|switch|while)', Keyword,
('expression', '_expression', '(?')),
(r'(jump|save|restore)\b', Keyword, ('default', 'label?')),
(r'objectloop\b', Keyword,
('_keyword-expression', 'variable?', '(?')),
(r'print(_ret)?\b|(?=[%s])' % _dquote, Keyword, 'print-list'),
(r'\.', Name.Label, 'label?'),
(r'@', Keyword, 'opcode'),
(r'#(?![agrnw]\$|#)', Punctuation, 'directive'),
(r'<', Punctuation, 'default'),
(r'move\b', Keyword,
('default', '_keyword-expression', '_expression')),
default(('default', '_keyword-expression', '_expression'))
],
'miscellaneous-keyword?': [
include('_whitespace'),
(r'(bold|fixed|from|near|off|on|reverse|roman|to|underline)\b',
Keyword, '#pop'),
(r'(a|A|an|address|char|name|number|object|property|string|the|'
r'The)\b(?=(\s+|(![^%s]*))*\))' % _newline, Keyword.Pseudo,
'#pop'),
(r'%s(?=(\s+|(![^%s]*))*\))' % (_name, _newline), Name.Function,
'#pop'),
default('#pop')
],
'(?': [
include('_whitespace'),
(r'\(', Punctuation, '#pop'),
default('#pop')
],
'for': [
include('_whitespace'),
(r';', Punctuation, ('_for-expression', '_expression')),
default(('_for-expression', '_expression'))
],
'print-list': [
include('_whitespace'),
(r';', Punctuation, '#pop'),
(r':', Error),
default(('_list-expression', '_expression', '_list-expression', 'form'))
],
'form': [
include('_whitespace'),
(r'\(', Punctuation, ('#pop', 'miscellaneous-keyword?')),
default('#pop')
],
# Assembly
'opcode': [
include('_whitespace'),
(r'[%s]' % _dquote, String.Double, ('operands', 'plain-string')),
(_name, Keyword, 'operands')
],
'operands': [
(r':', Error),
default(('_assembly-expression', '_expression'))
]
}
def get_tokens_unprocessed(self, text):
# 'in' is either a keyword or an operator.
# If the token two tokens after 'in' is ')', 'in' is a keyword:
# objectloop(a in b)
# Otherwise, it is an operator:
# objectloop(a in b && true)
objectloop_queue = []
objectloop_token_count = -1
previous_token = None
for index, token, value in RegexLexer.get_tokens_unprocessed(self,
text):
if previous_token is Name.Variable and value == 'in':
objectloop_queue = [[index, token, value]]
objectloop_token_count = 2
elif objectloop_token_count > 0:
if token not in Comment and token not in Text:
objectloop_token_count -= 1
objectloop_queue.append((index, token, value))
else:
if objectloop_token_count == 0:
if objectloop_queue[-1][2] == ')':
objectloop_queue[0][1] = Keyword
while objectloop_queue:
yield objectloop_queue.pop(0)
objectloop_token_count = -1
yield index, token, value
if token not in Comment and token not in Text:
previous_token = token
while objectloop_queue:
yield objectloop_queue.pop(0)
    def analyse_text(text):
        """We try to find a keyword that seems relatively common;
        unfortunately there is a decent overlap with Smalltalk keywords
        here."""
        result = 0
        # Use a raw string so that \b is a regex word boundary rather than
        # a literal backspace character.
        if re.search(r'\borigsource\b', text, re.IGNORECASE):
            result += 0.05
        return result
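# Usage sketch (illustrative, not part of the original module; assumes a
# standard Pygments install where highlight and NullFormatter are
# available):
#
#     from pygments import highlight
#     from pygments.formatters import NullFormatter
#     print(highlight('[ Main; print "Hello^"; ];', Inform6Lexer(),
#                     NullFormatter()))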
class Inform7Lexer(RegexLexer):
"""
For `Inform 7 <http://inform7.com/>`_ source code.
.. versionadded:: 2.0
"""
name = 'Inform 7'
aliases = ['inform7', 'i7']
filenames = ['*.ni', '*.i7x']
flags = re.MULTILINE | re.DOTALL | re.UNICODE
_dash = Inform6Lexer._dash
_dquote = Inform6Lexer._dquote
_newline = Inform6Lexer._newline
_start = r'\A|(?<=[%s])' % _newline
# There are three variants of Inform 7, differing in how to
# interpret at signs and braces in I6T. In top-level inclusions, at
# signs in the first column are inweb syntax. In phrase definitions
# and use options, tokens in braces are treated as I7. Use options
# also interpret "{N}".
tokens = {}
token_variants = ['+i6t-not-inline', '+i6t-inline', '+i6t-use-option']
for level in token_variants:
tokens[level] = {
'+i6-root': list(Inform6Lexer.tokens['root']),
'+i6t-root': [ # For Inform6TemplateLexer
(r'[^%s]*' % Inform6Lexer._newline, Comment.Preproc,
('directive', '+p'))
],
'root': [
(r'(\|?\s)+', Text),
(r'\[', Comment.Multiline, '+comment'),
(r'[%s]' % _dquote, Generic.Heading,
('+main', '+titling', '+titling-string')),
default(('+main', '+heading?'))
],
'+titling-string': [
(r'[^%s]+' % _dquote, Generic.Heading),
(r'[%s]' % _dquote, Generic.Heading, '#pop')
],
'+titling': [
(r'\[', Comment.Multiline, '+comment'),
(r'[^%s.;:|%s]+' % (_dquote, _newline), Generic.Heading),
(r'[%s]' % _dquote, Generic.Heading, '+titling-string'),
(r'[%s]{2}|(?<=[\s%s])\|[\s%s]' % (_newline, _dquote, _dquote),
Text, ('#pop', '+heading?')),
(r'[.;:]|(?<=[\s%s])\|' % _dquote, Text, '#pop'),
(r'[|%s]' % _newline, Generic.Heading)
],
'+main': [
(r'(?i)[^%s:a\[(|%s]+' % (_dquote, _newline), Text),
(r'[%s]' % _dquote, String.Double, '+text'),
(r':', Text, '+phrase-definition'),
(r'(?i)\bas\b', Text, '+use-option'),
(r'\[', Comment.Multiline, '+comment'),
(r'(\([%s])(.*?)([%s]\))' % (_dash, _dash),
bygroups(Punctuation,
using(this, state=('+i6-root', 'directive'),
i6t='+i6t-not-inline'), Punctuation)),
(r'(%s|(?<=[\s;:.%s]))\|\s|[%s]{2,}' %
(_start, _dquote, _newline), Text, '+heading?'),
(r'(?i)[a(|%s]' % _newline, Text)
],
'+phrase-definition': [
(r'\s+', Text),
(r'\[', Comment.Multiline, '+comment'),
(r'(\([%s])(.*?)([%s]\))' % (_dash, _dash),
bygroups(Punctuation,
using(this, state=('+i6-root', 'directive',
'default', 'statements'),
i6t='+i6t-inline'), Punctuation), '#pop'),
default('#pop')
],
'+use-option': [
(r'\s+', Text),
(r'\[', Comment.Multiline, '+comment'),
(r'(\([%s])(.*?)([%s]\))' % (_dash, _dash),
bygroups(Punctuation,
using(this, state=('+i6-root', 'directive'),
i6t='+i6t-use-option'), Punctuation), '#pop'),
default('#pop')
],
'+comment': [
(r'[^\[\]]+', Comment.Multiline),
(r'\[', Comment.Multiline, '#push'),
(r'\]', Comment.Multiline, '#pop')
],
'+text': [
(r'[^\[%s]+' % _dquote, String.Double),
(r'\[.*?\]', String.Interpol),
(r'[%s]' % _dquote, String.Double, '#pop')
],
'+heading?': [
(r'(\|?\s)+', Text),
(r'\[', Comment.Multiline, '+comment'),
(r'[%s]{4}\s+' % _dash, Text, '+documentation-heading'),
(r'[%s]{1,3}' % _dash, Text),
(r'(?i)(volume|book|part|chapter|section)\b[^%s]*' % _newline,
Generic.Heading, '#pop'),
default('#pop')
],
'+documentation-heading': [
(r'\s+', Text),
(r'\[', Comment.Multiline, '+comment'),
(r'(?i)documentation\s+', Text, '+documentation-heading2'),
default('#pop')
],
'+documentation-heading2': [
(r'\s+', Text),
(r'\[', Comment.Multiline, '+comment'),
(r'[%s]{4}\s' % _dash, Text, '+documentation'),
default('#pop:2')
],
'+documentation': [
(r'(?i)(%s)\s*(chapter|example)\s*:[^%s]*' %
(_start, _newline), Generic.Heading),
(r'(?i)(%s)\s*section\s*:[^%s]*' % (_start, _newline),
Generic.Subheading),
(r'((%s)\t.*?[%s])+' % (_start, _newline),
using(this, state='+main')),
(r'[^%s\[]+|[%s\[]' % (_newline, _newline), Text),
(r'\[', Comment.Multiline, '+comment'),
],
'+i6t-not-inline': [
(r'(%s)@c( .*?)?([%s]|\Z)' % (_start, _newline),
Comment.Preproc),
(r'(%s)@([%s]+|Purpose:)[^%s]*' % (_start, _dash, _newline),
Comment.Preproc),
(r'(%s)@p( .*?)?([%s]|\Z)' % (_start, _newline),
Generic.Heading, '+p')
],
'+i6t-use-option': [
include('+i6t-not-inline'),
(r'(\{)(N)(\})', bygroups(Punctuation, Text, Punctuation))
],
'+i6t-inline': [
(r'(\{)(\S[^}]*)?(\})',
bygroups(Punctuation, using(this, state='+main'),
Punctuation))
],
'+i6t': [
(r'(\{[%s])(![^}]*)(\}?)' % _dash,
bygroups(Punctuation, Comment.Single, Punctuation)),
(r'(\{[%s])(lines)(:)([^}]*)(\}?)' % _dash,
bygroups(Punctuation, Keyword, Punctuation, Text,
Punctuation), '+lines'),
(r'(\{[%s])([^:}]*)(:?)([^}]*)(\}?)' % _dash,
bygroups(Punctuation, Keyword, Punctuation, Text,
Punctuation)),
(r'(\(\+)(.*?)(\+\)|\Z)',
bygroups(Punctuation, using(this, state='+main'),
Punctuation))
],
'+p': [
(r'[^@]+', Comment.Preproc),
(r'(%s)@c( .*?)?([%s]|\Z)' % (_start, _newline),
Comment.Preproc, '#pop'),
(r'(%s)@([%s]|Purpose:)' % (_start, _dash), Comment.Preproc),
(r'(%s)@p( .*?)?([%s]|\Z)' % (_start, _newline),
Generic.Heading),
(r'@', Comment.Preproc)
],
'+lines': [
(r'(%s)@c( .*?)?([%s]|\Z)' % (_start, _newline),
Comment.Preproc),
(r'(%s)@([%s]|Purpose:)[^%s]*' % (_start, _dash, _newline),
Comment.Preproc),
(r'(%s)@p( .*?)?([%s]|\Z)' % (_start, _newline),
Generic.Heading, '+p'),
(r'(%s)@\w*[ %s]' % (_start, _newline), Keyword),
(r'![^%s]*' % _newline, Comment.Single),
(r'(\{)([%s]endlines)(\})' % _dash,
bygroups(Punctuation, Keyword, Punctuation), '#pop'),
(r'[^@!{]+?([%s]|\Z)|.' % _newline, Text)
]
}
# Inform 7 can include snippets of Inform 6 template language,
# so all of Inform6Lexer's states are copied here, with
# modifications to account for template syntax. Inform7Lexer's
# own states begin with '+' to avoid name conflicts. Some of
# Inform6Lexer's states begin with '_': these are not modified.
# They deal with template syntax either by including modified
# states, or by matching r'' then pushing to modified states.
for token in Inform6Lexer.tokens:
if token == 'root':
continue
tokens[level][token] = list(Inform6Lexer.tokens[token])
if not token.startswith('_'):
tokens[level][token][:0] = [include('+i6t'), include(level)]
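        # For example, tokens['+i6t-inline']['value'] now begins with
        # include('+i6t') and include('+i6t-inline'), followed by the
        # original Inform6Lexer 'value' rules copied just above.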
def __init__(self, **options):
level = options.get('i6t', '+i6t-not-inline')
if level not in self._all_tokens:
self._tokens = self.__class__.process_tokendef(level)
else:
self._tokens = self._all_tokens[level]
RegexLexer.__init__(self, **options)
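# Usage sketch (illustrative, not part of the original module): the 'i6t'
# option selects one of the token variants defined above.
#
#     lexer = Inform7Lexer(i6t='+i6t-inline')
#     tokens = list(lexer.get_tokens('The Kitchen is a room.'))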
class Inform6TemplateLexer(Inform7Lexer):
"""
For `Inform 6 template
<http://inform7.com/sources/src/i6template/Woven/index.html>`_ code.
.. versionadded:: 2.0
"""
name = 'Inform 6 template'
aliases = ['i6t']
filenames = ['*.i6t']
def get_tokens_unprocessed(self, text, stack=('+i6t-root',)):
return Inform7Lexer.get_tokens_unprocessed(self, text, stack)
class Tads3Lexer(RegexLexer):
"""
For `TADS 3 <http://www.tads.org/>`_ source code.
"""
name = 'TADS 3'
aliases = ['tads3']
filenames = ['*.t']
flags = re.DOTALL | re.MULTILINE
_comment_single = r'(?://(?:[^\\\n]|\\+[\w\W])*$)'
_comment_multiline = r'(?:/\*(?:[^*]|\*(?!/))*\*/)'
_escape = (r'(?:\\(?:[\n\\<>"\'^v bnrt]|u[\da-fA-F]{,4}|x[\da-fA-F]{,2}|'
r'[0-3]?[0-7]{1,2}))')
_name = r'(?:[_a-zA-Z]\w*)'
_no_quote = r'(?=\s|\\?>)'
_operator = (r'(?:&&|\|\||\+\+|--|\?\?|::|[.,@\[\]~]|'
r'(?:[=+\-*/%!&|^]|<<?|>>?>?)=?)')
_ws = r'(?:\\|\s|%s|%s)' % (_comment_single, _comment_multiline)
_ws_pp = r'(?:\\\n|[^\S\n]|%s|%s)' % (_comment_single, _comment_multiline)
def _make_string_state(triple, double, verbatim=None, _escape=_escape):
if verbatim:
verbatim = ''.join(['(?:%s|%s)' % (re.escape(c.lower()),
re.escape(c.upper()))
for c in verbatim])
char = r'"' if double else r"'"
token = String.Double if double else String.Single
escaped_quotes = r'+|%s(?!%s{2})' % (char, char) if triple else r''
prefix = '%s%s' % ('t' if triple else '', 'd' if double else 's')
tag_state_name = '%sqt' % prefix
state = []
if triple:
state += [
(r'%s{3,}' % char, token, '#pop'),
(r'\\%s+' % char, String.Escape),
(char, token)
]
else:
state.append((char, token, '#pop'))
state += [
include('s/verbatim'),
(r'[^\\<&{}%s]+' % char, token)
]
if verbatim:
# This regex can't use `(?i)` because escape sequences are
# case-sensitive. `<\XMP>` works; `<\xmp>` doesn't.
state.append((r'\\?<(/|\\\\|(?!%s)\\)%s(?=[\s=>])' %
(_escape, verbatim),
Name.Tag, ('#pop', '%sqs' % prefix, tag_state_name)))
else:
state += [
(r'\\?<!([^><\\%s]|<(?!<)|\\%s%s|%s|\\.)*>?' %
(char, char, escaped_quotes, _escape), Comment.Multiline),
(r'(?i)\\?<listing(?=[\s=>]|\\>)', Name.Tag,
('#pop', '%sqs/listing' % prefix, tag_state_name)),
(r'(?i)\\?<xmp(?=[\s=>]|\\>)', Name.Tag,
('#pop', '%sqs/xmp' % prefix, tag_state_name)),
(r'\\?<([^\s=><\\%s]|<(?!<)|\\%s%s|%s|\\.)*' %
(char, char, escaped_quotes, _escape), Name.Tag,
tag_state_name),
include('s/entity')
]
state += [
include('s/escape'),
(r'\{([^}<\\%s]|<(?!<)|\\%s%s|%s|\\.)*\}' %
(char, char, escaped_quotes, _escape), String.Interpol),
(r'[\\&{}<]', token)
]
return state
def _make_tag_state(triple, double, _escape=_escape):
char = r'"' if double else r"'"
quantifier = r'{3,}' if triple else r''
state_name = '%s%sqt' % ('t' if triple else '', 'd' if double else 's')
token = String.Double if double else String.Single
escaped_quotes = r'+|%s(?!%s{2})' % (char, char) if triple else r''
return [
(r'%s%s' % (char, quantifier), token, '#pop:2'),
(r'(\s|\\\n)+', Text),
(r'(=)(\\?")', bygroups(Punctuation, String.Double),
'dqs/%s' % state_name),
(r"(=)(\\?')", bygroups(Punctuation, String.Single),
'sqs/%s' % state_name),
(r'=', Punctuation, 'uqs/%s' % state_name),
(r'\\?>', Name.Tag, '#pop'),
(r'\{([^}<\\%s]|<(?!<)|\\%s%s|%s|\\.)*\}' %
(char, char, escaped_quotes, _escape), String.Interpol),
(r'([^\s=><\\%s]|<(?!<)|\\%s%s|%s|\\.)+' %
(char, char, escaped_quotes, _escape), Name.Attribute),
include('s/escape'),
include('s/verbatim'),
include('s/entity'),
(r'[\\{}&]', Name.Attribute)
]
def _make_attribute_value_state(terminator, host_triple, host_double,
_escape=_escape):
token = (String.Double if terminator == r'"' else
String.Single if terminator == r"'" else String.Other)
host_char = r'"' if host_double else r"'"
host_quantifier = r'{3,}' if host_triple else r''
host_token = String.Double if host_double else String.Single
escaped_quotes = (r'+|%s(?!%s{2})' % (host_char, host_char)
if host_triple else r'')
return [
(r'%s%s' % (host_char, host_quantifier), host_token, '#pop:3'),
(r'%s%s' % (r'' if token is String.Other else r'\\?', terminator),
token, '#pop'),
include('s/verbatim'),
include('s/entity'),
(r'\{([^}<\\%s]|<(?!<)|\\%s%s|%s|\\.)*\}' %
(host_char, host_char, escaped_quotes, _escape), String.Interpol),
(r'([^\s"\'<%s{}\\&])+' % (r'>' if token is String.Other else r''),
token),
include('s/escape'),
(r'["\'\s&{<}\\]', token)
]
tokens = {
'root': [
('\ufeff', Text),
(r'\{', Punctuation, 'object-body'),
(r';+', Punctuation),
(r'(?=(argcount|break|case|catch|continue|default|definingobj|'
r'delegated|do|else|for|foreach|finally|goto|if|inherited|'
r'invokee|local|nil|new|operator|replaced|return|self|switch|'
r'targetobj|targetprop|throw|true|try|while)\b)', Text, 'block'),
(r'(%s)(%s*)(\()' % (_name, _ws),
bygroups(Name.Function, using(this, state='whitespace'),
Punctuation),
('block?/root', 'more/parameters', 'main/parameters')),
include('whitespace'),
(r'\++', Punctuation),
(r'[^\s!"%-(*->@-_a-z{-~]+', Error), # Averts an infinite loop
(r'(?!\Z)', Text, 'main/root')
],
'main/root': [
include('main/basic'),
default(('#pop', 'object-body/no-braces', 'classes', 'class'))
],
'object-body/no-braces': [
(r';', Punctuation, '#pop'),
(r'\{', Punctuation, ('#pop', 'object-body')),
include('object-body')
],
'object-body': [
(r';', Punctuation),
(r'\{', Punctuation, '#push'),
(r'\}', Punctuation, '#pop'),
(r':', Punctuation, ('classes', 'class')),
(r'(%s?)(%s*)(\()' % (_name, _ws),
bygroups(Name.Function, using(this, state='whitespace'),
Punctuation),
('block?', 'more/parameters', 'main/parameters')),
(r'(%s)(%s*)(\{)' % (_name, _ws),
bygroups(Name.Function, using(this, state='whitespace'),
Punctuation), 'block'),
(r'(%s)(%s*)(:)' % (_name, _ws),
bygroups(Name.Variable, using(this, state='whitespace'),
Punctuation),
('object-body/no-braces', 'classes', 'class')),
include('whitespace'),
(r'->|%s' % _operator, Punctuation, 'main'),
default('main/object-body')
],
'main/object-body': [
include('main/basic'),
(r'(%s)(%s*)(=?)' % (_name, _ws),
bygroups(Name.Variable, using(this, state='whitespace'),
Punctuation), ('#pop', 'more', 'main')),
default('#pop:2')
],
'block?/root': [
(r'\{', Punctuation, ('#pop', 'block')),
include('whitespace'),
(r'(?=[\[\'"<(:])', Text, # It might be a VerbRule macro.
('#pop', 'object-body/no-braces', 'grammar', 'grammar-rules')),
# It might be a macro like DefineAction.
default(('#pop', 'object-body/no-braces'))
],
'block?': [
(r'\{', Punctuation, ('#pop', 'block')),
include('whitespace'),
default('#pop')
],
'block/basic': [
(r'[;:]+', Punctuation),
(r'\{', Punctuation, '#push'),
(r'\}', Punctuation, '#pop'),
(r'default\b', Keyword.Reserved),
(r'(%s)(%s*)(:)' % (_name, _ws),
bygroups(Name.Label, using(this, state='whitespace'),
Punctuation)),
include('whitespace')
],
'block': [
include('block/basic'),
(r'(?!\Z)', Text, ('more', 'main'))
],
'block/embed': [
(r'>>', String.Interpol, '#pop'),
include('block/basic'),
(r'(?!\Z)', Text, ('more/embed', 'main'))
],
'main/basic': [
include('whitespace'),
(r'\(', Punctuation, ('#pop', 'more', 'main')),
(r'\[', Punctuation, ('#pop', 'more/list', 'main')),
(r'\{', Punctuation, ('#pop', 'more/inner', 'main/inner',
'more/parameters', 'main/parameters')),
(r'\*|\.{3}', Punctuation, '#pop'),
(r'(?i)0x[\da-f]+', Number.Hex, '#pop'),
(r'(\d+\.(?!\.)\d*|\.\d+)([eE][-+]?\d+)?|\d+[eE][-+]?\d+',
Number.Float, '#pop'),
(r'0[0-7]+', Number.Oct, '#pop'),
(r'\d+', Number.Integer, '#pop'),
(r'"""', String.Double, ('#pop', 'tdqs')),
(r"'''", String.Single, ('#pop', 'tsqs')),
(r'"', String.Double, ('#pop', 'dqs')),
(r"'", String.Single, ('#pop', 'sqs')),
(r'R"""', String.Regex, ('#pop', 'tdqr')),
(r"R'''", String.Regex, ('#pop', 'tsqr')),
(r'R"', String.Regex, ('#pop', 'dqr')),
(r"R'", String.Regex, ('#pop', 'sqr')),
# Two-token keywords
(r'(extern)(%s+)(object\b)' % _ws,
bygroups(Keyword.Reserved, using(this, state='whitespace'),
Keyword.Reserved)),
(r'(function|method)(%s*)(\()' % _ws,
bygroups(Keyword.Reserved, using(this, state='whitespace'),
Punctuation),
('#pop', 'block?', 'more/parameters', 'main/parameters')),
(r'(modify)(%s+)(grammar\b)' % _ws,
bygroups(Keyword.Reserved, using(this, state='whitespace'),
Keyword.Reserved),
('#pop', 'object-body/no-braces', ':', 'grammar')),
(r'(new)(%s+(?=(?:function|method)\b))' % _ws,
bygroups(Keyword.Reserved, using(this, state='whitespace'))),
(r'(object)(%s+)(template\b)' % _ws,
bygroups(Keyword.Reserved, using(this, state='whitespace'),
Keyword.Reserved), ('#pop', 'template')),
(r'(string)(%s+)(template\b)' % _ws,
bygroups(Keyword, using(this, state='whitespace'),
Keyword.Reserved), ('#pop', 'function-name')),
# Keywords
(r'(argcount|definingobj|invokee|replaced|targetobj|targetprop)\b',
Name.Builtin, '#pop'),
(r'(break|continue|goto)\b', Keyword.Reserved, ('#pop', 'label')),
(r'(case|extern|if|intrinsic|return|static|while)\b',
Keyword.Reserved),
(r'catch\b', Keyword.Reserved, ('#pop', 'catch')),
(r'class\b', Keyword.Reserved,
('#pop', 'object-body/no-braces', 'class')),
(r'(default|do|else|finally|try)\b', Keyword.Reserved, '#pop'),
(r'(dictionary|property)\b', Keyword.Reserved,
('#pop', 'constants')),
(r'enum\b', Keyword.Reserved, ('#pop', 'enum')),
(r'export\b', Keyword.Reserved, ('#pop', 'main')),
(r'(for|foreach)\b', Keyword.Reserved,
('#pop', 'more/inner', 'main/inner')),
(r'(function|method)\b', Keyword.Reserved,
('#pop', 'block?', 'function-name')),
(r'grammar\b', Keyword.Reserved,
('#pop', 'object-body/no-braces', 'grammar')),
(r'inherited\b', Keyword.Reserved, ('#pop', 'inherited')),
(r'local\b', Keyword.Reserved,
('#pop', 'more/local', 'main/local')),
(r'(modify|replace|switch|throw|transient)\b', Keyword.Reserved,
'#pop'),
(r'new\b', Keyword.Reserved, ('#pop', 'class')),
(r'(nil|true)\b', Keyword.Constant, '#pop'),
(r'object\b', Keyword.Reserved, ('#pop', 'object-body/no-braces')),
(r'operator\b', Keyword.Reserved, ('#pop', 'operator')),
(r'propertyset\b', Keyword.Reserved,
('#pop', 'propertyset', 'main')),
(r'self\b', Name.Builtin.Pseudo, '#pop'),
(r'template\b', Keyword.Reserved, ('#pop', 'template')),
# Operators
(r'(__objref|defined)(%s*)(\()' % _ws,
bygroups(Operator.Word, using(this, state='whitespace'),
Operator), ('#pop', 'more/__objref', 'main')),
(r'delegated\b', Operator.Word),
# Compiler-defined macros and built-in properties
(r'(__DATE__|__DEBUG|__LINE__|__FILE__|'
r'__TADS_MACRO_FORMAT_VERSION|__TADS_SYS_\w*|__TADS_SYSTEM_NAME|'
r'__TADS_VERSION_MAJOR|__TADS_VERSION_MINOR|__TADS3|__TIME__|'
r'construct|finalize|grammarInfo|grammarTag|lexicalParent|'
r'miscVocab|sourceTextGroup|sourceTextGroupName|'
r'sourceTextGroupOrder|sourceTextOrder)\b', Name.Builtin, '#pop')
],
'main': [
include('main/basic'),
(_name, Name, '#pop'),
default('#pop')
],
'more/basic': [
(r'\(', Punctuation, ('more/list', 'main')),
(r'\[', Punctuation, ('more', 'main')),
(r'\.{3}', Punctuation),
(r'->|\.\.', Punctuation, 'main'),
(r'(?=;)|[:)\]]', Punctuation, '#pop'),
include('whitespace'),
(_operator, Operator, 'main'),
(r'\?', Operator, ('main', 'more/conditional', 'main')),
(r'(is|not)(%s+)(in\b)' % _ws,
bygroups(Operator.Word, using(this, state='whitespace'),
Operator.Word)),
(r'[^\s!"%-_a-z{-~]+', Error) # Averts an infinite loop
],
'more': [
include('more/basic'),
default('#pop')
],
# Then expression (conditional operator)
'more/conditional': [
(r':(?!:)', Operator, '#pop'),
include('more')
],
# Embedded expressions
'more/embed': [
(r'>>', String.Interpol, '#pop:2'),
include('more')
],
# For/foreach loop initializer or short-form anonymous function
'main/inner': [
(r'\(', Punctuation, ('#pop', 'more/inner', 'main/inner')),
(r'local\b', Keyword.Reserved, ('#pop', 'main/local')),
include('main')
],
'more/inner': [
(r'\}', Punctuation, '#pop'),
(r',', Punctuation, 'main/inner'),
(r'(in|step)\b', Keyword, 'main/inner'),
include('more')
],
# Local
'main/local': [
(_name, Name.Variable, '#pop'),
include('whitespace')
],
'more/local': [
(r',', Punctuation, 'main/local'),
include('more')
],
# List
'more/list': [
(r'[,:]', Punctuation, 'main'),
include('more')
],
# Parameter list
'main/parameters': [
(r'(%s)(%s*)(?=:)' % (_name, _ws),
bygroups(Name.Variable, using(this, state='whitespace')), '#pop'),
(r'(%s)(%s+)(%s)' % (_name, _ws, _name),
bygroups(Name.Class, using(this, state='whitespace'),
Name.Variable), '#pop'),
(r'\[+', Punctuation),
include('main/basic'),
(_name, Name.Variable, '#pop'),
default('#pop')
],
'more/parameters': [
(r'(:)(%s*(?=[?=,:)]))' % _ws,
bygroups(Punctuation, using(this, state='whitespace'))),
(r'[?\]]+', Punctuation),
(r'[:)]', Punctuation, ('#pop', 'multimethod?')),
(r',', Punctuation, 'main/parameters'),
(r'=', Punctuation, ('more/parameter', 'main')),
include('more')
],
'more/parameter': [
(r'(?=[,)])', Text, '#pop'),
include('more')
],
'multimethod?': [
(r'multimethod\b', Keyword, '#pop'),
include('whitespace'),
default('#pop')
],
# Statements and expressions
'more/__objref': [
(r',', Punctuation, 'mode'),
(r'\)', Operator, '#pop'),
include('more')
],
'mode': [
(r'(error|warn)\b', Keyword, '#pop'),
include('whitespace')
],
'catch': [
(r'\(+', Punctuation),
(_name, Name.Exception, ('#pop', 'variables')),
include('whitespace')
],
'enum': [
include('whitespace'),
(r'token\b', Keyword, ('#pop', 'constants')),
default(('#pop', 'constants'))
],
'grammar': [
(r'\)+', Punctuation),
(r'\(', Punctuation, 'grammar-tag'),
(r':', Punctuation, 'grammar-rules'),
(_name, Name.Class),
include('whitespace')
],
'grammar-tag': [
include('whitespace'),
(r'"""([^\\"<]|""?(?!")|\\"+|\\.|<(?!<))+("{3,}|<<)|'
r'R"""([^\\"]|""?(?!")|\\"+|\\.)+"{3,}|'
r"'''([^\\'<]|''?(?!')|\\'+|\\.|<(?!<))+('{3,}|<<)|"
r"R'''([^\\']|''?(?!')|\\'+|\\.)+'{3,}|"
r'"([^\\"<]|\\.|<(?!<))+("|<<)|R"([^\\"]|\\.)+"|'
r"'([^\\'<]|\\.|<(?!<))+('|<<)|R'([^\\']|\\.)+'|"
r"([^)\s\\/]|/(?![/*]))+|\)", String.Other, '#pop')
],
'grammar-rules': [
include('string'),
include('whitespace'),
(r'(\[)(%s*)(badness)' % _ws,
bygroups(Punctuation, using(this, state='whitespace'), Keyword),
'main'),
(r'->|%s|[()]' % _operator, Punctuation),
(_name, Name.Constant),
default('#pop:2')
],
':': [
(r':', Punctuation, '#pop')
],
'function-name': [
(r'(<<([^>]|>>>|>(?!>))*>>)+', String.Interpol),
(r'(?=%s?%s*[({])' % (_name, _ws), Text, '#pop'),
(_name, Name.Function, '#pop'),
include('whitespace')
],
'inherited': [
(r'<', Punctuation, ('#pop', 'classes', 'class')),
include('whitespace'),
(_name, Name.Class, '#pop'),
default('#pop')
],
'operator': [
(r'negate\b', Operator.Word, '#pop'),
include('whitespace'),
(_operator, Operator),
default('#pop')
],
'propertyset': [
(r'\(', Punctuation, ('more/parameters', 'main/parameters')),
(r'\{', Punctuation, ('#pop', 'object-body')),
include('whitespace')
],
'template': [
(r'(?=;)', Text, '#pop'),
include('string'),
(r'inherited\b', Keyword.Reserved),
include('whitespace'),
(r'->|\?|%s' % _operator, Punctuation),
(_name, Name.Variable)
],
# Identifiers
'class': [
(r'\*|\.{3}', Punctuation, '#pop'),
(r'object\b', Keyword.Reserved, '#pop'),
(r'transient\b', Keyword.Reserved),
(_name, Name.Class, '#pop'),
include('whitespace'),
default('#pop')
],
'classes': [
(r'[:,]', Punctuation, 'class'),
include('whitespace'),
(r'>', Punctuation, '#pop'),
default('#pop')
],
'constants': [
(r',+', Punctuation),
(r';', Punctuation, '#pop'),
(r'property\b', Keyword.Reserved),
(_name, Name.Constant),
include('whitespace')
],
'label': [
(_name, Name.Label, '#pop'),
include('whitespace'),
default('#pop')
],
'variables': [
(r',+', Punctuation),
(r'\)', Punctuation, '#pop'),
include('whitespace'),
(_name, Name.Variable)
],
# Whitespace and comments
'whitespace': [
(r'^%s*#(%s|[^\n]|(?<=\\)\n)*\n?' % (_ws_pp, _comment_multiline),
Comment.Preproc),
(_comment_single, Comment.Single),
(_comment_multiline, Comment.Multiline),
(r'\\+\n+%s*#?|\n+|([^\S\n]|\\)+' % _ws_pp, Text)
],
# Strings
'string': [
(r'"""', String.Double, 'tdqs'),
(r"'''", String.Single, 'tsqs'),
(r'"', String.Double, 'dqs'),
(r"'", String.Single, 'sqs')
],
's/escape': [
(r'\{\{|\}\}|%s' % _escape, String.Escape)
],
's/verbatim': [
(r'<<\s*(as\s+decreasingly\s+likely\s+outcomes|cycling|else|end|'
r'first\s+time|one\s+of|only|or|otherwise|'
r'(sticky|(then\s+)?(purely\s+)?at)\s+random|stopping|'
r'(then\s+)?(half\s+)?shuffled|\|\|)\s*>>', String.Interpol),
(r'<<(%%(_(%s|\\?.)|[\-+ ,#]|\[\d*\]?)*\d*\.?\d*(%s|\\?.)|'
r'\s*((else|otherwise)\s+)?(if|unless)\b)?' % (_escape, _escape),
String.Interpol, ('block/embed', 'more/embed', 'main'))
],
's/entity': [
(r'(?i)&(#(x[\da-f]+|\d+)|[a-z][\da-z]*);?', Name.Entity)
],
'tdqs': _make_string_state(True, True),
'tsqs': _make_string_state(True, False),
'dqs': _make_string_state(False, True),
'sqs': _make_string_state(False, False),
'tdqs/listing': _make_string_state(True, True, 'listing'),
'tsqs/listing': _make_string_state(True, False, 'listing'),
'dqs/listing': _make_string_state(False, True, 'listing'),
'sqs/listing': _make_string_state(False, False, 'listing'),
'tdqs/xmp': _make_string_state(True, True, 'xmp'),
'tsqs/xmp': _make_string_state(True, False, 'xmp'),
'dqs/xmp': _make_string_state(False, True, 'xmp'),
'sqs/xmp': _make_string_state(False, False, 'xmp'),
# Tags
'tdqt': _make_tag_state(True, True),
'tsqt': _make_tag_state(True, False),
'dqt': _make_tag_state(False, True),
'sqt': _make_tag_state(False, False),
'dqs/tdqt': _make_attribute_value_state(r'"', True, True),
'dqs/tsqt': _make_attribute_value_state(r'"', True, False),
'dqs/dqt': _make_attribute_value_state(r'"', False, True),
'dqs/sqt': _make_attribute_value_state(r'"', False, False),
'sqs/tdqt': _make_attribute_value_state(r"'", True, True),
'sqs/tsqt': _make_attribute_value_state(r"'", True, False),
'sqs/dqt': _make_attribute_value_state(r"'", False, True),
'sqs/sqt': _make_attribute_value_state(r"'", False, False),
'uqs/tdqt': _make_attribute_value_state(_no_quote, True, True),
'uqs/tsqt': _make_attribute_value_state(_no_quote, True, False),
'uqs/dqt': _make_attribute_value_state(_no_quote, False, True),
'uqs/sqt': _make_attribute_value_state(_no_quote, False, False),
# Regular expressions
'tdqr': [
(r'[^\\"]+', String.Regex),
(r'\\"*', String.Regex),
(r'"{3,}', String.Regex, '#pop'),
(r'"', String.Regex)
],
'tsqr': [
(r"[^\\']+", String.Regex),
(r"\\'*", String.Regex),
(r"'{3,}", String.Regex, '#pop'),
(r"'", String.Regex)
],
'dqr': [
(r'[^\\"]+', String.Regex),
(r'\\"?', String.Regex),
(r'"', String.Regex, '#pop')
],
'sqr': [
(r"[^\\']+", String.Regex),
(r"\\'?", String.Regex),
(r"'", String.Regex, '#pop')
]
}
def get_tokens_unprocessed(self, text, **kwargs):
pp = r'^%s*#%s*' % (self._ws_pp, self._ws_pp)
if_false_level = 0
for index, token, value in (
RegexLexer.get_tokens_unprocessed(self, text, **kwargs)):
if if_false_level == 0: # Not in a false #if
if (token is Comment.Preproc and
re.match(r'%sif%s+(0|nil)%s*$\n?' %
(pp, self._ws_pp, self._ws_pp), value)):
if_false_level = 1
else: # In a false #if
if token is Comment.Preproc:
if (if_false_level == 1 and
re.match(r'%sel(if|se)\b' % pp, value)):
if_false_level = 0
elif re.match(r'%sif' % pp, value):
if_false_level += 1
elif re.match(r'%sendif\b' % pp, value):
if_false_level -= 1
else:
token = Comment
yield index, token, value
def analyse_text(text):
"""This is a rather generic descriptive language without strong
identifiers. It looks like a 'GameMainDef' has to be present,
and/or a 'versionInfo' with an 'IFID' field."""
result = 0
if '__TADS' in text or 'GameMainDef' in text:
result += 0.2
# This is a fairly unique keyword which is likely used in source as well
if 'versionInfo' in text and 'IFID' in text:
result += 0.1
return result
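# Usage sketch (illustrative, not part of the original module): tokens
# inside a false preprocessor branch are downgraded to Comment by the
# get_tokens_unprocessed override above.
#
#     lexer = Tads3Lexer()
#     for index, token, value in lexer.get_tokens_unprocessed(
#             '#if 0\nx;\n#endif\n'):
#         print(index, token, value)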
|
|
# Copyright 2015, A10 Networks
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import pprint
import time
import uuid
import neutronclient.neutron.client as neutron_client
import novaclient.client as nova_client
import novaclient.exceptions as nova_exceptions
import a10_neutron_lbaas.a10_exceptions as a10_ex
import a10_neutron_lbaas.vthunder.keystone as a10_keystone
pp = pprint.PrettyPrinter(indent=4)
LOG = logging.getLogger(__name__)
CREATE_TIMEOUT = 900
# TODO(mdurrant) - These may need to go into a configuration file.
GLANCE_VERSION = 2
KEYSTONE_VERSION = "2.0"
NOVA_VERSION = "2.1"
NEUTRON_VERSION = "2.0"
OS_INTERFACE_URLS = ["public", "publicURL"]
_default_server = {
"id": None,
"name": None,
"image": None,
"flavor": None,
"meta": {},
"files": {},
"min_count": 1, # optional extension
"max_count": 1, # optional extension
"security_groups": [],
"userdata": None,
"key_name": None, # optional extension
"availability_zone": None,
"block_device_mapping": None, # optional extension
"block_device_mapping_v2": None, # optional extension
"scheduler_hints": {}, # optional extension
"config_drive": False, # optional extension
"disk_config": "AUTO", # AUTO or MANUAL # optional extension
"admin_pass": None # optional extension
}
MISSING_ERR_FORMAT = "{0} with name or id {1} could not be found"
class InstanceManager(object):
def __init__(self, ks_session, network_ks_session=None,
nova_api=None, nova_version=NOVA_VERSION,
glance_api=None, neutron_api=None):
# This is the keystone session that we use for spawning instances,
# aka our "service tenant" user.
self._ks_session = ks_session
# And this is the keystone session that we use for finding the network
# that we are going to plumb into, aka the "end user".
if network_ks_session is not None:
self._network_ks_session = network_ks_session
else:
self._network_ks_session = ks_session
# Yes, we really want both of these to use the "service tenant".
self._nova_api = nova_api or nova_client.Client(
nova_version, session=self._ks_session)
self._neutron_api = neutron_api or neutron_client.Client(
NEUTRON_VERSION, session=self._ks_session)
@classmethod
def _factory_with_service_tenant(cls, config, user_keystone_session):
ks = user_keystone_session
vth = config.get_vthunder_config()
if 'service_tenant' in vth:
service_ks = a10_keystone.KeystoneFromConfig(config)
else:
service_ks = ks
nova_version = config.get('nova_api_version')
return InstanceManager(
ks_session=service_ks.session, network_ks_session=ks.session,
nova_version=nova_version)
@classmethod
def from_config(cls, config, openstack_context=None):
ks = a10_keystone.KeystoneFromContext(config, openstack_context)
return cls._factory_with_service_tenant(config, ks)
@classmethod
def from_cmdline(cls, config, tenant_name, username, password):
ks = a10_keystone.KeystoneFromPassword(config, tenant_name, username, password)
return cls._factory_with_service_tenant(config, ks)
def _build_server(self, instance):
retval = {}
for k in _default_server:
retval[k] = instance.get(k, _default_server[k])
retval['name'] = retval['name'] or 'a10-' + str(uuid.uuid4())
return retval
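    # Illustrative example for _build_server (hypothetical input, not from
    # the original source): _build_server({'image': 'img-1'}) returns a
    # dict carrying every key of _default_server, with 'image' overridden
    # and a generated name of the form 'a10-<uuid4>'.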
def list_instances(self, detailed=True, search_opts=None,
marker=None, limit=None, sort_keys=None, sort_dirs=None):
return self._nova_api.servers.list(detailed, search_opts, marker, limit,
sort_keys, sort_dirs)
def create_instance(self, context):
return self._create_instance(context)
def _get_ip_addresses_from_instance(self, addresses, mgmt_network_name):
address_block = addresses[mgmt_network_name]
v4addresses = filter(lambda x: x["version"] == 4,
address_block)
return v4addresses[0]["addr"]
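    # Expected shape of `addresses` as nova returns it (shown as an
    # assumption for illustration):
    #     {'mgmt-net': [{'version': 4, 'addr': '10.0.0.5'},
    #                   {'version': 6, 'addr': 'fd00::5'}]}
    # Only the first IPv4 address on the named network is returned.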
def _create_instance(self, context):
server = self._build_server(context)
image_id = context.get("image", None)
flavor_id = context.get("flavor", None)
net_ids = context.get("networks")
image = self.get_image(identifier=image_id)
flavor = self.get_flavor(identifier=flavor_id)
networks = self.get_networks(net_ids)
if image is None:
raise a10_ex.ImageNotFoundError(MISSING_ERR_FORMAT.format("Image", image_id))
if flavor is None:
raise a10_ex.FlavorNotFoundError(MISSING_ERR_FORMAT.format("Flavor", flavor_id))
if networks is None:
msg = map(lambda x: MISSING_ERR_FORMAT.format("Network", x), net_ids)
raise a10_ex.NetworksNotFoundError(msg)
server["image"] = image.id
server["flavor"] = flavor.id
server["nics"] = [{'net-id': x['id']} for x in networks]
created_instance = self._nova_api.servers.create(**server)
        # Next 3 lines - Added due to insane API on the other side
if hasattr(created_instance.manager, 'client'):
# This craziness works around a bug in Liberty.
created_instance.manager.client.last_request_id = None
self._create_server_spinlock(created_instance)
# Get the IP address of the first interface (should be management)
ip_address = self._get_ip_addresses_from_instance(
created_instance.addresses, networks[0]['name'])
return {
'name': server['name'],
'instance': created_instance,
'ip_address': ip_address,
'nova_instance_id': created_instance.id
}
    def _create_server_spinlock(self, created_instance):
        created_id = created_instance.id
        start_time = time.time()
        sleep_time = 1
        pending_statuses = ["INITIALIZED"]
        active_statuses = ["ACTIVE"]
        fatal_statuses = ["ERROR",
                          "SOFT_DELETED",
                          "HARD_DELETED",
                          "STOPPED",
                          "PAUSED"]
        while True:
            get_instance = self._nova_api.servers.get(created_id)
            vm_state = getattr(get_instance, "OS-EXT-STS:vm_state").upper()
            if (get_instance.id == created_id and len(get_instance.addresses) > 0
                    and vm_state in active_statuses + pending_statuses):
                break
            if vm_state in fatal_statuses:
                raise Exception("Instance created in error state %s" % vm_state)
            if time.time() - start_time > CREATE_TIMEOUT:
                raise Exception("Timed out creating instance.")
            time.sleep(sleep_time)
def delete_instance(self, instance_id):
try:
return self._nova_api.servers.delete(instance_id)
except nova_exceptions.NotFound:
pass
def get_instance(self, instance):
return self._nova_api.servers.get(instance)
def get_flavor(self, identifier=None):
result = None
if identifier is None:
raise a10_ex.IdentifierUnspecifiedError(
"Parameter identifier must specify flavor id or name")
flavor_filter = (lambda x: x is not None and
((hasattr(x, "name") and x.name == identifier)
or (hasattr(x, "id") and x.id == identifier)))
flavors = self._nova_api.flavors.list()
filtered = filter(flavor_filter, flavors)
# TODO(mdurrant): What if we accidentally hit multiple flavors?
        if filtered:
result = filtered[0]
return result
def get_image(self, identifier=None):
result = None
images = []
if identifier is None:
raise a10_ex.IdentifierUnspecifiedError(
"Parameter identifier must specify image id or name")
img_filter = (lambda x: x is not None and
((hasattr(x, "name") and x.name is not None and identifier in x.name)
or (hasattr(x, "id") and x.id == identifier)))
try:
images = self._nova_api.images.list()
except Exception as ex:
raise a10_ex.ImageNotFoundError(
"Unable to retrieve images from nova. Error %s" % (ex))
filtered = filter(img_filter, images)
if filtered:
result = filtered[0]
return result
def _handle_missing_networks(self, not_found):
msg_format = "Network {0} was not found by ID or name."
msgs = []
for net in not_found:
msgs.append(msg_format.format(net))
ex_msg = "\n".join(msgs)
LOG.exception(ex_msg)
raise a10_ex.NetworksNotFoundError(ex_msg)
def _get_networks(self, session, networks=[]):
network_list = {"networks": []}
net_list = []
if networks is None or len(networks) < 1:
raise a10_ex.IdentifierUnspecifiedError(
"Parameter networks must be specified.")
try:
# Lookup as user, since names are not unique
q_api = neutron_client.Client(NEUTRON_VERSION, session=session)
network_list = q_api.list_networks()
net_list = network_list.get("networks", [])
# TODO(mdurrant) - Create specific exceptions.
except Exception as ex:
LOG.exception(
"Unable to retrieve networks from neutron.\nError %s" % (ex))
# TODO(mdurrant-jk-cshock) - Look up networks by name too
id_func = (lambda x: x.get("net-id",
x.get("uuid", x.get("id"))) if x is not None else None)
networks_by_id = dict((id_func(x), x) for x in net_list)
networks_by_name = dict((x.get("name"), x) for x in net_list)
available_networks = networks_by_name.copy()
available_networks.update(networks_by_id)
missing_networks = [x for x in networks if x not in available_networks.keys()]
if any(missing_networks):
self._handle_missing_networks(missing_networks)
return [{
'id': id_func(available_networks[x]),
'name': available_networks[x].get('name', '')
} for x in networks]
def get_networks(self, networks=[]):
if self._ks_session != self._network_ks_session:
mgmt = self._get_networks(self._ks_session, networks[:1])
data = self._get_networks(self._network_ks_session, networks[1:])
return mgmt + data
else:
return self._get_networks(self._ks_session, networks)
def _device_instance(self, vthunder_config, name=None):
# Pick an image, any image
image_id = vthunder_config['glance_image']
if image_id is None:
raise a10_ex.FeatureNotConfiguredError("Launching instance requires configured image")
# Get the flavor from config
flavor = vthunder_config['nova_flavor']
if flavor is None:
raise a10_ex.FeatureNotConfiguredError("Launching instance requires configured flavor")
mgmt_network = vthunder_config.get("vthunder_management_network")
networks = [mgmt_network] if mgmt_network else []
networks += vthunder_config.get('vthunder_data_networks')
if networks is None or len(networks) < 1:
raise a10_ex.FeatureNotConfiguredError(
"Launching instance requires configured networks")
return {
'name': name,
'image': image_id,
'flavor': flavor,
'networks': networks
}
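    # The vthunder_config keys consumed by _device_instance above (derived
    # from its checks): 'glance_image', 'nova_flavor', an optional
    # 'vthunder_management_network', and 'vthunder_data_networks' (a list).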
def create_device_instance(self, vthunder_config, name=None):
instance_configuration = self._device_instance(vthunder_config, name=name)
return self._create_instance(instance_configuration)
def _plumb_port(self, server, network_id, wrong_ips):
"""Look for an existing port on the network
Add one if it doesn't exist
"""
for attached_interface in server.interface_list():
if attached_interface.net_id == network_id:
if any(map(lambda x: x['ip_address'] in wrong_ips, attached_interface.fixed_ips)):
continue
return attached_interface
return server.interface_attach(None, network_id, None)
def plumb_instance(self, instance_id, network_id, allowed_ips, wrong_ips=[]):
server = self._nova_api.servers.get(instance_id)
interface = self._plumb_port(server, network_id, wrong_ips=wrong_ips)
port = self._neutron_api.show_port(interface.port_id)
allowed_address_pairs = port["port"].get("allowed_address_pairs", [])
new_address_pairs = map(lambda ip: {"ip_address": ip}, allowed_ips)
merged_address_pairs = distinct_dicts(allowed_address_pairs + new_address_pairs)
self._neutron_api.update_port(interface.port_id, {
"port": {
"allowed_address_pairs": merged_address_pairs
}
})
return interface.fixed_ips[0]['ip_address']
def plumb_instance_subnet(self, instance_id, subnet_id, allowed_ips, wrong_ips=[]):
subnet = self._neutron_api.show_subnet(subnet_id)
network_id = subnet["subnet"]["network_id"]
return self.plumb_instance(instance_id, network_id, allowed_ips, wrong_ips=wrong_ips)
def distinct_dicts(dicts):
hashable = map(lambda x: tuple(sorted(x.items())), dicts)
return map(dict, set(hashable))
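# Illustrative example: distinct_dicts([{'a': 1}, {'a': 1}, {'b': 2}])
# returns the two distinct dicts; ordering is not guaranteed because the
# implementation deduplicates through a set.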
|
|
import unittest
from harparser import HAR
class TestLog(unittest.TestCase):
def test_json_scaffold_on_derived_classes(self):
class derived(HAR.log):
pass
        tmp = derived({
            "version": "1.2",
            "creator": {"name": "MITMPROXY HARExtractor", "version": "0.1", "comment": ""},
            "pages": [],
            "entries": []
        })
assert tmp.json().startswith('{"log":')
def test_json_scaffold_on_exclusive_params(self):
class derived(HAR.log):
pass
tmp = derived({
"version": "1.2",
"creator": {
"name": "WebInspector",
"version": "537.1"
},
"pages": [
{
"startedDateTime": "2012-08-28T05:14:24.803Z",
"id": "page_1",
"title": "http://www.igvita.com/",
"pageTimings": {
"onContentLoad": 299,
"onLoad": 301
}
}
],
"entries": [
{
"startedDateTime": "2012-08-28T05:14:24.803Z",
"time": 121,
"request": {
"method": "POST",
"url": "http://www.igvita.com/",
"httpVersion": "HTTP/1.1",
"postData": {
"comment": "hello world",
"mimeType": "multipart/form-data",
"text": "foo=bar",
},
"headers": [
{
"name": "Accept-Encoding",
"value": "gzip,deflate,sdch"
},
{
"name": "Accept-Language",
"value": "en-US,en;q=0.8"
},
{
"name": "Connection",
"value": "keep-alive"
},
{
"name": "Accept-Charset",
"value": "ISO-8859-1,utf-8;q=0.7,*;q=0.3"
},
{
"name": "Host",
"value": "www.igvita.com"
},
{
"name": "User-Agent",
"value": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_4) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/21.0.1180.82 Safari/537.1"
},
{
"name": "Accept",
"value": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8"
},
{
"name": "Cache-Control",
"value": "max-age=0"
}
],
"queryString": [],
"cookies": [
],
"headersSize": 678,
"bodySize": 0
},
"response": {
"status": 200,
"statusText": "OK",
"httpVersion": "HTTP/1.1",
"headers": [
{
"name": "Date",
"value": "Tue, 28 Aug 2012 05:14:24 GMT"
},
{
"name": "Via",
"value": "HTTP/1.1 GWA"
},
{
"name": "Transfer-Encoding",
"value": "chunked"
},
{
"name": "Content-Encoding",
"value": "gzip"
},
{
"name": "X-XSS-Protection",
"value": "1; mode=block"
},
{
"name": "X-UA-Compatible",
"value": "IE=Edge,chrome=1"
},
{
"name": "X-Page-Speed",
"value": "50_1_cn"
},
{
"name": "Server",
"value": "nginx/1.0.11"
},
{
"name": "Vary",
"value": "Accept-Encoding"
},
{
"name": "Content-Type",
"value": "text/html; charset=utf-8"
},
{
"name": "Cache-Control",
"value": "max-age=0, no-cache"
},
{
"name": "Expires",
"value": "Tue, 28 Aug 2012 05:14:24 GMT"
}
],
"cookies": [],
"content": {
"size": 9521,
"mimeType": "text/html",
"compression": 5896
},
"redirectURL": "",
"headersSize": 379,
"bodySize": 3625
},
"cache": {},
"timings": {
"blocked": 0,
"dns": -1,
"connect": -1,
"send": 1,
"wait": 112,
"receive": 6,
"ssl": -1
},
"pageref": "page_1"
},
{
"startedDateTime": "2012-08-28T05:14:25.011Z",
"time": 10,
"request": {
"method": "GET",
"url": "http://fonts.googleapis.com/css?family=Open+Sans:400,600",
"httpVersion": "HTTP/1.1",
"headers": [],
"queryString": [
{
"name": "family",
"value": "Open+Sans:400,600"
}
],
"cookies": [],
"headersSize": 71,
"bodySize": 0
},
"response": {
"status": 200,
"statusText": "OK",
"httpVersion": "HTTP/1.1",
"headers": [],
"cookies": [],
"content": {
"size": 542,
"mimeType": "text/css"
},
"redirectURL": "",
"headersSize": 17,
"bodySize": 0
},
"cache": {},
"timings": {
"blocked": 0,
"dns": -1,
"connect": -1,
"send": -1,
"wait": -1,
"receive": 2,
"ssl": -1
},
"pageref": "page_1"
},
{
"startedDateTime": "2012-08-28T05:14:25.017Z",
"time": 31,
"request": {
"method": "GET",
"url": "http://1-ps.googleusercontent.com/h/www.igvita.com/css/style.css.pagespeed.ce.LzjUDNB25e.css",
"httpVersion": "HTTP/1.1",
"headers": [
{
"name": "Accept-Encoding",
"value": "gzip,deflate,sdch"
},
{
"name": "Accept-Language",
"value": "en-US,en;q=0.8"
},
{
"name": "Connection",
"value": "keep-alive"
},
{
"name": "If-Modified-Since",
"value": "Mon, 27 Aug 2012 15:28:34 GMT"
},
{
"name": "Accept-Charset",
"value": "ISO-8859-1,utf-8;q=0.7,*;q=0.3"
},
{
"name": "Host",
"value": "1-ps.googleusercontent.com"
},
{
"name": "User-Agent",
"value": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_4) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/21.0.1180.82 Safari/537.1"
},
{
"name": "Accept",
"value": "text/css,*/*;q=0.1"
},
{
"name": "Cache-Control",
"value": "max-age=0"
},
{
"name": "If-None-Match",
"value": "W/0"
},
{
"name": "Referer",
"value": "http://www.igvita.com/"
}
],
"queryString": [],
"cookies": [],
"headersSize": 539,
"bodySize": 0
},
"response": {
"status": 304,
"statusText": "Not Modified",
"httpVersion": "HTTP/1.1",
"headers": [
{
"name": "Date",
"value": "Mon, 27 Aug 2012 06:01:49 GMT"
},
{
"name": "Age",
"value": "83556"
},
{
"name": "Server",
"value": "GFE/2.0"
},
{
"name": "ETag",
"value": "W/0"
},
{
"name": "Expires",
"value": "Tue, 27 Aug 2013 06:01:49 GMT"
}
],
"cookies": [],
"content": {
"size": 14679,
"mimeType": "text/css"
},
"redirectURL": "",
"headersSize": 146,
"bodySize": 0
},
"cache": {},
"timings": {
"blocked": 0,
"dns": -1,
"connect": -1,
"send": 1,
"wait": 24,
"receive": 2,
"ssl": -1
},
"pageref": "page_1"
},
{
"startedDateTime": "2012-08-28T05:14:25.021Z",
"time": 30,
"request": {
"method": "GET",
"url": "http://1-ps.googleusercontent.com/h/www.igvita.com/js/libs/modernizr.84728.js.pagespeed.jm._DgXLhVY42.js",
"httpVersion": "HTTP/1.1",
"headers": [
{
"name": "Accept-Encoding",
"value": "gzip,deflate,sdch"
},
{
"name": "Accept-Language",
"value": "en-US,en;q=0.8"
},
{
"name": "Connection",
"value": "keep-alive"
},
{
"name": "If-Modified-Since",
"value": "Sat, 25 Aug 2012 14:30:37 GMT"
},
{
"name": "Accept-Charset",
"value": "ISO-8859-1,utf-8;q=0.7,*;q=0.3"
},
{
"name": "Host",
"value": "1-ps.googleusercontent.com"
},
{
"name": "User-Agent",
"value": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_4) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/21.0.1180.82 Safari/537.1"
},
{
"name": "Accept",
"value": "*/*"
},
{
"name": "Cache-Control",
"value": "max-age=0"
},
{
"name": "If-None-Match",
"value": "W/0"
},
{
"name": "Referer",
"value": "http://www.igvita.com/"
}
],
"queryString": [],
"cookies": [],
"headersSize": 536,
"bodySize": 0
},
"response": {
"status": 304,
"statusText": "Not Modified",
"httpVersion": "HTTP/1.1",
"headers": [
{
"name": "Date",
"value": "Sat, 25 Aug 2012 14:30:37 GMT"
},
{
"name": "Age",
"value": "225828"
},
{
"name": "Server",
"value": "GFE/2.0"
},
{
"name": "ETag",
"value": "W/0"
},
{
"name": "Expires",
"value": "Sun, 25 Aug 2013 14:30:37 GMT"
}
],
"cookies": [],
"content": {
"size": 11831,
"mimeType": "text/javascript"
},
"redirectURL": "",
"headersSize": 147,
"bodySize": 0
},
"cache": {},
"timings": {
"blocked": 0,
"dns": -1,
"connect": 0,
"send": 1,
"wait": 27,
"receive": 1,
"ssl": -1
},
"pageref": "page_1"
},
{
"startedDateTime": "2012-08-28T05:14:25.103Z",
"time": 0,
"request": {
"method": "GET",
"url": "http://www.google-analytics.com/ga.js",
"httpVersion": "HTTP/1.1",
"headers": [],
"queryString": [],
"cookies": [],
"headersSize": 52,
"bodySize": 0
},
"response": {
"status": 200,
"statusText": "OK",
"httpVersion": "HTTP/1.1",
"headers": [
{
"name": "Date",
"value": "Mon, 27 Aug 2012 21:57:00 GMT"
},
{
"name": "Content-Encoding",
"value": "gzip"
},
{
"name": "X-Content-Type-Options",
"value": "nosniff, nosniff"
},
{
"name": "Age",
"value": "23052"
},
{
"name": "Last-Modified",
"value": "Thu, 16 Aug 2012 07:05:05 GMT"
},
{
"name": "Server",
"value": "GFE/2.0"
},
{
"name": "Vary",
"value": "Accept-Encoding"
},
{
"name": "Content-Type",
"value": "text/javascript"
},
{
"name": "Expires",
"value": "Tue, 28 Aug 2012 09:57:00 GMT"
},
{
"name": "Cache-Control",
"value": "max-age=43200, public"
},
{
"name": "Content-Length",
"value": "14804"
}
],
"cookies": [],
"content": {
"size": 36893,
"mimeType": "text/javascript"
},
"redirectURL": "",
"headersSize": 17,
"bodySize": 0
},
"cache": {},
"timings": {
"blocked": 0,
"dns": -1,
"connect": -1,
"send": -1,
"wait": -1,
"receive": 0,
"ssl": -1
},
"pageref": "page_1"
},
{
"startedDateTime": "2012-08-28T05:14:25.123Z",
"time": 91,
"request": {
"method": "GET",
"url": "http://1-ps.googleusercontent.com/beacon?org=50_1_cn&ets=load:93&ifr=0&hft=32&url=http%3A%2F%2Fwww.igvita.com%2F",
"httpVersion": "HTTP/1.1",
"headers": [
{
"name": "Accept-Encoding",
"value": "gzip,deflate,sdch"
},
{
"name": "Accept-Language",
"value": "en-US,en;q=0.8"
},
{
"name": "Connection",
"value": "keep-alive"
},
{
"name": "Accept-Charset",
"value": "ISO-8859-1,utf-8;q=0.7,*;q=0.3"
},
{
"name": "Host",
"value": "1-ps.googleusercontent.com"
},
{
"name": "User-Agent",
"value": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_4) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/21.0.1180.82 Safari/537.1"
},
{
"name": "Accept",
"value": "*/*"
},
{
"name": "Referer",
"value": "http://www.igvita.com/"
}
],
"queryString": [
{
"name": "org",
"value": "50_1_cn"
},
{
"name": "ets",
"value": "load:93"
},
{
"name": "ifr",
"value": "0"
},
{
"name": "hft",
"value": "32"
},
{
"name": "url",
"value": "http%3A%2F%2Fwww.igvita.com%2F"
}
],
"cookies": [],
"headersSize": 448,
"bodySize": 0
},
"response": {
"status": 204,
"statusText": "No Content",
"httpVersion": "HTTP/1.1",
"headers": [
{
"name": "Date",
"value": "Tue, 28 Aug 2012 05:14:25 GMT"
},
{
"name": "Content-Length",
"value": "0"
},
{
"name": "X-XSS-Protection",
"value": "1; mode=block"
},
{
"name": "Server",
"value": "PagespeedRewriteProxy 0.1"
},
{
"name": "Content-Type",
"value": "text/plain"
},
{
"name": "Cache-Control",
"value": "no-cache"
}
],
"cookies": [],
"content": {
"size": 0,
"mimeType": "text/plain",
"compression": 0
},
"redirectURL": "",
"headersSize": 202,
"bodySize": 0
},
"cache": {},
"timings": {
"blocked": 0,
"dns": -1,
"connect": -1,
"send": 0,
"wait": 70,
"receive": 7,
"ssl": -1
},
"pageref": "page_1"
}
]
}
)
assert tmp.json().startswith('{"log":')
|
|
#!/usr/bin/env python
"""
@package mi.dataset.parser.ctdpf_ckl_wfp_sio_mule
@file marine-integrations/mi/dataset/parser/ctdpf_ckl_wfp_sio.py
@author cgoodrich
@brief Parser for the ctdpf_ckl_wfp_sio dataset driver
Release notes:
Initial Release
"""
__author__ = 'cgoodrich'
__license__ = 'Apache 2.0'
import re
import struct
import ntplib
from mi.core.log import get_logger
log = get_logger()
from mi.core.common import BaseEnum
from mi.core.instrument.dataset_data_particle import DataParticle, DataParticleKey
from mi.dataset.parser.sio_mule_common import SioParser
DATA_RECORD_BYTES = 11  # Number of bytes in one WC-type data record
TIME_RECORD_BYTES = 8  # Two four-byte timestamps
ETX_BYTE = 1 # The 1 byte ETX marker (\x03)
HEADER_BYTES = 33 # Number of bytes in the SIO header
DECIMATION_SPACER = 2 # This may or may not be present in the input stream
FOOTER_BYTES = DATA_RECORD_BYTES + TIME_RECORD_BYTES + DECIMATION_SPACER + ETX_BYTE
WC_HEADER_REGEX = b'\x01(WC)[0-9]{7}_([0-9a-fA-F]{4})[a-zA-Z]([0-9a-fA-F]{8})_([0-9a-fA-F]{2})_([0-9a-fA-F]{4})\x02'
WC_HEADER_MATCHER = re.compile(WC_HEADER_REGEX)
STD_EOP_REGEX = b'(\xFF{11})([\x00-\xFF]{8})\x03'
STD_EOP_MATCHER = re.compile(STD_EOP_REGEX)
DECI_EOP_REGEX = b'(\xFF{11})([\x00-\xFF]{8})([\x00-\xFF]{2})\x03'
DECI_EOP_MATCHER = re.compile(DECI_EOP_REGEX)
DATA_REGEX = b'([\x00-\xFF]{11})'
DATA_MATCHER = re.compile(DATA_REGEX)
EOP_REGEX = b'(\xFF{11})'
EOP_MATCHER = re.compile(EOP_REGEX)
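# A minimal sketch of the header format the matcher above expects; the sample
# bytes are hypothetical (not taken from a real instrument file). The layout is
# \x01, the 'WC' instrument id, 7 digits, '_', a 4-hex-digit data length, one
# letter, an 8-hex-digit timestamp, '_', 2 hex digits, '_', 4 hex digits and
# \x02 -- exactly HEADER_BYTES (33) bytes in total.
_EXAMPLE_WC_HEADER = b'\x01WC1236501_000Bu51EC763C_04_8D26\x02'
assert len(_EXAMPLE_WC_HEADER) == HEADER_BYTES
assert WC_HEADER_MATCHER.match(_EXAMPLE_WC_HEADER) is not None
assert int(WC_HEADER_MATCHER.match(_EXAMPLE_WC_HEADER).group(2), 16) == 11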
class DataParticleType(BaseEnum):
DATA = 'ctdpf_ckl_wfp_instrument'
METADATA = 'ctdpf_ckl_wfp_sio_mule_metadata'
RECOVERED_DATA = 'ctdpf_ckl_wfp_instrument_recovered'
RECOVERED_METADATA = 'ctdpf_ckl_wfp_metadata_recovered'
class CtdpfCklWfpSioDataParticleKey(BaseEnum):
CONDUCTIVITY = 'conductivity'
TEMPERATURE = 'temperature'
PRESSURE = 'pressure'
class CtdpfCklWfpSioMetadataParticleKey(BaseEnum):
WFP_TIME_ON = 'wfp_time_on'
WFP_TIME_OFF = 'wfp_time_off'
WFP_NUMBER_SAMPLES = 'wfp_number_samples'
WFP_DECIMATION_FACTOR = 'wfp_decimation_factor'
class CtdpfCklWfpSioDataParticle(DataParticle):
"""
Class for creating the data particle
"""
_data_particle_type = DataParticleType.DATA
def _build_parsed_values(self):
"""
Take something in the data format and turn it into an array of dictionaries defining the data in the particle
with the appropriate tag.
"""
result = [self._encode_value(CtdpfCklWfpSioDataParticleKey.CONDUCTIVITY, self.raw_data[0], int),
self._encode_value(CtdpfCklWfpSioDataParticleKey.TEMPERATURE, self.raw_data[1], int),
self._encode_value(CtdpfCklWfpSioDataParticleKey.PRESSURE, self.raw_data[2], int)
]
return result
class CtdpfCklWfpSioMetadataParticle(DataParticle):
"""
Class for creating the metadata particle
"""
_data_particle_type = DataParticleType.METADATA
def _build_parsed_values(self):
"""
Take something in the data format and turn it into
an array of dictionaries defining the data in the particle
with the appropriate tag.
"""
result = [self._encode_value(CtdpfCklWfpSioMetadataParticleKey.WFP_TIME_ON,
self.raw_data[0], int),
self._encode_value(CtdpfCklWfpSioMetadataParticleKey.WFP_TIME_OFF,
self.raw_data[1], int),
self._encode_value(CtdpfCklWfpSioMetadataParticleKey.WFP_NUMBER_SAMPLES,
self.raw_data[2], int)]
# Have to split the result build due to a bug in the _encode_value code.
if self.raw_data[3] is not None:
result.append(self._encode_value(CtdpfCklWfpSioMetadataParticleKey.WFP_DECIMATION_FACTOR,
self.raw_data[3], int))
else:
result.append({DataParticleKey.VALUE_ID: CtdpfCklWfpSioMetadataParticleKey.WFP_DECIMATION_FACTOR,
DataParticleKey.VALUE: None})
return result
class CtdpfCklWfpSioParser(SioParser):
"""
Make use of the common Sio Mule file parser
"""
def __init__(self,
config,
stream_handle,
exception_callback):
super(CtdpfCklWfpSioParser, self).__init__(config,
stream_handle,
exception_callback)
self._metadataSent = False
self._data_length = 0
self._start_index = HEADER_BYTES + 1
self._end_index = 0
self._good_header = False
self._good_footer = False
self._number_of_records = 0
self._record_number = 0.0
self._time_increment = 1
self._decimation_factor = None
self._start_time = 0
self._end_time = 0
self._start_data = 0
self._footer_data = None
self._record_data = None
def process_header(self, chunk):
"""
        Determine if this is the header for a WC file and, if so, record the
        data length and the start/end indices of the chunk.
        Sets self._good_header to True (good header) or False (bad header).
"""
header = chunk[0:HEADER_BYTES]
match = WC_HEADER_MATCHER.match(header)
if match:
self._data_length = int(match.group(2), 16)
self._start_index = match.start(0)
self._end_index = match.end(0) + self._data_length
self._start_data = match.end(0)
self._good_header = True
else:
self._good_header = False
def process_footer(self, chunk):
"""
Determine if this footer has a decimation factor (and what it is) or not.
        Also determine the instrument start/stop times and the number of records in the chunk.
        Sets self._good_footer to True (good footer) or False (bad footer).
"""
footer = chunk[((self._end_index - FOOTER_BYTES) + 1):self._end_index + 1]
std_match = STD_EOP_MATCHER.search(footer)
deci_match = DECI_EOP_MATCHER.search(footer)
final_match = deci_match
if deci_match:
            self._number_of_records = ((self._data_length + 1) - FOOTER_BYTES) / DATA_RECORD_BYTES
self._decimation_factor = struct.unpack('>H', final_match.group(3))[0]
self._good_footer = True
elif std_match:
footer_start = std_match.start(0)
footer_end = std_match.end(0)
footer = footer[footer_start:footer_end]
final_match = STD_EOP_MATCHER.search(footer)
            self._number_of_records = ((self._data_length + 1) - (FOOTER_BYTES - DECIMATION_SPACER)) / DATA_RECORD_BYTES
self._decimation_factor = 0
self._good_footer = True
else:
self._good_footer = False
log.warning('CTDPF_CKL_SIO_MULE: Bad footer detected, cannot parse chunk')
if self._good_footer:
time_fields = struct.unpack('>II', final_match.group(2))
self._start_time = int(time_fields[0])
self._end_time = int(time_fields[1])
if self._number_of_records > 0:
self._time_increment = float(self._end_time - self._start_time) / float(self._number_of_records)
else:
self._good_footer = False
log.warning('CTDPF_CKL_SIO_MULE: Bad footer detected, cannot parse chunk')
# Overrides the parse_chunks routine in SioMuleCommon
def parse_chunks(self):
"""
Parse out any pending data chunks in the chunker. If it is a valid data piece, build a particle,
update the position and timestamp. Go until the chunker has no more valid data.
@retval a list of tuples
"""
result_particles = []
(timestamp, chunk, start, end) = self._chunker.get_next_data_with_index(clean=True)
while chunk is not None:
self.process_header(chunk)
if self._good_header:
self.process_footer(chunk)
if self._good_footer:
timestamp = float(ntplib.system_to_ntp_time(self._start_time))
self._footer_data = (self._start_time,
self._end_time,
self._number_of_records,
self._decimation_factor)
sample = self._extract_sample(CtdpfCklWfpSioMetadataParticle,
None, self._footer_data, internal_timestamp=timestamp)
if sample is not None:
result_particles.append(sample)
more_records = True
data_record = chunk[self._start_data:self._start_data + DATA_RECORD_BYTES]
self._start_data += DATA_RECORD_BYTES
self._record_number = 0.0
timestamp = float(ntplib.system_to_ntp_time(float(self._start_time) +
(self._record_number * self._time_increment)))
while more_records:
data_fields = struct.unpack('>I', '\x00' + data_record[0:3]) + \
struct.unpack('>I', '\x00' + data_record[3:6]) + \
struct.unpack('>I', '\x00' + data_record[6:9]) + \
struct.unpack('>H', data_record[9:11])
self._record_data = (data_fields[0], data_fields[1], data_fields[2])
sample = self._extract_sample(CtdpfCklWfpSioDataParticle,
None, self._record_data, internal_timestamp=timestamp)
if sample is not None:
result_particles.append(sample)
data_record = chunk[self._start_data:self._start_data + DATA_RECORD_BYTES]
self._record_number += 1.0
timestamp = float(ntplib.system_to_ntp_time(float(self._start_time) +
(self._record_number * self._time_increment)))
eop_match = EOP_MATCHER.search(data_record)
if eop_match:
more_records = False
else:
self._start_data += DATA_RECORD_BYTES
(timestamp, chunk, start, end) = self._chunker.get_next_data_with_index(clean=True)
return result_particles
|
|
import sys, gc, inspect
from pympler import asizeof
from datetime import datetime as dt
from IPython import embed
import numpy as np
import theano
from lasagne.objectives import *
from lasagne.regularization import l1, l2
from lasagne.updates import *
from theano import tensor as tensor
from theano.compile.nanguardmode import NanGuardMode
#from core import Input
from teafacto.core.datafeed import DataFeeder, SplitIdxIterator
from teafacto.util import ticktock as TT, issequence
class DynamicLearningParam(object):
def __init__(self, lr):
self.lr = lr
def __call__(self, lr, epoch, maxiter, terrs, verrs): # get new learning rate based on old one, epoch, maxiter, training error, validation errors
raise NotImplementedError("use subclass")
class thresh_lr(DynamicLearningParam):
def __init__(self, lr, thresh=5):
super(thresh_lr, self).__init__(lr)
self.thresh = thresh
def __call__(self, lr, epoch, maxiter, terrs, verrs):
return lr if epoch < self.thresh else 0.
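# Usage sketch: passing a DynamicLearningParam to ModelTrainer.lr() below arms
# per-epoch learning-rate updates, e.g.
#   trainer.lr(thresh_lr(0.1, thresh=5))   # lr stays 0.1 for 5 epochs, then 0.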
class ModelTrainer(object):
def __init__(self, model, gold):
self.model = model
self.goldvar = gold
self.validsetmode= False
self.average_err = True # TODO: do we still need this?
self._autosave = False
self._autosavepath = None
self._autosaveblock = None
# training settings
self.numbats = None
self.learning_rate = None
self.dynamic_lr = None
self.objective = None
self.regularizer = None
self._exp_mov_avg_decay = 0.0
self.optimizer = None
self.traindata = None
self.traingold = None
self.gradconstraints = []
self._sampletransformers = []
# validation settings
self._validinter = 1
self.trainstrategy = self._train_full
self.validsplits = 0
self.validrandom = False
self.validata = None
self.validgold = None
self.validation = None
self.validators = []
self.external_validators = []
self.tt = TT("FluentTrainer")
# taking best
self.besttaker = None
self.bestmodel = None
self.savebest = None
self.smallerbetter = True
# writing
self._writeresultspath = None
#region ====================== settings =============================
#region ################### GENERAL ###################
    def set_numbats(self, s):
        # NB: this fluent setter was originally also named "numbats", but the
        # instance attribute self.numbats assigned in __init__ shadows any
        # method of that name, so the setter could never be called; renamed
        # here to resolve the clash (train() still sets the attribute directly).
        self.numbats = s
        return self
#region ################### LOSSES ##########################
def _set_objective(self, obj):
if self.validsetmode is False:
self.objective = obj
else:
self.validators.append(obj)
def linear_objective(self): # multiplies prediction with gold, assumes prediction is already the loss
# (this is for negative sampling models where the training model already computes the loss)
self._set_objective(lambda x, y: x * y)
return self
def cross_entropy(self):
""" own implementation of categorical cross-entropy """
self._set_objective(self._inner_cross_entropy)
return self
@classmethod
def _inner_cross_entropy(cls, probs, gold, mask=None):
if gold.ndim == 1:
assert(mask is None)
return tensor.nnet.categorical_crossentropy(probs, gold) #-tensor.log(probs[tensor.arange(gold.shape[0]), gold])
elif gold.ndim == 2: # sequences
return cls._inner_seq_neg_log_prob(probs, gold, mask=mask)
def seq_cross_entropy(self): # probs (batsize, seqlen, vocsize) + gold: (batsize, seqlen) ==> sum of neg log-probs of correct seq
""" Own implementation of categorical cross-entropy, applied to a sequence of probabilities that should be multiplied """
self._set_objective(self._inner_seq_neg_log_prob)
return self
@classmethod
def _inner_seq_neg_log_prob(cls, probs, gold, mask=None): # probs: (batsize, seqlen, vocsize) probs, gold: (batsize, seqlen) idxs
#print "using inner seq neg log prob"
def _f(probsmat, goldvec): # probsmat: (seqlen, vocsize), goldvec: (seqlen,)
ce = tensor.nnet.categorical_crossentropy(probsmat, goldvec) #-tensor.log(probsmat[tensor.arange(probsmat.shape[0]), goldvec])
return ce # (seqlen,) ==> (1,)
o, _ = theano.scan(fn=_f, sequences=[probs, gold], outputs_info=None) # out: (batsize, seqlen)
#print "MASK!!" if mask is not None else "NO MASK!!!"
o = o * mask if mask is not None else o # (batsize, seqlen)
o = tensor.sum(o, axis=1)
return o # (batsize,)
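    # Shape sketch for the helper above (hypothetical sizes): with
    # probs: (batsize=2, seqlen=3, vocsize=4) and gold: (batsize=2, seqlen=3),
    # scan maps the per-timestep cross-entropy over the batch, the optional
    # mask zeroes out padded steps, and the sum over axis 1 yields one summed
    # negative log-probability per sequence, i.e. shape (batsize,).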
def squared_error(self):
self._set_objective(squared_error)
return self
def squared_loss(self):
self._set_objective(lambda x, y: (1 - x * y) ** 2) # [-1, +1](batsize, )
return self
def binary_cross_entropy(self): # theano binary cross entropy (through lasagne), probs: (batsize,) float, gold: (batsize,) float
self._set_objective(binary_crossentropy)
return self
def bin_accuracy(self, sep=0):
self._set_objective(lambda x, y: theano.tensor.eq(x > sep, y > sep))
return self
def accuracy(self, top_k=1):
def categorical_accuracy(predictions, targets, top_k=1): # !!! copied from Lasagne # TODO: import properly
if targets.ndim == predictions.ndim:
targets = theano.tensor.argmax(targets, axis=-1)
elif targets.ndim != predictions.ndim - 1:
raise TypeError('rank mismatch between targets and predictions')
if top_k == 1:
# standard categorical accuracy
top = theano.tensor.argmax(predictions, axis=-1)
return theano.tensor.eq(top, targets)
else:
# top-k accuracy
top = theano.tensor.argsort(predictions, axis=-1)
# (Theano cannot index with [..., -top_k:], we need to simulate that)
top = top[[slice(None) for _ in range(top.ndim - 1)] +
[slice(-top_k, None)]]
targets = theano.tensor.shape_padaxis(targets, axis=-1)
return theano.tensor.any(theano.tensor.eq(top, targets), axis=-1)
self._set_objective(lambda x, y: 1-categorical_accuracy(x, y, top_k=top_k))
return self
def seq_accuracy(self): # sequences must be exactly the same
def inner(probs, gold, mask=None):
if gold.ndim == probs.ndim:
gold = tensor.argmax(gold, axis=-1)
elif gold.ndim != probs.ndim - 1:
raise TypeError('rank mismatch between targets and predictions')
top = tensor.argmax(probs, axis=-1)
assert(gold.ndim == 2 and top.ndim == 2)
assert(mask is None or mask.ndim == 2)
if mask is not None:
gold *= mask
top *= mask
diff = tensor.sum(abs(top - gold), axis=1)
return tensor.eq(diff, tensor.zeros_like(diff))
self._set_objective(inner)
return self
def hinge_loss(self, margin=1., labelbin=True): # gold must be -1 or 1 if labelbin if False, otherwise 0 or 1
def inner(preds, gold): # preds: (batsize,), gold: (batsize,)
if labelbin is True:
gold = 2 * gold - 1
return tensor.nnet.relu(margin - gold * preds)
self._set_objective(inner)
return self
    def multiclass_hinge_loss(self, margin=1.):
        def inner(preds, gold):  # preds: (batsize, numclasses) scores, gold: int:(batsize)
            # delegate to lasagne.objectives.multiclass_hinge_loss (pulled in
            # by the wildcard import above); inside this closure the name
            # resolves to the module-level function, not this method
            return multiclass_hinge_loss(preds, gold, delta=margin)
        self._set_objective(inner)
        return self
def log_loss(self):
""" NOT cross-entropy, BUT log(1+e^(-t*y))"""
def inner(preds, gold): # preds: (batsize,) float, gold: (batsize,) float
return tensor.nnet.softplus(-gold*preds)
self._set_objective(inner)
return self
#endregion
#region ################### GRADIENT CONSTRAINTS ############ --> applied in the order that they were added
def grad_total_norm(self, max_norm, epsilon=1e-7):
self.gradconstraints.append(lambda allgrads: total_norm_constraint(allgrads, max_norm, epsilon=epsilon))
return self
def grad_add_constraintf(self, f):
self.gradconstraints.append(f)
return self
def _gradconstrain(self, allgrads):
ret = allgrads
for gcf in self.gradconstraints:
ret = gcf(ret)
return ret
# !!! can add more
#endregion
#region #################### REGULARIZERS ####################
def _regul(self, regf, amount, params):
return amount * reduce(lambda x, y: x+y, [regf(x.d)*x.regmul for x in params], 0)
def l2(self, amount):
self.regularizer = lambda x: self._regul(l2, amount, x)
return self
def l1(self, amount):
self.regularizer = lambda x: self._regul(l1, amount, x)
return self
def exp_mov_avg(self, decay=0.0):
self._exp_mov_avg_decay = decay
return self
#endregion
#region ################### LEARNING RATE ###################
def lr(self, lr):
self._setlr(lr)
return self
def _setlr(self, lr):
if isinstance(lr, DynamicLearningParam):
self.dynamic_lr = lr
lr = lr.lr
self.learning_rate = theano.shared(np.cast[theano.config.floatX](lr))
def _update_lr(self, epoch, maxepoch, terrs, verrs):
if self.dynamic_lr is not None:
self.learning_rate.set_value(
np.cast[theano.config.floatX](
self.dynamic_lr(self.learning_rate.get_value(),
epoch, maxepoch, terrs, verrs)))
def dlr_thresh(self, thresh=5):
self.dynamic_lr = thresh_lr(self.learning_rate, thresh=thresh)
return self
    def dlr_exp_decay(self, decay=0.5):
        # a minimal sketch (assumption: the current learning rate is
        # multiplied by `decay` after every epoch, mirroring dlr_thresh above)
        class _exp_decay_lr(DynamicLearningParam):
            def __call__(self, lr, epoch, maxiter, terrs, verrs):
                return lr * decay
        self.dynamic_lr = _exp_decay_lr(self.learning_rate)
        return self
#endregion
#region #################### OPTIMIZERS ######################
def sgd(self, lr):
self._setlr(lr)
self.optimizer = lambda x, y, l: sgd(x, y, learning_rate=l)
return self
def momentum(self, lr, mome=0.9):
self._setlr(lr)
self.optimizer = lambda x, y, l: momentum(x, y, learning_rate=l, momentum=mome)
return self
def nesterov_momentum(self, lr, momentum=0.9):
self._setlr(lr)
self.optimizer = lambda x, y, l: nesterov_momentum(x, y, learning_rate=l, momentum=momentum)
return self
def adagrad(self, lr=1.0, epsilon=1e-6):
self._setlr(lr)
self.optimizer = lambda x, y, l: adagrad(x, y, learning_rate=l, epsilon=epsilon)
return self
def rmsprop(self, lr=1., rho=0.9, epsilon=1e-6):
self._setlr(lr)
self.optimizer = lambda x, y, l: rmsprop(x, y, learning_rate=l, rho=rho, epsilon=epsilon)
return self
def adadelta(self, lr=1., rho=0.95, epsilon=1e-6):
self._setlr(lr)
self.optimizer = lambda x, y, l: adadelta(x, y, learning_rate=l, rho=rho, epsilon=epsilon)
return self
def adam(self, lr=0.001, b1=0.9, b2=0.999, epsilon=1e-8):
self._setlr(lr)
self.optimizer = lambda x, y, l: adam(x, y, learning_rate=l, beta1=b1, beta2=b2, epsilon=epsilon)
return self
#endregion
#region ################### VALIDATION ####################### --> use one of following
def validinter(self, validinter=1):
self._validinter = validinter
return self
def autovalidate(self, splits=5, random=True): # validates on the same data as training data
self.validate_on(self.traindata, self.traingold, splits=splits, random=random)
self.validsetmode = True
return self
def split_validate(self, splits=5, random=True):
self.trainstrategy = self._train_split
self.validsplits = splits
self.validrandom = random
self.validsetmode = True
return self
def validate_on(self, data, gold=None, splits=1, random=True):
self.trainstrategy = self._train_validdata
self.validdata = data
self.validgold = gold
self.validsplits = splits
self.validrandom = random
self.validsetmode = True
return self
def cross_validate(self, splits=5, random=False):
self.trainstrategy = self._train_cross_valid
self.validsplits = splits
self.validrandom = random
self.validsetmode = True
return self
def extvalid(self, evaluator):
self.external_validators.append(evaluator)
return self
#endregion
#region ######################### SELECTING THE BEST ######################
def takebest(self, f=None, save=False, smallerbetter=True):
if f is None:
f = lambda x: x[1] # pick the model with the best first validation score
self.besttaker = f
self.bestmodel = (None, float("inf"))
self.savebest = save
self.smallerbetter = smallerbetter
return self
#endregion
#endregion
#region ====================== execution ============================
#region ######################### ACTUAL TRAINING #########################
def traincheck(self):
assert(self.optimizer is not None)
assert(self.objective is not None)
assert(self.traindata is not None)
assert(self.traingold is not None)
def train(self, numbats, epochs, returnerrors=False, _skiptrain=False):
self.traincheck()
self.numbats = numbats
self.maxiter = epochs
errors = self.trainstrategy(_skiptrain=_skiptrain) # trains according to chosen training strategy, returns errors
if self.besttaker is not None and self.savebest is None: # unfreezes best model if best choosing was chosen
self.model = self.model.__class__.unfreeze(self.bestmodel[0])
self.tt.tock("unfroze best model (%.3f) - " % self.bestmodel[1]).tick()
ret = self.model
if returnerrors:
ret = (ret,) + errors
return ret
def train_lambda(self, numbats, batprop=1): # TODO: _skiptrain???
self.traincheck()
self.numbats = numbats
if self.trainstrategy == self._train_cross_valid:
raise NotImplementedError("CV training not supported with lambda training yet")
trainf, validf, traind, validd = self.trainstrategy(_lambda=True)
return ProtoTrainer(trainf, validf, traind, validd, batprop, self)
def get_learning_rate(self):
return self.learning_rate
def autobuild_model(self, model, *traindata, **kw):
return model.autobuild(*traindata, **kw)
def buildtrainfun(self, model, batsize):
self.tt.tick("training - autobuilding")
with model.trainmode(True):
inps, outps = self.autobuild_model(model, *self.traindata, _trainmode=True, _batsize=batsize)
assert(len(outps) == 1)
outp = outps[0]
self.tt.tock("training - autobuilt")
self.tt.tick("compiling training function")
params = outp.allparams
nonparams = [p for p in params if not p.lrmul > 0]
params = [p for p in params if p.lrmul > 0]
scanupdates = outp.allupdates
inputs = inps
loss, newinp = self.buildlosses(outp, [self.objective])
loss = loss[0]
if newinp is not None:
inputs = newinp
if self.regularizer is not None:
reg = self.regularizer(params)
cost = loss+reg
else:
cost = loss
# theano.printing.debugprint(cost)
# theano.printing.pydotprint(cost, outfile="pics/debug.png")
updates = []
print "params:\n " + "".join(
map(lambda x: "\t%s\n" % str(x),
sorted(params, key=lambda x: str(x))))
if len(nonparams) > 0:
print "non-params:\n " + "".join(
map(lambda x: "\t%s\n" % str(x),
sorted(nonparams, key=lambda x: str(x))))
print "\n\t\t (in buildtrainfun(), trainer.py) \n"
self.tt.msg("computing gradients")
#grads = []
#for x in params:
# self.tt.msg("computing gradient for %s" % str(x))
# grads.append(tensor.grad(cost, x.d))
grads = tensor.grad(cost, [x.d for x in params]) # compute gradient
self.tt.msg("computed gradients")
grads = self._gradconstrain(grads)
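        # apply the optimizer one parameter at a time so that each parameter's
        # lrmul scales its own learning rate, then route the proposed value
        # through the parameter's constraint function before recording the update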
for param, grad in zip(params, grads):
upds = self.optimizer([grad], [param.d], self.get_learning_rate() * param.lrmul)
newparamval = None
for upd in upds:
broken = False
for para in params:
if para.d == upd:
newparamval = upds[upd]
newparamval = para.constraintf()(newparamval)
updates.append((upd, newparamval))
broken = True
break
if not broken:
updates.append((upd, upds[upd]))
if self._exp_mov_avg_decay > 0:
# initialize ema_value in params and add EMA updates
param.ema_value = theano.shared(param.value.get_value())
updates.append((param.ema_value,
param.ema_value * self._exp_mov_avg_decay + newparamval * (1 - self._exp_mov_avg_decay)))
#print updates
#embed()
finputs = [x.d for x in inputs] + [self.goldvar]
allupdates = updates + scanupdates.items()
trainf = theano.function(
inputs=finputs,
outputs=[cost],
updates=allupdates,
#mode=NanGuardMode(nan_is_error=True, inf_is_error=False, big_is_error=False)
# TODO: enabling NanGuard with Dropout doesn't work --> see Theano.git/issues/4823
)
self.tt.tock("training function compiled")
return trainf
def buildlosses(self, output, objs):
acc = []
for objective in objs:
if "mask" in inspect.getargspec(objective)[0]:
mask = output.mask.d if output.mask is not None else None
obj = objective(output.d, self.goldvar, mask=mask)
else:
assert(output.mask is None)
obj = objective(output.d, self.goldvar)
objagg = aggregate(obj, mode="mean" if self.average_err is True else "sum")
acc.append(objagg)
return acc, None
def getvalidfun(self, model, batsize):
symbolic_validfun = self.buildvalidfun(model, batsize)
if len(self.external_validators) == 0:
return symbolic_validfun
else:
extravalid = self.external_validators
def validfun(*sampleinps):
ret = symbolic_validfun(*sampleinps)
for ev in extravalid:
a = ev(*sampleinps)
if not issequence(a):
a = [a]
else:
if isinstance(a, tuple):
a = list(a)
ret += a
return ret
return validfun
def buildvalidfun(self, model, batsize):
self.tt.tick("validation - autobuilding")
inps, outps = self.autobuild_model(model, *self.traindata, _trainmode=False, _batsize=batsize)
assert(len(outps) == 1)
outp = outps[0]
self.tt.tock("validation - autobuilt")
self.tt.tick("compiling validation function")
metrics, newinp = self.buildlosses(outp, self.validators)
inputs = newinp if newinp is not None else inps
ret = None
if len(metrics) > 0:
ret = theano.function(inputs=[x.d for x in inputs] + [self.goldvar],
outputs=metrics,
mode=NanGuardMode(nan_is_error=True, inf_is_error=False, big_is_error=True)
)
else:
self.tt.msg("NO VALIDATION METRICS DEFINED, RETURNS NONE")
self.tt.tock("validation function compiled")
return ret
#endregion
#region ################## TRAINING STRATEGIES ############
def _train_full(self, _lambda=False, _skiptrain=False): # on all data, no validation
df = DataFeeder(*(self.traindata + [self.traingold])).numbats(self.numbats)
trainf = self.buildtrainfun(self.model, df.batsize)
if _lambda:
return trainf, None, df, None
else:
err, _ = self.trainloop(
trainf=self.getbatchloop(trainf, df, phase="TRAIN"),
_skiptrain=_skiptrain)
return err, None, None, None
def _train_validdata(self, _lambda=False, _skiptrain=False):
df = DataFeeder(*(self.traindata + [self.traingold])).numbats(self.numbats)
vdf = DataFeeder(*(self.validdata + [self.validgold]), random=False)
vdf.batsize = df.batsize
trainf = self.buildtrainfun(self.model, df.batsize)
validf = self.getvalidfun(self.model, vdf.batsize)
#embed()
#dfvalid = df.osplit(split=self.validsplits, random=self.validrandom)
if _lambda:
return trainf, validf, df, vdf
else:
err, verr = self.trainloop(
trainf=self.getbatchloop(trainf, df, phase="TRAIN"),
validf=self.getbatchloop(validf, vdf, phase="VALID"),
_skiptrain=_skiptrain)
return err, verr, None, None
def _train_split(self, _lambda=False, _skiptrain=False):
df = DataFeeder(*(self.traindata + [self.traingold]))
dftrain, dfvalid = df.split(self.validsplits, self.validrandom, df_randoms=(True, False))
dftrain.numbats(self.numbats)
dfvalid.batsize = dftrain.batsize
trainf = self.buildtrainfun(self.model, dftrain.batsize)
validf = self.getvalidfun(self.model, dfvalid.batsize)
if _lambda:
return trainf, validf, dftrain, dfvalid
else:
err, verr = self.trainloop(
trainf=self.getbatchloop(trainf, dftrain, phase="TRAIN"),
validf=self.getbatchloop(validf, dfvalid, phase="VALID"),
_skiptrain=_skiptrain)
return err, verr, None, None
def _train_cross_valid(self, _skiptrain=False):
df = DataFeeder(*(self.traindata + [self.traingold]))
splitter = SplitIdxIterator(df.size, split=self.validsplits, random=self.validrandom, folds=self.validsplits)
err = []
verr = []
c = 0
for splitidxs in splitter:
tf, vf = df.isplit(splitidxs, df_randoms=(True, False))
tf.numbats(self.numbats)
vf.batsize = tf.batsize
trainf = self.buildtrainfun(self.model, tf.batsize)
validf = self.getvalidfun(self.model, vf.batsize)
serr, sverr = self.trainloop(
trainf=self.getbatchloop(trainf, tf, phase="TRAIN"),
validf=self.getbatchloop(validf, vf, phase="VALID"),
_skiptrain=_skiptrain)
err.append(serr)
verr.append(sverr)
self.resetmodel(self.model)
err = np.asarray(err)
avgerr = np.mean(err, axis=0)
verr = np.asarray(verr)
avgverr = np.mean(verr, axis=0)
self.tt.tock("done")
return avgerr, avgverr, err, verr
#endregion
def resetmodel(self, model): # TODO: very hacky
_, outs = model.autobuild(*self.traindata)
params = outs[0].allparams
for param in params:
param.reset()
#region ############# TRAINING LOOPS ##################
def trainloop(self, trainf, validf=None, _skiptrain=False):
self.tt.tick("training")
err = []
verr = []
stop = self.maxiter == 0
self.currentiter = 1
evalinter = self._validinter
evalcount = evalinter
tt = TT("iter")
prevverre = [float("inf")] * len(self.validators)
writeresf = None
if self._writeresultspath is not None:
writeresf = open(self._writeresultspath, "w", 1)
while not stop:
tt.tick("%d/%d" % (self.currentiter, int(self.maxiter)))
if _skiptrain:
tt.msg("skipping training")
erre = [0.]
else:
erre = trainf()
if self.currentiter == self.maxiter:
stop = True
self.currentiter += 1
err.append(erre)
#print "done training"
verre = prevverre
restowrite = ""
if self._autosave:
self.save()
if validf is not None and self.currentiter % evalinter == 0: # validate and print
verre = validf()
prevverre = verre
verr.append(verre)
ttmsg = "training error: %s \t validation error: %s" \
% ("%.4f" % erre[0],
" - ".join(map(lambda x: "%.4f" % x, verre)))
restowrite = "\t".join(map(str, erre[0:1] + verre))
else:
ttmsg = "training error: %s" % " - ".join(map(lambda x: "%.4f" % x, erre))
restowrite = str(erre[0])
if writeresf is not None:
writeresf.write("{}\t{}\n".format(self.currentiter - 1, restowrite))
# retaining the best
if self.besttaker is not None:
modelscore = self.besttaker(([erre]+verre+[self.currentiter]))
smallerbetter = 1 if self.smallerbetter else -1
if smallerbetter * modelscore < smallerbetter * self.bestmodel[1]:
if self.savebest:
self.save(suffix=".best")
self.bestmodel = (None, modelscore)
else:
#tt.tock("freezing best with score %.3f (prev: %.3f)" % (modelscore, self.bestmodel[1]), prefix="-").tick()
self.bestmodel = (self.save(freeze=True, filepath=False), modelscore)
tt.tock(ttmsg + "\t", prefix="-")
self._update_lr(self.currentiter, self.maxiter, err, verr)
evalcount += 1
if writeresf is not None:
writeresf.close()
self.tt.tock("trained").tick()
return err, verr
def getbatchloop(self, trainf, datafeeder, verbose=True, phase="TEST"):
'''
returns the batch loop, loaded with the provided trainf training function and samplegen sample generator
'''
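        # batchloop() feeds every batch from the datafeeder through trainf and
        # accumulates per-objective errors; with average_err the per-batch
        # errors are weighted by batch size and normalised by the number of
        # examples seen, otherwise they are simply summed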
sampletransf = self._transformsamples
this = self
def batchloop():
c = 0
numex = 0
prevperc = -1.
terr = [0.0]
numdigs = 2
tt = TT("iter progress", verbose=verbose)
tt.tick()
datafeeder.reset()
while datafeeder.hasnextbatch():
perc = round(c*100.*(10**numdigs)/datafeeder.getnumbats())/(10**numdigs)
if perc > prevperc:
terr0 = terr[0] * 1.0 / numex if numex > 0 else 0.0
s = ("%." + str(numdigs) + "f%% \t error: %.3f") % (perc, terr0)
tt.live(s)
prevperc = perc
sampleinps, batsize = datafeeder.nextbatch(withbatchsize=True)
numex += batsize
#embed()
sampleinps = sampletransf(*sampleinps, phase=phase)
try:
eterr = trainf(*sampleinps)
if len(terr) != len(eterr) and terr.count(0.0) == len(terr):
terr = [0.0]*len(eterr)
except Exception, e:
raise e
if self.average_err is True:
terr = [xterr + xeterr * batsize for xterr, xeterr in zip(terr, eterr)]
else:
terr = [xterr + xeterr for xterr, xeterr in zip(terr, eterr)]
c += 1
tt.stoplive()
if self.average_err is True:
terr = [xterr * 1.0 / numex for xterr in terr]
return terr
return batchloop
def _transformsamples(self, *s, **kw):
phase = kw["phase"] if "phase" in kw else None
if len(self._sampletransformers) == 0:
return s
else:
for sampletransformer in self._sampletransformers:
s = sampletransformer(*s, phase=phase)
return s
def sampletransform(self, *f):
self._sampletransformers = f
return self
#endregion
#endregion
@property
def autosave(self):
self._autosave = True
return self
def autosavethis(self, block, p):
self._autosave = True
self._autosaveblock = block
self._autosavepath = p
return self
def writeresultstofile(self, p):
self._writeresultspath = p
return self
def save(self, model=None, filepath=None, suffix="", freeze=False):
model = model if model is not None else \
self.model if self._autosaveblock is None else \
self._autosaveblock
if filepath is not False:
filepath = filepath if filepath is not None else self._autosavepath
model.save(filepath=filepath + suffix)
else:
return model.freeze()
class ProtoTrainer(object):
def __init__(self, trainf, validf, traind, validd, batprop, trainer):
self.trainf, self.validf, self.traind, self.validd = trainf, validf, traind, validd
self.batprop = batprop
self.original = trainer
def interleave(self, *otherprototrainers):
return InterleavedTrainer(self, *otherprototrainers)
class InterleavedTrainer(object):
def __init__(self, maintrainer, *othertrainers):
self.spts = [maintrainer] + list(othertrainers)
self.tt = TT("InterleavedTrainer")
self.currentiter = 0
self._validinter = self.spts[0].original._validinter
def train(self, epochs=10, verbose=True):
self.maxiter = epochs
tf = self.getbatchloop([spt.trainf for spt in self.spts],
[spt.traind for spt in self.spts],
verbose=verbose, phase="TRAIN")
subvfs = []
for spt in self.spts:
if spt.validf is not None and spt.validd is not None:
subvf = spt.original.getbatchloop(spt.validf, spt.validd, verbose=verbose, phase="TEST")
subvfs.append(subvf)
else:
subvfs.append(None)
def vf():
return [subvf() if subvf is not None else None for subvf in subvfs]
return self.trainloop(tf, vf)
# region ############# TRAINING LOOPS ##################
def trainloop(self, tf, vf):
self.tt.tick("training")
stop = self.maxiter == 0
self.currentiter = 1
evalinter = self._validinter
evalcount = evalinter
tt = TT("iter")
err = []
verr = []
prevverre = [[float("inf")] * len(subt.original.validators)
for subt in self.spts]
while not stop:
tt.tick("%d/%d" % (self.currentiter, int(self.maxiter)))
erre = tf()
if self.currentiter == self.maxiter:
stop = True
self.currentiter += 1
err.append(erre)
# print "done training"
verre = prevverre
if self.currentiter % evalinter == 0: # validate and print
verre = vf()
prevverre = verre
verr.append(verre)
#embed() # TODO
# retaining the best of main trainer
if self.spts[0].original.besttaker is not None:
modelscore = self.spts[0].original.besttaker(([erre[0]] + verre[0] + [self.currentiter]))
if modelscore < self.spts[0].original.bestmodel[1]:
# tt.tock("freezing best with score %.3f (prev: %.3f)" % (modelscore, self.bestmodel[1]), prefix="-").tick()
self.spts[0].original.bestmodel = (self.spts[0].original.model.freeze(), modelscore)
ttlines = []
for i in range(len(erre)):
if verre[i] is not None:
ttlines.append("\t%s:\ttraining error: %s \t validation error: %s" \
% (i+1, "%.4f" % erre[i][0],
" - ".join(map(lambda x: "%.4f" % x, verre[i]))))
else:
ttlines.append("\t%s:\ttraining error: %s"
% (i+1, " - ".join(map(lambda x: "%.4f" % x, erre[i]))))
tt.tock("\n".join(ttlines) + "\n", prefix="-")
for i, subt in enumerate(self.spts):
subt.original._update_lr(self.currentiter, self.maxiter,
[errx[i] for errx in err],
[verrx[i] for verrx in verr])
evalcount += 1
# embed()
for subt in self.spts:
if subt.original._autosave:
subt.original.save()
self.tt.tock("trained").tick()
return err, verr
def getbatchloop(self, trainfs, datafeeders, verbose=True, phase="TEST"):
'''
returns the batch loop, loaded with the provided trainf training function and samplegen sample generator
'''
sampletransfs = [spt.original._transformsamples for spt in self.spts]
this = self
def batchloop():
c = 0
prevperc = -1.
terrs = [[0.0] if tf is not None else None for tf in trainfs]
numdigs = 2
tt = TT("iter progress", verbose=verbose)
tt.tick()
for dataf in datafeeders:
if dataf is not None:
dataf.reset()
while datafeeders[0].hasnextbatch():
perc = round(c * 100. * (10 ** numdigs) / datafeeders[0].getnumbats()) / (10 ** numdigs)
if perc > prevperc:
s = ("%." + str(numdigs) + "f%% \t error: %s") \
% (perc, " - ".join(map(lambda x: "%.3f" % x[0], terrs)))
tt.live(s)
prevperc = perc
for df in datafeeders:
if not df.hasnextbatch():
df.reset()
sampleinps = [df.nextbatch() for df in datafeeders]
# embed()
sampleinps = [stf(*si, phase=phase) for (stf, si) in zip(sampletransfs, sampleinps)]
try:
eterrs = [tf(*si) for (tf, si) in zip(trainfs, sampleinps)]
for i in range(len(terrs)):
if len(terrs[i]) != len(eterrs[i]) and terrs[i].count(0.0) == len(terrs[i]):
terrs[i] = [0.0] * len(eterrs[i])
except Exception, e:
raise e
for i, subt in enumerate(this.spts):
if subt.original.average_err is True:
terrs[i] = [xterr * (1.0 * (c) / (c + 1)) + xeterr * (1.0 / (c + 1))
for xterr, xeterr in zip(terrs[i], eterrs[i])]
else:
terrs[i] = [xterr + xeterr for xterr, xeterr in zip(terrs[i], eterrs[i])]
c += 1
tt.stoplive()
return terrs
return batchloop
class NSModelTrainer(ModelTrainer):
""" Model trainer using negative sampling """
def __init__(self, model, gold, nrate, nsamgen, nrate_valid=None):
super(NSModelTrainer, self).__init__(model, gold)
self.ns_nrate = nrate
self.ns_nrate_valid = nrate if nrate_valid is None else nrate_valid
self.ns_nsamgen = nsamgen
def _transformsamples(self, *s, **kw):
# phase in kw
""" apply negative sampling function and neg sam rate """
psams = s[:-1]
acc = []
for i in range(self.ns_nrate):
nsams = self.ns_nsamgen(*psams)
news = psams + nsams + (s[-1],)
ret = []
if len(acc) == 0: # first one
ret = news
else:
for x, y in zip(acc, news):
ret.append(np.concatenate([x, y], axis=0))
acc = ret
return acc
def autobuild_model(self, model, *traindata, **kw):
return model.autobuild(*(traindata + traindata))
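# A minimal usage sketch of the fluent API above. `model`, `goldvar`, `Xtrain`
# and `ytrain` are hypothetical placeholders, not defined in this module:
#
#   trainer = ModelTrainer(model, goldvar)
#   trainer.traindata, trainer.traingold = [Xtrain], ytrain
#   trainer.cross_entropy().l2(1e-4).adam(lr=0.001) \
#          .split_validate(splits=10, random=True).cross_entropy().accuracy()
#   trained = trainer.train(numbats=100, epochs=50)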
|
|
# coding: utf-8
from distutils.version import LooseVersion
from dtest import Tester, debug, create_ks
from tools.decorators import since
from tools.jmxutils import make_mbean, JolokiaAgent, remove_perf_disable_shared_mem
class TestCqlTracing(Tester):
"""
Smoke test that the default implementation for tracing works. Also test
that Cassandra falls back to the default tracing implementation when the
user specifies an invalid implementation.
# TODO write a mock Tracing implementation and assert, at least, it can be
# instantiated when specified as a custom tracing implementation.
"""
def prepare(self, create_keyspace=True, nodes=3, rf=3, protocol_version=3, jvm_args=None, random_partitioner=False, **kwargs):
if jvm_args is None:
jvm_args = []
jvm_args.append('-Dcassandra.wait_for_tracing_events_timeout_secs=15')
cluster = self.cluster
if random_partitioner:
cluster.set_partitioner("org.apache.cassandra.dht.RandomPartitioner")
else:
cluster.set_partitioner("org.apache.cassandra.dht.Murmur3Partitioner")
cluster.populate(nodes)
node1 = cluster.nodelist()[0]
remove_perf_disable_shared_mem(node1) # necessary for jmx
cluster.start(wait_for_binary_proto=True, jvm_args=jvm_args)
session = self.patient_cql_connection(node1, protocol_version=protocol_version)
if create_keyspace:
create_ks(session, 'ks', rf)
return session
def trace(self, session):
"""
* CREATE a table
* enable TRACING
* SELECT on a known system table and assert it ran with tracing by checking the output
* INSERT a row into the created system table and assert it ran with tracing
* SELECT from the table and assert it ran with tracing
@param session The Session object to use to create a table.
@jira_ticket CASSANDRA-10392
"""
node1 = self.cluster.nodelist()[0]
# Create
session.execute("""
CREATE TABLE ks.users (
userid uuid PRIMARY KEY,
firstname text,
lastname text,
age int
);
""")
out, err, _ = node1.run_cqlsh('TRACING ON')
self.assertIn('Tracing is enabled', out)
out, err, _ = node1.run_cqlsh('TRACING ON; SELECT * from system.peers')
self.assertIn('Tracing session: ', out)
self.assertIn('Request complete ', out)
# Inserts
out, err, _ = node1.run_cqlsh(
"CONSISTENCY ALL; TRACING ON; "
"INSERT INTO ks.users (userid, firstname, lastname, age) "
"VALUES (550e8400-e29b-41d4-a716-446655440000, 'Frodo', 'Baggins', 32)")
debug(out)
self.assertIn('Tracing session: ', out)
self.assertIn('/127.0.0.1', out)
self.assertIn('/127.0.0.2', out)
self.assertIn('/127.0.0.3', out)
self.assertIn('Parsing INSERT INTO ks.users ', out)
self.assertIn('Request complete ', out)
# Queries
out, err, _ = node1.run_cqlsh('CONSISTENCY ALL; TRACING ON; '
'SELECT firstname, lastname '
'FROM ks.users WHERE userid = 550e8400-e29b-41d4-a716-446655440000')
debug(out)
self.assertIn('Tracing session: ', out)
self.assertIn(' 127.0.0.1 ', out)
self.assertIn(' 127.0.0.2 ', out)
self.assertIn(' 127.0.0.3 ', out)
self.assertIn('Request complete ', out)
self.assertIn(" Frodo | Baggins", out)
@since('2.2')
def tracing_simple_test(self):
"""
Test tracing using the default tracing class. See trace().
@jira_ticket CASSANDRA-10392
@jira_ticket CASSANDRA-11598
# Restricted to 2.2+ due to flakiness on 2.1. See CASSANDRA-11598 and CASSANDRA-12407 for details.
"""
session = self.prepare()
self.trace(session)
@since('3.4')
def tracing_unknown_impl_test(self):
"""
Test that Cassandra logs an error, but keeps its default tracing
behavior, when a nonexistent tracing class is specified.
* set a nonexistent custom tracing class
* run trace()
* if running the test on a version with custom tracing classes
implemented, check that an error about the nonexistent class was
logged.
@jira_ticket CASSANDRA-10392
"""
expected_error = 'Cannot use class junk for tracing'
self.ignore_log_patterns = [expected_error]
session = self.prepare(jvm_args=['-Dcassandra.custom_tracing_class=junk'])
self.trace(session)
errs = self.cluster.nodelist()[0].grep_log_for_errors()
debug('Errors after attempted trace with unknown tracing class: {errs}'.format(errs=errs))
self.assertEqual(len(errs), 1)
if self.cluster.version() >= LooseVersion('3.10'):
# See CASSANDRA-11706 and PR #1281
self.assertTrue(len(errs[0]) > 0)
else:
self.assertEqual(len(errs[0]), 1)
err = errs[0][0]
self.assertIn(expected_error, err)
@since('3.4')
def tracing_default_impl_test(self):
"""
Test that Cassandra logs an error, but keeps its default tracing
behavior, when the default tracing class is specified.
This doesn't work because the constructor for the default
implementation isn't accessible.
* set the default tracing class as a custom tracing class
* run trace()
* if running the test on a version with custom tracing classes
implemented, check that an error about the class was
logged.
@jira_ticket CASSANDRA-10392
"""
expected_error = 'Cannot use class org.apache.cassandra.tracing.TracingImpl'
self.ignore_log_patterns = [expected_error]
session = self.prepare(jvm_args=['-Dcassandra.custom_tracing_class=org.apache.cassandra.tracing.TracingImpl'])
self.trace(session)
errs = self.cluster.nodelist()[0].grep_log_for_errors()
debug('Errors after attempted trace with default tracing class: {errs}'.format(errs=errs))
self.assertEqual(len(errs), 1)
if self.cluster.version() >= LooseVersion('3.10'):
# See CASSANDRA-11706 and PR #1281
self.assertTrue(len(errs[0]) > 0)
else:
self.assertEqual(len(errs[0]), 1)
err = errs[0][0]
self.assertIn(expected_error, err)
# make sure it logged the error for the correct reason. this isn't
# part of the expected error to avoid having to escape parens and
# periods for regexes.
if self.cluster.version() >= LooseVersion('3.10'):
# See CASSANDRA-11706 and PR #1281
check_for_errs_in = errs[0][1]
else:
check_for_errs_in = err
self.assertIn("Default constructor for Tracing class "
"'org.apache.cassandra.tracing.TracingImpl' is inaccessible.",
check_for_errs_in)
@since('3.0')
def test_tracing_does_not_interfere_with_digest_calculation(self):
"""
Test that enabling tracing doesn't interfere with digest responses when using RandomPartitioner.
The use of a threadlocal MessageDigest for generating both DigestResponse messages and for
calculating tokens meant that the DigestResponse was always incorrect when both RP and tracing
were enabled, leading to unnecessary data reads.
@jira_ticket CASSANDRA-13964
"""
session = self.prepare(random_partitioner=True)
self.trace(session)
node1 = self.cluster.nodelist()[0]
rr_count = make_mbean('metrics', type='ReadRepair', name='RepairedBlocking')
with JolokiaAgent(node1) as jmx:
# the MBean may not have been initialized, in which case Jolokia agent will return
# a HTTP 404 response. If we receive such, we know that no digest mismatch was reported
# If we are able to read the MBean attribute, assert that the count is 0
if jmx.has_mbean(rr_count):
# expect 0 digest mismatches
self.assertEqual(0, jmx.read_attribute(rr_count, 'Count'))
else:
pass
|
|
# coding=utf-8
r"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from twilio.base import deserialize
from twilio.base import values
from twilio.base.instance_context import InstanceContext
from twilio.base.instance_resource import InstanceResource
from twilio.base.list_resource import ListResource
from twilio.base.page import Page
from twilio.rest.conversations.v1.service.user.user_conversation import UserConversationList
class UserList(ListResource):
def __init__(self, version, chat_service_sid):
"""
Initialize the UserList
:param Version version: Version that contains the resource
:param chat_service_sid: The SID of the Conversation Service that the resource is associated with
:returns: twilio.rest.conversations.v1.service.user.UserList
:rtype: twilio.rest.conversations.v1.service.user.UserList
"""
super(UserList, self).__init__(version)
# Path Solution
self._solution = {'chat_service_sid': chat_service_sid, }
self._uri = '/Services/{chat_service_sid}/Users'.format(**self._solution)
def create(self, identity, friendly_name=values.unset, attributes=values.unset,
role_sid=values.unset, x_twilio_webhook_enabled=values.unset):
"""
Create the UserInstance
:param unicode identity: The string that identifies the resource's User
:param unicode friendly_name: The string that you assigned to describe the resource
:param unicode attributes: The JSON Object string that stores application-specific data
:param unicode role_sid: The SID of a service-level Role to assign to the user
:param UserInstance.WebhookEnabledType x_twilio_webhook_enabled: The X-Twilio-Webhook-Enabled HTTP request header
:returns: The created UserInstance
:rtype: twilio.rest.conversations.v1.service.user.UserInstance
"""
data = values.of({
'Identity': identity,
'FriendlyName': friendly_name,
'Attributes': attributes,
'RoleSid': role_sid,
})
headers = values.of({'X-Twilio-Webhook-Enabled': x_twilio_webhook_enabled, })
payload = self._version.create(method='POST', uri=self._uri, data=data, headers=headers, )
return UserInstance(self._version, payload, chat_service_sid=self._solution['chat_service_sid'], )
def stream(self, limit=None, page_size=None):
"""
Streams UserInstance records from the API as a generator stream.
This operation lazily loads records as efficiently as possible until the limit
is reached.
The results are returned as a generator, so this operation is memory efficient.
:param int limit: Upper limit for the number of records to return. stream()
guarantees to never return more than limit. Default is no limit
:param int page_size: Number of records to fetch per request, when not set will use
the default value of 50 records. If no page_size is defined
but a limit is defined, stream() will attempt to read the
limit with the most efficient page size, i.e. min(limit, 1000)
:returns: Generator that will yield up to limit results
:rtype: list[twilio.rest.conversations.v1.service.user.UserInstance]
"""
limits = self._version.read_limits(limit, page_size)
page = self.page(page_size=limits['page_size'], )
return self._version.stream(page, limits['limit'])
def list(self, limit=None, page_size=None):
"""
Lists UserInstance records from the API as a list.
Unlike stream(), this operation is eager and will load `limit` records into
memory before returning.
:param int limit: Upper limit for the number of records to return. list() guarantees
never to return more than limit. Default is no limit
:param int page_size: Number of records to fetch per request, when not set will use
the default value of 50 records. If no page_size is defined
but a limit is defined, list() will attempt to read the limit
with the most efficient page size, i.e. min(limit, 1000)
:returns: Generator that will yield up to limit results
:rtype: list[twilio.rest.conversations.v1.service.user.UserInstance]
"""
return list(self.stream(limit=limit, page_size=page_size, ))
def page(self, page_token=values.unset, page_number=values.unset,
page_size=values.unset):
"""
Retrieve a single page of UserInstance records from the API.
Request is executed immediately
:param str page_token: PageToken provided by the API
:param int page_number: Page Number, this value is simply for client state
:param int page_size: Number of records to return, defaults to 50
:returns: Page of UserInstance
:rtype: twilio.rest.conversations.v1.service.user.UserPage
"""
data = values.of({'PageToken': page_token, 'Page': page_number, 'PageSize': page_size, })
response = self._version.page(method='GET', uri=self._uri, params=data, )
return UserPage(self._version, response, self._solution)
def get_page(self, target_url):
"""
Retrieve a specific page of UserInstance records from the API.
Request is executed immediately
:param str target_url: API-generated URL for the requested results page
:returns: Page of UserInstance
:rtype: twilio.rest.conversations.v1.service.user.UserPage
"""
response = self._version.domain.twilio.request(
'GET',
target_url,
)
return UserPage(self._version, response, self._solution)
def get(self, sid):
"""
Constructs a UserContext
:param sid: The SID of the User resource to fetch
:returns: twilio.rest.conversations.v1.service.user.UserContext
:rtype: twilio.rest.conversations.v1.service.user.UserContext
"""
return UserContext(self._version, chat_service_sid=self._solution['chat_service_sid'], sid=sid, )
def __call__(self, sid):
"""
Constructs a UserContext
:param sid: The SID of the User resource to fetch
:returns: twilio.rest.conversations.v1.service.user.UserContext
:rtype: twilio.rest.conversations.v1.service.user.UserContext
"""
return UserContext(self._version, chat_service_sid=self._solution['chat_service_sid'], sid=sid, )
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
return '<Twilio.Conversations.V1.UserList>'
class UserPage(Page):
def __init__(self, version, response, solution):
"""
Initialize the UserPage
:param Version version: Version that contains the resource
:param Response response: Response from the API
        :param dict solution: The context solution, carrying the chat_service_sid of the owning Conversation Service
:returns: twilio.rest.conversations.v1.service.user.UserPage
:rtype: twilio.rest.conversations.v1.service.user.UserPage
"""
super(UserPage, self).__init__(version, response)
# Path Solution
self._solution = solution
def get_instance(self, payload):
"""
Build an instance of UserInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.conversations.v1.service.user.UserInstance
:rtype: twilio.rest.conversations.v1.service.user.UserInstance
"""
return UserInstance(self._version, payload, chat_service_sid=self._solution['chat_service_sid'], )
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
return '<Twilio.Conversations.V1.UserPage>'
class UserContext(InstanceContext):
def __init__(self, version, chat_service_sid, sid):
"""
Initialize the UserContext
:param Version version: Version that contains the resource
:param chat_service_sid: The SID of the Conversation Service to fetch the resource from
:param sid: The SID of the User resource to fetch
:returns: twilio.rest.conversations.v1.service.user.UserContext
:rtype: twilio.rest.conversations.v1.service.user.UserContext
"""
super(UserContext, self).__init__(version)
# Path Solution
self._solution = {'chat_service_sid': chat_service_sid, 'sid': sid, }
self._uri = '/Services/{chat_service_sid}/Users/{sid}'.format(**self._solution)
# Dependents
self._user_conversations = None
def update(self, friendly_name=values.unset, attributes=values.unset,
role_sid=values.unset, x_twilio_webhook_enabled=values.unset):
"""
Update the UserInstance
:param unicode friendly_name: The string that you assigned to describe the resource
:param unicode attributes: The JSON Object string that stores application-specific data
:param unicode role_sid: The SID of a service-level Role to assign to the user
:param UserInstance.WebhookEnabledType x_twilio_webhook_enabled: The X-Twilio-Webhook-Enabled HTTP request header
:returns: The updated UserInstance
:rtype: twilio.rest.conversations.v1.service.user.UserInstance
"""
data = values.of({'FriendlyName': friendly_name, 'Attributes': attributes, 'RoleSid': role_sid, })
headers = values.of({'X-Twilio-Webhook-Enabled': x_twilio_webhook_enabled, })
payload = self._version.update(method='POST', uri=self._uri, data=data, headers=headers, )
return UserInstance(
self._version,
payload,
chat_service_sid=self._solution['chat_service_sid'],
sid=self._solution['sid'],
)
def delete(self, x_twilio_webhook_enabled=values.unset):
"""
Deletes the UserInstance
:param UserInstance.WebhookEnabledType x_twilio_webhook_enabled: The X-Twilio-Webhook-Enabled HTTP request header
:returns: True if delete succeeds, False otherwise
:rtype: bool
"""
headers = values.of({'X-Twilio-Webhook-Enabled': x_twilio_webhook_enabled, })
return self._version.delete(method='DELETE', uri=self._uri, headers=headers, )
def fetch(self):
"""
Fetch the UserInstance
:returns: The fetched UserInstance
:rtype: twilio.rest.conversations.v1.service.user.UserInstance
"""
payload = self._version.fetch(method='GET', uri=self._uri, )
return UserInstance(
self._version,
payload,
chat_service_sid=self._solution['chat_service_sid'],
sid=self._solution['sid'],
)
@property
def user_conversations(self):
"""
Access the user_conversations
:returns: twilio.rest.conversations.v1.service.user.user_conversation.UserConversationList
:rtype: twilio.rest.conversations.v1.service.user.user_conversation.UserConversationList
"""
if self._user_conversations is None:
self._user_conversations = UserConversationList(
self._version,
chat_service_sid=self._solution['chat_service_sid'],
user_sid=self._solution['sid'],
)
return self._user_conversations
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
context = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items())
return '<Twilio.Conversations.V1.UserContext {}>'.format(context)
class UserInstance(InstanceResource):
class WebhookEnabledType(object):
TRUE = "true"
FALSE = "false"
def __init__(self, version, payload, chat_service_sid, sid=None):
"""
Initialize the UserInstance
:returns: twilio.rest.conversations.v1.service.user.UserInstance
:rtype: twilio.rest.conversations.v1.service.user.UserInstance
"""
super(UserInstance, self).__init__(version)
# Marshaled Properties
self._properties = {
'sid': payload.get('sid'),
'account_sid': payload.get('account_sid'),
'chat_service_sid': payload.get('chat_service_sid'),
'role_sid': payload.get('role_sid'),
'identity': payload.get('identity'),
'friendly_name': payload.get('friendly_name'),
'attributes': payload.get('attributes'),
'is_online': payload.get('is_online'),
'is_notifiable': payload.get('is_notifiable'),
'date_created': deserialize.iso8601_datetime(payload.get('date_created')),
'date_updated': deserialize.iso8601_datetime(payload.get('date_updated')),
'url': payload.get('url'),
'links': payload.get('links'),
}
# Context
self._context = None
self._solution = {'chat_service_sid': chat_service_sid, 'sid': sid or self._properties['sid'], }
@property
def _proxy(self):
"""
Generate an instance context for the instance, the context is capable of
performing various actions. All instance actions are proxied to the context
:returns: UserContext for this UserInstance
:rtype: twilio.rest.conversations.v1.service.user.UserContext
"""
if self._context is None:
self._context = UserContext(
self._version,
chat_service_sid=self._solution['chat_service_sid'],
sid=self._solution['sid'],
)
return self._context
@property
def sid(self):
"""
:returns: The unique string that identifies the resource
:rtype: unicode
"""
return self._properties['sid']
@property
def account_sid(self):
"""
:returns: The SID of the Account that created the resource
:rtype: unicode
"""
return self._properties['account_sid']
@property
def chat_service_sid(self):
"""
:returns: The SID of the Conversation Service that the resource is associated with
:rtype: unicode
"""
return self._properties['chat_service_sid']
@property
def role_sid(self):
"""
:returns: The SID of a service-level Role assigned to the user
:rtype: unicode
"""
return self._properties['role_sid']
@property
def identity(self):
"""
:returns: The string that identifies the resource's User
:rtype: unicode
"""
return self._properties['identity']
@property
def friendly_name(self):
"""
:returns: The string that you assigned to describe the resource
:rtype: unicode
"""
return self._properties['friendly_name']
@property
def attributes(self):
"""
:returns: The JSON Object string that stores application-specific data
:rtype: unicode
"""
return self._properties['attributes']
@property
def is_online(self):
"""
:returns: Whether the User is actively connected to this Conversations Service and online
:rtype: bool
"""
return self._properties['is_online']
@property
def is_notifiable(self):
"""
:returns: Whether the User has a potentially valid Push Notification registration for this Conversations Service
:rtype: bool
"""
return self._properties['is_notifiable']
@property
def date_created(self):
"""
:returns: The ISO 8601 date and time in GMT when the resource was created
:rtype: datetime
"""
return self._properties['date_created']
@property
def date_updated(self):
"""
:returns: The ISO 8601 date and time in GMT when the resource was last updated
:rtype: datetime
"""
return self._properties['date_updated']
@property
def url(self):
"""
:returns: An absolute URL for this user.
:rtype: unicode
"""
return self._properties['url']
@property
def links(self):
"""
:returns: The links
:rtype: unicode
"""
return self._properties['links']
def update(self, friendly_name=values.unset, attributes=values.unset,
role_sid=values.unset, x_twilio_webhook_enabled=values.unset):
"""
Update the UserInstance
:param unicode friendly_name: The string that you assigned to describe the resource
:param unicode attributes: The JSON Object string that stores application-specific data
:param unicode role_sid: The SID of a service-level Role to assign to the user
:param UserInstance.WebhookEnabledType x_twilio_webhook_enabled: The X-Twilio-Webhook-Enabled HTTP request header
:returns: The updated UserInstance
:rtype: twilio.rest.conversations.v1.service.user.UserInstance
"""
return self._proxy.update(
friendly_name=friendly_name,
attributes=attributes,
role_sid=role_sid,
x_twilio_webhook_enabled=x_twilio_webhook_enabled,
)
def delete(self, x_twilio_webhook_enabled=values.unset):
"""
        Delete the UserInstance
:param UserInstance.WebhookEnabledType x_twilio_webhook_enabled: The X-Twilio-Webhook-Enabled HTTP request header
:returns: True if delete succeeds, False otherwise
:rtype: bool
"""
return self._proxy.delete(x_twilio_webhook_enabled=x_twilio_webhook_enabled, )
def fetch(self):
"""
Fetch the UserInstance
:returns: The fetched UserInstance
:rtype: twilio.rest.conversations.v1.service.user.UserInstance
"""
return self._proxy.fetch()
@property
def user_conversations(self):
"""
Access the user_conversations
:returns: twilio.rest.conversations.v1.service.user.user_conversation.UserConversationList
:rtype: twilio.rest.conversations.v1.service.user.user_conversation.UserConversationList
"""
return self._proxy.user_conversations
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
context = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items())
return '<Twilio.Conversations.V1.UserInstance {}>'.format(context)
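# Illustrative usage sketch (not part of the generated bindings; the SIDs and
# credentials below are hypothetical placeholders):
#
#   from twilio.rest import Client
#   client = Client(account_sid, auth_token)
#   user = client.conversations \
#       .services('ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX') \
#       .users('USXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX') \
#       .fetch()
#   user.update(friendly_name='New Name')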
|
|
from __future__ import absolute_import
import datetime
import pytz
from django.test import TestCase
from schedule.conf.settings import FIRST_DAY_OF_WEEK
from schedule.models import Event, Rule, Calendar
from schedule.periods import Period, Month, Day, Year, Week
from six.moves import range
from six.moves import zip
class TestPeriod(TestCase):
def setUp(self):
rule = Rule(frequency = "WEEKLY")
rule.save()
cal = Calendar(name="MyCal")
cal.save()
data = {
'title': 'Recent Event',
'start': datetime.datetime(2008, 1, 5, 8, 0, tzinfo=pytz.utc),
'end': datetime.datetime(2008, 1, 5, 9, 0, tzinfo=pytz.utc),
'end_recurring_period' : datetime.datetime(2008, 5, 5, 0, 0, tzinfo=pytz.utc),
'rule': rule,
'calendar': cal
}
recurring_event = Event(**data)
recurring_event.save()
self.period = Period(events=Event.objects.all(),
start = datetime.datetime(2008, 1, 4, 7, 0, tzinfo=pytz.utc),
end = datetime.datetime(2008, 1, 21, 7, 0, tzinfo=pytz.utc))
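    # The fixture above saves one weekly event (Saturdays 08:00-09:00 UTC,
    # recurring until 2008-05-05) and builds a Period spanning 2008-01-04 to
    # 2008-01-21, so exactly three occurrences (Jan 5, 12, 19) fall inside it.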
def test_get_occurrences(self):
occurrence_list = self.period.occurrences
self.assertEqual(["%s to %s" %(o.start, o.end) for o in occurrence_list],
['2008-01-05 08:00:00+00:00 to 2008-01-05 09:00:00+00:00',
'2008-01-12 08:00:00+00:00 to 2008-01-12 09:00:00+00:00',
'2008-01-19 08:00:00+00:00 to 2008-01-19 09:00:00+00:00'])
def test_get_occurrence_partials(self):
occurrence_dicts = self.period.get_occurrence_partials()
self.assertEqual(
[(occ_dict["class"],
occ_dict["occurrence"].start,
occ_dict["occurrence"].end)
for occ_dict in occurrence_dicts],
[
(1,
datetime.datetime(2008, 1, 5, 8, 0, tzinfo=pytz.utc),
datetime.datetime(2008, 1, 5, 9, 0, tzinfo=pytz.utc)),
(1,
datetime.datetime(2008, 1, 12, 8, 0, tzinfo=pytz.utc),
datetime.datetime(2008, 1, 12, 9, 0, tzinfo=pytz.utc)),
(1,
datetime.datetime(2008, 1, 19, 8, 0, tzinfo=pytz.utc),
datetime.datetime(2008, 1, 19, 9, 0, tzinfo=pytz.utc))
])
def test_has_occurrence(self):
        self.assertTrue(self.period.has_occurrences())
        slot = self.period.get_time_slot(datetime.datetime(2008, 1, 4, 7, 0, tzinfo=pytz.utc),
                                         datetime.datetime(2008, 1, 4, 7, 12, tzinfo=pytz.utc))
        self.assertFalse(slot.has_occurrences())
class TestYear(TestCase):
def setUp(self):
self.year = Year(events=[], date=datetime.datetime(2008, 4, 1, tzinfo=pytz.utc))
def test_get_months(self):
months = self.year.get_months()
self.assertEqual([month.start for month in months],
[datetime.datetime(2008, i, 1, tzinfo=pytz.utc) for i in range(1,13)])
class TestMonth(TestCase):
def setUp(self):
rule = Rule(frequency = "WEEKLY")
rule.save()
cal = Calendar(name="MyCal")
cal.save()
data = {
'title': 'Recent Event',
'start': datetime.datetime(2008, 1, 5, 8, 0, tzinfo=pytz.utc),
'end': datetime.datetime(2008, 1, 5, 9, 0, tzinfo=pytz.utc),
'end_recurring_period' : datetime.datetime(2008, 5, 5, 0, 0, tzinfo=pytz.utc),
'rule': rule,
'calendar': cal
}
recurring_event = Event(**data)
recurring_event.save()
self.month = Month(events=Event.objects.all(),
date=datetime.datetime(2008, 2, 7, 9, 0, tzinfo=pytz.utc))
def test_get_weeks(self):
weeks = self.month.get_weeks()
actuals = [(week.start, week.end) for week in weeks]
if FIRST_DAY_OF_WEEK == 0:
expecteds = [
(datetime.datetime(2008, 1, 27, 0, 0, tzinfo=pytz.utc),
datetime.datetime(2008, 2, 3, 0, 0, tzinfo=pytz.utc)),
(datetime.datetime(2008, 2, 3, 0, 0, tzinfo=pytz.utc),
datetime.datetime(2008, 2, 10, 0, 0, tzinfo=pytz.utc)),
(datetime.datetime(2008, 2, 10, 0, 0, tzinfo=pytz.utc),
datetime.datetime(2008, 2, 17, 0, 0, tzinfo=pytz.utc)),
(datetime.datetime(2008, 2, 17, 0, 0, tzinfo=pytz.utc),
datetime.datetime(2008, 2, 24, 0, 0, tzinfo=pytz.utc)),
(datetime.datetime(2008, 2, 24, 0, 0, tzinfo=pytz.utc),
datetime.datetime(2008, 3, 2, 0, 0, tzinfo=pytz.utc))
]
else:
expecteds = [
(datetime.datetime(2008, 1, 28, 0, 0, tzinfo=pytz.utc),
datetime.datetime(2008, 2, 4, 0, 0, tzinfo=pytz.utc)),
(datetime.datetime(2008, 2, 4, 0, 0, tzinfo=pytz.utc),
datetime.datetime(2008, 2, 11, 0, 0, tzinfo=pytz.utc)),
(datetime.datetime(2008, 2, 11, 0, 0, tzinfo=pytz.utc),
datetime.datetime(2008, 2, 18, 0, 0, tzinfo=pytz.utc)),
(datetime.datetime(2008, 2, 18, 0, 0, tzinfo=pytz.utc),
datetime.datetime(2008, 2, 25, 0, 0, tzinfo=pytz.utc)),
(datetime.datetime(2008, 2, 25, 0, 0, tzinfo=pytz.utc),
datetime.datetime(2008, 3, 3, 0, 0, tzinfo=pytz.utc))
]
for actual, expected in zip(actuals, expecteds):
self.assertEqual(actual, expected)
def test_get_days(self):
weeks = self.month.get_weeks()
week = list(weeks)[0]
days = week.get_days()
actuals = [(len(day.occurrences), day.start,day.end) for day in days]
if FIRST_DAY_OF_WEEK == 0:
expecteds = [
(
0,
datetime.datetime(2008, 1, 27, 0, 0, tzinfo=pytz.utc),
datetime.datetime(2008, 1, 28, 0, 0, tzinfo=pytz.utc)
),
(
0,
datetime.datetime(2008, 1, 28, 0, 0, tzinfo=pytz.utc),
datetime.datetime(2008, 1, 29, 0, 0, tzinfo=pytz.utc)
),
(
0,
datetime.datetime(2008, 1, 29, 0, 0, tzinfo=pytz.utc),
datetime.datetime(2008, 1, 30, 0, 0, tzinfo=pytz.utc)
),
(
0,
datetime.datetime(2008, 1, 30, 0, 0, tzinfo=pytz.utc),
datetime.datetime(2008, 1, 31, 0, 0, tzinfo=pytz.utc)
),
(
0,
datetime.datetime(2008, 1, 31, 0, 0, tzinfo=pytz.utc),
datetime.datetime(2008, 2, 1, 0, 0, tzinfo=pytz.utc)
),
(
0,
datetime.datetime(2008, 2, 1, 0, 0, tzinfo=pytz.utc),
datetime.datetime(2008, 2, 2, 0, 0, tzinfo=pytz.utc)
),
(
1,
datetime.datetime(2008, 2, 2, 0, 0, tzinfo=pytz.utc),
datetime.datetime(2008, 2, 3, 0, 0, tzinfo=pytz.utc)
),
]
else:
expecteds = [
(0, datetime.datetime(2008, 1, 28, 0, 0, tzinfo=pytz.utc),
datetime.datetime(2008, 1, 29, 0, 0, tzinfo=pytz.utc)),
(0, datetime.datetime(2008, 1, 29, 0, 0, tzinfo=pytz.utc),
datetime.datetime(2008, 1, 30, 0, 0, tzinfo=pytz.utc)),
(0, datetime.datetime(2008, 1, 30, 0, 0, tzinfo=pytz.utc),
datetime.datetime(2008, 1, 31, 0, 0, tzinfo=pytz.utc)),
(0, datetime.datetime(2008, 1, 31, 0, 0, tzinfo=pytz.utc),
datetime.datetime(2008, 2, 1, 0, 0, tzinfo=pytz.utc)),
(0, datetime.datetime(2008, 2, 1, 0, 0, tzinfo=pytz.utc),
datetime.datetime(2008, 2, 2, 0, 0, tzinfo=pytz.utc)),
(1, datetime.datetime(2008, 2, 2, 0, 0, tzinfo=pytz.utc),
datetime.datetime(2008, 2, 3, 0, 0, tzinfo=pytz.utc)),
(0, datetime.datetime(2008, 2, 3, 0, 0, tzinfo=pytz.utc),
datetime.datetime(2008, 2, 4, 0, 0, tzinfo=pytz.utc))
]
for actual, expected in zip(actuals, expecteds):
self.assertEqual(actual, expected)
def test_month_convenience_functions(self):
self.assertEqual( self.month.prev_month().start, datetime.datetime(2008, 1, 1, 0, 0, tzinfo=pytz.utc))
self.assertEqual( self.month.next_month().start, datetime.datetime(2008, 3, 1, 0, 0, tzinfo=pytz.utc))
self.assertEqual( self.month.current_year().start, datetime.datetime(2008, 1, 1, 0, 0, tzinfo=pytz.utc))
self.assertEqual( self.month.prev_year().start, datetime.datetime(2007, 1, 1, 0, 0, tzinfo=pytz.utc))
self.assertEqual( self.month.next_year().start, datetime.datetime(2009, 1, 1, 0, 0, tzinfo=pytz.utc))
class TestDay(TestCase):
def setUp(self):
self.day = Day(events=Event.objects.all(),
date=datetime.datetime(2008, 2, 7, 9, 0, tzinfo=pytz.utc))
def test_day_setup(self):
self.assertEqual( self.day.start, datetime.datetime(2008, 2, 7, 0, 0, tzinfo=pytz.utc))
self.assertEqual( self.day.end, datetime.datetime(2008, 2, 8, 0, 0, tzinfo=pytz.utc))
def test_day_convenience_functions(self):
self.assertEqual( self.day.prev_day().start, datetime.datetime(2008, 2, 6, 0, 0, tzinfo=pytz.utc))
self.assertEqual( self.day.next_day().start, datetime.datetime(2008, 2, 8, 0, 0, tzinfo=pytz.utc))
def test_time_slot(self):
slot_start = datetime.datetime(2008, 2, 7, 13, 30, tzinfo=pytz.utc)
slot_end = datetime.datetime(2008, 2, 7, 15, 0, tzinfo=pytz.utc)
period = self.day.get_time_slot( slot_start, slot_end )
self.assertEqual( period.start, slot_start )
self.assertEqual( period.end, slot_end )
class TestOccurrencePool(TestCase):
def setUp(self):
rule = Rule(frequency = "WEEKLY")
rule.save()
cal = Calendar(name="MyCal")
cal.save()
data = {
'title': 'Recent Event',
'start': datetime.datetime(2008, 1, 5, 8, 0, tzinfo=pytz.utc),
'end': datetime.datetime(2008, 1, 5, 9, 0, tzinfo=pytz.utc),
'end_recurring_period' : datetime.datetime(2008, 5, 5, 0, 0, tzinfo=pytz.utc),
'rule': rule,
'calendar': cal
}
self.recurring_event = Event(**data)
self.recurring_event.save()
def testPeriodFromPool(self):
"""
Test that period initiated with occurrence_pool returns the same occurrences as "straigh" period
in a corner case whereby a period's start date is equal to the occurrence's end date
"""
start = datetime.datetime(2008, 1, 5, 9, 0, tzinfo=pytz.utc)
end = datetime.datetime(2008, 1, 5, 10, 0, tzinfo=pytz.utc)
parent_period = Period(Event.objects.all(), start, end)
period = Period(parent_period.events, start, end, parent_period.get_persisted_occurrences(), parent_period.occurrences)
        self.assertEqual(parent_period.occurrences, period.occurrences)
class TestAwareDay(TestCase):
def setUp(self):
self.timezone = pytz.timezone('Europe/Amsterdam')
start = self.timezone.localize(datetime.datetime(2008, 2, 7, 0, 20))
end = self.timezone.localize(datetime.datetime(2008, 2, 7, 0, 21))
self.event = Event(
            title='One minute long event on february seventh 2008 at 00:20 in Amsterdam.',
start=start,
end=end,
)
self.event.save()
self.day = Day(
events=Event.objects.all(),
date=self.timezone.localize(datetime.datetime(2008, 2, 7, 9, 0)),
tzinfo=self.timezone,
)
def test_day_range(self):
start = datetime.datetime(2008, 2, 6, 23, 0, tzinfo=pytz.utc)
end = datetime.datetime(2008, 2, 7, 23, 0, tzinfo=pytz.utc)
self.assertEqual(start, self.day.start)
self.assertEqual(end, self.day.end)
    def test_occurrence(self):
self.assertEqual(self.event in [o.event for o in self.day.occurrences], True)
class TestTzInfoPersistence(TestCase):
def setUp(self):
self.timezone = pytz.timezone('Europe/Amsterdam')
self.day = Day(
events=Event.objects.all(),
date=self.timezone.localize(datetime.datetime(2013, 12, 17, 9, 0)),
tzinfo=self.timezone
)
self.week = Week(
events=Event.objects.all(),
date=self.timezone.localize(datetime.datetime(2013, 12, 17, 9, 0)),
tzinfo=self.timezone,
)
self.month = Month(
events=Event.objects.all(),
date=self.timezone.localize(datetime.datetime(2013, 12, 17, 9, 0)),
tzinfo=self.timezone,
)
self.year = Year(
events=Event.objects.all(),
date=self.timezone.localize(datetime.datetime(2013, 12, 17, 9, 0)),
tzinfo=self.timezone,
)
def test_persistence(self):
self.assertEqual(self.day.tzinfo, self.timezone)
self.assertEqual(self.week.tzinfo, self.timezone)
self.assertEqual(self.month.tzinfo, self.timezone)
self.assertEqual(self.year.tzinfo, self.timezone)
class TestAwareWeek(TestCase):
def setUp(self):
self.timezone = pytz.timezone('Europe/Amsterdam')
self.week = Week(
events=Event.objects.all(),
date=self.timezone.localize(datetime.datetime(2013, 12, 17, 9, 0)),
tzinfo=self.timezone,
)
def test_week_range(self):
start = self.timezone.localize(datetime.datetime(2013, 12, 15, 0, 0))
end = self.timezone.localize(datetime.datetime(2013, 12, 22, 0, 0))
self.assertEqual(self.week.tzinfo, self.timezone)
self.assertEqual(start, self.week.start)
self.assertEqual(end, self.week.end)
class TestAwareMonth(TestCase):
def setUp(self):
self.timezone = pytz.timezone('Europe/Amsterdam')
self.month = Month(
events=Event.objects.all(),
date=self.timezone.localize(datetime.datetime(2013, 11, 17, 9, 0)),
tzinfo=self.timezone,
)
def test_month_range(self):
start = self.timezone.localize(datetime.datetime(2013, 11, 1, 0, 0))
end = self.timezone.localize(datetime.datetime(2013, 12, 1, 0, 0))
self.assertEqual(self.month.tzinfo, self.timezone)
self.assertEqual(start, self.month.start)
self.assertEqual(end, self.month.end)
class TestAwareYear(TestCase):
def setUp(self):
self.timezone = pytz.timezone('Europe/Amsterdam')
self.year = Year(
events=Event.objects.all(),
date=self.timezone.localize(datetime.datetime(2013, 12, 17, 9, 0)),
tzinfo=self.timezone,
)
def test_year_range(self):
start = self.timezone.localize(datetime.datetime(2013, 1, 1, 0, 0))
end = self.timezone.localize(datetime.datetime(2014, 1, 1, 0, 0))
self.assertEqual(self.year.tzinfo, self.timezone)
self.assertEqual(start, self.year.start)
self.assertEqual(end, self.year.end)
class TestStrftimeRefactor(TestCase):
"""
Test for the refactor of strftime
"""
def test_years_before_1900(self):
d = datetime.date(year=1899, month=1, day=1)
m = Month([], d)
try:
m.name()
except ValueError as value_error:
self.fail(value_error)
|
|
import discord_logging
from classes.subscription import Subscription
from classes.subreddit import Subreddit
from classes.user import User
from classes.notification import Notification
log = discord_logging.get_logger()
class _DatabaseSubscriptions:
def __init__(self):
self.session = self.session # for pycharm linting
		self.log_debug = self.log_debug  # for pycharm linting
def add_subscription(self, subscription):
if self.log_debug:
log.debug("Saving new subscription")
self.session.add(subscription)
def get_subscription_by_fields(self, subscriber, author, subreddit, tag=None):
if self.log_debug:
log.debug(
f"Fetching subscription by fields: {subscriber.name} : {author.name if author is not None else '-all'} "
f": {subreddit.name}: {tag}")
subscription = self.session.query(Subscription)\
.filter(Subscription.subscriber == subscriber)\
.filter(Subscription.author == author)\
.filter(Subscription.subreddit == subreddit)\
.filter(Subscription.tag == tag)\
.first()
return subscription
def get_count_tagged_subscriptions_by_fields(self, subscriber, author, subreddit):
if self.log_debug:
log.debug(
f"Fetching count of tagged subscription by fields: {subscriber.name} : "
f"{author.name if author is not None else 'None'} : {subreddit.name}")
count_subscriptions = self.session.query(Subscription)\
.filter(Subscription.subscriber == subscriber)\
.filter(Subscription.author == author)\
.filter(Subscription.subreddit == subreddit)\
.filter(Subscription.tag != None)\
.count()
return count_subscriptions
def get_count_subscriptions_for_author_subreddit(self, author, subreddit, tag=None):
if self.log_debug:
log.debug(f"Fetching count subscriptions for author and subreddit: {author.name} : {subreddit.name}: {tag}")
count_subscriptions = self.session.query(Subscription)\
.filter(Subscription.author == author)\
.filter(Subscription.subreddit == subreddit)\
.filter(Subscription.tag == tag)\
.count()
return count_subscriptions
def get_count_subscriptions_for_subreddit(self, subreddit):
if self.log_debug:
log.debug(f"Fetching count subscriptions for subreddit: {subreddit.name}")
count_subscriptions = self.session.query(Subscription)\
.filter(Subscription.subreddit == subreddit)\
.count()
return count_subscriptions
def get_subscriptions_for_author_subreddit(self, author, subreddit, tag=None):
if self.log_debug:
log.debug(f"Fetching subscriptions by author and subreddit: {author.name} : {subreddit.name} : {tag}")
subscriptions = self.session.query(Subscription)\
.join(
Notification,
(Subscription.recurring == False) & (Notification.subscription_id == Subscription.id),
isouter=True) \
.filter((Subscription.author == author) | (Subscription.author == None))\
.filter(Subscription.subreddit == subreddit)\
.filter((Subscription.tag == None) | (Subscription.tag == tag))\
.filter(Notification.id == None) \
.all()
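		# Order the results so that a user subscribed to their own posts sorts
		# last, breaking ties by subscriber id.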
return sorted(
subscriptions,
key=lambda subscription: (
subscription.subscriber.name == (subscription.author.name if subscription.author is not None else ""),
subscription.subscriber.id
)
)
def get_user_subscriptions_by_name(self, user_name, only_enabled=True):
user = self.session.query(User).filter_by(name=user_name).first()
if user is None:
return []
else:
return self.get_user_subscriptions(user, only_enabled)
def get_user_subscriptions(self, user, only_enabled=True):
if self.log_debug:
log.debug(f"Fetching user subscriptions u/{user.name}")
if only_enabled:
subscriptions = self.session.query(Subscription)\
.join(Subreddit)\
.filter(Subreddit.is_enabled == True)\
.filter(Subscription.subscriber == user)\
.all()
else:
subscriptions = self.session.query(Subscription)\
.filter(Subscription.subscriber == user)\
.all()
return sorted(
subscriptions,
key=lambda subscription: (
subscription.subreddit.name,
				subscription.author.name if subscription.author is not None else "",
				subscription.tag if subscription.tag is not None else ""
)
)
def get_count_subscriptions_for_author(self, user):
if self.log_debug:
log.debug(f"Getting count subscriptions for u/{user}")
return self.session.query(Subscription)\
.filter(Subscription.author == user)\
.count()
def delete_user_subscriptions(self, user):
if self.log_debug:
log.debug(f"Deleting all subscriptions for u/{user.name}")
user_subscriptions = self.session.query(Subscription)\
.filter(Subscription.subscriber == user)\
.all()
count_deleted = len(user_subscriptions)
for subscription in user_subscriptions:
self.delete_subscription(subscription)
return count_deleted
def delete_tagged_subreddit_author_subscriptions(self, subscriber, author, subreddit):
if self.log_debug:
log.debug(f"Deleting all tagged subscriptions for u/{subscriber.name} : {subscriber.name} : {author.name} : {subreddit.name}")
user_subscriptions = self.session.query(Subscription)\
.filter(Subscription.subscriber == subscriber)\
.filter(Subscription.author == author)\
.filter(Subscription.subreddit == subreddit)\
.filter(Subscription.tag != None)\
.all()
count_deleted = len(user_subscriptions)
for subscription in user_subscriptions:
self.delete_subscription(subscription)
return count_deleted
def delete_subscription(self, subscription):
if self.log_debug:
log.debug(f"Deleting subscription by id: {subscription.id}")
self.session.query(Notification) \
.filter(Notification.subscription == subscription) \
.delete(synchronize_session='fetch')
self.session.delete(subscription)
def get_all_subscriptions(self):
if self.log_debug:
log.debug("Fetching all author subreddit subscriptions")
subscriptions = self.session.query(Subscription)\
.all()
return subscriptions
def delete_author_subscriptions(self, author):
if self.log_debug:
log.debug(f"Deleting all subscriptions to u/{author.name}")
author_subscriptions = self.session.query(Subscription)\
.filter(Subscription.author == author)\
.all()
count_deleted = len(author_subscriptions)
for subscription in author_subscriptions:
self.delete_subscription(subscription)
return count_deleted
def get_count_all_subscriptions(self):
if self.log_debug:
log.debug("Fetching count of all subscriptions")
count = self.session.query(Subscription).count()
if self.log_debug:
log.debug(f"Count subscriptions: {count}")
return count
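# Illustrative composition sketch (hypothetical, not part of this module): a
# concrete Database class is expected to mix this in and supply the `session`
# and `log_debug` attributes referenced above, e.g.:
#
#   from sqlalchemy import create_engine
#   from sqlalchemy.orm import sessionmaker
#
#   class Database(_DatabaseSubscriptions):
#       def __init__(self, location, log_debug=False):
#           engine = create_engine(f"sqlite:///{location}")
#           self.session = sessionmaker(bind=engine)()
#           self.log_debug = log_debug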
|
|
"""
This module defines the mpf, mpc classes, and standard functions for
operating with them.
"""
__docformat__ = 'plaintext'
import re
from ctx_base import StandardBaseContext
import libmp
from libmp import (MPZ, MPZ_ZERO, MPZ_ONE, int_types, repr_dps,
round_floor, round_ceiling, dps_to_prec, round_nearest, prec_to_dps,
ComplexResult, to_pickable, from_pickable, normalize,
from_int, from_float, from_str, to_int, to_float, to_str,
from_rational, from_man_exp,
fone, fzero, finf, fninf, fnan,
mpf_abs, mpf_pos, mpf_neg, mpf_add, mpf_sub, mpf_mul, mpf_mul_int,
mpf_div, mpf_rdiv_int, mpf_pow_int, mpf_mod,
mpf_eq, mpf_cmp, mpf_lt, mpf_gt, mpf_le, mpf_ge,
mpf_hash, mpf_rand,
mpf_sum,
bitcount, to_fixed,
mpc_to_str,
mpc_to_complex, mpc_hash, mpc_pos, mpc_is_nonzero, mpc_neg, mpc_conjugate,
mpc_abs, mpc_add, mpc_add_mpf, mpc_sub, mpc_sub_mpf, mpc_mul, mpc_mul_mpf,
mpc_mul_int, mpc_div, mpc_div_mpf, mpc_pow, mpc_pow_mpf, mpc_pow_int,
mpc_mpf_div,
mpf_pow,
mpf_pi, mpf_degree, mpf_e, mpf_phi, mpf_ln2, mpf_ln10,
mpf_euler, mpf_catalan, mpf_apery, mpf_khinchin,
    mpf_glaisher, mpf_twinprime, mpf_mertens)
import function_docs
import rational
new = object.__new__
get_complex = re.compile(r'^\(?(?P<re>[\+\-]?\d*\.?\d*(e[\+\-]?\d+)?)??'
r'(?P<im>[\+\-]?\d*\.?\d*(e[\+\-]?\d+)?j)?\)?$')
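# get_complex parses string literals such as '1+2j', '(3.5-1e2j)' or '2j' into
# 're' and 'im' groups; it is used by _convert_fallback below.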
try:
from sage.libs.mpmath.ext_main import Context as BaseMPContext
# pickle hack
import sage.libs.mpmath.ext_main as _mpf_module
except ImportError:
from ctx_mp_python import PythonMPContext as BaseMPContext
import ctx_mp_python as _mpf_module
from ctx_mp_python import _mpf, _mpc, mpnumeric
class MPContext(BaseMPContext, StandardBaseContext):
"""
Context for multiprecision arithmetic with a global precision.
"""
def __init__(ctx):
BaseMPContext.__init__(ctx)
ctx.trap_complex = False
ctx.pretty = False
ctx.types = [ctx.mpf, ctx.mpc, ctx.constant]
ctx._mpq = rational.mpq
ctx.default()
StandardBaseContext.__init__(ctx)
ctx.mpq = rational.mpq
ctx.init_builtins()
ctx.hyp_summators = {}
ctx._init_aliases()
# XXX: automate
ctx.bernoulli.im_func.func_doc = function_docs.bernoulli
ctx.primepi.im_func.func_doc = function_docs.primepi
ctx.psi.im_func.func_doc = function_docs.psi
ctx.atan2.im_func.func_doc = function_docs.atan2
ctx.digamma.func_doc = function_docs.digamma
ctx.cospi.func_doc = function_docs.cospi
ctx.sinpi.func_doc = function_docs.sinpi
def init_builtins(ctx):
mpf = ctx.mpf
mpc = ctx.mpc
# Exact constants
ctx.one = ctx.make_mpf(fone)
ctx.zero = ctx.make_mpf(fzero)
ctx.j = ctx.make_mpc((fzero,fone))
ctx.inf = ctx.make_mpf(finf)
ctx.ninf = ctx.make_mpf(fninf)
ctx.nan = ctx.make_mpf(fnan)
eps = ctx.constant(lambda prec, rnd: (0, MPZ_ONE, 1-prec, 1),
"epsilon of working precision", "eps")
ctx.eps = eps
# Approximate constants
ctx.pi = ctx.constant(mpf_pi, "pi", "pi")
ctx.ln2 = ctx.constant(mpf_ln2, "ln(2)", "ln2")
ctx.ln10 = ctx.constant(mpf_ln10, "ln(10)", "ln10")
ctx.phi = ctx.constant(mpf_phi, "Golden ratio phi", "phi")
ctx.e = ctx.constant(mpf_e, "e = exp(1)", "e")
ctx.euler = ctx.constant(mpf_euler, "Euler's constant", "euler")
ctx.catalan = ctx.constant(mpf_catalan, "Catalan's constant", "catalan")
ctx.khinchin = ctx.constant(mpf_khinchin, "Khinchin's constant", "khinchin")
ctx.glaisher = ctx.constant(mpf_glaisher, "Glaisher's constant", "glaisher")
ctx.apery = ctx.constant(mpf_apery, "Apery's constant", "apery")
ctx.degree = ctx.constant(mpf_degree, "1 deg = pi / 180", "degree")
ctx.twinprime = ctx.constant(mpf_twinprime, "Twin prime constant", "twinprime")
ctx.mertens = ctx.constant(mpf_mertens, "Mertens' constant", "mertens")
# Standard functions
ctx.sqrt = ctx._wrap_libmp_function(libmp.mpf_sqrt, libmp.mpc_sqrt)
ctx.cbrt = ctx._wrap_libmp_function(libmp.mpf_cbrt, libmp.mpc_cbrt)
ctx.ln = ctx._wrap_libmp_function(libmp.mpf_log, libmp.mpc_log)
ctx.atan = ctx._wrap_libmp_function(libmp.mpf_atan, libmp.mpc_atan)
ctx.exp = ctx._wrap_libmp_function(libmp.mpf_exp, libmp.mpc_exp)
ctx.expj = ctx._wrap_libmp_function(libmp.mpf_expj, libmp.mpc_expj)
ctx.expjpi = ctx._wrap_libmp_function(libmp.mpf_expjpi, libmp.mpc_expjpi)
ctx.sin = ctx._wrap_libmp_function(libmp.mpf_sin, libmp.mpc_sin)
ctx.cos = ctx._wrap_libmp_function(libmp.mpf_cos, libmp.mpc_cos)
ctx.tan = ctx._wrap_libmp_function(libmp.mpf_tan, libmp.mpc_tan)
ctx.sinh = ctx._wrap_libmp_function(libmp.mpf_sinh, libmp.mpc_sinh)
ctx.cosh = ctx._wrap_libmp_function(libmp.mpf_cosh, libmp.mpc_cosh)
ctx.tanh = ctx._wrap_libmp_function(libmp.mpf_tanh, libmp.mpc_tanh)
ctx.asin = ctx._wrap_libmp_function(libmp.mpf_asin, libmp.mpc_asin)
ctx.acos = ctx._wrap_libmp_function(libmp.mpf_acos, libmp.mpc_acos)
ctx.asinh = ctx._wrap_libmp_function(libmp.mpf_asinh, libmp.mpc_asinh)
ctx.acosh = ctx._wrap_libmp_function(libmp.mpf_acosh, libmp.mpc_acosh)
ctx.atanh = ctx._wrap_libmp_function(libmp.mpf_atanh, libmp.mpc_atanh)
ctx.sinpi = ctx._wrap_libmp_function(libmp.mpf_sin_pi, libmp.mpc_sin_pi)
ctx.cospi = ctx._wrap_libmp_function(libmp.mpf_cos_pi, libmp.mpc_cos_pi)
ctx.floor = ctx._wrap_libmp_function(libmp.mpf_floor, libmp.mpc_floor)
ctx.ceil = ctx._wrap_libmp_function(libmp.mpf_ceil, libmp.mpc_ceil)
ctx.nint = ctx._wrap_libmp_function(libmp.mpf_nint, libmp.mpc_nint)
ctx.frac = ctx._wrap_libmp_function(libmp.mpf_frac, libmp.mpc_frac)
ctx.fib = ctx.fibonacci = ctx._wrap_libmp_function(libmp.mpf_fibonacci, libmp.mpc_fibonacci)
ctx.gamma = ctx._wrap_libmp_function(libmp.mpf_gamma, libmp.mpc_gamma)
ctx.rgamma = ctx._wrap_libmp_function(libmp.mpf_rgamma, libmp.mpc_rgamma)
ctx.loggamma = ctx._wrap_libmp_function(libmp.mpf_loggamma, libmp.mpc_loggamma)
ctx.fac = ctx.factorial = ctx._wrap_libmp_function(libmp.mpf_factorial, libmp.mpc_factorial)
ctx.gamma_old = ctx._wrap_libmp_function(libmp.mpf_gamma_old, libmp.mpc_gamma_old)
ctx.fac_old = ctx.factorial_old = ctx._wrap_libmp_function(libmp.mpf_factorial_old, libmp.mpc_factorial_old)
ctx.digamma = ctx._wrap_libmp_function(libmp.mpf_psi0, libmp.mpc_psi0)
ctx.harmonic = ctx._wrap_libmp_function(libmp.mpf_harmonic, libmp.mpc_harmonic)
ctx.ei = ctx._wrap_libmp_function(libmp.mpf_ei, libmp.mpc_ei)
ctx.e1 = ctx._wrap_libmp_function(libmp.mpf_e1, libmp.mpc_e1)
ctx._ci = ctx._wrap_libmp_function(libmp.mpf_ci, libmp.mpc_ci)
ctx._si = ctx._wrap_libmp_function(libmp.mpf_si, libmp.mpc_si)
ctx.ellipk = ctx._wrap_libmp_function(libmp.mpf_ellipk, libmp.mpc_ellipk)
ctx.ellipe = ctx._wrap_libmp_function(libmp.mpf_ellipe, libmp.mpc_ellipe)
ctx.agm1 = ctx._wrap_libmp_function(libmp.mpf_agm1, libmp.mpc_agm1)
ctx._erf = ctx._wrap_libmp_function(libmp.mpf_erf, None)
ctx._erfc = ctx._wrap_libmp_function(libmp.mpf_erfc, None)
ctx._zeta = ctx._wrap_libmp_function(libmp.mpf_zeta, libmp.mpc_zeta)
ctx._altzeta = ctx._wrap_libmp_function(libmp.mpf_altzeta, libmp.mpc_altzeta)
def to_fixed(ctx, x, prec):
return x.to_fixed(prec)
def hypot(ctx, x, y):
r"""
Computes the Euclidean norm of the vector `(x, y)`, equal
to `\sqrt{x^2 + y^2}`. Both `x` and `y` must be real."""
x = ctx.convert(x)
y = ctx.convert(y)
return ctx.make_mpf(libmp.mpf_hypot(x._mpf_, y._mpf_, *ctx._prec_rounding))
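    # Example (default precision): ctx.hypot(3, 4) -> mpf('5.0')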
def _gamma_upper_int(ctx, n, z):
n = int(ctx._re(n))
if n == 0:
return ctx.e1(z)
if not hasattr(z, '_mpf_'):
raise NotImplementedError
prec, rounding = ctx._prec_rounding
real, imag = libmp.mpf_expint(n, z._mpf_, prec, rounding, gamma=True)
if imag is None:
return ctx.make_mpf(real)
else:
return ctx.make_mpc((real, imag))
def _expint_int(ctx, n, z):
n = int(n)
if n == 1:
return ctx.e1(z)
if not hasattr(z, '_mpf_'):
raise NotImplementedError
prec, rounding = ctx._prec_rounding
real, imag = libmp.mpf_expint(n, z._mpf_, prec, rounding)
if imag is None:
return ctx.make_mpf(real)
else:
return ctx.make_mpc((real, imag))
def _nthroot(ctx, x, n):
if hasattr(x, '_mpf_'):
try:
return ctx.make_mpf(libmp.mpf_nthroot(x._mpf_, n, *ctx._prec_rounding))
except ComplexResult:
if ctx.trap_complex:
raise
x = (x._mpf_, libmp.fzero)
else:
x = x._mpc_
return ctx.make_mpc(libmp.mpc_nthroot(x, n, *ctx._prec_rounding))
def _besselj(ctx, n, z):
prec, rounding = ctx._prec_rounding
if hasattr(z, '_mpf_'):
return ctx.make_mpf(libmp.mpf_besseljn(n, z._mpf_, prec, rounding))
elif hasattr(z, '_mpc_'):
return ctx.make_mpc(libmp.mpc_besseljn(n, z._mpc_, prec, rounding))
def _agm(ctx, a, b=1):
prec, rounding = ctx._prec_rounding
if hasattr(a, '_mpf_') and hasattr(b, '_mpf_'):
try:
v = libmp.mpf_agm(a._mpf_, b._mpf_, prec, rounding)
return ctx.make_mpf(v)
except ComplexResult:
pass
if hasattr(a, '_mpf_'): a = (a._mpf_, libmp.fzero)
else: a = a._mpc_
if hasattr(b, '_mpf_'): b = (b._mpf_, libmp.fzero)
else: b = b._mpc_
return ctx.make_mpc(libmp.mpc_agm(a, b, prec, rounding))
def bernoulli(ctx, n):
return ctx.make_mpf(libmp.mpf_bernoulli(int(n), *ctx._prec_rounding))
def _zeta_int(ctx, n):
return ctx.make_mpf(libmp.mpf_zeta_int(int(n), *ctx._prec_rounding))
def atan2(ctx, y, x):
x = ctx.convert(x)
y = ctx.convert(y)
return ctx.make_mpf(libmp.mpf_atan2(y._mpf_, x._mpf_, *ctx._prec_rounding))
def psi(ctx, m, z):
z = ctx.convert(z)
m = int(m)
if ctx._is_real_type(z):
return ctx.make_mpf(libmp.mpf_psi(m, z._mpf_, *ctx._prec_rounding))
else:
return ctx.make_mpc(libmp.mpc_psi(m, z._mpc_, *ctx._prec_rounding))
def cos_sin(ctx, x, **kwargs):
if type(x) not in ctx.types:
x = ctx.convert(x)
prec, rounding = ctx._parse_prec(kwargs)
if hasattr(x, '_mpf_'):
c, s = libmp.mpf_cos_sin(x._mpf_, prec, rounding)
return ctx.make_mpf(c), ctx.make_mpf(s)
elif hasattr(x, '_mpc_'):
c, s = libmp.mpc_cos_sin(x._mpc_, prec, rounding)
return ctx.make_mpc(c), ctx.make_mpc(s)
else:
return ctx.cos(x, **kwargs), ctx.sin(x, **kwargs)
def cospi_sinpi(ctx, x, **kwargs):
if type(x) not in ctx.types:
x = ctx.convert(x)
prec, rounding = ctx._parse_prec(kwargs)
if hasattr(x, '_mpf_'):
c, s = libmp.mpf_cos_sin_pi(x._mpf_, prec, rounding)
return ctx.make_mpf(c), ctx.make_mpf(s)
elif hasattr(x, '_mpc_'):
c, s = libmp.mpc_cos_sin_pi(x._mpc_, prec, rounding)
return ctx.make_mpc(c), ctx.make_mpc(s)
else:
return ctx.cos(x, **kwargs), ctx.sin(x, **kwargs)
def clone(ctx):
"""
Create a copy of the context, with the same working precision.
"""
a = ctx.__class__()
a.prec = ctx.prec
return a
# Several helper methods
# TODO: add more of these, make consistent, write docstrings, ...
def _is_real_type(ctx, x):
if hasattr(x, '_mpc_') or type(x) is complex:
return False
return True
def _is_complex_type(ctx, x):
if hasattr(x, '_mpc_') or type(x) is complex:
return True
return False
def isnpint(ctx, x):
if not x:
return True
if hasattr(x, '_mpf_'):
sign, man, exp, bc = x._mpf_
return sign and exp >= 0
if hasattr(x, '_mpc_'):
return not x.imag and ctx.isnpint(x.real)
if type(x) in int_types:
return x <= 0
if isinstance(x, ctx.mpq):
# XXX: WRONG
p, q = x._mpq_
if not p:
return True
return (not (q % p)) and p <= 0
return ctx.isnpint(ctx.convert(x))
def __str__(ctx):
lines = ["Mpmath settings:",
(" mp.prec = %s" % ctx.prec).ljust(30) + "[default: 53]",
(" mp.dps = %s" % ctx.dps).ljust(30) + "[default: 15]",
(" mp.trap_complex = %s" % ctx.trap_complex).ljust(30) + "[default: False]",
]
return "\n".join(lines)
@property
def _repr_digits(ctx):
return repr_dps(ctx._prec)
@property
def _str_digits(ctx):
return ctx._dps
def extraprec(ctx, n, normalize_output=False):
"""
The block
with extraprec(n):
<code>
increases the precision n bits, executes <code>, and then
restores the precision.
extraprec(n)(f) returns a decorated version of the function f
that increases the working precision by n bits before execution,
and restores the parent precision afterwards. With
normalize_output=True, it rounds the return value to the parent
precision.
"""
return PrecisionManager(ctx, lambda p: p + n, None, normalize_output)
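    # Usage sketch (context-manager form):
    #
    #   with mp.extraprec(10):
    #       v = mp.exp(1)   # evaluated with 10 extra bits
    #   v = +v              # optionally round back to the outer precision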
def extradps(ctx, n, normalize_output=False):
"""
This function is analogous to extraprec (see documentation)
but changes the decimal precision instead of the number of bits.
"""
return PrecisionManager(ctx, None, lambda d: d + n, normalize_output)
def workprec(ctx, n, normalize_output=False):
"""
The block
with workprec(n):
<code>
sets the precision to n bits, executes <code>, and then restores
the precision.
workprec(n)(f) returns a decorated version of the function f
that sets the precision to n bits before execution,
and restores the precision afterwards. With normalize_output=True,
it rounds the return value to the parent precision.
"""
return PrecisionManager(ctx, lambda p: n, None, normalize_output)
def workdps(ctx, n, normalize_output=False):
"""
This function is analogous to workprec (see documentation)
but changes the decimal precision instead of the number of bits.
"""
return PrecisionManager(ctx, None, lambda d: n, normalize_output)
def autoprec(ctx, f, maxprec=None, catch=(), verbose=False):
"""
Return a wrapped copy of *f* that repeatedly evaluates *f*
with increasing precision until the result converges to the
full precision used at the point of the call.
This heuristically protects against rounding errors, at the cost of
roughly a 2x slowdown compared to manually setting the optimal
precision. This method can, however, easily be fooled if the results
from *f* depend "discontinuously" on the precision, for instance
if catastrophic cancellation can occur. Therefore, :func:`~mpmath.autoprec`
should be used judiciously.
**Examples**
Many functions are sensitive to perturbations of the input arguments.
If the arguments are decimal numbers, they may have to be converted
to binary at a much higher precision. If the amount of required
extra precision is unknown, :func:`~mpmath.autoprec` is convenient::
>>> from mpmath import *
>>> mp.dps = 15
>>> mp.pretty = True
>>> besselj(5, 125 * 10**28) # Exact input
-8.03284785591801e-17
>>> besselj(5, '1.25e30') # Bad
7.12954868316652e-16
>>> autoprec(besselj)(5, '1.25e30') # Good
-8.03284785591801e-17
The following fails to converge because `\sin(\pi) = 0` whereas all
finite-precision approximations of `\pi` give nonzero values::
>>> autoprec(sin)(pi)
Traceback (most recent call last):
...
NoConvergence: autoprec: prec increased to 2910 without convergence
As the following example shows, :func:`~mpmath.autoprec` can protect against
cancellation, but is fooled by too severe cancellation::
>>> x = 1e-10
>>> exp(x)-1; expm1(x); autoprec(lambda t: exp(t)-1)(x)
1.00000008274037e-10
1.00000000005e-10
1.00000000005e-10
>>> x = 1e-50
>>> exp(x)-1; expm1(x); autoprec(lambda t: exp(t)-1)(x)
0.0
1.0e-50
0.0
With *catch*, an exception or list of exceptions to intercept
may be specified. The raised exception is interpreted
as signaling insufficient precision. This permits, for example,
evaluating a function where a too low precision results in a
division by zero::
>>> f = lambda x: 1/(exp(x)-1)
>>> f(1e-30)
Traceback (most recent call last):
...
ZeroDivisionError
>>> autoprec(f, catch=ZeroDivisionError)(1e-30)
1.0e+30
"""
def f_autoprec_wrapped(*args, **kwargs):
prec = ctx.prec
if maxprec is None:
maxprec2 = ctx._default_hyper_maxprec(prec)
else:
maxprec2 = maxprec
try:
ctx.prec = prec + 10
try:
v1 = f(*args, **kwargs)
except catch:
v1 = ctx.nan
prec2 = prec + 20
while 1:
ctx.prec = prec2
try:
v2 = f(*args, **kwargs)
except catch:
v2 = ctx.nan
if v1 == v2:
break
err = ctx.mag(v2-v1) - ctx.mag(v2)
if err < (-prec):
break
if verbose:
print "autoprec: target=%s, prec=%s, accuracy=%s" \
% (prec, prec2, -err)
v1 = v2
if prec2 >= maxprec2:
raise ctx.NoConvergence(\
"autoprec: prec increased to %i without convergence"\
% prec2)
prec2 += int(prec2*2)
prec2 = min(prec2, maxprec2)
finally:
ctx.prec = prec
return +v2
return f_autoprec_wrapped
def nstr(ctx, x, n=6, **kwargs):
"""
Convert an ``mpf`` or ``mpc`` to a decimal string literal with *n*
significant digits. The small default value for *n* is chosen to
make this function useful for printing collections of numbers
(lists, matrices, etc).
If *x* is a list or tuple, :func:`~mpmath.nstr` is applied recursively
to each element. For unrecognized classes, :func:`~mpmath.nstr`
simply returns ``str(x)``.
The companion function :func:`~mpmath.nprint` prints the result
instead of returning it.
>>> from mpmath import *
>>> nstr([+pi, ldexp(1,-500)])
'[3.14159, 3.05494e-151]'
>>> nprint([+pi, ldexp(1,-500)])
[3.14159, 3.05494e-151]
"""
if isinstance(x, list):
return "[%s]" % (", ".join(ctx.nstr(c, n, **kwargs) for c in x))
if isinstance(x, tuple):
return "(%s)" % (", ".join(ctx.nstr(c, n, **kwargs) for c in x))
if hasattr(x, '_mpf_'):
return to_str(x._mpf_, n, **kwargs)
if hasattr(x, '_mpc_'):
return "(" + mpc_to_str(x._mpc_, n, **kwargs) + ")"
if isinstance(x, basestring):
return repr(x)
if isinstance(x, ctx.matrix):
return x.__nstr__(n, **kwargs)
return str(x)
def _convert_fallback(ctx, x, strings):
if strings and isinstance(x, basestring):
if 'j' in x.lower():
x = x.lower().replace(' ', '')
match = get_complex.match(x)
re = match.group('re')
if not re:
re = 0
im = match.group('im').rstrip('j')
return ctx.mpc(ctx.convert(re), ctx.convert(im))
if hasattr(x, "_mpi_"):
a, b = x._mpi_
if a == b:
return ctx.make_mpf(a)
else:
raise ValueError("can only create mpf from zero-width interval")
raise TypeError("cannot create mpf from " + repr(x))
def mpmathify(ctx, *args, **kwargs):
return ctx.convert(*args, **kwargs)
def _parse_prec(ctx, kwargs):
if kwargs:
if kwargs.get('exact'):
return 0, 'f'
prec, rounding = ctx._prec_rounding
if 'rounding' in kwargs:
rounding = kwargs['rounding']
if 'prec' in kwargs:
prec = kwargs['prec']
if prec == ctx.inf:
return 0, 'f'
else:
prec = int(prec)
elif 'dps' in kwargs:
dps = kwargs['dps']
if dps == ctx.inf:
return 0, 'f'
prec = dps_to_prec(dps)
return prec, rounding
return ctx._prec_rounding
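    # For example, _parse_prec({'dps': 30}) yields (dps_to_prec(30), rounding),
    # and _parse_prec({'exact': True}) yields (0, 'f'); a zero precision tells
    # the libmp routines to compute exactly (fdiv explicitly rejects it).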
_exact_overflow_msg = "the exact result does not fit in memory"
_hypsum_msg = """hypsum() failed to converge to the requested %i bits of accuracy
using a working precision of %i bits. Try with a higher maxprec,
maxterms, or set zeroprec."""
def hypsum(ctx, p, q, flags, coeffs, z, accurate_small=True, **kwargs):
if hasattr(z, "_mpf_"):
key = p, q, flags, 'R'
v = z._mpf_
elif hasattr(z, "_mpc_"):
key = p, q, flags, 'C'
v = z._mpc_
if key not in ctx.hyp_summators:
ctx.hyp_summators[key] = libmp.make_hyp_summator(key)[1]
summator = ctx.hyp_summators[key]
prec = ctx.prec
maxprec = kwargs.get('maxprec', ctx._default_hyper_maxprec(prec))
extraprec = 50
epsshift = 25
# Jumps in magnitude occur when parameters are close to negative
# integers. We must ensure that these terms are included in
# the sum and added accurately
magnitude_check = {}
max_total_jump = 0
for i, c in enumerate(coeffs):
if flags[i] == 'Z':
if i >= p and c <= 0:
ok = False
for ii, cc in enumerate(coeffs[:p]):
# Note: c <= cc or c < cc, depending on convention
if flags[ii] == 'Z' and cc <= 0 and c <= cc:
ok = True
if not ok:
raise ZeroDivisionError("pole in hypergeometric series")
continue
n, d = ctx.nint_distance(c)
n = -int(n)
d = -d
if i >= p and n >= 0 and d > 4:
if n in magnitude_check:
magnitude_check[n] += d
else:
magnitude_check[n] = d
extraprec = max(extraprec, d - prec + 60)
max_total_jump += abs(d)
while 1:
if extraprec > maxprec:
raise ValueError(ctx._hypsum_msg % (prec, prec+extraprec))
wp = prec + extraprec
if magnitude_check:
mag_dict = dict((n,None) for n in magnitude_check)
else:
mag_dict = {}
zv, have_complex, magnitude = summator(coeffs, v, prec, wp, \
epsshift, mag_dict, **kwargs)
cancel = -magnitude
jumps_resolved = True
if extraprec < max_total_jump:
for n in mag_dict.values():
if (n is None) or (n < prec):
jumps_resolved = False
break
accurate = (cancel < extraprec-25-5 or not accurate_small)
if jumps_resolved:
if accurate:
break
# zero?
zeroprec = kwargs.get('zeroprec')
if zeroprec is not None:
if cancel > zeroprec:
if have_complex:
return ctx.mpc(0)
else:
return ctx.zero
# Some near-singularities were not included, so increase
# precision and repeat until they are
extraprec *= 2
# Possible workaround for bad roundoff in fixed-point arithmetic
epsshift += 5
extraprec += 5
if have_complex:
z = ctx.make_mpc(zv)
else:
z = ctx.make_mpf(zv)
return z
def ldexp(ctx, x, n):
r"""
Computes `x 2^n` efficiently. No rounding is performed.
The argument `x` must be a real floating-point number (or
possible to convert into one) and `n` must be a Python ``int``.
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = False
>>> ldexp(1, 10)
mpf('1024.0')
>>> ldexp(1, -3)
mpf('0.125')
"""
x = ctx.convert(x)
return ctx.make_mpf(libmp.mpf_shift(x._mpf_, n))
def frexp(ctx, x):
r"""
Given a real number `x`, returns `(y, n)` with `y \in [0.5, 1)`,
`n` a Python integer, and such that `x = y 2^n`. No rounding is
performed.
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = False
>>> frexp(7.5)
(mpf('0.9375'), 3)
"""
x = ctx.convert(x)
y, n = libmp.mpf_frexp(x._mpf_)
return ctx.make_mpf(y), n
def fneg(ctx, x, **kwargs):
"""
Negates the number *x*, giving a floating-point result, optionally
using a custom precision and rounding mode.
See the documentation of :func:`~mpmath.fadd` for a detailed description
of how to specify precision and rounding.
**Examples**
An mpmath number is returned::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = False
>>> fneg(2.5)
mpf('-2.5')
>>> fneg(-5+2j)
mpc(real='5.0', imag='-2.0')
Precise control over rounding is possible::
>>> x = fadd(2, 1e-100, exact=True)
>>> fneg(x)
mpf('-2.0')
>>> fneg(x, rounding='f')
mpf('-2.0000000000000004')
Negating with and without roundoff::
>>> n = 200000000000000000000001
>>> print int(-mpf(n))
-200000000000000016777216
>>> print int(fneg(n))
-200000000000000016777216
>>> print int(fneg(n, prec=log(n,2)+1))
-200000000000000000000001
>>> print int(fneg(n, dps=log(n,10)+1))
-200000000000000000000001
>>> print int(fneg(n, prec=inf))
-200000000000000000000001
>>> print int(fneg(n, dps=inf))
-200000000000000000000001
>>> print int(fneg(n, exact=True))
-200000000000000000000001
"""
prec, rounding = ctx._parse_prec(kwargs)
x = ctx.convert(x)
if hasattr(x, '_mpf_'):
return ctx.make_mpf(mpf_neg(x._mpf_, prec, rounding))
if hasattr(x, '_mpc_'):
return ctx.make_mpc(mpc_neg(x._mpc_, prec, rounding))
raise ValueError("Arguments need to be mpf or mpc compatible numbers")
def fadd(ctx, x, y, **kwargs):
"""
Adds the numbers *x* and *y*, giving a floating-point result,
optionally using a custom precision and rounding mode.
The default precision is the working precision of the context.
You can specify a custom precision in bits by passing the *prec* keyword
argument, or by providing an equivalent decimal precision with the *dps*
keyword argument. If the precision is set to ``+inf``, or if the flag
*exact=True* is passed, an exact addition with no rounding is performed.
When the precision is finite, the optional *rounding* keyword argument
specifies the direction of rounding. Valid options are ``'n'`` for
nearest (default), ``'f'`` for floor, ``'c'`` for ceiling, ``'d'``
for down, ``'u'`` for up.
**Examples**
Using :func:`~mpmath.fadd` with precision and rounding control::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = False
>>> fadd(2, 1e-20)
mpf('2.0')
>>> fadd(2, 1e-20, rounding='u')
mpf('2.0000000000000004')
>>> nprint(fadd(2, 1e-20, prec=100), 25)
2.00000000000000000001
>>> nprint(fadd(2, 1e-20, dps=15), 25)
2.0
>>> nprint(fadd(2, 1e-20, dps=25), 25)
2.00000000000000000001
>>> nprint(fadd(2, 1e-20, exact=True), 25)
2.00000000000000000001
Exact addition avoids cancellation errors, enforcing familiar laws
of numbers such as `x+y-x = y`, which don't hold in floating-point
arithmetic with finite precision::
>>> x, y = mpf(2), mpf('1e-1000')
>>> print x + y - x
0.0
>>> print fadd(x, y, prec=inf) - x
1.0e-1000
>>> print fadd(x, y, exact=True) - x
1.0e-1000
Exact addition can be inefficient and may be impossible to perform
with large magnitude differences::
>>> fadd(1, '1e-100000000000000000000', prec=inf)
Traceback (most recent call last):
...
OverflowError: the exact result does not fit in memory
"""
prec, rounding = ctx._parse_prec(kwargs)
x = ctx.convert(x)
y = ctx.convert(y)
try:
if hasattr(x, '_mpf_'):
if hasattr(y, '_mpf_'):
return ctx.make_mpf(mpf_add(x._mpf_, y._mpf_, prec, rounding))
if hasattr(y, '_mpc_'):
return ctx.make_mpc(mpc_add_mpf(y._mpc_, x._mpf_, prec, rounding))
if hasattr(x, '_mpc_'):
if hasattr(y, '_mpf_'):
return ctx.make_mpc(mpc_add_mpf(x._mpc_, y._mpf_, prec, rounding))
if hasattr(y, '_mpc_'):
return ctx.make_mpc(mpc_add(x._mpc_, y._mpc_, prec, rounding))
except (ValueError, OverflowError):
raise OverflowError(ctx._exact_overflow_msg)
raise ValueError("Arguments need to be mpf or mpc compatible numbers")
def fsub(ctx, x, y, **kwargs):
"""
Subtracts the numbers *x* and *y*, giving a floating-point result,
optionally using a custom precision and rounding mode.
See the documentation of :func:`~mpmath.fadd` for a detailed description
of how to specify precision and rounding.
**Examples**
Using :func:`~mpmath.fsub` with precision and rounding control::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = False
>>> fsub(2, 1e-20)
mpf('2.0')
>>> fsub(2, 1e-20, rounding='d')
mpf('1.9999999999999998')
>>> nprint(fsub(2, 1e-20, prec=100), 25)
1.99999999999999999999
>>> nprint(fsub(2, 1e-20, dps=15), 25)
2.0
>>> nprint(fsub(2, 1e-20, dps=25), 25)
1.99999999999999999999
>>> nprint(fsub(2, 1e-20, exact=True), 25)
1.99999999999999999999
Exact subtraction avoids cancellation errors, enforcing familiar laws
of numbers such as `x-y+y = x`, which don't hold in floating-point
arithmetic with finite precision::
>>> x, y = mpf(2), mpf('1e1000')
>>> print x - y + y
0.0
>>> print fsub(x, y, prec=inf) + y
2.0
>>> print fsub(x, y, exact=True) + y
2.0
        Exact subtraction can be inefficient and may be impossible to perform
with large magnitude differences::
>>> fsub(1, '1e-100000000000000000000', prec=inf)
Traceback (most recent call last):
...
OverflowError: the exact result does not fit in memory
"""
prec, rounding = ctx._parse_prec(kwargs)
x = ctx.convert(x)
y = ctx.convert(y)
try:
if hasattr(x, '_mpf_'):
if hasattr(y, '_mpf_'):
return ctx.make_mpf(mpf_sub(x._mpf_, y._mpf_, prec, rounding))
if hasattr(y, '_mpc_'):
return ctx.make_mpc(mpc_sub((x._mpf_, fzero), y._mpc_, prec, rounding))
if hasattr(x, '_mpc_'):
if hasattr(y, '_mpf_'):
return ctx.make_mpc(mpc_sub_mpf(x._mpc_, y._mpf_, prec, rounding))
if hasattr(y, '_mpc_'):
return ctx.make_mpc(mpc_sub(x._mpc_, y._mpc_, prec, rounding))
except (ValueError, OverflowError):
raise OverflowError(ctx._exact_overflow_msg)
raise ValueError("Arguments need to be mpf or mpc compatible numbers")
def fmul(ctx, x, y, **kwargs):
"""
Multiplies the numbers *x* and *y*, giving a floating-point result,
optionally using a custom precision and rounding mode.
See the documentation of :func:`~mpmath.fadd` for a detailed description
of how to specify precision and rounding.
**Examples**
The result is an mpmath number::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = False
>>> fmul(2, 5.0)
mpf('10.0')
>>> fmul(0.5j, 0.5)
mpc(real='0.0', imag='0.25')
Avoiding roundoff::
>>> x, y = 10**10+1, 10**15+1
>>> print x*y
10000000001000010000000001
>>> print mpf(x) * mpf(y)
1.0000000001e+25
>>> print int(mpf(x) * mpf(y))
10000000001000011026399232
>>> print int(fmul(x, y))
10000000001000011026399232
>>> print int(fmul(x, y, dps=25))
10000000001000010000000001
>>> print int(fmul(x, y, exact=True))
10000000001000010000000001
Exact multiplication with complex numbers can be inefficient and may
be impossible to perform with large magnitude differences between
real and imaginary parts::
>>> x = 1+2j
>>> y = mpc(2, '1e-100000000000000000000')
>>> fmul(x, y)
mpc(real='2.0', imag='4.0')
>>> fmul(x, y, rounding='u')
mpc(real='2.0', imag='4.0000000000000009')
>>> fmul(x, y, exact=True)
Traceback (most recent call last):
...
OverflowError: the exact result does not fit in memory
"""
prec, rounding = ctx._parse_prec(kwargs)
x = ctx.convert(x)
y = ctx.convert(y)
try:
if hasattr(x, '_mpf_'):
if hasattr(y, '_mpf_'):
return ctx.make_mpf(mpf_mul(x._mpf_, y._mpf_, prec, rounding))
if hasattr(y, '_mpc_'):
return ctx.make_mpc(mpc_mul_mpf(y._mpc_, x._mpf_, prec, rounding))
if hasattr(x, '_mpc_'):
if hasattr(y, '_mpf_'):
return ctx.make_mpc(mpc_mul_mpf(x._mpc_, y._mpf_, prec, rounding))
if hasattr(y, '_mpc_'):
return ctx.make_mpc(mpc_mul(x._mpc_, y._mpc_, prec, rounding))
except (ValueError, OverflowError):
raise OverflowError(ctx._exact_overflow_msg)
raise ValueError("Arguments need to be mpf or mpc compatible numbers")
def fdiv(ctx, x, y, **kwargs):
"""
Divides the numbers *x* and *y*, giving a floating-point result,
optionally using a custom precision and rounding mode.
See the documentation of :func:`~mpmath.fadd` for a detailed description
of how to specify precision and rounding.
**Examples**
The result is an mpmath number::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = False
>>> fdiv(3, 2)
mpf('1.5')
>>> fdiv(2, 3)
mpf('0.66666666666666663')
>>> fdiv(2+4j, 0.5)
mpc(real='4.0', imag='8.0')
The rounding direction and precision can be controlled::
>>> fdiv(2, 3, dps=3) # Should be accurate to at least 3 digits
mpf('0.6666259765625')
>>> fdiv(2, 3, rounding='d')
mpf('0.66666666666666663')
>>> fdiv(2, 3, prec=60)
mpf('0.66666666666666667')
>>> fdiv(2, 3, rounding='u')
mpf('0.66666666666666674')
Checking the error of a division by performing it at higher precision::
>>> fdiv(2, 3) - fdiv(2, 3, prec=100)
mpf('-3.7007434154172148e-17')
Unlike :func:`~mpmath.fadd`, :func:`~mpmath.fmul`, etc., exact division is not
allowed since the quotient of two floating-point numbers generally
does not have an exact floating-point representation. (In the
future this might be changed to allow the case where the division
is actually exact.)
>>> fdiv(2, 3, exact=True)
Traceback (most recent call last):
...
ValueError: division is not an exact operation
"""
prec, rounding = ctx._parse_prec(kwargs)
if not prec:
raise ValueError("division is not an exact operation")
x = ctx.convert(x)
y = ctx.convert(y)
if hasattr(x, '_mpf_'):
if hasattr(y, '_mpf_'):
return ctx.make_mpf(mpf_div(x._mpf_, y._mpf_, prec, rounding))
if hasattr(y, '_mpc_'):
return ctx.make_mpc(mpc_div((x._mpf_, fzero), y._mpc_, prec, rounding))
if hasattr(x, '_mpc_'):
if hasattr(y, '_mpf_'):
return ctx.make_mpc(mpc_div_mpf(x._mpc_, y._mpf_, prec, rounding))
if hasattr(y, '_mpc_'):
return ctx.make_mpc(mpc_div(x._mpc_, y._mpc_, prec, rounding))
raise ValueError("Arguments need to be mpf or mpc compatible numbers")
def nint_distance(ctx, x):
r"""
Return `(n,d)` where `n` is the nearest integer to `x` and `d` is
an estimate of `\log_2(|x-n|)`. If `d < 0`, `-d` gives the precision
(measured in bits) lost to cancellation when computing `x-n`.
>>> from mpmath import *
>>> n, d = nint_distance(5)
>>> print n, d
5 -inf
>>> n, d = nint_distance(mpf(5))
>>> print n, d
5 -inf
>>> n, d = nint_distance(mpf(5.00000001))
>>> print n, d
5 -26
>>> n, d = nint_distance(mpf(4.99999999))
>>> print n, d
5 -26
>>> n, d = nint_distance(mpc(5,10))
>>> print n, d
5 4
>>> n, d = nint_distance(mpc(5,0.000001))
>>> print n, d
5 -19
"""
if hasattr(x, "_mpf_"):
re = x._mpf_
im_dist = ctx.ninf
elif hasattr(x, "_mpc_"):
re, im = x._mpc_
isign, iman, iexp, ibc = im
if iman:
im_dist = iexp + ibc
elif im == fzero:
im_dist = ctx.ninf
else:
raise ValueError("requires a finite number")
elif isinstance(x, int_types):
return int(x), ctx.ninf
elif isinstance(x, rational.mpq):
p, q = x._mpq_
n, r = divmod(p, q)
if 2*r >= q:
n += 1
elif not r:
return n, ctx.ninf
# log(p/q-n) = log((p-nq)/q) = log(p-nq) - log(q)
d = bitcount(abs(p-n*q)) - bitcount(q)
return n, d
else:
x = ctx.convert(x)
if hasattr(x, "_mpf_") or hasattr(x, "_mpc_"):
return ctx.nint_distance(x)
else:
raise TypeError("requires an mpf/mpc")
sign, man, exp, bc = re
shift = exp+bc
if sign:
man = -man
if shift < -1:
n = 0
re_dist = shift
elif man:
if exp >= 0:
n = man << exp
re_dist = ctx.ninf
else:
if shift >= 0:
xfixed = man << shift
else:
xfixed = man >> (-shift)
n1 = xfixed >> bc
n2 = -((-xfixed) >> bc)
dist1 = abs(xfixed - (n1<<bc))
dist2 = abs(xfixed - (n2<<bc))
if dist1 < dist2:
re_dist = dist1
n = n1
else:
re_dist = dist2
n = n2
if re_dist:
re_dist = bitcount(re_dist) - bc
else:
re_dist = ctx.ninf
elif re == fzero:
re_dist = ctx.ninf
n = 0
else:
raise ValueError("requires a finite number")
return n, max(re_dist, im_dist)
def fprod(ctx, factors):
r"""
Calculates a product containing a finite number of factors (for
infinite products, see :func:`~mpmath.nprod`). The factors will be
converted to mpmath numbers.
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = False
>>> fprod([1, 2, 0.5, 7])
mpf('7.0')
"""
orig = ctx.prec
try:
v = ctx.one
for p in factors:
v *= p
finally:
ctx.prec = orig
return +v
def rand(ctx):
"""
Returns an ``mpf`` with value chosen randomly from `[0, 1)`.
The number of randomly generated bits in the mantissa is equal
to the working precision.
"""
return ctx.make_mpf(mpf_rand(ctx._prec))
def fraction(ctx, p, q):
"""
Given Python integers `(p, q)`, returns a lazy ``mpf`` representing
        the fraction `p/q`. The value is recomputed at the current working
        precision whenever it is used.
>>> from mpmath import *
>>> mp.dps = 15
>>> a = fraction(1,100)
>>> b = mpf(1)/100
>>> print a; print b
0.01
0.01
>>> mp.dps = 30
>>> print a; print b # a will be accurate
0.01
0.0100000000000000002081668171172
>>> mp.dps = 15
"""
return ctx.constant(lambda prec, rnd: from_rational(p, q, prec, rnd),
'%s/%s' % (p, q))
def absmin(ctx, x):
return abs(ctx.convert(x))
def absmax(ctx, x):
return abs(ctx.convert(x))
def _as_points(ctx, x):
# XXX: remove this?
if hasattr(x, '_mpi_'):
a, b = x._mpi_
return [ctx.make_mpf(a), ctx.make_mpf(b)]
return x
'''
def _zetasum(ctx, s, a, b):
"""
Computes sum of k^(-s) for k = a, a+1, ..., b with a, b both small
integers.
"""
a = int(a)
b = int(b)
s = ctx.convert(s)
prec, rounding = ctx._prec_rounding
if hasattr(s, '_mpf_'):
v = ctx.make_mpf(libmp.mpf_zetasum(s._mpf_, a, b, prec))
elif hasattr(s, '_mpc_'):
v = ctx.make_mpc(libmp.mpc_zetasum(s._mpc_, a, b, prec))
return v
'''
def _zetasum_fast(ctx, s, a, n, derivatives=[0], reflect=False):
if not (ctx.isint(a) and hasattr(s, "_mpc_")):
raise NotImplementedError
a = int(a)
prec = ctx._prec
xs, ys = libmp.mpc_zetasum(s._mpc_, a, n, derivatives, reflect, prec)
xs = map(ctx.make_mpc, xs)
ys = map(ctx.make_mpc, ys)
return xs, ys
class PrecisionManager:
def __init__(self, ctx, precfun, dpsfun, normalize_output=False):
self.ctx = ctx
self.precfun = precfun
self.dpsfun = dpsfun
self.normalize_output = normalize_output
def __call__(self, f):
def g(*args, **kwargs):
orig = self.ctx.prec
try:
if self.precfun:
self.ctx.prec = self.precfun(self.ctx.prec)
else:
self.ctx.dps = self.dpsfun(self.ctx.dps)
if self.normalize_output:
v = f(*args, **kwargs)
if type(v) is tuple:
return tuple([+a for a in v])
return +v
else:
return f(*args, **kwargs)
finally:
self.ctx.prec = orig
g.__name__ = f.__name__
g.__doc__ = f.__doc__
return g
def __enter__(self):
self.origp = self.ctx.prec
if self.precfun:
self.ctx.prec = self.precfun(self.ctx.prec)
else:
self.ctx.dps = self.dpsfun(self.ctx.dps)
def __exit__(self, exc_type, exc_val, exc_tb):
self.ctx.prec = self.origp
return False
if __name__ == '__main__':
import doctest
doctest.testmod()
|
|
#!/usr/bin/env python
"""
Matrix-vector multiplication using blocks of 256 threads
(and global memory).
It is designed for computing x = A.dot(b) where:
A is "skinny-tall"
b is a vector
# of rows in A is large (e.g., number of pixels in an image)
# of columns in A (or rows in b) is not too large (e.g., 100 or even 1000
# is fine, but 10000 will be too slow).
Created on Fri May 9 10:24:08 2014
Author: Oren Freifeld
Email: freifeld@csail.mit.edu
"""
import time
import numpy as np
from pycuda import compiler, gpuarray
from of.gpu.init_the_device_if_needed import init_the_device_if_needed
class MatTimesVec(object):
"""
When an object of this class is called (using __call__), it computes
A_gpu.dot(b_gpu) and stores the result in out_gpu where
A_gpu.shape = (self.nRowsA,self.nColsA)
b_gpu.shape = (self.nColsA,)
out_gpu.shape = (self.nRowsA,)
"""
init_the_device_if_needed()
__kernel = """
__global__ void mat_vec_krnl(const double* A,const double* b,double* x, const int nRowsA)
{
//int tx = threadIdx.x;
//const int tid = threadIdx.x;
const int idx = threadIdx.x + blockIdx.x*blockDim.x;
if(idx < nRowsA)
{
double Ab_col = 0;
const double * Acol = A+idx*N_COLS;
#pragma unroll
for (int k = 0; k < N_COLS; ++k) {
Ab_col += Acol[k] * b[k];
}
// Write to device memory;
x[idx] = Ab_col;
}
}
"""
def __init__(self,nRowsA,nColsA,my_dtype=np.float64):
"""
Initializes a MatTimesVec object.
When the object is called, it computes
A_gpu.dot(b_gpu) and stores the result in out_gpu where
A_gpu.shape = (self.nRowsA,self.nColsA)
b_gpu.shape = (self.nColsA,)
out_gpu.shape = (self.nRowsA,)
"""
self.nRowsA = nRowsA
self.nColsA = nColsA
self._kernel = self.__kernel
# self._kernel =(' '*4 + '#define N_ROWS {} // seems we do not use it since we pass N\n'.format(nRowsA)
# +self._kernel)
self._kernel =(' '*4 + '#define N_COLS {}\n'.format(nColsA)
+self._kernel)
if my_dtype == np.float64:
pass
elif my_dtype == np.float32:
self._kernel = self._kernel.replace('double','float')
else:
raise NotImplementedError
# compile the kernel code
mod = compiler.SourceModule(self._kernel)
# get the kernel function from the compiled module
matrixmul = mod.get_function("mat_vec_krnl")
threadsPerBlock = 256
nBlocks = int(np.ceil(float(nRowsA) / float(threadsPerBlock)))
self._matrixmul = matrixmul
self._nBlocks = nBlocks
self._threadsPerBlock = threadsPerBlock
self._my_dtype = my_dtype
def __repr__(self):
s="A wrapper around the cuda kernel below.\n\n"
return s + self._kernel
def __call__(self,A_gpu,b_gpu,out_gpu,do_checks=True):
"""
Computes A_gpu.dot(b_gpu).
A_gpu.shape = (self.nRowsA,self.nColsA)
b_gpu.shape = (self.nColsA,)
out_gpu.shape = (self.nRowsA,)
Modifies out_gpu.
"""
nRowsA,nColsA = self.nRowsA,self.nColsA
if do_checks:
#types
if not isinstance(A_gpu,gpuarray.GPUArray):
raise TypeError(type(A_gpu))
if not isinstance(b_gpu,gpuarray.GPUArray):
raise TypeError(type(b_gpu))
if not isinstance(out_gpu,gpuarray.GPUArray):
raise TypeError(type(out_gpu))
# nDims
if len(A_gpu.shape)!=2:
raise ValueError(A_gpu.shape)
if len(b_gpu.shape)!=1:
raise ValueError(b_gpu.shape)
if len(out_gpu.shape)!=1:
raise ValueError(out_gpu.shape)
# shapes
if A_gpu.shape != (nRowsA,nColsA):
raise ValueError(A_gpu.shape , (nRowsA,nColsA))
if len(b_gpu) != nColsA:
raise ValueError(len(b_gpu) , nColsA)
if len(out_gpu) != nRowsA:
raise ValueError(len(out_gpu) , nRowsA)
# dtypes
my_dtype=self._my_dtype
if my_dtype != A_gpu.dtype:
raise ValueError(my_dtype , A_gpu.dtype)
if my_dtype != b_gpu.dtype:
raise ValueError(my_dtype , b_gpu.dtype)
if my_dtype != out_gpu.dtype:
raise ValueError(my_dtype , out_gpu.dtype)
nBlocks = self._nBlocks
threadsPerBlock = self._threadsPerBlock
# Now to the actual work.
self._matrixmul(
# inputs
A_gpu, b_gpu,
# output
out_gpu,
# parameter
np.int32(nRowsA),
# params for pycuda
grid = (nBlocks,1,1),block = (threadsPerBlock,1,1))
if __name__ == "__main__":
nRowsA,nColsA = 640*48,100
my_dtype = np.float64
compute_cpu=True
# create a random matrix and a random vector
A_cpu = np.random.randn(nRowsA,nColsA).astype(my_dtype)
b_cpu = np.random.randn(nColsA).astype(my_dtype)
out_cpu = np.empty(nRowsA)
if compute_cpu:
tic = time.clock()
# compute reference on the CPU to verify GPU computation
np.dot(A_cpu, b_cpu,out=out_cpu)
toc = time.clock()
cpu_time = toc - tic
# transfer host (CPU) memory to device (GPU) memory
A_gpu = gpuarray.to_gpu(A_cpu)
b_gpu = gpuarray.to_gpu(b_cpu)
# create empty gpu array for the result
out_gpu = gpuarray.empty(nRowsA, my_dtype)
mat_times_vec = MatTimesVec(nRowsA,nColsA,my_dtype=my_dtype)
print "Computing A times b"
print 'A.shape:',A_gpu.shape
print 'b.shape:',b_gpu.shape
print 'Calling the GPU code'
tic = time.clock()
nIterations = 1000
for i in range(nIterations):
if i%200==0:
print 'iter=',i
mat_times_vec(A_gpu,b_gpu,out_gpu,do_checks=True)
toc = time.clock()
print 'Done'
gpu_time = (toc-tic)/nIterations
print 'GPU time (mean over #iterations={0}):'.format(nIterations),gpu_time
if compute_cpu:
print 'CPU time:',cpu_time
print "cpu_time / gpu_time:",cpu_time/gpu_time
# print the results
#print "-" * 80
#print "Matrix A (GPU):"
#print A_gpu.get()
#print "-" * 80
#print "Matrix B (GPU):"
#print b_gpu.get()
#print "-" * 80
#print "Matrix C (GPU):"
#print out_gpu.get()
if compute_cpu:
print "-" * 80
# print "CPU-GPU difference:"
# print out_cpu - out_gpu.get()
print
tic =time.clock()
out_gpu_get = out_gpu.get()
toc =time.clock()
print "Time for getting result back to cpu:",toc-tic
print
print 'np.allclose(out_cpu, out_gpu.get()) =',np.allclose(out_cpu, out_gpu_get)
|
|
"""The test for sensor device automation."""
import pytest
import homeassistant.components.automation as automation
from homeassistant.components.sensor import DOMAIN
from homeassistant.components.sensor.device_condition import ENTITY_CONDITIONS
from homeassistant.const import CONF_PLATFORM, STATE_UNKNOWN
from homeassistant.helpers import device_registry
from homeassistant.setup import async_setup_component
from tests.common import (
MockConfigEntry,
async_get_device_automation_capabilities,
async_get_device_automations,
async_mock_service,
mock_device_registry,
mock_registry,
)
from tests.testing_config.custom_components.test.sensor import DEVICE_CLASSES
@pytest.fixture
def device_reg(hass):
"""Return an empty, loaded, registry."""
return mock_device_registry(hass)
@pytest.fixture
def entity_reg(hass):
"""Return an empty, loaded, registry."""
return mock_registry(hass)
@pytest.fixture
def calls(hass):
"""Track calls to a mock service."""
return async_mock_service(hass, "test", "automation")
async def test_get_conditions(hass, device_reg, entity_reg):
"""Test we get the expected conditions from a sensor."""
platform = getattr(hass.components, f"test.{DOMAIN}")
platform.init()
config_entry = MockConfigEntry(domain="test", data={})
config_entry.add_to_hass(hass)
device_entry = device_reg.async_get_or_create(
config_entry_id=config_entry.entry_id,
connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
)
for device_class in DEVICE_CLASSES:
entity_reg.async_get_or_create(
DOMAIN,
"test",
platform.ENTITIES[device_class].unique_id,
device_id=device_entry.id,
)
assert await async_setup_component(hass, DOMAIN, {DOMAIN: {CONF_PLATFORM: "test"}})
expected_conditions = [
{
"condition": "device",
"domain": DOMAIN,
"type": condition["type"],
"device_id": device_entry.id,
"entity_id": platform.ENTITIES[device_class].entity_id,
}
for device_class in DEVICE_CLASSES
for condition in ENTITY_CONDITIONS[device_class]
if device_class != "none"
]
conditions = await async_get_device_automations(hass, "condition", device_entry.id)
assert conditions == expected_conditions
async def test_get_condition_capabilities(hass, device_reg, entity_reg):
"""Test we get the expected capabilities from a sensor condition."""
platform = getattr(hass.components, f"test.{DOMAIN}")
platform.init()
config_entry = MockConfigEntry(domain="test", data={})
config_entry.add_to_hass(hass)
device_entry = device_reg.async_get_or_create(
config_entry_id=config_entry.entry_id,
connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
)
entity_reg.async_get_or_create(
DOMAIN,
"test",
platform.ENTITIES["battery"].unique_id,
device_id=device_entry.id,
)
assert await async_setup_component(hass, DOMAIN, {DOMAIN: {CONF_PLATFORM: "test"}})
expected_capabilities = {
"extra_fields": [
{
"description": {"suffix": "%"},
"name": "above",
"optional": True,
"type": "float",
},
{
"description": {"suffix": "%"},
"name": "below",
"optional": True,
"type": "float",
},
]
}
conditions = await async_get_device_automations(hass, "condition", device_entry.id)
assert len(conditions) == 1
for condition in conditions:
capabilities = await async_get_device_automation_capabilities(
hass, "condition", condition
)
assert capabilities == expected_capabilities
async def test_get_condition_capabilities_none(hass, device_reg, entity_reg):
"""Test we get the expected capabilities from a sensor condition."""
platform = getattr(hass.components, f"test.{DOMAIN}")
platform.init()
config_entry = MockConfigEntry(domain="test", data={})
config_entry.add_to_hass(hass)
assert await async_setup_component(hass, DOMAIN, {DOMAIN: {CONF_PLATFORM: "test"}})
conditions = [
{
"condition": "device",
"device_id": "8770c43885354d5fa27604db6817f63f",
"domain": "sensor",
"entity_id": "sensor.beer",
"type": "is_battery_level",
},
{
"condition": "device",
"device_id": "8770c43885354d5fa27604db6817f63f",
"domain": "sensor",
"entity_id": platform.ENTITIES["none"].entity_id,
"type": "is_battery_level",
},
]
expected_capabilities = {}
for condition in conditions:
capabilities = await async_get_device_automation_capabilities(
hass, "condition", condition
)
assert capabilities == expected_capabilities
async def test_if_state_not_above_below(hass, calls, caplog):
"""Test for bad value conditions."""
platform = getattr(hass.components, f"test.{DOMAIN}")
platform.init()
assert await async_setup_component(hass, DOMAIN, {DOMAIN: {CONF_PLATFORM: "test"}})
sensor1 = platform.ENTITIES["battery"]
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: [
{
"trigger": {"platform": "event", "event_type": "test_event1"},
"condition": [
{
"condition": "device",
"domain": DOMAIN,
"device_id": "",
"entity_id": sensor1.entity_id,
"type": "is_battery_level",
}
],
"action": {"service": "test.automation"},
}
]
},
)
assert "must contain at least one of below, above" in caplog.text
async def test_if_state_above(hass, calls):
"""Test for value conditions."""
platform = getattr(hass.components, f"test.{DOMAIN}")
platform.init()
assert await async_setup_component(hass, DOMAIN, {DOMAIN: {CONF_PLATFORM: "test"}})
sensor1 = platform.ENTITIES["battery"]
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: [
{
"trigger": {"platform": "event", "event_type": "test_event1"},
"condition": [
{
"condition": "device",
"domain": DOMAIN,
"device_id": "",
"entity_id": sensor1.entity_id,
"type": "is_battery_level",
"above": 10,
}
],
"action": {
"service": "test.automation",
"data_template": {
"some": "{{ trigger.%s }}"
% "}} - {{ trigger.".join(("platform", "event.event_type"))
},
},
}
]
},
)
await hass.async_block_till_done()
assert hass.states.get(sensor1.entity_id).state == STATE_UNKNOWN
assert len(calls) == 0
hass.bus.async_fire("test_event1")
await hass.async_block_till_done()
assert len(calls) == 0
hass.states.async_set(sensor1.entity_id, 9)
hass.bus.async_fire("test_event1")
await hass.async_block_till_done()
assert len(calls) == 0
hass.states.async_set(sensor1.entity_id, 11)
hass.bus.async_fire("test_event1")
await hass.async_block_till_done()
assert len(calls) == 1
assert calls[0].data["some"] == "event - test_event1"
async def test_if_state_below(hass, calls):
"""Test for value conditions."""
platform = getattr(hass.components, f"test.{DOMAIN}")
platform.init()
assert await async_setup_component(hass, DOMAIN, {DOMAIN: {CONF_PLATFORM: "test"}})
sensor1 = platform.ENTITIES["battery"]
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: [
{
"trigger": {"platform": "event", "event_type": "test_event1"},
"condition": [
{
"condition": "device",
"domain": DOMAIN,
"device_id": "",
"entity_id": sensor1.entity_id,
"type": "is_battery_level",
"below": 10,
}
],
"action": {
"service": "test.automation",
"data_template": {
"some": "{{ trigger.%s }}"
% "}} - {{ trigger.".join(("platform", "event.event_type"))
},
},
}
]
},
)
await hass.async_block_till_done()
assert hass.states.get(sensor1.entity_id).state == STATE_UNKNOWN
assert len(calls) == 0
hass.bus.async_fire("test_event1")
await hass.async_block_till_done()
assert len(calls) == 0
hass.states.async_set(sensor1.entity_id, 11)
hass.bus.async_fire("test_event1")
await hass.async_block_till_done()
assert len(calls) == 0
hass.states.async_set(sensor1.entity_id, 9)
hass.bus.async_fire("test_event1")
await hass.async_block_till_done()
assert len(calls) == 1
assert calls[0].data["some"] == "event - test_event1"
async def test_if_state_between(hass, calls):
"""Test for value conditions."""
platform = getattr(hass.components, f"test.{DOMAIN}")
platform.init()
assert await async_setup_component(hass, DOMAIN, {DOMAIN: {CONF_PLATFORM: "test"}})
sensor1 = platform.ENTITIES["battery"]
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: [
{
"trigger": {"platform": "event", "event_type": "test_event1"},
"condition": [
{
"condition": "device",
"domain": DOMAIN,
"device_id": "",
"entity_id": sensor1.entity_id,
"type": "is_battery_level",
"above": 10,
"below": 20,
}
],
"action": {
"service": "test.automation",
"data_template": {
"some": "{{ trigger.%s }}"
% "}} - {{ trigger.".join(("platform", "event.event_type"))
},
},
}
]
},
)
await hass.async_block_till_done()
assert hass.states.get(sensor1.entity_id).state == STATE_UNKNOWN
assert len(calls) == 0
hass.bus.async_fire("test_event1")
await hass.async_block_till_done()
assert len(calls) == 0
hass.states.async_set(sensor1.entity_id, 9)
hass.bus.async_fire("test_event1")
await hass.async_block_till_done()
assert len(calls) == 0
hass.states.async_set(sensor1.entity_id, 11)
hass.bus.async_fire("test_event1")
await hass.async_block_till_done()
assert len(calls) == 1
assert calls[0].data["some"] == "event - test_event1"
hass.states.async_set(sensor1.entity_id, 21)
hass.bus.async_fire("test_event1")
await hass.async_block_till_done()
assert len(calls) == 1
hass.states.async_set(sensor1.entity_id, 19)
hass.bus.async_fire("test_event1")
await hass.async_block_till_done()
assert len(calls) == 2
assert calls[1].data["some"] == "event - test_event1"
|
|
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (nested_scopes, generators, division, absolute_import, with_statement,
print_function, unicode_literals)
from collections import defaultdict
import os
import shutil
import time
from pants import binary_util
from pants.backend.jvm.ivy_utils import IvyUtils
from pants.backend.jvm.tasks.ivy_task_mixin import IvyTaskMixin
from pants.backend.jvm.tasks.jvm_tool_task_mixin import JvmToolTaskMixin
from pants.backend.jvm.tasks.nailgun_task import NailgunTask
from pants.base.cache_manager import VersionedTargetSet
from pants.base.exceptions import TaskError
from pants.ivy.bootstrapper import Bootstrapper
from pants.util.dirutil import safe_mkdir
class IvyResolve(NailgunTask, IvyTaskMixin, JvmToolTaskMixin):
_CONFIG_SECTION = 'ivy-resolve'
@classmethod
def register_options(cls, register):
super(IvyResolve, cls).register_options(register)
register('--override', action='append',
help='Specifies a jar dependency override in the form: '
'[org]#[name]=(revision|url) '
'Multiple overrides can be specified using repeated invocations of this flag. '
'For example, to specify 2 overrides: '
'--override=com.foo#bar=0.1.2 '
'--override=com.baz#spam=file:///tmp/spam.jar ')
register('--report', action='store_true', default=False,
help='Generate an ivy resolve html report')
register('--open', action='store_true', default=False,
help='Attempt to open the generated ivy resolve report '
'in a browser (implies --report)')
register('--outdir', help='Emit ivy report outputs in to this directory.')
register('--args', action='append',
help='Pass these extra args to ivy.')
register('--mutable-pattern',
help='If specified, all artifact revisions matching this pattern will be treated as '
'mutable unless a matching artifact explicitly marks mutable as False.')
@classmethod
def product_types(cls):
return ['ivy_jar_products', 'jar_dependencies']
def __init__(self, *args, **kwargs):
super(IvyResolve, self).__init__(*args, **kwargs)
self._ivy_bootstrapper = Bootstrapper.instance()
self._cachedir = self._ivy_bootstrapper.ivy_cache_dir
self._confs = self.context.config.getlist(self._CONFIG_SECTION, 'confs', default=['default'])
self._classpath_dir = os.path.join(self.workdir, 'mapped')
self._outdir = self.get_options().outdir or os.path.join(self.workdir, 'reports')
self._open = self.get_options().open
self._report = self._open or self.get_options().report
self._ivy_bootstrap_key = 'ivy'
self.register_jvm_tool_from_config(self._ivy_bootstrap_key, self.context.config,
ini_section=self._CONFIG_SECTION,
ini_key='bootstrap-tools',
default=['//:xalan'])
self._ivy_utils = IvyUtils(config=self.context.config, log=self.context.log)
# Typically this should be a local cache only, since classpaths aren't portable.
self.setup_artifact_cache_from_config(config_section=self._CONFIG_SECTION)
@property
def config_section(self):
return self._CONFIG_SECTION
def prepare(self, round_manager):
round_manager.require_data('exclusives_groups')
def execute(self):
"""Resolves the specified confs for the configured targets and returns an iterator over
tuples of (conf, jar path).
"""
groups = self.context.products.get_data('exclusives_groups')
executor = self.create_java_executor()
targets = self.context.targets()
# Below, need to take the code that actually execs ivy, and invoke it once for each
# group. Then after running ivy, we need to take the resulting classpath, and load it into
# the build products.
# The set of groups we need to consider is complicated:
# - If there are no conflicting exclusives (ie, there's only one entry in the map),
# then we just do the one.
# - If there are conflicts, then there will be at least three entries in the groups map:
# - the group with no exclusives (X)
# - the two groups that are in conflict (A and B).
# In the latter case, we need to do the resolve twice: Once for A+X, and once for B+X,
# because things in A and B can depend on things in X; and so they can indirectly depend
# on the dependencies of X.
    # (I think this will be covered by the computed transitive dependencies of
# A and B. But before pushing this change, review this comment, and make sure that this is
# working correctly.)
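    # Hypothetical illustration of the above: if get_group_keys() yields keys
    # for conflicting groups A and B (each group's target set already
    # containing the no-exclusives group X), the loop below runs ivy_resolve
    # once for A+X and once for B+X, yielding one classpath per group key.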
for group_key in groups.get_group_keys():
# Narrow the groups target set to just the set of targets that we're supposed to build.
# Normally, this shouldn't be different from the contents of the group.
group_targets = groups.get_targets_for_group_key(group_key) & set(targets)
# NOTE(pl): The symlinked ivy.xml (for IDEs, particularly IntelliJ) in the presence of
# multiple exclusives groups will end up as the last exclusives group run. I'd like to
# deprecate this eventually, but some people rely on it, and it's not clear to me right now
# whether telling them to use IdeaGen instead is feasible.
classpath = self.ivy_resolve(group_targets,
executor=executor,
symlink_ivyxml=True,
workunit_name='ivy-resolve')
if self.context.products.is_required_data('ivy_jar_products'):
self._populate_ivy_jar_products(group_targets)
for conf in self._confs:
# It's important we add the full classpath as an (ordered) unit for code that is classpath
# order sensitive
classpath_entries = map(lambda entry: (conf, entry), classpath)
groups.update_compatible_classpaths(group_key, classpath_entries)
if self._report:
self._generate_ivy_report(group_targets)
# TODO(ity): populate a Classpath object instead of mutating exclusives_groups
create_jardeps_for = self.context.products.isrequired('jar_dependencies')
if create_jardeps_for:
genmap = self.context.products.get('jar_dependencies')
for target in filter(create_jardeps_for, targets):
# TODO: Add mapjars to IvyTaskMixin? Or get rid of the mixin? It's weird that we use
# self.ivy_resolve for some ivy invocations but this for others.
self._ivy_utils.mapjars(genmap, target, executor=executor,
workunit_factory=self.context.new_workunit)
def check_artifact_cache_for(self, invalidation_check):
# Ivy resolution is an output dependent on the entire target set, and is not divisible
# by target. So we can only cache it keyed by the entire target set.
global_vts = VersionedTargetSet.from_versioned_targets(invalidation_check.all_vts)
return [global_vts]
def _populate_ivy_jar_products(self, targets):
"""Populate the build products with an IvyInfo object for each generated ivy report."""
ivy_products = self.context.products.get_data('ivy_jar_products') or defaultdict(list)
for conf in self._confs:
ivyinfo = IvyUtils.parse_xml_report(targets, conf)
if ivyinfo:
# Value is a list, to accommodate multiple exclusives groups.
ivy_products[conf].append(ivyinfo)
self.context.products.safe_create_data('ivy_jar_products', lambda: ivy_products)
def _generate_ivy_report(self, targets):
def make_empty_report(report, organisation, module, conf):
no_deps_xml_template = """
<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="ivy-report.xsl"?>
<ivy-report version="1.0">
<info
organisation="%(organisation)s"
module="%(module)s"
revision="latest.integration"
conf="%(conf)s"
confs="%(conf)s"
date="%(timestamp)s"/>
</ivy-report>
"""
no_deps_xml = no_deps_xml_template % dict(organisation=organisation,
module=module,
conf=conf,
timestamp=time.strftime('%Y%m%d%H%M%S'))
with open(report, 'w') as report_handle:
print(no_deps_xml, file=report_handle)
classpath = self.tool_classpath(self._ivy_bootstrap_key, self.create_java_executor())
reports = []
org, name = IvyUtils.identify(targets)
xsl = os.path.join(self._cachedir, 'ivy-report.xsl')
# Xalan needs this dir to exist - ensure that, but do no more - we have no clue where this
# points.
safe_mkdir(self._outdir, clean=False)
for conf in self._confs:
params = dict(org=org, name=name, conf=conf)
xml = IvyUtils.xml_report_path(targets, conf)
if not os.path.exists(xml):
make_empty_report(xml, org, name, conf)
out = os.path.join(self._outdir, '%(org)s-%(name)s-%(conf)s.html' % params)
args = ['-IN', xml, '-XSL', xsl, '-OUT', out]
if 0 != self.runjava(classpath=classpath, main='org.apache.xalan.xslt.Process',
args=args, workunit_name='report'):
raise TaskError
reports.append(out)
css = os.path.join(self._outdir, 'ivy-report.css')
if os.path.exists(css):
os.unlink(css)
shutil.copy(os.path.join(self._cachedir, 'ivy-report.css'), self._outdir)
if self._open:
binary_util.ui_open(*reports)
|
|
"""
LRU caches
- LRUCache: base class. No constraints. Will grow indefinitely.
- FixedSizeLRUCache: fixed number of entries in the cache.
- MemSizeLRUCache: fixed amount of memory (according to getsize)
Not thread safe. Could be made thread-safe easily with a lock
but that's overhead, and if we're not using multi-threading
then why take the hit?
"""
import sys
import dllist
class LRUCache(object):
def __init__(self, initial_cache=None):
"""
>>> c = LRUCache()
>>> c.stats()['size']
0
>>> c['key1'] = 'value1'
>>> c.keys()
['key1']
>>> c['key1']
'value1'
>>> del c['key1']
"""
self._cache = initial_cache or {}
self._size = 0
self._hits = 0
self._misses = 0
self._order = dllist.DLL()
def get(self, key):
"""
>>> c = LRUCache()
>>> c.get('toto')
Traceback (most recent call last):
...
KeyError: 'toto'
>>> c.stats()['misses']
1
>>> c.put('toto', 'tata')
>>> c.get('toto')
'tata'
>>> c.stats()['hits']
1
"""
try:
value = self._cache[key]
self._order.push(key)
self._hits += 1
return value
        except KeyError:
self._misses += 1
raise
def put(self, key, value):
"""
>>> c = LRUCache()
>>> c.put(1, 'one')
>>> c.get(1)
'one'
>>> c.size()
1
>>> c.put(2, 'two')
>>> c.put(3, 'three')
>>> c.put(4, 'four')
>>> c.put(5, 'five')
>>> c.get(5)
'five'
>>> c.size()
5
"""
self._cache[key] = value
self._order.push(key)
self._size += 1
def delete(self, key):
"""
>>> c = LRUCache()
>>> c.put(1, 'one')
>>> c.get(1)
'one'
>>> c.delete(1)
>>> c.get(1)
Traceback (most recent call last):
...
KeyError: 1
>>> c.delete(1)
Traceback (most recent call last):
...
KeyError: 1
"""
del self._cache[key]
self._order.delete(key)
self._size -= 1
def list(self):
"""
>>> c = LRUCache()
>>> c.put(1, 'one')
>>> c.put(2, 'two')
>>> sorted(c.list())
[1, 2]
"""
return self._cache.keys()
def last(self):
return self._order.last().value
def size(self):
"""
>>> c = LRUCache()
>>> c.size()
0
"""
return self._size
def stats(self):
"""
>>> c = LRUCache()
>>> sorted(c.stats().keys())
['hits', 'misses', 'size']
"""
return {'size': self._size, 'hits': self._hits, 'misses': self._misses}
# Mapping interface
def __getitem__(self, key):
return self.get(key)
def __setitem__(self, key, value):
return self.put(key, value)
def __delitem__(self, key):
return self.delete(key)
def keys(self):
return self.list()
class MemSizeLRUCache(LRUCache):
"""A fixed memory size LRU cache.
"""
def __init__(self, maxmem=64*1024):
"""
>>> c = MemSizeLRUCache()
"""
super(MemSizeLRUCache, self).__init__()
self._maxmem = maxmem
self._mem = 0
def mem(self):
"""
>>> c = MemSizeLRUCache()
>>> c.mem()
0
"""
return self._mem
def put(self, key, value):
"""
>>> c = MemSizeLRUCache(maxmem=24*4)
>>> c.put(1, 1)
>>> c.mem() # 24-bytes per integer
24
>>> c.put(2, 2)
>>> c.put(3, 3)
>>> c.put(4, 4)
>>> c.get(1)
1
>>> c.mem()
96
>>> c.size()
4
>>> c.put(5, 5)
>>> c.size()
4
>>> c.get(2)
Traceback (most recent call last):
...
KeyError: 2
"""
        mem = sys.getsizeof(value)
        # Evict least-recently-used entries until there is room; a single
        # eviction may not free enough when cached values differ in size.
        while self._mem + mem > self._maxmem:
            self.delete(self.last())
LRUCache.put(self, key, (value, mem))
self._mem += mem
def get(self, key):
(value, _mem) = LRUCache.get(self, key)
return value
def delete(self, key):
"""
>>> c = MemSizeLRUCache()
>>> c.put(1, 1)
>>> c.mem()
24
>>> c.delete(1)
>>> c.mem()
0
"""
(_value, mem) = LRUCache.get(self, key)
self._mem -= mem
LRUCache.delete(self, key)
class FixedSizeLRUCache(LRUCache):
"""LRU cache with maximum number of entries.
"""
def __init__(self, maxsize=1024):
super(FixedSizeLRUCache, self).__init__()
self._maxsize = maxsize
def put(self, key, value):
"""
>>> c = FixedSizeLRUCache(maxsize=5)
>>> c.put(1, 'one')
>>> c.get(1)
'one'
>>> c.size()
1
>>> c.put(2, 'two')
>>> c.put(3, 'three')
>>> c.put(4, 'four')
>>> c.put(5, 'five')
>>> c.get(5)
'five'
>>> c.size()
5
>>> c.put(6, 'six')
>>> c.size()
5
>>> c.get(1)
Traceback (most recent call last):
...
KeyError: 1
>>> c.get(2)
'two'
>>> c.put(7, 'seven')
>>> c.get(2)
'two'
>>> c.get(3)
Traceback (most recent call last):
...
KeyError: 3
"""
# check if we're maxed out first
if self.size() == self._maxsize:
# need to kick something out...
self.delete(self.last())
LRUCache.put(self, key, value)
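if __name__ == '__main__':
    # Not in the original module: run the doctests above, mirroring the
    # doctest.testmod() convention used elsewhere in this codebase.
    import doctest
    doctest.testmod()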
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TensorFlow Debugger (tfdbg) Stepper Module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.debug import debug_data
from tensorflow.python.framework import ops
from tensorflow.python.ops import session_ops
class NodeStepper(object):
"""TensorFlow Debugger (tfdbg) stepper.
  The stepper provides the ability to perform "continue to" actions on a graph,
given fetch and feeds. The stepper calculates the transitive closure of the
fetch. cont() (continue to) calls can only be performed on members of the
transitive closure.
On a cont() call, the stepper performs depth-first tracing of the input
tree of the target. When it reaches an input where one of the following is
available, it will supply the available value to the feed_dict of the cont()
call:
(1) TensorHandles from previous cont() calls.
(2) Overriding (injected) values from the client.
(3) Feeds supplied during the construction of the stepper instance.
Once the tracing is complete, it will issue a run() call on the
underlying session, using the aforementioned feed_dict prepared by the input
tracing, to achieve the "continue-to" action. The above process takes into
account whether the transitive closure of an input contains Variables that
are updated during previous cont() calls on this stepper instance. If such
updates exist, we say the transitive closure is "dirty" and the stepper
can restore the "clean" state of the Variable and avoid using the
TensorHandle.
Example of basic usage:
a = tf.Variable(1.0, name="a")
b = tf.Variable(2.0, anme="b")
c = tf.add(a, b, name="c")
d = tf.mul(a, c, name="d")
sess = tf.Session()
    sess.run(tf.initialize_all_variables())
stepper = NodeStepper(sess, d)
stepper.cont(c) # Caches the handle to Tensor c:0.
stepper.cont(d) # Uses handle to Tensor c:0, avoiding recomputing c.
"""
# Possible types of feed used during cont() calls.
FEED_TYPE_CLIENT = "client"
FEED_TYPE_HANDLE = "handle"
FEED_TYPE_OVERRIDE = "override"
# TODO(cais): The following member constant is currently unused. Use it when
# the stepper is capable of using dumped intermediate tensors.
FEED_TYPE_INTERMEDIATE = "intermediate"
def __init__(self, sess, fetch, feed_dict=None):
"""Constructor for Debugger.
Args:
sess: (Session) the TensorFlow Session to step in.
fetch: (str or TensorFlow graph element) A single fetched Tensor or Op,
or a name (str) representing the Tensor or Op. In the case of a name
str, the graph will be searched to find the corresponding Tensor or Op.
feed_dict: (dict or None) feed dict to be used in this stepper instance.
TODO(cais): Currently the stepper supports a single fetch. Support list,
tuple or dict of feeds, as in the Session run() interface.
"""
self._sess = sess
if isinstance(fetch, str):
# Fetch target is a string. Assume it is the name of the Tensor or Op and
# will attempt to find it in the Session's graph.
self._fetch_name = fetch
elif isinstance(fetch, list) or isinstance(fetch, tuple) or isinstance(
fetch, dict):
raise NotImplementedError(
"list, tuple or dict fetches are not supported yet.")
else:
self._fetch_name = fetch.name
self._fetch = self._sess.graph.as_graph_element(self._fetch_name)
# A map from Variable name to initializer op.
self._variable_initializers = {}
# A map from Variable name to initial value, used when overriding or
# restoring Variable values.
self._variable_initial_values = {}
# Initialize the map for output recipients (targets).
self._non_control_output_targets = {}
# Sorted transitive closure of the fetched node.
self._sorted_transitive_closure = self._dfs_visit(self._sess.graph,
self._fetch)
self._transitive_closure_set = set(self._sorted_transitive_closure)
# A map from Variable name to the old values (before any cont() calls).
self._cached_variable_values = {}
# A cache map from tensor name to what variables may invalidate the tensor
self._cached_invalidation_path = {}
# Keep track of which variables are in a dirty state.
self._dirty_variables = set()
# Cached tensor handles: a dict with keys as tensor names and values as
# tensor handles.
self._tensor_handles = {}
# Feed dict from the client.
self._client_feed_dict = feed_dict
if not self._client_feed_dict:
self._client_feed_dict = {}
# Overriding tensor values.
self._override_tensors = {}
# What the feed types were used by the last cont() call.
self._last_feed_types = {}
def _dfs_visit(self, graph, elem):
"""Trace back the input of a graph element, using depth-first search.
Uses non-recursive implementation to prevent stack overflow for deep
graphs.
Also performs the following action(s):
1) When encountering a Variable, obtain its initializer op, to
facilitate possible subsequent restoration / overriding of variable
value.
Args:
graph: A TF graph instance.
elem: A graph element: a Tensor or an Operation.
Returns:
(list of str) A topologically-sorted list of all graph element names
in the transitive closure of elem. Obviously, the topological sort is
not unique in general. The return value here is just an arbitrary one
of potentially many possible topological sorts.
"""
    # This set should hold only strings, i.e., names of the nodes.
done = set() # Keep track of visited nodes.
# A list of str: Names of the topologically-sorted graph elements.
sorted_node_list = [elem.name]
elem_stack = [elem]
while elem_stack:
curr_elem = elem_stack.pop()
curr_node = self._get_node(curr_elem)
done.add(curr_node.name)
non_control_inputs = [inp for inp in curr_node.inputs]
control_inputs = [inp for inp in curr_node.control_inputs]
all_inputs = set(non_control_inputs + control_inputs)
# Iterate through the (non-control) inputs.
for inp in all_inputs:
is_non_control_input = inp in non_control_inputs
# Set up the non-control output map.
if is_non_control_input:
if inp.name not in self._non_control_output_targets:
self._non_control_output_targets[inp.name] = set([curr_elem.name])
else:
self._non_control_output_targets[inp.name].add(curr_elem.name)
if (inp.op.type == "Variable" and
inp.name not in self._variable_initializers):
# Obtain the initializer op of the variable, in case the Variable's
# value needs to be restored later.
initializer = graph.as_graph_element(inp.op.name + "/Assign")
self._variable_initializers[inp.name] = initializer
self._variable_initial_values[inp.name] = initializer.inputs[1]
inp_node = self._get_node(inp)
if inp_node.name in done:
# Already visited.
continue
elem_stack.append(inp)
sorted_node_list.append(inp.name)
sorted_node_list.reverse()
return sorted_node_list
def sorted_transitive_closure(self):
"""Get a sorted list of transitive inputs to the fetch of the stepper.
Returns:
(list of str): Sorted transitive inputs to the fetch of the stepper
instance. The fetch itself is included in the list.
"""
return self._sorted_transitive_closure
def is_feedable(self, name):
"""Determine if a graph element if feedable.
Args:
name: (str) name of the graph element (Tensor or Operation)
Returns:
(bool) whether the graph element is feedable.
"""
if not isinstance(name, str):
raise TypeError("Expected type str; got type %s" % type(name))
elem = self._sess.graph.as_graph_element(name)
return self._sess.graph.is_feedable(elem)
def override_tensor(self, tensor_name, overriding_val):
"""Override the value of a tensor.
Args:
tensor_name: (str) Name of the tensor to override.
overriding_val: (numpy.ndarray) Overriding tensor value.
Raises:
ValueError: If tensor_name does not correspond to a tensor in the input
tree to the fetched graph element of this stepper instance.
"""
if not isinstance(tensor_name, str):
raise TypeError("Expected type str; got type %s" % type(tensor_name))
if tensor_name not in self._transitive_closure_set:
raise ValueError(
"Cannot override tensor \"%s\" because it does not exist in the "
"input tree to the fetch \"%s\"" % (tensor_name, self._fetch_name))
self._override_tensors[tensor_name] = overriding_val
# Invalidate cache by tracing outputs.
self._invalidate_transitively_outgoing_cache(tensor_name)
def remove_override(self, tensor_name):
"""Remove the overriding value on a tensor.
Args:
tensor_name: (str) name of the tensor to remove the overriding value
from.
Raises:
ValueError: If no overriding value exists for tensor_name.
"""
if tensor_name not in self._override_tensors:
raise ValueError("No overriding value exists for tensor \"%s\"." %
tensor_name)
del self._override_tensors[tensor_name]
# Invalidate cache by tracing outputs.
self._invalidate_transitively_outgoing_cache(tensor_name)
def last_feed_types(self):
"""Obtain information about the feed in the last cont() call.
Returns:
(dict) A dict mapping tensor names to feed types.
"""
return self._last_feed_types
def cont(self,
target,
use_tensor_handles=True,
use_overrides=True,
restore_variable_values=False):
"""Continue till the completion of the specified target tensor.
Args:
target: A single fetched Tensor or Op, or a name (str) representing the
Tensor or Op. In the case of a name str, the graph will be searched
to find the corresponding Tensor or Op.
# TODO(cais): Support multiple fetches as in Session.run() interface.
use_tensor_handles: (bool) Whether this cont() run will use cached tensor
handles to avoid recomputation. Default: True.
use_overrides: (bool) Whether the overriding tensor values supplied by
the client are to be used in this cont() call. Default: True.
restore_variable_values: (bool) Whether the old values of the variables
(before any cont() calls in this object) are to be restored.
Returns:
Value from Session.run() of the target.
Raises:
ValueError: If the target is specified as a string and the string does
not correspond to any tensors in the Session graph.
Or if the target of this cont() is not in the input list of the Stepper
object's target.
Or if target is a Placeholder.
"""
self._last_feed_types = {}
if isinstance(target, str):
# Fetch target is a string. Assume it is the name of the Tensor or Op and
# will attempt to find it in the Session's graph.
target_name = target
else:
target_name = target.name
graph_element = self._sess.graph.as_graph_element(target_name)
if (isinstance(graph_element, ops.Tensor) and
graph_element.op.type == "Placeholder"):
raise ValueError("Should not call cont() on a Placeholder")
# Verify that the target is in the transitive closure of the stepper's
# fetch.
if target_name not in self._transitive_closure_set:
raise ValueError(
"Target \"%s\" is not in the transitive closure for the fetch of the "
"stepper: \"%s\"." % (target_name, self._fetch_name))
# Check if a cached tensor handle can be used on the fetch directly.
if use_tensor_handles and target_name in self._tensor_handles:
self._last_feed_types[target_name] = self.FEED_TYPE_HANDLE
return self._tensor_handles[target_name].eval()
# Check if an overriding tensor value can be used directly.
if use_overrides and target_name in self._override_tensors:
# Override is available. Return the value right away.
self._last_feed_types[target_name] = self.FEED_TYPE_OVERRIDE
return self._override_tensors[target_name]
# The feeds to be used in the Session.run() call.
feeds = {}
# Keep track of which variables are restored in this cont() call.
restored_variables = set()
# Keep track of which variables are "touched" (i.e., possibly updated) in
# this cont() call.
touched_variables = set()
# =========================================================================
# Use a non-recursive method to trace the inputs from the node and set up
# the feeds.
fetched = self._sess.graph.as_graph_element(target_name)
elem_stack = [fetched]
done = set()
while elem_stack:
curr_elem = elem_stack.pop()
curr_node = self._get_node(curr_elem)
done.add(curr_node.name)
non_control_inputs = [inp for inp in curr_node.inputs]
control_inputs = [inp for inp in curr_node.control_inputs]
all_inputs = set(non_control_inputs + control_inputs)
# Iterate through the (non-control) inputs.
for inp in all_inputs:
# Determine whether the input is feedable. Reference-type tensors,
# e.g., Variables, should not be fed, because they can change.
if isinstance(inp, ops.Tensor):
is_inp_ref = inp.dtype._is_ref_dtype # pylint: disable=protected-access
can_feed = self._sess.graph.is_feedable(inp) and not is_inp_ref
else:
is_inp_ref = False
can_feed = False
if (restore_variable_values and inp.name in self._dirty_variables and
inp.name not in restored_variables and
inp.name not in touched_variables):
# Do not restore Variables touched or restored previously in this
# cont() call.
initializer_op = self._variable_initializers[inp.name]
initial_value_tensor = self._variable_initial_values[inp.name]
self._sess.run(initializer_op,
feed_dict={
initial_value_tensor:
self._cached_variable_values[inp.name]
})
# Mark the variable as restored.
restored_variables.add(inp.name)
# Determine if this is a reference-type input from a variable, and
# the recipient node is not Identity. In that case, the Variable
# needs to be marked as dirty and its current value recorded, due to
# the fact that the receiving op may mutate the value of the Variable.
if (is_inp_ref and inp.op.type == "Variable" and
curr_node.type != "Identity"):
# Mark the variable as dirty.
touched_variables.add(inp.name)
# Obtain the old value of the variable and cache it.
if inp.name not in self._cached_variable_values:
old_value = self._sess.run(inp)
self._cached_variable_values[inp.name] = old_value
# N.B.: The order of the logical branches matters. For example,
# _client_feed_dict comes after _tensor_handles, so that tensor
# handles stored in cont() calls can override the original client
        # feeds. Also, for example, _override_tensors comes first, so that
        # manual overriding, if it exists, can always take effect.
if use_overrides and can_feed and inp.name in self._override_tensors:
# Use client-supplied overriding tensor value.
feeds[inp] = self._override_tensors[inp.name]
self._last_feed_types[inp.name] = self.FEED_TYPE_OVERRIDE
elif (use_tensor_handles and can_feed and
inp.name in self._tensor_handles and inp not in feeds):
# Tensor handle found in cache.
feeds[inp] = self._tensor_handles[inp.name].eval()
self._last_feed_types[inp.name] = self.FEED_TYPE_HANDLE
elif inp in self._client_feed_dict:
# This input is available in the client feed_dict.
feeds[inp] = self._client_feed_dict[inp]
self._last_feed_types[inp.name] = self.FEED_TYPE_CLIENT
else:
# There is no feed available for this input. So keep tracing its
# input(s).
inp_node = self._get_node(inp)
if inp_node.name in done:
# Already visited.
continue
elem_stack.append(inp)
done.add(inp_node.name)
# =========================================================================
if touched_variables:
self._dirty_variables.update(touched_variables)
for variable in restored_variables:
self._dirty_variables.remove(variable)
# Prepare RunOptions for DebugTensorWatches
run_options = config_pb2.RunOptions()
# TODO(cais): Add fields for watching intermediate tensors.
    if isinstance(fetched, ops.Operation):
      # The fetched is an Operation: Will not get tensor handle.
      self._sess.run(fetched, feed_dict=feeds, options=run_options)
      # No return value for a run of an Operation.
      return_value = None
    else:
      # This is a Tensor: Will get tensor handle and cache it.
      target_handle = self._sess.run(session_ops.get_session_handle(fetched),
                                     feed_dict=feeds,
                                     options=run_options)
      self._tensor_handles[target_name] = target_handle
      return_value = target_handle.eval()
    # Invalidate caches at the end. This must happen before returning;
    # previously this loop sat after the return statements and was unreachable.
    for touched_variable in touched_variables:
      self._invalidate_transitively_outgoing_cache(touched_variable)
    return return_value
def _invalidate_transitively_outgoing_cache(self, source_element):
"""Invalidate the cached tensor handles by tracing output.
This method is used to invalidate caches such as cached TensorHandles
and intermediate tensor values when Variable mutation happens or when
client overrides tensor values.
Uses non-recursive implementation to avoid stack overflow on deep networks.
TODO(cais): Currently, only TensorHandle caches are invalidated. Invalidate
cached intermediate tensor values from dumps when dumps are added.
Args:
source_element: The source graph element (e.g., a Variable output slot)
to trace the output from.
"""
if not self._tensor_handles:
return
    # First, use cached invalidation paths to eliminate some cached tensor
    # handles. Iterate over a snapshot of the keys, because entries are
    # deleted from the dict inside the loop.
    for handle_name in list(self._tensor_handles.keys()):
      if (handle_name in self._cached_invalidation_path and
          source_element in self._cached_invalidation_path[handle_name]):
        del self._tensor_handles[handle_name]
if not self._tensor_handles:
return
stack = [source_element]
done = set()
while stack:
curr_element = stack.pop()
done.add(curr_element)
if curr_element in self._tensor_handles:
# Cache the invalidation path for potential future use.
if curr_element not in self._cached_invalidation_path:
self._cached_invalidation_path[curr_element] = set([source_element])
else:
self._cached_invalidation_path[curr_element].add(source_element)
del self._tensor_handles[curr_element]
targets = self._non_control_output_targets.get(curr_element, [])
for target in targets:
if target in done:
continue
else:
stack.append(target)
def finalize(self):
"""Run the final fetch(es).
Restore the dirty variables; ignore the client-supplied overriding tensor
values.
Returns:
The same return value as self.cont() as called on the final fetch.
"""
return self.cont(
self._fetch,
use_tensor_handles=False,
use_overrides=False,
restore_variable_values=True)
def handle_names(self):
"""Return names of the TensorHandles that the debugger is holding.
Returns:
(list of str) Name of the tensors for which TensorHandle is available.
"""
return [name for name in self._tensor_handles]
def dirty_variables(self):
"""Get the set of variables that are currently "dirty".
"dirty" means:
previous cont() calls have updated the value of the Variable,
and the Variable's old value (the value before any cont() calls
happened) was not restored.
Returns:
(set) A set of dirty variables.
"""
return self._dirty_variables
def get_tensor_value(self, tensor_name):
"""Get the value of a tensor that the stepper has access to.
Args:
tensor_name: (str) Name of the tensor.
Returns:
Value of the tensor, from overriding values or cached tensor handles.
Raises:
ValueError: If the value is not available as an overriding value
or through a TensorHandle.
"""
if tensor_name in self._override_tensors:
return self._override_tensors[tensor_name]
elif tensor_name in self._tensor_handles:
return self._tensor_handles[tensor_name].eval()
else:
raise ValueError(
"This stepper instance does not have access to the value of "
"tensor \"%s\"" % tensor_name)
def get_fetch_result(self):
return self.get_tensor_value(self._fetch_name)
def override_names(self):
"""Return names of the TensorHandles that the debugger is holding.
Returns:
(list of str) Name of the tensor for which overriding tensor values are
available.
"""
return [name for name in self._override_tensors]
def _get_node(self, element):
"""Get the node of a graph element.
Args:
element: A graph element (Op, Tensor or Node)
Returns:
The node associated with element in the graph.
"""
node_name, _ = debug_data.parse_node_or_tensor_name(element.name)
return self._sess.graph.as_graph_element(node_name)
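def _stepper_usage_sketch(sess, d, c):
  """Hedged usage sketch, not part of the original module.
  Assumes a graph like the one in the NodeStepper class docstring, where
  Tensor `d` depends on Tensor `c`. Shows how an override replaces a cached
  handle and how last_feed_types() reports what was fed.
  """
  stepper = NodeStepper(sess, d)
  stepper.cont(c)                        # Caches a handle to c:0.
  stepper.override_tensor(c.name, 42.0)  # Invalidates the cached handle.
  result = stepper.cont(d)               # Feeds the override for c:0.
  assert stepper.last_feed_types()[c.name] == NodeStepper.FEED_TYPE_OVERRIDE
  return result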
|
|
import os
import json
# Import global settings to make it easier to extend settings.
from django.conf.global_settings import *
# Import the project module to calculate directories relative to the module
# location.
PROJECT_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), '../..')
PROJECT_ROOT, PROJECT_MODULE_NAME = os.path.split(
os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
)
# List all Django apps here. Note that standard Python libraries should not
# be added to this list since Django will not recognize them as apps anyway.
# An app is really only an "app" if a `models` module or package is defined.
# Read more about projects vs. apps here:
# https://docs.djangoproject.com/en/1.3/intro/tutorial01/#creating-models
INSTALLED_APPS = (
'omop_harvest',
'south',
'serrano',
'avocado',
'modeltree',
'haystack',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.humanize',
'django.contrib.markup',
'django.contrib.messages',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.staticfiles',
'chopauth',
'registration',
)
#
# ADMINISTRATIVE
#
# Admins receive any error messages by email if DEBUG is False
ADMINS = ()
# Managers receive broken link emails if SEND_BROKEN_LINK_EMAILS is True
MANAGERS = ADMINS
# List of IP addresses which will show debug comments
INTERNAL_IPS = ('127.0.0.1', '::1')
DEBUG = True
TEMPLATE_DEBUG = DEBUG
DIR = os.path.abspath(os.path.dirname(__file__))
#
# DATABASES
# Each database can be specified here, but passwords should be in a separate
# file that is not versioned. Use ``local_settings.py``.
#
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(PROJECT_PATH, 'omop_harvest.db')
}
}
#
# LOCALITY
#
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = None
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = False
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale
USE_L10N = False
#
# STATIC AND MEDIA
# The application's static files should be placed in the STATIC_ROOT in
# addition to other static files found in third-party apps. The MEDIA_ROOT
# is intended for user uploaded files.
#
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = os.path.join(PROJECT_PATH, '_site/media')
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = '/media/'
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = os.path.join(PROJECT_PATH, '_site/static')
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# URL prefix for admin static files -- CSS, JavaScript and images.
# Make sure to use a trailing slash.
# Examples: "http://foo.com/static/admin/", "/static/admin/".
ADMIN_MEDIA_PREFIX = '/static/admin/'
# Additional locations of static files
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
STATICFILES_DIRS = ()
#
# TEMPLATES
#
# Project level templates and template directories that override
# third-party app templates.
TEMPLATE_DIRS = ()
# Context processors are simply functions that return a dict which augments the
# template context.
TEMPLATE_CONTEXT_PROCESSORS += (
'django.core.context_processors.request',
'omop_harvest.context_processors.static',
)
#
# URLS
#
# FORCE_SCRIPT_NAME overrides the interpreted 'SCRIPT_NAME' provided by the
# web server. since the URLs below are used for various purposes outside of
# the WSGI application (static and media files), these need to be updated to
# reflect this discrepancy.
FORCE_SCRIPT_NAME = ''
LOGIN_URL = FORCE_SCRIPT_NAME + '/login/'
LOGIN_REDIRECT_URL = FORCE_SCRIPT_NAME + '/query/'
LOGOUT_URL = '/logout/'
ROOT_URLCONF = 'omop_harvest.conf.urls'
# For non-publicly accessible applications, the siteauth app can be used to
# restrict access site-wide.
# SITEAUTH_ACCESS_ORDER = 'allow/deny'
#
SITEAUTH_ALLOW_URLS = (
r'^log(in|out)/',
r'^password/reset/',
r'^(register|verify)/',
)
SITEAUTH_DENY_URLS = (
r'^workspace/',
r'^workspace/discover/',
r'^query/',
r'^results/+',
r'^api/+',
r'^details/\d+/',
r'^moderate/+',
r'^verify/+',
)
#
# MIDDLEWARE
#
MIDDLEWARE_CLASSES = (
'django.middleware.gzip.GZipMiddleware',
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'siteauth.middleware.SiteAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'serrano.middleware.SessionMiddleware',
)
#
# EMAIL
#
SUPPORT_EMAIL = 'cbmisupport@email.chop.edu'
DEFAULT_FROM_EMAIL = 'cbmisupport@email.chop.edu'
EMAIL_SUBJECT_PREFIX = '[omop_harvest] '
SEND_BROKEN_LINK_EMAILS = False
#
# LOGGING
#
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
    'formatters': {
'standard': {
'format': '%(asctime)s [%(levelname)s] %(name)s: %(message)s'
}
},
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'default': {
'level':'DEBUG',
'class':'logging.handlers.RotatingFileHandler',
'filename': 'logs/omop.log',
'maxBytes': 1024*1024*5, # 5 MB
'backupCount': 5,
'formatter':'standard',
},
'request_handler': {
'level':'DEBUG',
'class':'logging.handlers.RotatingFileHandler',
'filename': 'logs/omop_request.log',
'maxBytes': 1024*1024*5, # 5 MB
'backupCount': 5,
'formatter':'standard',
},
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
},
'console':{
'level': 'DEBUG',
'class':'logging.StreamHandler',
}
},
'loggers': {
'': {
'handlers': ['default'],
'level': 'DEBUG',
'propagate': True
},
'django.request': {
'handlers': ['request_handler'],
'level': 'DEBUG',
'propagate': False
},
'avocado':{
'handlers': ['console'],
'level': 'DEBUG',
'propagate': True,
},
'serrano':{
'handlers':['console'],
'level': 'DEBUG',
'propagate':True,
},
}
}
#
# CACHE
#
# For production environments, the memcached backend is highly recommended
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': 'unique',
'KEY_PREFIX': 'omop_harvest',
'VERSION': 1,
}
}
CACHE_MIDDLEWARE_SECONDS = 0
# This is not necessary to set if the above `KEY_PREFIX` value is set since
# the `KEY_PREFIX` namespaces all cache set by this application
CACHE_MIDDLEWARE_KEY_PREFIX = 'omop_harvest'
#
# SESSIONS AND COOKIES
#
CSRF_COOKIE_NAME = 'omop_harvest_csrftoken'
# SESSION_COOKIE_AGE = 60 * 20
SESSION_ENGINE = 'django.contrib.sessions.backends.cache'
SESSION_COOKIE_NAME = 'omop_harvest_sessionid'
SESSION_EXPIRE_AT_BROWSER_CLOSE = False
SESSION_SAVE_EVERY_REQUEST = False
#
# OTHER PROJECT SETTINGS
#
# USE_ETAGS = True
IGNORABLE_404_PATHS = (
r'robots.txt$',
r'favicon.ico$',
)
#
# VARIOUS APP SETTINGS
#
# The primary key of the ``Site`` object for the Sites Framework
SITE_ID = 1
#
# ModelTrees Configuration
#
MODELTREES = {
'default': {
'model': 'omop_harvest.Person',
}
}
#
# Haystack Configuration
#
HAYSTACK_CONNECTIONS = {
'default': {
'ENGINE': 'haystack.backends.whoosh_backend.WhooshEngine',
'PATH': os.path.join(os.path.dirname(__file__), 'whoosh.index')
}
}
AVOCADO = {
'METADATA_MIGRATION_APP': 'omop_harvest',
}
try:
from chopauth.settings import *
except ImportError:
pass
from django.core.exceptions import ImproperlyConfigured
curdir = os.path.dirname(os.path.abspath(__file__))
with open(os.path.join(curdir, '../../.project_config.json'), 'r') as config_file:
    project_settings = json.loads(config_file.read())['project_settings']
from base import get_env_variable
environment = get_env_variable('APP_ENV')
if environment not in project_settings:
    error_msg = "Settings for {0} environment not found in project configuration.".format(environment)
    raise ImproperlyConfigured(error_msg)
# LDAP
LDAP = {}
LDAP['DEBUG'] = project_settings[environment]['django']['LDAP']['DEBUG']
LDAP['PREBINDDN'] = project_settings[environment]['django']['LDAP']['PREBINDDN']
LDAP['SEARCHDN'] = project_settings[environment]['django']['LDAP']['SEARCHDN']
LDAP['SEARCH_FILTER'] = project_settings[environment]['django']['LDAP']['SEARCH_FILTER']
LDAP['SERVER_URI'] = project_settings[environment]['django']['LDAP']['SERVER_URI']
LDAP['PREBINDPW'] = project_settings[environment]['django']['LDAP']['PREBINDPW']
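# Hypothetical shape of .project_config.json consumed above (illustrative
# only; keys inferred from the lookups in this file, and "production" stands
# for whatever APP_ENV evaluates to):
# {
#     "project_settings": {
#         "production": {
#             "django": {
#                 "LDAP": {
#                     "DEBUG": false,
#                     "PREBINDDN": "...",
#                     "SEARCHDN": "...",
#                     "SEARCH_FILTER": "...",
#                     "SERVER_URI": "...",
#                     "PREBINDPW": "..."
#                 }
#             }
#         }
#     }
# }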
#REGISTRATION_MODERATORS = project_settings[environment]['django']['REGISTRATION_MODERATORS']
|
|
#!/usr/bin/env python
# this script:
# 1) outputs each TE insertion call into new files based on family
# 2) collapses TEs of same family within 50 base pairs of one another
# 3) outputs all the unique TE positions to a new file
# 4) calculates the coverage for each sample at each insertion position +/- 25 base pairs
# 5) for each unique position scores strains where a TE was not found as a 0 if coverage was above 8, or "NA" if coverage below 8
# 6) outputs a kinship matrix
# USE: kin_hash.py <sample_list.txt> <CtCp_all_nonredundant.txt>
# ex: kin_hash.py /lscr2/andersenlab/kml436/git_repos2/Transposons2/kin_hash/CtCp_all_nonredundant.txt
import sys
import re
import os
import statistics
from collections import defaultdict
from subprocess import Popen, PIPE
#python ../scripts/kin_mean.py /lscr2/andersenlab/kml436/git_repos2/Transposons2/kintest/CtCp_all_nonredundant.txt
kin_step2="/lscr2/andersenlab/kml436/git_repos2/Transposons2/scripts/kin_step2.sh"
## NEED TO EDIT SAMPLE FILE IN THE SCRIPT BELOW TOO
kin_step3="/lscr2/andersenlab/kml436/git_repos2/Transposons2/scripts/kin_temp.py"
transpose_matrix="/lscr2/andersenlab/kml436/git_repos2/Transposons2/scripts/transpose_matrix.sh"
bam_dir="/lscr2/andersenlab/dec211/RUN/v2_snpset/bam"
#
#
#
#
# change sample list to full one later
sample_list=sys.argv[1]
#sample_list="/lscr2/andersenlab/kml436/git_repos2/Transposons2/data/test_list.txt"
####
#
#
#
#
os.system("mkdir TE_matrix")
dir=os.getcwd() # get current directory
os.chdir("{dir}/TE_matrix".format(**locals()))
def rreplace(s, old, new, occurrence):
li = s.rsplit(old, occurrence)
return new.join(li)
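# Illustrative example (not in the original): rreplace replaces the last
# `occurrence` matches of `old`, counting from the right, e.g.
#     rreplace("1_2_2", "_2", "_3", 1)  ->  "1_2_3"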
##########################################################
# PULL POSITIONS BASED ON FAMILY AND METHOD
##########################################################
all_nonredundant=sys.argv[2]
ALL_NONREDUNDANT=open(all_nonredundant, "r")
output_files={}
output_files=defaultdict(list)
# create dictionary (key=file name of TE family and method, value=list of detected transposon info)
for line in ALL_NONREDUNDANT:
line=line.rstrip('\n')
items=re.split("[\t]",line)
te_info=items[3]
match=re.search("(.*)_(\w+-)?reference", te_info)
family=match.group(1)
method=items[6]
file_name="{method}_{family}".format(**locals())
if method=="new": #only need this method for the insertions
output_files[file_name].append(line)
ALL_NONREDUNDANT.close()
##########################################################
# COLLAPSE INTO UNIQUE POSITIONS
##########################################################
FINAL_SAMPLES={}
FINAL_SAMPLES=defaultdict(list)
FINAL_POSITIONS={}
FINAL_POSITIONS=defaultdict(list)
MATRIX={}
MATRIX=defaultdict(list)
POSITION_COUNTER=0
for i in output_files.keys():
TE_positions={}
TE_positions=defaultdict(list)
TE_samples={}
TE_samples=defaultdict(list)
value=output_files[i]
OUT_FILE=open(i,"w")
for te in value:
OUT_FILE.write(te + '\n')
OUT_FILE.close()
#sort file by position
#os.system("""sort -k1,1 -k2,2n {i} > tmp && mv tmp {i}""".format(**locals()))
result, err = Popen(["""sort -k1,1 -k2,2n {i} > tmp && mv tmp {i}""".format(**locals())], stdout=PIPE, stderr=PIPE, shell=True).communicate()
first_line = True
collapsed_transposons={}
te_ID=0
IND_FILE=open(i, "r")
for line in IND_FILE:
line=line.rstrip('\n')
items=re.split("[\t]",line)
chromosome=items[0]
start_pos=items[1]
end_pos=items[2]
method=items[6]
sample=items[7]
# ADD IN STRANDEDNESS
if first_line == False:
if chromosome == prev_chromosome:
if method == "new": # allow for bp differences in insertion calls
distance = 50
else:
distance = 0 # do not allow for bp differences in absence or reference call--should already be correct/exact
if (int(start_pos)-int(prev_end_pos)) <= int(distance) :
line=prevLine
TE_positions[te_ID].append(start_pos)
TE_samples[te_ID].append(sample)
                    ### add another dictionary
                    #prevLine = rreplace(prevLine, "\t{prev_end_pos}".format(**locals()), "\t{end_pos}".format(**locals()), 1) # replace last occurrence...need to avoid number after
#
#
#
#
#
#
#
#REDO
#collapsed_transposons[te_ID] = prevLine
#reset prev end position
prev_end_pos=end_pos
#don't increase te_ID
else:
te_ID+=1
prev_chromosome=chromosome
prev_start_pos=start_pos
prev_end_pos=end_pos
collapsed_transposons[te_ID]=line
TE_positions[te_ID].append(start_pos)
TE_samples[te_ID].append(sample)
prevLine=line
else:
te_ID+=1
prev_chromosome=chromosome
prev_start_pos=start_pos
prev_end_pos=end_pos
collapsed_transposons[te_ID]=line
TE_positions[te_ID].append(start_pos)
TE_samples[te_ID].append(sample)
prevLine=line
else:
prev_chromosome=chromosome
prev_start_pos=start_pos
prev_end_pos=end_pos
collapsed_transposons[te_ID]=line
TE_positions[te_ID].append(start_pos)
TE_samples[te_ID].append(sample)
prevLine=line
first_line=False
#print collapsed transposons to a new file
IND_FILE.close()
final_out = "final" + "_" + i
FINAL_OUT=open(final_out, "w")
for ID in collapsed_transposons.keys():
###TAKE MIDPOINT HERE?!?!?!??!?!?!
##
#
#
TE = collapsed_transposons[ID]
items=re.split("[\t]", TE)
chromosome=items[0]
info='\t'.join(items[3:8]) # don't include end position in here because it should be the same as the start position
TE_positions[ID] = map(float, TE_positions[ID]) #convert strings in list to floating point number
        # take the mean of the start positions, round it, and use the integer value
average_start=int(round(statistics.mean(TE_positions[ID])))
FINAL_OUT.write("{chromosome}\t{average_start}\t{average_start}\t{info}\n".format(**locals()))
#add to the parent hashes because above block is only for individual TE families
TE_samples[ID]=[i+":1" for i in TE_samples[ID]] # mark "1" for samples where that TE was found
FINAL_SAMPLES[POSITION_COUNTER]=TE_samples[ID]
FINAL_POSITIONS[POSITION_COUNTER].extend((chromosome, average_start, info))
POSITION_COUNTER+=1
FINAL_OUT.close()
result, err = Popen(["""sort -k1,1 -k2,2n {final_out} > tmp && mv tmp {final_out}""".format(**locals())], stdout=PIPE, stderr=PIPE, shell=True).communicate()
##########################################################
# SORT POSITION FILES
##########################################################
result, err = Popen(["""cat final_* > cleaned_positions.gff""".format(**locals())], stdout=PIPE, stderr=PIPE, shell=True).communicate()
result, err = Popen(["""sort -k1,1 -k2,2n cleaned_positions.gff > tmp && mv tmp cleaned_positions.gff""".format(**locals())], stdout=PIPE, stderr=PIPE, shell=True).communicate()
result, err = Popen(["""cat final_new* > cleaned_positions_new.gff""".format(**locals())], stdout=PIPE, stderr=PIPE, shell=True).communicate()
result, err = Popen(["""sort -k1,1 -k2,2n cleaned_positions_new.gff > tmp && mv tmp cleaned_positions_new.gff""".format(**locals())], stdout=PIPE, stderr=PIPE, shell=True).communicate()
result, err = Popen(["""cat final_reference* > cleaned_positions_reference.gff""".format(**locals())], stdout=PIPE, stderr=PIPE, shell=True).communicate()
result, err = Popen(["""sort -k1,1 -k2,2n cleaned_positions_reference.gff > tmp && mv tmp cleaned_positions_reference.gff""".format(**locals())], stdout=PIPE, stderr=PIPE, shell=True).communicate()
result, err = Popen(["""cat final_absent* > cleaned_positions_absent.gff""".format(**locals())], stdout=PIPE, stderr=PIPE, shell=True).communicate()
result, err = Popen(["""sort -k1,1 -k2,2n cleaned_positions_absent.gff > tmp && mv tmp cleaned_positions_absent.gff""".format(**locals())], stdout=PIPE, stderr=PIPE, shell=True).communicate()
##########################################################
# RUN BEDTOOLS WINDOW ON ALL SAMPLES
##########################################################
#result, err = Popen(["""bash {kin_step2} {sample_list}""".format(**locals())], stdout=PIPE, stderr=PIPE, shell=True).communicate()
#output files from above step are named "insertions_bedt.txt" "absences_bedt.txt" "references_bedt.txt"
##########################################################
# GENERATE KINSHIP MATRIX
##########################################################
#ensure that the family found in bedtools matches that of the unique position in the gff and output a matrix:
#result, err = Popen(["""python {kin_step3} insertions_bedt.txt cleaned_positions_new.gff""".format(**locals())], stdout=PIPE, stderr=PIPE, shell=True).communicate()
#result, err = Popen(["""python {kin_step3} references_bedt.txt cleaned_positions_reference.gff""".format(**locals())], stdout=PIPE, stderr=PIPE, shell=True).communicate()
#result, err = Popen(["""python {kin_step3} absences_bedt.txt cleaned_positions_absent.gff""".format(**locals())], stdout=PIPE, stderr=PIPE, shell=True).communicate()
#transpose matrices:
#result, err = Popen(["""bash {transpose_matrix} Samples_insertions_bedt.txt T_Samples_insertions_bedt.txt""".format(**locals())], stdout=PIPE, stderr=PIPE, shell=True).communicate()
#result, err = Popen(["""bash {transpose_matrix} Samples_references_bedt.txt T_Samples_references_bedt.txt""".format(**locals())], stdout=PIPE, stderr=PIPE, shell=True).communicate()
#result, err = Popen(["""bash {transpose_matrix} Samples_absences_bedt.txt T_Samples_absences_bedt.txt""".format(**locals())], stdout=PIPE, stderr=PIPE, shell=True).communicate()
#for each unique transposon position, record in the coverage interval file its chromosome and the interval 25 base pairs up- and downstream
FINAL_INTERVALS={}
FINAL_INTERVALS=defaultdict(list)
COVERAGE_INTERVALS=open("coverage_intervals.txt", "w")
for key,value in FINAL_POSITIONS.items():
chromosome,av_pos=value[0:2]
#av_pos=items[1]
upstream=int(int(av_pos)-25)
downstream=int(int(av_pos)+25)
FINAL_INTERVALS[key].extend((av_pos,chromosome,upstream,downstream))
COVERAGE_INTERVALS.write("{key}\t{av_pos}\t{chromosome}\t{upstream}\t{downstream}\n".format(**locals()))
COVERAGE_INTERVALS.close()
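# Each line in coverage_intervals.txt covers av_pos +/- 25 bp; e.g. a
# hypothetical averaged position of 1000 yields the interval 975-1025.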
#sort the coverage interval file
result, err = Popen(["cat coverage_intervals.txt | sort -k3,3 -k4,4n > tmp && mv tmp coverage_intervals.txt"], stdout=PIPE, stderr=PIPE, shell=True).communicate()
print "Calculating Coverages....."
#for each sample, calculate the coverage at each of those intervals
SAMPLE_COV=open("sample_coverages_and_positions.txt", 'w')
with open(sample_list,'r') as IN:
for line in IN:
sample=line.rstrip('\n')
for key,value in FINAL_INTERVALS.items():
av_pos,chromosome,start,end=value[0:4]
result, err = Popen(["""samtools depth {bam_dir}/{sample}.bam -r {chromosome}:{start}-{end}|datamash mean 3""".format(**locals())],stdout=PIPE, stderr=PIPE, shell=True).communicate()
if result:
coverage=round(float(result),2)
else:
coverage=0
            #find the strains that have already been scored for that TE (marked with a 1); these need no further scoring, so analyzing the coverage at that position can be skipped
strain_list=[(re.split(":", x))[0] for x in FINAL_SAMPLES[key]]
            # if the sample has not already been scored and the coverage is at least 8, that sample is marked 0; if coverage is below 8, it's marked "NA"
if sample not in strain_list:
if coverage >= 8:
FINAL_SAMPLES[key].append("{sample}:0".format(**locals()))
elif coverage <8:
FINAL_SAMPLES[key].append("{sample}:NA".format(**locals()))
else:
print("coverage not found...exiting....")
sys.exit
SAMPLE_COV.write("{key}\t{chromosome}\t{av_pos}\t{sample}\t{coverage}\n".format(**locals()))
SAMPLE_COV.close()
print "Generating Kinship matrix for insertion calls....."
KIN_MATRIX=open("kin_matrix_ins.txt", 'w')
KIN_MATRIX.write("TE")
#print headers
value=FINAL_SAMPLES[1]
value=sorted(value)
for i in value:
strain=(re.split(":", i))[0]
KIN_MATRIX.write("\t{strain}".format(**locals()))
KIN_MATRIX.write('\n')
#output the final matrix
for key,value in FINAL_SAMPLES.items():
value=sorted(value)
full_info=FINAL_POSITIONS[key]
full_info=map(str, full_info) #convert the integers to strings
te_info='_'.join(full_info)
te_info = te_info.replace("\t", "_")
KIN_MATRIX.write(str(te_info)) #or output key ID here?
for i in value:
score=(re.split(":", i))[1]
KIN_MATRIX.write("\t{score}".format(**locals()))
KIN_MATRIX.write('\n')
KIN_MATRIX.close()
result, err = Popen(["""head -n1 kin_matrix_ins.txt > tmp"""],stdout=PIPE, stderr=PIPE, shell=True).communicate()
result, err = Popen(["""cat kin_matrix_ins.txt |sed 1d| sort -t"_" -k1,1 -k2,2n >>tmp """],stdout=PIPE, stderr=PIPE, shell=True).communicate()
result, err = Popen(["""mv tmp kin_matrix_ins.txt """],stdout=PIPE, stderr=PIPE, shell=True).communicate()
#error check for same TE at same position
#why getting only zeros
|
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for ops which manipulate lists of tensors via bridge."""
# pylint: disable=g-bad-name
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
from tensorflow.compiler.tests import xla_test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import list_ops
from tensorflow.python.platform import test
class ListOpsTest(xla_test.XLATestCase):
def testElementShape(self):
with self.session() as sess, self.test_scope():
dim = array_ops.placeholder(dtypes.int32)
l = list_ops.empty_tensor_list(
element_shape=(dim, 15),
element_dtype=dtypes.float32,
max_num_elements=20)
e32 = list_ops.tensor_list_element_shape(l, shape_type=dtypes.int32)
e64 = list_ops.tensor_list_element_shape(l, shape_type=dtypes.int64)
self.assertAllEqual(sess.run(e32, {dim: 10}), (10, 15))
self.assertAllEqual(sess.run(e64, {dim: 7}), (7, 15))
def testPushPop(self):
with self.session() as sess, self.test_scope():
l = list_ops.empty_tensor_list(
element_shape=(7, 15),
element_dtype=dtypes.float32,
max_num_elements=10)
l = list_ops.tensor_list_push_back(
l, constant_op.constant(1.0, shape=(7, 15)))
l = list_ops.tensor_list_push_back(
l, constant_op.constant(2.0, shape=(7, 15)))
l, e2 = list_ops.tensor_list_pop_back(l, element_dtype=dtypes.float32)
_, e1 = list_ops.tensor_list_pop_back(l, element_dtype=dtypes.float32)
self.assertAllEqual(sess.run(e2), 2.0 * np.ones((7, 15)))
self.assertAllEqual(sess.run(e1), 1.0 * np.ones((7, 15)))
def testDoNotConstantFoldVariants(self):
with self.session() as sess, self.test_scope():
val = array_ops.placeholder(dtype=dtypes.float32)
l = list_ops.empty_tensor_list(
element_shape=(7, 15),
element_dtype=dtypes.float32,
max_num_elements=10)
# Note: Pushing a Placeholder will force the constant folding code
# to build a Const node with a DT_VARIANT output. This tests that XLA
      # passes a cf_consider_fn which prevents folding such nodes.
l = list_ops.tensor_list_push_back(
l, array_ops.fill(value=val, dims=(7, 15)))
l = list_ops.tensor_list_push_back(
l, constant_op.constant(2.0, shape=(7, 15)))
l, e2 = list_ops.tensor_list_pop_back(l, element_dtype=dtypes.float32)
_, e1 = list_ops.tensor_list_pop_back(l, element_dtype=dtypes.float32)
self.assertAllEqual(sess.run(e2, {val: 1.0}), 2.0 * np.ones((7, 15)))
self.assertAllEqual(sess.run(e1, {val: 1.0}), 1.0 * np.ones((7, 15)))
def testPushPopSeparateLists(self):
with self.session() as sess, self.test_scope():
l = list_ops.empty_tensor_list(
element_shape=[],
element_dtype=dtypes.float32,
max_num_elements=20)
l = list_ops.tensor_list_push_back(l, constant_op.constant(1.0))
l2 = list_ops.tensor_list_push_back(l, constant_op.constant(2.0))
l3 = list_ops.tensor_list_push_back(l, constant_op.constant(3.0))
_, e11 = list_ops.tensor_list_pop_back(l, element_dtype=dtypes.float32)
l2, e21 = list_ops.tensor_list_pop_back(l2, element_dtype=dtypes.float32)
l2, e22 = list_ops.tensor_list_pop_back(l2, element_dtype=dtypes.float32)
l3, e31 = list_ops.tensor_list_pop_back(l3, element_dtype=dtypes.float32)
l3, e32 = list_ops.tensor_list_pop_back(l3, element_dtype=dtypes.float32)
result = sess.run([e11, [e21, e22], [e31, e32]])
self.assertEqual(result, [1.0, [2.0, 1.0], [3.0, 1.0]])
def testEmptyTensorListNoMax(self):
with self.session() as sess, self.test_scope():
l = list_ops.empty_tensor_list(
element_shape=(7, 15), element_dtype=dtypes.float32)
l = list_ops.tensor_list_push_back(
l, constant_op.constant(1.0, shape=(7, 15)))
_, e = list_ops.tensor_list_pop_back(l, element_dtype=dtypes.float32)
with self.assertRaisesRegexp(errors.InvalidArgumentError,
"Set the max number of elements"):
self.assertAllEqual(sess.run(e), 1.0 * np.ones((7, 15)))
def testEmptyTensorListMax(self):
with self.session() as sess, self.test_scope():
l = list_ops.empty_tensor_list(
element_shape=(10, 15), element_dtype=dtypes.float32,
max_num_elements=2)
l = list_ops.tensor_list_push_back(
l, array_ops.fill(value=3.0, dims=(10, 15)))
_, e = list_ops.tensor_list_pop_back(l, element_dtype=dtypes.float32)
self.assertAllEqual(sess.run(e), 3.0 * np.ones((10, 15)))
def testListFromTensor(self):
with self.session(), self.test_scope():
t = constant_op.constant([1.0, 2.0])
l = list_ops.tensor_list_from_tensor(t, element_shape=[])
e = list_ops.tensor_list_get_item(l, 0, element_dtype=dtypes.float32)
self.assertAllEqual(e, 1.0)
l, e0 = list_ops.tensor_list_pop_back(l, element_dtype=dtypes.float32)
self.assertAllEqual(e0, 2.0)
l, e1 = list_ops.tensor_list_pop_back(l, element_dtype=dtypes.float32)
self.assertAllEqual(e1, 1.0)
self.assertAllEqual(list_ops.tensor_list_length(l), 2)
def testGetSet(self):
with self.session(), self.test_scope():
t = constant_op.constant([1.0, 2.0])
l = list_ops.tensor_list_from_tensor(t, element_shape=[])
e0 = list_ops.tensor_list_get_item(l, 0, element_dtype=dtypes.float32)
self.assertAllEqual(e0, 1.0)
l = list_ops.tensor_list_set_item(l, 0, 3.0)
t = list_ops.tensor_list_stack(l, element_dtype=dtypes.float32)
self.assertAllEqual(t, [3.0, 2.0])
def testSetDoesNotUpdatePushIndex(self):
with self.session(), self.test_scope():
l = list_ops.empty_tensor_list(
element_shape=[], element_dtype=dtypes.float32, max_num_elements=2)
# SetItem should not change the push index.
l = list_ops.tensor_list_set_item(l, 1, 3.)
l = list_ops.tensor_list_push_back(l, 5.)
l = list_ops.tensor_list_push_back(l, 7.)
t = list_ops.tensor_list_stack(l, element_dtype=dtypes.float32)
self.assertAllEqual(t, [5., 7.])
def testGetSetReserved(self):
with self.session(), self.test_scope():
l = list_ops.tensor_list_reserve(
element_dtype=dtypes.float32, element_shape=[], num_elements=2)
e0 = list_ops.tensor_list_get_item(l, 0, element_dtype=dtypes.float32)
self.assertAllEqual(e0, 0.0)
l = list_ops.tensor_list_set_item(l, 0, 3.0)
t = list_ops.tensor_list_stack(l, element_dtype=dtypes.float32)
self.assertAllEqual(t, [3.0, 0.0])
def testSetStackReservedUnknownElementShape(self):
with self.session(), self.test_scope():
l = list_ops.tensor_list_reserve(
element_dtype=dtypes.float32, element_shape=None, num_elements=2)
l = list_ops.tensor_list_set_item(l, 0, [3.0, 4.0])
t = list_ops.tensor_list_stack(l, element_dtype=dtypes.float32)
self.assertAllEqual(t, [[3.0, 4.0], [0., 0.]])
def testPushInEmptyListWithUnknownElementShape(self):
with self.session(), self.test_scope():
l = list_ops.empty_tensor_list(
element_dtype=dtypes.float32, element_shape=None, max_num_elements=2)
l = list_ops.tensor_list_push_back(l, [3.0, 4.0])
# Pushing an element with a different shape should raise an error.
with self.assertRaisesRegexp(errors.InternalError, "shape"):
l = list_ops.tensor_list_push_back(l, 5.)
self.evaluate(
list_ops.tensor_list_stack(l, element_dtype=dtypes.float32))
def testGetSetReservedNonScalar(self):
with self.session() as sess, self.test_scope():
l = list_ops.tensor_list_reserve(
element_dtype=dtypes.float32,
element_shape=(7, 15),
num_elements=2)
l = list_ops.tensor_list_set_item(
l, 0, constant_op.constant(1.0, shape=(7, 15)))
e1 = list_ops.tensor_list_get_item(l, 0, element_dtype=dtypes.float32)
e2 = list_ops.tensor_list_get_item(l, 1, element_dtype=dtypes.float32)
self.assertAllEqual(sess.run(e1), np.ones((7, 15)))
self.assertAllEqual(sess.run(e2), np.zeros((7, 15)))
def testStack(self):
with self.session(), self.test_scope():
l = list_ops.empty_tensor_list(
element_dtype=dtypes.float32,
element_shape=[],
max_num_elements=2)
l = list_ops.tensor_list_push_back(l, constant_op.constant(1.0))
e = list_ops.tensor_list_get_item(l, 0, element_dtype=dtypes.float32)
self.assertAllEqual(e, 1.0)
l = list_ops.tensor_list_push_back(l, constant_op.constant(2.0))
t = list_ops.tensor_list_stack(l, element_dtype=dtypes.float32)
self.assertAllEqual(t.shape.as_list(), [None])
self.assertAllEqual(t, [1.0, 2.0])
def testStackWithUninitializedTensors(self):
with self.session(), self.test_scope():
l = list_ops.tensor_list_reserve(
element_dtype=dtypes.float32, element_shape=[], num_elements=3)
t = list_ops.tensor_list_stack(l, element_dtype=dtypes.float32)
self.assertAllEqual(t, [0., 0., 0.])
def testZerosLikeForTensorList(self):
with self.session(), self.test_scope():
l = list_ops.empty_tensor_list(
element_dtype=dtypes.float32,
element_shape=[],
max_num_elements=2)
l = list_ops.tensor_list_push_back(l, constant_op.constant(1.0))
z = array_ops.zeros_like(l)
z = list_ops.tensor_list_stack(z, element_dtype=dtypes.float32)
self.assertAllEqual(z.shape.as_list(), [None])
self.assertAllEqual(z, [0.0, 0.0])
if __name__ == "__main__":
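  # tf_xla_min_cluster_size is lowered to 2 so that even the tiny graphs
  # built by these tests are auto-clustered and compiled through XLA.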
os.environ['TF_XLA_FLAGS'] = ('--tf_xla_min_cluster_size=2 ' +
os.environ.get('TF_XLA_FLAGS', ''))
test.main()
|
|
# Python Substrate Interface Library
#
# Copyright 2018-2021 Stichting Polkascan (Polkascan Foundation).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# It has been modified for vendored imports and vendored test harness.
# keypair specific tests have been commented out.
import synapse.vendor.utils as s_v_utils
from synapse.vendor.substrateinterface.utils.ss58 import ss58_decode, ss58_encode, ss58_encode_account_index, \
ss58_decode_account_index, is_valid_ss58_address
# from substrateinterface import Keypair
class SS58TestCase(s_v_utils.VendorTest):
# @classmethod
# def setUpClass(cls) -> None:
#
# cls.alice_keypair = Keypair.create_from_uri('//Alice')
#
# cls.subkey_pairs = [
# {
# 'address': '5EU9mjvZdLRGyDFiBHjxrxvQuaaBpeTZCguhxM3yMX8cpZ2u',
# 'public_key': '0x6a5a5957ce778c174c02c151e7c4917ac127b33ad8485f579f830fc15d31bc5a',
# 'ss58_format': 42
# },
# {
# # ecdsa
# 'address': '4pbsSkWcBaYoFHrKJZp5fDVUKbqSYD9dhZZGvpp3vQ5ysVs5ybV',
# 'public_key': '0x035676109c54b9a16d271abeb4954316a40a32bcce023ac14c8e26e958aa68fba9',
# 'ss58_format': 200
# },
# {
# 'address': 'yGF4JP7q5AK46d1FPCEm9sYQ4KooSjHMpyVAjLnsCSWVafPnf',
# 'public_key': '0x66cd6cf085627d6c85af1aaf2bd10cf843033e929b4e3b1c2ba8e4aa46fe111b',
# 'ss58_format': 255
# },
# {
# 'address': 'yGDYxQatQwuxqT39Zs4LtcTnpzE12vXb7ZJ6xpdiHv6gTu1hF',
# 'public_key': '0x242fd5a078ac6b7c3c2531e9bcf1314343782aeb58e7bc6880794589e701db55',
# 'ss58_format': 255
# },
# {
# 'address': 'mHm8k9Emsvyfp3piCauSH684iA6NakctF8dySQcX94GDdrJrE',
# 'public_key': '0x44d5a3ac156335ea99d33a83c57c7146c40c8e2260a8a4adf4e7a86256454651',
# 'ss58_format': 4242
# },
# {
# 'address': 'r6Gr4gaMP8TsjhFbqvZhv3YvnasugLiRJpzpRHifsqqG18UXa',
# 'public_key': '0x88f01441682a17b52d6ae12d1a5670cf675fd254897efabaa5069eb3a701ab73',
# 'ss58_format': 14269
# }
# ]
#
# def test_encode_key_pair_alice_address(self):
# self.assertEqual(self.alice_keypair.ss58_address, "5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY")
def test_encode_1_byte_account_index(self):
self.assertEqual('F7NZ', ss58_encode_account_index(1))
def test_encode_1_byte_account_index_with_format(self):
self.assertEqual('g4b', ss58_encode_account_index(1, ss58_format=2))
self.assertEqual('g4b', ss58_encode('0x01', ss58_format=2))
def test_encode_2_bytes_account_index(self):
self.assertEqual('3xygo', ss58_encode_account_index(256, ss58_format=2))
self.assertEqual('3xygo', ss58_encode('0x0001', ss58_format=2))
def test_encode_4_bytes_account_index(self):
self.assertEqual('zswfoZa', ss58_encode_account_index(67305985, ss58_format=2))
self.assertEqual('zswfoZa', ss58_encode('0x01020304', ss58_format=2))
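    # Note: account-index bytes are interpreted little-endian, e.g.
    # 0x01020304 -> 0x04030201 == 67305985, matching the test above.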
def test_encode_8_bytes_account_index(self):
self.assertEqual('848Gh2GcGaZia', ss58_encode('0x2a2c0a0000000000', ss58_format=2))
def test_decode_1_byte_account_index(self):
self.assertEqual(1, ss58_decode_account_index('F7NZ'))
def test_decode_2_bytes_account_index(self):
self.assertEqual(256, ss58_decode_account_index('3xygo'))
def test_decode_4_bytes_account_index(self):
self.assertEqual(67305985, ss58_decode_account_index('zswfoZa'))
def test_decode_8_bytes_account_index(self):
self.assertEqual(666666, ss58_decode_account_index('848Gh2GcGaZia'))
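    # Likewise little-endian: 0x2a2c0a0000000000 -> 0x0a2c2a == 666666.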
def test_encode_33_byte_address(self):
self.assertEqual(
'KWCv1L3QX9LDPwY4VzvLmarEmXjVJidUzZcinvVnmxAJJCBou',
ss58_encode('0x03b9dc646dd71118e5f7fda681ad9eca36eb3ee96f344f582fbe7b5bcdebb13077')
)
def test_encode_with_2_byte_prefix(self):
public_key = ss58_decode('5GoKvZWG5ZPYL1WUovuHW3zJBWBP5eT8CbqjdRY4Q6iMaQua')
self.assertEqual(
'yGHU8YKprxHbHdEv7oUK4rzMZXtsdhcXVG2CAMyC9WhzhjH2k',
ss58_encode(public_key, ss58_format=255)
)
# def test_encode_subkey_generated_pairs(self):
# for subkey_pair in self.subkey_pairs:
# self.assertEqual(
# subkey_pair['address'],
# ss58_encode(address=subkey_pair['public_key'], ss58_format=subkey_pair['ss58_format'])
# )
#
# def test_decode_subkey_generated_pairs(self):
# for subkey_pair in self.subkey_pairs:
# self.assertEqual(
# subkey_pair['public_key'],
# '0x' + ss58_decode(address=subkey_pair['address'], valid_ss58_format=subkey_pair['ss58_format'])
# )
#
# def test_invalid_ss58_format_range_exceptions(self):
# with self.assertRaises(ValueError) as cm:
# ss58_encode(self.alice_keypair.public_key, ss58_format=-1)
#
# self.assertEqual('Invalid value for ss58_format', str(cm.exception))
#
# with self.assertRaises(ValueError) as cm:
# ss58_encode(self.alice_keypair.public_key, ss58_format=16384)
#
# self.assertEqual('Invalid value for ss58_format', str(cm.exception))
#
# def test_invalid_reserved_ss58_format(self):
# with self.assertRaises(ValueError) as cm:
# ss58_encode(self.alice_keypair.public_key, ss58_format=46)
#
# self.assertEqual('Invalid value for ss58_format', str(cm.exception))
#
# with self.assertRaises(ValueError) as cm:
# ss58_encode(self.alice_keypair.public_key, ss58_format=47)
#
# self.assertEqual('Invalid value for ss58_format', str(cm.exception))
#
# def test_invalid_public_key(self):
# with self.assertRaises(ValueError) as cm:
# ss58_encode(self.alice_keypair.public_key[:30])
#
# self.assertEqual('Invalid length for address', str(cm.exception))
def test_decode_public_key(self):
self.assertEqual(
'0x03b9dc646dd71118e5f7fda681ad9eca36eb3ee96f344f582fbe7b5bcdebb13077',
ss58_decode('0x03b9dc646dd71118e5f7fda681ad9eca36eb3ee96f344f582fbe7b5bcdebb13077')
)
def test_decode_reserved_ss58_formats(self):
with self.assertRaises(ValueError) as cm:
ss58_decode('MGP3U1wqNhFofseKXU7B6FcZuLbvQvJFyin1EvQM65mBcNsY8')
self.assertEqual('46 is a reserved SS58 format', str(cm.exception))
with self.assertRaises(ValueError) as cm:
ss58_decode('MhvaLBvSb5jhjrftHLQPAvJegnpXgyDTE1ZprRNzAcfQSRdbL')
self.assertEqual('47 is a reserved SS58 format', str(cm.exception))
def test_invalid_ss58_format_check(self):
with self.assertRaises(ValueError) as cm:
ss58_decode('5GoKvZWG5ZPYL1WUovuHW3zJBWBP5eT8CbqjdRY4Q6iMaQua', valid_ss58_format=2)
self.assertEqual('Invalid SS58 format', str(cm.exception))
def test_decode_invalid_checksum(self):
with self.assertRaises(ValueError) as cm:
ss58_decode('5GoKvZWG5ZPYL1WUovuHW3zJBWBP5eT8CbqjdRY4Q6iMaQub')
self.assertEqual('Invalid checksum', str(cm.exception))
def test_decode_invalid_length(self):
with self.assertRaises(ValueError) as cm:
ss58_decode('5GoKvZWG5ZPYL1WUovuHW3zJBWBP5eT8CbqjdRY4Q6iMaQubsdhfjksdhfkj')
self.assertEqual('Invalid address length', str(cm.exception))
def test_is_valid_ss58_address(self):
self.assertTrue(is_valid_ss58_address('5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY'))
self.assertTrue(is_valid_ss58_address('5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY', valid_ss58_format=42))
self.assertFalse(is_valid_ss58_address('5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY', valid_ss58_format=2))
self.assertTrue(is_valid_ss58_address('GLdQ4D4wkeEJUX8DBT9HkpycFVYQZ3fmJyQ5ZgBRxZ4LD3S', valid_ss58_format=2))
self.assertFalse(is_valid_ss58_address('GLdQ4D4wkeEJUX8DBT9HkpycFVYQZ3fmJyQ5ZgBRxZ4LD3S', valid_ss58_format=42))
self.assertFalse(is_valid_ss58_address('GLdQ4D4wkeEJUX8DBT9HkpycFVYQZ3fmJyQ5ZgBRxZ4LD3S', valid_ss58_format=0))
self.assertTrue(is_valid_ss58_address('12gX42C4Fj1wgtfgoP624zeHrcPBqzhb4yAENyvFdGX6EUnN', valid_ss58_format=0))
self.assertFalse(is_valid_ss58_address('5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQ'))
self.assertFalse(is_valid_ss58_address('6GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY'))
self.assertFalse(is_valid_ss58_address('0xd43593c715fdd31c61141abd04a99fd6822c8558854ccde39a5684e7a56da27d'))
self.assertFalse(is_valid_ss58_address('d43593c715fdd31c61141abd04a99fd6822c8558854ccde39a5684e7a56da27d'))
self.assertFalse(is_valid_ss58_address('incorrect_string'))
|
|
"""Mongodb implementations of osid managers."""
# pylint: disable=no-init
# Numerous classes don't require __init__.
# pylint: disable=too-many-public-methods,too-few-public-methods
# Number of methods are defined in specification
# pylint: disable=protected-access
# Access to protected methods allowed in package mongo package scope
# pylint: disable=too-many-ancestors
# Inheritance defined in specification
from importlib import import_module
from pymongo import MongoClient
from . import profile
from .. import MONGO_CLIENT
from .. import utilities
from ...abstract_osid.osid import managers as abc_osid_managers
from ..primitives import DisplayText
from ..primitives import Id
from ..primitives import Type
from dlkit.abstract_osid.osid import errors
from dlkit.mongo.osid import markers as osid_markers
class OsidProfile(abc_osid_managers.OsidProfile, osid_markers.Sourceable):
"""The ``OsidProfile`` defines the interoperability areas of an OSID.
An ``OsidProfile`` is implemented by an ``OsidManager``. The top
level ``OsidProfile`` tests for version compatibility. Each OSID
extends this interface to include its own interoperability
definitions within its managers.
"""
def __init__(self):
self._runtime = None
self._config = None
def _get_registry(self, entry):
# get from the runtime
try:
records_location_param_id = Id('parameter:recordsRegistry@mongo')
registry = self._runtime.get_configuration().get_value_by_parameter(
records_location_param_id).get_string_value()
return import_module(registry).__dict__.get(entry, {})
except (ImportError, AttributeError, KeyError, errors.NotFound):
return {}
def _initialize_manager(self, runtime):
"""Sets the runtime, configuration and mongo client"""
if self._runtime is not None:
raise errors.IllegalState('this manager has already been initialized.')
self._runtime = runtime
self._config = runtime.get_configuration()
if not MONGO_CLIENT.is_mongo_client_set():
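            # Prefer a configured mongoHostURI parameter; if the runtime has
            # no such parameter, fall back to a default local MongoClient().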
try:
mongo_host_param_id = Id('parameter:mongoHostURI@mongo')
mongo_host = runtime.get_configuration().get_value_by_parameter(mongo_host_param_id).get_string_value()
except (AttributeError, KeyError, errors.NotFound):
MONGO_CLIENT.set_mongo_client(MongoClient())
else:
MONGO_CLIENT.set_mongo_client(MongoClient(mongo_host))
def get_id(self):
"""Gets an identifier for this service implementation.
The identifier is unique among services but multiple
instantiations of the same service use the same ``Id``. This
identifier is the same identifier used in managing OSID
installations.
return: (osid.id.Id) - the ``Id``
*compliance: mandatory -- This method must be implemented.*
"""
return Id(**profile.ID)
id_ = property(fget=get_id)
ident = property(fget=get_id)
def get_display_name(self):
"""Gets a display name for this service implementation.
return: (osid.locale.DisplayText) - a display name
*compliance: mandatory -- This method must be implemented.*
"""
return DisplayText(
            text=profile.DISPLAYNAME,
language_type=Type(**profile.LANGUAGETYPE),
script_type=Type(**profile.SCRIPTTYPE),
format_type=Type(**profile.FORMATTYPE))
display_name = property(fget=get_display_name)
def get_description(self):
"""Gets a description of this service implementation.
return: (osid.locale.DisplayText) - a description
*compliance: mandatory -- This method must be implemented.*
"""
return DisplayText(
text=profile.DESCRIPTION,
language_type=Type(**profile.LANGUAGETYPE),
script_type=Type(**profile.SCRIPTTYPE),
format_type=Type(**profile.FORMATTYPE))
description = property(fget=get_description)
def get_version(self):
"""Gets the version of this service implementation.
return: (osid.installation.Version) - the service implementation
version
*compliance: mandatory -- This method must be implemented.*
"""
## THIS ALL NEEDS TO BE FIXED:
#try:
# from ..installation.primitives import Version
#except:
# from .common import Version
#try:
# from ..type.primitives import Type
#except:
# from .common import Type
#return Version(
# components=profile.VERSIONCOMPONENTS,
# scheme=Type(**profile.VERSIONSCHEME))
raise errors.Unimplemented()
version = property(fget=get_version)
def get_release_date(self):
"""Gets the date this service implementation was released.
return: (osid.calendaring.DateTime) - the release date
*compliance: mandatory -- This method must be implemented.*
"""
# NEED TO IMPLEMENT
raise errors.Unimplemented()
release_date = property(fget=get_release_date)
@utilities.arguments_not_none
def supports_osid_version(self, version):
"""Test for support of an OSID specification version.
arg: version (osid.installation.Version): the specification
version to test
return: (boolean) - ``true`` if this manager supports the given
OSID version, ``false`` otherwise
*compliance: mandatory -- This method must be implemented.*
*implementation notes*: An implementation may support multiple
versions of an OSID.
"""
## THIS ALL NEEDS TO BE FIXED:
#try:
# from ..installation.primitives import Version
#except:
# from .common import Version
#try:
# from ..type.primitives import Type
#except:
# from .common import Type
#return Version(
# components=profile.OSIDVERSION,
# scheme=Type(**profile.VERSIONSCHEME))
raise errors.Unimplemented()
def get_locales(self):
"""Gets the locales supported in this service.
return: (osid.locale.LocaleList) - list of locales supported
*compliance: mandatory -- This method must be implemented.*
"""
# NEED TO IMPLEMENT
raise errors.Unimplemented()
locales = property(fget=get_locales)
def supports_journal_rollback(self):
"""Test for support of a journaling rollback service.
return: (boolean) - ``true`` if this manager supports the
journal rollback, ``false`` otherwise
*compliance: mandatory -- This method must be implemented.*
"""
# Perhaps someday I will support journaling
return False
def supports_journal_branching(self):
"""Test for support of a journal branching service.
return: (boolean) - ``true`` if this manager supports the
journal branching, ``false`` otherwise
*compliance: mandatory -- This method must be implemented.*
"""
# Perhaps someday I will support journaling
return False
def get_branch_id(self):
"""Gets the ``Branch Id`` representing this service branch.
return: (osid.id.Id) - the branch ``Id``
raise: Unimplemented - ``supports_journal_branching()`` is
``false``
*compliance: mandatory -- This method must be implemented.*
"""
# Perhaps someday I will support journaling
raise errors.Unimplemented()
branch_id = property(fget=get_branch_id)
def get_branch(self):
"""Gets this service branch.
return: (osid.journaling.Branch) - the service branch
raise: OperationFailed - unable to complete request
raise: Unimplemented - ``supports_journal_branching()`` is
``false``
*compliance: mandatory -- This method must be implemented.*
"""
# Perhaps someday I will support journaling
raise errors.Unimplemented()
branch = property(fget=get_branch)
def get_proxy_record_types(self):
"""Gets the proxy record ``Types`` supported in this service.
If no proxy manager is available, an empty list is returned.
return: (osid.type.TypeList) - list of proxy record types
supported
*compliance: mandatory -- This method must be implemented.*
"""
# NEED TO IMPLEMENT
raise errors.Unimplemented()
proxy_record_types = property(fget=get_proxy_record_types)
@utilities.arguments_not_none
def supports_proxy_record_type(self, proxy_record_type):
"""Test for support of a proxy type.
arg: proxy_record_type (osid.type.Type): a proxy record type
return: (boolean) - ``true`` if this service supports the given
proxy record type, ``false`` otherwise
raise: NullArgument - ``proxy_record_type`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
# NEED TO IMPLEMENT
raise errors.Unimplemented()
class OsidManager(abc_osid_managers.OsidManager, OsidProfile):
"""The ``OsidManager`` is the top level interface for all OSID managers.
An OSID manager is instantiated through the ``OsidRuntimeManager``
and represents an instance of a service. An OSID manager is
responsible for implementing a profile for a service and creating
sessions that, in general, correspond to the profile. An application
need only create a single ``OsidManager`` per service and
    implementors must ensure the ``OsidManager`` is thread-safe.
The ``OsidSessions`` spawned from an OSID manager are dedicated to
single processing threads. The ``OsidManager`` defines methods in
common throughout all OSID managers which implement this interface.
"""
def __init__(self):
OsidProfile.__init__(self)
@utilities.arguments_not_none
def initialize(self, runtime):
"""Initializes this manager.
A manager is initialized once at the time of creation.
arg: runtime (osid.OsidRuntimeManager): the runtime
environment
raise: ConfigurationError - an error with implementation
configuration
raise: IllegalState - this manager has already been initialized
by the ``OsidRuntime``
raise: NullArgument - ``runtime`` is ``null``
raise: OperationFailed - unable to complete request
*compliance: mandatory -- This method must be implemented.*
*implementation notes*: In addition to loading its runtime
configuration an implementation may create shared resources such
as connection pools to be shared among all sessions of this
service and released when this manager is closed. Providers must
thread-protect any data stored in the manager. To maximize
interoperability, providers should not honor a second call to
``initialize()`` and must set an ``IllegalState`` error.
"""
OsidProfile._initialize_manager(self, runtime)
@utilities.arguments_not_none
def rollback_service(self, rollback_time):
"""Rolls back this service to a point in time.
arg: rollback_time (timestamp): the requested time
return: (osid.journaling.JournalEntry) - the journal entry
corresponding to the actual state of this service
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure occurred
raise: Unimplemented - ``supports_journal_rollback()`` is
``false``
*compliance: mandatory -- This method must be implemented.*
"""
raise errors.Unimplemented()
@utilities.arguments_not_none
def change_branch(self, branch_id):
"""Changes the service branch.
arg: branch_id (osid.id.Id): the new service branch
raise: NotFound - ``branch_id`` not found
raise: NullArgument - ``branch_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure occurred
raise: Unimplemented - ``supports_journal_branching()`` is
``false``
*compliance: mandatory -- This method must be implemented.*
"""
raise errors.Unimplemented()
class OsidProxyManager(abc_osid_managers.OsidProxyManager, OsidProfile):
"""The ``OsidProxyManager`` is the top level interface for all OSID proxy managers.
A proxy manager accepts parameters to pass through end-user
authentication credentials and other necessary request parameters in
a server environment. Native applications should use an
``OsidManager`` to maintain a higher degree of interoperability by
avoiding this coupling.
An OSID proxy manager is instantiated through the
``OsidRuntimeManager`` and represents an instance of a service. An
OSID manager is responsible for defining clusters of
interoperability within a service and creating sessions that
    generally correspond to these clusters. An application need only
    create a single ``OsidProxyManager`` per service and implementors
    must ensure the ``OsidProxyManager`` is thread-safe. The
``OsidSessions`` spawned from an OSID manager are dedicated to
single processing threads. The ``OsidProxyManager`` defines methods
in common throughout all OSID managers which implement this
interface.
"""
def __init__(self):
OsidProfile.__init__(self)
@utilities.arguments_not_none
def initialize(self, runtime):
"""Initializes this manager.
A manager is initialized once at the time of creation.
arg: runtime (osid.OsidRuntimeManager): the runtime
environment
raise: ConfigurationError - an error with implementation
configuration
raise: IllegalState - this manager has already been initialized
by the ``OsidRuntime``
raise: NullArgument - ``runtime`` is ``null``
raise: OperationFailed - unable to complete request
*compliance: mandatory -- This method must be implemented.*
*implementation notes*: In addition to loading its runtime
configuration an implementation may create shared resources such
as connection pools to be shared among all sessions of this
service and released when this manager is closed. Providers must
thread-protect any data stored in the manager. To maximize
interoperability, providers should not honor a second call to
``initialize()`` and must set an ``IllegalState`` error.
"""
OsidProfile._initialize_manager(self, runtime)
@utilities.arguments_not_none
def rollback_service(self, rollback_time, proxy):
"""Rolls back this service to a point in time.
arg: rollback_time (timestamp): the requested time
arg: proxy (osid.proxy.Proxy): a proxy
return: (osid.journaling.JournalEntry) - the journal entry
corresponding to the actual state of this service
raise: NullArgument - ``proxy`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure occurred
raise: Unimplemented - ``supports_journal_rollback()`` is
``false``
*compliance: mandatory -- This method must be implemented.*
"""
raise errors.Unimplemented()
@utilities.arguments_not_none
def change_branch(self, branch_id, proxy):
"""Changes the service branch.
arg: branch_id (osid.id.Id): the new service branch
arg: proxy (osid.proxy.Proxy): a proxy
raise: NotFound - ``branch_id`` not found
raise: NullArgument - ``branch_id`` or ``proxy`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure occurred
raise: Unimplemented - ``supports_journal_branching()`` is
``false``
*compliance: mandatory -- This method must be implemented.*
"""
raise errors.Unimplemented()
class OsidRuntimeProfile(abc_osid_managers.OsidRuntimeProfile, OsidProfile):
"""The ``OsidRuntimeProfile`` defines the service aspects of the OSID runtime service."""
def supports_configuration(self):
"""Tests if a configuration service is provided within this runtime environment.
return: (boolean) - ``true`` if a configuration service is
available, ``false`` otherwise
*compliance: mandatory -- This method must be implemented.*
"""
raise errors.Unimplemented()
class OsidRuntimeManager(abc_osid_managers.OsidRuntimeManager, OsidManager, OsidRuntimeProfile):
"""The ``OsidRuntimeManager`` represents and OSID platform and contains the information required for running OSID
implementations such as search paths and configurations."""
    def __init__(self, configuration_key=None):
self._configuration_key = configuration_key
@utilities.arguments_not_none
def get_manager(self, osid, impl_class_name, version):
"""Finds, loads and instantiates providers of OSID managers.
Providers must conform to an OsidManager interface. The
interfaces are defined in the OSID enumeration. For all OSID
requests, an instance of ``OsidManager`` that implements the
``OsidManager`` interface is returned. In bindings where
permitted, this can be safely cast into the requested manager.
arg: osid (osid.OSID): represents the OSID
arg: impl_class_name (string): the name of the implementation
arg: version (osid.installation.Version): the minimum
required OSID specification version
return: (osid.OsidManager) - the manager of the service
raise: ConfigurationError - an error in configuring the
implementation
raise: NotFound - the implementation class was not found
raise: NullArgument - ``impl_class_name`` or ``version`` is
``null``
raise: OperationFailed - unable to complete request
raise: Unsupported - ``impl_class_name`` does not support the
requested OSID
*compliance: mandatory -- This method must be implemented.*
*implementation notes*: After finding and instantiating the
requested ``OsidManager,`` providers must invoke
``OsidManager.initialize(OsidRuntimeManager)`` where the
environment is an instance of the current environment that
includes the configuration for the service being initialized.
The ``OsidRuntimeManager`` passed may include information useful
for the configuration such as the identity of the service being
instantiated.
"""
raise errors.Unimplemented()
@utilities.arguments_not_none
def get_proxy_manager(self, osid, implementation, version):
"""Finds, loads and instantiates providers of OSID managers.
Providers must conform to an ``OsidManager`` interface. The
interfaces are defined in the OSID enumeration. For all OSID
requests, an instance of ``OsidManager`` that implements the
``OsidManager`` interface is returned. In bindings where
permitted, this can be safely cast into the requested manager.
arg: osid (osid.OSID): represents the OSID
arg: implementation (string): the name of the implementation
arg: version (osid.installation.Version): the minimum
required OSID specification version
return: (osid.OsidProxyManager) - the manager of the service
raise: ConfigurationError - an error in configuring the
implementation
raise: NotFound - the implementation class was not found
raise: NullArgument - ``implementation`` or ``version`` is
``null``
raise: OperationFailed - unable to complete request
raise: Unsupported - ``implementation`` does not support the
requested OSID
*compliance: mandatory -- This method must be implemented.*
*implementation notes*: After finding and instantiating the
requested ``OsidManager,`` providers must invoke
``OsidManager.initialize(OsidRuntimeManager)`` where the
environment is an instance of the current environment that
includes the configuration for the service being initialized.
The ``OsidRuntimeManager`` passed may include information useful
for the configuration such as the identity of the service being
instantiated.
"""
raise errors.Unimplemented()
def get_configuration(self):
"""Gets the current configuration in the runtime environment.
return: (osid.configuration.ValueLookupSession) - a
configuration
raise: OperationFailed - unable to complete request
        raise: PermissionDenied - an authorization failure occurred
raise: Unimplemented - a configuration service is not supported
*compliance: optional -- This method must be implemented if
``supports_configuration()`` is ``true``.*
"""
raise errors.Unimplemented()
configuration = property(fget=get_configuration)
|
|
"""Routine to "compile" a .py file to a .pyc file.
This module has intimate knowledge of the format of .pyc files.
"""
import enum
import importlib._bootstrap_external
import importlib.machinery
import importlib.util
import os
import os.path
import sys
import traceback
__all__ = ["compile", "main", "PyCompileError", "PycInvalidationMode"]
class PyCompileError(Exception):
"""Exception raised when an error occurs while attempting to
compile the file.
To raise this exception, use
raise PyCompileError(exc_type,exc_value,file[,msg])
where
    exc_type: exception type to be used in error message
        type name can be accessed as class variable
        'exc_type_name'
    exc_value: exception value to be used in error message
        can be accessed as class variable 'exc_value'
    file: name of file being compiled to be used in error message
        can be accessed as class variable 'file'
    msg: string message to be written as error message
        If no value is given, a default exception message will be
        given, consistent with 'standard' py_compile output.
        message (or default) can be accessed as class variable
        'msg'
"""
def __init__(self, exc_type, exc_value, file, msg=''):
exc_type_name = exc_type.__name__
if exc_type is SyntaxError:
tbtext = ''.join(traceback.format_exception_only(
exc_type, exc_value))
errmsg = tbtext.replace('File "<string>"', 'File "%s"' % file)
else:
errmsg = "Sorry: %s: %s" % (exc_type_name,exc_value)
Exception.__init__(self,msg or errmsg,exc_type_name,exc_value,file)
self.exc_type_name = exc_type_name
self.exc_value = exc_value
self.file = file
self.msg = msg or errmsg
def __str__(self):
return self.msg
class PycInvalidationMode(enum.Enum):
TIMESTAMP = 1
CHECKED_HASH = 2
UNCHECKED_HASH = 3
def _get_default_invalidation_mode():
if os.environ.get('SOURCE_DATE_EPOCH'):
return PycInvalidationMode.CHECKED_HASH
else:
return PycInvalidationMode.TIMESTAMP
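# SOURCE_DATE_EPOCH is the reproducible-builds convention for pinning build
# timestamps; when it is set, a hash-based pyc avoids embedding the source
# mtime, so repeated builds produce identical bytecode files.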
def compile(file, cfile=None, dfile=None, doraise=False, optimize=-1,
invalidation_mode=None, quiet=0):
"""Byte-compile one Python source file to Python bytecode.
:param file: The source file name.
:param cfile: The target byte compiled file name. When not given, this
defaults to the PEP 3147/PEP 488 location.
:param dfile: Purported file name, i.e. the file name that shows up in
error messages. Defaults to the source file name.
:param doraise: Flag indicating whether or not an exception should be
raised when a compile error is found. If an exception occurs and this
flag is set to False, a string indicating the nature of the exception
will be printed, and the function will return to the caller. If an
exception occurs and this flag is set to True, a PyCompileError
exception will be raised.
:param optimize: The optimization level for the compiler. Valid values
are -1, 0, 1 and 2. A value of -1 means to use the optimization
level of the current interpreter, as given by -O command line options.
    :param invalidation_mode: The PycInvalidationMode to embed in the
        generated bytecode header; if None, a default is chosen based on
        the SOURCE_DATE_EPOCH environment variable.
:param quiet: Return full output with False or 0, errors only with 1,
and no output with 2.
:return: Path to the resulting byte compiled file.
Note that it isn't necessary to byte-compile Python modules for
execution efficiency -- Python itself byte-compiles a module when
it is loaded, and if it can, writes out the bytecode to the
corresponding .pyc file.
However, if a Python installation is shared between users, it is a
good idea to byte-compile all modules upon installation, since
other users may not be able to write in the source directories,
and thus they won't be able to write the .pyc file, and then
they would be byte-compiling every module each time it is loaded.
This can slow down program start-up considerably.
See compileall.py for a script/module that uses this module to
byte-compile all installed files (or all files in selected
directories).
Do note that FileExistsError is raised if cfile ends up pointing at a
non-regular file or symlink. Because the compilation uses a file renaming,
the resulting file would be regular and thus not the same type of file as
it was previously.
"""
if invalidation_mode is None:
invalidation_mode = _get_default_invalidation_mode()
if cfile is None:
if optimize >= 0:
optimization = optimize if optimize >= 1 else ''
cfile = importlib.util.cache_from_source(file,
optimization=optimization)
else:
cfile = importlib.util.cache_from_source(file)
if os.path.islink(cfile):
msg = ('{} is a symlink and will be changed into a regular file if '
'import writes a byte-compiled file to it')
raise FileExistsError(msg.format(cfile))
elif os.path.exists(cfile) and not os.path.isfile(cfile):
msg = ('{} is a non-regular file and will be changed into a regular '
'one if import writes a byte-compiled file to it')
raise FileExistsError(msg.format(cfile))
loader = importlib.machinery.SourceFileLoader('<py_compile>', file)
source_bytes = loader.get_data(file)
try:
code = loader.source_to_code(source_bytes, dfile or file,
_optimize=optimize)
except Exception as err:
py_exc = PyCompileError(err.__class__, err, dfile or file)
if quiet < 2:
if doraise:
raise py_exc
else:
sys.stderr.write(py_exc.msg + '\n')
return
try:
dirname = os.path.dirname(cfile)
if dirname:
os.makedirs(dirname)
except FileExistsError:
pass
if invalidation_mode == PycInvalidationMode.TIMESTAMP:
source_stats = loader.path_stats(file)
bytecode = importlib._bootstrap_external._code_to_timestamp_pyc(
code, source_stats['mtime'], source_stats['size'])
else:
source_hash = importlib.util.source_hash(source_bytes)
bytecode = importlib._bootstrap_external._code_to_hash_pyc(
code,
source_hash,
(invalidation_mode == PycInvalidationMode.CHECKED_HASH),
)
mode = importlib._bootstrap_external._calc_mode(file)
importlib._bootstrap_external._write_atomic(cfile, bytecode, mode)
return cfile
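# Example usage (a minimal sketch; 'spam.py' is a hypothetical file name):
#     >>> import py_compile
#     >>> py_compile.compile('spam.py', doraise=True)  # returns the cache path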
def main(args=None):
"""Compile several source files.
The files named in 'args' (or on the command line, if 'args' is
not specified) are compiled and the resulting bytecode is cached
in the normal manner. This function does not search a directory
structure to locate source files; it only compiles files named
explicitly. If '-' is the only parameter in args, the list of
files is taken from standard input.
"""
if args is None:
args = sys.argv[1:]
    rv = 0
    quiet = 0  # main() exposes no quiet flag, so always report errors
if args == ['-']:
while True:
filename = sys.stdin.readline()
if not filename:
break
filename = filename.rstrip('\n')
try:
compile(filename, doraise=True)
except PyCompileError as error:
rv = 1
if quiet < 2:
sys.stderr.write("%s\n" % error.msg)
except OSError as error:
rv = 1
if quiet < 2:
sys.stderr.write("%s\n" % error)
else:
for filename in args:
try:
compile(filename, doraise=True)
except PyCompileError as error:
# return value to indicate at least one failure
rv = 1
if quiet < 2:
sys.stderr.write("%s\n" % error.msg)
return rv
if __name__ == "__main__":
sys.exit(main())
|
|
import os
from typing import Any, Iterator, List, Optional, Tuple
from django.conf import settings
from zerver.lib.redis_utils import get_redis_client
from zerver.models import UserProfile
import redis
import time
import logging
# Implement a rate-limiting scheme inspired by the one described here, but heavily modified
# http://blog.domaintools.com/2013/04/rate-limiting-with-redis/
client = get_redis_client()
rules = settings.RATE_LIMITING_RULES # type: List[Tuple[int, int]]
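# Each rule is a (seconds, max_requests) tuple; e.g. a hypothetical
# [(60, 100), (3600, 1000)] allows 100 requests per minute and 1000 per hour.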
KEY_PREFIX = ''
class RateLimiterLockingException(Exception):
pass
class RateLimitedObject:
def get_keys(self) -> List[str]:
key_fragment = self.key_fragment()
return ["{}ratelimit:{}:{}".format(KEY_PREFIX, key_fragment, keytype)
for keytype in ['list', 'zset', 'block']]
def key_fragment(self) -> str:
raise NotImplementedError()
def rules(self) -> List[Tuple[int, int]]:
raise NotImplementedError()
class RateLimitedUser(RateLimitedObject):
def __init__(self, user: UserProfile, domain: str='all') -> None:
self.user = user
self.domain = domain
def key_fragment(self) -> str:
return "{}:{}:{}".format(type(self.user), self.user.id, self.domain)
def rules(self) -> List[Tuple[int, int]]:
if self.user.rate_limits != "":
result = [] # type: List[Tuple[int, int]]
for limit in self.user.rate_limits.split(','):
(seconds, requests) = limit.split(':', 2)
result.append((int(seconds), int(requests)))
return result
return rules
def bounce_redis_key_prefix_for_testing(test_name: str) -> None:
global KEY_PREFIX
KEY_PREFIX = test_name + ':' + str(os.getpid()) + ':'
def max_api_calls(entity: RateLimitedObject) -> int:
"Returns the API rate limit for the highest limit"
return entity.rules()[-1][1]
def max_api_window(entity: RateLimitedObject) -> int:
"Returns the API time window for the highest limit"
return entity.rules()[-1][0]
def add_ratelimit_rule(range_seconds: int, num_requests: int) -> None:
"Add a rate-limiting rule to the ratelimiter"
global rules
rules.append((range_seconds, num_requests))
rules.sort(key=lambda x: x[0])
def remove_ratelimit_rule(range_seconds: int, num_requests: int) -> None:
global rules
    rules = [x for x in rules if x != (range_seconds, num_requests)]
def block_access(entity: RateLimitedObject, seconds: int) -> None:
"Manually blocks an entity for the desired number of seconds"
_, _, blocking_key = entity.get_keys()
with client.pipeline() as pipe:
pipe.set(blocking_key, 1)
pipe.expire(blocking_key, seconds)
pipe.execute()
def unblock_access(entity: RateLimitedObject) -> None:
_, _, blocking_key = entity.get_keys()
client.delete(blocking_key)
def clear_history(entity: RateLimitedObject) -> None:
'''
This is only used by test code now, where it's very helpful in
allowing us to run tests quickly, by giving a user a clean slate.
'''
for key in entity.get_keys():
client.delete(key)
def _get_api_calls_left(entity: RateLimitedObject, range_seconds: int, max_calls: int) -> Tuple[int, float]:
list_key, set_key, _ = entity.get_keys()
# Count the number of values in our sorted set
# that are between now and the cutoff
now = time.time()
boundary = now - range_seconds
with client.pipeline() as pipe:
# Count how many API calls in our range have already been made
pipe.zcount(set_key, boundary, now)
# Get the newest call so we can calculate when the ratelimit
# will reset to 0
pipe.lindex(list_key, 0)
results = pipe.execute()
count = results[0] # type: int
newest_call = results[1] # type: Optional[bytes]
calls_left = max_calls - count
if newest_call is not None:
time_reset = now + (range_seconds - (now - float(newest_call)))
else:
time_reset = now
return calls_left, time_reset
def api_calls_left(entity: RateLimitedObject) -> Tuple[int, float]:
"""Returns how many API calls in this range this client has, as well as when
the rate-limit will be reset to 0"""
max_window = max_api_window(entity)
max_calls = max_api_calls(entity)
return _get_api_calls_left(entity, max_window, max_calls)
def is_ratelimited(entity: RateLimitedObject) -> Tuple[bool, float]:
"Returns a tuple of (rate_limited, time_till_free)"
list_key, set_key, blocking_key = entity.get_keys()
rules = entity.rules()
if len(rules) == 0:
return False, 0.0
# Go through the rules from shortest to longest,
# seeing if this user has violated any of them. First
# get the timestamps for each nth items
with client.pipeline() as pipe:
for _, request_count in rules:
pipe.lindex(list_key, request_count - 1) # 0-indexed list
# Get blocking info
pipe.get(blocking_key)
pipe.ttl(blocking_key)
rule_timestamps = pipe.execute() # type: List[Optional[bytes]]
# Check if there is a manual block on this API key
blocking_ttl_b = rule_timestamps.pop()
key_blocked = rule_timestamps.pop()
if key_blocked is not None:
# We are manually blocked. Report for how much longer we will be
if blocking_ttl_b is None:
blocking_ttl = 0.5
else:
blocking_ttl = int(blocking_ttl_b)
return True, blocking_ttl
now = time.time()
for timestamp, (range_seconds, num_requests) in zip(rule_timestamps, rules):
# Check if the nth timestamp is newer than the associated rule. If so,
# it means we've hit our limit for this rule
if timestamp is None:
continue
boundary = float(timestamp) + range_seconds
if boundary > now:
free = boundary - now
return True, free
    # No rule was violated (possibly no API calls recorded yet)
return False, 0.0
def incr_ratelimit(entity: RateLimitedObject) -> None:
"""Increases the rate-limit for the specified entity"""
list_key, set_key, _ = entity.get_keys()
now = time.time()
# If we have no rules, we don't store anything
if len(rules) == 0:
return
# Start redis transaction
with client.pipeline() as pipe:
count = 0
while True:
try:
# To avoid a race condition between getting the element we might trim from our list
# and removing it from our associated set, we abort this whole transaction if
# another agent manages to change our list out from under us
# When watching a value, the pipeline is set to Immediate mode
pipe.watch(list_key)
# Get the last elem that we'll trim (so we can remove it from our sorted set)
last_val = pipe.lindex(list_key, max_api_calls(entity) - 1)
# Restart buffered execution
pipe.multi()
# Add this timestamp to our list
pipe.lpush(list_key, now)
# Trim our list to the oldest rule we have
pipe.ltrim(list_key, 0, max_api_calls(entity) - 1)
                # Add our new value to the sorted set that we keep
                # We need to put the score and val both as timestamp,
                # as we sort by score but remove by value
                # (pre-3.0 redis-py zadd signature; redis-py >= 3.0 takes a
                # mapping instead: pipe.zadd(set_key, {now: now}))
                pipe.zadd(set_key, now, now)
# Remove the trimmed value from our sorted set, if there was one
if last_val is not None:
pipe.zrem(set_key, last_val)
# Set the TTL for our keys as well
api_window = max_api_window(entity)
pipe.expire(list_key, api_window)
pipe.expire(set_key, api_window)
pipe.execute()
# If no exception was raised in the execution, there were no transaction conflicts
break
except redis.WatchError:
if count > 10:
raise RateLimiterLockingException()
count += 1
continue
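# --- Illustrative usage (a minimal sketch, not part of the module's API) ---
def _example_rate_limit_check(user: UserProfile) -> bool:
    """Sketch of how the functions above compose for one incoming request.

    Assumes the module-level redis ``client`` and default ``rules`` are
    already configured; returns True if the request may proceed.
    """
    entity = RateLimitedUser(user)  # falls back to the global rules
    ratelimited, time_till_free = is_ratelimited(entity)
    if ratelimited:
        # A caller would reject here, e.g. sending Retry-After: time_till_free.
        return False
    incr_ratelimit(entity)  # record this request against the window
    return True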
|
|
#!/usr/bin/env python
import sys
import os
import re
import numpy
from PIL import Image
import StringIO
# you need to install this library yourself
# recent versions handle bigtiff too...
import tifffile
"""
Extract pyramidal TIFF files with JPEG tiled storage into a tree of
separate JPEG files into DZI compliant directory that is usable
by openseadragon. Multiple channels info are combined into single
jpeg.
usage: extract2dzi_rgb.py pyramid-file-dir dest-dir
The pyramid-file must be a multi-page TIFF with each page having an
image scaled by 1/2 from the previous page. All pages must be tiled
with the same tile size, and tiles must be stored using the new-style
JPEG compression format, i.e. TIFF compression == 7.
The lowest resolution page must have 4 or fewer tiles. If it has
more than 1, this script will leave space for the user to decide whether
final lowest zoom tile 0/0_0.jpg that is 1/2 scaled version of the image
represented by that last page should be generated or not.
File directory generated
dest-dir
ImageProperties.xml
0
0_0.jpg
1
0_0.jpg
1_0.jpg
...
Since the tiled tiff kept padded tiles and openseadragon expected its
jpeg files to be cropped but not padded, the border tiles are cropped
and the width and height of image uses the actual image dimension
"""
try:
srcloc = sys.argv[1]
outloc = sys.argv[2]
    if not os.path.exists(srcloc) or not os.path.isdir(srcloc):
        sys.stderr.write('Pyramid directory must be given and exist\n')
        sys.stderr.write('\nusage: extract2dzi_rgb.py pyramid-file-directory dest-dir\n\n')
        sys.exit(1)
if not os.path.exists(outloc):
os.makedirs(outloc)
except:
sys.stderr.write('\nusage: extract2dzi_rgb.py pyramid-file-directory dest-dir\n\n')
raise
## 20140403-R26-Tdt-JJG-0-38-000-DAPI-Z3.tif
## 20140403-R26-Tdt-JJG-0-38-000-FITC-Z3.tif
## 20140403-R26-Tdt-JJG-0-38-000-Rhodamine-Z3.tif
## iterate through the files,
## if valid tiff file, then change the outdir to
## outdir/DAPI/.xml,0,1..
## essentialy like calling extract2dzi.py filename outdir/color
infile=None
txsize=0
tysize=0
pxsize=0
pysize=0
zoomno=0
outinfo=[]
total_tiles=0
topdir_template = '%(outdir)s'
dir_template = topdir_template +'/%(zoomno)d'
tile_template = dir_template + '/%(tcolno)d_%(trowno)d.jpg'
image_template = '%(outdir)s/ImageProperties.xml'
################# helper functions ###################
# http://www.w3.org/Graphics/JPEG/jfif3.pdf
def jpeg_assemble(jpeg_tables_bytes, jpeg_bytes):
return jpeg_bytes[0:2] + jpeg_tables_bytes + jpeg_bytes[2:]
def load_tile(infile, tile_offset, tile_length):
infile.seek(tile_offset)
return infile.read(tile_length)
def getTile(page, infile, jpeg_tables_bytes, tileno):
    jpeg = jpeg_assemble(jpeg_tables_bytes,
                         load_tile(infile,
                                   page.tags.tile_offsets.value[tileno],
                                   page.tags.tile_byte_counts.value[tileno]))
outfile = StringIO.StringIO()
outfile.write( jpeg )
outfile.seek(0)
image = Image.open(outfile)
ret = numpy.asarray(image)
outfile.close()
return ret
def maxTile(page, infile):
pxsize, pysize, txsize, tysize, tcols, trows, jpeg_tables_bytes = get_page_info(page)
maxval = 0
for tileno in range(0, len(page.tags.tile_offsets.value)):
tile = getTile(page, infile, jpeg_tables_bytes, tileno)
maxval = max(maxval, tile.max())
return maxval
def write_tile(tileno, trow, trows, tcol, tcols, rgb_image):
"""Output one tile. Note this manages global state for tile grouping in subdirs."""
global zoomno
global total_tiles
cropIt = False
if (trow+1 == trows) or (tcol+1 == tcols) :
#this is a border tile, crop it if need to
if tcol+1 == tcols :
cpxsize= (pxsize-(txsize * tcol))
else:
cpxsize=txsize
if trow+1 == trows :
cpysize= (pysize-(tysize * trow))
else:
cpysize=tysize
cropIt = True
total_tiles += 1
topdir = topdir_template % dict(
outdir = outloc
)
if not os.path.exists(topdir):
os.makedirs(topdir, mode=0755)
dirname = dir_template % dict(
outdir = outloc,
zoomno = zoomno
)
if not os.path.exists(dirname):
# create tile group dir on demand
os.makedirs(dirname, mode=0755)
outname = tile_template % dict(
outdir = outloc,
zoomno = zoomno,
tcolno = tcol,
trowno = trow
)
if cropIt :
rgb_image = rgb_image.crop((0,0, cpxsize, cpysize))
rgb_image.save(outname, 'JPEG')
return outname
def get_page_info(page):
pxsize = page.tags.image_width.value
pysize = page.tags.image_length.value
# get common JPEG tables to insert into all tiles
# ffd8 ffdb .... ffd9
if hasattr(page.tags, 'jpeg_tables'):
# trim off start-image/end-image byte markers at prefix and suffix
jpeg_tables_bytes = bytes(bytearray(page.tags.jpeg_tables.value))[2:-2]
else:
# no common tables to insert?
jpeg_tables_bytes = bytes(bytearray([]))
# this page has multiple JPEG tiles
txsize = page.tags.tile_width.value
tysize = page.tags.tile_length.value
tcols = pxsize / txsize + (pxsize % txsize > 0)
trows = pysize / tysize + (pysize % tysize > 0)
return pxsize, pysize, txsize, tysize, tcols, trows, jpeg_tables_bytes
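# Worked example for the ceil-division above (illustrative only): a 1000x600
# image with 256x256 tiles gives tcols = 1000/256 + (1000%256 > 0) = 3 + 1 = 4
# and trows = 600/256 + (600%256 > 0) = 2 + 1 = 3.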
def processTiff() :
global infile
global txsize
global tysize
global pxsize
global pysize
global zoomno
global total_tiles
# fname=tiff_files[0];
# chop=re.search('(?:[-][^-]*[-]Z[0-9]).tif', fname);
# t=chop.group(0)
for file in range(0, len(tiff_files)):
tiff = tifffile.TiffFile(srcloc+'/'+tiff_files[file])
tiff_tifffile.append(tiff)
pages = list(tiff)
pages.reverse()
outpages = [ page for page in pages if hasattr(page.tags, 'tile_offsets') ]
if type(outpages[0].tags.tile_offsets.value) is int:
outpages[0].tags.tile_offsets.value=[outpages[0].tags.tile_offsets.value]
outpages[0].tags.tile_byte_counts.value=[outpages[0].tags.tile_byte_counts.value]
tiff_outpages.append(outpages)
infile = open(srcloc+'/'+tiff_files[file], 'rb')
tiff_infile.append(infile)
# skip pages that aren't tiled... thumbnails?!
# outpages = tiff_outpages[0]
zoomno = 0
lowest_level = 0
total_tiles = 0
# remember values for debugging sanity checks
prev_page = None
tile_width = None
tile_length = None
reduce_ratio = 2 # default
###############CODE############
for channelno in range(0, len(tiff_outpages)):
tiff_maxval.append([])
for pageno in range(0, len(tiff_outpages[0])):
tiff_maxval[channelno].append(max(0, maxTile(tiff_outpages[channelno][pageno], tiff_infile[channelno])))
for pageno in range(0, len(tiff_outpages[0])):
page = tiff_outpages[0][pageno]
# panic if these change from reverse-engineered samples
assert page.tags.fill_order.value == 1
assert page.tags.orientation.value == 1
assert page.tags.compression.value == 7 # new-style JPEG
if prev_page is not None:
reduce_ratio=page.tags.image_width.value / prev_page.tags.image_width.value
tiff_page_info = []
for channelno in range(0, len(tiff_outpages)):
tiff_page_info.append(tiff_outpages[channelno][pageno])
for tileno in range(0, len(page.tags.tile_offsets.value)):
tile_array = []
for channelno in range(0, len(tiff_outpages)):
tiffPage = tiff_outpages[channelno][pageno]
pxsize, pysize, txsize, tysize, tcols, trows, jpeg_tables_bytes = get_page_info(tiffPage)
# figure position of tile within tile array
trow = tileno / tcols
tcol = tileno % tcols
assert trow >= 0 and trow < trows
assert tcol >= 0 and tcol < tcols
if tile_width is not None:
assert tile_width == txsize
assert tile_length == tysize
else:
tile_width = txsize
tile_length = tysize
tile = getTile(tiffPage, tiff_infile[channelno], jpeg_tables_bytes, tileno)
tile_norm = (255 * (tile.astype('float') / tiff_maxval[channelno][pageno])).astype('uint8')
tile_array.append(tile_norm)
rgb_array = numpy.dstack( tuple(tile_array) )
rgb_image = Image.fromarray(rgb_array)
write_tile(tileno, trow, trows, tcol, tcols, rgb_image)
outinfo.append(
dict(
tile_width= txsize,
tile_length= tysize,
image_width_orig= pxsize,
image_length_orig= pysize,
image_width_padded= tcols * txsize,
image_length_padded= trows * tysize,
image_level = zoomno,
total_tile_count= total_tiles,
color_type = 'combo',
level_scale=reduce_ratio
)
)
# each page is next higher zoom level
zoomno += 1
prev_page = page
for infile in tiff_infile:
infile.close()
    imageinfo = outinfo[-1]
    imageinfo['image_lowest_level'] = lowest_level
    imageinfo['data_location'] = outloc
image_descriptor = """\
<?xml version="1.0" encoding="UTF-8"?>
<IMAGE_PROPERTIES
width="%(image_width_orig)d"
height="%(image_length_orig)d"
numTiles="%(total_tile_count)d"
numImages="1"
version="2.0"
meterScaleInPixels="402738.62263391056"
tileWidth="%(tile_width)d"
tileHeight="%(tile_length)d"
levelScale="%(level_scale)d"
channelName="%(color_type)s"
minLevel="%(image_lowest_level)d"
maxLevel="%(image_level)d"
data="%(data_location)s"
/>
""" % imageinfo
    iname = image_template % dict(outdir=outloc)
    f = open(iname, 'w')
    f.write(image_descriptor)
    f.close()
###############################################
tiff_files = []
tiff_outpages = []
tiff_tifffile = []
tiff_infile = []
tiff_maxval = []
redColors = ['Rhodamine', 'RFP', 'Alexa Fluor 555', 'Alexa Fluor 594', 'tdTomato', 'Alexa Fluor 633', 'Alexa Fluor 647']
greenColors = ['FITC', 'Alexa 488', 'EGFP', 'Alexa Fluor 488']
blueColors = ['DAPI']
tiff_colors = {'reds': redColors, 'greens': greenColors, 'blues': blueColors}
def getFileColors(file):
colorMatched = None
for colors in tiff_colors:
for color in tiff_colors[colors]:
if re.match('.*[-]%s([-]Z[0-9]+)*[.]tif' % color, file):
colorMatched = True
return colors
if not colorMatched:
sys.stderr.write('Unknown color for file "%s" \n' % file)
sys.exit(1)
def getFileColor(file):
colorMatched = None
for colors in tiff_colors:
for color in tiff_colors[colors]:
if re.match('.*[-]%s([-]Z[0-9]+)*[.]tif' % color, file):
colorMatched = True
return color
if not colorMatched:
sys.stderr.write('Unknown color for file "%s" \n' % file)
sys.exit(1)
def checkFileColors(files):
for file in files:
colorMatched = None
for colors in tiff_colors:
for color in tiff_colors[colors]:
if re.match('.*[-]%s([-]Z1)*[.]tif' % color, file):
colorMatched = True
break
if colorMatched:
break
if not colorMatched:
sys.stderr.write('000Unknown color for file "%s" \n' % file)
sys.exit(1)
def colorFile(files, colors, pattern):
tifFiles = []
for color in colors:
colorFiles = [ f for f in files if re.match('.*[-]%s%s' % (color, pattern), f) ]
if len(colorFiles) == 1:
tifFiles.append(colorFiles[0])
if len(tifFiles) > 0:
return tifFiles
else:
return None
def getTiffFiles(dname):
global tiff_files
files = os.listdir(dname)
z1 = [f for f in files if re.match('.*[-]Z1[.]tif', f)]
if len(z1) > 0:
checkFileColors(z1)
stacks = len(files) / len(z1)
stackNo = stacks / 2
if stackNo * 2 < stacks:
stackNo += 1
stackPattern = '[-]Z%d[.]tif' % stackNo
else:
stackPattern = '[.]tif'
for colors in tiff_colors:
colorFiles = colorFile(files, colors, stackPattern)
if colorFiles:
for file in colorFiles:
tiff_files.append(file)
if len(tiff_files) == 0:
files = [ '%s' % (f) for f in files if re.match('.*%s' % stackPattern, f) ]
## need to reorder it into RGB order.
red_one=0
blue_one=0
green_one=0
for f in files:
c=getFileColors(f)
if c == 'reds':
red_one=f
if c == 'blues':
blue_one=f
if c == 'greens':
green_one=f
tiff_files = [red_one, green_one, blue_one ]
# print "red is "+red_one
# print "blue is "+blue_one
# print "green is "+green_one
####### Main body ######
try:
getTiffFiles(srcloc)
except SystemExit:
raise
if len(tiff_files) == 0:
print 'Nothing to do'
sys.exit()
if not os.path.exists(outloc):
os.makedirs(outloc)
processTiff()
|
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Callable, Dict, Optional, Sequence, Tuple
from google.api_core import grpc_helpers
from google.api_core import gapic_v1
import google.auth # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import grpc # type: ignore
from google.ads.googleads.v10.services.types import (
customer_extension_setting_service,
)
from .base import CustomerExtensionSettingServiceTransport, DEFAULT_CLIENT_INFO
class CustomerExtensionSettingServiceGrpcTransport(
CustomerExtensionSettingServiceTransport
):
"""gRPC backend transport for CustomerExtensionSettingService.
Service to manage customer extension settings.
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends protocol buffers over the wire using gRPC (which is built on
top of HTTP/2); the ``grpcio`` package must be installed.
"""
_stubs: Dict[str, Callable]
def __init__(
self,
*,
host: str = "googleads.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: str = None,
scopes: Sequence[str] = None,
channel: grpc.Channel = None,
api_mtls_endpoint: str = None,
client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
ssl_channel_credentials: grpc.ChannelCredentials = None,
client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is ignored if ``channel`` is provided.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
            scopes (Optional[Sequence[str]]): A list of scopes. This argument is
ignored if ``channel`` is provided.
channel (Optional[grpc.Channel]): A ``Channel`` instance through
which to make calls.
api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
If provided, it overrides the ``host`` argument and tries to create
a mutual TLS channel with client SSL credentials from
``client_cert_source`` or application default SSL credentials.
client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
Deprecated. A callback to provide client SSL certificate bytes and
private key bytes, both in PEM format. It is ignored if
``api_mtls_endpoint`` is None.
ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
for the grpc channel. It is ignored if ``channel`` is provided.
client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
A callback to provide client certificate bytes and private key bytes,
both in PEM format. It is used to configure a mutual TLS channel. It is
ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
always_use_jwt_access (Optional[bool]): Whether self signed JWT should
be used for service account credentials.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
self._grpc_channel = None
self._ssl_channel_credentials = ssl_channel_credentials
self._stubs: Dict[str, Callable] = {}
if api_mtls_endpoint:
warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
if client_cert_source:
warnings.warn(
"client_cert_source is deprecated", DeprecationWarning
)
if channel:
# Ignore credentials if a channel was passed.
credentials = False
# If a channel was explicitly provided, set it.
self._grpc_channel = channel
self._ssl_channel_credentials = None
else:
if api_mtls_endpoint:
host = api_mtls_endpoint
# Create SSL credentials with client_cert_source or application
# default SSL credentials.
if client_cert_source:
cert, key = client_cert_source()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
else:
self._ssl_channel_credentials = (
SslCredentials().ssl_credentials
)
else:
if client_cert_source_for_mtls and not ssl_channel_credentials:
cert, key = client_cert_source_for_mtls()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
# The base transport sets the host, credentials and scopes
super().__init__(
host=host,
credentials=credentials,
credentials_file=credentials_file,
scopes=scopes,
quota_project_id=quota_project_id,
client_info=client_info,
always_use_jwt_access=always_use_jwt_access,
)
if not self._grpc_channel:
self._grpc_channel = type(self).create_channel(
self._host,
# use the credentials which are saved
credentials=self._credentials,
# Set ``credentials_file`` to ``None`` here as
# the credentials that we saved earlier should be used.
credentials_file=None,
scopes=self._scopes,
ssl_credentials=self._ssl_channel_credentials,
quota_project_id=quota_project_id,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
# Wrap messages. This must be done after self._grpc_channel exists
self._prep_wrapped_messages(client_info)
@classmethod
def create_channel(
cls,
host: str = "googleads.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: str = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
**kwargs,
) -> grpc.Channel:
"""Create and return a gRPC channel object.
Args:
host (Optional[str]): The host for the channel to use.
credentials (Optional[~.Credentials]): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is mutually exclusive with credentials.
            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
kwargs (Optional[dict]): Keyword arguments, which are passed to the
channel creation.
Returns:
grpc.Channel: A gRPC channel object.
Raises:
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
return grpc_helpers.create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
quota_project_id=quota_project_id,
default_scopes=cls.AUTH_SCOPES,
scopes=scopes,
default_host=cls.DEFAULT_HOST,
**kwargs,
)
@property
def grpc_channel(self) -> grpc.Channel:
"""Return the channel designed to connect to this service.
"""
return self._grpc_channel
@property
def mutate_customer_extension_settings(
self,
) -> Callable[
[
customer_extension_setting_service.MutateCustomerExtensionSettingsRequest
],
customer_extension_setting_service.MutateCustomerExtensionSettingsResponse,
]:
r"""Return a callable for the mutate customer extension
settings method over gRPC.
Creates, updates, or removes customer extension settings.
Operation statuses are returned.
List of thrown errors: `AuthenticationError <>`__
`AuthorizationError <>`__ `CollectionSizeError <>`__
`CriterionError <>`__ `DatabaseError <>`__ `DateError <>`__
`DistinctError <>`__ `ExtensionSettingError <>`__
`FieldError <>`__ `HeaderError <>`__ `IdError <>`__
`InternalError <>`__ `ListOperationError <>`__
`MutateError <>`__ `NewResourceCreationError <>`__
`NotEmptyError <>`__ `NullError <>`__ `OperatorError <>`__
`QuotaError <>`__ `RangeError <>`__ `RequestError <>`__
`SizeLimitError <>`__ `StringFormatError <>`__
`StringLengthError <>`__ `UrlFieldError <>`__
Returns:
Callable[[~.MutateCustomerExtensionSettingsRequest],
~.MutateCustomerExtensionSettingsResponse]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "mutate_customer_extension_settings" not in self._stubs:
self._stubs[
"mutate_customer_extension_settings"
] = self.grpc_channel.unary_unary(
"/google.ads.googleads.v10.services.CustomerExtensionSettingService/MutateCustomerExtensionSettings",
request_serializer=customer_extension_setting_service.MutateCustomerExtensionSettingsRequest.serialize,
response_deserializer=customer_extension_setting_service.MutateCustomerExtensionSettingsResponse.deserialize,
)
return self._stubs["mutate_customer_extension_settings"]
def close(self):
self.grpc_channel.close()
__all__ = ("CustomerExtensionSettingServiceGrpcTransport",)
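# --- Illustrative usage (a sketch, not generated code) ---
def _example_create_transport() -> CustomerExtensionSettingServiceGrpcTransport:
    """Construct the transport directly with application-default credentials.

    Most applications obtain a transport via the GoogleAdsClient instead;
    this sketch only exercises the constructor and stub property above.
    """
    transport = CustomerExtensionSettingServiceGrpcTransport(
        host="googleads.googleapis.com",
        credentials=None,  # resolved from the environment by google.auth
    )
    # Accessing the property lazily creates (and caches) the gRPC stub.
    _ = transport.mutate_customer_extension_settings
    return transport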
|
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test `BrownianInterval`.
The suite tests both running on CPU and CUDA (if available).
"""
import sys
sys.path = sys.path[1:] # A hack so that we always import the installed library.
import math
import numpy.random as npr
import torch
from scipy.stats import kstest
import pytest
import torchsde
torch.manual_seed(1147481649)
torch.set_default_dtype(torch.float64)
D = 3
SMALL_BATCH_SIZE = 16
LARGE_BATCH_SIZE = 131072
REPS = 2
MEDIUM_REPS = 25
LARGE_REPS = 500
ALPHA = 0.00001
devices = [cpu, gpu] = [torch.device('cpu'), torch.device('cuda')]
def _U_to_H(W: torch.Tensor, U: torch.Tensor, h: float) -> torch.Tensor:
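    # Space-time Levy area: recover H from U (the time integral of the
    # increment) over a window of length h via H = U / h - W / 2.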
return U / h - .5 * W
def _setup(device, levy_area_approximation, shape):
t0, t1 = torch.tensor([0., 1.], device=device)
ta = torch.rand([], device=device)
tb = torch.rand([], device=device)
ta, tb = min(ta, tb), max(ta, tb)
bm = torchsde.BrownianInterval(t0=t0, t1=t1, size=shape, device=device,
levy_area_approximation=levy_area_approximation)
return ta, tb, bm
def _levy_returns():
yield "none", False, False
yield "space-time", False, False
yield "space-time", True, False
for levy_area_approximation in ('davie', 'foster'):
for return_U in (True, False):
for return_A in (True, False):
yield levy_area_approximation, return_U, return_A
@pytest.mark.parametrize("device", devices)
@pytest.mark.parametrize("levy_area_approximation, return_U, return_A", _levy_returns())
def test_shape(device, levy_area_approximation, return_U, return_A):
if device == gpu and not torch.cuda.is_available():
pytest.skip(msg="CUDA not available.")
for shape, A_shape in (((SMALL_BATCH_SIZE, D), (SMALL_BATCH_SIZE, D, D)),
((SMALL_BATCH_SIZE,), (SMALL_BATCH_SIZE,)),
((), ())):
ta, tb, bm = _setup(device, levy_area_approximation, shape)
sample1 = bm(ta, return_U=return_U, return_A=return_A)
sample2 = bm(tb, return_U=return_U, return_A=return_A)
sample3 = bm(ta, tb, return_U=return_U, return_A=return_A)
shapes = []
A_shapes = []
for sample in (sample1, sample2, sample3):
if return_U:
if return_A:
W1, U1, A1 = sample
shapes.append(W1.shape)
shapes.append(U1.shape)
A_shapes.append(A1.shape)
else:
W1, U1 = sample
shapes.append(W1.shape)
shapes.append(U1.shape)
else:
if return_A:
W1, A1 = sample
shapes.append(W1.shape)
A_shapes.append(A1.shape)
else:
W1 = sample
shapes.append(W1.shape)
for shape_ in shapes:
assert shape_ == shape
for shape_ in A_shapes:
assert shape_ == A_shape
@pytest.mark.parametrize("device", devices)
@pytest.mark.parametrize("levy_area_approximation, return_U, return_A", _levy_returns())
def test_determinism_simple(device, levy_area_approximation, return_U, return_A):
if device == gpu and not torch.cuda.is_available():
pytest.skip(msg="CUDA not available.")
ta, tb, bm = _setup(device, levy_area_approximation, (SMALL_BATCH_SIZE, D))
vals = [bm(ta, tb, return_U=return_U, return_A=return_A) for _ in range(REPS)]
for val in vals[1:]:
if torch.is_tensor(val):
val = (val,)
if torch.is_tensor(vals[0]):
val0 = (vals[0],)
else:
val0 = vals[0]
for v, v0 in zip(val, val0):
assert (v == v0).all()
@pytest.mark.parametrize("device", devices)
@pytest.mark.parametrize("levy_area_approximation, return_U, return_A", _levy_returns())
def test_determinism_large(device, levy_area_approximation, return_U, return_A):
"""
Tests that a single Brownian motion deterministically produces the same results when queried at the same points.
We first of all query it at lots of points (larger than its internal cache), and then re-query at the same set of
points, and compare.
"""
if device == gpu and not torch.cuda.is_available():
pytest.skip(msg="CUDA not available.")
ta, tb, bm = _setup(device, levy_area_approximation, (SMALL_BATCH_SIZE, D))
cache = {}
for _ in range(LARGE_REPS):
ta_ = torch.rand_like(ta)
tb_ = torch.rand_like(tb)
ta_, tb_ = min(ta_, tb_), max(ta_, tb_)
val = bm(ta_, tb_, return_U=return_U, return_A=return_A)
if torch.is_tensor(val):
val = (val,)
cache[ta_, tb_] = tuple(v.detach().clone() for v in val)
cache2 = {}
for ta_, tb_ in cache:
val = bm(ta_, tb_, return_U=return_U, return_A=return_A)
if torch.is_tensor(val):
val = (val,)
cache2[ta_, tb_] = tuple(v.detach().clone() for v in val)
for ta_, tb_ in cache:
for v1, v2 in zip(cache[ta_, tb_], cache2[ta_, tb_]):
assert (v1 == v2).all()
@pytest.mark.parametrize("device", devices)
@pytest.mark.parametrize("levy_area_approximation", ['none', 'space-time', 'davie', 'foster'])
def test_normality_simple(device, levy_area_approximation):
if device == gpu and not torch.cuda.is_available():
pytest.skip(msg="CUDA not available.")
t0, t1 = 0.0, 1.0
for _ in range(REPS):
base_W = torch.tensor(npr.randn(), device=device).repeat(LARGE_BATCH_SIZE)
bm = torchsde.BrownianInterval(t0=t0, t1=t1, W=base_W, levy_area_approximation=levy_area_approximation)
t_ = npr.uniform(low=t0, high=t1)
W = bm(t0, t_)
mean_W = base_W * (t_ - t0) / (t1 - t0)
std_W = math.sqrt((t1 - t_) * (t_ - t0) / (t1 - t0))
rescaled_W = (W - mean_W) / std_W
_, pval = kstest(rescaled_W.cpu().detach().numpy(), 'norm')
assert pval >= ALPHA
if levy_area_approximation != 'none':
W, U = bm(t0, t_, return_U=True)
H = _U_to_H(W, U, t_ - t0)
mean_H = 0
std_H = math.sqrt((t_ - t0) / 12)
rescaled_H = (H - mean_H) / std_H
_, pval = kstest(rescaled_H.cpu().detach().numpy(), 'norm')
assert pval >= ALPHA
@pytest.mark.parametrize("device", devices)
@pytest.mark.parametrize("levy_area_approximation", ['none', 'space-time', 'davie', 'foster'])
def test_normality_conditional(device, levy_area_approximation):
if device == gpu and not torch.cuda.is_available():
pytest.skip(msg="CUDA not available.")
t0, t1 = 0.0, 1.0
for _ in range(REPS):
bm = torchsde.BrownianInterval(t0=t0, t1=t1, size=(LARGE_BATCH_SIZE,), device=device,
levy_area_approximation=levy_area_approximation)
for _ in range(MEDIUM_REPS):
ta, t_, tb = sorted(npr.uniform(low=t0, high=t1, size=(3,)))
W = bm(ta, tb)
W1 = bm(ta, t_)
W2 = bm(t_, tb)
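            # Brownian bridge: conditioned on W over [ta, tb], the sub-increment
            # over [ta, t_] is Gaussian with mean W * (t_ - ta) / (tb - ta) and
            # std sqrt((tb - t_) * (t_ - ta) / (tb - ta)); rescale and KS-test.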
mean_W1 = W * (t_ - ta) / (tb - ta)
std_W1 = math.sqrt((tb - t_) * (t_ - ta) / (tb - ta))
rescaled_W1 = (W1 - mean_W1) / std_W1
_, pval = kstest(rescaled_W1.cpu().detach().numpy(), 'norm')
assert pval >= ALPHA
mean_W2 = W * (tb - t_) / (tb - ta)
std_W2 = math.sqrt((tb - t_) * (t_ - ta) / (tb - ta))
rescaled_W2 = (W2 - mean_W2) / std_W2
_, pval = kstest(rescaled_W2.cpu().detach().numpy(), 'norm')
assert pval >= ALPHA
if levy_area_approximation != 'none':
W, U = bm(ta, tb, return_U=True)
W1, U1 = bm(ta, t_, return_U=True)
W2, U2 = bm(t_, tb, return_U=True)
h = tb - ta
h1 = t_ - ta
h2 = tb - t_
denom = math.sqrt(h1 ** 3 + h2 ** 3)
a = h1 ** 3.5 * h2 ** 0.5 / (2 * h * denom)
b = h1 ** 0.5 * h2 ** 3.5 / (2 * h * denom)
c = math.sqrt(3) * h1 ** 1.5 * h2 ** 1.5 / (6 * denom)
H = _U_to_H(W, U, h)
H1 = _U_to_H(W1, U1, h1)
H2 = _U_to_H(W2, U2, h2)
mean_H1 = H * (h1 / h) ** 2
std_H1 = math.sqrt(a ** 2 + c ** 2) / h1
rescaled_H1 = (H1 - mean_H1) / std_H1
_, pval = kstest(rescaled_H1.cpu().detach().numpy(), 'norm')
assert pval >= ALPHA
mean_H2 = H * (h2 / h) ** 2
std_H2 = math.sqrt(b ** 2 + c ** 2) / h2
rescaled_H2 = (H2 - mean_H2) / std_H2
_, pval = kstest(rescaled_H2.cpu().detach().numpy(), 'norm')
assert pval >= ALPHA
@pytest.mark.parametrize("device", devices)
@pytest.mark.parametrize("levy_area_approximation", ['none', 'space-time', 'davie', 'foster'])
def test_consistency(device, levy_area_approximation):
if device == gpu and not torch.cuda.is_available():
pytest.skip(msg="CUDA not available.")
t0, t1 = 0.0, 1.0
for _ in range(REPS):
bm = torchsde.BrownianInterval(t0=t0, t1=t1, size=(LARGE_BATCH_SIZE,), device=device,
levy_area_approximation=levy_area_approximation)
for _ in range(MEDIUM_REPS):
ta, t_, tb = sorted(npr.uniform(low=t0, high=t1, size=(3,)))
if levy_area_approximation == 'none':
W = bm(ta, tb)
W1 = bm(ta, t_)
W2 = bm(t_, tb)
else:
W, U = bm(ta, tb, return_U=True)
W1, U1 = bm(ta, t_, return_U=True)
W2, U2 = bm(t_, tb, return_U=True)
torch.testing.assert_allclose(W1 + W2, W, rtol=1e-6, atol=1e-6)
if levy_area_approximation != 'none':
torch.testing.assert_allclose(U1 + U2 + (tb - t_) * W1, U, rtol=1e-6, atol=1e-6)
# We don't test the return_A case because we don't expect that to be consistent.
@pytest.mark.parametrize("random_order", [False, True])
@pytest.mark.parametrize("device", devices)
@pytest.mark.parametrize("levy_area_approximation, return_U, return_A", _levy_returns())
def test_entropy_determinism(random_order, device, levy_area_approximation, return_U, return_A):
if device == gpu and not torch.cuda.is_available():
pytest.skip(msg="CUDA not available.")
t0, t1 = 0.0, 1.0
entropy = 56789
points1 = torch.rand(1000)
points2 = torch.rand(1000)
outs = []
tol = 1e-6 if random_order else 0.
bm = torchsde.BrownianInterval(t0=t0, t1=t1, size=(), device=device,
levy_area_approximation=levy_area_approximation, entropy=entropy, tol=tol,
halfway_tree=random_order)
for point1, point2 in zip(points1, points2):
point1, point2 = sorted([point1, point2])
outs.append(bm(point1, point2, return_U=return_U, return_A=return_A))
bm = torchsde.BrownianInterval(t0=t0, t1=t1, size=(), device=device,
levy_area_approximation=levy_area_approximation, entropy=entropy, tol=tol,
halfway_tree=random_order)
if random_order:
perm = torch.randperm(1000)
points1 = points1[perm]
points2 = points2[perm]
outs = [outs[i.item()] for i in perm]
for point1, point2, out in zip(points1, points2, outs):
point1, point2 = sorted([point1, point2])
out_ = bm(point1, point2, return_U=return_U, return_A=return_A)
# Assert equal
if torch.is_tensor(out):
out = (out,)
if torch.is_tensor(out_):
out_ = (out_,)
for outi, outi_ in zip(out, out_):
if torch.is_tensor(outi):
assert (outi == outi_).all()
else:
assert outi == outi_
|
|
speakerdeck = {
"endpoint": "https://speakerdeck.com/oembed.{format}",
"urls": [
r'^https?://speakerdeck\.com/.+$',
],
}
app_net = {
"endpoint": "https://alpha-api.app.net/oembed",
"urls": [
r'^https?://alpha\.app\.net/[^#?/]+/post/.+$',
r'^https?://photos\.app\.net/[^#?/]+/.+$',
],
}
youtube = {
"endpoint": "https://www.youtube.com/oembed",
"urls": [
r'^https?://(?:[-\w]+\.)?youtube\.com/watch.+$',
r'^https?://(?:[-\w]+\.)?youtube\.com/v/.+$',
r'^https?://youtu\.be/.+$',
r'^https?://(?:[-\w]+\.)?youtube\.com/user/.+$',
r'^https?://(?:[-\w]+\.)?youtube\.com/[^#?/]+#[^#?/]+/.+$',
r'^https?://m\.youtube\.com/index.+$',
r'^https?://(?:[-\w]+\.)?youtube\.com/profile.+$',
r'^https?://(?:[-\w]+\.)?youtube\.com/view_play_list.+$',
r'^https?://(?:[-\w]+\.)?youtube\.com/playlist.+$',
],
}
deviantart = {
"endpoint": "https://backend.deviantart.com/oembed",
"urls": [
r'^https?://(?:[-\w]+\.)?deviantart\.com/art/.+$',
r'^https?://fav\.me/.+$',
r'^https?://sta\.sh/.+$',
r'^https?://(?:[-\w]+\.)?deviantart\.com/[^#?/]+#/d.+$',
],
}
blip_tv = {
"endpoint": "http://blip.tv/oembed/",
"urls": [
r'^http://[-\w]+\.blip\.tv/.+$',
],
}
dailymotion = {
"endpoint": "https://www.dailymotion.com/api/oembed/",
"urls": [
r'^https?://[-\w]+\.dailymotion\.com/.+$',
],
}
flikr = {
"endpoint": "https://www.flickr.com/services/oembed/",
"urls": [
r'^https?://[-\w]+\.flickr\.com/photos/.+$',
        r'^https?://flic\.kr/.+$',
],
}
hulu = {
"endpoint": "https://www.hulu.com/api/oembed.{format}",
"urls": [
r'^https?://www\.hulu\.com/watch/.+$',
],
}
nfb = {
"endpoint": "http://www.nfb.ca/remote/services/oembed/",
"urls": [
r'^http://(?:[-\w]+\.)?nfb\.ca/film/.+$',
],
}
qik = {
"endpoint": "http://qik.com/api/oembed.{format}",
"urls": [
r'^http://qik\.com/.+$',
r'^http://qik\.ly/.+$',
],
}
revision3 = {
"endpoint": "http://revision3.com/api/oembed/",
"urls": [
r'^http://[-\w]+\.revision3\.com/.+$',
],
}
scribd = {
"endpoint": "https://www.scribd.com/services/oembed",
"urls": [
r'^https?://[-\w]+\.scribd\.com/.+$',
],
}
viddler = {
"endpoint": "https://www.viddler.com/oembed/",
"urls": [
r'^https?://[-\w]+\.viddler\.com/v/.+$',
r'^https?://[-\w]+\.viddler\.com/explore/.+$',
],
}
vimeo = {
"endpoint": "https://www.vimeo.com/api/oembed.{format}",
"urls": [
r'^https?://(?:www\.)?vimeo\.com/.+$',
r'^https?://player\.vimeo\.com/.+$',
],
}
dotsub = {
"endpoint": "https://dotsub.com/services/oembed",
"urls": [
r'^https?://dotsub\.com/view/.+$',
],
}
yfrog = {
"endpoint": "http://www.yfrog.com/api/oembed",
"urls": [
r'^https?://(?:www\.)?yfrog\.com/.+$',
r'^https?://(?:www\.)?yfrog\.us/.+$',
],
}
clickthrough = {
"endpoint": "http://clikthrough.com/services/oembed",
"urls": [
r'^https?://(?:[-\w]+\.)?clikthrough\.com/.+$',
],
}
kinomap = {
"endpoint": "https://www.kinomap.com/oembed",
"urls": [
r'^https?://[-\w]+\.kinomap\.com/.+$',
],
}
photobucket = {
"endpoint": "https://photobucket.com/oembed",
"urls": [
r'^http://(?:[-\w]+\.)?photobucket\.com/albums/.+$',
r'^http://(?:[-\w]+\.)?photobucket\.com/groups/.+$',
],
}
slideshare = {
"endpoint": "https://www.slideshare.net/api/oembed/2",
"urls": [
r'^https?://www\.slideshare\.net/.+$',
],
}
major_league_gaming = {
"endpoint": "http://tv.majorleaguegaming.com/oembed",
"urls": [
r'^http://mlg\.tv/.+$',
r'^http://tv\.majorleaguegaming\.com/.+$',
],
}
opera = {
"endpoint": "http://my.opera.com/service/oembed",
"urls": [
r'^http://my\.opera\.com/.+$',
],
}
skitch = {
"endpoint": "http://skitch.com/oembed",
"urls": [
r'^https?://(?:www\.)?skitch\.com/.+$',
r'^http://skit\.ch/.+$',
],
}
twitter = {
"endpoint": "https://api.twitter.com/1/statuses/oembed.{format}",
"urls": [
r'^https?://twitter\.com/(?:#!)?[^#?/]+/status/.+$',
],
}
soundcloud = {
"endpoint": "https://soundcloud.com/oembed",
"urls": [
r'^https://soundcloud\.com/.+$',
],
}
collegehumor = {
"endpoint": "http://www.collegehumor.com/oembed.{format}",
"urls": [
r'^http://(?:www\.)?collegehumor\.com/video/.+$',
r'^http://(?:www\.)?collegehumor\.com/video:.+$',
],
}
polleverywhere = {
"endpoint": "https://www.polleverywhere.com/services/oembed/",
"urls": [
r'^https?://www\.polleverywhere\.com/polls/.+$',
r'^https?://www\.polleverywhere\.com/multiple_choice_polls/.+$',
r'^https?://www\.polleverywhere\.com/free_text_polls/.+$',
],
}
ifixit = {
"endpoint": "https://www.ifixit.com/Embed",
"urls": [
r'^https?://www\.ifixit\.com/[^#?/]+/[^#?/]+/.+$',
],
}
smugmug = {
"endpoint": "https://api.smugmug.com/services/oembed/",
"urls": [
r'^https?://(?:www\.)?smugmug\.com/[^#?/]+/.+$',
],
}
github_gist = {
"endpoint": "https://github.com/api/oembed",
"urls": [
r'^https?://gist\.github\.com/.+$',
],
}
animoto = {
"endpoint": "https://animoto.com/services/oembed",
"urls": [
r'^https?://animoto\.com/play/.+$',
],
}
rdio = {
"endpoint": "http://www.rdio.com/api/oembed",
"urls": [
        r'^http://(?:www\.)?rdio\.com/people/[^#?/]+/playlists/.+$',
r'^http://[-\w]+\.rdio\.com/artist/[^#?/]+/album/.+$',
],
}
five_min = {
"endpoint": "http://api.5min.com/oembed.{format}",
"urls": [
r'^http://www\.5min\.com/video/.+$',
],
}
five_hundred_px = {
"endpoint": "https://500px.com/photo/{1}/oembed.{format}",
"urls": [
r'^https?://500px\.com/photo/([^#?/]+)(?:.+)?$',
],
}
dipdive = {
"endpoint": "http://api.dipdive.com/oembed.{format}",
"urls": [
r'^http://[-\w]+\.dipdive\.com/media/.+$',
],
}
yandex = {
"endpoint": "https://video.yandex.ru/oembed.{format}",
"urls": [
r'^https?://video\.yandex\.ru/users/[^#?/]+/view/.+$',
],
}
mixcloud = {
"endpoint": "https://www.mixcloud.com/oembed/",
"urls": [
r'^https?://(?:www\.)?mixcloud\.com/.+$',
],
}
kickstarter = {
"endpoint": "https://www.kickstarter.com/services/oembed",
"urls": [
r'^https?://[-\w]+\.kickstarter\.com/projects/.+$',
],
}
coub = {
"endpoint": "http://coub.com/api/oembed.{format}",
"urls": [
r'^https?://coub\.com/view/.+$',
r'^https?://coub\.com/embed/.+$',
],
}
screenr = {
"endpoint": "http://www.screenr.com/api/oembed.{format}",
"urls": [
r'^http://www\.screenr\.com/.+$',
],
}
funny_or_die = {
"endpoint": "https://www.funnyordie.com/oembed.{format}",
"urls": [
r'^https?://www\.funnyordie\.com/videos/.+$',
],
}
wistia = {
"endpoint": "http://fast.wistia.com/oembed.{format}",
"urls": [
        r'^https?://([^/]+\.)?(wistia\.com|wi\.st)/(medias|embed)/.+$',
],
}
ustream = {
"endpoint": "http://www.ustream.tv/oembed",
"urls": [
r'^https?://(?:www\.)?ustream\.tv/.+$',
r'^https?://(?:www\.)?ustream\.com/.+$',
r'^http://ustre\.am/.+$',
],
}
wordpress = {
"endpoint": "https://wordpress.tv/oembed/",
"urls": [
r'^https?://wordpress\.tv/.+$',
],
}
polldaddy = {
"endpoint": "https://polldaddy.com/oembed/",
"urls": [
r'^https?://(?:[-\w]+\.)?polldaddy\.com/.+$',
],
}
bambuser = {
"endpoint": "http://api.bambuser.com/oembed.{format}",
"urls": [
r'^http://bambuser\.com/channel/[^#?/]+/broadcast/.+$',
r'^http://bambuser\.com/channel/.+$',
r'^http://bambuser\.com/v/.+$',
],
}
ted = {
"endpoint": "https://www.ted.com/talks/oembed.{format}",
"urls": [
r'^https?://(?:www\.)?ted\.com/talks/.+$',
r'^https?://(?:www\.)?ted\.com/talks/lang/[^#?/]+/.+$',
r'^https?://(?:www\.)?ted\.com/index\.php/talks/.+$',
r'^https?://(?:www\.)?ted\.com/index\.php/talks/lang/[^#?/]+/.+$',
],
}
chirb = {
"endpoint": "https://chirb.it/oembed.{format}",
"urls": [
r'^https?://chirb\.it/.+$',
],
}
circuitlab = {
"endpoint": "https://www.circuitlab.com/circuit/oembed/",
"urls": [
r'^https?://(?:www\.)?circuitlab\.com/circuit/.+$',
],
}
geograph_uk = {
"endpoint": "http://api.geograph.org.uk/api/oembed",
"urls": [
r'^https?://(?:[-\w]+\.)?geograph\.org\.uk/.+$',
r'^https?://(?:[-\w]+\.)?geograph\.co\.uk/.+$',
r'^https?://(?:[-\w]+\.)?geograph\.ie/.+$',
],
}
hlipp = {
"endpoint": "http://geo.hlipp.de/restapi.php/api/oembed",
"urls": [
r'^https?://geo-en\.hlipp\.de/.+$',
r'^https?://geo\.hlipp\.de/.+$',
r'^https?://germany\.geograph\.org/.+$',
],
}
geograph_gg = {
"endpoint": "http://www.geograph.org.gg/api/oembed",
"urls": [
r'^https?://(?:[-\w]+\.)?geograph\.org\.gg/.+$',
r'^https?://(?:[-\w]+\.)?geograph\.org\.je/.+$',
r'^https?://channel-islands\.geograph\.org/.+$',
r'^https?://channel-islands\.geographs\.org/.+$',
r'^https?://(?:[-\w]+\.)?channel\.geographs\.org/.+$',
],
}
vzaar = {
"endpoint": "http://vzaar.com/api/videos/{1}.{format}",
"urls": [
r'^http://(?:www\.)?vzaar\.com/videos/([^#?/]+)(?:.+)?$',
r'^http://www\.vzaar\.tv/([^#?/]+)(?:.+)?$',
r'^http://vzaar\.tv/([^#?/]+)(?:.+)?$',
r'^http://vzaar\.me/([^#?/]+)(?:.+)?$',
r'^http://[-\w]+\.vzaar\.me/([^#?/]+)(?:.+)?$',
],
}
minoto = {
"endpoint": "http://api.minoto-video.com/services/oembed.{format}",
"urls": [
r'^http://api\.minoto-video\.com/publishers/[^#?/]+/videos/.+$',
r'^http://dashboard\.minoto-video\.com/main/video/details/.+$',
r'^http://embed\.minoto-video\.com/.+$',
],
}
videojug = {
"endpoint": "http://www.videojug.com/oembed.{format}",
"urls": [
r'^https?://(?:[-\w]+\.)?videojug\.com/film/.+$',
r'^https?://(?:[-\w]+\.)?videojug\.com/payer/.+$',
r'^https?://(?:[-\w]+\.)?videojug\.com/interview/.+$',
],
}
sapo = {
"endpoint": "http://videos.sapo.pt/oembed",
"urls": [
r'^https?://videos\.sapo\.pt/.+$',
],
}
vhx_tv = {
"endpoint": "http://vhx.tv/services/oembed.{format}",
"urls": [
r'^https?://(?:www\.)?vhx\.tv/.+$',
],
}
justin_tv = {
"endpoint": "http://api.justin.tv/api/embed/from_url.{format}",
"urls": [
r'^https?://(?:www\.)?justin\.tv/.+$',
],
}
official_fm = {
"endpoint": "http://official.fm/services/oembed.{format}",
"urls": [
r'^https?://official\.fm/.+$',
],
}
huffduffer = {
"endpoint": "https://huffduffer.com/oembed",
"urls": [
r'^https?://(?:www\.)?huffduffer\.com/[^#?/]+/.+$',
],
}
spotify = {
"endpoint": "https://embed.spotify.com/oembed/",
"urls": [
r'^https?://open\.spotify\.com/.+$',
r'^https?://spoti\.fi/.+$',
],
}
shoudio = {
"endpoint": "https://shoudio.com/api/oembed",
"urls": [
r'^https?://shoudio\.com/.+$',
r'^https?://shoud\.io/.+$',
],
}
mobypicture = {
"endpoint": "http://api.mobypicture.com/oEmbed",
"urls": [
r'^https?://(?:www\.)?mobypicture\.com/user/[^#?/]+/view/.+$',
r'^https?://(?:www\.)?moby\.to/.+$',
],
}
twenty_three_hq = {
"endpoint": "http://www.23hq.com/23/oembed",
"urls": [
r'^https?://(?:www\.)?23hq\.com/[^#?/]+/photo/.+$',
],
}
gmep = {
"endpoint": "http://gmep.org/oembed.{format}",
"urls": [
r'^https?://(?:www\.)?gmep\.org/.+$',
r'^https?://gmep\.imeducate\.com/.+$',
],
}
urtak = {
"endpoint": "http://oembed.urtak.com/1/oembed",
"urls": [
r'^https?://(?:[-\w]+\.)?urtak\.com/.+$',
],
}
cacoo = {
"endpoint": "http://cacoo.com/oembed.{format}",
"urls": [
r'^https?://cacoo\.com/.+$',
],
}
dailymile = {
"endpoint": "http://api.dailymile.com/oembed",
"urls": [
r'^https?://(?:www\.)?dailymile\.com/people/[^#?/]+/entries/.+$',
],
}
dipity = {
"endpoint": "http://www.dipity.com/oembed/timeline/",
"urls": [
r'^https?://(?:www\.)?dipity\.com/timeline/.+$',
r'^https?://(?:www\.)?dipity\.com/voaweb/.+$',
],
}
sketchfab = {
"endpoint": "https://sketchfab.com/oembed",
"urls": [
r'^https?://sketchfab\.com/show/.+$',
],
}
meetup = {
"endpoint": "https://api.meetup.com/oembed",
"urls": [
r'^https?://(?:www\.)?meetup\.com/.+$',
r'^https?://(?:www\.)?meetup\.ps/.+$',
],
}
roomshare = {
"endpoint": "https://roomshare.jp/oembed.{format}",
"urls": [
r'^https?://(?:www\.)?roomshare\.jp/(?:en/)?post/.+$',
],
}
crowd_ranking = {
"endpoint": "http://crowdranking.com/api/oembed.{format}",
"urls": [
r'^https?://crowdranking\.com/crowdrankings/.+$',
r'^https?://crowdranking\.com/rankings/.+$',
r'^https?://crowdranking\.com/topics/.+$',
r'^https?://crowdranking\.com/widgets/.+$',
r'^https?://crowdranking\.com/r/.+$',
],
}
etsy = {
"endpoint": "https://openapi.etsy.com/svc/oembed/",
"urls": [
r'^https?://(?:www\.)?etsy\.com/listing/.+$',
],
}
audioboom = {
"endpoint": "https://audioboom.com/publishing/oembed.{format}",
"urls": [
r'^https?://audioboom\.com/boos/.+$',
r'^https?://audioboom\.com/posts/.+$',
],
}
clikthrough = {
"endpoint": "http://demo.clikthrough.com/services/oembed/",
"urls": [
r'^https?://demo\.clikthrough\.com/theater/video/.+$',
],
}
ifttt = {
"endpoint": "https://www.ifttt.com/oembed/",
"urls": [
r'^https?://ifttt\.com/recipes/.+$',
],
}
issuu = {
"endpoint": "https://issuu.com/oembed",
"urls": [
r'^https?://(?:www\.)?issuu\.com/[^#?/]+/docs/.+$',
],
}
tumblr = {
"endpoint": "https://www.tumblr.com/oembed/1.0",
"urls": [
r'^https?://.+?\.tumblr\.com/post/.+$',
]
}
vidyard = {
"endpoint": "https://api.vidyard.com/dashboard/v1.1/oembed",
"urls": [
r'^https?://play\.vidyard\.com/.+$',
r'^https?://embed\.vidyard\.com/.+$',
r'^https?://share\.vidyard\.com/.+$',
r'^https?://.+?\.hubs\.vidyard\.com/.+$'
]
}
reddit = {
    "endpoint": "https://www.reddit.com/oembed",
    "urls": [
        r'^https?://(?:www\.)?reddit\.com/r/+[^#?/]+/comments/+[^#?/]+[^#?/].+$',
    ]
}
all_providers = [
speakerdeck, app_net, youtube, deviantart, blip_tv, dailymotion, flikr,
hulu, nfb, qik, revision3, scribd, viddler, vimeo, dotsub, yfrog,
clickthrough, kinomap, photobucket, slideshare,
major_league_gaming, opera, skitch, twitter, soundcloud, collegehumor,
polleverywhere, ifixit, smugmug, github_gist, animoto, rdio, five_min,
five_hundred_px, dipdive, yandex, mixcloud, kickstarter, coub, screenr,
funny_or_die, wistia, ustream, wordpress, polldaddy, bambuser, ted, chirb,
circuitlab, geograph_uk, hlipp, geograph_gg, vzaar, minoto, videojug, sapo,
vhx_tv, justin_tv, official_fm, huffduffer, spotify, shoudio, mobypicture,
twenty_three_hq, gmep, urtak, cacoo, dailymile, dipity, sketchfab, meetup,
roomshare, crowd_ranking, etsy, audioboom, clikthrough, ifttt, issuu, tumblr, vidyard,
reddit
]
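# --- Illustrative helper (a sketch, not part of the original registry) ---
def find_provider(url):
    """Return the first provider dict whose URL patterns match ``url``.

    ``endpoint`` values may contain a ``{format}`` placeholder that the
    caller is expected to fill in (typically with ``json``).
    """
    import re
    for provider in all_providers:
        for pattern in provider["urls"]:
            if re.match(pattern, url):
                return provider
    return None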
|
|
import numpy
import random
import cv2
from imageresolve.puzzlesolver.metrics.helper import *
class Puzzle:
def __init__(self, pieces, orig, x, y, videopath=None):
maxx = x
maxy = y
self.xsize = maxx + 1
self.ysize = maxy + 1
self.pieces = pieces
# solution is vector of 3 integers - pieceId, rotation, segmentId
self.sol = numpy.full((self.xsize, self.ysize, 3), -1, int)
self.calculatemetrics()
self.orig = orig
self.maxsegid = 0
self.piecesposition = {}
self.shiftside = 0
self.rotation = True
self.video = None
if videopath is not None:
videoimg = self.getvideoimage()
height, width, depth = videoimg.shape
self.video = cv2.VideoWriter(videopath, cv2.VideoWriter_fourcc(*"MSVC"), 2, (width, height))
self.writetovideo()
def seedplaced(self):
return self.sol.max() != -1
def placeseed(self):
seedid = random.randint(0, (self.xsize * self.ysize) - 1)
seedpiece = self.pieces[seedid]
posx = random.randint(0, self.xsize - 1)
posy = random.randint(0, self.ysize - 1)
rotation = random.randint(0, 3)
self.addtosol(posx, posy, seedpiece.id, rotation)
# self.addtosol(0, 2, 14, 0)
def mostinformativepos(self):
mostneighbours = 0
bestindexes = []
for x in range(0, self.xsize):
for y in range(0, self.ysize):
if self.sol[x][y][0] == -1:
neighbours = self.getneighbours(x, y)
if len(neighbours) == mostneighbours:
bestindexes.append((x, y, neighbours))
if len(neighbours) > mostneighbours:
mostneighbours = len(neighbours)
bestindexes = [(x, y, neighbours)]
return bestindexes
def getneighbours(self, x, y):
neighbours = []
if x > 0:
if self.sol[x - 1][y][0] != -1:
neighbours.append(self.sol[x - 1][y])
if x < self.xsize - 1:
if self.sol[x + 1][y][0] != -1:
neighbours.append(self.sol[x + 1][y])
if y > 0:
if self.sol[x][y - 1][0] != -1:
neighbours.append(self.sol[x][y - 1])
if y < self.ysize - 1:
if self.sol[x][y + 1][0] != -1:
neighbours.append(self.sol[x][y + 1])
return neighbours
def getbestbuddies(self, position):
x, y, neighbours = position
buddies = []
for neigh in neighbours:
neighpos = self.getpiecepos(neigh[0])
neighpiece = self.pieces[neigh[0]]
neighrot = neigh[1]
bestbuddy = neighpiece.getbestbuddyfor(neighpos[0], neighpos[1], neighrot, x, y)
            if bestbuddy[0] not in self.piecesposition:
buddies.append(bestbuddy)
else:
buddies.append(numpy.full(2, -1, int))
return buddies
def getpiecepos(self, pieceid):
return self.piecesposition[pieceid]
def getbestpossible(self, position):
x, y, neighbours = position
metrics = []
for neigh in neighbours:
neighpos = self.getpiecepos(neigh[0])
neighpiece = self.pieces[neigh[0]]
neighrot = neigh[1]
side = gettouchingside(neighpos[0], neighpos[1], neighrot, x, y)
metrics.append(neighpiece.metrics[side])
if len(metrics) == 1:
summ = numpy.add(metrics[0], numpy.zeros_like(metrics[0]))
if len(metrics) == 2:
summ = numpy.add(metrics[0], metrics[1])
if len(metrics) == 3:
summ = numpy.add(metrics[0], numpy.add(metrics[1], metrics[2]))
if len(metrics) == 4:
summ = numpy.add(numpy.add(metrics[0], metrics[1]), numpy.add(metrics[2], metrics[3]))
pieceid = -1
rotation = -1
while True:
flatmax = numpy.nanargmax(summ)
maxtuple = numpy.unravel_index(flatmax, summ.shape)
pieceid = maxtuple[2]
if self.isset(pieceid):
summ[maxtuple[0]][maxtuple[1]][maxtuple[2]] = -1
else:
rotation = maxtuple[0]
break
return pieceid, rotation
def allset(self):
return not self.isset(-1)
def isset(self, pieceid):
for y in range(0, self.ysize):
for x in range(0, self.xsize):
if self.sol[x][y][0] == pieceid:
return True
return False
def calculatemetrics(self):
for piece in self.pieces:
piece.calculatemetrics(self.pieces)
for piece in self.pieces:
piece.calculatebestbuddies(self.pieces)
def getvideoimage(self):
image, seg = self.generateimages()
videoim = numpy.vstack([image, seg])
#cv2.imshow("Sol", videoim)
#cv2.waitKey(0)
return videoim
def getvideoimage2(self, sol):
image, seg = self.generateimages2(sol)
videoim = numpy.vstack([image, seg])
return videoim
def writetovideo(self):
if self.video is not None:
videoimg = self.getvideoimage()
self.video.write(videoimg)
def addtosol(self, x, y, pieceid, rotation):
self.sol[x][y][0] = pieceid
self.sol[x][y][1] = rotation
self.sol[x][y][2] = self.getsegmentid(x, y, rotation)
self.piecesposition[pieceid] = (x, y)
self.writetovideo()
def getsegmentid(self, x, y, r):
neighbours = self.getneighbours(x, y)
piece = self.pieces[self.sol[x][y][0]]
segids = []
for neigh in neighbours:
otherposition = self.piecesposition[neigh[0]]
if piece.isbestbuddywith(x, y, r, otherposition[0], otherposition[1], neigh[1], neigh[0]):
segids.append(neigh[2])
else:
segids.append(-1)
if len(segids) == 0:
return self.getnewsegmentid()
if -1 in segids:
return self.getnewsegmentid()
        # All neighbouring best-buddy matches agree on a segment id: join it.
        if len(frozenset(segids)) == 1:
            return segids[0]
        # Conflicting segment ids among the neighbours.
        return -2
def getlargestsegment(self):
segcount = {}
for i in range(0, self.xsize):
for j in range(0, self.ysize):
seg = self.sol[i][j][2]
if seg in segcount:
segcount[seg] += 1
else:
segcount[seg] = 1
maxcount = 0
maxseg = -1
for count in segcount:
if segcount[count] > maxcount:
maxcount = segcount[count]
maxseg = count
return maxseg
def getnewsegmentid(self):
segid = self.maxsegid
self.maxsegid += 1
return segid
def leaveonlybestsegment(self):
best = self.getlargestsegment()
for i in range(0, self.xsize):
for j in range(0, self.ysize):
seg = self.sol[i][j][2]
if seg != best:
self.removepiece(i, j)
def removepiece(self, x, y):
old = self.sol[x][y][0]
self.sol[x][y][0] = -1
self.sol[x][y][1] = -1
self.sol[x][y][2] = -1
del self.piecesposition[old]
# self.writetovideo()
def hasonlyonesegment(self):
only = -2
for x in range(0, self.xsize):
for y in range(0, self.ysize):
seg = self.sol[x][y][2]
if seg == -1:
return False
if only == -2:
only = seg
if seg != only:
return False
return True
def replacesegment(self):
minx, maxx, miny, maxy = self.getsegmentsize()
xsize = maxx - minx + 1
ysize = maxy - miny + 1
rotated = False
if self.rotation:
if xsize <= self.ysize and ysize <= self.xsize:
newsol = numpy.full((self.xsize, self.ysize, 3), -1, int)
newx = 0
newy = 0
for x in range(minx, maxx + 1):
for y in range(miny, maxy + 1):
newsol[newx + ysize - 1][newy] = self.sol[x][y]
newx -= 1
newx = 0
newy += 1
for x in range(0, self.xsize):
for y in range(0, self.ysize):
rot = newsol[x][y][1]
if rot != -1:
newsol[x][y][1] = getcounterclockwiseside(rot)
rotated = True
self.rotation = False
else:
newsol = numpy.copy(self.sol)
else:
newsol = numpy.copy(self.sol)
if rotated:
self.clearboard()
for x in range(0, self.xsize):
for y in range(0, self.ysize):
if newsol[x][y][0] != -1:
self.addtosol(x, y, newsol[x][y][0], newsol[x][y][1])
return True
else:
self.rotation = True
emptycols = range(0, self.xsize)
fullcols = []
emptyrows = range(0, self.ysize)
fullrows = []
for x in range(0, self.xsize):
for y in range(0, self.ysize):
if newsol[x][y][0] != -1:
if x in emptycols:
fullcols.append(x)
emptycols.remove(x)
if y in emptyrows:
fullrows.append(y)
emptyrows.remove(y)
if self.shiftside == 0:
self.shiftside = 1
shift = min(fullcols)
if shift > 0:
self.clearboard()
for x in range(0, self.xsize):
for y in range(0, self.ysize):
if newsol[x][y][0] != -1:
self.addtosol(x - shift, y, newsol[x][y][0], newsol[x][y][1])
return True
if self.shiftside == 1:
self.shiftside = 2
shift = self.xsize - 1 - max(fullcols)
if shift > 0:
self.clearboard()
for x in range(0, self.xsize):
for y in range(0, self.ysize):
if newsol[x][y][0] != -1:
self.addtosol(x + shift, y, newsol[x][y][0], newsol[x][y][1])
return True
if self.shiftside == 2:
self.shiftside = 3
shift = min(fullrows)
if shift > 0:
self.clearboard()
for x in range(0, self.xsize):
for y in range(0, self.ysize):
if newsol[x][y][0] != -1:
self.addtosol(x, y - shift, newsol[x][y][0], newsol[x][y][1])
return True
if self.shiftside == 3:
self.shiftside = 0
shift = self.ysize - 1 - max(fullrows)
if shift > 0:
self.clearboard()
for x in range(0, self.xsize):
for y in range(0, self.ysize):
if newsol[x][y][0] != -1:
self.addtosol(x, y + shift, newsol[x][y][0], newsol[x][y][1])
return True
return False
def clearboard(self):
self.maxsegid = 0
for x in range(0, self.xsize):
for y in range(0, self.ysize):
if self.sol[x][y][0] != -1:
self.removepiece(x, y)
def getsegmentsize(self):
minx, maxx, miny, maxy = self.xsize + 1, 0, self.ysize + 1, 0
for i in range(0, self.xsize):
for j in range(0, self.ysize):
if self.sol[i][j][2] != -1:
if i < minx:
minx = i
if i > maxx:
maxx = i
if j < miny:
miny = j
if j > maxy:
maxy = j
return minx, maxx, miny, maxy
def generatesegmentsquare(self, sol, x, y):
image = numpy.full_like(self.pieces[0].image, (sol[2] + 1) * 10)
pieceid = sol[0]
rot = sol[1]
cv2.putText(image, "{}/{}".format(pieceid, rot), (35, 45), cv2.FONT_HERSHEY_PLAIN, 0.6, (255, 255, 255))
if pieceid != -1:
piece = self.pieces[pieceid]
if x > 0:
bb = piece.getbestbuddyfor(x, y, rot, x - 1, y)
cv2.putText(image, "{}/{}".format(bb[0], bb[1]), (5, 45), cv2.FONT_HERSHEY_PLAIN, 0.6, (255, 0, 0))
if x < self.xsize - 1:
bb = piece.getbestbuddyfor(x, y, rot, x + 1, y)
cv2.putText(image, "{}/{}".format(bb[0], bb[1]), (60, 45), cv2.FONT_HERSHEY_PLAIN, 0.6, (0, 255, 0))
if y > 0:
bb = piece.getbestbuddyfor(x, y, rot, x, y - 1)
cv2.putText(image, "{}/{}".format(bb[0], bb[1]), (35, 15), cv2.FONT_HERSHEY_PLAIN, 0.6, (0, 0, 255))
if y < self.ysize - 1:
bb = piece.getbestbuddyfor(x, y, rot, x, y + 1)
cv2.putText(image, "{}/{}".format(bb[0], bb[1]), (35, 75), cv2.FONT_HERSHEY_PLAIN, 0.6, (255, 255, 0))
return image
    def generateimages(self):
        # Identical rendering to generateimages2, applied to the current
        # solution grid; delegate to avoid duplicating the drawing logic.
        return self.generateimages2(self.sol)
def generateimages2(self, sol2):
dummy = numpy.full_like(self.pieces[0].image, 0)
columns = []
columnsseg = []
for i in range(0, self.xsize):
columns.append([])
columnsseg.append([])
for j in range(0, self.ysize):
sol = sol2[i][j]
columnsseg[i].append(self.generatesegmentsquare(sol, i, j))
if sol[0] == -1:
columns[i].append(dummy)
else:
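                    # sol[1] encodes rotation as applied below: 0 = upright,
                    # 1 = 180 deg, 2 = 90 deg CCW, 3 = 270 deg CCW (rot90 counts).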
if sol[1] == 0:
columns[i].append(self.pieces[sol[0]].image)
if sol[1] == 1:
columns[i].append(numpy.rot90(self.pieces[sol[0]].image, 2))
if sol[1] == 2:
columns[i].append(numpy.rot90(self.pieces[sol[0]].image, 1))
if sol[1] == 3:
columns[i].append(numpy.rot90(self.pieces[sol[0]].image, 3))
imagecols = []
imagesegcols = []
for col in columns:
imagecols.append(numpy.vstack(col))
for segcol in columnsseg:
imagesegcols.append(numpy.vstack(segcol))
image = numpy.hstack(imagecols)
imageseg = numpy.hstack(imagesegcols)
return image, imageseg
def showsol(self):
image, imageseg = self.generateimages()
#if self.orig is not None:
#cv2.imshow("Original", self.orig)
#cv2.imshow("Solved", image)
#cv2.imshow("Segments", imageseg)
def __str__(self):
data = ["Puzzle {0}x{1}\r\n".format(self.xsize, self.ysize)]
for y in range(0, self.ysize):
for x in range(0, self.xsize):
data.append("[{0},{1},{2}] ".format(self.sol[x][y][0], self.sol[x][y][1], self.sol[x][y][2]))
data.append("\r\n")
        return ''.join(data)
|
|
"""This module provides plugins used in the hairball paper."""
from collections import defaultdict, Counter
from hairball.plugins import HairballPlugin
class Animation(HairballPlugin):
"""Plugin that checks for instances of 'complex animation'.
Animation should include loops, motion, timing, and costume changes.
"""
COSTUME = frozenset(['switch to costume %l', 'next costume'])
LOOP = frozenset(['repeat %n', 'repeat until %b', 'forever',
'forever if %b'])
MOTION = frozenset(['change y by %n', 'change x by %n',
'glide %n secs to x:%n y:%n',
'move %n steps', 'go to x:%n y:%n'])
ROTATE = frozenset(['turn cw %n degrees', 'turn ccw %n degrees',
'point in direction %d'])
SIZE = frozenset(['change size by %n', 'set size to %n%'])
TIMING = frozenset(['wait %n secs', 'glide %n secs to x:%n y:%n'])
ANIMATION = COSTUME | LOOP | MOTION | ROTATE | SIZE | TIMING
@staticmethod
def check_results(tmp_):
"""Return a 3 tuple for something."""
# TODO: Fix this to work with more meaningful names
        if tmp_['t'] > 0:
            if tmp_['l'] > 0:
                if tmp_['rr'] > 0 or tmp_['ra'] > 1:
                    print(1, 3, tmp_)
                    return 3
                elif tmp_['cr'] > 0 or tmp_['ca'] > 1:
                    print(2, 3, tmp_)
                    return 3
                elif tmp_['mr'] > 0 or tmp_['ma'] > 1:
                    print(3, 2, tmp_)
                    return 2
            if tmp_['cr'] > 1 or tmp_['ca'] > 2:
                print(4, 2, tmp_)
                return 2
            if tmp_['mr'] > 0 or tmp_['ma'] > 1:
                if tmp_['cr'] > 0 or tmp_['ca'] > 1:
                    print(6, 0, tmp_)
                    return 0
                if tmp_['rr'] > 1 or tmp_['ra'] > 2:
                    print(7, 0, tmp_)
                    return 0
                if tmp_['sr'] > 1 or tmp_['sa'] > 2:
                    print(8, 0, tmp_)
                    return 0
        if tmp_['l'] > 0:
            if tmp_['rr'] > 0 or tmp_['ra'] > 1:
                print(9, 2, tmp_)
                return 2
            if tmp_['cr'] > 0 or tmp_['ca'] > 1:
                print(10, 0, tmp_)
                return 0
        return -1
def check_animation(self, last, last_level, gen):
tmp_ = Counter()
results = Counter()
name, level, block = last, last_level, last
others = False
        # Map block attributes to the short counter keys that check_results()
        # reads: c=costume, r=rotate, m=motion, s=size (+ 'r'elative/'a'bsolute).
        keys = {'costume': 'c', 'orientation': 'r', 'position': 'm', 'size': 's'}
        while name in self.ANIMATION and level >= last_level:
            if name in self.LOOP:
                if block != last:
                    count = self.check_results(tmp_)
                    if count > -1:
                        results[count] += 1
                    tmp_.clear()
                tmp_['l'] += 1
            for attribute in ('costume', 'orientation', 'position', 'size'):
                if (name, 'relative') in self.BLOCKMAPPING[attribute]:
                    tmp_[keys[attribute] + 'r'] += 1
                elif (name, 'absolute') in self.BLOCKMAPPING[attribute]:
                    tmp_[keys[attribute] + 'a'] += 1
            if name in self.TIMING:
                tmp_['t'] += 1
last_level = level
name, level, block = next(gen, ('', 0, ''))
# allow some exceptions
if name not in self.ANIMATION and name != '':
if not others:
if block.type.flag != 't':
last_level = level
(name, level, block) = next(gen, ('', 0, ''))
others = True
count = self.check_results(tmp_)
if count > -1:
results[count] += 1
return gen, results
def analyze(self, scratch):
results = Counter()
for script in self.iter_scripts(scratch):
gen = self.iter_blocks(script.blocks)
name = 'start'
level = None
while name != '':
if name in self.ANIMATION:
gen, count = self.check_animation(name, level, gen)
results.update(count)
name, level, _ = next(gen, ('', 0, ''))
return {'animation': results}
class BroadcastReceive(HairballPlugin):
"""Plugin that checks for proper usage of broadcast and receive blocks."""
def get_receive(self, script_list):
"""Return a list of received events contained in script_list."""
events = defaultdict(set)
for script in script_list:
if self.script_start_type(script) == self.HAT_WHEN_I_RECEIVE:
event = script.blocks[0].args[0].lower()
events[event].add(script)
return events
def analyze(self, scratch):
all_scripts = list(self.iter_scripts(scratch))
results = defaultdict(set)
broadcast = dict((x, self.get_broadcast_events(x)) # Events by script
for x in all_scripts)
correct = self.get_receive(all_scripts)
results['never broadcast'] = set(correct.keys())
for script, events in broadcast.items():
            for event in list(events):  # copy: entries may be deleted below
if event is True: # Remove dynamic broadcasts
results['dynamic broadcast'].add(script.morph.name)
del events[event]
elif event in correct:
results['never broadcast'].discard(event)
else:
results['never received'].add(event)
# remove events from correct dict that were never broadcast
        for event in list(correct):  # copy: entries may be deleted below
if event in results['never broadcast']:
del correct[event]
# Find scripts that have more than one broadcast event on any possible
# execution path through the program
# TODO: Permit mutually exclusive broadcasts
for events in broadcast.values():
if len(events) > 1:
for event in events:
if event in correct:
results['parallel broadcasts'].add(event)
del correct[event]
# Find events that have two (or more) receivers in which one of the
# receivers has a "delay" block
        for event, scripts in list(correct.items()):
if len(scripts) > 1:
for script in scripts:
for _, _, block in self.iter_blocks(script.blocks):
if block.type.flag == 't':
results['multiple receivers with delay'].add(event)
if event in correct:
del correct[event]
results['success'] = set(correct.keys())
return {'broadcast': results}
class SaySoundSync(HairballPlugin):
"""Plugin that checks for synchronization between say and sound blocks.
The order should be:
Say "___",
Play sound "___" until done,
Say ""
"""
CORRECT = -1
ERROR = 0
INCORRECT = 1
HACKISH = 2
SAY_THINK = ('say %s', 'think %s')
SAY_THINK_DURATION = ('say %s for %n secs', 'think %s for %n secs')
ALL_SAY_THINK = SAY_THINK + SAY_THINK_DURATION
@staticmethod
def is_blank(word):
"""Return True if the string is empty, or only whitespace."""
return not word or word.isspace()
def analyze(self, scratch):
"""Categorize instances of attempted say and sound synchronization."""
errors = Counter()
for script in self.iter_scripts(scratch):
prev_name, prev_depth, prev_block = '', 0, script.blocks[0]
gen = self.iter_blocks(script.blocks)
for name, depth, block in gen:
if prev_depth == depth:
if prev_name in self.SAY_THINK:
if name == 'play sound %S until done':
if not self.is_blank(prev_block.args[0]):
errors += self.check(gen)
# TODO: What about play sound?
elif prev_name in self.SAY_THINK_DURATION and \
'play sound %S' in name:
                        errors[self.INCORRECT] += 1
elif prev_name == 'play sound %S':
if name in self.SAY_THINK:
errors[self.INCORRECT] += 1
elif name in self.SAY_THINK_DURATION:
if self.is_blank(block.args[0]):
errors[self.ERROR] += 1
else:
errors[self.HACKISH] += 1
elif prev_name == 'play sound %S until done' and \
name in self.ALL_SAY_THINK:
if not self.is_blank(block.args[0]):
errors[self.INCORRECT] += 1
# TODO: Should there be an else clause here?
prev_name, prev_depth, prev_block = name, depth, block
return {'sound': errors}
def check(self, gen):
"""Check that the last part of the chain matches.
TODO: Fix to handle the following situation that appears to not work
say 'message 1'
play sound until done
say 'message 2'
say 'message 3'
play sound until done
say ''
"""
retval = Counter()
name, _, block = next(gen, ('', 0, ''))
if name in self.SAY_THINK:
if self.is_blank(block.args[0]):
retval[self.CORRECT] += 1
else:
name, _, block = next(gen, ('', 0, ''))
if name == 'play sound %S until done':
# Increment the correct count because we have at least
# one successful instance
retval[self.CORRECT] += 1
                    # This block represents the beginning of a second chain
retval += self.check(gen)
else:
retval[self.INCORRECT] += 1
else:
retval[self.INCORRECT] += 1
return retval
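if __name__ == '__main__':
    # Minimal driver sketch, not how hairball normally runs these plugins:
    # ``kurt`` and the project path are assumptions used only for illustration.
    import kurt
    project = kurt.Project.load('example.sb')
    for plugin in (Animation(), BroadcastReceive(), SaySoundSync()):
        print(plugin.analyze(project))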
|
|
import warnings
import numpy as np
import pandas as pd
import pandas.util.testing as tm
try:
from pandas.api.types import union_categoricals
except ImportError:
try:
from pandas.types.concat import union_categoricals
except ImportError:
pass
from .pandas_vb_common import setup # noqa
class Concat(object):
goal_time = 0.2
def setup(self):
N = 10**5
self.s = pd.Series(list('aabbcd') * N).astype('category')
self.a = pd.Categorical(list('aabbcd') * N)
self.b = pd.Categorical(list('bbcdjk') * N)
def time_concat(self):
pd.concat([self.s, self.s])
def time_union(self):
union_categoricals([self.a, self.b])
class Constructor(object):
goal_time = 0.2
def setup(self):
N = 10**5
self.categories = list('abcde')
self.cat_idx = pd.Index(self.categories)
self.values = np.tile(self.categories, N)
self.codes = np.tile(range(len(self.categories)), N)
self.datetimes = pd.Series(pd.date_range('1995-01-01 00:00:00',
                                                  periods=N // 10,
freq='s'))
self.datetimes_with_nat = self.datetimes.copy()
self.datetimes_with_nat.iloc[-1] = pd.NaT
self.values_some_nan = list(np.tile(self.categories + [np.nan], N))
self.values_all_nan = [np.nan] * len(self.values)
self.values_all_int8 = np.ones(N, 'int8')
def time_regular(self):
pd.Categorical(self.values, self.categories)
def time_fastpath(self):
pd.Categorical(self.codes, self.cat_idx, fastpath=True)
def time_datetimes(self):
pd.Categorical(self.datetimes)
def time_datetimes_with_nat(self):
pd.Categorical(self.datetimes_with_nat)
def time_with_nan(self):
pd.Categorical(self.values_some_nan)
def time_all_nan(self):
pd.Categorical(self.values_all_nan)
def time_from_codes_all_int8(self):
pd.Categorical.from_codes(self.values_all_int8, self.categories)
class ValueCounts(object):
goal_time = 0.2
params = [True, False]
param_names = ['dropna']
def setup(self, dropna):
n = 5 * 10**5
arr = ['s%04d' % i for i in np.random.randint(0, n // 10, size=n)]
self.ts = pd.Series(arr).astype('category')
def time_value_counts(self, dropna):
self.ts.value_counts(dropna=dropna)
class Repr(object):
goal_time = 0.2
def setup(self):
self.sel = pd.Series(['s1234']).astype('category')
def time_rendering(self):
str(self.sel)
class SetCategories(object):
goal_time = 0.2
def setup(self):
n = 5 * 10**5
arr = ['s%04d' % i for i in np.random.randint(0, n // 10, size=n)]
self.ts = pd.Series(arr).astype('category')
def time_set_categories(self):
self.ts.cat.set_categories(self.ts.cat.categories[::2])
class Rank(object):
goal_time = 0.2
def setup(self):
N = 10**5
ncats = 100
self.s_str = pd.Series(tm.makeCategoricalIndex(N, ncats)).astype(str)
self.s_str_cat = self.s_str.astype('category')
with warnings.catch_warnings(record=True):
self.s_str_cat_ordered = self.s_str.astype('category',
ordered=True)
self.s_int = pd.Series(np.random.randint(0, ncats, size=N))
self.s_int_cat = self.s_int.astype('category')
with warnings.catch_warnings(record=True):
self.s_int_cat_ordered = self.s_int.astype('category',
ordered=True)
def time_rank_string(self):
self.s_str.rank()
def time_rank_string_cat(self):
self.s_str_cat.rank()
def time_rank_string_cat_ordered(self):
self.s_str_cat_ordered.rank()
def time_rank_int(self):
self.s_int.rank()
def time_rank_int_cat(self):
self.s_int_cat.rank()
def time_rank_int_cat_ordered(self):
self.s_int_cat_ordered.rank()
class Isin(object):
goal_time = 0.2
params = ['object', 'int64']
param_names = ['dtype']
def setup(self, dtype):
np.random.seed(1234)
n = 5 * 10**5
sample_size = 100
arr = [i for i in np.random.randint(0, n // 10, size=n)]
if dtype == 'object':
arr = ['s%04d' % i for i in arr]
self.sample = np.random.choice(arr, sample_size)
self.series = pd.Series(arr).astype('category')
def time_isin_categorical(self, dtype):
self.series.isin(self.sample)
class IsMonotonic(object):
def setup(self):
N = 1000
self.c = pd.CategoricalIndex(list('a' * N + 'b' * N + 'c' * N))
self.s = pd.Series(self.c)
def time_categorical_index_is_monotonic_increasing(self):
self.c.is_monotonic_increasing
def time_categorical_index_is_monotonic_decreasing(self):
self.c.is_monotonic_decreasing
def time_categorical_series_is_monotonic_increasing(self):
self.s.is_monotonic_increasing
def time_categorical_series_is_monotonic_decreasing(self):
self.s.is_monotonic_decreasing
class Contains(object):
goal_time = 0.2
def setup(self):
N = 10**5
self.ci = tm.makeCategoricalIndex(N)
self.c = self.ci.values
self.key = self.ci.categories[0]
def time_categorical_index_contains(self):
self.key in self.ci
def time_categorical_contains(self):
self.key in self.c
class CategoricalSlicing(object):
goal_time = 0.2
params = ['monotonic_incr', 'monotonic_decr', 'non_monotonic']
param_names = ['index']
def setup(self, index):
N = 10**6
values = list('a' * N + 'b' * N + 'c' * N)
indices = {
'monotonic_incr': pd.Categorical(values),
'monotonic_decr': pd.Categorical(reversed(values)),
'non_monotonic': pd.Categorical(list('abc' * N))}
self.data = indices[index]
self.scalar = 10000
self.list = list(range(10000))
self.cat_scalar = 'b'
def time_getitem_scalar(self, index):
self.data[self.scalar]
def time_getitem_slice(self, index):
self.data[:self.scalar]
def time_getitem_list_like(self, index):
self.data[[self.scalar]]
def time_getitem_list(self, index):
self.data[self.list]
def time_getitem_bool_array(self, index):
self.data[self.data == self.cat_scalar]
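if __name__ == '__main__':
    # Quick smoke-test sketch outside of asv (asv normally drives these
    # classes through their setup()/time_*() protocol):
    bench = Concat()
    bench.setup()
    bench.time_concat()
    bench.time_union()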
|
|
from __future__ import absolute_import
from __future__ import unicode_literals
import functools
import logging
import ntpath
import os
import string
import sys
from collections import namedtuple
import six
import yaml
from cached_property import cached_property
from ..const import COMPOSEFILE_V1 as V1
from ..const import COMPOSEFILE_V2_0 as V2_0
from ..utils import build_string_dict
from .environment import env_vars_from_file
from .environment import Environment
from .environment import split_env
from .errors import CircularReference
from .errors import ComposeFileNotFound
from .errors import ConfigurationError
from .errors import VERSION_EXPLANATION
from .interpolation import interpolate_environment_variables
from .sort_services import get_container_name_from_network_mode
from .sort_services import get_service_name_from_network_mode
from .sort_services import sort_service_dicts
from .types import parse_extra_hosts
from .types import parse_restart_spec
from .types import ServiceLink
from .types import VolumeFromSpec
from .types import VolumeSpec
from .validation import match_named_volumes
from .validation import validate_against_config_schema
from .validation import validate_config_section
from .validation import validate_depends_on
from .validation import validate_extends_file_path
from .validation import validate_links
from .validation import validate_network_mode
from .validation import validate_service_constraints
from .validation import validate_top_level_object
from .validation import validate_ulimits
DOCKER_CONFIG_KEYS = [
'cap_add',
'cap_drop',
'cgroup_parent',
'command',
'cpu_quota',
'cpu_shares',
'cpuset',
'detach',
'devices',
'dns',
'dns_search',
'domainname',
'entrypoint',
'env_file',
'environment',
'extra_hosts',
'hostname',
'image',
'ipc',
'labels',
'links',
'mac_address',
'mem_limit',
'memswap_limit',
'mem_swappiness',
'net',
    'oom_score_adj',
'pid',
'ports',
'privileged',
'read_only',
'restart',
'security_opt',
'shm_size',
'stdin_open',
'stop_signal',
'tty',
'user',
'volume_driver',
'volumes',
'volumes_from',
'working_dir',
]
ALLOWED_KEYS = DOCKER_CONFIG_KEYS + [
'build',
'container_name',
'dockerfile',
'log_driver',
'log_opt',
'logging',
'network_mode',
]
DOCKER_VALID_URL_PREFIXES = (
'http://',
'https://',
'git://',
'github.com/',
'git@',
)
SUPPORTED_FILENAMES = [
'docker-compose.yml',
'docker-compose.yaml',
]
DEFAULT_OVERRIDE_FILENAME = 'docker-compose.override.yml'
log = logging.getLogger(__name__)
class ConfigDetails(namedtuple('_ConfigDetails', 'working_dir config_files environment')):
"""
:param working_dir: the directory to use for relative paths in the config
:type working_dir: string
:param config_files: list of configuration files to load
:type config_files: list of :class:`ConfigFile`
:param environment: computed environment values for this project
:type environment: :class:`environment.Environment`
"""
def __new__(cls, working_dir, config_files, environment=None):
if environment is None:
environment = Environment.from_env_file(working_dir)
return super(ConfigDetails, cls).__new__(
cls, working_dir, config_files, environment
)
class ConfigFile(namedtuple('_ConfigFile', 'filename config')):
"""
:param filename: filename of the config file
:type filename: string
:param config: contents of the config file
:type config: :class:`dict`
"""
@classmethod
def from_filename(cls, filename):
return cls(filename, load_yaml(filename))
@cached_property
def version(self):
if 'version' not in self.config:
return V1
version = self.config['version']
if isinstance(version, dict):
log.warn('Unexpected type for "version" key in "{}". Assuming '
'"version" is the name of a service, and defaulting to '
'Compose file version 1.'.format(self.filename))
return V1
if not isinstance(version, six.string_types):
raise ConfigurationError(
'Version in "{}" is invalid - it should be a string.'
.format(self.filename))
if version == '1':
raise ConfigurationError(
'Version in "{}" is invalid. {}'
.format(self.filename, VERSION_EXPLANATION))
if version == '2':
version = V2_0
if version != V2_0:
raise ConfigurationError(
'Version in "{}" is unsupported. {}'
.format(self.filename, VERSION_EXPLANATION))
return version
def get_service(self, name):
return self.get_service_dicts()[name]
def get_service_dicts(self):
return self.config if self.version == V1 else self.config.get('services', {})
def get_volumes(self):
return {} if self.version == V1 else self.config.get('volumes', {})
def get_networks(self):
return {} if self.version == V1 else self.config.get('networks', {})
class Config(namedtuple('_Config', 'version services volumes networks')):
"""
:param version: configuration version
:type version: int
:param services: List of service description dictionaries
:type services: :class:`list`
:param volumes: Dictionary mapping volume names to description dictionaries
:type volumes: :class:`dict`
:param networks: Dictionary mapping network names to description dictionaries
:type networks: :class:`dict`
"""
class ServiceConfig(namedtuple('_ServiceConfig', 'working_dir filename name config')):
@classmethod
def with_abs_paths(cls, working_dir, filename, name, config):
if not working_dir:
raise ValueError("No working_dir for ServiceConfig.")
return cls(
os.path.abspath(working_dir),
os.path.abspath(filename) if filename else filename,
name,
config)
def find(base_dir, filenames, environment):
if filenames == ['-']:
return ConfigDetails(
os.getcwd(),
[ConfigFile(None, yaml.safe_load(sys.stdin))],
environment
)
if filenames:
filenames = [os.path.join(base_dir, f) for f in filenames]
else:
filenames = get_default_config_files(base_dir)
log.debug("Using configuration files: {}".format(",".join(filenames)))
return ConfigDetails(
os.path.dirname(filenames[0]),
[ConfigFile.from_filename(f) for f in filenames],
environment
)
def validate_config_version(config_files):
main_file = config_files[0]
validate_top_level_object(main_file)
for next_file in config_files[1:]:
validate_top_level_object(next_file)
if main_file.version != next_file.version:
raise ConfigurationError(
"Version mismatch: file {0} specifies version {1} but "
"extension file {2} uses version {3}".format(
main_file.filename,
main_file.version,
next_file.filename,
next_file.version))
def get_default_config_files(base_dir):
(candidates, path) = find_candidates_in_parent_dirs(SUPPORTED_FILENAMES, base_dir)
if not candidates:
raise ComposeFileNotFound(SUPPORTED_FILENAMES)
winner = candidates[0]
if len(candidates) > 1:
log.warn("Found multiple config files with supported names: %s", ", ".join(candidates))
log.warn("Using %s\n", winner)
return [os.path.join(path, winner)] + get_default_override_file(path)
def get_default_override_file(path):
override_filename = os.path.join(path, DEFAULT_OVERRIDE_FILENAME)
return [override_filename] if os.path.exists(override_filename) else []
def find_candidates_in_parent_dirs(filenames, path):
"""
Given a directory path to start, looks for filenames in the
directory, and then each parent directory successively,
until found.
Returns tuple (candidates, path).
"""
candidates = [filename for filename in filenames
if os.path.exists(os.path.join(path, filename))]
if not candidates:
parent_dir = os.path.join(path, '..')
if os.path.abspath(parent_dir) != os.path.abspath(path):
return find_candidates_in_parent_dirs(filenames, parent_dir)
return (candidates, path)
def load(config_details):
"""Load the configuration from a working directory and a list of
configuration files. Files are loaded in order, and merged on top
of each other to create the final configuration.
Return a fully interpolated, extended and validated configuration.
"""
validate_config_version(config_details.config_files)
processed_files = [
process_config_file(config_file, config_details.environment)
for config_file in config_details.config_files
]
config_details = config_details._replace(config_files=processed_files)
main_file = config_details.config_files[0]
volumes = load_mapping(
config_details.config_files, 'get_volumes', 'Volume'
)
networks = load_mapping(
config_details.config_files, 'get_networks', 'Network'
)
service_dicts = load_services(config_details, main_file)
if main_file.version != V1:
for service_dict in service_dicts:
match_named_volumes(service_dict, volumes)
return Config(main_file.version, service_dicts, volumes, networks)
def load_mapping(config_files, get_func, entity_type):
mapping = {}
for config_file in config_files:
for name, config in getattr(config_file, get_func)().items():
mapping[name] = config or {}
if not config:
continue
external = config.get('external')
if external:
if len(config.keys()) > 1:
raise ConfigurationError(
'{} {} declared as external but specifies'
' additional attributes ({}). '.format(
entity_type,
name,
', '.join([k for k in config.keys() if k != 'external'])
)
)
if isinstance(external, dict):
config['external_name'] = external.get('name')
else:
config['external_name'] = name
mapping[name] = config
if 'driver_opts' in config:
config['driver_opts'] = build_string_dict(
config['driver_opts']
)
return mapping
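# For example, a volume declared as ``{'external': {'name': 'real_vol'}}`` is
# mapped with ``external_name='real_vol'``, while a bare ``external: true``
# falls back to the volume's own name as its ``external_name``.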
def load_services(config_details, config_file):
def build_service(service_name, service_dict, service_names):
service_config = ServiceConfig.with_abs_paths(
config_details.working_dir,
config_file.filename,
service_name,
service_dict)
resolver = ServiceExtendsResolver(
service_config, config_file, environment=config_details.environment
)
service_dict = process_service(resolver.run())
service_config = service_config._replace(config=service_dict)
validate_service(service_config, service_names, config_file.version)
service_dict = finalize_service(
service_config,
service_names,
config_file.version,
config_details.environment)
return service_dict
def build_services(service_config):
service_names = service_config.keys()
return sort_service_dicts([
build_service(name, service_dict, service_names)
for name, service_dict in service_config.items()
])
def merge_services(base, override):
all_service_names = set(base) | set(override)
return {
name: merge_service_dicts_from_files(
base.get(name, {}),
override.get(name, {}),
config_file.version)
for name in all_service_names
}
service_configs = [
file.get_service_dicts() for file in config_details.config_files
]
service_config = service_configs[0]
for next_config in service_configs[1:]:
service_config = merge_services(service_config, next_config)
return build_services(service_config)
def interpolate_config_section(filename, config, section, environment):
validate_config_section(filename, config, section)
return interpolate_environment_variables(config, section, environment)
def process_config_file(config_file, environment, service_name=None):
services = interpolate_config_section(
config_file.filename,
config_file.get_service_dicts(),
'service',
environment,)
if config_file.version == V2_0:
processed_config = dict(config_file.config)
processed_config['services'] = services
processed_config['volumes'] = interpolate_config_section(
config_file.filename,
config_file.get_volumes(),
'volume',
environment,)
processed_config['networks'] = interpolate_config_section(
config_file.filename,
config_file.get_networks(),
'network',
environment,)
if config_file.version == V1:
processed_config = services
config_file = config_file._replace(config=processed_config)
validate_against_config_schema(config_file)
if service_name and service_name not in services:
raise ConfigurationError(
"Cannot extend service '{}' in {}: Service not found".format(
service_name, config_file.filename))
return config_file
class ServiceExtendsResolver(object):
def __init__(self, service_config, config_file, environment, already_seen=None):
self.service_config = service_config
self.working_dir = service_config.working_dir
self.already_seen = already_seen or []
self.config_file = config_file
self.environment = environment
@property
def signature(self):
return self.service_config.filename, self.service_config.name
def detect_cycle(self):
if self.signature in self.already_seen:
raise CircularReference(self.already_seen + [self.signature])
def run(self):
self.detect_cycle()
if 'extends' in self.service_config.config:
service_dict = self.resolve_extends(*self.validate_and_construct_extends())
return self.service_config._replace(config=service_dict)
return self.service_config
def validate_and_construct_extends(self):
extends = self.service_config.config['extends']
if not isinstance(extends, dict):
extends = {'service': extends}
config_path = self.get_extended_config_path(extends)
service_name = extends['service']
extends_file = ConfigFile.from_filename(config_path)
validate_config_version([self.config_file, extends_file])
extended_file = process_config_file(
extends_file, self.environment, service_name=service_name
)
service_config = extended_file.get_service(service_name)
return config_path, service_config, service_name
def resolve_extends(self, extended_config_path, service_dict, service_name):
resolver = ServiceExtendsResolver(
ServiceConfig.with_abs_paths(
os.path.dirname(extended_config_path),
extended_config_path,
service_name,
service_dict),
self.config_file,
already_seen=self.already_seen + [self.signature],
environment=self.environment
)
service_config = resolver.run()
other_service_dict = process_service(service_config)
validate_extended_service_dict(
other_service_dict,
extended_config_path,
service_name)
return merge_service_dicts(
other_service_dict,
self.service_config.config,
self.config_file.version)
def get_extended_config_path(self, extends_options):
"""Service we are extending either has a value for 'file' set, which we
        need to obtain a full path to, or we are extending from a service
defined in our own file.
"""
filename = self.service_config.filename
validate_extends_file_path(
self.service_config.name,
extends_options,
filename)
if 'file' in extends_options:
return expand_path(self.working_dir, extends_options['file'])
return filename
def resolve_environment(service_dict, environment=None):
"""Unpack any environment variables from an env_file, if set.
Interpolate environment values if set.
"""
env = {}
for env_file in service_dict.get('env_file', []):
env.update(env_vars_from_file(env_file))
env.update(parse_environment(service_dict.get('environment')))
return dict(resolve_env_var(k, v, environment) for k, v in six.iteritems(env))
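# For example, with environment={'TERM': 'xterm'} and a service dict of
# {'environment': ['TERM', 'DEBUG=1']}, the result is
# {'TERM': 'xterm', 'DEBUG': '1'}: bare variable names inherit their value
# from the surrounding environment.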
def resolve_build_args(build, environment):
args = parse_build_arguments(build.get('args'))
return dict(resolve_env_var(k, v, environment) for k, v in six.iteritems(args))
def validate_extended_service_dict(service_dict, filename, service):
error_prefix = "Cannot extend service '%s' in %s:" % (service, filename)
if 'links' in service_dict:
raise ConfigurationError(
"%s services with 'links' cannot be extended" % error_prefix)
if 'volumes_from' in service_dict:
raise ConfigurationError(
"%s services with 'volumes_from' cannot be extended" % error_prefix)
if 'net' in service_dict:
if get_container_name_from_network_mode(service_dict['net']):
raise ConfigurationError(
"%s services with 'net: container' cannot be extended" % error_prefix)
if 'network_mode' in service_dict:
if get_service_name_from_network_mode(service_dict['network_mode']):
raise ConfigurationError(
"%s services with 'network_mode: service' cannot be extended" % error_prefix)
if 'depends_on' in service_dict:
raise ConfigurationError(
"%s services with 'depends_on' cannot be extended" % error_prefix)
def validate_service(service_config, service_names, version):
service_dict, service_name = service_config.config, service_config.name
validate_service_constraints(service_dict, service_name, version)
validate_paths(service_dict)
validate_ulimits(service_config)
validate_network_mode(service_config, service_names)
validate_depends_on(service_config, service_names)
validate_links(service_config, service_names)
if not service_dict.get('image') and has_uppercase(service_name):
raise ConfigurationError(
"Service '{name}' contains uppercase characters which are not valid "
"as part of an image name. Either use a lowercase service name or "
"use the `image` field to set a custom name for the service image."
.format(name=service_name))
def process_service(service_config):
working_dir = service_config.working_dir
service_dict = dict(service_config.config)
if 'env_file' in service_dict:
service_dict['env_file'] = [
expand_path(working_dir, path)
for path in to_list(service_dict['env_file'])
]
if 'build' in service_dict:
if isinstance(service_dict['build'], six.string_types):
service_dict['build'] = resolve_build_path(working_dir, service_dict['build'])
elif isinstance(service_dict['build'], dict) and 'context' in service_dict['build']:
path = service_dict['build']['context']
service_dict['build']['context'] = resolve_build_path(working_dir, path)
if 'volumes' in service_dict and service_dict.get('volume_driver') is None:
service_dict['volumes'] = resolve_volume_paths(working_dir, service_dict)
if 'labels' in service_dict:
service_dict['labels'] = parse_labels(service_dict['labels'])
if 'extra_hosts' in service_dict:
service_dict['extra_hosts'] = parse_extra_hosts(service_dict['extra_hosts'])
for field in ['dns', 'dns_search', 'tmpfs']:
if field in service_dict:
service_dict[field] = to_list(service_dict[field])
return service_dict
def finalize_service(service_config, service_names, version, environment):
service_dict = dict(service_config.config)
if 'environment' in service_dict or 'env_file' in service_dict:
service_dict['environment'] = resolve_environment(service_dict, environment)
service_dict.pop('env_file', None)
if 'volumes_from' in service_dict:
service_dict['volumes_from'] = [
VolumeFromSpec.parse(vf, service_names, version)
for vf in service_dict['volumes_from']
]
if 'volumes' in service_dict:
service_dict['volumes'] = [
VolumeSpec.parse(v) for v in service_dict['volumes']]
if 'net' in service_dict:
network_mode = service_dict.pop('net')
container_name = get_container_name_from_network_mode(network_mode)
if container_name and container_name in service_names:
service_dict['network_mode'] = 'service:{}'.format(container_name)
else:
service_dict['network_mode'] = network_mode
if 'networks' in service_dict:
service_dict['networks'] = parse_networks(service_dict['networks'])
if 'restart' in service_dict:
service_dict['restart'] = parse_restart_spec(service_dict['restart'])
normalize_build(service_dict, service_config.working_dir, environment)
service_dict['name'] = service_config.name
return normalize_v1_service_format(service_dict)
def normalize_v1_service_format(service_dict):
if 'log_driver' in service_dict or 'log_opt' in service_dict:
if 'logging' not in service_dict:
service_dict['logging'] = {}
if 'log_driver' in service_dict:
service_dict['logging']['driver'] = service_dict['log_driver']
del service_dict['log_driver']
if 'log_opt' in service_dict:
service_dict['logging']['options'] = service_dict['log_opt']
del service_dict['log_opt']
if 'dockerfile' in service_dict:
service_dict['build'] = service_dict.get('build', {})
service_dict['build'].update({
'dockerfile': service_dict.pop('dockerfile')
})
return service_dict
def merge_service_dicts_from_files(base, override, version):
"""When merging services from multiple files we need to merge the `extends`
field. This is not handled by `merge_service_dicts()` which is used to
perform the `extends`.
"""
new_service = merge_service_dicts(base, override, version)
if 'extends' in override:
new_service['extends'] = override['extends']
elif 'extends' in base:
new_service['extends'] = base['extends']
return new_service
class MergeDict(dict):
"""A dict-like object responsible for merging two dicts into one."""
def __init__(self, base, override):
self.base = base
self.override = override
def needs_merge(self, field):
return field in self.base or field in self.override
def merge_field(self, field, merge_func, default=None):
if not self.needs_merge(field):
return
self[field] = merge_func(
self.base.get(field, default),
self.override.get(field, default))
def merge_mapping(self, field, parse_func):
if not self.needs_merge(field):
return
self[field] = parse_func(self.base.get(field))
self[field].update(parse_func(self.override.get(field)))
def merge_sequence(self, field, parse_func):
def parse_sequence_func(seq):
return to_mapping((parse_func(item) for item in seq), 'merge_field')
if not self.needs_merge(field):
return
merged = parse_sequence_func(self.base.get(field, []))
merged.update(parse_sequence_func(self.override.get(field, [])))
self[field] = [item.repr() for item in sorted(merged.values())]
def merge_scalar(self, field):
if self.needs_merge(field):
self[field] = self.override.get(field, self.base.get(field))
def merge_service_dicts(base, override, version):
md = MergeDict(base, override)
md.merge_mapping('environment', parse_environment)
md.merge_mapping('labels', parse_labels)
md.merge_mapping('ulimits', parse_ulimits)
md.merge_mapping('networks', parse_networks)
md.merge_sequence('links', ServiceLink.parse)
for field in ['volumes', 'devices']:
md.merge_field(field, merge_path_mappings)
for field in [
'ports', 'cap_add', 'cap_drop', 'expose', 'external_links',
'security_opt', 'volumes_from', 'depends_on',
]:
md.merge_field(field, merge_unique_items_lists, default=[])
for field in ['dns', 'dns_search', 'env_file', 'tmpfs']:
md.merge_field(field, merge_list_or_string)
for field in set(ALLOWED_KEYS) - set(md):
md.merge_scalar(field)
if version == V1:
legacy_v1_merge_image_or_build(md, base, override)
elif md.needs_merge('build'):
md['build'] = merge_build(md, base, override)
return dict(md)
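# For example (a sketch):
#   merge_service_dicts({'image': 'a', 'ports': ['80']}, {'ports': ['443']}, V2_0)
#   -> {'ports': ['443', '80'], 'image': 'a'}
# List-valued fields are unioned; scalar fields take the override when present.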
def merge_unique_items_lists(base, override):
return sorted(set().union(base, override))
def merge_build(output, base, override):
def to_dict(service):
build_config = service.get('build', {})
if isinstance(build_config, six.string_types):
return {'context': build_config}
return build_config
md = MergeDict(to_dict(base), to_dict(override))
md.merge_scalar('context')
md.merge_scalar('dockerfile')
md.merge_mapping('args', parse_build_arguments)
return dict(md)
def legacy_v1_merge_image_or_build(output, base, override):
output.pop('image', None)
output.pop('build', None)
if 'image' in override:
output['image'] = override['image']
elif 'build' in override:
output['build'] = override['build']
elif 'image' in base:
output['image'] = base['image']
elif 'build' in base:
output['build'] = base['build']
def merge_environment(base, override):
env = parse_environment(base)
env.update(parse_environment(override))
return env
def split_label(label):
if '=' in label:
return label.split('=', 1)
else:
return label, ''
def parse_dict_or_list(split_func, type_name, arguments):
if not arguments:
return {}
if isinstance(arguments, list):
return dict(split_func(e) for e in arguments)
if isinstance(arguments, dict):
return dict(arguments)
raise ConfigurationError(
"%s \"%s\" must be a list or mapping," %
(type_name, arguments)
)
parse_build_arguments = functools.partial(parse_dict_or_list, split_env, 'build arguments')
parse_environment = functools.partial(parse_dict_or_list, split_env, 'environment')
parse_labels = functools.partial(parse_dict_or_list, split_label, 'labels')
parse_networks = functools.partial(parse_dict_or_list, lambda k: (k, None), 'networks')
def parse_ulimits(ulimits):
if not ulimits:
return {}
if isinstance(ulimits, dict):
return dict(ulimits)
def resolve_env_var(key, val, environment):
if val is not None:
return key, val
elif environment and key in environment:
return key, environment[key]
else:
return key, None
def resolve_volume_paths(working_dir, service_dict):
return [
resolve_volume_path(working_dir, volume)
for volume in service_dict['volumes']
]
def resolve_volume_path(working_dir, volume):
container_path, host_path = split_path_mapping(volume)
if host_path is not None:
if host_path.startswith('.'):
host_path = expand_path(working_dir, host_path)
host_path = os.path.expanduser(host_path)
return u"{}:{}".format(host_path, container_path)
else:
return container_path
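# e.g. resolve_volume_path('/proj', './data:/data') -> u'/proj/data:/data'
# (relative host paths are anchored at the working directory).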
def normalize_build(service_dict, working_dir, environment):
if 'build' in service_dict:
build = {}
# Shortcut where specifying a string is treated as the build context
if isinstance(service_dict['build'], six.string_types):
build['context'] = service_dict.pop('build')
else:
build.update(service_dict['build'])
if 'args' in build:
build['args'] = build_string_dict(
resolve_build_args(build, environment)
)
service_dict['build'] = build
def resolve_build_path(working_dir, build_path):
if is_url(build_path):
return build_path
return expand_path(working_dir, build_path)
def is_url(build_path):
return build_path.startswith(DOCKER_VALID_URL_PREFIXES)
def validate_paths(service_dict):
if 'build' in service_dict:
build = service_dict.get('build', {})
if isinstance(build, six.string_types):
build_path = build
elif isinstance(build, dict) and 'context' in build:
build_path = build['context']
else:
# We have a build section but no context, so nothing to validate
return
if (
not is_url(build_path) and
(not os.path.exists(build_path) or not os.access(build_path, os.R_OK))
):
raise ConfigurationError(
"build path %s either does not exist, is not accessible, "
"or is not a valid URL." % build_path)
def merge_path_mappings(base, override):
d = dict_from_path_mappings(base)
d.update(dict_from_path_mappings(override))
return path_mappings_from_dict(d)
def dict_from_path_mappings(path_mappings):
if path_mappings:
return dict(split_path_mapping(v) for v in path_mappings)
else:
return {}
def path_mappings_from_dict(d):
return [join_path_mapping(v) for v in sorted(d.items())]
def split_path_mapping(volume_path):
"""
Ascertain if the volume_path contains a host path as well as a container
path. Using splitdrive so windows absolute paths won't cause issues with
splitting on ':'.
"""
# splitdrive is very naive, so handle special cases where we can be sure
# the first character is not a drive.
if (volume_path.startswith('.') or volume_path.startswith('~') or
volume_path.startswith('/')):
drive, volume_config = '', volume_path
else:
drive, volume_config = ntpath.splitdrive(volume_path)
if ':' in volume_config:
(host, container) = volume_config.split(':', 1)
return (container, drive + host)
else:
return (volume_path, None)
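# Illustrative cases:
#   split_path_mapping('./data:/data')      -> ('/data', './data')
#   split_path_mapping('C:\\data:/data')    -> ('/data', 'C:\\data')
#   split_path_mapping('/var/lib/postgres') -> ('/var/lib/postgres', None)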
def join_path_mapping(pair):
(container, host) = pair
if host is None:
return container
else:
return ":".join((host, container))
def expand_path(working_dir, path):
return os.path.abspath(os.path.join(working_dir, os.path.expanduser(path)))
def merge_list_or_string(base, override):
return to_list(base) + to_list(override)
def to_list(value):
if value is None:
return []
elif isinstance(value, six.string_types):
return [value]
else:
return value
def to_mapping(sequence, key_field):
return {getattr(item, key_field): item for item in sequence}
def has_uppercase(name):
return any(char in string.ascii_uppercase for char in name)
def load_yaml(filename):
try:
with open(filename, 'r') as fh:
return yaml.safe_load(fh)
except (IOError, yaml.YAMLError) as e:
error_name = getattr(e, '__module__', '') + '.' + e.__class__.__name__
raise ConfigurationError(u"{}: {}".format(error_name, e))
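if __name__ == '__main__':
    # Minimal usage sketch, not part of Compose itself (running this module
    # directly requires the full compose package on the path): resolve the
    # default docker-compose.yml in the current directory and print the
    # resolved service names.
    details = find('.', None, Environment.from_env_file('.'))
    print(sorted(service['name'] for service in load(details).services))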
|
|
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Metadata.annotation_area'
db.add_column('images_metadata', 'annotation_area', self.gf('django.db.models.fields.CharField')(max_length=50, null=True, blank=True), keep_default=False)
# Adding field 'Source.image_annotation_area'
db.add_column('images_source', 'image_annotation_area', self.gf('django.db.models.fields.CharField')(max_length=50, null=True), keep_default=False)
def backwards(self, orm):
# Deleting field 'Metadata.annotation_area'
db.delete_column('images_metadata', 'annotation_area')
# Deleting field 'Source.image_annotation_area'
db.delete_column('images_source', 'image_annotation_area')
models = {
'annotations.label': {
'Meta': {'object_name': 'Label'},
'code': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['annotations.LabelGroup']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '45'}),
'thumbnail': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True'})
},
'annotations.labelgroup': {
'Meta': {'object_name': 'LabelGroup'},
'code': ('django.db.models.fields.CharField', [], {'max_length': '10', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '45', 'blank': 'True'})
},
'annotations.labelset': {
'Meta': {'object_name': 'LabelSet'},
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'edit_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'labels': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['annotations.Label']", 'symmetrical': 'False'}),
'location': ('django.db.models.fields.CharField', [], {'max_length': '45', 'blank': 'True'})
},
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'images.image': {
'Meta': {'object_name': 'Image'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'latest_robot_annotator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['images.Robot']", 'null': 'True'}),
'metadata': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['images.Metadata']"}),
'original_file': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
'original_height': ('django.db.models.fields.IntegerField', [], {}),
'original_width': ('django.db.models.fields.IntegerField', [], {}),
'point_generation_method': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'process_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'source': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['images.Source']"}),
'status': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['images.ImageStatus']"}),
'upload_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'uploaded_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'images.imagestatus': {
'Meta': {'object_name': 'ImageStatus'},
'annotatedByHuman': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'annotatedByRobot': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'featuresExtracted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'hasRandomPoints': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'preprocessed': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'images.metadata': {
'Meta': {'object_name': 'Metadata'},
'annotation_area': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'balance': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
'camera': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
'comments': ('django.db.models.fields.TextField', [], {'max_length': '1000', 'blank': 'True'}),
'depth': ('django.db.models.fields.CharField', [], {'max_length': '45', 'blank': 'True'}),
'framing': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
'group1_percent': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'group2_percent': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'group3_percent': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'group4_percent': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'group5_percent': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'group6_percent': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'group7_percent': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'height_in_cm': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'latitude': ('django.db.models.fields.CharField', [], {'max_length': '20', 'blank': 'True'}),
'longitude': ('django.db.models.fields.CharField', [], {'max_length': '20', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
'photo_date': ('django.db.models.fields.DateField', [], {}),
'photographer': ('django.db.models.fields.CharField', [], {'max_length': '45', 'blank': 'True'}),
'strobes': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
'value1': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['images.Value1']", 'null': 'True'}),
'value2': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['images.Value2']", 'null': 'True'}),
'value3': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['images.Value3']", 'null': 'True'}),
'value4': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['images.Value4']", 'null': 'True'}),
'value5': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['images.Value5']", 'null': 'True'}),
'water_quality': ('django.db.models.fields.CharField', [], {'max_length': '45', 'blank': 'True'})
},
'images.point': {
'Meta': {'object_name': 'Point'},
'annotation_status': ('django.db.models.fields.CharField', [], {'max_length': '1', 'blank': 'True'}),
'column': ('django.db.models.fields.IntegerField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['images.Image']"}),
'point_number': ('django.db.models.fields.IntegerField', [], {}),
'row': ('django.db.models.fields.IntegerField', [], {})
},
'images.robot': {
'Meta': {'object_name': 'Robot'},
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'path_to_model': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'source': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['images.Source']"}),
'time_to_train': ('django.db.models.fields.BigIntegerField', [], {}),
'version': ('django.db.models.fields.IntegerField', [], {'unique': 'True'})
},
'images.source': {
'Meta': {'object_name': 'Source'},
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'default_point_generation_method': ('django.db.models.fields.CharField', [], {'default': "'m_200'", 'max_length': '50'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image_annotation_area': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True'}),
'image_height_in_cm': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'key1': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'key2': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'key3': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'key4': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'key5': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'labelset': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['annotations.LabelSet']"}),
'latitude': ('django.db.models.fields.CharField', [], {'max_length': '20', 'blank': 'True'}),
'longitude': ('django.db.models.fields.CharField', [], {'max_length': '20', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '200'}),
'visibility': ('django.db.models.fields.CharField', [], {'default': "'v'", 'max_length': '1'})
},
'images.sourceinvite': {
'Meta': {'unique_together': "(['recipient', 'source'],)", 'object_name': 'SourceInvite'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'recipient': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'invites_received'", 'to': "orm['auth.User']"}),
'sender': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'invites_sent'", 'to': "orm['auth.User']"}),
'source': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['images.Source']"}),
'source_perm': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'images.value1': {
'Meta': {'object_name': 'Value1'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'source': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['images.Source']"})
},
'images.value2': {
'Meta': {'object_name': 'Value2'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'source': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['images.Source']"})
},
'images.value3': {
'Meta': {'object_name': 'Value3'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'source': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['images.Source']"})
},
'images.value4': {
'Meta': {'object_name': 'Value4'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'source': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['images.Source']"})
},
'images.value5': {
'Meta': {'object_name': 'Value5'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'source': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['images.Source']"})
}
}
complete_apps = ['images']
|
|
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union
from google.api_core import gapic_v1
from google.api_core import grpc_helpers_async
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import grpc # type: ignore
from grpc.experimental import aio # type: ignore
from google.cloud.policytroubleshooter_v1.types import checker
from .base import IamCheckerTransport, DEFAULT_CLIENT_INFO
from .grpc import IamCheckerGrpcTransport
class IamCheckerGrpcAsyncIOTransport(IamCheckerTransport):
"""gRPC AsyncIO backend transport for IamChecker.
IAM Policy Troubleshooter service.
This service helps you troubleshoot access issues for Google
Cloud resources.
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends protocol buffers over the wire using gRPC (which is built on
top of HTTP/2); the ``grpcio`` package must be installed.
"""
_grpc_channel: aio.Channel
_stubs: Dict[str, Callable] = {}
@classmethod
def create_channel(
cls,
host: str = "policytroubleshooter.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
**kwargs,
) -> aio.Channel:
"""Create and return a gRPC AsyncIO channel object.
Args:
host (Optional[str]): The host for the channel to use.
credentials (Optional[~.Credentials]): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
kwargs (Optional[dict]): Keyword arguments, which are passed to the
channel creation.
Returns:
aio.Channel: A gRPC AsyncIO channel object.
"""
return grpc_helpers_async.create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
quota_project_id=quota_project_id,
default_scopes=cls.AUTH_SCOPES,
scopes=scopes,
default_host=cls.DEFAULT_HOST,
**kwargs,
)
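    # Usage sketch (illustrative assumption, not part of the generated
    # surface): the channel is normally created by __init__, but it can
    # also be built directly, e.g.
    #
    #   channel = IamCheckerGrpcAsyncIOTransport.create_channel(
    #       quota_project_id="my-project",  # hypothetical project id
    #   )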
def __init__(
self,
*,
host: str = "policytroubleshooter.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
channel: aio.Channel = None,
api_mtls_endpoint: str = None,
client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
ssl_channel_credentials: grpc.ChannelCredentials = None,
client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
quota_project_id=None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is ignored if ``channel`` is provided.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
channel (Optional[aio.Channel]): A ``Channel`` instance through
which to make calls.
api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
If provided, it overrides the ``host`` argument and tries to create
a mutual TLS channel with client SSL credentials from
``client_cert_source`` or application default SSL credentials.
client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
Deprecated. A callback to provide client SSL certificate bytes and
private key bytes, both in PEM format. It is ignored if
``api_mtls_endpoint`` is None.
ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
for the grpc channel. It is ignored if ``channel`` is provided.
client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
A callback to provide client certificate bytes and private key bytes,
both in PEM format. It is used to configure a mutual TLS channel. It is
ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
always_use_jwt_access (Optional[bool]): Whether self signed JWT should
be used for service account credentials.
Raises:
google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
creation failed for any reason.
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
self._grpc_channel = None
self._ssl_channel_credentials = ssl_channel_credentials
self._stubs: Dict[str, Callable] = {}
if api_mtls_endpoint:
warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
if client_cert_source:
warnings.warn("client_cert_source is deprecated", DeprecationWarning)
if channel:
# Ignore credentials if a channel was passed.
credentials = False
# If a channel was explicitly provided, set it.
self._grpc_channel = channel
self._ssl_channel_credentials = None
else:
if api_mtls_endpoint:
host = api_mtls_endpoint
# Create SSL credentials with client_cert_source or application
# default SSL credentials.
if client_cert_source:
cert, key = client_cert_source()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
else:
self._ssl_channel_credentials = SslCredentials().ssl_credentials
else:
if client_cert_source_for_mtls and not ssl_channel_credentials:
cert, key = client_cert_source_for_mtls()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
# The base transport sets the host, credentials and scopes
super().__init__(
host=host,
credentials=credentials,
credentials_file=credentials_file,
scopes=scopes,
quota_project_id=quota_project_id,
client_info=client_info,
always_use_jwt_access=always_use_jwt_access,
)
if not self._grpc_channel:
self._grpc_channel = type(self).create_channel(
self._host,
# use the credentials which are saved
credentials=self._credentials,
# Set ``credentials_file`` to ``None`` here as
# the credentials that we saved earlier should be used.
credentials_file=None,
scopes=self._scopes,
ssl_credentials=self._ssl_channel_credentials,
quota_project_id=quota_project_id,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
# Wrap messages. This must be done after self._grpc_channel exists
self._prep_wrapped_messages(client_info)
@property
def grpc_channel(self) -> aio.Channel:
"""Create the channel designed to connect to this service.
This property caches on the instance; repeated calls return
the same channel.
"""
# Return the channel from cache.
return self._grpc_channel
@property
def troubleshoot_iam_policy(
self,
) -> Callable[
[checker.TroubleshootIamPolicyRequest],
Awaitable[checker.TroubleshootIamPolicyResponse],
]:
r"""Return a callable for the troubleshoot iam policy method over gRPC.
Checks whether a member has a specific permission for
a specific resource, and explains why the member does or
does not have that permission.
Returns:
Callable[[~.TroubleshootIamPolicyRequest],
Awaitable[~.TroubleshootIamPolicyResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "troubleshoot_iam_policy" not in self._stubs:
self._stubs["troubleshoot_iam_policy"] = self.grpc_channel.unary_unary(
"/google.cloud.policytroubleshooter.v1.IamChecker/TroubleshootIamPolicy",
request_serializer=checker.TroubleshootIamPolicyRequest.serialize,
response_deserializer=checker.TroubleshootIamPolicyResponse.deserialize,
)
return self._stubs["troubleshoot_iam_policy"]
def close(self):
return self.grpc_channel.close()
__all__ = ("IamCheckerGrpcAsyncIOTransport",)
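# Usage sketch (illustrative only; the async client normally constructs this
# transport itself). Assumes application default credentials are available;
# the request below is empty purely for demonstration.
#
#   import asyncio
#
#   async def _troubleshoot_example():
#       transport = IamCheckerGrpcAsyncIOTransport()
#       request = checker.TroubleshootIamPolicyRequest()
#       response = await transport.troubleshoot_iam_policy(request)
#       await transport.close()
#       return response
#
#   asyncio.run(_troubleshoot_example())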
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011-2013 OpenStack, LLC
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from glance.common import exception
from glance import context
import glance.store
import glance.store.cinder
import glance.store.filesystem
import glance.store.http
import glance.store.location as location
import glance.store.rbd
import glance.store.s3
import glance.store.sheepdog
import glance.store.swift
from glance.tests.unit import base
from glance.tests.unit import utils
class TestStoreLocation(base.StoreClearingUnitTest):
def setUp(self):
self.config(default_store='file')
super(TestStoreLocation, self).setUp()
def test_get_location_from_uri_back_to_uri(self):
"""
Test that for various URIs, the correct Location
        object can be constructed and then the original URI
returned via the get_store_uri() method.
"""
good_store_uris = [
'https://user:pass@example.com:80/images/some-id',
'http://images.oracle.com/123456',
'swift://account%3Auser:pass@authurl.com/container/obj-id',
'swift://storeurl.com/container/obj-id',
'swift+https://account%3Auser:pass@authurl.com/container/obj-id',
's3://accesskey:secretkey@s3.amazonaws.com/bucket/key-id',
's3://accesskey:secretwith/aslash@s3.amazonaws.com/bucket/key-id',
's3+http://accesskey:secret@s3.amazonaws.com/bucket/key-id',
's3+https://accesskey:secretkey@s3.amazonaws.com/bucket/key-id',
'file:///var/lib/glance/images/1',
'rbd://imagename',
'rbd://fsid/pool/image/snap',
'rbd://%2F/%2F/%2F/%2F',
'sheepdog://244e75f1-9c69-4167-9db7-1aa7d1973f6c',
'cinder://12345678-9012-3455-6789-012345678901',
]
for uri in good_store_uris:
loc = location.get_location_from_uri(uri)
# The get_store_uri() method *should* return an identical URI
# to the URI that is passed to get_location_from_uri()
self.assertEqual(loc.get_store_uri(), uri)
def test_bad_store_scheme(self):
"""
        Test that a URI with a non-existent scheme raises an exception
"""
bad_uri = 'unknown://user:pass@example.com:80/images/some-id'
self.assertRaises(exception.UnknownScheme,
location.get_location_from_uri,
bad_uri)
def test_filesystem_store_location(self):
"""
Test the specific StoreLocation for the Filesystem store
"""
uri = 'file:///var/lib/glance/images/1'
loc = glance.store.filesystem.StoreLocation({})
loc.parse_uri(uri)
self.assertEqual("file", loc.scheme)
self.assertEqual("/var/lib/glance/images/1", loc.path)
self.assertEqual(uri, loc.get_uri())
bad_uri = 'fil://'
self.assertRaises(Exception, loc.parse_uri, bad_uri)
bad_uri = 'file://'
self.assertRaises(exception.BadStoreUri, loc.parse_uri, bad_uri)
def test_http_store_location(self):
"""
Test the specific StoreLocation for the HTTP store
"""
uri = 'http://example.com/images/1'
loc = glance.store.http.StoreLocation({})
loc.parse_uri(uri)
self.assertEqual("http", loc.scheme)
self.assertEqual("example.com", loc.netloc)
self.assertEqual("/images/1", loc.path)
self.assertEqual(uri, loc.get_uri())
uri = 'https://example.com:8080/images/container/1'
loc.parse_uri(uri)
self.assertEqual("https", loc.scheme)
self.assertEqual("example.com:8080", loc.netloc)
self.assertEqual("/images/container/1", loc.path)
self.assertEqual(uri, loc.get_uri())
uri = 'https://user:password@example.com:8080/images/container/1'
loc.parse_uri(uri)
self.assertEqual("https", loc.scheme)
self.assertEqual("example.com:8080", loc.netloc)
self.assertEqual("user", loc.user)
self.assertEqual("password", loc.password)
self.assertEqual("/images/container/1", loc.path)
self.assertEqual(uri, loc.get_uri())
uri = 'https://user:@example.com:8080/images/1'
loc.parse_uri(uri)
self.assertEqual("https", loc.scheme)
self.assertEqual("example.com:8080", loc.netloc)
self.assertEqual("user", loc.user)
self.assertEqual("", loc.password)
self.assertEqual("/images/1", loc.path)
self.assertEqual(uri, loc.get_uri())
bad_uri = 'htt://'
self.assertRaises(Exception, loc.parse_uri, bad_uri)
bad_uri = 'http://'
self.assertRaises(exception.BadStoreUri, loc.parse_uri, bad_uri)
bad_uri = 'http://user@example.com:8080/images/1'
self.assertRaises(exception.BadStoreUri, loc.parse_uri, bad_uri)
def test_swift_store_location(self):
"""
Test the specific StoreLocation for the Swift store
"""
uri = 'swift://example.com/images/1'
loc = glance.store.swift.StoreLocation({})
loc.parse_uri(uri)
self.assertEqual("swift", loc.scheme)
self.assertEqual("example.com", loc.auth_or_store_url)
self.assertEqual("https://example.com", loc.swift_url)
self.assertEqual("images", loc.container)
self.assertEqual("1", loc.obj)
self.assertEqual(None, loc.user)
self.assertEqual(uri, loc.get_uri())
uri = 'swift+https://user:pass@authurl.com/images/1'
loc.parse_uri(uri)
self.assertEqual("swift+https", loc.scheme)
self.assertEqual("authurl.com", loc.auth_or_store_url)
self.assertEqual("https://authurl.com", loc.swift_url)
self.assertEqual("images", loc.container)
self.assertEqual("1", loc.obj)
self.assertEqual("user", loc.user)
self.assertEqual("pass", loc.key)
self.assertEqual(uri, loc.get_uri())
uri = 'swift+https://user:pass@authurl.com/v1/container/12345'
loc.parse_uri(uri)
self.assertEqual("swift+https", loc.scheme)
self.assertEqual("authurl.com/v1", loc.auth_or_store_url)
self.assertEqual("https://authurl.com/v1", loc.swift_url)
self.assertEqual("container", loc.container)
self.assertEqual("12345", loc.obj)
self.assertEqual("user", loc.user)
self.assertEqual("pass", loc.key)
self.assertEqual(uri, loc.get_uri())
uri = ('swift+http://a%3Auser%40example.com:p%40ss@authurl.com/'
'v1/container/12345')
loc.parse_uri(uri)
self.assertEqual("swift+http", loc.scheme)
self.assertEqual("authurl.com/v1", loc.auth_or_store_url)
self.assertEqual("http://authurl.com/v1", loc.swift_url)
self.assertEqual("container", loc.container)
self.assertEqual("12345", loc.obj)
self.assertEqual("a:user@example.com", loc.user)
self.assertEqual("p@ss", loc.key)
self.assertEqual(uri, loc.get_uri())
# multitenant puts store URL in the location (not auth)
uri = ('swift+http://storeurl.com/v1/container/12345')
loc.parse_uri(uri)
self.assertEqual("swift+http", loc.scheme)
self.assertEqual("storeurl.com/v1", loc.auth_or_store_url)
self.assertEqual("http://storeurl.com/v1", loc.swift_url)
self.assertEqual("container", loc.container)
self.assertEqual("12345", loc.obj)
self.assertEqual(None, loc.user)
self.assertEqual(None, loc.key)
self.assertEqual(uri, loc.get_uri())
bad_uri = 'swif://'
self.assertRaises(Exception, loc.parse_uri, bad_uri)
bad_uri = 'swift://'
self.assertRaises(exception.BadStoreUri, loc.parse_uri, bad_uri)
bad_uri = 'swift://user@example.com:8080/images/1'
self.assertRaises(exception.BadStoreUri, loc.parse_uri, bad_uri)
bad_uri = 'swift://user:pass@http://example.com:8080/images/1'
self.assertRaises(exception.BadStoreUri, loc.parse_uri, bad_uri)
def test_s3_store_location(self):
"""
Test the specific StoreLocation for the S3 store
"""
uri = 's3://example.com/images/1'
loc = glance.store.s3.StoreLocation({})
loc.parse_uri(uri)
self.assertEqual("s3", loc.scheme)
self.assertEqual("example.com", loc.s3serviceurl)
self.assertEqual("images", loc.bucket)
self.assertEqual("1", loc.key)
self.assertEqual(None, loc.accesskey)
self.assertEqual(uri, loc.get_uri())
uri = 's3+https://accesskey:pass@s3serviceurl.com/images/1'
loc.parse_uri(uri)
self.assertEqual("s3+https", loc.scheme)
self.assertEqual("s3serviceurl.com", loc.s3serviceurl)
self.assertEqual("images", loc.bucket)
self.assertEqual("1", loc.key)
self.assertEqual("accesskey", loc.accesskey)
self.assertEqual("pass", loc.secretkey)
self.assertEqual(uri, loc.get_uri())
uri = 's3+https://accesskey:pass@s3serviceurl.com/v1/bucket/12345'
loc.parse_uri(uri)
self.assertEqual("s3+https", loc.scheme)
self.assertEqual("s3serviceurl.com/v1", loc.s3serviceurl)
self.assertEqual("bucket", loc.bucket)
self.assertEqual("12345", loc.key)
self.assertEqual("accesskey", loc.accesskey)
self.assertEqual("pass", loc.secretkey)
self.assertEqual(uri, loc.get_uri())
uri = 's3://accesskey:pass/withslash@s3serviceurl.com/v1/bucket/12345'
loc.parse_uri(uri)
self.assertEqual("s3", loc.scheme)
self.assertEqual("s3serviceurl.com/v1", loc.s3serviceurl)
self.assertEqual("bucket", loc.bucket)
self.assertEqual("12345", loc.key)
self.assertEqual("accesskey", loc.accesskey)
self.assertEqual("pass/withslash", loc.secretkey)
self.assertEqual(uri, loc.get_uri())
bad_uri = 's://'
self.assertRaises(Exception, loc.parse_uri, bad_uri)
bad_uri = 's3://'
self.assertRaises(exception.BadStoreUri, loc.parse_uri, bad_uri)
bad_uri = 's3://accesskey@example.com:8080/images/1'
self.assertRaises(exception.BadStoreUri, loc.parse_uri, bad_uri)
bad_uri = 's3://user:pass@http://example.com:8080/images/1'
self.assertRaises(exception.BadStoreUri, loc.parse_uri, bad_uri)
def test_rbd_store_location(self):
"""
Test the specific StoreLocation for the RBD store
"""
uri = 'rbd://imagename'
loc = glance.store.rbd.StoreLocation({})
loc.parse_uri(uri)
self.assertEqual('imagename', loc.image)
self.assertEqual(None, loc.fsid)
self.assertEqual(None, loc.pool)
self.assertEqual(None, loc.snapshot)
uri = u'rbd://imagename'
loc = glance.store.rbd.StoreLocation({})
loc.parse_uri(uri)
self.assertEqual('imagename', loc.image)
self.assertEqual(None, loc.fsid)
self.assertEqual(None, loc.pool)
self.assertEqual(None, loc.snapshot)
uri = 'rbd://fsid/pool/image/snap'
loc = glance.store.rbd.StoreLocation({})
loc.parse_uri(uri)
self.assertEqual('image', loc.image)
self.assertEqual('fsid', loc.fsid)
self.assertEqual('pool', loc.pool)
self.assertEqual('snap', loc.snapshot)
uri = u'rbd://fsid/pool/image/snap'
loc = glance.store.rbd.StoreLocation({})
loc.parse_uri(uri)
self.assertEqual('image', loc.image)
self.assertEqual('fsid', loc.fsid)
self.assertEqual('pool', loc.pool)
self.assertEqual('snap', loc.snapshot)
uri = 'rbd://%2f/%2f/%2f/%2f'
loc = glance.store.rbd.StoreLocation({})
loc.parse_uri(uri)
self.assertEqual('/', loc.image)
self.assertEqual('/', loc.fsid)
self.assertEqual('/', loc.pool)
self.assertEqual('/', loc.snapshot)
uri = u'rbd://%2f/%2f/%2f/%2f'
loc = glance.store.rbd.StoreLocation({})
loc.parse_uri(uri)
self.assertEqual('/', loc.image)
self.assertEqual('/', loc.fsid)
self.assertEqual('/', loc.pool)
self.assertEqual('/', loc.snapshot)
bad_uri = 'rbd:/image'
self.assertRaises(exception.BadStoreUri, loc.parse_uri, bad_uri)
bad_uri = 'rbd://image/extra'
self.assertRaises(exception.BadStoreUri, loc.parse_uri, bad_uri)
bad_uri = 'rbd://image/'
self.assertRaises(exception.BadStoreUri, loc.parse_uri, bad_uri)
bad_uri = 'http://image'
self.assertRaises(exception.BadStoreUri, loc.parse_uri, bad_uri)
bad_uri = 'http://fsid/pool/image/snap'
self.assertRaises(exception.BadStoreUri, loc.parse_uri, bad_uri)
bad_uri = 'rbd://fsid/pool/image/'
self.assertRaises(exception.BadStoreUri, loc.parse_uri, bad_uri)
bad_uri = 'rbd://fsid/pool/image/snap/'
self.assertRaises(exception.BadStoreUri, loc.parse_uri, bad_uri)
bad_uri = 'http://///'
self.assertRaises(exception.BadStoreUri, loc.parse_uri, bad_uri)
bad_uri = 'rbd://' + unichr(300)
self.assertRaises(exception.BadStoreUri, loc.parse_uri, bad_uri)
def test_sheepdog_store_location(self):
"""
Test the specific StoreLocation for the Sheepdog store
"""
uri = 'sheepdog://244e75f1-9c69-4167-9db7-1aa7d1973f6c'
loc = glance.store.sheepdog.StoreLocation({})
loc.parse_uri(uri)
self.assertEqual('244e75f1-9c69-4167-9db7-1aa7d1973f6c', loc.image)
bad_uri = 'sheepdog:/244e75f1-9c69-4167-9db7-1aa7d1973f6c'
self.assertRaises(exception.BadStoreUri, loc.parse_uri, bad_uri)
bad_uri = 'http://244e75f1-9c69-4167-9db7-1aa7d1973f6c'
self.assertRaises(exception.BadStoreUri, loc.parse_uri, bad_uri)
bad_uri = 'image; name'
self.assertRaises(exception.BadStoreUri, loc.parse_uri, bad_uri)
def test_cinder_store_good_location(self):
"""
Test the specific StoreLocation for the Cinder store
"""
good_uri = 'cinder://12345678-9012-3455-6789-012345678901'
loc = glance.store.cinder.StoreLocation({})
loc.parse_uri(good_uri)
self.assertEqual('12345678-9012-3455-6789-012345678901', loc.volume_id)
def test_cinder_store_bad_location(self):
"""
Test the specific StoreLocation for the Cinder store
"""
bad_uri = 'cinder://volume-id-is-a-uuid'
loc = glance.store.cinder.StoreLocation({})
self.assertRaises(exception.BadStoreUri, loc.parse_uri, bad_uri)
def test_get_store_from_scheme(self):
"""
Test that the backend returned by glance.store.get_backend_class
is correct or raises an appropriate error.
"""
good_results = {
'swift': glance.store.swift.SingleTenantStore,
'swift+http': glance.store.swift.SingleTenantStore,
'swift+https': glance.store.swift.SingleTenantStore,
's3': glance.store.s3.Store,
's3+http': glance.store.s3.Store,
's3+https': glance.store.s3.Store,
'file': glance.store.filesystem.Store,
'filesystem': glance.store.filesystem.Store,
'http': glance.store.http.Store,
'https': glance.store.http.Store,
'rbd': glance.store.rbd.Store,
'sheepdog': glance.store.sheepdog.Store,
'cinder': glance.store.cinder.Store}
ctx = context.RequestContext()
for scheme, store in good_results.items():
store_obj = glance.store.get_store_from_scheme(ctx, scheme)
self.assertEqual(store_obj.__class__, store)
bad_results = ['fil', 'swift+h', 'unknown']
for store in bad_results:
self.assertRaises(exception.UnknownScheme,
glance.store.get_store_from_scheme,
ctx,
store)
class FakeImageProxy(object):
size = None
context = None
def __init__(self, store_api):
self.store_api = store_api
def test_add_location_with_restricted_sources(self):
loc1 = {'url': 'file:///fake1.img.tar.gz', 'metadata': {}}
loc2 = {'url': 'swift+config:///xxx', 'metadata': {}}
loc3 = {'url': 'filesystem:///foo.img.tar.gz', 'metadata': {}}
# Test for insert location
image1 = TestStoreLocation.FakeImageProxy(utils.FakeStoreAPI())
locations = glance.store.StoreLocations(image1, [])
self.assertRaises(exception.BadStoreUri, locations.insert, 0, loc1)
self.assertRaises(exception.BadStoreUri, locations.insert, 0, loc3)
self.assertNotIn(loc1, locations)
self.assertNotIn(loc3, locations)
# Test for set_attr of _locations_proxy
image2 = TestStoreLocation.FakeImageProxy(utils.FakeStoreAPI())
locations = glance.store.StoreLocations(image2, [loc1])
self.assertRaises(exception.BadStoreUri, locations.insert, 0, loc2)
self.assertNotIn(loc2, locations)
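# Round-trip sketch (mirrors the assertions above; assumes the store registry
# is configured as in StoreClearingUnitTest):
#
#   loc = location.get_location_from_uri('file:///var/lib/glance/images/1')
#   assert loc.get_store_uri() == 'file:///var/lib/glance/images/1'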
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class PublicIPAddressesOperations(object):
"""PublicIPAddressesOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2019_12_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def _delete_initial(
self,
resource_group_name, # type: str
public_ip_address_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-12-01"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'publicIpAddressName': self._serialize.url("public_ip_address_name", public_ip_address_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPAddresses/{publicIpAddressName}'} # type: ignore
def begin_delete(
self,
resource_group_name, # type: str
public_ip_address_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Deletes the specified public IP address.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
        :param public_ip_address_name: The name of the public IP address.
:type public_ip_address_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
public_ip_address_name=public_ip_address_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'publicIpAddressName': self._serialize.url("public_ip_address_name", public_ip_address_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPAddresses/{publicIpAddressName}'} # type: ignore
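    # Usage sketch (the client and resource names below are assumptions):
    #
    #   poller = client.public_ip_addresses.begin_delete("my-rg", "my-ip")
    #   poller.result()  # blocks until the delete LRO completes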
def get(
self,
resource_group_name, # type: str
public_ip_address_name, # type: str
expand=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> "_models.PublicIPAddress"
"""Gets the specified public IP address in a specified resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
        :param public_ip_address_name: The name of the public IP address.
:type public_ip_address_name: str
:param expand: Expands referenced resources.
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: PublicIPAddress, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2019_12_01.models.PublicIPAddress
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.PublicIPAddress"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-12-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'publicIpAddressName': self._serialize.url("public_ip_address_name", public_ip_address_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('PublicIPAddress', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPAddresses/{publicIpAddressName}'} # type: ignore
def _create_or_update_initial(
self,
resource_group_name, # type: str
public_ip_address_name, # type: str
parameters, # type: "_models.PublicIPAddress"
**kwargs # type: Any
):
# type: (...) -> "_models.PublicIPAddress"
cls = kwargs.pop('cls', None) # type: ClsType["_models.PublicIPAddress"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-12-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'publicIpAddressName': self._serialize.url("public_ip_address_name", public_ip_address_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'PublicIPAddress')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('PublicIPAddress', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('PublicIPAddress', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPAddresses/{publicIpAddressName}'} # type: ignore
def begin_create_or_update(
self,
resource_group_name, # type: str
public_ip_address_name, # type: str
parameters, # type: "_models.PublicIPAddress"
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.PublicIPAddress"]
"""Creates or updates a static or dynamic public IP address.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param public_ip_address_name: The name of the public IP address.
:type public_ip_address_name: str
:param parameters: Parameters supplied to the create or update public IP address operation.
:type parameters: ~azure.mgmt.network.v2019_12_01.models.PublicIPAddress
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either PublicIPAddress or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2019_12_01.models.PublicIPAddress]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.PublicIPAddress"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
public_ip_address_name=public_ip_address_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('PublicIPAddress', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'publicIpAddressName': self._serialize.url("public_ip_address_name", public_ip_address_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPAddresses/{publicIpAddressName}'} # type: ignore
def update_tags(
self,
resource_group_name, # type: str
public_ip_address_name, # type: str
parameters, # type: "_models.TagsObject"
**kwargs # type: Any
):
# type: (...) -> "_models.PublicIPAddress"
"""Updates public IP address tags.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param public_ip_address_name: The name of the public IP address.
:type public_ip_address_name: str
:param parameters: Parameters supplied to update public IP address tags.
:type parameters: ~azure.mgmt.network.v2019_12_01.models.TagsObject
:keyword callable cls: A custom type or function that will be passed the direct response
:return: PublicIPAddress, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2019_12_01.models.PublicIPAddress
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.PublicIPAddress"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-12-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.update_tags.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'publicIpAddressName': self._serialize.url("public_ip_address_name", public_ip_address_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'TagsObject')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('PublicIPAddress', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPAddresses/{publicIpAddressName}'} # type: ignore
def list_all(
self,
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.PublicIPAddressListResult"]
"""Gets all the public IP addresses in a subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator-like instance of either PublicIPAddressListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2019_12_01.models.PublicIPAddressListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.PublicIPAddressListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-12-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_all.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('PublicIPAddressListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_all.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/publicIPAddresses'} # type: ignore
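    # Paging sketch (client name is an assumption): ItemPaged drives
    # prepare_request/extract_data transparently, so iteration fetches
    # successive pages lazily.
    #
    #   for public_ip in client.public_ip_addresses.list_all():
    #       print(public_ip.name)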
def list(
self,
resource_group_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.PublicIPAddressListResult"]
"""Gets all public IP addresses in a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator-like instance of either PublicIPAddressListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2019_12_01.models.PublicIPAddressListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.PublicIPAddressListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-12-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('PublicIPAddressListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPAddresses'} # type: ignore
def list_virtual_machine_scale_set_public_ip_addresses(
self,
resource_group_name, # type: str
virtual_machine_scale_set_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.PublicIPAddressListResult"]
"""Gets information about all public IP addresses on a virtual machine scale set level.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_machine_scale_set_name: The name of the virtual machine scale set.
:type virtual_machine_scale_set_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator-like instance of either PublicIPAddressListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2019_12_01.models.PublicIPAddressListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.PublicIPAddressListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-10-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_virtual_machine_scale_set_public_ip_addresses.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualMachineScaleSetName': self._serialize.url("virtual_machine_scale_set_name", virtual_machine_scale_set_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('PublicIPAddressListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_virtual_machine_scale_set_public_ip_addresses.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{virtualMachineScaleSetName}/publicipaddresses'} # type: ignore
def list_virtual_machine_scale_set_vm_public_ip_addresses(
self,
resource_group_name, # type: str
virtual_machine_scale_set_name, # type: str
virtualmachine_index, # type: str
network_interface_name, # type: str
ip_configuration_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.PublicIPAddressListResult"]
"""Gets information about all public IP addresses in a virtual machine IP configuration in a
virtual machine scale set.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_machine_scale_set_name: The name of the virtual machine scale set.
:type virtual_machine_scale_set_name: str
:param virtualmachine_index: The virtual machine index.
:type virtualmachine_index: str
:param network_interface_name: The network interface name.
:type network_interface_name: str
:param ip_configuration_name: The IP configuration name.
:type ip_configuration_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator-like instance of either PublicIPAddressListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2019_12_01.models.PublicIPAddressListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.PublicIPAddressListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-10-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_virtual_machine_scale_set_vm_public_ip_addresses.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualMachineScaleSetName': self._serialize.url("virtual_machine_scale_set_name", virtual_machine_scale_set_name, 'str'),
'virtualmachineIndex': self._serialize.url("virtualmachine_index", virtualmachine_index, 'str'),
'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'),
'ipConfigurationName': self._serialize.url("ip_configuration_name", ip_configuration_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('PublicIPAddressListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_virtual_machine_scale_set_vm_public_ip_addresses.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{virtualMachineScaleSetName}/virtualMachines/{virtualmachineIndex}/networkInterfaces/{networkInterfaceName}/ipconfigurations/{ipConfigurationName}/publicipaddresses'} # type: ignore
def get_virtual_machine_scale_set_public_ip_address(
self,
resource_group_name, # type: str
virtual_machine_scale_set_name, # type: str
virtualmachine_index, # type: str
network_interface_name, # type: str
ip_configuration_name, # type: str
public_ip_address_name, # type: str
expand=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> "_models.PublicIPAddress"
"""Get the specified public IP address in a virtual machine scale set.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_machine_scale_set_name: The name of the virtual machine scale set.
:type virtual_machine_scale_set_name: str
:param virtualmachine_index: The virtual machine index.
:type virtualmachine_index: str
:param network_interface_name: The name of the network interface.
:type network_interface_name: str
:param ip_configuration_name: The name of the IP configuration.
:type ip_configuration_name: str
        :param public_ip_address_name: The name of the public IP address.
:type public_ip_address_name: str
:param expand: Expands referenced resources.
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: PublicIPAddress, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2019_12_01.models.PublicIPAddress
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.PublicIPAddress"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-10-01"
accept = "application/json"
# Construct URL
url = self.get_virtual_machine_scale_set_public_ip_address.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualMachineScaleSetName': self._serialize.url("virtual_machine_scale_set_name", virtual_machine_scale_set_name, 'str'),
'virtualmachineIndex': self._serialize.url("virtualmachine_index", virtualmachine_index, 'str'),
'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'),
'ipConfigurationName': self._serialize.url("ip_configuration_name", ip_configuration_name, 'str'),
'publicIpAddressName': self._serialize.url("public_ip_address_name", public_ip_address_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('PublicIPAddress', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_virtual_machine_scale_set_public_ip_address.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{virtualMachineScaleSetName}/virtualMachines/{virtualmachineIndex}/networkInterfaces/{networkInterfaceName}/ipconfigurations/{ipConfigurationName}/publicipaddresses/{publicIpAddressName}'} # type: ignore
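    # Example (illustrative sketch; the resource names are hypothetical, and
    # "client" is assumed to be a configured
    # azure.mgmt.network.NetworkManagementClient):
    #
    #     ip = client.public_ip_addresses.get_virtual_machine_scale_set_public_ip_address(
    #         resource_group_name="my-rg",
    #         virtual_machine_scale_set_name="my-vmss",
    #         virtualmachine_index="0",
    #         network_interface_name="nic0",
    #         ip_configuration_name="ipconfig0",
    #         public_ip_address_name="pub0",
    #     )
    #     print(ip.ip_address)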
|
|
"""
Core components of Home Assistant.
Home Assistant is a Home Automation framework for observing the state
of entities and reacting to changes.
"""
import asyncio
from concurrent.futures import ThreadPoolExecutor
import datetime
import enum
import functools
import logging
import os
import pathlib
import threading
from time import monotonic
import uuid
from types import MappingProxyType
from typing import ( # noqa: F401 pylint: disable=unused-import
Optional,
Any,
Callable,
List,
TypeVar,
Dict,
Coroutine,
Set,
TYPE_CHECKING,
Awaitable,
Iterator,
)
from async_timeout import timeout
import attr
import voluptuous as vol
from homeassistant.const import (
ATTR_DOMAIN,
ATTR_FRIENDLY_NAME,
ATTR_NOW,
ATTR_SERVICE,
ATTR_SERVICE_DATA,
ATTR_SECONDS,
CONF_UNIT_SYSTEM_IMPERIAL,
EVENT_CALL_SERVICE,
EVENT_CORE_CONFIG_UPDATE,
EVENT_HOMEASSISTANT_START,
EVENT_HOMEASSISTANT_STOP,
EVENT_HOMEASSISTANT_CLOSE,
EVENT_SERVICE_REMOVED,
EVENT_SERVICE_REGISTERED,
EVENT_STATE_CHANGED,
EVENT_TIME_CHANGED,
EVENT_TIMER_OUT_OF_SYNC,
MATCH_ALL,
__version__,
)
from homeassistant import loader
from homeassistant.exceptions import (
HomeAssistantError,
InvalidEntityFormatError,
InvalidStateError,
Unauthorized,
ServiceNotFound,
)
from homeassistant.util.async_ import (
run_coroutine_threadsafe,
run_callback_threadsafe,
fire_coroutine_threadsafe,
)
from homeassistant import util
import homeassistant.util.dt as dt_util
from homeassistant.util import location, slugify
from homeassistant.util.unit_system import ( # NOQA
UnitSystem,
IMPERIAL_SYSTEM,
METRIC_SYSTEM,
)
# Typing imports that create a circular dependency
# pylint: disable=using-constant-test
if TYPE_CHECKING:
from homeassistant.config_entries import ConfigEntries # noqa
# pylint: disable=invalid-name
T = TypeVar("T")
CALLABLE_T = TypeVar("CALLABLE_T", bound=Callable)
CALLBACK_TYPE = Callable[[], None]
# pylint: enable=invalid-name
CORE_STORAGE_KEY = "core.config"
CORE_STORAGE_VERSION = 1
DOMAIN = "homeassistant"
# How long we wait for the result of a service call
SERVICE_CALL_LIMIT = 10 # seconds
# Source of core configuration
SOURCE_DISCOVERED = "discovered"
SOURCE_STORAGE = "storage"
SOURCE_YAML = "yaml"
# How long to wait till things that run on startup have to finish.
TIMEOUT_EVENT_START = 15
_LOGGER = logging.getLogger(__name__)
def split_entity_id(entity_id: str) -> List[str]:
"""Split a state entity_id into domain, object_id."""
return entity_id.split(".", 1)
def valid_entity_id(entity_id: str) -> bool:
"""Test if an entity ID is a valid format.
Format: <domain>.<entity> where both are slugs.
"""
return "." in entity_id and slugify(entity_id) == entity_id.replace(".", "_", 1)
def valid_state(state: str) -> bool:
"""Test if a state is valid."""
return len(state) < 256
def callback(func: CALLABLE_T) -> CALLABLE_T:
"""Annotation to mark method as safe to call from within the event loop."""
setattr(func, "_hass_callback", True)
return func
def is_callback(func: Callable[..., Any]) -> bool:
"""Check if function is safe to be called in the event loop."""
return getattr(func, "_hass_callback", False) is True
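# Example (illustrative): a function marked with @callback is invoked
# directly on the event loop, while an undecorated sync function is sent
# to the executor by async_add_job below.
#
#     @callback
#     def handle(event):
#         ...
#
#     is_callback(handle)  # True
#     is_callback(print)   # False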
@callback
def async_loop_exception_handler(_: Any, context: Dict) -> None:
"""Handle all exception inside the core loop."""
kwargs = {}
exception = context.get("exception")
if exception:
kwargs["exc_info"] = (type(exception), exception, exception.__traceback__)
_LOGGER.error( # type: ignore
"Error doing job: %s", context["message"], **kwargs
)
class CoreState(enum.Enum):
"""Represent the current state of Home Assistant."""
not_running = "NOT_RUNNING"
starting = "STARTING"
running = "RUNNING"
stopping = "STOPPING"
def __str__(self) -> str:
"""Return the event."""
return self.value # type: ignore
class HomeAssistant:
"""Root object of the Home Assistant home automation."""
def __init__(self, loop: Optional[asyncio.events.AbstractEventLoop] = None) -> None:
"""Initialize new Home Assistant object."""
self.loop: asyncio.events.AbstractEventLoop = (loop or asyncio.get_event_loop())
executor_opts = {
"max_workers": None,
"thread_name_prefix": "SyncWorker",
} # type: Dict[str, Any]
self.executor = ThreadPoolExecutor(**executor_opts)
self.loop.set_default_executor(self.executor)
self.loop.set_exception_handler(async_loop_exception_handler)
self._pending_tasks: list = []
self._track_task = True
self.bus = EventBus(self)
self.services = ServiceRegistry(self)
self.states = StateMachine(self.bus, self.loop)
self.config = Config(self)
self.components = loader.Components(self)
self.helpers = loader.Helpers(self)
# This is a dictionary that any component can store any data on.
self.data: dict = {}
self.state = CoreState.not_running
self.exit_code = 0
self.config_entries: Optional[ConfigEntries] = None
# If not None, use to signal end-of-loop
self._stopped: Optional[asyncio.Event] = None
@property
def is_running(self) -> bool:
"""Return if Home Assistant is running."""
return self.state in (CoreState.starting, CoreState.running)
def start(self) -> int:
"""Start home assistant.
Note: This function is only used for testing.
For regular use, use "await hass.run()".
"""
# Register the async start
fire_coroutine_threadsafe(self.async_start(), self.loop)
# Run forever
try:
# Block until stopped
_LOGGER.info("Starting Home Assistant core loop")
self.loop.run_forever()
finally:
self.loop.close()
return self.exit_code
async def async_run(self, *, attach_signals: bool = True) -> int:
"""Home Assistant main entry point.
Start Home Assistant and block until stopped.
This method is a coroutine.
"""
if self.state != CoreState.not_running:
raise RuntimeError("HASS is already running")
# _async_stop will set this instead of stopping the loop
self._stopped = asyncio.Event()
await self.async_start()
if attach_signals:
from homeassistant.helpers.signal import async_register_signal_handling
async_register_signal_handling(self)
await self._stopped.wait()
return self.exit_code
async def async_start(self) -> None:
"""Finalize startup from inside the event loop.
This method is a coroutine.
"""
_LOGGER.info("Starting Home Assistant")
self.state = CoreState.starting
setattr(self.loop, "_thread_ident", threading.get_ident())
self.bus.async_fire(EVENT_HOMEASSISTANT_START)
try:
# Only block for EVENT_HOMEASSISTANT_START listener
self.async_stop_track_tasks()
with timeout(TIMEOUT_EVENT_START):
await self.async_block_till_done()
except asyncio.TimeoutError:
_LOGGER.warning(
"Something is blocking Home Assistant from wrapping up the "
"start up phase. We're going to continue anyway. Please "
"report the following info at http://bit.ly/2ogP58T : %s",
", ".join(self.config.components),
)
# Allow automations to set up the start triggers before changing state
await asyncio.sleep(0)
if self.state != CoreState.starting:
_LOGGER.warning(
"Home Assistant startup has been interrupted. "
"Its state may be inconsistent."
)
return
self.state = CoreState.running
_async_create_timer(self)
def add_job(self, target: Callable[..., None], *args: Any) -> None:
"""Add job to the executor pool.
target: target to call.
args: parameters for method to call.
"""
if target is None:
raise ValueError("Don't call add_job with None")
self.loop.call_soon_threadsafe(self.async_add_job, target, *args)
@callback
def async_add_job(
self, target: Callable[..., Any], *args: Any
) -> Optional[asyncio.Future]:
"""Add a job from within the event loop.
This method must be run in the event loop.
target: target to call.
args: parameters for method to call.
"""
task = None
# Check for partials to properly determine if coroutine function
check_target = target
while isinstance(check_target, functools.partial):
check_target = check_target.func
if asyncio.iscoroutine(check_target):
task = self.loop.create_task(target) # type: ignore
elif is_callback(check_target):
self.loop.call_soon(target, *args)
elif asyncio.iscoroutinefunction(check_target):
task = self.loop.create_task(target(*args))
else:
task = self.loop.run_in_executor( # type: ignore
None, target, *args
)
# If a task is scheduled
if self._track_task and task is not None:
self._pending_tasks.append(task)
return task
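    # Dispatch summary for async_add_job (illustrative; "cb" is a function
    # decorated with @callback and "coro_func" is an async def):
    #
    #     hass.async_add_job(coro_func())     # -> loop.create_task(...)
    #     hass.async_add_job(cb, arg)         # -> loop.call_soon(cb, arg)
    #     hass.async_add_job(coro_func, arg)  # -> loop.create_task(coro_func(arg))
    #     hass.async_add_job(sync_func, arg)  # -> loop.run_in_executor(None, ...)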
@callback
def async_create_task(self, target: Coroutine) -> asyncio.tasks.Task:
"""Create a task from within the eventloop.
This method must be run in the event loop.
target: target to call.
"""
task: asyncio.tasks.Task = self.loop.create_task(target)
if self._track_task:
self._pending_tasks.append(task)
return task
@callback
def async_add_executor_job(
self, target: Callable[..., T], *args: Any
) -> Awaitable[T]:
"""Add an executor job from within the event loop."""
task = self.loop.run_in_executor(None, target, *args)
# If a task is scheduled
if self._track_task:
self._pending_tasks.append(task)
return task
@callback
def async_track_tasks(self) -> None:
"""Track tasks so you can wait for all tasks to be done."""
self._track_task = True
@callback
def async_stop_track_tasks(self) -> None:
"""Stop track tasks so you can't wait for all tasks to be done."""
self._track_task = False
@callback
def async_run_job(self, target: Callable[..., None], *args: Any) -> None:
"""Run a job from within the event loop.
This method must be run in the event loop.
target: target to call.
args: parameters for method to call.
"""
if not asyncio.iscoroutine(target) and is_callback(target):
target(*args)
else:
self.async_add_job(target, *args)
def block_till_done(self) -> None:
"""Block till all pending work is done."""
run_coroutine_threadsafe(self.async_block_till_done(), self.loop).result()
async def async_block_till_done(self) -> None:
"""Block till all pending work is done."""
# To flush out any call_soon_threadsafe
await asyncio.sleep(0)
while self._pending_tasks:
pending = [task for task in self._pending_tasks if not task.done()]
self._pending_tasks.clear()
if pending:
await asyncio.wait(pending)
else:
await asyncio.sleep(0)
def stop(self) -> None:
"""Stop Home Assistant and shuts down all threads."""
if self.state == CoreState.not_running: # just ignore
return
fire_coroutine_threadsafe(self.async_stop(), self.loop)
async def async_stop(self, exit_code: int = 0, *, force: bool = False) -> None:
"""Stop Home Assistant and shuts down all threads.
The "force" flag commands async_stop to proceed regardless of
Home Assistan't current state. You should not set this flag
unless you're testing.
This method is a coroutine.
"""
if not force:
# Some tests require async_stop to run,
# regardless of the state of the loop.
if self.state == CoreState.not_running: # just ignore
return
if self.state == CoreState.stopping:
_LOGGER.info("async_stop called twice: ignored")
return
if self.state == CoreState.starting:
# This may not work
_LOGGER.warning("async_stop called before startup is complete")
# stage 1
self.state = CoreState.stopping
self.async_track_tasks()
self.bus.async_fire(EVENT_HOMEASSISTANT_STOP)
await self.async_block_till_done()
# stage 2
self.state = CoreState.not_running
self.bus.async_fire(EVENT_HOMEASSISTANT_CLOSE)
await self.async_block_till_done()
self.executor.shutdown()
self.exit_code = exit_code
if self._stopped is not None:
self._stopped.set()
else:
self.loop.stop()
@attr.s(slots=True, frozen=True)
class Context:
"""The context that triggered something."""
user_id = attr.ib(type=str, default=None)
parent_id = attr.ib(type=Optional[str], default=None)
id = attr.ib(type=str, default=attr.Factory(lambda: uuid.uuid4().hex))
def as_dict(self) -> dict:
"""Return a dictionary representation of the context."""
return {"id": self.id, "parent_id": self.parent_id, "user_id": self.user_id}
class EventOrigin(enum.Enum):
"""Represent the origin of an event."""
local = "LOCAL"
remote = "REMOTE"
def __str__(self) -> str:
"""Return the event."""
return self.value # type: ignore
class Event:
"""Representation of an event within the bus."""
__slots__ = ["event_type", "data", "origin", "time_fired", "context"]
def __init__(
self,
event_type: str,
data: Optional[Dict] = None,
origin: EventOrigin = EventOrigin.local,
        time_fired: Optional[datetime.datetime] = None,
context: Optional[Context] = None,
) -> None:
"""Initialize a new event."""
self.event_type = event_type
self.data = data or {}
self.origin = origin
self.time_fired = time_fired or dt_util.utcnow()
self.context: Context = context or Context()
def as_dict(self) -> Dict:
"""Create a dict representation of this Event.
Async friendly.
"""
return {
"event_type": self.event_type,
"data": dict(self.data),
"origin": str(self.origin),
"time_fired": self.time_fired,
"context": self.context.as_dict(),
}
def __repr__(self) -> str:
"""Return the representation."""
# pylint: disable=maybe-no-member
if self.data:
return "<Event {}[{}]: {}>".format(
self.event_type, str(self.origin)[0], util.repr_helper(self.data)
)
return "<Event {}[{}]>".format(self.event_type, str(self.origin)[0])
def __eq__(self, other: Any) -> bool:
"""Return the comparison."""
return ( # type: ignore
self.__class__ == other.__class__
and self.event_type == other.event_type
and self.data == other.data
and self.origin == other.origin
and self.time_fired == other.time_fired
and self.context == other.context
)
class EventBus:
"""Allow the firing of and listening for events."""
def __init__(self, hass: HomeAssistant) -> None:
"""Initialize a new event bus."""
self._listeners: Dict[str, List[Callable]] = {}
self._hass = hass
@callback
def async_listeners(self) -> Dict[str, int]:
"""Return dictionary with events and the number of listeners.
This method must be run in the event loop.
"""
return {key: len(self._listeners[key]) for key in self._listeners}
@property
def listeners(self) -> Dict[str, int]:
"""Return dictionary with events and the number of listeners."""
return run_callback_threadsafe( # type: ignore
self._hass.loop, self.async_listeners
).result()
def fire(
self,
event_type: str,
event_data: Optional[Dict] = None,
origin: EventOrigin = EventOrigin.local,
context: Optional[Context] = None,
) -> None:
"""Fire an event."""
self._hass.loop.call_soon_threadsafe(
self.async_fire, event_type, event_data, origin, context
)
@callback
def async_fire(
self,
event_type: str,
event_data: Optional[Dict] = None,
origin: EventOrigin = EventOrigin.local,
context: Optional[Context] = None,
) -> None:
"""Fire an event.
This method must be run in the event loop.
"""
listeners = self._listeners.get(event_type, [])
        # EVENT_HOMEASSISTANT_CLOSE should go only to its own listeners
match_all_listeners = self._listeners.get(MATCH_ALL)
if match_all_listeners is not None and event_type != EVENT_HOMEASSISTANT_CLOSE:
listeners = match_all_listeners + listeners
event = Event(event_type, event_data, origin, None, context)
if event_type != EVENT_TIME_CHANGED:
_LOGGER.debug("Bus:Handling %s", event)
if not listeners:
return
for func in listeners:
self._hass.async_add_job(func, event)
def listen(self, event_type: str, listener: Callable) -> CALLBACK_TYPE:
"""Listen for all events or events of a specific type.
To listen to all events specify the constant ``MATCH_ALL``
as event_type.
"""
async_remove_listener = run_callback_threadsafe(
self._hass.loop, self.async_listen, event_type, listener
).result()
def remove_listener() -> None:
"""Remove the listener."""
run_callback_threadsafe(self._hass.loop, async_remove_listener).result()
return remove_listener
@callback
def async_listen(self, event_type: str, listener: Callable) -> CALLBACK_TYPE:
"""Listen for all events or events of a specific type.
To listen to all events specify the constant ``MATCH_ALL``
as event_type.
This method must be run in the event loop.
"""
if event_type in self._listeners:
self._listeners[event_type].append(listener)
else:
self._listeners[event_type] = [listener]
def remove_listener() -> None:
"""Remove the listener."""
self._async_remove_listener(event_type, listener)
return remove_listener
def listen_once(self, event_type: str, listener: Callable) -> CALLBACK_TYPE:
"""Listen once for event of a specific type.
To listen to all events specify the constant ``MATCH_ALL``
as event_type.
Returns function to unsubscribe the listener.
"""
async_remove_listener = run_callback_threadsafe(
self._hass.loop, self.async_listen_once, event_type, listener
).result()
def remove_listener() -> None:
"""Remove the listener."""
run_callback_threadsafe(self._hass.loop, async_remove_listener).result()
return remove_listener
@callback
def async_listen_once(self, event_type: str, listener: Callable) -> CALLBACK_TYPE:
"""Listen once for event of a specific type.
To listen to all events specify the constant ``MATCH_ALL``
as event_type.
Returns registered listener that can be used with remove_listener.
This method must be run in the event loop.
"""
@callback
def onetime_listener(event: Event) -> None:
"""Remove listener from event bus and then fire listener."""
if hasattr(onetime_listener, "run"):
return
# Set variable so that we will never run twice.
# Because the event bus loop might have async_fire queued multiple
            # times, it's possible this listener may already be lined up
# multiple times as well.
# This will make sure the second time it does nothing.
setattr(onetime_listener, "run", True)
self._async_remove_listener(event_type, onetime_listener)
self._hass.async_run_job(listener, event)
return self.async_listen(event_type, onetime_listener)
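    # Example (illustrative): subscribing to a single occurrence of an event.
    #
    #     @callback
    #     def on_start(event):
    #         _LOGGER.info("seen exactly once")
    #
    #     remove = bus.async_listen_once(EVENT_HOMEASSISTANT_START, on_start)
    #     # Calling remove() before the event fires unsubscribes the wrapper.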
@callback
def _async_remove_listener(self, event_type: str, listener: Callable) -> None:
"""Remove a listener of a specific event_type.
This method must be run in the event loop.
"""
try:
self._listeners[event_type].remove(listener)
# delete event_type list if empty
if not self._listeners[event_type]:
self._listeners.pop(event_type)
except (KeyError, ValueError):
            # KeyError if the event_type did not exist
            # ValueError if the listener was not registered for event_type
_LOGGER.warning("Unable to remove unknown listener %s", listener)
class State:
"""Object to represent a state within the state machine.
entity_id: the entity that is represented.
state: the state of the entity
attributes: extra information on entity and state
last_changed: last time the state was changed, not the attributes.
last_updated: last time this object was updated.
context: Context in which it was created
"""
__slots__ = [
"entity_id",
"state",
"attributes",
"last_changed",
"last_updated",
"context",
]
def __init__(
self,
entity_id: str,
state: Any,
attributes: Optional[Dict] = None,
last_changed: Optional[datetime.datetime] = None,
last_updated: Optional[datetime.datetime] = None,
context: Optional[Context] = None,
# Temp, because database can still store invalid entity IDs
# Remove with 1.0 or in 2020.
temp_invalid_id_bypass: Optional[bool] = False,
) -> None:
"""Initialize a new state."""
state = str(state)
if not valid_entity_id(entity_id) and not temp_invalid_id_bypass:
raise InvalidEntityFormatError(
(
"Invalid entity id encountered: {}. "
"Format should be <domain>.<object_id>"
).format(entity_id)
)
if not valid_state(state):
raise InvalidStateError(
(
"Invalid state encountered for entity id: {}. "
"State max length is 255 characters."
).format(entity_id)
)
self.entity_id = entity_id.lower()
self.state = state # type: str
self.attributes = MappingProxyType(attributes or {})
self.last_updated = last_updated or dt_util.utcnow()
self.last_changed = last_changed or self.last_updated
self.context = context or Context()
@property
def domain(self) -> str:
"""Domain of this state."""
return split_entity_id(self.entity_id)[0]
@property
def object_id(self) -> str:
"""Object id of this state."""
return split_entity_id(self.entity_id)[1]
@property
def name(self) -> str:
"""Name of this state."""
return self.attributes.get(ATTR_FRIENDLY_NAME) or self.object_id.replace(
"_", " "
)
def as_dict(self) -> Dict:
"""Return a dict representation of the State.
Async friendly.
To be used for JSON serialization.
Ensures: state == State.from_dict(state.as_dict())
"""
return {
"entity_id": self.entity_id,
"state": self.state,
"attributes": dict(self.attributes),
"last_changed": self.last_changed,
"last_updated": self.last_updated,
"context": self.context.as_dict(),
}
@classmethod
def from_dict(cls, json_dict: Dict) -> Any:
"""Initialize a state from a dict.
Async friendly.
        Ensures: state == State.from_dict(state.as_dict())
"""
if not (json_dict and "entity_id" in json_dict and "state" in json_dict):
return None
last_changed = json_dict.get("last_changed")
if isinstance(last_changed, str):
last_changed = dt_util.parse_datetime(last_changed)
last_updated = json_dict.get("last_updated")
if isinstance(last_updated, str):
last_updated = dt_util.parse_datetime(last_updated)
context = json_dict.get("context")
if context:
context = Context(id=context.get("id"), user_id=context.get("user_id"))
return cls(
json_dict["entity_id"],
json_dict["state"],
json_dict.get("attributes"),
last_changed,
last_updated,
context,
)
def __eq__(self, other: Any) -> bool:
"""Return the comparison of the state."""
return ( # type: ignore
self.__class__ == other.__class__
and self.entity_id == other.entity_id
and self.state == other.state
and self.attributes == other.attributes
and self.context == other.context
)
def __repr__(self) -> str:
"""Return the representation of the states."""
attrs = (
"; {}".format(util.repr_helper(self.attributes)) if self.attributes else ""
)
return "<state {}={}{} @ {}>".format(
self.entity_id,
self.state,
attrs,
dt_util.as_local(self.last_changed).isoformat(),
)
class StateMachine:
"""Helper class that tracks the state of different entities."""
def __init__(self, bus: EventBus, loop: asyncio.events.AbstractEventLoop) -> None:
"""Initialize state machine."""
self._states = {} # type: Dict[str, State]
self._bus = bus
self._loop = loop
def entity_ids(self, domain_filter: Optional[str] = None) -> List[str]:
"""List of entity ids that are being tracked."""
future = run_callback_threadsafe(
self._loop, self.async_entity_ids, domain_filter
)
return future.result() # type: ignore
@callback
def async_entity_ids(self, domain_filter: Optional[str] = None) -> List[str]:
"""List of entity ids that are being tracked.
This method must be run in the event loop.
"""
if domain_filter is None:
return list(self._states.keys())
domain_filter = domain_filter.lower()
return [
state.entity_id
for state in self._states.values()
if state.domain == domain_filter
]
def all(self) -> List[State]:
"""Create a list of all states."""
return run_callback_threadsafe( # type: ignore
self._loop, self.async_all
).result()
@callback
def async_all(self) -> List[State]:
"""Create a list of all states.
This method must be run in the event loop.
"""
return list(self._states.values())
def get(self, entity_id: str) -> Optional[State]:
"""Retrieve state of entity_id or None if not found.
Async friendly.
"""
return self._states.get(entity_id.lower())
def is_state(self, entity_id: str, state: str) -> bool:
"""Test if entity exists and is in specified state.
Async friendly.
"""
state_obj = self.get(entity_id)
return state_obj is not None and state_obj.state == state
def remove(self, entity_id: str) -> bool:
"""Remove the state of an entity.
Returns boolean to indicate if an entity was removed.
"""
return run_callback_threadsafe( # type: ignore
self._loop, self.async_remove, entity_id
).result()
@callback
def async_remove(self, entity_id: str) -> bool:
"""Remove the state of an entity.
Returns boolean to indicate if an entity was removed.
This method must be run in the event loop.
"""
entity_id = entity_id.lower()
old_state = self._states.pop(entity_id, None)
if old_state is None:
return False
self._bus.async_fire(
EVENT_STATE_CHANGED,
{"entity_id": entity_id, "old_state": old_state, "new_state": None},
)
return True
def set(
self,
entity_id: str,
new_state: Any,
attributes: Optional[Dict] = None,
force_update: bool = False,
context: Optional[Context] = None,
) -> None:
"""Set the state of an entity, add entity if it does not exist.
Attributes is an optional dict to specify attributes of this state.
If you just update the attributes and not the state, last changed will
not be affected.
"""
run_callback_threadsafe(
self._loop,
self.async_set,
entity_id,
new_state,
attributes,
force_update,
context,
).result()
@callback
def async_set(
self,
entity_id: str,
new_state: Any,
attributes: Optional[Dict] = None,
force_update: bool = False,
context: Optional[Context] = None,
) -> None:
"""Set the state of an entity, add entity if it does not exist.
Attributes is an optional dict to specify attributes of this state.
If you just update the attributes and not the state, last changed will
not be affected.
This method must be run in the event loop.
"""
entity_id = entity_id.lower()
new_state = str(new_state)
attributes = attributes or {}
old_state = self._states.get(entity_id)
if old_state is None:
same_state = False
same_attr = False
last_changed = None
else:
same_state = old_state.state == new_state and not force_update
same_attr = old_state.attributes == MappingProxyType(attributes)
last_changed = old_state.last_changed if same_state else None
if same_state and same_attr:
return
if context is None:
context = Context()
state = State(entity_id, new_state, attributes, last_changed, None, context)
self._states[entity_id] = state
self._bus.async_fire(
EVENT_STATE_CHANGED,
{"entity_id": entity_id, "old_state": old_state, "new_state": state},
EventOrigin.local,
context,
)
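    # Example (illustrative): last_changed semantics. With the same state
    # string, only last_updated advances; a new state string resets both.
    #
    #     hass.states.async_set("light.kitchen", "on", {"brightness": 100})
    #     hass.states.async_set("light.kitchen", "on", {"brightness": 200})
    #     # -> last_changed preserved, last_updated advanced
    #     hass.states.async_set("light.kitchen", "off")
    #     # -> last_changed and last_updated both reset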
class Service:
"""Representation of a callable service."""
__slots__ = ["func", "schema", "is_callback", "is_coroutinefunction"]
def __init__(
self,
func: Callable,
schema: Optional[vol.Schema],
context: Optional[Context] = None,
) -> None:
"""Initialize a service."""
self.func = func
self.schema = schema
# Properly detect wrapped functions
while isinstance(func, functools.partial):
func = func.func
self.is_callback = is_callback(func)
self.is_coroutinefunction = asyncio.iscoroutinefunction(func)
class ServiceCall:
"""Representation of a call to a service."""
__slots__ = ["domain", "service", "data", "context"]
def __init__(
self,
domain: str,
service: str,
data: Optional[Dict] = None,
context: Optional[Context] = None,
) -> None:
"""Initialize a service call."""
self.domain = domain.lower()
self.service = service.lower()
self.data = MappingProxyType(data or {})
self.context = context or Context()
def __repr__(self) -> str:
"""Return the representation of the service."""
if self.data:
return "<ServiceCall {}.{} (c:{}): {}>".format(
self.domain, self.service, self.context.id, util.repr_helper(self.data)
)
return "<ServiceCall {}.{} (c:{})>".format(
self.domain, self.service, self.context.id
)
class ServiceRegistry:
"""Offer the services over the eventbus."""
def __init__(self, hass: HomeAssistant) -> None:
"""Initialize a service registry."""
self._services = {} # type: Dict[str, Dict[str, Service]]
self._hass = hass
@property
def services(self) -> Dict[str, Dict[str, Service]]:
"""Return dictionary with per domain a list of available services."""
return run_callback_threadsafe( # type: ignore
self._hass.loop, self.async_services
).result()
@callback
def async_services(self) -> Dict[str, Dict[str, Service]]:
"""Return dictionary with per domain a list of available services.
This method must be run in the event loop.
"""
return {domain: self._services[domain].copy() for domain in self._services}
def has_service(self, domain: str, service: str) -> bool:
"""Test if specified service exists.
Async friendly.
"""
return service.lower() in self._services.get(domain.lower(), [])
def register(
self,
domain: str,
service: str,
service_func: Callable,
schema: Optional[vol.Schema] = None,
) -> None:
"""
Register a service.
Schema is called to coerce and validate the service data.
"""
run_callback_threadsafe(
self._hass.loop, self.async_register, domain, service, service_func, schema
).result()
@callback
def async_register(
self,
domain: str,
service: str,
service_func: Callable,
schema: Optional[vol.Schema] = None,
) -> None:
"""
Register a service.
Schema is called to coerce and validate the service data.
This method must be run in the event loop.
"""
domain = domain.lower()
service = service.lower()
service_obj = Service(service_func, schema)
if domain in self._services:
self._services[domain][service] = service_obj
else:
self._services[domain] = {service: service_obj}
self._hass.bus.async_fire(
EVENT_SERVICE_REGISTERED, {ATTR_DOMAIN: domain, ATTR_SERVICE: service}
)
def remove(self, domain: str, service: str) -> None:
"""Remove a registered service from service handler."""
run_callback_threadsafe(
self._hass.loop, self.async_remove, domain, service
).result()
@callback
def async_remove(self, domain: str, service: str) -> None:
"""Remove a registered service from service handler.
This method must be run in the event loop.
"""
domain = domain.lower()
service = service.lower()
if service not in self._services.get(domain, {}):
_LOGGER.warning("Unable to remove unknown service %s/%s.", domain, service)
return
self._services[domain].pop(service)
self._hass.bus.async_fire(
EVENT_SERVICE_REMOVED, {ATTR_DOMAIN: domain, ATTR_SERVICE: service}
)
def call(
self,
domain: str,
service: str,
service_data: Optional[Dict] = None,
blocking: bool = False,
context: Optional[Context] = None,
) -> Optional[bool]:
"""
Call a service.
Specify blocking=True to wait till service is executed.
Waits a maximum of SERVICE_CALL_LIMIT.
        If blocking=True, returns a boolean indicating whether the service
        executed successfully within SERVICE_CALL_LIMIT.
This method will fire an event to call the service.
This event will be picked up by this ServiceRegistry and any
other ServiceRegistry that is listening on the EventBus.
Because the service is sent as an event you are not allowed to use
the keys ATTR_DOMAIN and ATTR_SERVICE in your service_data.
"""
return run_coroutine_threadsafe( # type: ignore
self.async_call(domain, service, service_data, blocking, context),
self._hass.loop,
).result()
async def async_call(
self,
domain: str,
service: str,
service_data: Optional[Dict] = None,
blocking: bool = False,
context: Optional[Context] = None,
) -> Optional[bool]:
"""
Call a service.
Specify blocking=True to wait till service is executed.
Waits a maximum of SERVICE_CALL_LIMIT.
        If blocking=True, returns a boolean indicating whether the service
        executed successfully within SERVICE_CALL_LIMIT.
This method will fire an event to call the service.
This event will be picked up by this ServiceRegistry and any
other ServiceRegistry that is listening on the EventBus.
Because the service is sent as an event you are not allowed to use
the keys ATTR_DOMAIN and ATTR_SERVICE in your service_data.
This method is a coroutine.
"""
domain = domain.lower()
service = service.lower()
context = context or Context()
service_data = service_data or {}
try:
handler = self._services[domain][service]
except KeyError:
raise ServiceNotFound(domain, service) from None
if handler.schema:
processed_data = handler.schema(service_data)
else:
processed_data = service_data
service_call = ServiceCall(domain, service, processed_data, context)
self._hass.bus.async_fire(
EVENT_CALL_SERVICE,
{
ATTR_DOMAIN: domain.lower(),
ATTR_SERVICE: service.lower(),
ATTR_SERVICE_DATA: service_data,
},
context=context,
)
if not blocking:
self._hass.async_create_task(self._safe_execute(handler, service_call))
return None
try:
with timeout(SERVICE_CALL_LIMIT):
await asyncio.shield(self._execute_service(handler, service_call))
return True
except asyncio.TimeoutError:
return False
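    # Example (illustrative): awaiting a service call from inside the loop.
    #
    #     ok = await hass.services.async_call(
    #         "light", "turn_on", {"entity_id": "light.kitchen"}, blocking=True
    #     )
    #     # ok is True on success, False if SERVICE_CALL_LIMIT was exceeded,
    #     # and None when blocking=False (fire and forget).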
async def _safe_execute(self, handler: Service, service_call: ServiceCall) -> None:
"""Execute a service and catch exceptions."""
try:
await self._execute_service(handler, service_call)
except Unauthorized:
_LOGGER.warning(
"Unauthorized service called %s/%s",
service_call.domain,
service_call.service,
)
except Exception: # pylint: disable=broad-except
_LOGGER.exception("Error executing service %s", service_call)
async def _execute_service(
self, handler: Service, service_call: ServiceCall
) -> None:
"""Execute a service."""
if handler.is_callback:
handler.func(service_call)
elif handler.is_coroutinefunction:
await handler.func(service_call)
else:
await self._hass.async_add_executor_job(handler.func, service_call)
class Config:
"""Configuration settings for Home Assistant."""
def __init__(self, hass: HomeAssistant) -> None:
"""Initialize a new config object."""
self.hass = hass
self.latitude = 0 # type: float
self.longitude = 0 # type: float
self.elevation = 0 # type: int
self.location_name = "Home" # type: str
self.time_zone = dt_util.UTC # type: datetime.tzinfo
self.units = METRIC_SYSTEM # type: UnitSystem
self.config_source = "default" # type: str
# If True, pip install is skipped for requirements on startup
self.skip_pip = False # type: bool
# List of loaded components
self.components = set() # type: set
# API (HTTP) server configuration, see components.http.ApiConfig
self.api = None # type: Optional[Any]
# Directory that holds the configuration
self.config_dir = None # type: Optional[str]
# List of allowed external dirs to access
self.whitelist_external_dirs = set() # type: Set[str]
def distance(self, lat: float, lon: float) -> Optional[float]:
"""Calculate distance from Home Assistant.
Async friendly.
"""
return self.units.length(
location.distance(self.latitude, self.longitude, lat, lon), "m"
)
def path(self, *path: str) -> str:
"""Generate path to the file within the configuration directory.
Async friendly.
"""
if self.config_dir is None:
raise HomeAssistantError("config_dir is not set")
return os.path.join(self.config_dir, *path)
def is_allowed_path(self, path: str) -> bool:
"""Check if the path is valid for access from outside."""
assert path is not None
thepath = pathlib.Path(path)
try:
            # The file path does not have to exist (its parent should)
if thepath.exists():
thepath = thepath.resolve()
else:
thepath = thepath.parent.resolve()
except (FileNotFoundError, RuntimeError, PermissionError):
return False
for whitelisted_path in self.whitelist_external_dirs:
try:
thepath.relative_to(whitelisted_path)
return True
except ValueError:
pass
return False
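    # Example (illustrative): only paths under a whitelisted directory are
    # allowed.
    #
    #     config.whitelist_external_dirs = {"/config/www"}
    #     config.is_allowed_path("/config/www/cam.jpg")  # True
    #     config.is_allowed_path("/etc/passwd")          # False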
def as_dict(self) -> Dict:
"""Create a dictionary representation of the configuration.
Async friendly.
"""
time_zone = dt_util.UTC.zone
if self.time_zone and getattr(self.time_zone, "zone"):
time_zone = getattr(self.time_zone, "zone")
return {
"latitude": self.latitude,
"longitude": self.longitude,
"elevation": self.elevation,
"unit_system": self.units.as_dict(),
"location_name": self.location_name,
"time_zone": time_zone,
"components": self.components,
"config_dir": self.config_dir,
"whitelist_external_dirs": self.whitelist_external_dirs,
"version": __version__,
"config_source": self.config_source,
}
def set_time_zone(self, time_zone_str: str) -> None:
"""Help to set the time zone."""
time_zone = dt_util.get_time_zone(time_zone_str)
if time_zone:
self.time_zone = time_zone
dt_util.set_default_time_zone(time_zone)
else:
raise ValueError("Received invalid time zone {}".format(time_zone_str))
@callback
def _update(
self,
*,
source: str,
latitude: Optional[float] = None,
longitude: Optional[float] = None,
elevation: Optional[int] = None,
unit_system: Optional[str] = None,
location_name: Optional[str] = None,
time_zone: Optional[str] = None,
) -> None:
"""Update the configuration from a dictionary."""
self.config_source = source
if latitude is not None:
self.latitude = latitude
if longitude is not None:
self.longitude = longitude
if elevation is not None:
self.elevation = elevation
if unit_system is not None:
if unit_system == CONF_UNIT_SYSTEM_IMPERIAL:
self.units = IMPERIAL_SYSTEM
else:
self.units = METRIC_SYSTEM
if location_name is not None:
self.location_name = location_name
if time_zone is not None:
self.set_time_zone(time_zone)
async def async_update(self, **kwargs: Any) -> None:
"""Update the configuration from a dictionary."""
self._update(source=SOURCE_STORAGE, **kwargs)
await self.async_store()
self.hass.bus.async_fire(EVENT_CORE_CONFIG_UPDATE, kwargs)
async def async_load(self) -> None:
"""Load [homeassistant] core config."""
store = self.hass.helpers.storage.Store(
CORE_STORAGE_VERSION, CORE_STORAGE_KEY, private=True
)
data = await store.async_load()
if not data:
return
self._update(source=SOURCE_STORAGE, **data)
async def async_store(self) -> None:
"""Store [homeassistant] core config."""
time_zone = dt_util.UTC.zone
if self.time_zone and getattr(self.time_zone, "zone"):
time_zone = getattr(self.time_zone, "zone")
data = {
"latitude": self.latitude,
"longitude": self.longitude,
"elevation": self.elevation,
"unit_system": self.units.name,
"location_name": self.location_name,
"time_zone": time_zone,
}
store = self.hass.helpers.storage.Store(
CORE_STORAGE_VERSION, CORE_STORAGE_KEY, private=True
)
await store.async_save(data)
def _async_create_timer(hass: HomeAssistant) -> None:
"""Create a timer that will start on HOMEASSISTANT_START."""
handle = None
def schedule_tick(now: datetime.datetime) -> None:
"""Schedule a timer tick when the next second rolls around."""
nonlocal handle
slp_seconds = 1 - (now.microsecond / 10 ** 6)
target = monotonic() + slp_seconds
handle = hass.loop.call_later(slp_seconds, fire_time_event, target)
@callback
def fire_time_event(target: float) -> None:
"""Fire next time event."""
now = dt_util.utcnow()
hass.bus.async_fire(EVENT_TIME_CHANGED, {ATTR_NOW: now})
# If we are more than a second late, a tick was missed
late = monotonic() - target
if late > 1:
hass.bus.async_fire(EVENT_TIMER_OUT_OF_SYNC, {ATTR_SECONDS: late})
schedule_tick(now)
@callback
def stop_timer(_: Event) -> None:
"""Stop the timer."""
if handle is not None:
handle.cancel()
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, stop_timer)
_LOGGER.info("Timer:starting")
schedule_tick(dt_util.utcnow())
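# Worked example (illustrative): if utcnow() reports 12:00:00.250000, then
# slp_seconds = 1 - 250000 / 10**6 = 0.75, so the next EVENT_TIME_CHANGED
# fires on the 12:00:01 boundary. A tick arriving more than one second after
# its monotonic() target additionally fires EVENT_TIMER_OUT_OF_SYNC.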
|
|
# Copyright 2012 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Starts up an appserver and runs end-to-end tests against it.
Instead of running this script directly, use the 'server_tests' shell script,
which sets up the PYTHONPATH and other necessary environment variables.
The actual test cases reside in server_test_cases.py.
Use -k to select particular test classes or methods by a substring match:
tools/server_tests -k ConfigTests
tools/server_tests -k test_delete_and_restore
Specify -v to show the name of each test as it runs (rather than just dots).
Specify -s to see the messages printed by all tests as they run (by default,
stdout/stderr will be captured and then shown only for failing tests).
"""
from __future__ import print_function
import os
import pytest
import re
import signal
import smtpd
import subprocess
import sys
import tempfile
import threading
import time
from model import *
import remote_api
import setup_pf as setup
class ProcessRunner(threading.Thread):
"""A thread that starts a subprocess, collects its output, and stops it."""
READY_RE = re.compile('') # this output means the process is ready
ERROR_RE = re.compile('ERROR|CRITICAL') # output indicating failure
OMIT_RE = re.compile('INFO |WARNING ') # don't bother showing these lines
# this output is for appserver's port error
    BIND_RE = re.compile(r'BindError: Unable to bind (.*):(\d+)')
debug = False # set to True to see all log messages, ignoring OMIT_RE
def __init__(self, name, args):
threading.Thread.__init__(self)
self.name = name
self.args = args
self.process = None # subprocess.Popen instance
self.ready = False # process is running and ready
self.failed = False # process emitted an error message in its output
self.output = []
def run(self):
"""Starts the subprocess and collects its output while it runs."""
self.process = subprocess.Popen(
self.args, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
close_fds=True)
# Each subprocess needs a thread to be watching it and absorbing its
# output; otherwise it will block when its stdout pipe buffer fills.
self.start_watching_output(self.process.stdout)
self.start_watching_output(self.process.stderr)
self.process.wait()
def start_watching_output(self, output):
stdout_thread = threading.Thread(target=self.watch_output, args=(output,))
stdout_thread.setDaemon(True)
stdout_thread.start()
def watch_output(self, output):
while self.process.poll() is None:
line = output.readline()
if not line: # process finished
return
if self.READY_RE.search(line):
self.ready = True
if not self.debug and self.OMIT_RE.search(line): # omit these lines
continue
if self.ERROR_RE.search(line): # something went wrong
self.failed = True
if line.strip():
self.output.append(line.strip('\n'))
def stop(self):
"""Terminates the subprocess and returns its status code."""
if self.process: # started
if self.isAlive(): # still running
os.kill(self.process.pid, signal.SIGINT)
else:
self.failed = self.process.returncode != 0
self.clean_up()
if self.failed:
self.flush_output()
print('%s failed (status %s).\n' % (
self.name, self.process.returncode), file=sys.stderr)
else:
print('%s stopped.' % self.name, file=sys.stderr)
def flush_output(self):
"""Flushes the buffered output from this subprocess to stderr."""
self.output, lines_to_print = [], self.output
if lines_to_print:
sys.stderr.write('\n--- output from %s ---\n' % self.name)
sys.stderr.write('\n'.join(lines_to_print) + '\n\n')
def wait_until_ready(self, timeout=10):
"""Waits until the subprocess has logged that it is ready."""
fail_time = time.time() + timeout
while self.isAlive() and not self.ready and time.time() < fail_time:
for jiffy in range(10): # wait one second, aborting early if ready
if not self.ready:
time.sleep(0.1)
if not self.ready:
self.flush_output() # after each second, show output
if self.ready:
print('%s started.' % self.name, file=sys.stderr)
else:
raise RuntimeError('%s failed to start.' % self.name)
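    # Example (illustrative): a subclass defines READY_RE to match its
    # subprocess's startup banner, then callers do:
    #
    #     runner = AppServerRunner(8081, 8025)
    #     runner.start()
    #     runner.wait_until_ready(timeout=30)  # raises RuntimeError on failure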
def clean_up(self):
pass
class AppServerRunner(ProcessRunner):
"""Manages a dev_appserver subprocess."""
READY_RE = re.compile('Starting module "default" running at|Running application')
OMIT_RE = re.compile(
'INFO |WARNING |DeprecationWarning: get_request_cpu_usage')
def __init__(self, port, smtp_port):
self.__datastore_file = tempfile.NamedTemporaryFile()
ProcessRunner.__init__(self, 'appserver', [
os.environ['PYTHON'],
os.path.join(os.environ['APPENGINE_DIR'], 'dev_appserver.py'),
os.environ['APP_DIR'],
'--port=%s' % port,
'--datastore_path=%s' % self.__datastore_file.name,
'--require_indexes',
'--smtp_host=localhost',
'--smtp_port=%d' % smtp_port,
# By default, if we perform a datastore write and a query in this
# order, the query may see the data before the write is applied.
            # This is the behavior in production, but it is inconvenient
            # for server tests, because we often perform a datastore write
            # and then test whether the result is visible in the web page.
            # This flag makes sure that queries see the data after the
            # write is applied.
'--datastore_consistency_policy=consistent',
# We'll get notified if we're behind when we run local instances,
# and we don't want this to get in the way of automated tests (it
# will stop everything and wait for user input when it asks
# permission to check).
'--skip_sdk_update_check',
])
def flush_output(self):
"""Flushes the buffered output from this subprocess to stderr."""
self.output, original_output = [], self.output
if original_output:
original_output_text = '\n'.join(original_output)
            # search() on a compiled pattern takes a start position, not
            # flags, as its second argument; flags belong in compile().
            match = self.BIND_RE.search(original_output_text)
if match:
host = match.group(1)
port = match.group(2)
                sys.stderr.write('%s failed: %s port %s is already in use.\n' %
                                 (self.name, host, port))
                sys.stderr.write('Please shut down any local Person Finder '
                                 'server or other running server test.\n\n')
else:
sys.stderr.write('\n--- output from %s ---\n' % self.name)
sys.stderr.write(original_output_text + '\n\n')
class MailThread(threading.Thread):
"""Runs an SMTP server and stores the incoming messages."""
messages = []
def __init__(self, port):
threading.Thread.__init__(self)
self.port = port
self.stop_requested = False
def run(self):
class MailServer(smtpd.SMTPServer):
def process_message(self, peer, mailfrom, rcpttos, data):
print('mail from:', mailfrom, 'to:', rcpttos, file=sys.stderr)
MailThread.messages.append(
{'from': mailfrom, 'to': rcpttos, 'data': data})
try:
server = MailServer(('localhost', self.port), None)
        except Exception as e:
print('SMTP server failed: %s' % e, file=sys.stderr)
sys.exit(-1)
print('SMTP server started.', file=sys.stderr)
while not self.stop_requested:
smtpd.asyncore.loop(timeout=0.5, count=1)
print('SMTP server stopped.', file=sys.stderr)
def stop(self):
self.stop_requested = True
def wait_until_ready(self, timeout=10):
pass
def flush_output(self):
pass
class PyTestPlugin:
"""A plugin for pytest that does the setup and teardown for server tests."""
def __init__(self):
self.threads = []
def pytest_addoption(self, parser):
group = parser.getgroup(
'server_tests', 'App Engine server testing', after='general')
group.addoption('--server',
help='appserver URL (default: localhost:8081)')
group.addoption('--port', type='int', default=8081,
help='appserver port number (default: 8081)')
group.addoption('--mailport', type='int', default=8025,
help='SMTP server port number (default: 8025)')
def pytest_configure(self, config):
options = config.option
url = options.server or 'localhost:%d' % options.port
secure, host, port, path = remote_api.parse_url(url)
if host == 'localhost':
# We need to start up a clean new appserver for testing.
self.threads.append(AppServerRunner(options.port, options.mailport))
self.threads.append(MailThread(options.mailport))
for thread in self.threads:
thread.start()
for thread in self.threads:
thread.wait_until_ready()
# Connect to the datastore.
remote_api.connect(url, server_type='local')
# Reset the datastore for the first test.
reset_data()
# Give the tests access to configuration information.
config.hostport = '%s:%d' % (host, port)
config.mail_server = MailThread
def pytest_unconfigure(self, config):
for thread in self.threads:
if hasattr(thread, 'flush_output'):
thread.flush_output()
for thread in self.threads:
thread.stop()
thread.join()
def pytest_runtest_setup(self):
MailThread.messages = []
def reset_data():
"""Reset the datastore to a known state, populated with test data."""
setup.reset_datastore()
db.put([
Authorization.create(
'haiti', 'test_key', domain_write_permission='test.google.com'),
Authorization.create(
'haiti', 'domain_test_key',
domain_write_permission='mytestdomain.com'),
Authorization.create(
'haiti', 'reviewed_test_key',
domain_write_permission='test.google.com',
mark_notes_reviewed=True),
Authorization.create(
'haiti', 'not_allow_believed_dead_test_key',
domain_write_permission='test.google.com',
believed_dead_permission=False),
Authorization.create(
'haiti', 'allow_believed_dead_test_key',
domain_write_permission='test.google.com',
believed_dead_permission=True),
Authorization.create(
'haiti', 'other_key', domain_write_permission='other.google.com'),
Authorization.create(
'haiti', 'read_key', read_permission=True),
Authorization.create(
'haiti', 'full_read_key', full_read_permission=True),
Authorization.create(
'haiti', 'search_key', search_permission=True),
Authorization.create(
'haiti', 'subscribe_key', subscribe_permission=True),
Authorization.create(
'*', 'global_test_key',
domain_write_permission='globaltestdomain.com'),
# An API key which can be used for SMS API.
Authorization.create(
'*',
'sms_key',
search_permission=True,
domain_write_permission='*'),
])
def monkeypatch_pytest_terminal_reporter():
"""Improves the output produced by _pytest.terminal.TerminalReporter."""
import _pytest.terminal
def write_sep(self, sep, title=None, **markup):
if sep == '_':
markup['cyan'] = 1 # highlight the failed test name in cyan
self._tw.line() # put a blank line before the failure report
self._tw.sep(sep, title, **markup)
_pytest.terminal.TerminalReporter.write_sep = write_sep
if __name__ == '__main__':
monkeypatch_pytest_terminal_reporter()
# Run the tests, using sys.exit to set exit status (nonzero for failure).
sys.exit(pytest.main(plugins=[PyTestPlugin()]))
|
|
# This class is a subclass of FoxySheepListener that rewrites the
# parse tree generated by ANTLR4. It "flattens" flat operators that ANTLR
# parses as left associative.
from antlr4.RuleContext import RuleContext
from antlr4.ParserRuleContext import ParserRuleContext
from antlr4.tree.Tree import TerminalNodeImpl, ParseTree
from antlr4.Token import CommonToken
from FoxySheep.generated.FoxySheepListener import *
from FoxySheep.generated.FoxySheepParser import FoxySheepParser
def addChild(parent:ParserRuleContext, child:ParseTree, i:int = None):
"""Does what RuleContext.addChild is supposed to do. The ANTLR4 Python3 target
doesn't follow the same API as the Java target. Some classes in the hierarchy
don't have addChild, while others do."""
try:
if i is None:
parent.children.append(child)
else:
parent.children.insert(i, child)
    except AttributeError:
        # parent has no children list yet (or it is None); create one.
        parent.children = [child]
def adopt(parent:ParserRuleContext, child:ParseTree):
"""Convenience function to create a bidirectional parent-child relationship."""
addChild(parent, child)
child.parentCtx = parent
def makeNumber(parent:ParserRuleContext, n:int):
"""This node represents a virtual ParseTree node that does not come from
the parser but rather is constructed via a rewriting rule. For example,
the expression "a-b" is parsed as "Plus[a, Times[-1, b]]" in exitPlusOp(),
so a node for "-1" needs to be created even though "-1" does not appear as
a token in the token stream.
Note that makeNumber sets the NumberContext's parent but does not add
anything to parent's children."""
# The hierarchy is:
# CommonToken->TerminalNodeImpl->NumberLiteralContext
# ->NumberBaseTenContext->NumberContext->parent
digits_token = CommonToken(type=FoxySheepParser.DIGITS)
digits_token._text = str(n)
number = FoxySheepParser.NumberContext(None, FoxySheepParser.ExprContext(None, parent=parent))
number_literal = FoxySheepParser.NumberBaseTenContext(None, FoxySheepParser.NumberLiteralContext(None, parent=number))
number_literal.addTokenNode(digits_token)
addChild(number, number_literal)
return number
def flatten(ctx:ParserRuleContext):
"""Takes a ParserRuleContext of a binary operator and "flattens"
the operator if one of its operands is the same binary operator context.
This function only flattens if the operator is the same and also
keeps the operators intact."""
# If the child isn't the same construct, nothing to do.
if ctx.getChild(0).__class__ != ctx.__class__:
return
lhs = ctx.getChild(0)
op = ctx.getChild(1)
rhs = ctx.getChild(2)
lhsop = lhs.getChild(1)
# If the operator of the nested Context isn't the same, nothing to do.
# The operator is always in position 1 for infix operators. We do this
# check because some Contexts that use the same context for multiple
# operators.
if op.getSymbol().type != lhsop.getSymbol().type:
return
# Clear all children.
ctx.children.clear()
    # Add all children of lhs. (Also adds the operator of the lhs.)
    ctx.children.extend(lhs.children)
    # Keep the operator.
    ctx.children.append(op)
    # Finally, add the rhs back in.
    ctx.children.append(rhs)
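# Example (illustrative): ANTLR parses "a.b.c" left associatively as
#
#     Dot(Dot(a, '.', b), '.', c)
#
# and flatten() splices the nested context into its parent, keeping the
# operator tokens:
#
#     Dot(a, '.', b, '.', c)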
def rewriteSpan(ctx:ParserRuleContext):
opIndex = [i for (i, c) in enumerate(ctx.children) if c.getText() == ';;']
spanExpressions = []
opIndexSize = len(opIndex)
childrenSize = len(ctx.children)
# We emulate a for-loop with a while-loop. We can't use python's for
# because we need to change the index variables in the loop body.
i = 0
nextOp = 0
while True:
if nextOp + 1 < opIndexSize \
and opIndex[nextOp + 1] + 1 < childrenSize \
and isinstance(ctx.children[opIndex[nextOp + 1] + 1], FoxySheepParser.ExprContext):
# There is a next ";;"
# and there is a node after the next ";;"
# and there is a second ";;" followed by an expr.
i = opIndex[nextOp + 1] + 1
spanExpressions.append(i)
# We want nextOp to end at the last ";;" of the current expression.
nextOp += 1
else:
# There is no second ";;" belonging to this expression.
if opIndex[nextOp] + 1 < childrenSize \
                    and isinstance(ctx.children[opIndex[nextOp] + 1], FoxySheepParser.ExprContext):
# There is a node after ";;"
# and this span expression ends in an expr.
i = opIndex[nextOp] + 1
spanExpressions.append(i)
else:
# This span expression ends in the current ";;".
i = opIndex[nextOp]
spanExpressions.append(i)
# Check for end of for-loop
if i >= childrenSize or nextOp >= opIndexSize:
break
# Set up the next loop.
i += 1
nextOp +=1
# At this point spanExpressions holds the index of the last child of each
# span expression. It might be that after all of this there is nothing to do.
if len(spanExpressions) == 1:
return
# Otherwise there is more than one span expression, and we need to rewrite
# the tree replacing the Span?Context this method was invoked on with a
# TimesContext.
timesctx = FoxySheepParser.TimesContext(None, ctx)
timesctx.children = [] # Clear children
# Add each span expression as a child to timesctx.
j = 0
for i, spanExp in enumerate(spanExpressions):
# i is the index of the current span expression in spanExpressions,
# and j is the index to the beginning of the new span expression's
# children in ctx.children. We make new SpanAContext objects for each
# span expression.
span = FoxySheepParser.SpanAContext(None, ctx)
adopt(timesctx, span)
        span.children = ctx.children[j:spanExpressions[i] + 1]
        # Update j to point at the beginning of the next span expression.
j = spanExpressions[i] + 1
# Finally, detach the span this method was invoked on from its parent and
# replace with the TimesContext.
parentsChildren = ctx.parentCtx.children
ctxIndex = parentsChildren.index(ctx)
parentsChildren[ctxIndex] = timesctx
ctx.parentCtx = timesctx
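# Example (illustrative): for input like "1;;2 3;;4" the flattened span
# context has children [1, ';;', 2, 3, ';;', 4]; rewriteSpan splits it into
# Times(SpanA(1, ';;', 2), SpanA(3, ';;', 4)), matching the implicit-Times
# reading of adjacent spans.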
class PostParser(FoxySheepListener):
def exitComparison(self, ctx:FoxySheepParser.ComparisonContext):
"""Inequality[]
This function flattens and keeps the operators intact. It differs from
flatten() in that we flatten if the class is the same but don't check
if the operator is the same."""
# If the child isn't the same construct, nothing to do.
if ctx.getChild(0).__class__ != ctx.__class__:
return
lhs = ctx.getChild(0)
op = ctx.getChild(1)
rhs = ctx.getChild(2)
lhsop = lhs.getChild(1)
# This is where we differ from flatten(). We don't do the following
# check.
# if op.getSymbol().getType() != lhsop.getSymbol().getType():
# return
# Clear all children.
ctx.children.clear()
# Add all children of lhs. (Also adds the operator of the lhs.)
ctx.children.extend(lhs.children)
# Keep the operator.
ctx.children.append(op)
# Finally, add the rhs back in.
ctx.children.append(rhs)
def exitCompoundExpression(self, ctx:FoxySheepParser.CompoundExpressionContext):
"""Composition[expr1,expr2] e@*e@*e.
ANTLR4 parses this rule as right associative for some reason, so
we cannot use flatten(). The code is actually much simpler than
flatten because of the right associativity."""
childCount = ctx.getChildCount()
# If there is no RHS, nothing to do.
if childCount < 3:
return
rhs = ctx.getChild(childCount-1)
# If the RHS child isn't the same construct, nothing to do.
if rhs.__class__ != ctx.__class__:
return
# Remove RHS child.
ctx.removeLastChild()
# Add all children of rhs. (Also adds the operator of the rhs.)
ctx.children.extend(rhs.children)
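    # Example (illustrative): "a; b; c" parses right associatively as
    # CompoundExpression(a, ';', CompoundExpression(b, ';', c)); removing the
    # RHS child and splicing its children yields the flat
    # CompoundExpression(a, ';', b, ';', c).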
def exitComposition(self, ctx:FoxySheepParser.CompositionContext):
flatten(ctx)
def exitRightComposition(self, ctx:FoxySheepParser.RightCompositionContext):
flatten(ctx)
def exitStringJoin(self, ctx:FoxySheepParser.StringJoinContext):
flatten(ctx)
def exitSmallCircle(self, ctx:FoxySheepParser.SmallCircleContext):
flatten(ctx)
def exitSpan(self, ctx:FoxySheepParser.ExprContext):
"""Span[expr1,expr2,expr3] e;;e;;e
Parsing Span nodes is a complete mess."""
# The only difference between the code for exitSpanA and exitSpanB is
# the index of the rhs.
rhsIndex = 1
if isinstance(ctx, FoxySheepParser.SpanBContext):
rhsIndex = 0
# If there is no RHS expr, nothing to do.
if len(ctx.expr()) == rhsIndex:
return
# Get the RHS expr.
rhs = ctx.expr(rhsIndex)
# If the RHS child isn't a SpanA, nothing to do.
if not isinstance(rhs, FoxySheepParser.SpanAContext):
return
# Remove the last expr
ctx.removeLastChild()
# Replace it with its children
ctx.children.extend(rhs.children)
# If this is the topmost Span context, rewrite the tree.
if not (isinstance(ctx.parentCtx, FoxySheepParser.SpanAContext)
or isinstance(ctx.parentCtx, FoxySheepParser.SpanBContext)):
rewriteSpan(ctx)
def exitSpanA(self, ctx:FoxySheepParser.SpanAContext):
"""Span[expr1,expr2,expr3] e;;e;;e
Parsing Span nodes is a complete mess."""
self.exitSpan(ctx)
def exitSpanB(self, ctx:FoxySheepParser.SpanBContext):
"""Span[expr1,expr2,expr3] e;;e;;e
Parsing Span nodes is a complete mess."""
self.exitSpan(ctx)
def exitCircleDot(self, ctx:FoxySheepParser.CircleDotContext):
flatten(ctx)
def exitNonCommutativeMultiply(self, ctx:FoxySheepParser.NonCommutativeMultiplyContext):
flatten(ctx)
def exitCross(self, ctx:FoxySheepParser.CrossContext):
flatten(ctx)
def exitDot(self, ctx:FoxySheepParser.DotContext):
flatten(ctx)
def exitDiamond(self, ctx:FoxySheepParser.DiamondContext):
flatten(ctx)
def exitWedge(self, ctx:FoxySheepParser.WedgeContext):
flatten(ctx)
def exitVee(self, ctx:FoxySheepParser.VeeContext):
flatten(ctx)
def exitCircleTimes(self, ctx:FoxySheepParser.CircleTimesContext):
flatten(ctx)
def exitCenterDot(self, ctx:FoxySheepParser.CenterDotContext):
flatten(ctx)
def exitTimes(self, ctx:FoxySheepParser.TimesContext):
"""Times[expr1,expr2]
We need to flatten over both Times and implicit Times. So
flatten() isn't going to cut it because sometimes there is
no operator."""
# If the child isn't the same construct, nothing to do.
if not isinstance(ctx.getChild(0), FoxySheepParser.TimesContext):
return
lhs = ctx.expr(0)
rhs = ctx.expr(1)
# Replace children with all children of lhs. (Also adds the operator of the lhs.)
ctx.children = lhs.children
# Add the rhs back in.
ctx.children.append(rhs)
def exitStar(self, ctx:FoxySheepParser.StarContext):
flatten(ctx)
def exitVerticalTilde(self, ctx:FoxySheepParser.VerticalTildeContext):
flatten(ctx)
def exitCoproduct(self, ctx:FoxySheepParser.CoproductContext):
flatten(ctx)
def exitCap(self, ctx:FoxySheepParser.CapContext):
flatten(ctx)
def exitCup(self, ctx:FoxySheepParser.CupContext):
flatten(ctx)
def exitCirclePlus(self, ctx:FoxySheepParser.CirclePlusContext):
flatten(ctx)
def exitPlusOp(self, ctx:FoxySheepParser.PlusOpContext):
"""PlusOp[expr1,expr2]
We have to treat PlusOp special, because we have to keep the
operators intact, and only plus and minus (not PlusMinus or
MinusPlus) are flat. The situation is complicated by the fact
that Mathematica parses "a-b" as "Plus[a, Times[-1, b]]". We
Rewrite the parse tree, inserting the Times context and
changing BINARYMINUS to BINARYPLUS."""
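        # Example: "a - b*c" becomes Plus[a, Times[-1, b, c]] because the
        # existing Times on the rhs is reused rather than nested.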
# If the op isn't Plus or Minus, nothing to do.
if ctx.BINARYMINUS() is None and ctx.BINARYPLUS() is None:
return
# Since ANTLR4 parses this operator as left associative, we only
# need to check the left hand side expr.
rhs = ctx.getChild(2)
# If the operator of the PlusOp is BINARYMINUS, we rewrite the tree as
# "Plus[lhs, Times[-1, rhs]]". Note that if rhs is TIMES, we have to
# keep that TIMES flat.
if ctx.BINARYMINUS() is not None:
            # Construct a Times node, or reuse the one already on the rhs.
            if isinstance(rhs, FoxySheepParser.TimesContext):
                # rhs is already a Times: reuse it so the product stays flat.
                times = rhs
            else:
                times = FoxySheepParser.TimesContext(None, FoxySheepParser.ExprContext(None))
                ctx.children.remove(rhs)
                adopt(ctx, times)
                adopt(times, rhs)
# Add "-1" as the first child of Times.
addChild(times, makeNumber(times, -1), 0)
# Finally, we have to change operator to BINARYPLUS.
plustoken = CommonToken(type=FoxySheepParser.BINARYPLUS)
plustoken.text = '+'
plus = TerminalNodeImpl(plustoken)
# Replace minus token with plus.
ctx.children[1] = plus
plus.parentCtx = ctx
# Flatten
flatten(ctx)
def exitIntersection(self, ctx:FoxySheepParser.IntersectionContext):
flatten(ctx)
def exitUnion(self, ctx:FoxySheepParser.UnionContext):
flatten(ctx)
def exitVerticalBar(self, ctx:FoxySheepParser.VerticalBarContext):
flatten(ctx)
def exitSame(self, ctx:FoxySheepParser.SameContext):
flatten(ctx)
def exitSetContainment(self, ctx:FoxySheepParser.SetContainmentContext):
flatten(ctx)
def exitAndOr(self, ctx:FoxySheepParser.ExprContext, opText:str):
"""Unifies the code for flattening And/Nand and Or/Nor."""
# If the child isn't the same construct, nothing to do.
if ctx.getChild(0).__class__ != ctx.__class__:
return
lhs = ctx.getChild(0)
op = ctx.getChild(1)
rhs = ctx.getChild(2)
lhsop = lhs.getChild(1).getText()
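        # E.g. "a && b && c" flattens into one And and "a ⊼ b ⊼ c" into one
        # Nand, but a mix of the plain and negated forms stays nested.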
        # Here's the part that differs from flatten(): if either the child's
        # operator or this node's operator is the Nand/Nor form (opText),
        # the two operators must match exactly, or we leave the tree nested.
        if (lhsop == opText or op.getText() == opText) and op.getText() != lhsop:
            return
# Clear all children.
ctx.children.clear()
# Add all children of lhs. (Also adds the operator of the lhs.)
ctx.children.extend(lhs.children)
# Finally, add the rhs back in.
ctx.children.append(rhs)
def exitAnd(self, ctx:FoxySheepParser.AndContext):
self.exitAndOr(ctx, '\u22bc')
def exitOr(self, ctx:FoxySheepParser.OrContext):
self.exitAndOr(ctx, '\u22bd')
def exitXor(self, ctx:FoxySheepParser.XorContext):
flatten(ctx)
def exitEquivalent(self, ctx:FoxySheepParser.EquivalentContext):
flatten(ctx)
def exitAlternatives(self, ctx:FoxySheepParser.AlternativesContext):
flatten(ctx)
def exitStringExpression(self, ctx:FoxySheepParser.StringExpressionContext):
flatten(ctx)
def exitColon(self, ctx:FoxySheepParser.ColonContext):
flatten(ctx)
def exitVerticalSeparator(self, ctx:FoxySheepParser.VerticalSeparatorContext):
flatten(ctx)
|
|
"""
Fast image interpolation using a pyramid.
"""
from __future__ import print_function
# TODO: This allows you to use "true" div (vs floordiv) in Python2 for the / operator;
# unfortunately it appears to also replace the overloads we've carefully added for Halide.
# Figure out if it's possible to allow this to leave our Halide stuff unaffected.
#
# from __future__ import division
import time, sys
import halide as hl
from datetime import datetime
from scipy.misc import imread, imsave
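# NOTE: scipy.misc.imread/imsave were removed in SciPy 1.2; on newer SciPy
# installs, imageio provides similar read/write functions.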
import numpy as np
import os.path
int_t = hl.Int(32)
float_t = hl.Float(32)
def get_interpolate(input, levels):
"""
    Builds the function, schedules it, and invokes the JIT compiler.
:return: halide.hl.Func
"""
# THE ALGORITHM
downsampled = [hl.Func('downsampled%d'%i) for i in range(levels)]
downx = [hl.Func('downx%d'%l) for l in range(levels)]
interpolated = [hl.Func('interpolated%d'%i) for i in range(levels)]
# level_widths = [hl.Param(int_t,'level_widths%d'%i) for i in range(levels)]
# level_heights = [hl.Param(int_t,'level_heights%d'%i) for i in range(levels)]
upsampled = [hl.Func('upsampled%d'%l) for l in range(levels)]
upsampledx = [hl.Func('upsampledx%d'%l) for l in range(levels)]
x = hl.Var('x')
y = hl.Var('y')
c = hl.Var('c')
clamped = hl.Func('clamped')
clamped[x, y, c] = input[hl.clamp(x, 0, input.width()-1), hl.clamp(y, 0, input.height()-1), c]
# This triggers a bug in llvm 3.3 (3.2 and trunk are fine), so we
# rewrite it in a way that doesn't trigger the bug. The rewritten
# form assumes the input alpha is zero or one.
# downsampled[0][x, y, c] = hl.select(c < 3, clamped[x, y, c] * clamped[x, y, 3], clamped[x, y, 3])
downsampled[0][x,y,c] = clamped[x, y, c] * clamped[x, y, 3]
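    # Level 0 holds the image with RGB premultiplied by alpha; channel 3
    # carries alpha through the pyramid (assumed zero or one, per the note
    # above).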
for l in range(1, levels):
        prev = downsampled[l-1]
if l == 4:
# Also add a boundary condition at a middle pyramid level
# to prevent the footprint of the downsamplings to extend
# too far off the base image. Otherwise we look 512
# pixels off each edge.
w = input.width()/(1 << l)
h = input.height()/(1 << l)
prev = hl.lambda3D(x, y, c, prev[hl.clamp(x, 0, w), hl.clamp(y, 0, h), c])
downx[l][x,y,c] = (prev[x*2-1,y,c] + 2.0 * prev[x*2,y,c] + prev[x*2+1,y,c]) * 0.25
downsampled[l][x,y,c] = (downx[l][x,y*2-1,c] + 2.0 * downx[l][x,y*2,c] + downx[l][x,y*2+1,c]) * 0.25
interpolated[levels-1][x,y,c] = downsampled[levels-1][x,y,c]
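    # Upward pass: linearly upsample each coarser level and composite it
    # under the current level, using (1 - alpha) as the blend weight so
    # holes are filled from below.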
for l in range(levels-1)[::-1]:
upsampledx[l][x,y,c] = (interpolated[l+1][x/2, y, c] + interpolated[l+1][(x+1)/2, y, c]) / 2.0
upsampled[l][x,y,c] = (upsampledx[l][x, y/2, c] + upsampledx[l][x, (y+1)/2, c]) / 2.0
interpolated[l][x,y,c] = downsampled[l][x,y,c] + (1.0 - downsampled[l][x,y,3]) * upsampled[l][x,y,c]
normalize = hl.Func('normalize')
normalize[x,y,c] = interpolated[0][x, y, c] / interpolated[0][x, y, 3]
final = hl.Func('final')
final[x,y,c] = normalize[x,y,c]
print("Finished function setup.")
# THE SCHEDULE
    target = hl.get_target_from_environment()
    if target.has_gpu_feature():
        sched = 4
    else:
        sched = 2
if sched == 0:
print ("Flat schedule.")
for l in range(levels):
downsampled[l].compute_root()
interpolated[l].compute_root()
final.compute_root()
elif sched == 1:
print("Flat schedule with vectorization.")
for l in range(levels):
downsampled[l].compute_root().vectorize(x, 4)
interpolated[l].compute_root().vectorize(x, 4)
final.compute_root()
elif sched == 2:
print("Flat schedule with parallelization + vectorization")
xi, yi = hl.Var('xi'), hl.Var('yi')
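        # Interleave storage (c innermost) and vectorize across the four
        # channels so each pixel's RGBA is handled as one vector.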
clamped.compute_root().parallel(y).bound(c, 0, 4).reorder(c, x, y).reorder_storage(c, x, y).vectorize(c, 4)
        for l in range(1, levels - 1):
            downsampled[l].compute_root().parallel(y).reorder(c, x, y).reorder_storage(c, x, y).vectorize(c, 4)
interpolated[l].compute_root().parallel(y).reorder(c, x, y).reorder_storage(c, x, y).vectorize(c, 4)
            interpolated[l].unroll(x, 2).unroll(y, 2)
final.reorder(c, x, y).bound(c, 0, 3).parallel(y)
final.tile(x, y, xi, yi, 2, 2).unroll(xi).unroll(yi)
final.bound(x, 0, input.width())
final.bound(y, 0, input.height())
elif sched == 3:
print("Flat schedule with vectorization sometimes.")
for l in range(levels):
if l + 4 < levels:
yo, yi = hl.Var('yo'), hl.Var('yi')
downsampled[l].compute_root().vectorize(x, 4)
interpolated[l].compute_root().vectorize(x, 4)
else:
downsampled[l].compute_root()
interpolated[l].compute_root()
        final.compute_root()
elif sched == 4:
print("GPU schedule.")
# Some gpus don't have enough memory to process the entire
# image, so we process the image in tiles.
        yo, yi, xo, xi, ci = hl.Var('yo'), hl.Var('yi'), hl.Var('xo'), hl.Var('xi'), hl.Var('ci')
final.reorder(c, x, y).bound(c, 0, 3).vectorize(x, 4)
final.tile(x, y, xo, yo, xi, yi, input.width()/4, input.height()/4)
normalize.compute_at(final, xo).reorder(c, x, y).gpu_tile(x, y, xi, yi, 16, 16, GPU_Default).unroll(c)
# Start from level 1 to save memory - level zero will be computed on demand
for l in range(1, levels):
            tile_size = 32 >> l
if tile_size < 1: tile_size = 1
if tile_size > 16: tile_size = 16
downsampled[l].compute_root().gpu_tile(x, y, c, xi, yi, ci, tile_size, tile_size, 4, GPU_Default)
interpolated[l].compute_at(final, xo).gpu_tile(x, y, c, xi, yi, ci, tile_size, tile_size, 4, GPU_Default)
else:
print("No schedule with this number.")
exit(1)
# JIT compile the pipeline eagerly, so we don't interfere with timing
final.compile_jit(target)
return final
def get_input_data():
image_path = os.path.join(os.path.dirname(__file__), "../../apps/images/rgba.png")
assert os.path.exists(image_path), "Could not find %s" % image_path
rgba_data = imread(image_path)
#print("rgba_data", type(rgba_data), rgba_data.shape, rgba_data.dtype)
input_data = np.copy(rgba_data, order="F").astype(np.float32) / 255.0
# input data is in range [0, 1]
#print("input_data", type(input_data), input_data.shape, input_data.dtype)
return input_data
def main():
input = hl.ImageParam(float_t, 3, "input")
levels = 10
interpolate = get_interpolate(input, levels)
# preparing input and output memory buffers (numpy ndarrays)
input_data = get_input_data()
assert input_data.shape[2] == 4
input_image = hl.Buffer(input_data)
input.set(input_image)
input_width, input_height = input_data.shape[:2]
t0 = datetime.now()
output_image = interpolate.realize(input_width, input_height, 3)
t1 = datetime.now()
print('Interpolated in %.5f secs' % (t1-t0).total_seconds())
output_data = hl.buffer_to_ndarray(output_image)
# save results
input_path = "interpolate_input.png"
output_path = "interpolate_result.png"
imsave(input_path, input_data)
imsave(output_path, output_data)
print("\nblur realized on output image.",
"Result saved at", output_path,
"( input data copy at", input_path, ")")
print("\nEnd of game. Have a nice day!")
if __name__ == '__main__':
main()
|
|
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
import os
import unittest
import numpy as np
import pandas as pd
from pymatgen.core.structure import Molecule
from pymatgen.io.vasp.inputs import Poscar
from pymatgen.io.xyz import XYZ
from pymatgen.util.testing import PymatgenTest
class XYZTest(unittest.TestCase):
def setUp(self):
coords = [
[0.000000, 0.000000, 0.000000],
[0.000000, 0.000000, 1.089000],
[1.026719, 0.000000, -0.363000],
[-0.513360, -0.889165, -0.363000],
[-0.513360, 0.889165, -0.363000],
]
coords2 = [[x + 10.0 for x in atom] for atom in coords]
self.mol = Molecule(["C", "H", "H", "H", "H"], coords)
self.multi_mols = [Molecule(["C", "H", "H", "H", "H"], coords) for coords in [coords, coords2]]
self.xyz = XYZ(self.mol)
self.multi_xyz = XYZ(self.multi_mols)
def test_str(self):
ans = """5
H4 C1
C 0.000000 0.000000 0.000000
H 0.000000 0.000000 1.089000
H 1.026719 0.000000 -0.363000
H -0.513360 -0.889165 -0.363000
H -0.513360 0.889165 -0.363000"""
self.assertEqual(str(self.xyz), ans)
mxyz = XYZ(self.multi_mols, coord_precision=3)
mxyz_text = str(mxyz)
ans_multi = """5
H4 C1
C 0.000 0.000 0.000
H 0.000 0.000 1.089
H 1.027 0.000 -0.363
H -0.513 -0.889 -0.363
H -0.513 0.889 -0.363
5
H4 C1
C 10.000 10.000 10.000
H 10.000 10.000 11.089
H 11.027 10.000 9.637
H 9.487 9.111 9.637
H 9.487 10.889 9.637"""
self.assertEqual(mxyz_text, ans_multi)
def test_from_string(self):
ans = """5
H4 C1
C 0.000000 0.000000 0.000000
H 0.000000 0.000000 1.089000
H 1.026719 0.000000 -0.363000
H -0.513360 -0.889165 -0.363000
H -0.513360 0.889165 -0.363000"""
xyz = XYZ.from_string(ans)
mol = xyz.molecule
sp = ["C", "H", "H", "H", "H"]
for i, site in enumerate(mol):
self.assertEqual(site.species_string, sp[i])
self.assertEqual(len(site.coords), 3)
if i == 0:
self.assertTrue(all([c == 0 for c in site.coords]))
mol_str = """2
Random
C 2.39132145462 -0.700993488928 -7.22293142224e-06
C 1.16730636786 -1.38166622735 -2.77112970359e-06
"""
xyz = XYZ.from_string(mol_str)
mol = xyz.molecule
self.assertTrue(abs(mol[0].z) < 1e-5)
self.assertTrue(abs(mol[1].z) < 1e-5)
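        # "*^" is Mathematica-style scientific notation; the parser is
        # expected to read it as an exponent marker, as asserted below.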
mol_str = """2
Random, Alternate Scientific Notation
C 2.39132145462 -0.700993488928 -7.222*^-06
C 1.16730636786 -1.38166622735 -2.771*^-06
"""
xyz = XYZ.from_string(mol_str)
mol = xyz.molecule
self.assertEqual(mol[0].z, -7.222e-06)
self.assertEqual(mol[1].z, -2.771e-06)
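        # Fortran-style "D"/"d" exponent markers should parse the same as
        # "E", as the assertions below check.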
mol_str = """3
Random
C 0.000000000000E+00 2.232615992397E+01 0.000000000000E+00
C -2.383225420567E-31 1.116307996198E+01 1.933502166311E+01
C -4.440892098501D-01 -1.116307996198d+01 1.933502166311E+01
"""
xyz = XYZ.from_string(mol_str)
mol = xyz.molecule
self.assertAlmostEqual(mol[0].x, 0)
self.assertAlmostEqual(mol[1].y, 11.16307996198)
self.assertAlmostEqual(mol[2].x, -0.4440892098501)
self.assertAlmostEqual(mol[2].y, -11.16307996198)
# self.assertTrue(abs(mol[1].z) < 1e-5)
mol_str = """ 5
C32-C2-1
C 2.70450 1.16090 -0.14630 1 3 23 2
C 1.61930 1.72490 -0.79330 2 1 5 26
C 2.34210 1.02670 1.14620 3 1 8 6
C -0.68690 2.16170 -0.13790 4 5 18 7
C 0.67160 2.15830 0.14350 5 4 2 6
"""
xyz = XYZ.from_string(mol_str)
mol = xyz.molecule
self.assertAlmostEqual(mol[0].x, 2.70450)
self.assertAlmostEqual(mol[1].y, 1.72490)
self.assertAlmostEqual(mol[2].x, 2.34210)
self.assertAlmostEqual(mol[3].z, -0.13790)
def test_from_file(self):
filepath = os.path.join(PymatgenTest.TEST_FILES_DIR, "multiple_frame_xyz.xyz")
mxyz = XYZ.from_file(filepath)
self.assertEqual(len(mxyz.all_molecules), 302)
self.assertEqual(
list(mxyz.all_molecules[0].cart_coords[0]),
[0.20303525080000001, 2.8569761204000002, 0.44737723190000001],
)
self.assertEqual(
list(mxyz.all_molecules[-1].cart_coords[-1]),
[5.5355550720000002, 0.0282305931, -0.30993102189999999],
)
self.assertEqual(
list(mxyz.molecule.cart_coords[-1]),
[5.5355550720000002, 0.0282305931, -0.30993102189999999],
)
def test_init_from_structure(self):
filepath = os.path.join(PymatgenTest.TEST_FILES_DIR, "POSCAR")
poscar = Poscar.from_file(filepath)
struct = poscar.structure
xyz = XYZ(struct)
ans = """24
Fe4 P4 O16
Fe 2.277347 4.550379 2.260125
Fe 2.928536 1.516793 4.639870
Fe 7.483231 4.550379 0.119620
Fe 8.134420 1.516793 2.499364
P 0.985089 1.516793 1.990624
P 4.220794 4.550379 4.370369
P 6.190973 1.516793 0.389120
P 9.426677 4.550379 2.768865
O 0.451582 4.550379 3.365614
O 1.006219 1.516793 3.528306
O 1.725331 0.279529 1.358282
O 1.725331 2.754057 1.358282
O 3.480552 3.313115 3.738027
O 3.480552 5.787643 3.738027
O 4.199665 4.550379 1.148562
O 4.754301 1.516793 0.985870
O 5.657466 4.550379 3.773620
O 6.212102 1.516793 3.610928
O 6.931215 0.279529 1.021463
O 6.931215 2.754057 1.021463
O 8.686436 3.313115 3.401208
O 8.686436 5.787643 3.401208
O 9.405548 4.550379 1.231183
O 9.960184 1.516793 1.393875"""
self.assertEqual(str(xyz), ans)
def test_as_dataframe(self):
coords = [
[0.000000, 0.000000, 0.000000],
[0.000000, 0.000000, 1.089000],
[1.026719, 0.000000, -0.363000],
[-0.513360, -0.889165, -0.363000],
[-0.513360, 0.889165, -0.363000],
]
test_df = pd.DataFrame(coords, columns=["x", "y", "z"])
test_df.insert(0, "atom", ["C", "H", "H", "H", "H"])
test_df.index += 1
coords2 = [
[0.000000, 0.000000, 0.000000],
[0.000000, 0.000000, 1.089000],
[1.026719, 0.000000, 0.363000],
[0.513360, 0.889165, 0.363000],
[0.513360, 0.889165, 0.363000],
]
test_df2 = pd.DataFrame(coords2, columns=["x", "y", "z"])
test_df2.insert(0, "atom", ["C", "H", "H", "H", "H"])
test_df2.index += 1
mol_df = self.xyz.as_dataframe()
# body tests
pd.testing.assert_frame_equal(mol_df, test_df)
# index tests
np.testing.assert_array_equal(mol_df.columns, test_df.columns)
np.testing.assert_array_equal(mol_df.index, test_df.index)
if __name__ == "__main__":
unittest.main()
|
|
# -*- coding: utf-8 -*-
import json
import asyncio
import aiohttp
import datetime
import time
class Exchanges():
def __init__(self):
header = {
'User-Agent': 'curl/7.35.0',
'Accept': '*/*'}
self.session = aiohttp.ClientSession(headers=header)
self.order_types = ["bids", "asks"]
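    # Every orderbook_* coroutine normalizes its exchange's depth endpoint to
    # {"bids": [[price, amount], ...], "asks": [...]} with bids sorted
    # high-to-low and asks low-to-high.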
@asyncio.coroutine
def orderbook_aex(self, quote="btc", base="bts"):
try:
url = "http://api.aex.com/depth.php"
params = {'c': base, 'mk_type': quote}
response = yield from asyncio.wait_for(self.session.get(
url, params=params), 120)
response = yield from response.read()
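            # Decode as utf-8-sig so a leading UTF-8 BOM, which some APIs
            # prepend, does not break json.loads.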
result = json.loads(response.decode("utf-8-sig"))
for order_type in self.order_types:
for order in result[order_type]:
order[0] = float(order[0])
order[1] = float(order[1])
order_book_ask = sorted(result["asks"])
order_book_bid = sorted(result["bids"], reverse=True)
return {"bids": order_book_bid, "asks": order_book_ask}
        except Exception:
print("Error fetching book from aex!")
@asyncio.coroutine
def orderbook_bter(self, quote="cny", base="bts"):
try:
url = "http://data.bter.com/api/1/depth/%s_%s" % (base, quote)
response = yield from asyncio.wait_for(self.session.get(
url), 120)
result = yield from response.json()
for order_type in self.order_types:
for order in result[order_type]:
order[0] = float(order[0])
order[1] = float(order[1])
order_book_ask = sorted(result["asks"])
order_book_bid = sorted(result["bids"], reverse=True)
return {"bids": order_book_bid, "asks": order_book_ask}
        except Exception:
print("Error fetching book from bter!")
@asyncio.coroutine
def orderbook_yunbi(self, quote="cny", base="bts"):
try:
url = "https://yunbi.com/api/v2/depth.json"
params = {'market': base+quote, 'limit': 20}
response = yield from asyncio.wait_for(self.session.get(
url, params=params), 120)
result = yield from response.json()
for order_type in self.order_types:
for order in result[order_type]:
order[0] = float(order[0])
order[1] = float(order[1])
order_book_ask = sorted(result["asks"])
order_book_bid = sorted(result["bids"], reverse=True)
time = int(result["timestamp"])
return {
"bids": order_book_bid, "asks": order_book_ask, "time": time}
        except Exception:
print("Error fetching book from yunbi!")
@asyncio.coroutine
def orderbook_btsbots(self, quote="CNY", base="BTS"):
try:
url = "https://btsbots.com/api/order?max_results=100&where="
# url = "http://localhost:5000/api/order?max_results=30&where="
params = "a_s==%s;a_b==%s" % (base, quote)
response = yield from asyncio.wait_for(self.session.get(
url+params), 120)
result = yield from response.json()
order_book_ask = []
for _o in result["_items"]:
order_book_ask.append([_o['p'], _o['b_s']])
params = "a_s==%s;a_b==%s" % (quote, base)
response = yield from asyncio.wait_for(self.session.get(
url+params), 120)
result = yield from response.json()
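            # The bid side is queried with the assets swapped, so invert the
            # price (1/p) to express it in the same units as the asks.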
order_book_bid = []
for _o in result["_items"]:
order_book_bid.append([1/_o['p'], _o['b_b']])
return {
"bids": order_book_bid, "asks": order_book_ask}
        except Exception:
print("Error fetching book from btsbots!")
@asyncio.coroutine
def orderbook_poloniex(self, quote="btc", base="bts"):
try:
quote = quote.upper()
base = base.upper()
url = "http://poloniex.com/public"
params = {
"command": "returnOrderBook",
"currencyPair": "%s_%s" % (quote, base),
"depth": 150
}
response = yield from asyncio.wait_for(self.session.get(
url, params=params), 120)
result = yield from response.json()
for order_type in self.order_types:
for order in result[order_type]:
order[0] = float(order[0])
order[1] = float(order[1])
order_book_ask = sorted(result["asks"])
order_book_bid = sorted(result["bids"], reverse=True)
return {"bids": order_book_bid, "asks": order_book_ask}
except Exception as e:
print("Error fetching book from poloniex!")
@asyncio.coroutine
def orderbook_bittrex(self, quote="btc", base="bts"):
try:
quote = quote.upper()
base = base.upper()
url = "https://bittrex.com/api/v1.1/public/getorderbook"
params = {
"type": "both",
"market": "%s-%s" % (quote, base)
}
response = yield from asyncio.wait_for(self.session.get(
url, params=params), 120)
result = yield from response.json()
result = result['result']
order_book_ask = []
order_book_bid = []
for order in result['buy']:
order_book_bid.append([float(order['Rate']), float(order['Quantity'])])
for order in result['sell']:
order_book_ask.append([float(order['Rate']), float(order['Quantity'])])
order_book_ask = sorted(order_book_ask)
order_book_bid = sorted(order_book_bid, reverse=True)
return {"bids": order_book_bid, "asks": order_book_ask}
except Exception as e:
print("Error fetching book from bittrex!")
@asyncio.coroutine
def orderbook_zb(self, quote="btc", base="bts"):
try:
quote = quote.lower()
base = base.lower()
url = "http://api.zb.com/data/v1/depth"
params = {
"market": "%s_%s" % (base, quote),
"size": 50
}
response = yield from asyncio.wait_for(self.session.get(
url, params=params), 120)
response = yield from response.read()
result = json.loads(response.decode("utf-8-sig"))
for order_type in self.order_types:
for order in result[order_type]:
order[0] = float(order[0])
order[1] = float(order[1])
order_book_ask = sorted(result["asks"])
order_book_bid = sorted(result["bids"], reverse=True)
return {"bids": order_book_bid, "asks": order_book_ask}
except Exception as e:
print("Error fetching book from zb!")
print(e)
@asyncio.coroutine
def orderbook_lbank(self, quote="btc", base="bts"):
try:
quote = quote.lower()
base = base.lower()
url = "https://api.lbank.info/v1/depth.do"
params = {
"symbol": "%s_%s" % (base, quote),
"size": 60
}
response = yield from asyncio.wait_for(self.session.get(
url, params=params), 120)
response = yield from response.read()
result = json.loads(response.decode("utf-8-sig"))
for order_type in self.order_types:
for order in result[order_type]:
order[0] = float(order[0])
order[1] = float(order[1])
order_book_ask = sorted(result["asks"])
order_book_bid = sorted(result["bids"], reverse=True)
return {"bids": order_book_bid, "asks": order_book_ask}
except Exception as e:
print("Error fetching book from lbank!")
print(e)
@asyncio.coroutine
def orderbook_binance(self, quote="btc", base="bts"):
try:
quote = quote.upper()
base = base.upper()
url = "https://www.binance.com/api/v1/depth"
params = {
"symbol": "%s%s" % (base, quote)
}
response = yield from asyncio.wait_for(self.session.get(
url, params=params), 120)
response = yield from response.read()
result = json.loads(response.decode("utf-8-sig"))
for order_type in self.order_types:
for idx, val in enumerate(result[order_type]):
result[order_type][idx] = [float(val[0]), float(val[1])]
order_book_ask = sorted(result["asks"])
order_book_bid = sorted(result["bids"], reverse=True)
return {"bids": order_book_bid, "asks": order_book_ask}
except Exception as e:
print("Error fetching book from binance!")
print(e)
@asyncio.coroutine
def orderbook_jubi(self, quote="cny", base="bts"):
try:
quote = quote.lower()
base = base.lower()
url = "https://www.jubi.com/api/v1/depth"
params = {
"coin": "%s" % (base)
}
response = yield from asyncio.wait_for(self.session.get(
url, params=params), 120)
response = yield from response.read()
result = json.loads(response.decode("utf-8-sig"))
for order_type in self.order_types:
for order in result[order_type]:
order[0] = float(order[0])
order[1] = float(order[1])
order_book_ask = sorted(result["asks"])
order_book_bid = sorted(result["bids"], reverse=True)
return {"bids": order_book_bid, "asks": order_book_ask}
except Exception as e:
print("Error fetching book from jubi!")
print(e)
@asyncio.coroutine
def orderbook_19800(self, quote="cny", base="bts"):
try:
quote = quote.lower()
base = base.lower()
url = "https://www.19800.com/api/v1/depth"
params = {
"market": "%s_%s" % (quote, base)
}
response = yield from asyncio.wait_for(self.session.get(
url, params=params), 120)
response = yield from response.read()
result = json.loads(response.decode("utf-8-sig"))
result = result['data']
order_book_ask = []
order_book_bid = []
for order in result['bids']:
order_book_bid.append([float(order['Price']), float(order['Volume'])])
for order in result['asks']:
order_book_ask.append([float(order['Price']), float(order['Volume'])])
order_book_ask = sorted(order_book_ask)
order_book_bid = sorted(order_book_bid, reverse=True)
return {"bids": order_book_bid, "asks": order_book_ask}
except Exception as e:
print("Error fetching book from 19800!")
print(e)
@asyncio.coroutine
def ticker_btc38(self, quote="cny", base="bts"):
try:
url = "http://api.btc38.com/v1/ticker.php?c=%s&mk_type=%s" % (
base, quote)
response = yield from asyncio.wait_for(self.session.get(url), 120)
response = yield from response.read()
result = json.loads(response.decode("utf-8-sig"))
_ticker = {}
_ticker["last"] = result['ticker']["last"]
_ticker["vol"] = result['ticker']["vol"]
_ticker["buy"] = result['ticker']["buy"]
_ticker["sell"] = result['ticker']["sell"]
_ticker["low"] = result['ticker']["low"]
_ticker["high"] = result['ticker']["high"]
for key in _ticker:
_ticker[key] = float(_ticker[key])
_ticker["name"] = "btc38"
return _ticker
except Exception as e:
print("Error fetching ticker from btc38!")
print(e)
@asyncio.coroutine
def ticker_poloniex(self, quote="USDT", base="BTC"):
try:
url = "https://poloniex.com/public?command=returnTicker"
response = yield from asyncio.wait_for(self.session.get(url), 120)
response = yield from response.read()
result = json.loads(
response.decode("utf-8-sig"))["%s_%s" % (quote, base)]
_ticker = {}
_ticker["last"] = result["last"]
_ticker["vol"] = result["baseVolume"]
_ticker["buy"] = result["highestBid"]
_ticker["sell"] = result["lowestAsk"]
_ticker["low"] = result["low24hr"]
_ticker["high"] = result["high24hr"]
for key in _ticker:
_ticker[key] = float(_ticker[key])
_ticker["name"] = "poloniex"
return _ticker
except Exception as e:
print("Error fetching ticker from poloniex!")
print(e)
@asyncio.coroutine
def ticker_btcchina(self, quote="cny", base="btc"):
try:
url = "https://data.btcchina.com/data/ticker?market=%s%s" % (
base, quote)
response = yield from asyncio.wait_for(self.session.get(url), 120)
response = yield from response.read()
result = json.loads(response.decode("utf-8-sig"))
_ticker = {}
_ticker["last"] = result['ticker']["last"]
_ticker["vol"] = result['ticker']["vol"]
_ticker["buy"] = result['ticker']["buy"]
_ticker["sell"] = result['ticker']["sell"]
_ticker["low"] = result['ticker']["low"]
_ticker["high"] = result['ticker']["high"]
for key in _ticker:
_ticker[key] = float(_ticker[key])
_ticker["time"] = int(result['ticker']["date"])
_ticker["name"] = "btcchina"
return _ticker
except Exception as e:
print("Error fetching ticker from btcchina!")
print(e)
@asyncio.coroutine
def ticker_huobi(self, base="btc"):
try:
url = "http://api.huobi.com/staticmarket/ticker_%s_json.js" % base
response = yield from asyncio.wait_for(self.session.get(url), 120)
response = yield from response.read()
result = json.loads(response.decode("utf-8-sig"))
_ticker = {}
_ticker["last"] = result['ticker']["last"]
_ticker["vol"] = result['ticker']["vol"]
_ticker["buy"] = result['ticker']["buy"]
_ticker["sell"] = result['ticker']["sell"]
_ticker["low"] = result['ticker']["low"]
_ticker["high"] = result['ticker']["high"]
for key in _ticker:
_ticker[key] = float(_ticker[key])
_ticker["time"] = int(result['time'])
_ticker["name"] = "huobi"
return _ticker
except Exception as e:
print("Error fetching ticker from huobi!")
print(e)
@asyncio.coroutine
def ticker_okcoin_cn(self, quote="cny", base="btc"):
try:
url = "https://www.okcoin.cn/api/ticker.do?symbol=%s_%s" % (
base, quote)
response = yield from asyncio.wait_for(self.session.get(url), 120)
response = yield from response.read()
result = json.loads(response.decode("utf-8-sig"))
_ticker = {}
_ticker["last"] = result['ticker']["last"]
_ticker["vol"] = result['ticker']["vol"]
_ticker["buy"] = result['ticker']["buy"]
_ticker["sell"] = result['ticker']["sell"]
_ticker["low"] = result['ticker']["low"]
_ticker["high"] = result['ticker']["high"]
for key in _ticker:
_ticker[key] = float(_ticker[key])
_ticker["time"] = int(result['date'])
_ticker["name"] = "okcoin.cn"
return _ticker
except Exception as e:
print("Error fetching ticker from okcoin cn!")
print(e)
@asyncio.coroutine
def ticker_okcoin_com(self, quote="usd", base="btc"):
try:
url = "https://www.okcoin.com/api/v1/ticker.do?symbol=%s_%s" % (
base, quote)
response = yield from asyncio.wait_for(self.session.get(url), 120)
response = yield from response.read()
result = json.loads(response.decode("utf-8-sig"))
_ticker = {}
_ticker["last"] = result['ticker']["last"]
_ticker["vol"] = result['ticker']["vol"]
_ticker["buy"] = result['ticker']["buy"]
_ticker["sell"] = result['ticker']["sell"]
_ticker["low"] = result['ticker']["low"]
_ticker["high"] = result['ticker']["high"]
for key in _ticker:
_ticker[key] = float(_ticker[key])
_ticker["time"] = int(result['date'])
_ticker['name'] = 'okcoin.com'
return _ticker
except Exception as e:
print("Error fetching ticker from okcoin com!")
print(e)
@asyncio.coroutine
def ticker_gdax(self, quote="usd", base="btc"):
try:
url = "https://api.gdax.com/products/%s-%s/ticker" % (
base.upper(), quote.upper())
response = yield from asyncio.wait_for(self.session.get(url), 120)
response = yield from response.read()
result = json.loads(response.decode("utf-8-sig"))
_ticker = {}
_ticker["last"] = result["price"]
_ticker["vol"] = result["volume"]
_ticker["buy"] = result["bid"]
_ticker["sell"] = result["ask"]
for key in _ticker:
_ticker[key] = float(_ticker[key])
_ticker["low"] = None
_ticker["high"] = None
_ticker["time"] = int(
datetime.datetime.strptime(
result["time"][:19]+"+0000", "%Y-%m-%dT%H:%M:%S%z").timestamp())
_ticker["name"] = "gdax"
return _ticker
except Exception as e:
print("Error fetching ticker from gdax.com!")
print(e)
@asyncio.coroutine
def ticker_bitstamp(self, quote="usd", base="btc"):
try:
url = "https://www.bitstamp.net/api/v2/ticker/%s%s" % (
base, quote)
response = yield from asyncio.wait_for(self.session.get(url), 120)
response = yield from response.read()
result = json.loads(response.decode("utf-8-sig"))
_ticker = {}
_ticker["last"] = result["last"]
_ticker["vol"] = result["volume"]
_ticker["buy"] = result["bid"]
_ticker["sell"] = result["ask"]
_ticker["low"] = result["low"]
_ticker["high"] = result["high"]
for key in _ticker:
_ticker[key] = float(_ticker[key])
_ticker["time"] = int(result['timestamp'])
_ticker["name"] = "bitstamp"
return _ticker
except Exception as e:
print("Error fetching ticker from bitstamp.net!")
print(e)
@asyncio.coroutine
def ticker_btce(self, quote="usd", base="btc"):
try:
url = "https://btc-e.com/api/3/ticker/%s_%s" % (
base, quote)
response = yield from asyncio.wait_for(self.session.get(url), 120)
response = yield from response.read()
result = json.loads(response.decode("utf-8-sig"))
result = result["%s_%s" % (base, quote)]
_ticker = {}
_ticker["last"] = result["last"]
_ticker["vol"] = result["vol_cur"]
_ticker["buy"] = result["buy"]
_ticker["sell"] = result["sell"]
_ticker["low"] = result["low"]
_ticker["high"] = result["high"]
for key in _ticker:
_ticker[key] = float(_ticker[key])
_ticker["time"] = int(result['updated'])
_ticker["name"] = "btce"
return _ticker
except Exception as e:
print("Error fetching ticker from btc-e.com!")
print(e)
@asyncio.coroutine
def ticker_bitflyer(self, quote="usd", base="btc"):
try:
quote = quote.upper()
base = base.upper()
url = "https://api.bitflyer.com/v1/ticker?product_code=%s_%s" % (
base, quote)
response = yield from asyncio.wait_for(self.session.get(url), 120)
response = yield from response.read()
result = json.loads(response.decode("utf-8-sig"))
_ticker = {}
_ticker["last"] = result["ltp"]
for key in _ticker:
_ticker[key] = float(_ticker[key])
_ticker["time"] = int(time.time())
_ticker["name"] = "bitflyer_%s" % quote
return _ticker
except Exception as e:
print("Error fetching ticker from bitflyer.com!")
print(e)
@asyncio.coroutine
def ticker_bitfinex(self, quote="usd", base="btc"):
try:
quote = quote.upper()
base = base.upper()
url = "https://api.bitfinex.com/v2/ticker/t%s%s" % (
base, quote)
response = yield from asyncio.wait_for(self.session.get(url), 120)
response = yield from response.read()
result = json.loads(response.decode("utf-8-sig"))
_ticker = {}
_ticker["last"] = result[6]
_ticker["vol"] = result[7]
_ticker["buy"] = result[0]
_ticker["sell"] = result[2]
_ticker["low"] = result[9]
_ticker["high"] = result[8]
for key in _ticker:
_ticker[key] = float(_ticker[key])
_ticker["time"] = int(time.time())
_ticker["name"] = "bitfinex"
return _ticker
except Exception as e:
print("Error fetching ticker from bitfinex.com!")
print(e)
@asyncio.coroutine
def ticker_kraken(self, quote="eur", base="btc"):
try:
quote = quote.upper()
base = base.upper()
url = "https://api.kraken.com/0/public/Ticker?pair=%s%s" % (
base, quote)
response = yield from asyncio.wait_for(self.session.get(url), 120)
response = yield from response.read()
result = json.loads(response.decode("utf-8-sig"))
for key in result['result']:
result = result['result'][key]
_ticker = {}
_ticker["last"] = result['c'][0]
for key in _ticker:
_ticker[key] = float(_ticker[key])
_ticker["time"] = int(time.time())
_ticker["name"] = "kraken"
return _ticker
except Exception as e:
print("Error fetching ticker from kraken.com!")
print(e)
if __name__ == "__main__":
loop = asyncio.get_event_loop()
exchanges = Exchanges()
@asyncio.coroutine
def run_task(coro, *args):
while True:
result = yield from coro(*args)
print(result)
yield from asyncio.sleep(120)
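    # Each task polls its endpoint every 120 seconds forever, so
    # run_until_complete below never returns during normal operation.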
tasks = [
# loop.create_task(run_task(exchanges.orderbook_btsbots)),
# loop.create_task(run_task(exchanges.orderbook_btsbots, "OPEN.BTC", "BTS")),
# loop.create_task(run_task(exchanges.orderbook_aex))
# loop.create_task(run_task(exchanges.orderbook_lbank, "BTC", "BTS"))
loop.create_task(run_task(exchanges.orderbook_binance))
# loop.create_task(run_task(exchanges.orderbook_19800))
# loop.create_task(run_task(exchanges.orderbook_yunbi)),
# loop.create_task(run_task(exchanges.orderbook_poloniex))
# loop.create_task(run_task(exchanges.ticker_btc38)),
# loop.create_task(run_task(exchanges.ticker_gdax)),
# loop.create_task(run_task(exchanges.ticker_btcchina)),
# loop.create_task(run_task(exchanges.ticker_huobi)),
# loop.create_task(run_task(exchanges.ticker_okcoin_cn)),
# loop.create_task(run_task(exchanges.ticker_okcoin_com))
# loop.create_task(run_task(exchanges.ticker_bitfinex)),
# loop.create_task(run_task(exchanges.ticker_bitflyer, 'jpy', 'btc')),
# loop.create_task(run_task(exchanges.ticker_bitflyer, "usd", 'btc'))
]
loop.run_until_complete(asyncio.wait(tasks))
loop.run_forever()
|
|
import optparse
from pyang import plugin
from pyang import statements
paths_in_module = []
leafrefs = []
key = ''
class_keywords = ["container", "list", "case", "choice", "augment"]
servicepoints = ["servicepoint", "productpoint"]
classnamecolor = " {0.113725, 0.352941, 0.670588}"
mandatoryconfig = " {0.600000, 0.152941, 0.152941}"
optionalconfig = " {0.129412, 0.501961, 0.254902}"
notconfig = " {0.549020, 0.486275, 0.133333}"
# Which line style to use for containment; OmniGraffle draws a Bezier curve
# by default, so override it with a straight line.
containsline = " tail type: \"FilledDiamond\", head type: \"None\", line type: \"Straight\" "
leafrefline = " line type: \"Straight\", head type: \"FilledArrow\" "
def pyang_plugin_init():
plugin.register_plugin(OmniPlugin())
class OmniPlugin(plugin.PyangPlugin):
def add_output_format(self, fmts):
self.multiple_modules = True
fmts['omni'] = self
def add_opts(self, optparser):
optlist = [
optparse.make_option("--omni-path",
dest="omni_tree_path",
help="Subtree to print"),
]
g = optparser.add_option_group("OmniGraffle output specific options")
g.add_options(optlist)
def setup_fmt(self, ctx):
ctx.implicit_errors = False
def emit(self, ctx, modules, fd):
if ctx.opts.omni_tree_path is not None:
path = ctx.opts.omni_tree_path.split('/')
if path[0] == '':
path = path[1:]
else:
path = None
print_omni_header(modules, fd, path, ctx)
emit_modules(modules, fd, path, ctx)
post_process(fd, ctx)
print_omni_footer(modules, fd, path, ctx)
def print_omni_header(modules, fd, path, ctx):
# Build doc name from module names
name = ''
for m in modules:
name += m.arg
name = name[:32]
fd.write("""
tell application id "com.omnigroup.OmniGraffle6"
activate
make new document with properties {name:\"%s\"}
set bounds of window 1 to {50, 50, 1200, 800}
tell first canvas of document \"%s\"
set canvasSize to {600, 600}
set name to \"YANG Model\"
set adjusts pages to true
make new shape at end of graphics with properties {fill: no fill, draws stroke: false, draws shadow: false, autosizing: full, size: {32.000000, 20.000000}, text: {size: 8, alignment: center, font: "HelveticaNeue", text: "leafref"}, origin: {2403.202333, 169.219094}}
make new line at end of graphics with properties {point list: {{2513.245592418806, 185.5962102698529}, {2373.745592418806, 185.3149602698529}}, draws shadow: false, head type: "FilledArrow"}
make new shape at end of graphics with properties {fill: no fill, draws stroke: false, draws shadow: false, autosizing: full, size: {105.000000, 20.000000}, text: {size: 8, alignment: center, font: "HelveticaNeue", text: "Schema tree, containment"}, origin: {2397.741930, 138.863190}}
make new line at end of graphics with properties {point list: {{2374.993645107464, 154.4881903780727}, {2514.493645107464, 154.4881903780727}}, draws shadow: false, tail type: "FilledDiamond"}
make new shape at end of graphics with properties {autosizing: vertically only, size: {139.500000, 14.000000}, text: {alignment: center, font: "Helvetica-Bold", text: "Legend"}, text placement: top, origin: {2366.929155, 43.937008}, vertical padding: 0}
make new shape at end of graphics with properties {autosizing: vertically only, size: {139.500000, 56.000000}, text: {{color: {0.600000, 0.152941, 0.152941}, text: "Mandatory config
"}, {color: {0.129412, 0.501961, 0.254902}, text: "Optional config
"}, {color: {0.129412, 0.501961, 0.254902}, text: "Key leaf", underlined: true}, {color: {0.129412, 0.501961, 0.254902}, text: "
"}, {color: {0.549020, 0.486275, 0.133333}, text: "Not config"}}, text placement: top, origin: {2366.929155, 57.937008}, vertical padding: 0}
assemble graphics -2 through -1 table shape { 2, 1 }
assemble graphics -5 through -1
""" %(name, name))
def post_process(fd, ctx):
for s in leafrefs:
        # don't try to connect to a class not given as input to pyang
        if (s.strip().split(" to ")[1].split(" with ")[0] in paths_in_module):
fd.write(s)
def print_omni_footer(modules, fd, path, ctx):
fd.write("""
layout
end tell
end tell
""")
def print_module_info(module, fd, ctx):
title = module.arg
print_text(title, fd, ctx)
def emit_modules(modules, fd, path, ctx):
for module in modules:
print_module_info(module, fd, ctx)
chs = [ch for ch in module.i_children]
if path is not None and len(path) > 0:
chs = [ch for ch in chs
if ch.arg == path[0]]
path = path[1:]
# TEST
for ch in chs:
print_node(module, ch, module, fd, path, ctx, 'true')
for augment in module.search('augment'):
print_node(module, augment, module, fd, path, ctx, 'true')
def iterate_children(parent, s, module, fd, path, ctx):
if hasattr(s, 'i_children'):
for ch in s.i_children:
print_node(s, ch, module, fd, path, ctx)
def print_class_header(s, fd, ctx, root='false'):
global servicepoints
service = ""
for sub in s.substmts:
if sub.keyword[1] in servicepoints:
service = "SERVICE\n"
fd.write("make new shape at end of graphics with properties {autosizing: full, size: {187.500000, 14.000000}, text: {{alignment: center, font: \"Helvetica-Bold\", text: \"%s \"}, {alignment: center, color:%s, font: \"Helvetica-Bold\", text: \"%s \"}}, text placement: top, origin: {150.000000, 11.500000}, vertical padding: 0}\n" %(service + s.keyword, classnamecolor, s.arg))
def print_class_stuff(s, fd, ctx):
number = print_attributes(s, fd, ctx)
#print_actions(s,fd, ctx)
close_class(number, s, fd, ctx)
print_associations(s,fd, ctx)
def print_attributes(s,fd, ctx):
global key
if s.keyword == 'list':
keystring = s.search_one('key')
if keystring is not None:
key = keystring.arg.split(" ")
else:
key = ''
if hasattr(s, 'i_children'):
found_attrs = False
found_actions = False
index = False
# Search attrs
for ch in s.i_children:
index = False
if ch.keyword in ["leaf", "leaf-list"]:
if found_attrs == False:
# first attr in attr section
fd.write("make new shape at end of graphics with properties {autosizing:full, size:{187.5, 28.0}, text:{")
found_attrs = True
else:
# comma before new textitem
fd.write(", ")
if ch.keyword == "leaf-list":
str = "[]"
else:
str = ""
if ch.arg in key:
index = True
print_leaf(ch, str, index, fd, ctx)
if found_attrs:
# close attr section
fd.write("}, text placement:top, origin:{150.0, 25.5}, vertical padding:0}\n")
# Search actions
for ch in s.i_children:
if ch.keyword == ('tailf-common', 'action'):
if found_actions == False:
fd.write("make new shape at end of graphics with properties {autosizing:full, size:{187.5, 28.0}, text:{text:\"")
found_actions = True
print_action(ch, fd, ctx)
if found_actions:
fd.write("\"}, text placement:top, origin:{150.0, 25.5}, vertical padding:0}\n")
# return number of sections in class
return (found_attrs + found_actions) + 1
def close_class(number, s, fd, ctx):
fd.write("local %s\n" %fullpath(s))
fd.write("set %s to assemble ( graphics -%s through -1 ) table shape {%s, 1}\n" %(fullpath(s), str(number), str(number) ))
def print_node(parent, s, module, fd, path, ctx, root='false'):
# We have a class
if (s.keyword in class_keywords):
print_class_header(s, fd, ctx, root)
paths_in_module.append(fullpath(s))
print_class_stuff(s, fd, ctx)
# Do not try to create relationship to module
if (parent != module):
presence = s.search_one("presence")
if presence is not None:
print_aggregation(parent, s, fd, "0", "1", ctx)
else:
print_aggregation(parent, s, fd, "1", "1", ctx)
iterate_children(parent, s, module, fd, path, ctx)
def print_associations(s, fd, ctx):
# find leafrefs and identityrefs
if hasattr(s, 'i_children'):
for ch in s.i_children:
if hasattr(ch, 'i_leafref_ptr') and (ch.i_leafref_ptr is not None):
to = ch.i_leafref_ptr[0]
print_association(s, to.parent, ch, to, "leafref", fd, ctx)
def print_aggregation(parent, this, fd, lower, upper, ctx):
fd.write("connect %s to %s with properties {%s} \n" %(fullpath(parent),fullpath(this), containsline))
def print_rpc(rpc, fd, ctx, root='false'):
fd.write("<UML:Class xmi.id = \'%s\' name = \'%s\' " %(fullpath(rpc), rpc.arg))
def print_action(action, fd, ctx, root='false'):
fd.write("%s()\n" %action.arg)
def print_notification(notification, fd, ctx, root='false'):
fd.write("<UML:Class xmi.id = \'%s\' name = \'%s\' " %(fullpath(notification), notification.arg))
def print_inout(parent, s, fd, ctx, root='false'):
fd.write("<UML:Class xmi.id = \'%s\' name = \'%s-%s\' " %(fullpath(s), parent.arg, s.keyword))
def print_leaf(leaf, str, index, fd, ctx):
    if leaf.i_config:
c = '(rw)'
color = optionalconfig
else:
c = '(ro)'
color = notconfig
m = leaf.search_one('mandatory')
if m is None or m.arg == 'false':
mand = '?'
else:
mand = ''
color = mandatoryconfig
if not index:
fd.write("{font: \"Helvetica-Oblique\", color: %s, text: \"%s%s%s %s %s\n\"}" %(color, leaf.arg, str, mand, c, get_typename(leaf)))
else:
fd.write("{font: \"Helvetica-Oblique\", color: %s, underlined: true, text: \"%s%s%s %s %s\n\"}" %(color, leaf.arg, str, mand, c, get_typename(leaf)))
def print_association(fromclass, toclass, fromleaf, toleaf, association, fd, ctx):
leafrefs.append("connect " + (fullpath(fromclass)) + " to " + fullpath(toclass) + " with properties {" + leafrefline + "}\n", )
def print_text(t, fd, ctx):
fd.write("make new shape at end of graphics with properties {fill: no fill, draws stroke: false, draws shadow: false, autosizing: full, size: {57.000000, 30.000000}, text: {size: 16, alignment: center, font: \"HelveticaNeue\", text: \"%s\"}, origin: {100, 4.500000}}\n" %t)
def get_typename(s):
t = s.search_one('type')
if t is not None:
s = t.arg
# if t.arg == 'enumeration':
# s = s + ' : {'
# for enums in t.substmts[:10]:
# s = s + enums.arg + ','
# if len(t.substmts) > 3:
# s = s + "..."
# s = s + '}'
# elif t.arg == 'leafref':
# s = s + ' : '
# p = t.search_one('path')
# if p is not None:
# s = s + p.arg
return s
def fullpath(stmt):
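    # Builds an identifier by joining the statement's ancestor args with "_",
    # e.g. a leaf "addr" in container "sys" of module "ex" yields "ex_sys_addr".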
pathsep = "_"
path = stmt.arg
# for augment paths we need to remove initial /
if path.find("/") == 0:
path = path[1:len(path)]
else:
if stmt.keyword == 'case':
path = path + '-case'
elif stmt.keyword == 'grouping':
path = path + '-grouping'
while stmt.parent is not None:
stmt = stmt.parent
if stmt.arg is not None:
path = stmt.arg + pathsep + path
path = path.replace('-', '_')
path = path.replace(':', '_')
path = path.replace('/', '_')
return path
|
|
# file build system
#
# The purpose of this file is to load a system configuration
# into the graph database
#
import json
import redis
from . import farm_template_py3
if __name__ == "__main__" :
print( "constructing graph")
cf = farm_template_py3.Construct_Farm()
#
#
# Construct Systems
#
#
cf.construct_system("LaCima Operations")
#
#
# Construction Sites for LaCima
#
#
cf.construct_site( name="LaCima",address="21005 Paseo Montana Murrieta, Ca 92562")
# we are going to construct the data store here
cf.add_header_node("APPLICATION_SUPPORT")
cf.add_header_node( "UTILITY_MODULE", properties = {}, json_flag= True )
cf.add_info_node( "CIMIS_EMAIL","CIMIS_EMAIL",properties = { "imap_username" :'lacima.ranch@gmail.com',"imap_password" : 'Gr1234gfd'} , json_flag = True)
cf.end_header_node("UTILITY_MODULE")
cf.add_header_node( "MOISTURE_CONTROLLERS", properties = {}, json_flag= True )
cf.add_info_node("MOISTURE_MANUAL_UPDATE_FLAG","MANUAL_UPDATE_FLAG",properties = {},json_flag = True)
description_map = ["Bank 10A Watermark 8 inch","Bank 10A Resistive 8 inch", "Bank 10A Resistive 18 inch", "empty",
"Bank 10B Watermark 8 inch", "Bank 10B Resistive 8 inch","Bank 10B Resistive 18 inch","empty",
"Bank 10C Watermark 8 inch","Bank 10C Resistive 8 inch", "Bank 10C Resistive 18 inch", "empty",
"Bank 10D Watermark 8 inch", "Bank 10D Resistive 8 inch","Bank 10D Resistive 18 inch","empty" ]
depth_map = [8,8,18,0,8,8,18,0,8,8,18,0,8,8,18,0]
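    # description_map and depth_map are index-aligned: entry i describes the
    # sensor on channel i of the moisture controller, with depth in inches
    # and 0 marking unused channels.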
properties = {}
properties["description"] = "Moisture Sensor for Irrigation Bank10"
properties["description_map"] = description_map
properties["update_time"] = 15
properties["depth_map"] = depth_map
properties["moisture_list_store"] = "MOISTURE_1_DATA_STORE"
properties["air_temp_list_store"] = "MOISTURE_1_AIR_TEMP_LIST_STORE"
properties["roll_over_list_store"] = "MOISTURE_1_ROLL_OVER_LIST_STORE"
properties["slave_controller_address"] = 40
cf.add_info_node( "MOISTURE_CTR","moisture_1", properties = properties, json_flag= True )
cf.end_header_node("MOISTURE_CONTROLLERS")
cf.add_info_node( "CLOUD_STATUS_STORE","status_store", properties = {"queue_name":"status_store"} )
#altitude = 2400
#cf.add_eto_setup_code(access_codes = access_codes, altitude = altitude)
#cf.start_info_store()
#cf.add_eto_store()
cf.add_header_node( "ETO_SITES", properties = {"integrated_measurement":"LACIMA_INTEGRATED_ETO_ESTIMATE",
"measurement":"LACIMA_ETO_MEASUREMENTS",
"mv_threshold_number":1 } )
properties = { "api-key":"e1d03467-5c0d-4a9b-978d-7da2c32d95de" , "url":"http://et.water.ca.gov/api/data" , "longitude": -117.299459 ,"latitude":33.578156 }
properties["altitude"] = 2400
properties["measurement_tag"] = "CIMIS_SATELLITE_ETO"
properties["list_length"] = 100
properties["measurement"] = "CIMIS_SATELLITE_ETO_STORE"
properties["majority_vote_flag"] = True
cf.add_info_node( "ETO_ENTRY","ETO_CIMIS_SATELLITE",properties=properties, json_flag=True)
properties = { "api-key":"e1d03467-5c0d-4a9b-978d-7da2c32d95de" , "url":"http://et.water.ca.gov/api/data" , "station":62 }
properties["altitude"] = 2400
properties["measurement_tag"] = "CIMIS_ETO"
properties["list_length"] = 100
properties["measurement"] = "CIMIS_ETO_STORE"
properties["majority_vote_flag"] = True
cf.add_info_node( "ETO_ENTRY","ETO_CIMIS",properties=properties, json_flag=True)
properties = {"api-key":"8b165ee73a734f379a8c91460afc98a1" ,"url":"http://api.mesowest.net/v2/stations/timeseries?" , "station":"SRUC1" }
properties["altitude"] = 2400
properties["measurement_tag"] = "SRUC1_ETO"
properties["list_length"] = 100
properties["measurement"] = "SRUC1_ETO_STORE"
properties["majority_vote_flag"] = True
cf.add_info_node( "ETO_ENTRY","Santa_Rosa_RAWS",properties=properties, json_flag=True)
properties = {"api-key":"8b165ee73a734f379a8c91460afc98a1" ,"url":"http://api.mesowest.net/v2/stations/timeseries?" , "station":"SRUC1" }
properties["altitude"] = 2400
properties["measurement_tag"] = "HYBRID_SITE"
properties["list_length"] = 100
properties["measurement"] = "HYBRID_SITE_STORE"
properties["rollover"] = "moisture_1_rollover"
properties["majority_vote_flag"] = False
cf.add_info_node( "ETO_ENTRY","LaCima_Ranch",properties=properties, json_flag=True)
cf.end_header_node("ETO_SITES")
cf.add_header_node("RAIN_SOURCES",properties = {"measurement":"LACIMA_RAIN_MEASUREMENTS" } )
properties = { "api-key":"e1d03467-5c0d-4a9b-978d-7da2c32d95de" , "url":"http://et.water.ca.gov/api/data" , "station":62 }
properties["measurement_tag"] = "CIMIS_RAIN"
properties["list_length"] = 100
properties["measurement"] = "CIMIS_RAIN_STORE"
cf.add_info_node( "RAIN_ENTRY","CIMIS_RAIN",properties=properties, json_flag=True)
properties = {"api-key":"8b165ee73a734f379a8c91460afc98a1" ,"url":"http://api.mesowest.net/v2/stations/precip?" , "station":"SRUC1" }
properties["measurement_tag"] ="SRUC1_RAIN"
properties["list_length"] = 100
properties["measurement"] = "SRCU1_RAIN_STORE"
cf.add_info_node( "RAIN_ENTRY","SRUC1_RAIN",properties=properties, json_flag=True)
cf.end_header_node("RAIN_SOURCES")
cf.add_header_node("IRRIGATION_SUPPORT")
cf.add_header_node("MASTER_VALVES")
cf.add_info_node("MASTER_VALVE_CONTROLLER","satellite_1",json_flag = True,
properties = { "remote":"satellite_1","master_valve":43, "cleaning_valve":44 })
cf.end_header_node("MASTER_VALVES")
cf.add_header_node("CURRENT_MEASUREMENT")
cf.add_info_node("CURRENT_DEVICE" ,"satellite_1",properties={ "remote":"satellite_1","register":"DF2", "conversion":1.0 },
json_flag = True)
cf.end_header_node("CURRENT_MEASUREMENT")
cf.add_header_node("FLOW_METERS")
cf.add_info_node( "FLOW_METER_CONTROL","main_flow_meter",json_flag = True,
properties={ "main_flow_meter" : "True", "type":"CLICK", "remote":"satellite_1",
"io_setup" : {"latch_bit":"C201",
"read_register":"DS301", "conversion_factor":0.0224145939 } } )
cf.end_header_node("FLOW_METERS")
cf.add_header_node("IRRIGATION_DATA" )
cf.add_info_node( "IRRIGATION_DATA_ELEMENT","MASTER_VALVE",json_flag = True,
properties={ "dict":"CONTROL_VARIABLES", "key":"MASTER_VALVE_SETUP" } )
cf.add_info_node( "IRRIGATION_DATA_ELEMENT","CURRENT",json_flag = True,
properties={ "dict":"CONTROL_VARIABLES", "key":"coil_current" } )
cf.end_header_node("IRRIGATION_DATA")
cf.end_header_node("IRRIGATION_SUPPORT")
cf.end_header_node("APPLICATION_SUPPORT")
cf.add_header_node("DATA_STORE",properties={"ip":"192.168.1.84","port":6379},json_flag = True)
cf.add_header_node( "DATA_ACQUISITION")
cf.add_header_node( "MINUTE_ACQUISITION",properties= {"measurement":"MINUTE_ACQUISITION","length":5760, "routing_key":"MINUTE_ACQUISITION" } )
properties = {}
properties["units"] = "mAmps"
properties["modbus_remote"] = "satellite_1"
properties["m_tag"] = "measure_analog"
properties["parameters"] = [ "DF1",1.0]
properties["exec_tag" ] = ["transfer_controller_current"]
cf.add_info_node( "MINUTE_ELEMENT","CONTROLLER_CURRENT",properties=properties, json_flag=True)
properties = {}
properties["units"] = "mAmps"
properties["modbus_remote"] = "satellite_1"
properties["m_tag"] = "measure_analog"
properties["parameters"] = ["DF2",1.0]
properties["exec_tag"] = ["transfer_irrigation_current"]
cf.add_info_node( "MINUTE_ELEMENT","IRRIGATION_VALVE_CURRENT",properties=properties, json_flag=True)
properties = {}
properties["units"] = "GPM"
properties["modbus_remote"] = "satellite_1"
properties["parameters"] = {"latch_bit":"C201",
"read_register":"DS301", "conversion_factor":0.0224145939 }
properties["m_tag"] = "measure_counter"
properties["exec_tag"] = ["measure_flow",0.0224145939,"main_sensor"]
cf.add_info_node( "MINUTE_ELEMENT","MAIN_FLOW_METER",properties=properties, json_flag=True)
properties = {}
properties["units"] = "AMPS"
properties["modbus_remote"] = "skip_controller"
properties["parameters"] = []
properties["m_tag"] = "no_controller"
properties["parameters"] = [.0224145939]
properties["exec_tag"] = ["well_controller_output"]
cf.add_info_node( "MINUTE_ELEMENT","WELL_CONTROLLER_OUTPUT",properties=properties, json_flag = True )
properties = {}
properties["units"] = "AMPS"
properties["modbus_remote"] = "skip_controller"
properties["parameters"] = []
properties["m_tag"] = "no_controller"
properties["parameters"] = [.0224145939]
properties["exec_tag"] = ["well_controller_input"]
cf.add_info_node( "MINUTE_ELEMENT","WELL_CONTROLLER_INPUT", properties=properties, json_flag = True)
properties = {}
properties["units"] = "PSI"
properties["modbus_remote"] = "skip_controller"
properties["parameters"] = []
properties["m_tag"] = "no_controller"
properties["parameters"] = [.0224145939]
properties["exec_tag"] = ["filter_pressure"]
cf.add_info_node( "MINUTE_ELEMENT","FILTER_PRESSURE", properties=properties, json_flag = True )
properties = {}
properties["units"] = "PSI"
properties["modbus_remote"] = "skip_controller"
properties["parameters"] = []
properties["m_tag"] = "no_controller"
properties["parameters"] = [.0224145939]
properties["exec_tag"] = ["well_pressure"]
cf.add_info_node( "MINUTE_ELEMENT", "WELL_PRESSURE", properties=properties, json_flag = True )
cf.end_header_node("MINUTE_ACQUISITION") #"MINUTE_ACQUISITION"
cf.end_header_node("DATA_ACQUISITION") #DATA_ACQUISITION
cf.add_header_node("MODBUS_STATISTICS")
cf.add_header_node( "HOUR_ACQUISTION",properties= {"measurement":"HOUR_LIST_STORE","length":300 , "routing_key":"HOUR_ACQUISTION"} , json_flag=True )
properties = {}
properties["modbus_remote"] = "skip_controller"
properties["parameters"] = []
properties["m_tag"] = "no_controller"
properties["init_tag"] = ["clear_daily_modbus_statistics"]
properties["exec_tag"] = ["accumulate_daily_modbus_statistics"]
cf.add_info_node( "HOUR_ELEMENT","MODBUS_STATISTICS",properties=properties,json_flag=True )
cf.end_header_node("HOUR_ACQUISTION") # HOUR_ACQUISTION
cf.add_header_node( "DAILY_ACQUISTION", properties= {"measurement":"DAILY_LIST_STORE","length":300, "routing_key":"DAILY_ACQUISTION"}, json_flag=True )
properties = {}
properties["modbus_remote"] = "skip_controller"
properties["parameters"] = []
properties["m_tag"] = "no_controller"
properties["exec_tag"] = ["log_daily_modbus_statistics"]
cf.add_info_node( "DAILY_ELEMENT","daily_modbus_statistics", properties=properties,json_flag=True )
cf.end_header_node("DAILY_ACQUISTION") # Daily Acquistion
cf.end_header_node("MODBUS_STATISTICS") #MODBUS_STATISTICS
cf.add_header_node( "LINUX_DATA_ACQUISITION")
cf.add_header_node( "LINUX_HOUR_ACQUISTION",properties= {"measurement":"LINUX_HOUR_LIST_STORE","length":300 , "routing_key":"linux_hour_measurement"
} , json_flag=True )
properties = {}
properties["modbus_remote"] = "skip_controller"
properties["parameters"] = []
properties["m_tag"] = "no_controller"
properties["exec_tag"] = ["python_processes"]
cf.add_info_node( "LINUX_HOUR_ELEMENT","python_processes",properties=properties,json_flag=True )
properties = {}
properties["modbus_remote"] = "skip_controller"
properties["parameters"] = []
properties["m_tag"] = "no_controller"
properties["exec_tag"] = ["pi_temperature"]
cf.add_info_node( "LINUX_HOUR_ELEMENT","pi_temperature_hourly",properties=properties,json_flag=True )
properties = {}
properties["modbus_remote"] = "skip_controller"
properties["parameters"] = []
properties["m_tag"] = "no_controller"
properties["exec_tag"] = ["linux_disk"]
cf.add_info_node( "LINUX_HOUR_ELEMENT","linux_disk", properties=properties,json_flag=True )
properties = {}
properties["modbus_remote"] = "skip_controller"
properties["parameters"] = []
properties["m_tag"] = "no_controller"
properties["exec_tag"] = ["linux_redis"]
cf.add_info_node( "LINUX_HOUR_ELEMENT","linux_redis", properties=properties,json_flag=True )
properties = {}
properties["modbus_remote"] = "skip_controller"
properties["parameters"] = []
properties["m_tag"] = "no_controller"
properties["exec_tag"] = ["linux_memory"]
cf.add_info_node( "LINUX_HOUR_ELEMENT","linux_memory", properties=properties,json_flag=True )
properties = {}
properties["modbus_remote"] = "skip_controller"
properties["parameters"] = []
properties["m_tag"] = "no_controller"
properties["exec_tag"] = ["free_cpu"]
cf.add_info_node( "LINUX_HOUR_ELEMENT","free_cpu", properties=properties,json_flag=True )
properties = {}
properties["modbus_remote"] = "skip_controller"
properties["parameters"] = []
properties["m_tag"] = "no_controller"
properties["exec_tag"] = ["proc_mem"]
cf.add_info_node( "LINUX_HOUR_ELEMENT","proc_mem", properties=properties,json_flag=True )
cf.end_header_node( "LINUX_HOUR_ACQUISTION") # HOUR_ACQUISTION
cf.end_header_node("LINUX_DATA_ACQUISITION")
#cf.add_info_node( "MINUTE_LIST_STORE", "MINUTE_LIST_STORE",properties = { "LIST_LENGTH" :10000} , json_flag = True) # about 1 week of data
#cf.add_info_node( "HOUR_LIST_STORE", "HOUR_LIST_STORE",properties = { "LIST_LENGTH" :10000} , json_flag = True) # about 1 week of data
cf.add_header_node("RAIN_MEASUREMENTS")
cf.add_info_node("RAIN_STORE","CIMIS_RAIN_STORE",properties={"list_length":300},json_flag = True)
cf.add_info_node("RAIN_STORE","SRCU1_RAIN_STORE",properties={"list_length":300},json_flag = True)
cf.end_header_node("RAIN_MEASUREMENTS")
cf.add_info_node("INTEGRATED_RAIN_ESTIMATE","LACIMA_INTEGRATED_RAIN_ESTIMATE",properties={},json_flag = True )
cf.add_info_node("RAIN_QUEUE","QUEUES:ETO:RAIN",properties={"list_length":300},json_flag = True )
cf.add_info_node("ETO_QUEUE","QUEUES:ETO:ETO",properties={"list_length":300},json_flag = True )
cf.add_info_node("INTEGRATED_ETO_ESTIMATE","LACIMA_INTEGRATED_ETO_ESTIMATE",properties={"list_length":300},json_flag = True )
cf.add_header_node("ETO_MEASUREMENTS")
cf.add_info_node("ETO_STORE","CIMIS_SATELLITE_ETO_STORE",properties={"list_length":300},json_flag = True)
cf.add_info_node("ETO_STORE","CIMIS_ETO_STORE",properties={"list_length":300},json_flag = True)
cf.add_info_node("ETO_STORE","SRUC1_ETO_STORE",properties={"list_length":300},json_flag = True)
cf.add_info_node("ETO_STORE","HYBRID_SITE_STORE",properties={"list_length":300},json_flag = True)
cf.end_header_node("ETO_MEASUREMENTS")
cf.add_header_node("MOISTURE_SENSOR_DATA")
cf.add_header_node("moisture_1")
cf.add_info_node("MOISTURE_DATA", "moisture_1",properties={"queue_name":"moisture_1_data","list_length":300},json_flag = True)
cf.add_info_node("MOISTURE_AIR_TEMP_LIST", "moisture_1",properties={"queue_name":"moisture_1_list","list_length":24},json_flag = True)
cf.add_info_node("MOISTURE_ROLLOVER", "moisture_1",properties={"queue_name":"moisture_1_rollover","list_length":24},json_flag = True)
cf.end_header_node("moisture_1") #moisture_1
cf.end_header_node("MOISTURE_SENSOR_DATA") #MOISTURE_DATA
cf.end_header_node("DATA_STORE")
properties = {}
properties["ip"] = "192.168.1.84"
properties["remote_type"] = "UDP"
properties["port"] = 5005
properties["redis_host"] = "192.168.1.84"
properties["redis_db"] = 0
properties["redis_rpc_db"] = 5
properties["redis_rpc_key"] = "#_RPC_QUEUE_"
properties["logging_key"] = "QUEUES:MODBUS_LOGGING"
cf.add_header_node( "UDP_IO_SERVER","main_remote", properties = properties, json_flag= True )
properties = {}
properties["type"] = "rs485_modbus",
properties["interface_parameters"] = { "interface":None, "timeout":.05, "baud_rate":38400 }
properties["search_device"] = "satellite_1"
cf.add_header_node( "SERIAL_LINK","rtu_2", properties = properties, json_flag= True )
properties = {}
properties["modbus_address"] = 100
properties["type"] = "click_44"
properties["function"] = ["irrigation","flow_meter","plc_current","valve_current","switches"]
properties["parameters"] = { "address":100 , "search_register":0, "register_number":1 }
cf.add_info_node( "REMOTE_UNIT","satellite_1", properties = properties, json_flag= True )
properties = {}
properties["modbus_address"] = 125
properties["type"] = "click_22"
properties["function"] = ["irrigation"]
properties["parameters"] = { "address":125 , "search_register":0 ,"register_number":1 }
cf.add_info_node( "REMOTE_UNIT","satellite_2", properties = properties, json_flag= True )
properties = {}
properties["modbus_address"] = 170
properties["type"] = "click_22"
properties["function"] = ["irrigation"]
properties["parameters"] = { "address":170 , "search_register":0, "register_number":1 }
cf.add_info_node( "REMOTE_UNIT","satellite_3", properties =properties, json_flag= True )
properties = {}
properties["modbus_address"] = 40
properties["type"] = "PSOC_4_Moisture"
properties["function"] = ["moisture"]
properties["parameters"] = { "address":40 , "search_register":1,"register_number":10 }
cf.add_info_node( "REMOTE_UNIT","moisture_1", properties =properties, json_flag= True )
properties = {}
properties["modbus_address"] = 121
properties["type"] = "esp32_relay"
properties["function"] = ["irrigation"]
properties["parameters"] = { "address":121 , "search_register":1,"register_number":1 }
cf.add_info_node( "REMOTE_UNIT","satellite_4", properties =properties, json_flag= True )
cf.end_header_node("SERIAL_LINK")
cf.end_header_node("UDP_IO_SERVER")
cf.add_header_node("RABBITMQ_CLIENTS")
cf.add_rabbitmq_status_queue( "LaCima",vhost="LaCima",queue="status_queue",port=5671,server = 'lacimaRanch.cloudapp.net' )
cf.end_header_node("RABBITMQ_CLIENTS")
properties = {}
properties["redis"] = {"ip":"127.0.0.1","port": 6379, "db":0 }
properties["error_queue_key"] = "PROCESS:ERROR_QUEUE"
properties["web_command_key"] = "PROCESS:WEB_COMMAND_KEY"
properties["web_process_data"] = "PROCESS:WEB_PROCESS_DATA"
properties["web_display_list"] = "PROCESS:WEB_DISPLAY_LIST"
properties["command_string_list"] = []
properties["command_string_list"].append( "linux_acquisition_py3.py")
properties["command_string_list"].append( "eto_py3.py")
properties["command_string_list"].append( "modbus_server_py3.py main_remote")
properties["command_string_list"].append( "rabbit_web_access_py3.py")
properties["command_string_list"].append("rabbit_cloud_status_publish_py3.py")
properties["command_string_list"].append("utilities_py3.py")
#properties["command_string_list"].append("flask_web_py3.py")
properties["command_string_list"].append("irrigation_monitoring_py3.py")
properties["command_string_list"].append("irrigation_ctrl_startup_py3.py")
cf.add_info_node("PROCESS_CONTROL","main_processor",properties=properties,json_flag = True )
#cf.construct_controller( name="PI_1", ip = "192.168.1.82",type="PI")
#cf.end_controller()
#cf.construct_web_server( name="main_web_server",url="https://192.168.1.84" )
#cf.add_rabbitmq_command_rpc_queue("LaCima" )
#cf.add_rabbitmq_web_rpc_queue("LaCima")
#cf.add_rabbitmq_event_queue("LaCima")
#cf.add_rabbitmq_status_queue( "LaCima",vhost="LaCima",queue="status_queue",port=5671,server = 'lacimaRanch.cloudapp.net' )
#cf.add_info_node( "CIMIS_EMAIL","CIMIS_EMAIL",properties = { "imap_username" :'lacima.ranch@gmail.com',"imap_password" : 'Gr1234gfd'} , json_flag = True)
#cf.add_ntpd_server("LaCima") #cf.add_moisture_monitoring("LaCima")
#cf.irrigation_monitoring("LaCima")
#cf.add_device_monitoring("LaCima")
#cf.add_watch_dog_monitoring("LaCima")
cf.end_site()
cf.end_system()
cf.check_namespace()
cf.store_keys()
|
|
import xml.etree.ElementTree as ET
from pyposterous.error import PyposterousError
from pyposterous.models import element_map, attribute_map
class Parser(object):
"""This object is responsible for parsing the Pyposterous API data and
returning nice Python objects."""
def __init__(self, api, resource, return_conf):
self.api = api
self.resource = resource
self.return_conf = return_conf
self.output = []
# If the following is failing, either Posterous is giving us garbage
        # or there are connection issues occurring. Most likely connection issues.
try:
self.xml = ET.parse(self.resource)
except:
if self.resource.getcode() == 200:
raise PyposterousError("malformed XML returned by Posterous")
raise PyposterousError("%s connection error" % self.resource.getcode())
def parse(self):
# This is to handle the twitter api calls specifically.
if 'force_primative' in self.return_conf:
self.output = {}
for x in self.xml.getroot().getchildren():
if x.tag != 'err':
self.output[x.tag.lower()] = x.text
if self.output:
return self.output
else:
                # if self.output is empty, then an error occurred and we'll
# just let it continue on to the code below for it to be
# caught and thrown.
self.output = []
root = self.xml.getroot()
# Some V2 api calls return the output as the root of the document.
# this should handle those cases.
if root.tag in element_map:
self.output.append(self.build_object(root))
else:
# v1 stuff.
for element in root.getchildren():
obj = self.build_object(element)
if obj:
# Okay. This is a little weird. When the Posterous API returns
# results, it sometimes returns children elements as children
# of their parent (e.g. comments as children of their post),
# and sometimes they don't do that, and they just give
# everything as a big hairy list (e.g. both the post AND
# its comments at the top level). TODO: Ask dev group about this
# To fix this problem, I'm going to append subsequent elements
# to the previous element returned if the types don't match.
# 3 posts will return a list of 3 posts, 1 post and 2 comments
# will return 1 post with a list of 2 comments as an attrib.
try:
if type(obj) == type(self.output[-1]):
self.output.append(obj)
else:
attrib = obj.__class__.__name__.lower()
existing = getattr(self.output[-1], attrib, None)
if existing and type(existing) == list:
existing.append(obj)
elif not existing:
setattr(self.output[-1], attrib, [obj,])
else:
# If this happens, then my little XML inconsistency
                                # hack is overwriting a legitimate value.
raise PyposterousError("Posterous API response could not be parsed.")
except IndexError:
# There was no previous element!
self.output.append(obj)
self.output = self.clean_up(self.output)
output = self.output
if len(self.output) == 1 and 'force_list' not in self.return_conf:
output = self.output[0]
if len(self.output) == 0 and 'force_list' not in self.return_conf:
output = None
return output
def build_object(self, element):
"""Accepts an element tree element and builds an object based on the
type."""
if element.tag == 'err' or element.tag == 'error':
self.build_error(element)
obj = element_map.get(element.tag)
# Some Posterous calls don't seem to properly nest the XML. If the
# returned base tag isn't one of our base types, just add it to the
# last element parsed and hope for the best.
        # TODO: Post something to the Posterous API discussion group asking
        # about this.
# Troublesome api calls: get_post
tag = element.tag.lower()
val = element.text
if obj is None:
if element_map.get(tag):
                prop_val = self.build_object(element)
            else:
                prop_val = val
            try:
                setattr(self.output[-1], tag, self.clean_value(tag, prop_val))
except IndexError:
# There was nothing in self.output - weird.
pass
return obj
obj = obj(self.api)
# Add properties for all of element's children
for prop in element.getchildren():
prop_tag = prop.tag.lower()
            # If the element doesn't have any children, using the element map obj
# will hide the returned data. We don't want that. Most notably, this
# occurs when a post has a video attached to it. The thumb attribute
# is typically an Image object, but in the case of the video media
# element it's just a URL. The "and prop.getchildren()" should
# prevent Pyposterous from doing a conversion to an object for this
# anomalous data.
if element_map.get(prop_tag) and prop.getchildren():
# If the element is one of our base types, we need to create
# an object of it.
existing = getattr(obj, prop_tag, None)
if not existing:
setattr(obj, prop_tag, self.build_object(prop),)
elif type(existing) == list:
getattr(obj, prop_tag).append(self.build_object(prop))
else:
setattr(obj, prop_tag, [existing,])
getattr(obj, prop_tag).append(self.build_object(prop))
else:
# Base case - set a property called prop.tag in obj
setattr(obj, prop_tag, self.clean_value(prop_tag, prop.text))
return obj
def clean_up(self, obj):
"""Preforms some miscellaneous cleanup for attribute names that
aren't quite right."""
def clean_it(obj):
# Rename comment list to 'comments' and force it to be a list.
try:
if obj.comment:
obj.comments = obj.comment
del obj.comment
if not type(obj.comments) == list:
obj.comments = [obj.comments,]
except AttributeError:
pass
# Force media to be a list.
try:
if not type(obj.media) == list:
obj.media = [obj.media,]
except AttributeError:
pass
return obj
if type(obj) == list:
obj = [clean_it(x) for x in obj]
else:
obj = clean_it(obj)
return obj
def build_error(self, element):
"""Throws a PyposterousError based on the element specified.
"""
# This handles v1 api errors (business as usual)
if element.tag == 'err':
raise PyposterousError(element.get('msg'), element.get('code'))
# Api v2 errors are formatted differently than API v1 errors. This is
        # dirty, but it'll parse them.
message = "Unknown"
code = "Unknown"
for child in element.getchildren():
if child.tag == "message":
message = child.text
if child.tag == "code":
code = child.text
raise PyposterousError("%s" % message, "%s" % code)
def clean_value(self, name, value):
for names in attribute_map:
if name in names:
return attribute_map.get(names)(value)
return value
|
|
import copy
import operator
from functools import wraps, update_wrapper
import sys
from django.utils import six
# You can't trivially replace this with `functools.partial` because this binds
# to classes and returns bound instances, whereas functools.partial (on
# CPython) is a type and its instances don't bind.
def curry(_curried_func, *args, **kwargs):
def _curried(*moreargs, **morekwargs):
return _curried_func(*(args+moreargs), **dict(kwargs, **morekwargs))
return _curried
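# Illustrative usage sketch (not part of the original module): curry()
# pre-binds leading positional args and default keyword args.
def _curry_example():
    def greet(greeting, name, punctuation='!'):
        return '%s, %s%s' % (greeting, name, punctuation)
    hello = curry(greet, 'Hello')
    assert hello('world') == 'Hello, world!'
    assert hello('world', punctuation='?') == 'Hello, world?'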
def memoize(func, cache, num_args):
"""
Wrap a function so that results for any argument tuple are stored in
'cache'. Note that the args to the function must be usable as dictionary
keys.
Only the first num_args are considered when creating the key.
"""
@wraps(func)
def wrapper(*args):
mem_args = args[:num_args]
if mem_args in cache:
return cache[mem_args]
result = func(*args)
cache[mem_args] = result
return result
return wrapper
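# Illustrative usage sketch (not part of the original module): results are
# keyed on the first num_args positional arguments, which must be hashable.
def _memoize_example():
    cache = {}
    def add(a, b):
        return a + b
    cached_add = memoize(add, cache, 2)
    assert cached_add(2, 3) == 5
    assert cache[(2, 3)] == 5  # later calls with (2, 3) hit the cache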
class cached_property(object):
"""
Decorator that converts a method with a single self argument into a
property cached on the instance.
"""
def __init__(self, func):
self.func = func
def __get__(self, instance, type):
res = instance.__dict__[self.func.__name__] = self.func(instance)
return res
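# Illustrative usage sketch (not part of the original module): the first
# access runs the method and stores the result in the instance __dict__,
# which shadows the (non-data) descriptor on later lookups.
def _cached_property_example():
    class Thing(object):
        calls = [0]
        @cached_property
        def value(self):
            self.calls[0] += 1
            return 42
    t = Thing()
    assert t.value == 42 and t.value == 42
    assert Thing.calls == [1]  # computed once, then served from __dict__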
class Promise(object):
"""
This is just a base class for the proxy class created in
the closure of the lazy function. It can be used to recognize
promises in code.
"""
pass
def lazy(func, *resultclasses):
"""
Turns any callable into a lazy evaluated callable. You need to give result
classes or types -- at least one is needed so that the automatic forcing of
the lazy evaluation code is triggered. Results are not memoized; the
function is evaluated on every access.
"""
@total_ordering
class __proxy__(Promise):
"""
Encapsulate a function call and act as a proxy for methods that are
called on the result of that function. The function is not evaluated
until one of the methods on the result is called.
"""
__dispatch = None
def __init__(self, args, kw):
self.__args = args
self.__kw = kw
if self.__dispatch is None:
self.__prepare_class__()
def __reduce__(self):
return (
_lazy_proxy_unpickle,
(func, self.__args, self.__kw) + resultclasses
)
def __prepare_class__(cls):
cls.__dispatch = {}
for resultclass in resultclasses:
cls.__dispatch[resultclass] = {}
for type_ in reversed(resultclass.mro()):
for (k, v) in type_.__dict__.items():
# All __promise__ return the same wrapper method, but they
# also do setup, inserting the method into the dispatch
# dict.
meth = cls.__promise__(resultclass, k, v)
if hasattr(cls, k):
continue
setattr(cls, k, meth)
cls._delegate_bytes = bytes in resultclasses
cls._delegate_text = six.text_type in resultclasses
assert not (cls._delegate_bytes and cls._delegate_text), "Cannot call lazy() with both bytes and text return types."
if cls._delegate_text:
if six.PY3:
cls.__str__ = cls.__text_cast
else:
cls.__unicode__ = cls.__text_cast
elif cls._delegate_bytes:
if six.PY3:
cls.__bytes__ = cls.__bytes_cast
else:
cls.__str__ = cls.__bytes_cast
__prepare_class__ = classmethod(__prepare_class__)
def __promise__(cls, klass, funcname, method):
# Builds a wrapper around some magic method and registers that magic
# method for the given type and method name.
def __wrapper__(self, *args, **kw):
# Automatically triggers the evaluation of a lazy value and
# applies the given magic method of the result type.
res = func(*self.__args, **self.__kw)
for t in type(res).mro():
if t in self.__dispatch:
return self.__dispatch[t][funcname](res, *args, **kw)
raise TypeError("Lazy object returned unexpected type.")
if klass not in cls.__dispatch:
cls.__dispatch[klass] = {}
cls.__dispatch[klass][funcname] = method
return __wrapper__
__promise__ = classmethod(__promise__)
def __text_cast(self):
return func(*self.__args, **self.__kw)
def __bytes_cast(self):
return bytes(func(*self.__args, **self.__kw))
def __cast(self):
if self._delegate_bytes:
return self.__bytes_cast()
elif self._delegate_text:
return self.__text_cast()
else:
return func(*self.__args, **self.__kw)
def __eq__(self, other):
if isinstance(other, Promise):
other = other.__cast()
return self.__cast() == other
def __lt__(self, other):
if isinstance(other, Promise):
other = other.__cast()
return self.__cast() < other
__hash__ = object.__hash__
def __mod__(self, rhs):
if self._delegate_bytes and not six.PY3:
return bytes(self) % rhs
elif self._delegate_text:
return six.text_type(self) % rhs
return self.__cast() % rhs
def __deepcopy__(self, memo):
# Instances of this class are effectively immutable. It's just a
# collection of functions. So we don't need to do anything
# complicated for copying.
memo[id(self)] = self
return self
@wraps(func)
def __wrapper__(*args, **kw):
# Creates the proxy object, instead of the actual value.
return __proxy__(args, kw)
return __wrapper__
def _lazy_proxy_unpickle(func, args, kwargs, *resultclasses):
return lazy(func, *resultclasses)(*args, **kwargs)
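# Illustrative usage sketch (not part of the original module): the wrapped
# callable is only evaluated when the promise is used as one of the declared
# result types.
def _lazy_example():
    calls = []
    def build():
        calls.append(1)
        return 'built'
    promise = lazy(build, str)()
    assert calls == []                 # nothing evaluated yet
    assert promise.upper() == 'BUILT'  # first use triggers evaluation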
def allow_lazy(func, *resultclasses):
"""
A decorator that allows a function to be called with one or more lazy
arguments. If none of the args are lazy, the function is evaluated
immediately, otherwise a __proxy__ is returned that will evaluate the
function when needed.
"""
@wraps(func)
def wrapper(*args, **kwargs):
for arg in list(args) + list(six.itervalues(kwargs)):
if isinstance(arg, Promise):
break
else:
return func(*args, **kwargs)
return lazy(func, *resultclasses)(*args, **kwargs)
return wrapper
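# Illustrative usage sketch (not part of the original module): with plain
# arguments the function runs immediately; with a lazy argument a new
# promise is returned instead.
def _allow_lazy_example():
    capitalize = allow_lazy(lambda s: s.capitalize(), str)
    assert capitalize('django') == 'Django'   # evaluated eagerly
    promise = capitalize(lazy(lambda: 'django', str)())
    assert isinstance(promise, Promise)       # still lazy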
empty = object()
def new_method_proxy(func):
def inner(self, *args):
if self._wrapped is empty:
self._setup()
return func(self._wrapped, *args)
return inner
class LazyObject(object):
"""
A wrapper for another class that can be used to delay instantiation of the
wrapped class.
By subclassing, you have the opportunity to intercept and alter the
instantiation. If you don't need to do that, use SimpleLazyObject.
"""
def __init__(self):
self._wrapped = empty
__getattr__ = new_method_proxy(getattr)
def __setattr__(self, name, value):
if name == "_wrapped":
# Assign to __dict__ to avoid infinite __setattr__ loops.
self.__dict__["_wrapped"] = value
else:
if self._wrapped is empty:
self._setup()
setattr(self._wrapped, name, value)
def __delattr__(self, name):
if name == "_wrapped":
raise TypeError("can't delete _wrapped.")
if self._wrapped is empty:
self._setup()
delattr(self._wrapped, name)
def _setup(self):
"""
Must be implemented by subclasses to initialise the wrapped object.
"""
raise NotImplementedError
# introspection support:
__dir__ = new_method_proxy(dir)
# Workaround for http://bugs.python.org/issue12370
_super = super
class SimpleLazyObject(LazyObject):
"""
A lazy object initialised from any function.
Designed for compound objects of unknown type. For builtins or objects of
known type, use django.utils.functional.lazy.
"""
def __init__(self, func):
"""
Pass in a callable that returns the object to be wrapped.
If copies are made of the resulting SimpleLazyObject, which can happen
in various circumstances within Django, then you must ensure that the
callable can be safely run more than once and will return the same
value.
"""
self.__dict__['_setupfunc'] = func
_super(SimpleLazyObject, self).__init__()
def _setup(self):
self._wrapped = self._setupfunc()
if six.PY3:
__bytes__ = new_method_proxy(bytes)
__str__ = new_method_proxy(str)
else:
__str__ = new_method_proxy(str)
__unicode__ = new_method_proxy(unicode)
def __deepcopy__(self, memo):
if self._wrapped is empty:
# We have to use SimpleLazyObject, not self.__class__, because the
# latter is proxied.
result = SimpleLazyObject(self._setupfunc)
memo[id(self)] = result
return result
else:
return copy.deepcopy(self._wrapped, memo)
# Because we have messed with __class__ below, we confuse pickle as to what
# class we are pickling. It also appears to stop __reduce__ from being
# called. So, we define __getstate__ in a way that cooperates with the way
# that pickle interprets this class. This fails when the wrapped class is a
# builtin, but it is better than nothing.
def __getstate__(self):
if self._wrapped is empty:
self._setup()
return self._wrapped.__dict__
# Python 3.3 will call __reduce__ when pickling; these methods are needed
# to serialize and deserialize correctly. They are not called in earlier
# versions of Python.
@classmethod
def __newobj__(cls, *args):
return cls.__new__(cls, *args)
def __reduce__(self):
return (self.__newobj__, (self.__class__,), self.__getstate__())
# Need to pretend to be the wrapped class, for the sake of objects that care
# about this (especially in equality tests)
__class__ = property(new_method_proxy(operator.attrgetter("__class__")))
__eq__ = new_method_proxy(operator.eq)
__hash__ = new_method_proxy(hash)
__bool__ = new_method_proxy(bool) # Python 3
__nonzero__ = __bool__ # Python 2
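# Illustrative usage sketch (not part of the original module): instantiation
# of the wrapped object is deferred until the first attribute access.
def _simple_lazy_object_example():
    made = []
    class Config(object):
        def __init__(self):
            made.append(1)
            self.answer = 42
    obj = SimpleLazyObject(Config)
    assert made == []          # nothing built yet
    assert obj.answer == 42    # attribute access triggers _setup()
    assert made == [1]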
class lazy_property(property):
"""
A property that works with subclasses by wrapping the decorated
functions of the base class.
"""
def __new__(cls, fget=None, fset=None, fdel=None, doc=None):
if fget is not None:
@wraps(fget)
def fget(instance, instance_type=None, name=fget.__name__):
return getattr(instance, name)()
if fset is not None:
@wraps(fset)
def fset(instance, value, name=fset.__name__):
return getattr(instance, name)(value)
if fdel is not None:
@wraps(fdel)
def fdel(instance, name=fdel.__name__):
return getattr(instance, name)()
return property(fget, fset, fdel, doc)
def partition(predicate, values):
"""
Splits the values into two sets, based on the return value of the function
(True/False). e.g.:
>>> partition(lambda x: x > 3, range(5))
[0, 1, 2, 3], [4]
"""
results = ([], [])
for item in values:
results[predicate(item)].append(item)
return results
if sys.version_info >= (2,7,2):
from functools import total_ordering
else:
# For Python < 2.7.2. Python 2.6 does not have total_ordering, and
# total_ordering in 2.7 versions prior to 2.7.2 is buggy. See
# http://bugs.python.org/issue10042 for details. For these versions use
# code borrowed from Python 2.7.3.
def total_ordering(cls):
"""Class decorator that fills in missing ordering methods"""
convert = {
'__lt__': [('__gt__', lambda self, other: not (self < other or self == other)),
('__le__', lambda self, other: self < other or self == other),
('__ge__', lambda self, other: not self < other)],
'__le__': [('__ge__', lambda self, other: not self <= other or self == other),
('__lt__', lambda self, other: self <= other and not self == other),
('__gt__', lambda self, other: not self <= other)],
'__gt__': [('__lt__', lambda self, other: not (self > other or self == other)),
('__ge__', lambda self, other: self > other or self == other),
('__le__', lambda self, other: not self > other)],
'__ge__': [('__le__', lambda self, other: (not self >= other) or self == other),
('__gt__', lambda self, other: self >= other and not self == other),
('__lt__', lambda self, other: not self >= other)]
}
roots = set(dir(cls)) & set(convert)
if not roots:
raise ValueError('must define at least one ordering operation: < > <= >=')
root = max(roots) # prefer __lt__ to __le__ to __gt__ to __ge__
for opname, opfunc in convert[root]:
if opname not in roots:
opfunc.__name__ = opname
opfunc.__doc__ = getattr(int, opname).__doc__
setattr(cls, opname, opfunc)
return cls
|
|
# -*- encoding: utf-8 -*-
__author__ = 'kotaimen'
__date__ = '04/12/2016'
from troposphere import Base64, FindInMap, GetAtt, Join, Select, Sub, Split
from troposphere import ImportValue, Export
from troposphere import Condition, And, Equals, If, Not, Or
from troposphere import Template, Parameter, Ref, Tags, Output
from troposphere import AWS_ACCOUNT_ID, AWS_REGION, AWS_STACK_ID, \
AWS_STACK_NAME, AWS_NO_VALUE, AWS_URL_SUFFIX, AWS_PARTITION
from troposphere import Delete, Retain, Snapshot
from troposphere.policies import CreationPolicy, ResourceSignal, UpdatePolicy, \
AutoScalingReplacingUpdate, AutoScalingRollingUpdate
import troposphere.cloudformation as cloudformation
import troposphere.ec2 as ec2
import troposphere.iam as iam
import troposphere.rds as rds
from awacs.aws import Policy, Allow, Deny, Statement, Principal, Everybody
from awacs.aws import Condition, Bool, ArnEquals, StringEquals, IpAddress, Null
from awacs.aws import CurrentTime, EpochTime, MultiFactorAuthAge, Referer, \
SecureTransport, SourceArn, SourceIp, UserAgent
import awacs.sts
import awacs.cloudformation
import awacs.iam
import awacs.ec2
import awacs.logs
import cfnutil
#
# Template
#
t = Template()
t.add_version('2010-09-09')
t.add_description('Creates a simple Amazon RDS database instance with default '
                  'database parameters and optional encryption, IOPS storage, '
                  'multi-AZ deployment and read replicas in the same region.')
#
# Interface
#
parameter_groups = [
{
'Label': {'default': 'Network Configuration'},
'Parameters':
[
'VpcId',
'SubnetIds',
'SecurityGroup',
]
},
{
'Label': {'default': 'Database Basic Configuration'},
'Parameters':
[
# 'DatabaseName',
'DatabaseSnapshot',
'DatabaseClass',
'DatabaseEngine',
'DatabaseEngineVersion',
'DatabaseParameterGroupFamily',
'DatabaseMultiAz',
'DatabaseUser',
'DatabasePassword',
]
},
{
'Label': {'default': 'Database Storage Configuration'},
'Parameters':
[
'StorageSize',
'StorageType',
'StorageIops',
'StorageEncrypted',
'KmsKeyId',
]
},
{
'Label': {'default': 'Database Security Configuration'},
'Parameters':
[
'ClientLocation',
'PubliclyAccessible',
]
},
{
'Label': {'default': 'Database Replication Configuration'},
'Parameters':
[
'DatabaseReplication',
]
},
{
'Label': {'default': 'Database Monitoring Configuration'},
'Parameters':
[
'EnhancedMonitoringInterval',
'SnsTopicArn',
]
}
]
t.add_metadata(
{
'AWS::CloudFormation::Interface': {
'ParameterGroups': parameter_groups,
'ParameterLabels':
dict(cfnutil.generate_parameter_labels(parameter_groups))
}
}
)
#
# Parameters
#
param_vpcid = t.add_parameter(Parameter(
'VpcId',
Description='VpcId of an existing VPC.',
Type='AWS::EC2::VPC::Id'
))
param_subnetids = t.add_parameter(Parameter(
'SubnetIds',
Description='SubnetIds of existing subnets of the VPC',
Type='List<AWS::EC2::Subnet::Id>',
))
param_sg = t.add_parameter(Parameter(
'SecurityGroup',
    Description='Database security group id; a new security group will be '
                'created if this is left empty.',
Type='String',
Default='',
))
# param_dbname = t.add_parameter(Parameter(
# 'DatabaseName',
# Default='MyDatabase',
# Description='Database name',
# Type='String',
# MinLength='1',
# MaxLength='64',
# AllowedPattern='[a-zA-Z][a-zA-Z0-9]*',
# ConstraintDescription=('must begin with a letter and contain only '
# 'alphanumeric characters.')
# ))
param_db_snapshot = t.add_parameter(Parameter(
'DatabaseSnapshot',
Description='ARN of a DB snapshot to restore from',
Type='String',
Default='',
))
# https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.DBInstanceClass.html
param_db_class = t.add_parameter(Parameter(
'DatabaseClass',
Default='db.t2.micro',
Description='Database instance class',
Type='String',
AllowedValues=cfnutil.load_mapping('mapping/rds-instance-types.json'),
))
param_db_engine = t.add_parameter(Parameter(
'DatabaseEngine',
Default='postgres',
Description='Database engine',
Type='String',
AllowedValues=['postgres',
'mysql',
'mariadb'],
))
# https://docs.aws.amazon.com/AmazonRDS/latest/APIReference/API_CreateDBInstance.html
param_db_engine_version = t.add_parameter(Parameter(
'DatabaseEngineVersion',
Default='postgres-9.6.6',
    Description='Database engine version; must be a version that matches the '
                'specified database engine.',
Type='String',
AllowedValues=cfnutil.load_mapping('mapping/rds-versions.json'),
))
param_db_param_group_family = t.add_parameter(Parameter(
'DatabaseParameterGroupFamily',
Default='postgres9.6',
Description='Database parameter group family',
Type='String',
AllowedValues=cfnutil.load_mapping('mapping/rds-parameter-groups.json')
))
param_db_user = t.add_parameter(Parameter(
'DatabaseUser',
NoEcho=True,
Description='The database admin account username, ignored when '
'a snapshot is specified',
Type='String',
MinLength='1',
MaxLength='16',
AllowedPattern='[a-zA-Z][a-zA-Z0-9]*',
ConstraintDescription=('must begin with a letter and contain only '
'alphanumeric characters.')
))
param_db_password = t.add_parameter(Parameter(
'DatabasePassword',
NoEcho=True,
Description='The database admin account password, ignored when a snapshot '
'is specified',
Type='String',
MinLength='1',
MaxLength='41',
# AllowedPattern='[a-zA-Z0-9]*',
# ConstraintDescription='must contain only alphanumeric characters.'
))
param_db_multi_az = t.add_parameter(Parameter(
'DatabaseMultiAz',
Description='Whether use a multi-AZ Deployment',
Type='String',
Default='false',
AllowedValues=['true', 'false'],
))
param_db_storage_size = t.add_parameter(Parameter(
'StorageSize',
Default='5',
Description='The size of the database storage in GB',
Type='Number',
MinValue='5',
MaxValue='6144',
ConstraintDescription='must be between 5GB and 6TB.',
))
param_db_stroage_type = t.add_parameter(Parameter(
'StorageType',
Description='Database storage type',
Type='String',
Default='gp2',
AllowedValues=['gp2', 'io1', 'default'],
))
param_db_storage_iops = t.add_parameter(Parameter(
'StorageIops',
Description='IOPS capability of the database storage, only used when the '
'volume type is io1',
Type='Number',
Default='100',
MinValue='100',
MaxValue='30000',
ConstraintDescription='IOPS range is 100 to 30000'
))
param_db_storage_encrypted = t.add_parameter(Parameter(
'StorageEncrypted',
Description='Indicates whether the DB instance is encrypted.',
Default='false',
Type='String',
AllowedValues=['true', 'false'],
))
param_db_kms_key = t.add_parameter(Parameter(
'KmsKeyId',
    Description='The ARN of the KMS master key that is used to encrypt the DB '
                'instance. If you enable the StorageEncrypted property but '
                'don\'t specify this property, this template uses the '
                'default master key.',
Default='',
Type='String'
))
param_db_client_location = t.add_parameter(Parameter(
'ClientLocation',
    Description='Lock down database access (by default the database can be '
                'accessed from anywhere)',
Type='String',
MinLength='9',
MaxLength='18',
Default='0.0.0.0/0',
AllowedPattern='(\d{1,3})\.(\d{1,3})\.(\d{1,3})\.(\d{1,3})/(\d{1,2})',
ConstraintDescription='must be a valid CIDR range of the form x.x.x.x/x.',
))
param_db_publicly_accessible = t.add_parameter(Parameter(
'PubliclyAccessible',
Description='Whether the database endpoint is publicly accessible',
Type='String',
Default='false',
AllowedValues=['false', 'true'],
))
param_db_read_replica = t.add_parameter(Parameter(
'DatabaseReadReplicas',
    Description='Number of read replicas of the master database; setting this '
                'to 0 disables read replicas. Note that not all RDS engines '
                'support read replicas.',
Type='String',
Default='0',
AllowedValues=['0', '1', '2', '3'],
))
param_db_enhanced_monitoring_interval = t.add_parameter(Parameter(
'EnhancedMonitoringInterval',
    Description='Interval, in seconds, between points when Enhanced Monitoring '
                'metrics are collected for the DB instance; setting this to 0 '
                'disables enhanced monitoring.',
Type='String',
Default='0',
AllowedValues=['0', '1', '5', '10', '15', '30', '60']
))
param_sns_topic_arn = t.add_parameter(Parameter(
'SnsTopicArn',
    Description='ARN of an SNS topic that database event notifications are '
                'sent to; leave this blank to disable event notification.',
Type='String',
Default=''
))
#
# Condition
#
conditions = [
(
'CreateSecurityGroupCondition',
Equals(Ref(param_sg), '')
),
(
'PostgresCondition',
Equals(Ref(param_db_engine), 'postgres'),
),
(
'MysqlCondition',
Equals(Ref(param_db_engine), 'mysql'),
),
(
'MariadbCondition',
Equals(Ref(param_db_engine), 'mariadb'),
),
(
'OrcaleCondition',
Or(
Equals(Ref(param_db_engine), 'oracle-se1'),
Equals(Ref(param_db_engine), 'oracle-se2'),
)
),
(
'NewDatabaseCondition',
Equals(Ref(param_db_snapshot), ''),
),
(
'UseSnapshotCondition',
Not(Equals(Ref(param_db_snapshot), ''))
),
(
'IopsStorageCondition',
Equals(Ref(param_db_stroage_type), 'io1'),
),
(
'StorageEncryptedConditon',
Equals(Ref(param_db_storage_encrypted), 'true'),
),
(
'DefaultKmsCondition',
Equals(Ref(param_db_kms_key), '')
),
# (
# 'ChinaRegionCondition',
# Equals(Ref(AWS_REGION), 'cn-north-1')
# ),
(
'EnhancedMonitoringCondition',
Not(Equals(Ref(param_db_enhanced_monitoring_interval), '0')),
),
(
'DatabaseReadReplicaCondition1',
Or(
Equals(Ref(param_db_read_replica), '1'),
Equals(Ref(param_db_read_replica), '2'),
Equals(Ref(param_db_read_replica), '3'),
)
),
(
'DatabaseReadReplicaCondition2',
Or(
Equals(Ref(param_db_read_replica), '2'),
Equals(Ref(param_db_read_replica), '3'),
)
),
(
'DatabaseReadReplicaCondition3',
Equals(Ref(param_db_read_replica), '3')
),
(
'EventNotificationCondition',
Not(Equals(Ref(param_sns_topic_arn), ''))
)
]
for args in conditions:
t.add_condition(*args)
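# Each (name, condition) pair above becomes an entry in the rendered
# template's "Conditions" section; e.g. ('NewDatabaseCondition',
# Equals(Ref(param_db_snapshot), '')) renders roughly as:
#   "NewDatabaseCondition": {"Fn::Equals": [{"Ref": "DatabaseSnapshot"}, ""]}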
#
# Resources
#
rds_sg = t.add_resource(ec2.SecurityGroup(
'RdsSecurityGroup',
Condition='CreateSecurityGroupCondition',
VpcId=Ref(param_vpcid),
GroupDescription='Enable local postgres access',
SecurityGroupIngress=[
If('PostgresCondition',
ec2.SecurityGroupRule(
IpProtocol='tcp',
FromPort='5432',
ToPort='5432',
CidrIp=Ref(param_db_client_location),
),
Ref(AWS_NO_VALUE)),
If('MysqlCondition',
ec2.SecurityGroupRule(
IpProtocol='tcp',
FromPort='3306',
ToPort='3306',
CidrIp=Ref(param_db_client_location),
),
Ref(AWS_NO_VALUE)),
If('MariadbCondition',
ec2.SecurityGroupRule(
IpProtocol='tcp',
FromPort='3306',
ToPort='3306',
CidrIp=Ref(param_db_client_location),
),
Ref(AWS_NO_VALUE)),
If('OrcaleCondition',
ec2.SecurityGroupRule(
IpProtocol='tcp',
FromPort='1521',
ToPort='1521',
CidrIp=Ref(param_db_client_location),
),
Ref(AWS_NO_VALUE)),
],
))
subnet_group = t.add_resource(rds.DBSubnetGroup(
'DatabaseSubnetGroup',
DBSubnetGroupDescription='RDS subnet group',
SubnetIds=Ref(param_subnetids)
))
param_group = t.add_resource(rds.DBParameterGroup(
'DatabaseParameterGroup',
Description='RDS parameter group',
Family=Ref(param_db_param_group_family),
))
enhanced_monitoring_role = t.add_resource(iam.Role(
'EnhancedMonitoringRole',
Condition='EnhancedMonitoringCondition',
AssumeRolePolicyDocument=Policy(
Statement=[Statement(
Effect=Allow,
Action=[awacs.sts.AssumeRole],
Principal=Principal(
'Service', Sub('monitoring.rds.${AWS::URLSuffix}')
))
]),
ManagedPolicyArns=[
Sub(
'arn:${AWS::Partition}:iam::aws:policy/service-role/AmazonRDSEnhancedMonitoringRole')
],
))
rds_instance = t.add_resource(rds.DBInstance(
'RdsInstance',
# DeletionPolicy=Retain,
# DBName=Ref(param_dbname),
DBSnapshotIdentifier=If('UseSnapshotCondition', Ref(param_db_snapshot),
Ref(AWS_NO_VALUE)),
MasterUsername=If('NewDatabaseCondition', Ref(param_db_user),
Ref(AWS_NO_VALUE)),
MasterUserPassword=If('NewDatabaseCondition', Ref(param_db_password),
Ref(AWS_NO_VALUE)),
Engine=Ref(param_db_engine),
LicenseModel=If('OrcaleCondition', 'license-included', Ref(AWS_NO_VALUE)),
EngineVersion=Select(1, Split('-', Ref(param_db_engine_version))),
AllowMajorVersionUpgrade=False,
AutoMinorVersionUpgrade=True,
DBInstanceClass=Ref(param_db_class),
MultiAZ=Ref(param_db_multi_az),
StorageType=Ref(param_db_stroage_type),
AllocatedStorage=Ref(param_db_storage_size),
Iops=If('IopsStorageCondition', Ref(param_db_storage_iops),
Ref(AWS_NO_VALUE)),
StorageEncrypted=Ref(param_db_storage_encrypted),
KmsKeyId=If('StorageEncryptedConditon',
If('DefaultKmsCondition',
Ref(AWS_NO_VALUE),
Ref(param_db_kms_key)),
Ref(AWS_NO_VALUE),
),
DBSubnetGroupName=Ref(subnet_group),
DBParameterGroupName=Ref(param_group),
VPCSecurityGroups=[
If(
'CreateSecurityGroupCondition',
Ref(rds_sg),
Ref(param_sg)
)
],
PubliclyAccessible=Ref(
param_db_publicly_accessible),
MonitoringInterval=If(
'EnhancedMonitoringCondition',
Ref(param_db_enhanced_monitoring_interval),
Ref(AWS_NO_VALUE)),
MonitoringRoleArn=If(
'EnhancedMonitoringCondition',
GetAtt(enhanced_monitoring_role, 'Arn'),
Ref(AWS_NO_VALUE)),
BackupRetentionPeriod=30,
CopyTagsToSnapshot=True,
))
for n in [1, 2, 3]:
t.add_resource(rds.DBInstance(
'RdsReadReplicaInstance%d' % n,
Condition='DatabaseReadReplicaCondition%d' % n,
DependsOn='RdsInstance',
SourceDBInstanceIdentifier=Ref(rds_instance),
Engine=Ref(param_db_engine),
EngineVersion=Select(1, Split('-', Ref(param_db_engine_version))),
AllowMajorVersionUpgrade=False,
AutoMinorVersionUpgrade=True,
DBInstanceClass=Ref(param_db_class),
StorageType=Ref(param_db_stroage_type),
AllocatedStorage=Ref(param_db_storage_size),
Iops=If('IopsStorageCondition', Ref(param_db_storage_iops),
Ref(AWS_NO_VALUE)),
StorageEncrypted=Ref(param_db_storage_encrypted),
KmsKeyId=If('StorageEncryptedConditon',
If('DefaultKmsCondition',
Ref(AWS_NO_VALUE),
Ref(param_db_kms_key)),
Ref(AWS_NO_VALUE),
),
VPCSecurityGroups=[
If(
'CreateSecurityGroupCondition',
Ref(rds_sg),
Ref(param_sg)
)
],
PubliclyAccessible=Ref(
param_db_publicly_accessible),
MonitoringInterval=If(
'EnhancedMonitoringCondition',
Ref(param_db_enhanced_monitoring_interval),
Ref(AWS_NO_VALUE)),
MonitoringRoleArn=If(
'EnhancedMonitoringCondition',
GetAtt(enhanced_monitoring_role, 'Arn'),
Ref(AWS_NO_VALUE)),
))
instance_event_subscription = t.add_resource(rds.EventSubscription(
'InstanceEventSubscription',
Condition='EventNotificationCondition',
Enabled=True,
SnsTopicArn=Ref(param_sns_topic_arn),
SourceType='db-instance',
SourceIds=[
Ref(rds_instance),
If('DatabaseReadReplicaCondition1', Ref('RdsReadReplicaInstance1'),
Ref(AWS_NO_VALUE)),
If('DatabaseReadReplicaCondition2', Ref('RdsReadReplicaInstance2'),
Ref(AWS_NO_VALUE)),
If('DatabaseReadReplicaCondition3', Ref('RdsReadReplicaInstance3'),
Ref(AWS_NO_VALUE)),
]
))
# security_group_event_subscription = t.add_resource(rds.EventSubscription(
# 'SecurityGroupEventSubscription',
# Condition='EventNotificationCondition',
# Enabled=True,
# SnsTopicArn=Ref(param_sns_topic_arn),
# SourceType='db-security-group',
# SourceIds=[
# If(
# 'CreateSecurityGroupCondition',
# Ref(rds_sg),
# Ref(param_sg)
# )]
# ))
#
# Output
#
t.add_output([
Output('EndpointAddress',
Description='Endpoint address',
Value=GetAtt(rds_instance, 'Endpoint.Address')
),
Output('EndpointPort',
Description='Endpoint port',
Value=GetAtt(rds_instance, 'Endpoint.Port')
),
Output('DBInstanceIdentifier',
Description='Database instance identifier',
Value=Ref(rds_instance)
),
Output('Replica1EndpointAddress',
Condition='DatabaseReadReplicaCondition1',
Description='Endpoint address',
Value=GetAtt('RdsReadReplicaInstance1', 'Endpoint.Address')
),
Output('Replica1EndpointPort',
Condition='DatabaseReadReplicaCondition1',
Description='Endpoint port',
Value=GetAtt('RdsReadReplicaInstance1', 'Endpoint.Port')
),
Output('Replica1InstanceIdentifier',
Condition='DatabaseReadReplicaCondition1',
Description='Database instance identifier',
Value=Ref('RdsReadReplicaInstance1')
),
Output('Replica2EndpointAddress',
Condition='DatabaseReadReplicaCondition2',
Description='Endpoint address',
Value=GetAtt('RdsReadReplicaInstance2', 'Endpoint.Address')
),
Output('Replica2EndpointPort',
Condition='DatabaseReadReplicaCondition2',
Description='Endpoint port',
Value=GetAtt('RdsReadReplicaInstance2', 'Endpoint.Port')
),
Output('Replica2InstanceIdentifier',
Condition='DatabaseReadReplicaCondition2',
Description='Database instance identifier',
Value=Ref('RdsReadReplicaInstance2')
),
Output('Replica3EndpointAddress',
Condition='DatabaseReadReplicaCondition3',
Description='Endpoint address',
Value=GetAtt('RdsReadReplicaInstance3', 'Endpoint.Address')
),
Output('Replica3EndpointPort',
Condition='DatabaseReadReplicaCondition3',
Description='Endpoint port',
Value=GetAtt('RdsReadReplicaInstance3', 'Endpoint.Port')
),
Output('Replica3InstanceIdentifier',
Condition='DatabaseReadReplicaCondition3',
Description='Database instance identifier',
Value=Ref('RdsReadReplicaInstance3')
),
# Output('EnvironmentVariables',
# Description='Database environment variables',
# Value=Join('', [
# 'PGHOST=', GetAtt(rds_instance, 'Endpoint.Address'), ' ',
# 'PGPORT=', GetAtt(rds_instance, 'Endpoint.Port'), ' ',
# 'PGUSER=', Ref(param_db_user), ' ',
# 'PGPASSWORD=', Ref(param_db_password), ' ',
# ])),
])
#
# Write template
#
cfnutil.write(t, __file__.replace('Template.py', '.template.yaml'))
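# cfnutil.write is a local helper (not part of troposphere). The output path
# is derived from this file's name; assuming the script were named, say,
# SimpleRdsTemplate.py (hypothetical), the rendered template would be written
# alongside it as SimpleRds.template.yaml.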
|
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
import unittest
from mock import Mock, patch
from c7n_mailer import utils
class FormatStruct(unittest.TestCase):
def test_formats_struct(self):
expected = '{\n "foo": "bar"\n}'
actual = utils.format_struct({'foo': 'bar'})
self.assertEqual(expected, actual)
class StripPrefix(unittest.TestCase):
def test_strip_prefix(self):
self.assertEqual(utils.strip_prefix('aws.internet-gateway', 'aws.'), 'internet-gateway')
self.assertEqual(utils.strip_prefix('aws.s3', 'aws.'), 's3')
self.assertEqual(utils.strip_prefix('aws.webserver', 'aws.'), 'webserver')
self.assertEqual(utils.strip_prefix('nothing', 'aws.'), 'nothing')
self.assertEqual(utils.strip_prefix('azure.azserver', 'azure.'), 'azserver')
self.assertEqual(utils.strip_prefix('', 'aws.'), '')
class ResourceFormat(unittest.TestCase):
def test_efs(self):
self.assertEqual(
utils.resource_format(
{'Name': 'abc', 'FileSystemId': 'fsid', 'LifeCycleState': 'available'},
'efs'),
'name: abc id: fsid state: available')
def test_eip(self):
self.assertEqual(
utils.resource_format(
{'PublicIp': '8.8.8.8', 'Domain': 'vpc', 'AllocationId': 'eipxyz'},
'network-addr'),
'ip: 8.8.8.8 id: eipxyz scope: vpc')
def test_nat(self):
self.assertEqual(
utils.resource_format(
{'NatGatewayId': 'nat-xyz', 'State': 'available', 'VpcId': 'vpc-123'},
'nat-gateway'),
'id: nat-xyz state: available vpc: vpc-123')
def test_igw(self):
self.assertEqual(
utils.resource_format(
{'InternetGatewayId': 'igw-x', 'Attachments': []},
'aws.internet-gateway'),
'id: igw-x attachments: 0')
def test_s3(self):
self.assertEqual(
utils.resource_format(
{'Name': 'bucket-x'}, 'aws.s3'),
'bucket-x')
def test_alb(self):
self.assertEqual(
utils.resource_format(
{'LoadBalancerArn':
'arn:aws:elasticloadbalancing:us-east-1:367930536793:'
'loadbalancer/app/dev/1234567890',
'AvailabilityZones': [], 'Scheme': 'internal'},
'app-elb'),
'arn: arn:aws:elasticloadbalancing:us-east-1:367930536793:'
'loadbalancer/app/dev/1234567890'
' zones: 0 scheme: internal')
def test_cloudtrail(self):
self.assertEqual(
utils.resource_format(
{
"Name": "trail-x",
"S3BucketName": "trail-x-bucket",
"IncludeGlobalServiceEvents": True,
"IsMultiRegionTrail": False,
"HomeRegion": "eu-west-2",
"TrailARN": "arn:aws:cloudtrail:eu-west-2:123456789012:trail/trail-x",
"LogFileValidationEnabled": True,
"HasCustomEventSelectors": False,
"HasInsightSelectors": False,
"IsOrganizationTrail": False,
"Tags": [],
},
"aws.cloudtrail",
),
"trail-x",
)
class GetAwsUsernameFromEvent(unittest.TestCase):
    # note: principalId is very org/domain specific for federated users; it
    # would be good to get confirmation from capone on this event / test.
CLOUDTRAIL_EVENT = {
'detail': {
'userIdentity': {
"type": "IAMUser",
"principalId": "AIDAJ45Q7YFFAREXAMPLE",
"arn": "arn:aws:iam::123456789012:user/michael_bolton",
"accountId": "123456789012",
"accessKeyId": "AKIAIOSFODNN7EXAMPLE",
"userName": "michael_bolton"
}
}
}
def test_get(self):
username = utils.get_aws_username_from_event(
Mock(), self.CLOUDTRAIL_EVENT
)
self.assertEqual(username, 'michael_bolton')
def test_get_username_none(self):
self.assertEqual(
utils.get_aws_username_from_event(Mock(), None),
None
)
def test_get_username_identity_none(self):
evt = {'detail': {}}
self.assertEqual(
utils.get_aws_username_from_event(Mock(), evt),
None
)
def test_get_username_assumed_role(self):
evt = {
'detail': {
'userIdentity': {
'type': 'AssumedRole',
'arn': 'foo'
}
}
}
self.assertEqual(
utils.get_aws_username_from_event(Mock(), evt),
'foo'
)
def test_get_username_assumed_role_instance(self):
evt = {
'detail': {
'userIdentity': {
'type': 'AssumedRole',
'arn': 'foo/i-12345678'
}
}
}
self.assertEqual(
utils.get_aws_username_from_event(Mock(), evt),
None
)
def test_get_username_assumed_role_lambda(self):
evt = {
'detail': {
'userIdentity': {
'type': 'AssumedRole',
'arn': 'foo/awslambda'
}
}
}
self.assertEqual(
utils.get_aws_username_from_event(Mock(), evt),
None
)
def test_get_username_assumed_role_colons(self):
evt = {
'detail': {
'userIdentity': {
'type': 'AssumedRole',
'arn': 'foo/bar:baz:blam'
}
}
}
self.assertEqual(
utils.get_aws_username_from_event(Mock(), evt),
'baz:blam'
)
def test_get_username_iam(self):
evt = {
'detail': {
'userIdentity': {
'type': 'IAMUser',
'userName': 'bar'
}
}
}
self.assertEqual(
utils.get_aws_username_from_event(Mock(), evt),
'bar'
)
def test_get_username_root(self):
evt = {
'detail': {
'userIdentity': {
'type': 'Root'
}
}
}
self.assertEqual(
utils.get_aws_username_from_event(Mock(), evt),
None
)
def test_get_username_principalColon(self):
evt = {
'detail': {
'userIdentity': {
'type': 'foo',
'principalId': 'bar:baz'
}
}
}
self.assertEqual(
utils.get_aws_username_from_event(Mock(), evt),
'baz'
)
def test_get_username_principal(self):
evt = {
'detail': {
'userIdentity': {
'type': 'foo',
'principalId': 'blam'
}
}
}
self.assertEqual(
utils.get_aws_username_from_event(Mock(), evt),
'blam'
)
class ProviderSelector(unittest.TestCase):
def test_get_providers(self):
self.assertEqual(utils.get_provider({'queue_url': 'asq://'}), utils.Providers.Azure)
self.assertEqual(utils.get_provider({'queue_url': 'sqs://'}), utils.Providers.AWS)
class DecryptTests(unittest.TestCase):
@patch('c7n_mailer.utils.kms_decrypt')
def test_kms_decrypt(self, kms_decrypt_mock):
utils.decrypt({'queue_url': 'aws', 'test': 'test'}, Mock(), Mock(), 'test')
kms_decrypt_mock.assert_called_once()
@patch('c7n_mailer.azure_mailer.utils.azure_decrypt')
def test_azure_decrypt(self, azure_decrypt_mock):
utils.decrypt({'queue_url': 'asq://', 'test': 'test'}, Mock(), Mock(), 'test')
azure_decrypt_mock.assert_called_once()
def test_decrypt_none(self):
self.assertEqual(utils.decrypt({'queue_url': 'aws'}, Mock(), Mock(), 'test'), None)
self.assertEqual(utils.decrypt({'queue_url': 'asq://'}, Mock(), Mock(), 'test'), None)
|
|
import sys, os, copy, pickle, numpy, pylab, operator, itertools
import cv2
from shutil import copy as copyfile
from PyQt4.QtCore import *
from PyQt4.QtGui import *
import matplotlib.pyplot as plt
from DataParseApp import dataparseDialog
from sklearn.decomposition import NMF
projectpath=os.path.split(os.path.abspath(__file__))[0]
sys.path.append(os.path.join(projectpath,'ui'))
pythoncodepath=os.path.split(projectpath)[0]
jcapdataprocesspath=os.path.join(pythoncodepath, 'JCAPDataProcess')
sys.path.append(jcapdataprocesspath)
from VisualizeDataApp import visdataDialog
sys.path.append(os.path.join(jcapdataprocesspath,'AuxPrograms'))
from fcns_ui import *
from fcns_io import *
platemapvisprocesspath=os.path.join(pythoncodepath, 'JCAPPlatemapVisualize')
sys.path.append(platemapvisprocesspath)
from plate_image_align_Dialog import plateimagealignDialog
import numpy as np
###############UPDATE THIS TO BE THE FOLDER CONTAINING parameters.py
paramsfolder=r'K:\users\hte\Raman\39642\20170706analysis_bottom'
#paramsfolder=r'K:\users\hte\Raman\33444\20170608analysis'
#if not paramsfolder is None:
sys.path.append(paramsfolder)
from parameters import *
#else:
# plateidstr='3344'
#
# pathd={'ramanfile':r'K:\users\hte\Raman\33444\HSS_33444_map-1-_CRR-EM-copy.txt'}
# pathd['mainfolder']=os.path.split(pathd['ramanfile'])[0]
# pathd['savefolder']=os.path.join(pathd['mainfolder'], '20170607analysis')
# pathd['infopck']=pathd['ramanfile'][:-4]+'__info.pck'
# pathd['allspectra']=os.path.join(pathd['savefolder'],'allspectra.npy')
# pathd['nmfdata']=os.path.join(pathd['savefolder'],'nmf4.pck')
# pathd['edges']=os.path.join(pathd['savefolder'],'edges.png')
# pathd['mapfill']=os.path.join(pathd['savefolder'],'blobmap.png')
# pathd['blobd']=os.path.join(pathd['savefolder'],'blobd.pck')
# pathd['alignedsamples']=os.path.join(pathd['savefolder'],'alignedsamples.png')
# pathd['alignedsamplestxt']=os.path.join(pathd['savefolder'],'alignedsamples.txt')
# pathd['spectrafolder']=os.path.join(pathd['savefolder'],'sample_spectra')
# pathd['map']=os.path.join(pathd['spectrafolder'],'raman_sample_index_map.map')
# pathd['samplepixels']=os.path.join(pathd['spectrafolder'],'samplepixels.png')
# pathd['udibasepath']=os.path.join(pathd['savefolder'],'ave_rmn_')
#
# udi_ternary_projection_inds=[0, 1, 2]#only used for the all.udi file
#
# sample_list=[1850,1851,1852,1853,1854,1855,1905,1906,1907,1908,1909,1910,1911,1912,1913,1914,1915,1916,1917,1918,1919,1969,1970,1971,1972,1973,1974,1975,1976,1977,1978,1979,1980,1981,1982,1983,2033,2034,2035,2036,2037,2038,2039,2040,2041,2042,2043,2044,2045,2046,2047,2097,2098,2099,2100,2101,2102,2103,2104,2105,2106,2107,2108,2109,2110,2111]
# dx_smp=1.
# dy_smp=1.
#
# default_sample_blob_dict=dict({}, \
# smp_is_square=0, smp_width=1., bcknd_is_square=0, bcknd_min_width=1.3, bcknd_max_width=1.4, removedups=1\
# )
#
# show_help_messages=True
platemappath=getplatemappath_plateid(plateidstr)
if not os.path.isdir(pathd['mainfolder']):
print 'NOT A VALID FOLDER'
if not os.path.isdir(pathd['savefolder']):
os.mkdir(pathd['savefolder'])
if not os.path.isdir(pathd['spectrafolder']):
os.mkdir(pathd['spectrafolder'])
class MainMenu(QMainWindow):
def __init__(self, previousmm, execute=True, **kwargs):
super(MainMenu, self).__init__(None)
self.parseui=dataparseDialog(self, title='Visualize ANA, EXP, RUN data', **kwargs)
self.alignui=plateimagealignDialog(self, manual_image_init_bool=False)
if execute:
self.parseui.exec_()
def doNMF(datan,n_components=4):
# from Mitsu
    #alternatively PCA ... might be faster
nmf=NMF(n_components=n_components,init='nndsvd')
data_decomp_all=nmf.fit_transform(datan)
data_components_all=nmf.components_
return data_decomp_all,data_components_all
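# Illustrative usage sketch (synthetic data, never called at import time):
# decompose a stack of spectra into nonnegative basis spectra and weights.
def _donmf_example():
    spectra = numpy.random.rand(100, 512)   # 100 spectra, 512 wavenumbers
    weights, components = doNMF(spectra, n_components=4)
    # weights: (100, 4) activations; components: (4, 512) basis spectra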
def rgb_comp(arr2d, affine=True):
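    # Interprets each row of arr2d as (c, m, y, k) values in [0, 1]: fold the
    # K channel into CMY, then invert CMY to get RGB. Note that the 'affine'
    # flag is accepted but currently unused.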
cmy_cmyk=lambda a:a[:3]*(1.-a[3])+a[3]
rgb_cmy=lambda a:1.-a
rgb_cmyk=lambda a:rgb_cmy(cmy_cmyk(a))
return numpy.array([rgb_cmyk(a) for a in arr2d])
def imGen(data_decomp_all,ramaninfod,cmykindeces=[3, 2, 1, 0]):
cmykvals=copy.copy(data_decomp_all[:, cmykindeces])
cmykvals/=cmykvals.max(axis=0)[numpy.newaxis, :]
img=numpy.reshape(rgb_comp(cmykvals), (ramaninfod['xshape'], ramaninfod['yshape'], 3))
return img
def findEdges(img_gray, sigma = 0.33):
#this uses automatic thresholding from one of the cv2 tutorials
v = np.median(img_gray[img_gray>0])
lower = int(max(0, (1.0 - sigma) * v))
upper = int(min(255, (1.0 + sigma) * v))
edges = cv2.Canny(np.uint8(img_gray),lower,upper)
return edges
def findContours(edges):
#the contours are now found by searching the most external convex hull
    #this way most of the not fully closed samples are detected as well
im2, contours, hierarchy = cv2.findContours(edges, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_TC89_KCOS)
iWithContour = cv2.drawContours(edges, contours, -1, (255,20,100), 5)
mapimage = np.zeros_like(edges)
#this fills the contours
for i in range(len(contours)):
cv2.drawContours(mapimage, contours, i, color=255, thickness=-1)
#this is to calculate the center of each contour
x=[]
y=[]
for c in contours:
# compute the center of the contour
M = cv2.moments(c)
try:
x.append(M['m10']/(M['m00']))
y.append(M['m01']/(M['m00']))
except:
            #this was necessary as the divisor is sometimes 0;
            #yields good results but should be used with caution
x.append(M['m10']/(M['m00']+1e-23))
y.append(M['m01']/(M['m00']+1e-23))
return iWithContour, mapimage, contours, x, y
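# Illustrative only (synthetic data, never called at import time): sketch of
# the edge -> contour -> centroid flow implemented by findEdges and
# findContours above.
def _contour_example():
    img_gray = numpy.zeros((64, 64))
    img_gray[16:48, 16:48] = 200.   # one bright square "sample"
    edges = findEdges(img_gray)
    iWithContour, mapimage, contours, x, y = findContours(edges)
    # x[0], y[0] approximate the centre of the detected square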
mainapp=QApplication(sys.argv)
form=MainMenu(None, execute=False)
#form.show()
#form.setFocus()
#mainapp.exec_()
parseui=form.parseui
alignui=form.alignui
parseui.rawpathLineEdit.setText(pathd['ramanfile'])
parseui.infopathLineEdit.setText(pathd['infopck'])
parseui.getinfo(ramaninfop=pathd['infopck'], ramanfp=pathd['ramanfile'])#opens or creates
alignui.motimage_sample_marker_color=motimage_sample_marker_color
parseui.OutlierAveDoubleSpinBox.setValue(raman_spectrum_outlier_fraction_removal)
if os.path.isfile(pathd['allspectra']):
with open(pathd['allspectra'], mode='rb') as f:
fullramandataarray=numpy.load(f)
elif 1:
fullramandataarray=parseui.readfullramanarray(pathd['ramanfile'])#opens or creates
with open(pathd['allspectra'], mode='wb') as f:
numpy.save(f, fullramandataarray)
ramaninfod=parseui.ramaninfod
#parseui.exec_()
#ramaninfod['number of spectra']
#ramaninfod['xdata']
#ramaninfod['ydata']
#ramaninfod['Wavenumbers_str']
#ramaninfod['Spectrum 0 index']
ramaninfod['xdata']/=1000.
ramaninfod['ydata']/=1000.#convert to mm
ramaninfod['xshape']= len(np.unique(ramaninfod['xdata']))
ramaninfod['yshape']= len(np.unique(ramaninfod['ydata']))
ramaninfod['dx']= (ramaninfod['xdata'].max()-ramaninfod['xdata'].min())/(ramaninfod['xshape']-1)
ramaninfod['dy']= (ramaninfod['ydata'].max()-ramaninfod['ydata'].min())/(ramaninfod['yshape']-1)
nx=dx_smp/ramaninfod['dx']
ny=dy_smp/ramaninfod['dy']
ntot=nx*ny
ramanreshape=lambda arr: np.reshape(arr, (ramaninfod['xshape'], ramaninfod['yshape'])).T[::-1, ::-1]
ramannewshape=(ramaninfod['yshape'], ramaninfod['xshape'])
image_of_x=ramanreshape(ramaninfod['xdata'])
image_of_y=ramanreshape(ramaninfod['ydata'])
#extent=[ramaninfod['xdata'].max(), ramaninfod['xdata'].min(), ramaninfod['ydata'].min(), ramaninfod['ydata'].max()]
#extent=[ramaninfod['xdata'].max(), ramaninfod['xdata'].min(), ramaninfod['ydata'].max(), ramaninfod['ydata'].min()]
extent=[image_of_x[0, 0], image_of_x[-1, -1], image_of_y[0, 0], image_of_y[-1, -1]]
def ramanimshow(im, **kwargs):
plt.imshow(im, origin='lower', interpolation='none', aspect=1, extent=extent, **kwargs)
if os.path.isfile(pathd['nmfdata']):
with open(pathd['nmfdata'], mode='rb') as f:
tempd=pickle.load(f)
data_decomp_all,data_components_all,rgbimagedata=[tempd[k] for k in 'data_decomp_all,data_components_all,rgbimagedata'.split(',')]
else:
data_decomp_all,data_components_all = doNMF(fullramandataarray,4)
#rgbimagedata=imGen(data_decomp_all,ramaninfod)
rgbimagedata=np.zeros(ramannewshape+(3,), dtype='float32')
for i, arr in enumerate(data_decomp_all[:, :3].T):
if nmf_scaling_algorithm_for_image=='scale_by_max':
arr/=arr.max()
elif nmf_scaling_algorithm_for_image=='scale_log_by_max':
arr[arr!=0]=numpy.log10(arr[arr!=0])
arr/=arr.max()
        rgbimagedata[:, :, i]=ramanreshape(arr)
tempd={}
tempd['data_decomp_all']=data_decomp_all
tempd['data_components_all']=data_components_all
tempd['rgbimagedata']=rgbimagedata
with open(pathd['nmfdata'], mode='wb') as f:
tempd=pickle.dump(tempd, f)
#plt.clf()
#rgbimagedata=np.zeros(ramannewshape+(3,), dtype='float32')
#for i, arr in enumerate(data_decomp_all[:, :3].T):
# arr[arr!=0]=numpy.log10(arr[arr!=0])
# rgbimagedata[:, :, i]=np.array([ramanreshape(arr/arr.max())])
#ramanimshow(rgbimagedata)
#plt.show()
if 1 and os.path.isfile(pathd['blobd']):
with open(pathd['blobd'], mode='rb') as f:
blobd=pickle.load(f)
else:
edges = np.zeros(ramannewshape, dtype='uint8')
searchforoptimalbool=isinstance(find_edges_sigma_value, list)
ltemp=find_edges_sigma_value if searchforoptimalbool else [find_edges_sigma_value]
plt.clf()
for sigmacount, sigmaval in enumerate(ltemp):
if searchforoptimalbool:
plt.subplot(2, len(find_edges_sigma_value), sigmacount+1)
plt.title('edges for sigma %.2f' %sigmaval)
for i in range(data_decomp_all.shape[1]):
if nmf_scaling_algorithm_for_edge=='scale_by_max':
datadecomptemp=data_decomp_all[:,i]/data_decomp_all[:,i].max()
elif nmf_scaling_algorithm_for_edge=='scale_log_by_max':
datadecomptemp=data_decomp_all[:,i]
datadecomptemp[datadecomptemp!=0]=numpy.log10(datadecomptemp[datadecomptemp!=0])
datadecomptemp/=datadecomptemp.max()
arr=np.uint8(ramanreshape(datadecomptemp)*254)
edgetemp=findEdges(arr, sigma=sigmaval)
# plt.imshow(edgetemp)
# plt.show()
edges[np.where(edgetemp>0)] = 244
ramanimshow(edges)
if searchforoptimalbool:
plt.subplot(2, len(find_edges_sigma_value), len(find_edges_sigma_value)+sigmacount+1)
plt.title('mapfill for sigma %.2f' %sigmaval)
else:
plt.savefig(pathd['edges'])
plt.clf()
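        #NOTE: the 3-tuple unpack below matches OpenCV 3.x, where cv2.findContours
        #returns (image, contours, hierarchy); OpenCV 4.x returns only
        #(contours, hierarchy)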
im2, contours, hierarchy = cv2.findContours(edges, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_TC89_KCOS)
image_of_inds=ramanreshape(numpy.arange(ramaninfod['number of spectra']))
mapfill = np.zeros(ramannewshape, dtype='uint8')
blobd={}
l_mask=[cv2.drawContours(np.zeros(ramannewshape, dtype='uint8'), contours, i, color=1, thickness=-1) for i in range(len(contours))]
l_imageinds=[numpy.where(maski==1) for maski in l_mask]
l_xycen=np.array([[image_of_x[imageindsi].mean(), image_of_y[imageindsi].mean()] for imageindsi in l_imageinds])
indstomerge=sorted([(count2+count+1, count) for count, xy0 in enumerate(l_xycen[:-1]) for count2, xy1 in enumerate(l_xycen[count+1:]) if ((xy0-xy1)**2).sum()<(dx_smp**2+dy_smp**2)/5.])[::-1]
#indstomerge has highest index first so merge going down
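        #merge criterion: two contours are fragments of the same sample when their
        #centroids are closer than sqrt((dx_smp**2+dy_smp**2)/5.), i.e. well under
        #one sample spacing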
for indhigh, indlow in indstomerge:
# imageinds=l_imageinds.pop(indhigh)
# mask=l_mask.pop(indhigh)
imageinds=l_imageinds[indhigh]
mask=l_mask[indhigh]
            l_mask[indlow][imageinds]=1#update only the masks and then update everything else afterwards
l_imageinds=[numpy.where(maskj==1) for maskj in l_mask]
l_xycen=np.array([[image_of_x[imageindsj].mean(), image_of_y[imageindsj].mean()] for imageindsj in l_imageinds])
for imageinds, mask in zip(l_imageinds, l_mask):
indsinblob=sorted(list(image_of_inds[imageinds]))
relx=(image_of_x[imageinds].max()-image_of_x[imageinds].min())/dx_smp
rely=(image_of_y[imageinds].max()-image_of_y[imageinds].min())/dy_smp
if relx<0.5 or relx>1.4 or rely<0.5 or rely>1.4 or len(indsinblob)<ntot*0.5 or len(indsinblob)>ntot*1.5:
print 'skipped blob that was %.2f, %.2f of expected size with %d pixels' %(relx, rely, len(indsinblob))
continue
if numpy.any(mapfill[imageinds]==1):
print 'overlapping blobs detected'
xc=image_of_x[imageinds].mean()
yc=image_of_y[imageinds].mean()
mapfill[imageinds]=1
blobd[(xc, yc)]=indsinblob
ramanimshow(mapfill)
if searchforoptimalbool:
plt.show()
else:
plt.savefig(pathd['mapfill'])
if show_help_messages:
messageDialog(form, 'The auto detected and cleaned up blobs will be shown.\nThis is an image using the Raman motor coordinates').exec_()
plt.show()
with open(pathd['blobd'], mode='wb') as f:
pickle.dump(blobd, f)
if force_new_alignment or not os.path.isfile(pathd['map']):
alignui.knownblobsdict=blobd
alignui.mindistformatchingtoknownposition=mindistformatchingtoknownposition
alignui.openAddFile(p=platemappath)
alignui.image=rgbimagedata
alignui.motimage_extent=extent #left,right,bottom,top in mm
alignui.reloadimagewithextent()
#alignui.plotw_motimage.axes.imshow(alignui.image, origin='lower', interpolation='none', aspect=1, extent=alignui.motimage_extent)
xarr, yarr=np.array(blobd.keys()).T
alignui.plotw_motimage.axes.plot(xarr, yarr, 'wx', ms=4)
alignui.plotw_motimage.fig.canvas.draw()
if show_help_messages:
messageDialog(form, 'NMF analysis done and now plotting NMF image\nwith identified samples marked +. User can choose sample_no and \nright click to add calibration points.\nDo this for at least 1 sample marked with +.').exec_()
alignui.exec_()
alignui.sampleLineEdit.setText(','.join(['%d' %smp for smp in sample_list]))
alignui.addValuesSample()
if show_help_messages:
messageDialog(form, 'sample_no for export have been added. Check that \nthere are no NaN and if there are manually add calibration points\nas necessary and then remove+re-add the NaN samples.').exec_()
alignui.exec_()
alignui.plotw_motimage.fig.savefig(pathd['alignedsamples'])
with open(pathd['alignedsamplestxt'], mode='w') as f:
f.write(str(alignui.browser.toPlainText()))
alignui.openpckinfo(p=pathd['infopck'])
alignui.infox/=1000.
alignui.infoy/=1000.
alignui.perform_genmapfile(p=pathd['map'], **default_sample_blob_dict)
mapfill2=np.zeros(ramaninfod['number of spectra'], dtype='uint8')
for smp, inds in alignui.smp_inds_list__map:
mapfill2[inds]=2 if smp>0 else 1
mapfill2=ramanreshape(mapfill2)
plt.clf()
ramanimshow(mapfill2, vmin=0, vmax=2, cmap='gnuplot')
plt.savefig(pathd['samplepixels'])
if show_help_messages:
messageDialog(form, 'The NMF-identified samples use custom blob shapes and\nthe rest of the requested samples use default sample shape, resulting\nin the following map of pixels that will be exported.').exec_()
plt.show()
parseui.savepathLineEdit.setText(pathd['spectrafolder'])
parseui.match(copypath=pathd['map'])
parseui.extract()
parseui.saveave()
#parseui.readresultsfolder()
if show_help_messages:
messageDialog(form, 'The .rmn files have now been saved, so you can use\nthis next dialog to visualize data or close it to generate\nthe .udi files and open in JCAPDataProcess Visualizer').exec_()
parseui.exec_()
#only initialize visdataDialog so only created when necessary
visui=visdataDialog(form, title='Visualize ANA, EXP, RUN data')
visui.openontheflyfolder(folderpath=pathd['spectrafolder'], plateidstr=plateidstr)
visui.BatchComboBox.setCurrentIndex(2)
visui.runbatchprocess()
savep=pathd['udibasepath']+'all.udi'
visui.get_xy_plate_info_browsersamples(saveudibool=True, ternary_el_inds_for_udi_export=udi_ternary_projection_inds, savep=savep)
numelsincompspacebeforeternaryprojection=visui.fomplotd['comps'].shape[1]
if numelsincompspacebeforeternaryprojection>3:
for i, indstup in enumerate(itertools.combinations(range(numelsincompspacebeforeternaryprojection), 3)):
excludeinds=[ind for ind in range(numelsincompspacebeforeternaryprojection) if not ind in indstup]
inds_where_excluded_els_all_zero=numpy.where(visui.fomplotd['comps'][:, excludeinds].max(axis=1)==0)[0]
if len(inds_where_excluded_els_all_zero)==0:
continue
smplist=[visui.fomplotd['sample_no'][fomplotind] for fomplotind in inds_where_excluded_els_all_zero]
visui.remallsamples()
visui.addrem_select_fomplotdinds(remove=False, smplist=smplist)
savep=''.join([pathd['udibasepath']]+[visui.ellabels[ind] for ind in indstup]+['.udi'])
visui.get_xy_plate_info_browsersamples(saveudibool=True, ternary_el_inds_for_udi_export=indstup, savep=savep)
if show_help_messages:
messageDialog(form, 'udi files now saved and JCAPDataProcess\nVisualizer will be opened for your use.').exec_()
visui.exec_()
if show_help_messages:
messageDialog(form, 'There is nothing more to do and continuing will raise an error.').exec_()
errorattheend #intentional NameError: halts the script here, as warned in the dialog above
|
|
from __future__ import absolute_import, unicode_literals
from anyjson import loads
from django import forms
from django.conf import settings
from django.contrib import admin
from django.contrib.admin import helpers
from django.contrib.admin.views import main as main_views
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.utils.html import escape
from django.utils.translation import ugettext_lazy as _
from celery import current_app
from celery import states
from celery.task.control import broadcast, revoke, rate_limit
from celery.utils.text import abbrtask
from .admin_utils import action, display_field, fixedwidth
from .models import (
TaskState, WorkerState,
PeriodicTask, IntervalSchedule, CrontabSchedule,
)
from .humanize import naturaldate
from .utils import is_database_scheduler
try:
from django.utils.encoding import force_text
except ImportError:
from django.utils.encoding import force_unicode as force_text # noqa
TASK_STATE_COLORS = {states.SUCCESS: 'green',
states.FAILURE: 'red',
states.REVOKED: 'magenta',
states.STARTED: 'yellow',
states.RETRY: 'orange',
'RECEIVED': 'blue'}
NODE_STATE_COLORS = {'ONLINE': 'green',
'OFFLINE': 'gray'}
class MonitorList(main_views.ChangeList):
def __init__(self, *args, **kwargs):
super(MonitorList, self).__init__(*args, **kwargs)
self.title = self.model_admin.list_page_title
@display_field(_('state'), 'state')
def colored_state(task):
state = escape(task.state)
color = TASK_STATE_COLORS.get(task.state, 'black')
return '<b><span style="color: {0};">{1}</span></b>'.format(color, state)
@display_field(_('state'), 'last_heartbeat')
def node_state(node):
    state = 'ONLINE' if node.is_alive() else 'OFFLINE'
color = NODE_STATE_COLORS[state]
return '<b><span style="color: {0};">{1}</span></b>'.format(color, state)
@display_field(_('ETA'), 'eta')
def eta(task):
if not task.eta:
return '<span style="color: gray;">none</span>'
return escape(task.eta)
@display_field(_('when'), 'tstamp')
def tstamp(task):
return '<div title="{0}">{1}</div>'.format(
escape(str(task.tstamp)), escape(naturaldate(task.tstamp)),
)
@display_field(_('name'), 'name')
def name(task):
short_name = abbrtask(task.name, 16)
return '<div title="{0}"><b>{1}</b></div>'.format(
escape(task.name), escape(short_name),
)
class ModelMonitor(admin.ModelAdmin):
can_add = False
can_delete = False
def get_changelist(self, request, **kwargs):
return MonitorList
def change_view(self, request, object_id, extra_context=None):
extra_context = extra_context or {}
extra_context.setdefault('title', self.detail_title)
return super(ModelMonitor, self).change_view(
request, object_id, extra_context=extra_context,
)
def has_delete_permission(self, request, obj=None):
if not self.can_delete:
return False
return super(ModelMonitor, self).has_delete_permission(request, obj)
def has_add_permission(self, request):
if not self.can_add:
return False
return super(ModelMonitor, self).has_add_permission(request)
class TaskMonitor(ModelMonitor):
detail_title = _('Task detail')
list_page_title = _('Tasks')
rate_limit_confirmation_template = 'djcelery/confirm_rate_limit.html'
date_hierarchy = 'tstamp'
fieldsets = (
(None, {
'fields': ('state', 'task_id', 'name', 'args', 'kwargs',
'eta', 'runtime', 'worker', 'tstamp'),
'classes': ('extrapretty', ),
}),
('Details', {
'classes': ('collapse', 'extrapretty'),
'fields': ('result', 'traceback', 'expires'),
}),
)
list_display = (
fixedwidth('task_id', name=_('UUID'), pt=8),
colored_state,
name,
fixedwidth('args', pretty=True),
fixedwidth('kwargs', pretty=True),
eta,
tstamp,
'worker',
)
readonly_fields = (
'state', 'task_id', 'name', 'args', 'kwargs',
'eta', 'runtime', 'worker', 'result', 'traceback',
'expires', 'tstamp',
)
list_filter = ('state', 'name', 'tstamp', 'eta', 'worker')
search_fields = ('name', 'task_id', 'args', 'kwargs', 'worker__hostname')
actions = ['revoke_tasks',
'terminate_tasks',
'kill_tasks',
'rate_limit_tasks']
class Media:
css = {'all': ('djcelery/style.css', )}
@action(_('Revoke selected tasks'))
def revoke_tasks(self, request, queryset):
with current_app.default_connection() as connection:
for state in queryset:
revoke(state.task_id, connection=connection)
@action(_('Terminate selected tasks'))
def terminate_tasks(self, request, queryset):
with current_app.default_connection() as connection:
for state in queryset:
revoke(state.task_id, connection=connection, terminate=True)
@action(_('Kill selected tasks'))
def kill_tasks(self, request, queryset):
with current_app.default_connection() as connection:
for state in queryset:
revoke(state.task_id, connection=connection,
terminate=True, signal='KILL')
@action(_('Rate limit selected tasks'))
def rate_limit_tasks(self, request, queryset):
tasks = set([task.name for task in queryset])
opts = self.model._meta
app_label = opts.app_label
if request.POST.get('post'):
rate = request.POST['rate_limit']
with current_app.default_connection() as connection:
for task_name in tasks:
rate_limit(task_name, rate, connection=connection)
return None
context = {
'title': _('Rate limit selection'),
'queryset': queryset,
'object_name': force_text(opts.verbose_name),
'action_checkbox_name': helpers.ACTION_CHECKBOX_NAME,
'opts': opts,
'app_label': app_label,
}
return render_to_response(
self.rate_limit_confirmation_template, context,
context_instance=RequestContext(request),
)
def get_actions(self, request):
actions = super(TaskMonitor, self).get_actions(request)
actions.pop('delete_selected', None)
return actions
def get_queryset(self, request):
qs = super(TaskMonitor, self).get_queryset(request)
return qs.select_related('worker')
class WorkerMonitor(ModelMonitor):
can_add = True
detail_title = _('Node detail')
list_page_title = _('Worker Nodes')
list_display = ('hostname', node_state)
readonly_fields = ('last_heartbeat', )
actions = ['shutdown_nodes',
'enable_events',
'disable_events']
@action(_('Shutdown selected worker nodes'))
def shutdown_nodes(self, request, queryset):
broadcast('shutdown', destination=[n.hostname for n in queryset])
@action(_('Enable event mode for selected nodes.'))
def enable_events(self, request, queryset):
broadcast('enable_events',
destination=[n.hostname for n in queryset])
@action(_('Disable event mode for selected nodes.'))
def disable_events(self, request, queryset):
broadcast('disable_events',
destination=[n.hostname for n in queryset])
def get_actions(self, request):
actions = super(WorkerMonitor, self).get_actions(request)
actions.pop('delete_selected', None)
return actions
admin.site.register(TaskState, TaskMonitor)
admin.site.register(WorkerState, WorkerMonitor)
# ### Periodic Tasks
class LaxChoiceField(forms.ChoiceField):
def valid_value(self, value):
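        # Accept any submitted value: the registered-task choices are generated
        # once per form class and can go stale if the task registry changes.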
return True
def periodic_task_form():
current_app.loader.import_default_modules()
    tasks = sorted(name for name in current_app.tasks
                   if not name.startswith('celery.'))
choices = (('', ''), ) + tuple(zip(tasks, tasks))
class PeriodicTaskForm(forms.ModelForm):
regtask = LaxChoiceField(label=_('Task (registered)'),
choices=choices, required=False)
task = forms.CharField(label=_('Task (custom)'), required=False,
max_length=200)
class Meta:
model = PeriodicTask
exclude = ()
def clean(self):
data = super(PeriodicTaskForm, self).clean()
regtask = data.get('regtask')
if regtask:
data['task'] = regtask
if not data['task']:
exc = forms.ValidationError(_('Need name of task'))
self._errors['task'] = self.error_class(exc.messages)
raise exc
return data
def _clean_json(self, field):
value = self.cleaned_data[field]
try:
loads(value)
except ValueError as exc:
raise forms.ValidationError(
_('Unable to parse JSON: %s') % exc,
)
return value
def clean_args(self):
return self._clean_json('args')
def clean_kwargs(self):
return self._clean_json('kwargs')
return PeriodicTaskForm
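# periodic_task_form() is a factory rather than a module-level form class so the
# registered-task choices are recomputed (after import_default_modules()) each
# time it is called; PeriodicTaskAdmin.__init__ below refreshes the form on every
# admin instantiation for this reason.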
class PeriodicTaskAdmin(admin.ModelAdmin):
model = PeriodicTask
form = periodic_task_form()
search_fields = ('name', 'task',)
list_display = (
'__unicode__',
'task',
'interval',
'args',
'kwargs',
'enabled',
)
actions = ['enable_tasks',
'disable_tasks']
fieldsets = (
(None, {
'fields': ('name', 'regtask', 'task', 'enabled'),
'classes': ('extrapretty', 'wide'),
}),
('Schedule', {
'fields': ('interval', 'crontab'),
'classes': ('extrapretty', 'wide', ),
}),
('Arguments', {
'fields': ('args', 'kwargs'),
'classes': ('extrapretty', 'wide', 'collapse'),
}),
('Execution Options', {
'fields': ('expires', 'queue', 'exchange', 'routing_key'),
'classes': ('extrapretty', 'wide', 'collapse'),
}),
)
def __init__(self, *args, **kwargs):
super(PeriodicTaskAdmin, self).__init__(*args, **kwargs)
self.form = periodic_task_form()
def changelist_view(self, request, extra_context=None):
extra_context = extra_context or {}
scheduler = getattr(settings, 'CELERYBEAT_SCHEDULER', None)
extra_context['wrong_scheduler'] = not is_database_scheduler(scheduler)
return super(PeriodicTaskAdmin, self).changelist_view(request,
extra_context)
def get_queryset(self, request):
qs = super(PeriodicTaskAdmin, self).get_queryset(request)
return qs.select_related('interval', 'crontab')
@action(_('Enable selected periodic tasks'))
def enable_tasks(self, request, queryset):
queryset.update(enabled=True)
@action(_('Disable selected periodic tasks'))
def disable_tasks(self, request, queryset):
queryset.update(enabled=False)
admin.site.register(IntervalSchedule)
admin.site.register(CrontabSchedule)
admin.site.register(PeriodicTask, PeriodicTaskAdmin)
|
|
# Copyright (c) 2010-2012 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import re
from test.unit import FakeLogger
from swift.container import sync
from swift.common import utils
from swiftclient import ClientException
utils.HASH_PATH_SUFFIX = 'endcap'
class FakeRing(object):
def __init__(self):
self.devs = [{'ip': '10.0.0.%s' % x, 'port': 1000 + x, 'device': 'sda'}
for x in xrange(3)]
def get_nodes(self, account, container=None, obj=None):
return 1, list(self.devs)
class FakeContainerBroker(object):
def __init__(self, path, metadata=None, info=None, deleted=False,
items_since=None):
self.db_file = path
self.metadata = metadata if metadata else {}
self.info = info if info else {}
self.deleted = deleted
self.items_since = items_since if items_since else []
self.sync_point1 = -1
self.sync_point2 = -1
def get_info(self):
return self.info
def is_deleted(self):
return self.deleted
def get_items_since(self, sync_point, limit):
if sync_point < 0:
sync_point = 0
return self.items_since[sync_point:sync_point + limit]
def set_x_container_sync_points(self, sync_point1, sync_point2):
self.sync_point1 = sync_point1
self.sync_point2 = sync_point2
class TestContainerSync(unittest.TestCase):
def test_Iter2FileLikeObject(self):
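        # _Iter2FileLikeObject adapts an iterable of strings to a file-like
        # read() interface; as the assertions below exercise, read(n) may return
        # fewer than n bytes, and read() with no argument drains the remainder.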
flo = sync._Iter2FileLikeObject(iter(['123', '4567', '89', '0']))
expect = '1234567890'
got = flo.read(2)
self.assertTrue(len(got) <= 2)
self.assertEquals(got, expect[:len(got)])
expect = expect[len(got):]
got = flo.read(5)
self.assertTrue(len(got) <= 5)
self.assertEquals(got, expect[:len(got)])
expect = expect[len(got):]
self.assertEquals(flo.read(), expect)
self.assertEquals(flo.read(), '')
self.assertEquals(flo.read(2), '')
flo = sync._Iter2FileLikeObject(iter(['123', '4567', '89', '0']))
self.assertEquals(flo.read(), '1234567890')
self.assertEquals(flo.read(), '')
self.assertEquals(flo.read(2), '')
def test_init(self):
cring = FakeRing()
oring = FakeRing()
cs = sync.ContainerSync({}, container_ring=cring, object_ring=oring)
self.assertTrue(cs.container_ring is cring)
self.assertTrue(cs.object_ring is oring)
def test_run_forever(self):
        # This runs run_forever with fakes to succeed for two loops, the first
# causing a report but no interval sleep, the second no report but an
# interval sleep.
time_calls = [0]
sleep_calls = []
audit_location_generator_calls = [0]
def fake_time():
time_calls[0] += 1
returns = [1, # Initialized reported time
1, # Start time
3602, # Is it report time (yes)
3602, # Report time
3602, # Elapsed time for "under interval" (no)
3602, # Start time
3603, # Is it report time (no)
3603, # Elapsed time for "under interval" (yes)
]
if time_calls[0] == len(returns) + 1:
raise Exception('we are now done')
return returns[time_calls[0] - 1]
def fake_sleep(amount):
sleep_calls.append(amount)
def fake_audit_location_generator(*args, **kwargs):
audit_location_generator_calls[0] += 1
# Makes .container_sync() short-circuit because 'path' doesn't end
# with .db
return [('path', 'device', 'partition')]
orig_time = sync.time
orig_sleep = sync.sleep
orig_audit_location_generator = sync.audit_location_generator
try:
sync.time = fake_time
sync.sleep = fake_sleep
sync.audit_location_generator = fake_audit_location_generator
cs = sync.ContainerSync({}, container_ring=FakeRing(),
object_ring=FakeRing())
cs.run_forever()
except Exception, err:
if str(err) != 'we are now done':
raise
finally:
sync.time = orig_time
sync.sleep = orig_sleep
sync.audit_location_generator = orig_audit_location_generator
self.assertEquals(time_calls, [9])
self.assertEquals(len(sleep_calls), 2)
self.assertTrue(sleep_calls[0] <= cs.interval)
self.assertTrue(sleep_calls[1] == cs.interval - 1)
self.assertEquals(audit_location_generator_calls, [2])
self.assertEquals(cs.reported, 3602)
def test_run_once(self):
        # This runs run_once with fakes twice, the first causing an interim
        # report, the second with no interim report.
time_calls = [0]
audit_location_generator_calls = [0]
def fake_time():
time_calls[0] += 1
returns = [1, # Initialized reported time
1, # Start time
3602, # Is it report time (yes)
3602, # Report time
3602, # End report time
3602, # For elapsed
3602, # Start time
3603, # Is it report time (no)
3604, # End report time
3605, # For elapsed
]
if time_calls[0] == len(returns) + 1:
raise Exception('we are now done')
return returns[time_calls[0] - 1]
def fake_audit_location_generator(*args, **kwargs):
audit_location_generator_calls[0] += 1
# Makes .container_sync() short-circuit because 'path' doesn't end
# with .db
return [('path', 'device', 'partition')]
orig_time = sync.time
orig_audit_location_generator = sync.audit_location_generator
try:
sync.time = fake_time
sync.audit_location_generator = fake_audit_location_generator
cs = sync.ContainerSync({}, container_ring=FakeRing(),
object_ring=FakeRing())
cs.run_once()
self.assertEquals(time_calls, [6])
self.assertEquals(audit_location_generator_calls, [1])
self.assertEquals(cs.reported, 3602)
cs.run_once()
except Exception, err:
if str(err) != 'we are now done':
raise
finally:
sync.time = orig_time
sync.audit_location_generator = orig_audit_location_generator
self.assertEquals(time_calls, [10])
self.assertEquals(audit_location_generator_calls, [2])
self.assertEquals(cs.reported, 3604)
def test_container_sync_not_db(self):
cring = FakeRing()
oring = FakeRing()
cs = sync.ContainerSync({}, container_ring=cring, object_ring=oring)
self.assertEquals(cs.container_failures, 0)
def test_container_sync_missing_db(self):
cring = FakeRing()
oring = FakeRing()
cs = sync.ContainerSync({}, container_ring=cring, object_ring=oring)
cs.container_sync('isa.db')
self.assertEquals(cs.container_failures, 1)
def test_container_sync_not_my_db(self):
# Db could be there due to handoff replication so test that we ignore
# those.
cring = FakeRing()
oring = FakeRing()
cs = sync.ContainerSync({}, container_ring=cring, object_ring=oring)
orig_ContainerBroker = sync.ContainerBroker
try:
sync.ContainerBroker = lambda p: FakeContainerBroker(p,
info={'account': 'a', 'container': 'c'})
cs._myips = ['127.0.0.1'] # No match
cs._myport = 1 # No match
cs.container_sync('isa.db')
self.assertEquals(cs.container_failures, 0)
cs._myips = ['10.0.0.0'] # Match
cs._myport = 1 # No match
cs.container_sync('isa.db')
self.assertEquals(cs.container_failures, 0)
cs._myips = ['127.0.0.1'] # No match
cs._myport = 1000 # Match
cs.container_sync('isa.db')
self.assertEquals(cs.container_failures, 0)
cs._myips = ['10.0.0.0'] # Match
cs._myport = 1000 # Match
# This complete match will cause the 1 container failure since the
# broker's info doesn't contain sync point keys
cs.container_sync('isa.db')
self.assertEquals(cs.container_failures, 1)
finally:
sync.ContainerBroker = orig_ContainerBroker
def test_container_sync_deleted(self):
cring = FakeRing()
oring = FakeRing()
cs = sync.ContainerSync({}, container_ring=cring, object_ring=oring)
orig_ContainerBroker = sync.ContainerBroker
try:
sync.ContainerBroker = lambda p: FakeContainerBroker(p,
info={'account': 'a', 'container': 'c'}, deleted=False)
cs._myips = ['10.0.0.0'] # Match
cs._myport = 1000 # Match
# This complete match will cause the 1 container failure since the
# broker's info doesn't contain sync point keys
cs.container_sync('isa.db')
self.assertEquals(cs.container_failures, 1)
sync.ContainerBroker = lambda p: FakeContainerBroker(p,
info={'account': 'a', 'container': 'c'}, deleted=True)
# This complete match will not cause any more container failures
# since the broker indicates deletion
cs.container_sync('isa.db')
self.assertEquals(cs.container_failures, 1)
finally:
sync.ContainerBroker = orig_ContainerBroker
def test_container_sync_no_to_or_key(self):
cring = FakeRing()
oring = FakeRing()
cs = sync.ContainerSync({}, container_ring=cring, object_ring=oring)
orig_ContainerBroker = sync.ContainerBroker
try:
sync.ContainerBroker = lambda p: FakeContainerBroker(p,
info={'account': 'a', 'container': 'c',
'x_container_sync_point1': -1,
'x_container_sync_point2': -1})
cs._myips = ['10.0.0.0'] # Match
cs._myport = 1000 # Match
# This complete match will be skipped since the broker's metadata
# has no x-container-sync-to or x-container-sync-key
cs.container_sync('isa.db')
self.assertEquals(cs.container_failures, 0)
self.assertEquals(cs.container_skips, 1)
sync.ContainerBroker = lambda p: FakeContainerBroker(p,
info={'account': 'a', 'container': 'c',
'x_container_sync_point1': -1,
'x_container_sync_point2': -1},
metadata={'x-container-sync-to': ('http://127.0.0.1/a/c', 1)})
cs._myips = ['10.0.0.0'] # Match
cs._myport = 1000 # Match
# This complete match will be skipped since the broker's metadata
# has no x-container-sync-key
cs.container_sync('isa.db')
self.assertEquals(cs.container_failures, 0)
self.assertEquals(cs.container_skips, 2)
sync.ContainerBroker = lambda p: FakeContainerBroker(p,
info={'account': 'a', 'container': 'c',
'x_container_sync_point1': -1,
'x_container_sync_point2': -1},
metadata={'x-container-sync-key': ('key', 1)})
cs._myips = ['10.0.0.0'] # Match
cs._myport = 1000 # Match
# This complete match will be skipped since the broker's metadata
# has no x-container-sync-to
cs.container_sync('isa.db')
self.assertEquals(cs.container_failures, 0)
self.assertEquals(cs.container_skips, 3)
sync.ContainerBroker = lambda p: FakeContainerBroker(p,
info={'account': 'a', 'container': 'c',
'x_container_sync_point1': -1,
'x_container_sync_point2': -1},
metadata={'x-container-sync-to': ('http://127.0.0.1/a/c', 1),
'x-container-sync-key': ('key', 1)})
cs._myips = ['10.0.0.0'] # Match
cs._myport = 1000 # Match
cs.allowed_sync_hosts = []
# This complete match will cause a container failure since the
# sync-to won't validate as allowed.
cs.container_sync('isa.db')
self.assertEquals(cs.container_failures, 1)
self.assertEquals(cs.container_skips, 3)
sync.ContainerBroker = lambda p: FakeContainerBroker(p,
info={'account': 'a', 'container': 'c',
'x_container_sync_point1': -1,
'x_container_sync_point2': -1},
metadata={'x-container-sync-to': ('http://127.0.0.1/a/c', 1),
'x-container-sync-key': ('key', 1)})
cs._myips = ['10.0.0.0'] # Match
cs._myport = 1000 # Match
cs.allowed_sync_hosts = ['127.0.0.1']
# This complete match will succeed completely since the broker
# get_items_since will return no new rows.
cs.container_sync('isa.db')
self.assertEquals(cs.container_failures, 1)
self.assertEquals(cs.container_skips, 3)
finally:
sync.ContainerBroker = orig_ContainerBroker
def test_container_stop_at(self):
cring = FakeRing()
oring = FakeRing()
cs = sync.ContainerSync({}, container_ring=cring, object_ring=oring)
orig_ContainerBroker = sync.ContainerBroker
orig_time = sync.time
try:
sync.ContainerBroker = lambda p: FakeContainerBroker(p,
info={'account': 'a', 'container': 'c',
'x_container_sync_point1': -1,
'x_container_sync_point2': -1},
metadata={'x-container-sync-to': ('http://127.0.0.1/a/c', 1),
'x-container-sync-key': ('key', 1)},
items_since=['erroneous data'])
cs._myips = ['10.0.0.0'] # Match
cs._myport = 1000 # Match
cs.allowed_sync_hosts = ['127.0.0.1']
# This sync will fail since the items_since data is bad.
cs.container_sync('isa.db')
self.assertEquals(cs.container_failures, 1)
self.assertEquals(cs.container_skips, 0)
# Set up fake times to make the sync short-circuit as having taken
# too long
fake_times = [
1.0, # Compute the time to move on
100000.0, # Compute if it's time to move on from first loop
100000.0] # Compute if it's time to move on from second loop
def fake_time():
return fake_times.pop(0)
sync.time = fake_time
# This same sync won't fail since it will look like it took so long
# as to be time to move on (before it ever actually tries to do
# anything).
cs.container_sync('isa.db')
self.assertEquals(cs.container_failures, 1)
self.assertEquals(cs.container_skips, 0)
finally:
sync.ContainerBroker = orig_ContainerBroker
sync.time = orig_time
def test_container_first_loop(self):
cring = FakeRing()
oring = FakeRing()
cs = sync.ContainerSync({}, container_ring=cring, object_ring=oring)
orig_ContainerBroker = sync.ContainerBroker
orig_hash_path = sync.hash_path
orig_delete_object = sync.delete_object
try:
def fake_hash_path(account, container, obj, raw_digest=False):
# Ensures that no rows match for full syncing, ordinal is 0 and
# all hashes are 0
return '\x00' * 16
sync.hash_path = fake_hash_path
fcb = FakeContainerBroker('path',
info={'account': 'a', 'container': 'c',
'x_container_sync_point1': 2,
'x_container_sync_point2': -1},
metadata={'x-container-sync-to': ('http://127.0.0.1/a/c', 1),
'x-container-sync-key': ('key', 1)},
items_since=[{'ROWID': 1, 'name': 'o'}])
sync.ContainerBroker = lambda p: fcb
cs._myips = ['10.0.0.0'] # Match
cs._myport = 1000 # Match
cs.allowed_sync_hosts = ['127.0.0.1']
cs.container_sync('isa.db')
# Succeeds because no rows match
self.assertEquals(cs.container_failures, 0)
self.assertEquals(cs.container_skips, 0)
self.assertEquals(fcb.sync_point1, None)
self.assertEquals(fcb.sync_point2, 1)
def fake_hash_path(account, container, obj, raw_digest=False):
# Ensures that all rows match for full syncing, ordinal is 0
# and all hashes are 1
return '\x01' * 16
sync.hash_path = fake_hash_path
fcb = FakeContainerBroker('path',
info={'account': 'a', 'container': 'c',
'x_container_sync_point1': 1,
'x_container_sync_point2': 1},
metadata={'x-container-sync-to': ('http://127.0.0.1/a/c', 1),
'x-container-sync-key': ('key', 1)},
items_since=[{'ROWID': 1, 'name': 'o'}])
sync.ContainerBroker = lambda p: fcb
cs._myips = ['10.0.0.0'] # Match
cs._myport = 1000 # Match
cs.allowed_sync_hosts = ['127.0.0.1']
cs.container_sync('isa.db')
# Succeeds because the two sync points haven't deviated yet
self.assertEquals(cs.container_failures, 0)
self.assertEquals(cs.container_skips, 0)
self.assertEquals(fcb.sync_point1, -1)
self.assertEquals(fcb.sync_point2, -1)
fcb = FakeContainerBroker('path',
info={'account': 'a', 'container': 'c',
'x_container_sync_point1': 2,
'x_container_sync_point2': -1},
metadata={'x-container-sync-to': ('http://127.0.0.1/a/c', 1),
'x-container-sync-key': ('key', 1)},
items_since=[{'ROWID': 1, 'name': 'o'}])
sync.ContainerBroker = lambda p: fcb
cs._myips = ['10.0.0.0'] # Match
cs._myport = 1000 # Match
cs.allowed_sync_hosts = ['127.0.0.1']
cs.container_sync('isa.db')
# Fails because container_sync_row will fail since the row has no
# 'deleted' key
self.assertEquals(cs.container_failures, 1)
self.assertEquals(cs.container_skips, 0)
self.assertEquals(fcb.sync_point1, -1)
self.assertEquals(fcb.sync_point2, -1)
fcb = FakeContainerBroker('path',
info={'account': 'a', 'container': 'c',
'x_container_sync_point1': 2,
'x_container_sync_point2': -1},
metadata={'x-container-sync-to': ('http://127.0.0.1/a/c', 1),
'x-container-sync-key': ('key', 1)},
items_since=[{'ROWID': 1, 'name': 'o', 'created_at': '1.2',
'deleted': True}])
sync.ContainerBroker = lambda p: fcb
cs._myips = ['10.0.0.0'] # Match
cs._myport = 1000 # Match
cs.allowed_sync_hosts = ['127.0.0.1']
cs.container_sync('isa.db')
# Fails because delete_object fails
self.assertEquals(cs.container_failures, 2)
self.assertEquals(cs.container_skips, 0)
self.assertEquals(fcb.sync_point1, -1)
self.assertEquals(fcb.sync_point2, -1)
def fake_delete_object(*args, **kwargs):
pass
sync.delete_object = fake_delete_object
fcb = FakeContainerBroker('path',
info={'account': 'a', 'container': 'c',
'x_container_sync_point1': 2,
'x_container_sync_point2': -1},
metadata={'x-container-sync-to': ('http://127.0.0.1/a/c', 1),
'x-container-sync-key': ('key', 1)},
items_since=[{'ROWID': 1, 'name': 'o', 'created_at': '1.2',
'deleted': True}])
sync.ContainerBroker = lambda p: fcb
cs._myips = ['10.0.0.0'] # Match
cs._myport = 1000 # Match
cs.allowed_sync_hosts = ['127.0.0.1']
cs.container_sync('isa.db')
# Succeeds because delete_object succeeds
self.assertEquals(cs.container_failures, 2)
self.assertEquals(cs.container_skips, 0)
self.assertEquals(fcb.sync_point1, None)
self.assertEquals(fcb.sync_point2, 1)
finally:
sync.ContainerBroker = orig_ContainerBroker
sync.hash_path = orig_hash_path
sync.delete_object = orig_delete_object
def test_container_second_loop(self):
cring = FakeRing()
oring = FakeRing()
cs = sync.ContainerSync({}, container_ring=cring, object_ring=oring)
orig_ContainerBroker = sync.ContainerBroker
orig_hash_path = sync.hash_path
orig_delete_object = sync.delete_object
try:
# We'll ensure the first loop is always skipped by keeping the two
# sync points equal
def fake_hash_path(account, container, obj, raw_digest=False):
# Ensures that no rows match for second loop, ordinal is 0 and
# all hashes are 1
return '\x01' * 16
sync.hash_path = fake_hash_path
fcb = FakeContainerBroker('path',
info={'account': 'a', 'container': 'c',
'x_container_sync_point1': -1,
'x_container_sync_point2': -1},
metadata={'x-container-sync-to': ('http://127.0.0.1/a/c', 1),
'x-container-sync-key': ('key', 1)},
items_since=[{'ROWID': 1, 'name': 'o'}])
sync.ContainerBroker = lambda p: fcb
cs._myips = ['10.0.0.0'] # Match
cs._myport = 1000 # Match
cs.allowed_sync_hosts = ['127.0.0.1']
cs.container_sync('isa.db')
# Succeeds because no rows match
self.assertEquals(cs.container_failures, 0)
self.assertEquals(cs.container_skips, 0)
self.assertEquals(fcb.sync_point1, 1)
self.assertEquals(fcb.sync_point2, None)
def fake_hash_path(account, container, obj, raw_digest=False):
# Ensures that all rows match for second loop, ordinal is 0 and
# all hashes are 0
return '\x00' * 16
def fake_delete_object(*args, **kwargs):
pass
sync.hash_path = fake_hash_path
sync.delete_object = fake_delete_object
fcb = FakeContainerBroker('path',
info={'account': 'a', 'container': 'c',
'x_container_sync_point1': -1,
'x_container_sync_point2': -1},
metadata={'x-container-sync-to': ('http://127.0.0.1/a/c', 1),
'x-container-sync-key': ('key', 1)},
items_since=[{'ROWID': 1, 'name': 'o'}])
sync.ContainerBroker = lambda p: fcb
cs._myips = ['10.0.0.0'] # Match
cs._myport = 1000 # Match
cs.allowed_sync_hosts = ['127.0.0.1']
cs.container_sync('isa.db')
# Fails because row is missing 'deleted' key
self.assertEquals(cs.container_failures, 1)
self.assertEquals(cs.container_skips, 0)
self.assertEquals(fcb.sync_point1, -1)
self.assertEquals(fcb.sync_point2, -1)
fcb = FakeContainerBroker('path',
info={'account': 'a', 'container': 'c',
'x_container_sync_point1': -1,
'x_container_sync_point2': -1},
metadata={'x-container-sync-to': ('http://127.0.0.1/a/c', 1),
'x-container-sync-key': ('key', 1)},
items_since=[{'ROWID': 1, 'name': 'o', 'created_at': '1.2',
'deleted': True}])
sync.ContainerBroker = lambda p: fcb
cs._myips = ['10.0.0.0'] # Match
cs._myport = 1000 # Match
cs.allowed_sync_hosts = ['127.0.0.1']
cs.container_sync('isa.db')
# Succeeds because row now has 'deleted' key and delete_object
# succeeds
self.assertEquals(cs.container_failures, 1)
self.assertEquals(cs.container_skips, 0)
self.assertEquals(fcb.sync_point1, 1)
self.assertEquals(fcb.sync_point2, None)
finally:
sync.ContainerBroker = orig_ContainerBroker
sync.hash_path = orig_hash_path
sync.delete_object = orig_delete_object
def test_container_sync_row_delete(self):
orig_delete_object = sync.delete_object
try:
def fake_delete_object(path, name=None, headers=None, proxy=None):
self.assertEquals(path, 'http://sync/to/path')
self.assertEquals(name, 'object')
self.assertEquals(headers,
{'x-container-sync-key': 'key', 'x-timestamp': '1.2'})
self.assertEquals(proxy, 'http://proxy')
sync.delete_object = fake_delete_object
cs = sync.ContainerSync({}, container_ring=FakeRing(),
object_ring=FakeRing())
cs.proxy = 'http://proxy'
# Success
self.assertTrue(cs.container_sync_row({'deleted': True,
'name': 'object', 'created_at': '1.2'}, 'http://sync/to/path',
'key', FakeContainerBroker('broker'), 'info'))
self.assertEquals(cs.container_deletes, 1)
exc = []
def fake_delete_object(path, name=None, headers=None, proxy=None):
exc.append(Exception('test exception'))
raise exc[-1]
sync.delete_object = fake_delete_object
# Failure because of delete_object exception
self.assertFalse(cs.container_sync_row({'deleted': True,
'name': 'object', 'created_at': '1.2'}, 'http://sync/to/path',
'key', FakeContainerBroker('broker'), 'info'))
self.assertEquals(cs.container_deletes, 1)
self.assertEquals(len(exc), 1)
self.assertEquals(str(exc[-1]), 'test exception')
def fake_delete_object(path, name=None, headers=None, proxy=None):
exc.append(ClientException('test client exception'))
raise exc[-1]
sync.delete_object = fake_delete_object
# Failure because of delete_object exception
self.assertFalse(cs.container_sync_row({'deleted': True,
'name': 'object', 'created_at': '1.2'}, 'http://sync/to/path',
'key', FakeContainerBroker('broker'), 'info'))
self.assertEquals(cs.container_deletes, 1)
self.assertEquals(len(exc), 2)
self.assertEquals(str(exc[-1]), 'test client exception')
def fake_delete_object(path, name=None, headers=None, proxy=None):
exc.append(ClientException('test client exception',
http_status=404))
raise exc[-1]
sync.delete_object = fake_delete_object
# Success because the object wasn't even found
self.assertTrue(cs.container_sync_row({'deleted': True,
'name': 'object', 'created_at': '1.2'}, 'http://sync/to/path',
'key', FakeContainerBroker('broker'), 'info'))
self.assertEquals(cs.container_deletes, 2)
self.assertEquals(len(exc), 3)
self.assertEquals(str(exc[-1]), 'test client exception: 404')
finally:
sync.delete_object = orig_delete_object
def test_container_sync_row_put(self):
orig_shuffle = sync.shuffle
orig_put_object = sync.put_object
orig_direct_get_object = sync.direct_get_object
try:
sync.shuffle = lambda x: x
def fake_put_object(sync_to, name=None, headers=None,
contents=None, proxy=None):
self.assertEquals(sync_to, 'http://sync/to/path')
self.assertEquals(name, 'object')
self.assertEquals(headers, {'x-container-sync-key': 'key',
'x-timestamp': '1.2',
'other-header': 'other header value',
'etag': 'etagvalue'})
self.assertEquals(contents.read(), 'contents')
self.assertEquals(proxy, 'http://proxy')
sync.put_object = fake_put_object
cs = sync.ContainerSync({}, container_ring=FakeRing(),
object_ring=FakeRing())
cs.proxy = 'http://proxy'
def fake_direct_get_object(node, part, account, container, obj,
resp_chunk_size=1):
return ({'other-header': 'other header value',
'etag': '"etagvalue"', 'x-timestamp': '1.2'},
iter('contents'))
sync.direct_get_object = fake_direct_get_object
# Success as everything says it worked
self.assertTrue(cs.container_sync_row({'deleted': False,
'name': 'object', 'created_at': '1.2'}, 'http://sync/to/path',
'key', FakeContainerBroker('broker'), {'account': 'a',
'container': 'c'}))
self.assertEquals(cs.container_puts, 1)
def fake_direct_get_object(node, part, account, container, obj,
resp_chunk_size=1):
return ({'date': 'date value',
'last-modified': 'last modified value',
'x-timestamp': '1.2',
'other-header': 'other header value',
'etag': '"etagvalue"'},
iter('contents'))
sync.direct_get_object = fake_direct_get_object
# Success as everything says it worked, also checks 'date' and
# 'last-modified' headers are removed and that 'etag' header is
# stripped of double quotes.
self.assertTrue(cs.container_sync_row({'deleted': False,
'name': 'object', 'created_at': '1.2'}, 'http://sync/to/path',
'key', FakeContainerBroker('broker'), {'account': 'a',
'container': 'c'}))
self.assertEquals(cs.container_puts, 2)
exc = []
def fake_direct_get_object(node, part, account, container, obj,
resp_chunk_size=1):
exc.append(Exception('test exception'))
raise exc[-1]
sync.direct_get_object = fake_direct_get_object
# Fail due to completely unexpected exception
self.assertFalse(cs.container_sync_row({'deleted': False,
'name': 'object', 'created_at': '1.2'}, 'http://sync/to/path',
'key', FakeContainerBroker('broker'), {'account': 'a',
'container': 'c'}))
self.assertEquals(cs.container_puts, 2)
self.assertEquals(len(exc), 3)
self.assertEquals(str(exc[-1]), 'test exception')
exc = []
def fake_direct_get_object(node, part, account, container, obj,
resp_chunk_size=1):
exc.append(ClientException('test client exception'))
raise exc[-1]
sync.direct_get_object = fake_direct_get_object
# Fail due to all direct_get_object calls failing
self.assertFalse(cs.container_sync_row({'deleted': False,
'name': 'object', 'created_at': '1.2'}, 'http://sync/to/path',
'key', FakeContainerBroker('broker'), {'account': 'a',
'container': 'c'}))
self.assertEquals(cs.container_puts, 2)
self.assertEquals(len(exc), 3)
self.assertEquals(str(exc[-1]), 'test client exception')
def fake_direct_get_object(node, part, account, container, obj,
resp_chunk_size=1):
return ({'other-header': 'other header value',
'x-timestamp': '1.2', 'etag': '"etagvalue"'},
iter('contents'))
def fake_put_object(sync_to, name=None, headers=None,
contents=None, proxy=None):
raise ClientException('test client exception', http_status=401)
sync.direct_get_object = fake_direct_get_object
sync.put_object = fake_put_object
cs.logger = FakeLogger()
# Fail due to 401
self.assertFalse(cs.container_sync_row({'deleted': False,
'name': 'object', 'created_at': '1.2'}, 'http://sync/to/path',
'key', FakeContainerBroker('broker'), {'account': 'a',
'container': 'c'}))
self.assertEquals(cs.container_puts, 2)
self.assert_(re.match('Unauth ',
cs.logger.log_dict['info'][0][0][0]))
def fake_put_object(sync_to, name=None, headers=None,
contents=None, proxy=None):
raise ClientException('test client exception', http_status=404)
sync.put_object = fake_put_object
# Fail due to 404
cs.logger = FakeLogger()
self.assertFalse(cs.container_sync_row({'deleted': False,
'name': 'object', 'created_at': '1.2'}, 'http://sync/to/path',
'key', FakeContainerBroker('broker'), {'account': 'a',
'container': 'c'}))
self.assertEquals(cs.container_puts, 2)
self.assert_(re.match('Not found ',
cs.logger.log_dict['info'][0][0][0]))
def fake_put_object(sync_to, name=None, headers=None,
contents=None, proxy=None):
raise ClientException('test client exception', http_status=503)
sync.put_object = fake_put_object
# Fail due to 503
self.assertFalse(cs.container_sync_row({'deleted': False,
'name': 'object', 'created_at': '1.2'}, 'http://sync/to/path',
'key', FakeContainerBroker('broker'), {'account': 'a',
'container': 'c'}))
self.assertEquals(cs.container_puts, 2)
self.assertTrue(
cs.logger.log_dict['exception'][0][0][0].startswith(
'ERROR Syncing '))
finally:
sync.shuffle = orig_shuffle
sync.put_object = orig_put_object
sync.direct_get_object = orig_direct_get_object
if __name__ == '__main__':
unittest.main()
|
|
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting model 'GroupBallotPositionDocEvent'
db.delete_table('doc_groupballotpositiondocevent')
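        # Note: dropping the table discards its rows; backwards() recreates the
        # schema but cannot restore the deleted data.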
# Adding model 'BallotType'
db.create_table('doc_ballottype', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('doc_type', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['name.DocTypeName'], null=True, blank=True)),
('slug', self.gf('django.db.models.fields.SlugField')(max_length=50, db_index=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=255)),
('question', self.gf('django.db.models.fields.TextField')(blank=True)),
('used', self.gf('django.db.models.fields.BooleanField')(default=True)),
('order', self.gf('django.db.models.fields.IntegerField')(default=0)),
))
db.send_create_signal('doc', ['BallotType'])
# Adding M2M table for field positions on 'BallotType'
db.create_table('doc_ballottype_positions', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('ballottype', models.ForeignKey(orm['doc.ballottype'], null=False)),
('ballotpositionname', models.ForeignKey(orm['name.ballotpositionname'], null=False))
))
db.create_unique('doc_ballottype_positions', ['ballottype_id', 'ballotpositionname_id'])
# Adding model 'BallotDocEvent'
db.create_table('doc_ballotdocevent', (
('docevent_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['doc.DocEvent'], unique=True, primary_key=True)),
('ballot_type', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['doc.BallotType'])),
))
db.send_create_signal('doc', ['BallotDocEvent'])
# Adding field 'BallotPositionDocEvent.ballot'
db.add_column('doc_ballotpositiondocevent', 'ballot', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['doc.BallotDocEvent'], null=True), keep_default=False)
def backwards(self, orm):
# Adding model 'GroupBallotPositionDocEvent'
db.create_table('doc_groupballotpositiondocevent', (
('block_comment', self.gf('django.db.models.fields.TextField')(blank=True)),
('comment', self.gf('django.db.models.fields.TextField')(blank=True)),
('ad', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['person.Person'])),
('comment_time', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
('block_comment_time', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
('pos', self.gf('django.db.models.fields.related.ForeignKey')(default='norecord', to=orm['name.GroupBallotPositionName'])),
('docevent_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['doc.DocEvent'], unique=True, primary_key=True)),
))
db.send_create_signal('doc', ['GroupBallotPositionDocEvent'])
# Deleting model 'BallotType'
db.delete_table('doc_ballottype')
# Removing M2M table for field positions on 'BallotType'
db.delete_table('doc_ballottype_positions')
# Deleting model 'BallotDocEvent'
db.delete_table('doc_ballotdocevent')
# Deleting field 'BallotPositionDocEvent.ballot'
db.delete_column('doc_ballotpositiondocevent', 'ballot_id')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'doc.ballotdocevent': {
'Meta': {'ordering': "['-time', '-id']", 'object_name': 'BallotDocEvent', '_ormbases': ['doc.DocEvent']},
'ballot_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['doc.BallotType']"}),
'docevent_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['doc.DocEvent']", 'unique': 'True', 'primary_key': 'True'})
},
'doc.ballotpositiondocevent': {
'Meta': {'ordering': "['-time', '-id']", 'object_name': 'BallotPositionDocEvent', '_ormbases': ['doc.DocEvent']},
'ad': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['person.Person']"}),
'ballot': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['doc.BallotDocEvent']", 'null': 'True'}),
'comment': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'comment_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'discuss': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'discuss_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'docevent_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['doc.DocEvent']", 'unique': 'True', 'primary_key': 'True'}),
'pos': ('django.db.models.fields.related.ForeignKey', [], {'default': "'norecord'", 'to': "orm['name.BallotPositionName']"})
},
'doc.ballottype': {
'Meta': {'ordering': "['order']", 'object_name': 'BallotType'},
'doc_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['name.DocTypeName']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'positions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['name.BallotPositionName']", 'symmetrical': 'False', 'blank': 'True'}),
'question': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'db_index': 'True'}),
'used': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
'doc.docalias': {
'Meta': {'object_name': 'DocAlias'},
'document': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['doc.Document']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'})
},
'doc.docevent': {
'Meta': {'ordering': "['-time', '-id']", 'object_name': 'DocEvent'},
'by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['person.Person']"}),
'desc': ('django.db.models.fields.TextField', [], {}),
'doc': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['doc.Document']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'doc.dochistory': {
'Meta': {'object_name': 'DocHistory'},
'abstract': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'ad': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'ad_dochistory_set'", 'null': 'True', 'to': "orm['person.Person']"}),
'authors': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['person.Email']", 'symmetrical': 'False', 'through': "orm['doc.DocHistoryAuthor']", 'blank': 'True'}),
'doc': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'history_set'", 'to': "orm['doc.Document']"}),
'expires': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'external_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['group.Group']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'intended_std_level': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['name.IntendedStdLevelName']", 'null': 'True', 'blank': 'True'}),
'internal_comments': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'note': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'notify': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '1', 'blank': 'True'}),
'pages': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'related': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['doc.DocAlias']", 'symmetrical': 'False', 'through': "orm['doc.RelatedDocHistory']", 'blank': 'True'}),
'rev': ('django.db.models.fields.CharField', [], {'max_length': '16', 'blank': 'True'}),
'shepherd': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'shepherd_dochistory_set'", 'null': 'True', 'to': "orm['person.Person']"}),
'states': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['doc.State']", 'symmetrical': 'False', 'blank': 'True'}),
'std_level': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['name.StdLevelName']", 'null': 'True', 'blank': 'True'}),
'stream': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['name.StreamName']", 'null': 'True', 'blank': 'True'}),
'tags': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['name.DocTagName']", 'null': 'True', 'blank': 'True'}),
'time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['name.DocTypeName']", 'null': 'True', 'blank': 'True'})
},
'doc.dochistoryauthor': {
'Meta': {'ordering': "['document', 'order']", 'object_name': 'DocHistoryAuthor'},
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['person.Email']"}),
'document': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['doc.DocHistory']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'order': ('django.db.models.fields.IntegerField', [], {})
},
'doc.docreminder': {
'Meta': {'object_name': 'DocReminder'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'due': ('django.db.models.fields.DateTimeField', [], {}),
'event': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['doc.DocEvent']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['name.DocReminderTypeName']"})
},
'doc.document': {
'Meta': {'object_name': 'Document'},
'abstract': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'ad': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'ad_document_set'", 'null': 'True', 'to': "orm['person.Person']"}),
'authors': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['person.Email']", 'symmetrical': 'False', 'through': "orm['doc.DocumentAuthor']", 'blank': 'True'}),
'expires': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'external_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['group.Group']", 'null': 'True', 'blank': 'True'}),
'intended_std_level': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['name.IntendedStdLevelName']", 'null': 'True', 'blank': 'True'}),
'internal_comments': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'primary_key': 'True'}),
'note': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'notify': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '1', 'blank': 'True'}),
'pages': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'related': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'reversely_related_document_set'", 'blank': 'True', 'through': "orm['doc.RelatedDocument']", 'to': "orm['doc.DocAlias']"}),
'rev': ('django.db.models.fields.CharField', [], {'max_length': '16', 'blank': 'True'}),
'shepherd': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'shepherd_document_set'", 'null': 'True', 'to': "orm['person.Person']"}),
'states': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['doc.State']", 'symmetrical': 'False', 'blank': 'True'}),
'std_level': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['name.StdLevelName']", 'null': 'True', 'blank': 'True'}),
'stream': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['name.StreamName']", 'null': 'True', 'blank': 'True'}),
'tags': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['name.DocTagName']", 'null': 'True', 'blank': 'True'}),
'time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['name.DocTypeName']", 'null': 'True', 'blank': 'True'})
},
'doc.documentauthor': {
'Meta': {'ordering': "['document', 'order']", 'object_name': 'DocumentAuthor'},
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['person.Email']"}),
'document': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['doc.Document']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '1'})
},
'doc.initialreviewdocevent': {
'Meta': {'ordering': "['-time', '-id']", 'object_name': 'InitialReviewDocEvent', '_ormbases': ['doc.DocEvent']},
'docevent_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['doc.DocEvent']", 'unique': 'True', 'primary_key': 'True'}),
'expires': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
},
'doc.lastcalldocevent': {
'Meta': {'ordering': "['-time', '-id']", 'object_name': 'LastCallDocEvent', '_ormbases': ['doc.DocEvent']},
'docevent_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['doc.DocEvent']", 'unique': 'True', 'primary_key': 'True'}),
'expires': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
},
'doc.newrevisiondocevent': {
'Meta': {'ordering': "['-time', '-id']", 'object_name': 'NewRevisionDocEvent', '_ormbases': ['doc.DocEvent']},
'docevent_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['doc.DocEvent']", 'unique': 'True', 'primary_key': 'True'}),
'rev': ('django.db.models.fields.CharField', [], {'max_length': '16'})
},
'doc.relateddochistory': {
'Meta': {'object_name': 'RelatedDocHistory'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'relationship': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['name.DocRelationshipName']"}),
'source': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['doc.DocHistory']"}),
'target': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'reversely_related_document_history_set'", 'to': "orm['doc.DocAlias']"})
},
'doc.relateddocument': {
'Meta': {'object_name': 'RelatedDocument'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'relationship': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['name.DocRelationshipName']"}),
'source': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['doc.Document']"}),
'target': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['doc.DocAlias']"})
},
'doc.state': {
'Meta': {'ordering': "['type', 'order']", 'object_name': 'State'},
'desc': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'next_states': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'previous_states'", 'symmetrical': 'False', 'to': "orm['doc.State']"}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'db_index': 'True'}),
'type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['doc.StateType']"}),
'used': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
'doc.statetype': {
'Meta': {'object_name': 'StateType'},
'label': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '30', 'primary_key': 'True'})
},
'doc.telechatdocevent': {
'Meta': {'ordering': "['-time', '-id']", 'object_name': 'TelechatDocEvent', '_ormbases': ['doc.DocEvent']},
'docevent_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['doc.DocEvent']", 'unique': 'True', 'primary_key': 'True'}),
'returning_item': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'telechat_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'})
},
'doc.writeupdocevent': {
'Meta': {'ordering': "['-time', '-id']", 'object_name': 'WriteupDocEvent', '_ormbases': ['doc.DocEvent']},
'docevent_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['doc.DocEvent']", 'unique': 'True', 'primary_key': 'True'}),
'text': ('django.db.models.fields.TextField', [], {'blank': 'True'})
},
'group.group': {
'Meta': {'object_name': 'Group'},
'acronym': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '40', 'db_index': 'True'}),
'ad': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['person.Person']", 'null': 'True', 'blank': 'True'}),
'charter': ('django.db.models.fields.related.OneToOneField', [], {'blank': 'True', 'related_name': "'chartered_group'", 'unique': 'True', 'null': 'True', 'to': "orm['doc.Document']"}),
'comments': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'list_archive': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'list_email': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'list_subscribe': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '80'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['group.Group']", 'null': 'True', 'blank': 'True'}),
'state': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['name.GroupStateName']", 'null': 'True'}),
'time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['name.GroupTypeName']", 'null': 'True'}),
'unused_states': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['doc.State']", 'symmetrical': 'False'}),
'unused_tags': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['name.DocTagName']", 'symmetrical': 'False'})
},
'name.ballotpositionname': {
'Meta': {'ordering': "['order']", 'object_name': 'BallotPositionName'},
'blocking': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'desc': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '8', 'primary_key': 'True'}),
'used': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
'name.docrelationshipname': {
'Meta': {'ordering': "['order']", 'object_name': 'DocRelationshipName'},
'desc': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '8', 'primary_key': 'True'}),
'used': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
'name.docremindertypename': {
'Meta': {'ordering': "['order']", 'object_name': 'DocReminderTypeName'},
'desc': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '8', 'primary_key': 'True'}),
'used': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
'name.doctagname': {
'Meta': {'ordering': "['order']", 'object_name': 'DocTagName'},
'desc': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '8', 'primary_key': 'True'}),
'used': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
'name.doctypename': {
'Meta': {'ordering': "['order']", 'object_name': 'DocTypeName'},
'desc': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '8', 'primary_key': 'True'}),
'used': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
'name.groupstatename': {
'Meta': {'ordering': "['order']", 'object_name': 'GroupStateName'},
'desc': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '8', 'primary_key': 'True'}),
'used': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
'name.grouptypename': {
'Meta': {'ordering': "['order']", 'object_name': 'GroupTypeName'},
'desc': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '8', 'primary_key': 'True'}),
'used': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
'name.intendedstdlevelname': {
'Meta': {'ordering': "['order']", 'object_name': 'IntendedStdLevelName'},
'desc': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '8', 'primary_key': 'True'}),
'used': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
'name.stdlevelname': {
'Meta': {'ordering': "['order']", 'object_name': 'StdLevelName'},
'desc': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '8', 'primary_key': 'True'}),
'used': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
'name.streamname': {
'Meta': {'ordering': "['order']", 'object_name': 'StreamName'},
'desc': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '8', 'primary_key': 'True'}),
'used': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
'person.email': {
'Meta': {'object_name': 'Email'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'address': ('django.db.models.fields.CharField', [], {'max_length': '64', 'primary_key': 'True'}),
'person': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['person.Person']", 'null': 'True'}),
'time': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
},
'person.person': {
'Meta': {'object_name': 'Person'},
'address': ('django.db.models.fields.TextField', [], {'max_length': '255', 'blank': 'True'}),
'affiliation': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'ascii': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'ascii_short': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True', 'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['doc']
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import division, print_function
import os
import sys
import numpy as np
import matplotlib.pyplot as plt
import george
from george.kernels import ExpSquaredKernel, RBFKernel
from ktransit import LCModel, FitTransit
from scipy import optimize
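# Each ret_* function below is a negative GP log-likelihood objective for
# scipy's L-BFGS-B minimizer: it builds a ktransit light-curve model from the
# given parameters, subtracts it from the flux, and scores the residuals with
# a george Gaussian process evaluated on fixed-size chunks of the data
# (chunking keeps the GP factorizations tractable).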
def ret_opt(params,time,flux,yerr):
period, T0, rprs, impact, noiseA, noiseW = params
M = LCModel()
M.add_star(rho=0.0073,ld1=0.5,ld2=0.4)
M.add_planet(T0=T0,period=period,
impact=impact,rprs=rprs)
M.add_data(time=time)
resid = flux - M.transitmodel
kernel = noiseA * ExpSquaredKernel(noiseW)
gp = george.GaussianProcess(kernel)
lnlike = 0.
for i in np.arange(len(time) // 1000)[0:10]:
section = np.arange(i*1000,i*1000 + 1000)
gp.compute(time[section], yerr[section])
lnlike += gp.lnlikelihood(resid[section])
return -lnlike
def ret_product(params,time,flux,yerr):
(period, T0, rprs, impact, noiseA1, noiseW1,
noiseA2, noiseW2, noiseM2) = params
M = LCModel()
M.add_star(rho=0.0073,ld1=0.5,ld2=0.4)
M.add_planet(T0=T0,period=period,
impact=impact,rprs=rprs)
M.add_data(time=time)
resid = flux - M.transitmodel
kernel = (((noiseA1 * ExpSquaredKernel(noiseW1)) *
(noiseA2 * ExpSquaredKernel(noiseW2))) + noiseM2)
gp = george.GaussianProcess(kernel)
lnlike = 0.
for i in np.arange(len(time) // 1000)[0:10]:
section = np.arange(i*1000,i*1000 + 1000)
gp.compute(time[section], yerr[section])
lnlike += gp.lnlikelihood(resid[section])
return -lnlike
def ret_sum(params,time,flux,yerr):
(period, T0, rprs, impact,
alb,occ,ell,ecosw, esinw,
noiseA1, noiseW1,
noiseA2, noiseW2 )= params
M = LCModel()
M.add_star(rho=0.0073,ld1=0.5,ld2=0.4)
M.add_planet(T0=T0,period=period,
impact=impact,rprs=rprs,
alb=alb,occ=occ,ell=ell,
ecosw=ecosw, esinw=esinw)
M.add_data(time=time)
resid = flux - M.transitmodel
kernel = ((noiseA1**2 * RBFKernel(noiseW1)) +
(noiseA2**2 * RBFKernel(noiseW2)))
gp = george.GaussianProcess(kernel)
lnlike = 0.
for i in np.arange(len(time) // 1000)[0:10]:
section = np.arange(i*1000,i*1000 + 1000)
gp.compute(time[section], yerr[section])
lnlike += gp.lnlikelihood(resid[section])
return -lnlike
def ret_sum_ln(params,time,flux,yerr):
(period, T0, rprs, impact,
alb,occ,ell,ecosw, esinw,
lnnoiseA1, lnnoiseW1,
lnnoiseA2, lnnoiseW2 )= params
M = LCModel()
M.add_star(rho=0.0073,ld1=0.5,ld2=0.4)
M.add_planet(T0=T0,period=period,
impact=impact,rprs=rprs,
alb=alb,occ=occ,ell=ell,
ecosw=ecosw, esinw=esinw)
M.add_data(time=time)
resid = flux - M.transitmodel
kernel = ((np.exp(lnnoiseA1)**2 * RBFKernel(np.exp(lnnoiseW1))) +
(np.exp(lnnoiseA2)**2 * RBFKernel(np.exp(lnnoiseW2))))
gp = george.GaussianProcess(kernel)
lnlike = 0.
for i in np.arange(len(time) // 1000)[0:10]:
section = np.arange(i*1000,i*1000 + 1000)
gp.compute(time[section], yerr[section])
lnlike += gp.lnlikelihood(resid[section])
return -lnlike
def ret_simple_ln(params,time,flux,yerr):
(period, T0, rprs, impact,
alb,occ,ell,ecosw, esinw,
lnnoiseA1, lnnoiseW1)= params
M = LCModel()
M.add_star(rho=0.0073,ld1=0.5,ld2=0.4)
M.add_planet(T0=T0,period=period,
impact=impact,rprs=rprs,
alb=alb,occ=occ,ell=ell,
ecosw=ecosw, esinw=esinw)
M.add_data(time=time)
resid = flux - M.transitmodel
kernel = (np.exp(lnnoiseA1)**2 * RBFKernel(np.exp(lnnoiseW1)))
gp = george.GaussianProcess(kernel)
lnlike = 0.
for i in np.arange(len(time) // 1000):
section = np.arange(i*1000,i*1000 + 1000)
gp.compute(time[section], yerr[section])
lnlike += gp.lnlikelihood(resid[section])
return -lnlike
def ret_simple(params,time,flux,yerr):
(period, T0, rprs, impact,
alb,occ,ell,ecosw, esinw,
noiseA1, noiseW1)= params
M = LCModel()
M.add_star(rho=0.0073,ld1=0.5,ld2=0.4)
M.add_planet(T0=T0,period=period,
impact=impact,rprs=rprs,
alb=alb,occ=occ,ell=ell,
ecosw=ecosw, esinw=esinw)
M.add_data(time=time)
resid = flux - M.transitmodel
kernel = (noiseA1**2 * RBFKernel(noiseW1))
gp = george.GaussianProcess(kernel)
lnlike = 0.
for i in np.arange(len(time) // 300):
section = np.arange(i*300,i*300 + 300)
gp.compute(time[section], yerr[section])
lnlike += gp.lnlikelihood(resid[section])
return -lnlike
def ret_simplest(params,time,flux,yerr,fixed):
(period, T0, rprs, impact,
alb,occ,ell,ecosw, esinw) = fixed
(noiseA1, noiseW1)= params
M = LCModel()
M.add_star(rho=0.0073,ld1=0.5,ld2=0.4)
M.add_planet(T0=T0,period=period,
impact=impact,rprs=rprs,
alb=alb,occ=occ,ell=ell,
ecosw=ecosw, esinw=esinw)
M.add_data(time=time)
resid = flux - M.transitmodel
kernel = (noiseA1**2 * RBFKernel(noiseW1))
gp = george.GaussianProcess(kernel)
lnlike = 0.
for i in np.arange(len(time) // 300):
section = np.arange(i*300,i*300 + 300)
gp.compute(time[section], yerr[section])
lnlike += gp.lnlikelihood(resid[section])
return -lnlike
if __name__ == '__main__':
data = np.genfromtxt('/Users/tom/Projects/koi2133/data/lc.dat').T
time = data[0] #test on shorter data set
flux = data[1]
ferr = (data[2] / 4.)
product = False
sumkernel = False
simple = False
even_simpler = True
if not product and not sumkernel and not simple and not even_simpler:
## vary the period, T0, rprs, b, noiseA, noiseW
bounds = ((None,None),(None,None),
(0.01,0.04),(0.00001,0.999),(None,None),(None,None))
guess = (6.24658,136.3966,0.02255,0.5,0.05,0.01)
lsqout = optimize.fmin_l_bfgs_b(ret_opt,guess,
args=(time,flux,ferr),approx_grad=True,bounds=bounds)
period, T0, rprs, impact, noiseA, noiseW = lsqout[0]
kernel = noiseA * ExpSquaredKernel(noiseW)
gp = george.GaussianProcess(kernel)
elif product:
## vary the period, T0, rprs, b, noiseA1, noiseW1,
## noiseA2, noiseW2, noiseM2
bounds = ((None,None),(None,None),
(0.01,0.04),(0.00001,0.999),(None,None),(None,None),
(None,None),(None,None),(None,None))
guess = (6.24658,136.3966,0.02255,0.5,0.01,6.,
0.01,0.12,0.0)
lsqout = optimize.fmin_l_bfgs_b(ret_product,guess,
args=(time,flux,ferr),approx_grad=True,bounds=bounds,m=100,factr=1.E6)
(period, T0, rprs, impact, noiseA1, noiseW1,
noiseA2, noiseW2, noiseM2) = lsqout[0]
kernel = (((noiseA1 * ExpSquaredKernel(noiseW1)) *
(noiseA2 * ExpSquaredKernel(noiseW2))) + noiseM2)
gp = george.GaussianProcess(kernel)
elif sumkernel:
## vary the period, T0, rprs, b,
## alb,occ,ell,ecosw, esinw
## noiseA1, noiseW1,
## noiseA2, noiseW2
bounds = ((None,None),(None,None),
(0.01,0.03),(0.00001,0.999),
(None,None),(None,None),
(None,None),(None,None),(None,None),
(None,None),(None,None),
(None,None),(None,None))
guess = (6.24658,136.3966,0.02255,0.8,
30., 30., 60., 0.0, 0.0,
np.log(5.E-4), np.log(0.07),
np.log(2.E-4), np.log(3.0))
lsqout = optimize.fmin_l_bfgs_b(ret_sum_ln,guess,
args=(time,flux,ferr),approx_grad=True,bounds=bounds,m=300,factr=1.E7)
(period, T0, rprs, impact,
alb,occ,ell,ecosw, esinw,
lnnoiseA1, lnnoiseW1,
lnnoiseA2, lnnoiseW2) = lsqout[0]
kernel = ((np.exp(lnnoiseA1)**2 * RBFKernel(np.exp(lnnoiseW1))) +
(np.exp(lnnoiseA2)**2 * RBFKernel(np.exp(lnnoiseW2))))
gp = george.GaussianProcess(kernel)
elif simple:
bounds = ((None,None),(None,None),
(0.01,0.03),(0.00001,0.999),
(None,None),(None,None),
(None,None),(None,None),(None,None),
(None,None),(None,None))
guess = (6.2465796,136.39661,0.02255,0.4,
30., 30., 60., 0.0, 0.0,
5.E-4, 0.07)
lsqout = optimize.fmin_l_bfgs_b(ret_simple,guess,
args=(time,flux,ferr),approx_grad=True,bounds=bounds,
m=300,factr=1.E7)
(period, T0, rprs, impact,
alb,occ,ell,ecosw, esinw,
noiseA1, noiseW1) = lsqout[0]
kernel = (noiseA1**2 * RBFKernel(noiseW1))
gp = george.GaussianProcess(kernel)
elif even_simpler:
bounds = (
(None,None),(None,None))
guess = (5.E-4, 0.07)
fixed = (6.2465796,136.39661,0.02255,0.8,
13., 30., 45., 0.0, 0.0)
(period, T0, rprs, impact,
alb,occ,ell,ecosw, esinw) = fixed
lsqout = optimize.fmin_l_bfgs_b(ret_simplest,guess,
args=(time,flux,ferr,fixed),approx_grad=True,bounds=bounds,
m=300,factr=1.E7)
(noiseA1, noiseW1) = lsqout[0]
kernel = (noiseA1**2 * RBFKernel(noiseW1))
gp = george.GaussianProcess(kernel)
#transit fit
M = LCModel()
M.add_star(rho=0.0073,ld1=0.5,ld2=0.4)
M.add_planet(T0=T0,period=period,
impact=impact,rprs=rprs,
alb=alb,occ=occ,ell=ell,
ecosw=ecosw, esinw=esinw)
M.add_data(time=time)
#sample = np.array([])
#for i in np.arange(len(time) // 1000):
# section = np.arange(i*1000,i*1000 + 1000)
# gp.compute(time[section], ferr[section])
# sample = np.r_[sample,gp.sample_conditional(
# flux[section] - M.transitmodel[section],time[section])]
#gp.compute(time, ferr)
#samples = gp.sample_conditional(flux, time, N=100)
print('Computing GP conditional prediction in 300-point chunks...')
sample = np.array([])
for i in np.arange(len(time) // 300):
section = np.arange(i*300,i*300 + 300)
gp.compute(time[section], ferr[section])
sample = np.r_[sample,gp.predict(
flux[section] - M.transitmodel[section],time[section])[0]]
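# matplotlib is imported above but never used; a minimal plotting sketch of
# the result (note that `sample` only covers the first len(time)//300 * 300
# points):
#
# n = len(sample)
# plt.plot(time[:n], flux[:n], 'k.', ms=2, label='data')
# plt.plot(time[:n], M.transitmodel[:n] + sample, 'r-', label='transit + GP')
# plt.legend()
# plt.show()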
|
|
"""
Base/mixin classes for the spatial backend database operations and the
`SpatialRefSys` model used by the backend.
"""
import re
from django.contrib.gis import gdal
from django.utils import six
class BaseSpatialOperations(object):
"""
Base class for spatial database backend operations, instantiated
by each spatial database backend with the features it has.
"""
distance_functions = {}
geometry_functions = {}
geometry_operators = {}
geography_operators = {}
geography_functions = {}
gis_terms = {}
truncate_params = {}
# Quick booleans for the type of this spatial backend, and
# an attribute for the spatial database version tuple (if applicable)
postgis = False
spatialite = False
mysql = False
oracle = False
spatial_version = None
# How the geometry column should be selected.
select = None
# Does the spatial database have a geography type?
geography = False
area = False
centroid = False
difference = False
distance = False
distance_sphere = False
distance_spheroid = False
envelope = False
force_rhr = False
mem_size = False
bounding_circle = False
num_geom = False
num_points = False
perimeter = False
perimeter3d = False
point_on_surface = False
polygonize = False
reverse = False
scale = False
snap_to_grid = False
sym_difference = False
transform = False
translate = False
union = False
# Aggregates
collect = False
extent = False
extent3d = False
make_line = False
unionagg = False
# Serialization
geohash = False
geojson = False
gml = False
kml = False
svg = False
# Constructors
from_text = False
from_wkb = False
# Default conversion functions for aggregates; will be overridden if implemented
# for the spatial backend.
def convert_extent(self, box):
raise NotImplementedError('Aggregate extent not implemented for this spatial backend.')
def convert_extent3d(self, box):
raise NotImplementedError('Aggregate 3D extent not implemented for this spatial backend.')
def convert_geom(self, geom_val, geom_field):
raise NotImplementedError('Aggregate method not implemented for this spatial backend.')
# For quoting column values, rather than columns.
def geo_quote_name(self, name):
if isinstance(name, six.text_type):
name = name.encode('ascii')
return "'%s'" % name
# GeometryField operations
def geo_db_type(self, f):
"""
Returns the database column type for the geometry field on
the spatial backend.
"""
raise NotImplementedError
def get_distance(self, f, value, lookup_type):
"""
Returns the distance parameters for the given geometry field,
lookup value, and lookup type.
"""
raise NotImplementedError('Distance operations not available on this spatial backend.')
def get_geom_placeholder(self, f, value):
"""
Returns the placeholder for the given geometry field with the given
value. Depending on the spatial backend, the placeholder may contain a
stored procedure call to the transformation function of the spatial
backend.
"""
raise NotImplementedError
# Spatial SQL Construction
def spatial_aggregate_sql(self, agg):
raise NotImplementedError('Aggregate support not implemented for this spatial backend.')
def spatial_lookup_sql(self, lvalue, lookup_type, value, field):
raise NotImplementedError
# Routines for getting the OGC-compliant models.
def geometry_columns(self):
raise NotImplementedError
def spatial_ref_sys(self):
raise NotImplementedError
class SpatialRefSysMixin(object):
"""
The SpatialRefSysMixin is a class used by the database-dependent
SpatialRefSys objects to reduce redundant code.
"""
# For pulling out the spheroid from the spatial reference string. This
# regular expression is used only if the user does not have GDAL installed.
# TODO: Flattening not used in all ellipsoids, could also be a minor axis,
# or 'b' parameter.
spheroid_regex = re.compile(r'.+SPHEROID\[\"(?P<name>.+)\",(?P<major>\d+(\.\d+)?),(?P<flattening>\d{3}\.\d+),')
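# e.g. for WGS 84 WKT containing 'SPHEROID["WGS 84",6378137,298.257223563,'
# this captures name='WGS 84', major='6378137', flattening='298.257223563'.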
# For pulling out the units on platforms w/o GDAL installed.
# TODO: Figure out how to pull out angular units of projected coordinate system and
# fix for LOCAL_CS types. GDAL should be highly recommended for performing
# distance queries.
units_regex = re.compile(r'.+UNIT ?\["(?P<unit_name>[\w \'\(\)]+)", ?(?P<unit>[\d\.]+)(,AUTHORITY\["(?P<unit_auth_name>[\w \'\(\)]+)","(?P<unit_auth_val>\d+)"\])?\]([\w ]+)?(,AUTHORITY\["(?P<auth_name>[\w \'\(\)]+)","(?P<auth_val>\d+)"\])?\]$')
@property
def srs(self):
"""
Returns a GDAL SpatialReference object, if GDAL is installed.
"""
if gdal.HAS_GDAL:
# TODO: Is caching really necessary here? Is complexity worth it?
if hasattr(self, '_srs'):
# Returning a clone of the cached SpatialReference object.
return self._srs.clone()
else:
# Attempting to cache a SpatialReference object.
# Trying to get from WKT first.
try:
self._srs = gdal.SpatialReference(self.wkt)
return self.srs
except Exception as e:
msg = e
try:
self._srs = gdal.SpatialReference(self.proj4text)
return self.srs
except Exception as e:
msg = e
raise Exception('Could not get OSR SpatialReference from WKT: %s\nError:\n%s' % (self.wkt, msg))
else:
raise Exception('GDAL is not installed.')
@property
def ellipsoid(self):
"""
Returns a tuple of the ellipsoid parameters:
(semimajor axis, semiminor axis, and inverse flattening).
Without GDAL, only (semimajor axis, inverse flattening) can be
recovered from the WKT.
"""
if gdal.HAS_GDAL:
return self.srs.ellipsoid
else:
m = self.spheroid_regex.match(self.wkt)
if m: return (float(m.group('major')), float(m.group('flattening')))
else: return None
@property
def name(self):
"Returns the projection name."
return self.srs.name
@property
def spheroid(self):
"Returns the spheroid name for this spatial reference."
return self.srs['spheroid']
@property
def datum(self):
"Returns the datum for this spatial reference."
return self.srs['datum']
@property
def projected(self):
"Is this Spatial Reference projected?"
if gdal.HAS_GDAL:
return self.srs.projected
else:
return self.wkt.startswith('PROJCS')
@property
def local(self):
"Is this Spatial Reference local?"
if gdal.HAS_GDAL:
return self.srs.local
else:
return self.wkt.startswith('LOCAL_CS')
@property
def geographic(self):
"Is this Spatial Reference geographic?"
if gdal.HAS_GDAL:
return self.srs.geographic
else:
return self.wkt.startswith('GEOGCS')
@property
def linear_name(self):
"Returns the linear units name."
if gdal.HAS_GDAL:
return self.srs.linear_name
elif self.geographic:
return None
else:
m = self.units_regex.match(self.wkt)
return m.group('unit_name')
@property
def linear_units(self):
"Returns the linear units."
if gdal.HAS_GDAL:
return self.srs.linear_units
elif self.geographic:
return None
else:
m = self.units_regex.match(self.wkt)
return m.group('unit')
@property
def angular_name(self):
"Returns the name of the angular units."
if gdal.HAS_GDAL:
return self.srs.angular_name
elif self.projected:
return None
else:
m = self.units_regex.match(self.wkt)
return m.group('unit_name')
@property
def angular_units(self):
"Returns the angular units."
if gdal.HAS_GDAL:
return self.srs.angular_units
elif self.projected:
return None
else:
m = self.units_regex.match(self.wkt)
return m.group('unit')
@property
def units(self):
"Returns a tuple of the units and the name."
if self.projected or self.local:
return (self.linear_units, self.linear_name)
elif self.geographic:
return (self.angular_units, self.angular_name)
else:
return (None, None)
@classmethod
def get_units(cls, wkt):
"""
Class method used by GeometryField on initialization to
retrieve the units on the given WKT, without having to use
any of the database fields.
"""
if gdal.HAS_GDAL:
return gdal.SpatialReference(wkt).units
else:
m = cls.units_regex.match(wkt)
return m.group('unit'), m.group('unit_name')
@classmethod
def get_spheroid(cls, wkt, string=True):
"""
Class method used by GeometryField on initialization to
retrieve the `SPHEROID[..]` parameters from the given WKT.
"""
if gdal.HAS_GDAL:
srs = gdal.SpatialReference(wkt)
sphere_params = srs.ellipsoid
sphere_name = srs['spheroid']
else:
m = cls.spheroid_regex.match(wkt)
if m:
sphere_params = (float(m.group('major')), float(m.group('flattening')))
sphere_name = m.group('name')
else:
return None
if not string:
return sphere_name, sphere_params
else:
# The `string` parameter places the result in a format acceptable to PostGIS.
if len(sphere_params) == 3:
radius, flattening = sphere_params[0], sphere_params[2]
else:
radius, flattening = sphere_params
return 'SPHEROID["%s",%s,%s]' % (sphere_name, radius, flattening)
def __unicode__(self):
"""
Returns the string representation. If GDAL is installed,
it will be 'pretty' OGC WKT.
"""
try:
return six.text_type(self.srs)
except Exception:
return six.text_type(self.wkt)
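# A minimal usage sketch (hypothetical `MySpatialRefSys` model combining this
# mixin with a backend's spatial_ref_sys table; assumes GDAL is installed):
#
# entry = MySpatialRefSys.objects.get(srid=4326)
# entry.name                            # e.g. 'WGS 84'
# entry.units                           # (angular units, unit name) for a geographic SRS
# MySpatialRefSys.get_spheroid(entry.wkt)
#                                       # e.g. 'SPHEROID["WGS 84",6378137.0,298.257223563]'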
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 OpenStack Foundation
# Copyright 2012 Nebula, Inc.
# Copyright (c) 2012 X.commerce, a business unit of eBay Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
import logging
from django.conf import settings
from django.utils.translation import ugettext_lazy as _
from cinderclient.v1.contrib import list_extensions as cinder_list_extensions
from horizon import exceptions
from horizon.utils.memoized import memoized # noqa
from openstack_dashboard.api import base
from openstack_dashboard.api import nova
LOG = logging.getLogger(__name__)
# API static values
VOLUME_STATE_AVAILABLE = "available"
DEFAULT_QUOTA_NAME = 'default'
VERSIONS = base.APIVersionManager("volume", preferred_version=1)
try:
from cinderclient.v1 import client as cinder_client_v1
VERSIONS.load_supported_version(1, {"client": cinder_client_v1,
"version": 1})
except ImportError:
pass
try:
from cinderclient.v2 import client as cinder_client_v2
VERSIONS.load_supported_version(2, {"client": cinder_client_v2,
"version": 2})
except ImportError:
pass
class BaseCinderAPIResourceWrapper(base.APIResourceWrapper):
@property
def name(self):
# If a volume doesn't have a name, use its id.
return (getattr(self._apiresource, 'name', None) or
getattr(self._apiresource, 'display_name', None) or
getattr(self._apiresource, 'id', None))
@property
def description(self):
return (getattr(self._apiresource, 'description', None) or
getattr(self._apiresource, 'display_description', None))
class Volume(BaseCinderAPIResourceWrapper):
_attrs = ['id', 'name', 'description', 'size', 'status', 'created_at',
'volume_type', 'availability_zone', 'imageRef', 'bootable',
'snapshot_id', 'source_volid', 'attachments', 'tenant_name',
'os-vol-host-attr:host', 'os-vol-tenant-attr:tenant_id',
'metadata']
class VolumeSnapshot(BaseCinderAPIResourceWrapper):
_attrs = ['id', 'name', 'description', 'size', 'status',
'created_at', 'volume_id',
'os-extended-snapshot-attributes:project_id']
def cinderclient(request):
api_version = VERSIONS.get_active_version()
insecure = getattr(settings, 'OPENSTACK_SSL_NO_VERIFY', False)
cacert = getattr(settings, 'OPENSTACK_SSL_CACERT', None)
cinder_url = ""
try:
# The cinder client assumes that the v2 endpoint type will be
# 'volumev2'. However it also allows 'volume' type as a
# fallback if the requested version is 2 and there is no
# 'volumev2' endpoint.
if api_version['version'] == 2:
try:
cinder_url = base.url_for(request, 'volumev2')
except exceptions.ServiceCatalogException:
LOG.warning("Cinder v2 requested but no 'volumev2' service "
"type available in Keystone catalog. Falling back "
"to 'volume'.")
if cinder_url == "":
cinder_url = base.url_for(request, 'volume')
except exceptions.ServiceCatalogException:
LOG.debug('no volume service configured.')
return None
LOG.debug('cinderclient connection created using token "%s" and url "%s"' %
(request.user.token.id, cinder_url))
c = api_version['client'].Client(request.user.username,
request.user.token.id,
project_id=request.user.tenant_id,
auth_url=cinder_url,
insecure=insecure,
cacert=cacert,
http_log_debug=settings.DEBUG)
c.client.auth_token = request.user.token.id
c.client.management_url = cinder_url
return c
def _replace_v2_parameters(data):
if VERSIONS.active < 2:
data['display_name'] = data['name']
data['display_description'] = data['description']
del data['name']
del data['description']
return data
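# For example, with a v1 client active:
#   {'name': 'vol1', 'description': 'test'} becomes
#   {'display_name': 'vol1', 'display_description': 'test'}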
def volume_list(request, search_opts=None):
"""To see all volumes in the cloud as an admin you can pass in a special
search option: {'all_tenants': 1}
"""
c_client = cinderclient(request)
if c_client is None:
return []
return [Volume(v) for v in c_client.volumes.list(search_opts=search_opts)]
def volume_get(request, volume_id):
volume_data = cinderclient(request).volumes.get(volume_id)
for attachment in volume_data.attachments:
if "server_id" in attachment:
instance = nova.server_get(request, attachment['server_id'])
attachment['instance_name'] = instance.name
else:
# Nova volume can occasionally send back errored attachments
# that lack a server_id property; to work around that we'll
# give the attached instance a generic name.
attachment['instance_name'] = _("Unknown instance")
return Volume(volume_data)
def volume_create(request, size, name, description, volume_type,
snapshot_id=None, metadata=None, image_id=None,
availability_zone=None, source_volid=None):
data = {'name': name,
'description': description,
'volume_type': volume_type,
'snapshot_id': snapshot_id,
'metadata': metadata,
'imageRef': image_id,
'availability_zone': availability_zone,
'source_volid': source_volid}
data = _replace_v2_parameters(data)
volume = cinderclient(request).volumes.create(size, **data)
return Volume(volume)
def volume_extend(request, volume_id, new_size):
return cinderclient(request).volumes.extend(volume_id, new_size)
def volume_delete(request, volume_id):
return cinderclient(request).volumes.delete(volume_id)
def volume_update(request, volume_id, name, description):
vol_data = {'name': name,
'description': description}
vol_data = _replace_v2_parameters(vol_data)
return cinderclient(request).volumes.update(volume_id,
**vol_data)
def volume_snapshot_get(request, snapshot_id):
snapshot = cinderclient(request).volume_snapshots.get(snapshot_id)
return VolumeSnapshot(snapshot)
def volume_snapshot_list(request):
c_client = cinderclient(request)
if c_client is None:
return []
return [VolumeSnapshot(s) for s in c_client.volume_snapshots.list()]
def volume_snapshot_create(request, volume_id, name,
description=None, force=False):
data = {'name': name,
'description': description,
'force': force}
data = _replace_v2_parameters(data)
return VolumeSnapshot(cinderclient(request).volume_snapshots.create(
volume_id, **data))
def volume_snapshot_delete(request, snapshot_id):
return cinderclient(request).volume_snapshots.delete(snapshot_id)
def tenant_quota_get(request, tenant_id):
c_client = cinderclient(request)
if c_client is None:
return base.QuotaSet()
return base.QuotaSet(c_client.quotas.get(tenant_id))
def tenant_quota_update(request, tenant_id, **kwargs):
return cinderclient(request).quotas.update(tenant_id, **kwargs)
def default_quota_get(request, tenant_id):
return base.QuotaSet(cinderclient(request).quotas.defaults(tenant_id))
def volume_type_list(request):
return cinderclient(request).volume_types.list()
def volume_type_create(request, name):
return cinderclient(request).volume_types.create(name)
def volume_type_delete(request, volume_type_id):
return cinderclient(request).volume_types.delete(volume_type_id)
def tenant_absolute_limits(request):
limits = cinderclient(request).limits.get().absolute
limits_dict = {}
for limit in limits:
# -1 is used to represent unlimited quotas
if limit.value == -1:
limits_dict[limit.name] = float("inf")
else:
limits_dict[limit.name] = limit.value
return limits_dict
def geo_tag_list(request):
return cinderclient(request).geo_tags.list()
def geo_tag_show(request, geo_tag_id):
return cinderclient(request).geo_tags.show(geo_tag_id)
def service_list(request, host=None, binary=None):
filters = {}
if host:
filters['host'] = host
if binary:
filters['binary'] = binary
return cinderclient(request).services.list(**filters)
def availability_zone_list(request, detailed=False):
return cinderclient(request).availability_zones.list(detailed=detailed)
@memoized
def list_extensions(request):
return cinder_list_extensions.ListExtManager(cinderclient(request))\
.show_all()
@memoized
def extension_supported(request, extension_name):
"""This method will determine if Cinder supports a given extension name.
"""
extensions = list_extensions(request)
for extension in extensions:
if extension.name == extension_name:
return True
return False
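# A minimal usage sketch (hypothetical view code; `request` must be an
# authenticated Django request with a Keystone service catalog):
#
# if extension_supported(request, 'AvailabilityZones'):
#     zones = availability_zone_list(request, detailed=True)
# volumes = volume_list(request, search_opts={'all_tenants': 1})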
|
|
"""
Handles routing and form processing for the PowerToken Flask app.\n
Created by Jasmine Jones in 11/2017.\n
Modified by Abigail Franz, J. Jones. Last on July 2019.
"""
import logging, sys
logging.basicConfig(stream=sys.stderr, level=logging.INFO)
from datetime import datetime
import json
from flask import redirect, render_template, request, url_for
from werkzeug.urls import url_parse
from werkzeug.datastructures import MultiDict
from powertoken import app
import powertoken.db_util as db_util
import powertoken.api_util as api_util
from powertoken.forms import UserLoginForm, UserWcLoginForm, UserActivityForm
@app.route("/createDB")
def create_db():
from powertoken import db
db.create_all()
return render_template("user_home.html", username="db")
@app.route("/")
@app.route("/index")
@app.route("/home")
def user_home():
#return "Hello worlds!"
username = request.args.get("username") # or "TEST"
# If the user isn't logged in, redirect to the PowerToken login.
if username is None:
return redirect(url_for("user_login"))
# If the user is logged in, show the welcome page.
return render_template("user_home.html", username=username)
@app.route("/info")
def study_info():
return render_template("study_info.html")
@app.route("/user_login", methods=["GET", "POST"])
def user_login():
form = UserLoginForm()
# POST: Process the PowerToken login form.
if form.validate_on_submit():
username = form.username.data
# If the user isn't in the database, add a new user.
if not db_util.pt_userExists(username):
db_util.pt_addUser(username)
# If the user is in the database but tokens are missing, redirect to the WEconnect login.
if not db_util.pt_userProfileComplete(username):
return redirect(url_for("user_wc_login", username=username))
# If the user exists with all API info intact, bypass the login.
return render_template("user_home.html", username=username)
# GET: Render the PowerToken login page.
error = request.args.get("error")
if error:
return render_template("user_login.html", form=form, error=error)
else:
return render_template("user_login.html", form=form)
@app.route("/user_wc_login", methods=["GET", "POST"])
def user_wc_login():
form = UserWcLoginForm()
# POST: Process the WEconnect login form.
if form.validate_on_submit():
username = request.args.get("username")
# If for whatever reason the username wasn't saved, return to the
# original PowerToken login page.
if username is None:
return redirect(url_for("user_login", error="Invalid username"))
# Get the user with that username from the database; go back to the
# login page if the user is invalid. This shouldn't happen, but just in case.
if not db_util.pt_userExists(username):
return redirect(url_for("user_login", error="Invalid user"))
# If everything is okay so far, get WEconnect info from the form and
# login to external WEconnect server.
email = form.email.data
password = form.password.data
success, result = api_util.login_to_wc(email, password)
# If the username or password is incorrect, prompt the user to re-enter
# credentials.
if not success:
error = "Incorrect username or password"
return render_template("user_wc_login.html", form=form, error=error)
# If the login was successful, store the WEconnect ID and access token
# in the database, pull the user's WEconnect activities into the
# database, and redirect to the Fitbit login.
wc_id = result[0]
wc_token = result[1]
activities = api_util.get_wc_activities(wc_id, wc_token)
errormsg = db_util.wc_addInfo(username, wc_id, wc_token, activities)
if errormsg is None:
logging.info("Added User Info for {}:{}".format(username, wc_id))
else:
return render_template("user_wc_login.html", form=form, error=errormsg)
#TODO: change order of execution to get weights added here, before FB login.
#Get Fitbit Token
return redirect(url_for("user_fb_login", username=username))
# GET: Render the WEconnect login page.
return render_template("user_wc_login.html", form=form)
@app.route("/user_fb_login", methods=["GET", "POST"])
def user_fb_login():
'''
FYI: production callback URL: https://powertoken.grouplens.org/fb_login
Test callback URL: http://localhost:5000/user_fb_login
'''
# POST: Process response from external Fitbit server.
username = request.args.get("username")
if request.method == "POST":
# Extract the Fitbit token and username from the response data.
fb_token, username = api_util.complete_fb_login(request.data)
# If the username wasn't saved, return to the original PowerToken login
# page.
logging.debug("POST for Username {}".format(username))
if username is None:
return redirect(url_for("user_login", error="No username given"))
# Get the user with that username from the database; go back to the
# login page if the user is invalid. This shouldn't happen, but just in case.
if not db_util.pt_userExists(username):
return redirect(url_for("user_login", error="Invalid username. Please create user profile"))
# Add the Fitbit info to the database.
db_util.fb_addInfo(username, fb_token)
# Update the user's step goal in Fitbit.
api_util.fb_updateUserGoal(fb_token)
return render_template("user_home.html", username=username)
# GET: Render Fitbit page, which redirects to external login.
elif request.method == "GET":
username = request.args.get("username")
return render_template("user_fb_login.html", username=username)
@app.route("/user_activities", methods=["GET", "POST"])
def user_activities():
''' NOTE: This function is called via js redirect from user_fb_login.html'''
username = request.args.get("username")
# If for whatever reason the username wasn't saved, go back to the
# original login screen.
if username is None:
return redirect(url_for("user_login", error="Invalid username"))
if not db_util.pt_userExists(username):
return redirect(url_for("user_login", error="Invalid username"))
form = UserActivityForm()
# GET: Set up the form for activity weighting and render the page.
if request.method == "GET":
activities = db_util.wc_getUserActivities(username) #returns list of MultiDict
for a in activities:
form.activities.append_entry(data=a)
return render_template("user_activities.html", form=form)
# POST: Process the submitted activity weighting form.
elif request.method == "POST":
logging.debug(form.activities.entries)
act_weights = []
for entry in form.activities.entries:
# Strip '[' and ']' characters added by MultiDict representation
entry_id = entry.wc_act_id.data[1:-1]
#create and send a tuple of wc_act_id, weight
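# e.g. act_weights ends up like [("12345", 3), ("67890", 1)] (hypothetical IDs and weights)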
act_weights.append((entry_id, entry.weight.data))
db_util.wc_addActivityWeight(username, act_weights)
return redirect(url_for("user_home", username=username))
'''
user = User.query.filter_by(username=username).first()
form = UserActivityForm()
# POST: Process the submitted activity weighting form.
if request.method == "POST":
for entry in form.activities.entries:
# Strip '[' and ']' characters added by MultiDict representation
entry_id = entry.wc_act_id.data[1:-1]
activity = user.activities.filter_by(wc_act_id=entry_id).first()
activity.weight = entry.weight.data
db.session.commit()
return redirect(url_for("user_home", username=username))
'''
# GET: Set up the form for activity weighting and render the page.
''' elif request.method == "GET":
for act in user.activities.all():
# Don't show the user expired activities (but they still need to be
# in the database).
if act.expiration < datetime.now():
continue
# The append_entry method only takes a MultiDict data structure.
d = MultiDict([("wc_act_id", act.wc_act_id), ("act_name", act.name),
("weight", act.weight)])
form.activities.append_entry(data=d)
return render_template("user_activities.html", form=form)'''
@app.teardown_appcontext
def shutdown_session(exception=None):
db_util.close_session()
# Visualization-related routes.
@app.route("/overview/<name>")
def user_overview(name):
activity_data = db_util.viz_dataDict(name)
return render_template("overview_viz.html", activity_data=activity_data)
@app.route("/practice/<name>")
def practice(name):
activity_data = db_util.viz_dataDict(name)
return render_template("overview_viz.html", activity_data=activity_data)
|
|
# PyAlgoTrade
#
# Copyright 2011-2015 Gabriel Martin Becedillas Ruiz
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
.. moduleauthor:: Gabriel Martin Becedillas Ruiz <gabriel.becedillas@gmail.com>
"""
import datetime
import common
import strategy_test
from engine.barfeed import yahoofeed
from engine.barfeed import membf
from engine.stratanalyzer import drawdown
from engine import broker
from engine import bar
def build_bars_from_closing_prices(closingPrices):
ret = []
nextDateTime = datetime.datetime.now()
for closePrice in closingPrices:
bar_ = bar.BasicBar(nextDateTime, closePrice, closePrice, closePrice, closePrice, closePrice, closePrice, bar.Frequency.DAY)
ret.append(bar_)
nextDateTime = nextDateTime + datetime.timedelta(days=1)
return ret
class TestBarFeed(membf.BarFeed):
def barsHaveAdjClose(self):
raise NotImplementedError()
class DDHelperCase(common.TestCase):
def testNoDrawDown1(self):
helper = drawdown.DrawDownHelper()
helper.update(datetime.datetime.now(), 10, 10)
self.assertEqual(helper.getMaxDrawDown(), 0)
self.assertEqual(helper.getCurrentDrawDown(), 0)
self.assertEqual(helper.getDuration(), datetime.timedelta())
def testNoDrawDown2(self):
helper = drawdown.DrawDownHelper()
dt = datetime.datetime.now()
helper.update(dt, 10, 10)
self.assertEqual(helper.getMaxDrawDown(), 0)
self.assertEqual(helper.getCurrentDrawDown(), 0)
self.assertEqual(helper.getDuration(), datetime.timedelta())
dt += datetime.timedelta(days=1)
helper.update(dt, 10.01, 10.01)
self.assertEqual(helper.getMaxDrawDown(), 0)
self.assertEqual(helper.getCurrentDrawDown(), 0)
self.assertEqual(helper.getDuration(), datetime.timedelta())
dt += datetime.timedelta(days=1)
helper.update(dt, 11, 11)
self.assertEqual(helper.getMaxDrawDown(), 0)
self.assertEqual(helper.getCurrentDrawDown(), 0)
self.assertEqual(helper.getDuration(), datetime.timedelta())
def testDrawDown1(self):
helper = drawdown.DrawDownHelper()
dt = datetime.datetime.now()
helper.update(dt, 10, 10)
self.assertEqual(helper.getMaxDrawDown(), 0)
self.assertEqual(helper.getCurrentDrawDown(), 0)
self.assertEqual(helper.getDuration(), datetime.timedelta())
dt += datetime.timedelta(days=1)
helper.update(dt, 5, 5)
self.assertEqual(helper.getMaxDrawDown(), -0.5)
self.assertEqual(helper.getCurrentDrawDown(), -0.5)
self.assertEqual(helper.getDuration(), datetime.timedelta(days=1))
dt += datetime.timedelta(days=1)
helper.update(dt, 4, 4)
self.assertEqual(helper.getMaxDrawDown(), -0.6)
self.assertEqual(helper.getCurrentDrawDown(), -0.6)
self.assertEqual(helper.getDuration(), datetime.timedelta(days=2))
dt += datetime.timedelta(days=1)
helper.update(dt, 4, 4)
self.assertEqual(helper.getMaxDrawDown(), -0.6)
self.assertEqual(helper.getCurrentDrawDown(), -0.6)
self.assertEqual(helper.getDuration(), datetime.timedelta(days=3))
dt += datetime.timedelta(days=1)
helper.update(dt, 5, 5)
self.assertEqual(helper.getMaxDrawDown(), -0.6)
self.assertEqual(helper.getCurrentDrawDown(), -0.5)
self.assertEqual(helper.getDuration(), datetime.timedelta(days=4))
dt += datetime.timedelta(days=1)
helper.update(dt, 9, 9)
self.assertEqual(helper.getMaxDrawDown(), -0.6)
self.assertEqual(helper.getCurrentDrawDown(), -0.1)
self.assertEqual(helper.getDuration(), datetime.timedelta(days=5))
dt += datetime.timedelta(days=1)
helper.update(dt, 9.9, 9.9)
self.assertEqual(helper.getMaxDrawDown(), -0.6)
self.assertEqual(round(helper.getCurrentDrawDown(), 2), -0.01)
self.assertEqual(helper.getDuration(), datetime.timedelta(days=6))
def testDrawDown2(self):
helper = drawdown.DrawDownHelper()
dt = datetime.datetime.now()
helper.update(dt, 10, 10)
self.assertEqual(helper.getMaxDrawDown(), 0)
self.assertEqual(helper.getCurrentDrawDown(), 0)
self.assertEqual(helper.getDuration(), datetime.timedelta())
dt += datetime.timedelta(minutes=1)
helper.update(dt, 5, 5)
self.assertEqual(helper.getMaxDrawDown(), -0.5)
self.assertEqual(helper.getCurrentDrawDown(), -0.5)
self.assertEqual(helper.getDuration(), datetime.timedelta(minutes=1))
dt += datetime.timedelta(minutes=1)
helper.update(dt, 4, 4)
self.assertEqual(helper.getMaxDrawDown(), -0.6)
self.assertEqual(helper.getCurrentDrawDown(), -0.6)
self.assertEqual(helper.getDuration(), datetime.timedelta(minutes=2))
dt += datetime.timedelta(minutes=1)
helper.update(dt, 4, 4)
self.assertEqual(helper.getMaxDrawDown(), -0.6)
self.assertEqual(helper.getCurrentDrawDown(), -0.6)
self.assertEqual(helper.getDuration(), datetime.timedelta(minutes=3))
dt += datetime.timedelta(minutes=1)
helper.update(dt, 5, 5)
self.assertEqual(helper.getMaxDrawDown(), -0.6)
self.assertEqual(helper.getCurrentDrawDown(), -0.5)
self.assertEqual(helper.getDuration(), datetime.timedelta(minutes=4))
dt += datetime.timedelta(minutes=1)
helper.update(dt, 9, 9)
self.assertEqual(helper.getMaxDrawDown(), -0.6)
self.assertEqual(helper.getCurrentDrawDown(), -0.1)
self.assertEqual(helper.getDuration(), datetime.timedelta(minutes=5))
dt += datetime.timedelta(minutes=1)
helper.update(dt, 9.9, 9.9)
self.assertEqual(helper.getMaxDrawDown(), -0.6)
self.assertEqual(round(helper.getCurrentDrawDown(), 2), -0.01)
self.assertEqual(helper.getDuration(), datetime.timedelta(minutes=6))
dt += datetime.timedelta(minutes=1)
helper.update(dt, 20, 20)
self.assertEqual(helper.getMaxDrawDown(), 0)
self.assertEqual(helper.getCurrentDrawDown(), 0)
self.assertEqual(helper.getDuration(), datetime.timedelta())
dt += datetime.timedelta(minutes=1)
helper.update(dt, 10, 10)
self.assertEqual(helper.getMaxDrawDown(), -0.5)
self.assertEqual(helper.getCurrentDrawDown(), -0.5)
self.assertEqual(helper.getDuration(), datetime.timedelta(minutes=1))
class AnalyzerTestCase(common.TestCase):
def testNoTrades(self):
barFeed = yahoofeed.Feed()
barFeed.addBarsFromCSV("ige", common.get_data_file_path("sharpe-ratio-test-ige.csv"))
barFeed.addBarsFromCSV("spy", common.get_data_file_path("sharpe-ratio-test-spy.csv"))
strat = strategy_test.TestStrategy(barFeed, 1000)
strat.setBrokerOrdersGTC(True)
strat.setUseAdjustedValues(True)
stratAnalyzer = drawdown.DrawDown()
strat.attachAnalyzer(stratAnalyzer)
strat.run()
self.assertEqual(strat.getBroker().getCash(), 1000)
self.assertEqual(strat.orderUpdatedCalls, 0)
self.assertEqual(stratAnalyzer.getMaxDrawDown(), 0)
self.assertEqual(stratAnalyzer.getLongestDrawDownDuration(), datetime.timedelta())
def __testIGE_BrokerImpl(self, quantity):
initialCash = 42.09*quantity
# This testcase is based on an example from Ernie Chan's book:
# 'Quantitative Trading: How to Build Your Own Algorithmic Trading Business'
barFeed = yahoofeed.Feed()
barFeed.addBarsFromCSV("ige", common.get_data_file_path("sharpe-ratio-test-ige.csv"))
strat = strategy_test.TestStrategy(barFeed, initialCash)
strat.setUseAdjustedValues(True)
strat.setBrokerOrdersGTC(True)
stratAnalyzer = drawdown.DrawDown()
strat.attachAnalyzer(stratAnalyzer)
# Disable volume checks to match book results.
strat.getBroker().getFillStrategy().setVolumeLimit(None)
# Manually place the order to get it filled on the first bar.
order = strat.getBroker().createMarketOrder(broker.Order.Action.BUY, "ige", quantity, True) # Adj. Close: 42.09
order.setGoodTillCanceled(True)
strat.getBroker().submitOrder(order)
strat.addOrder(datetime.datetime(2007, 11, 13), strat.getBroker().createMarketOrder, broker.Order.Action.SELL, "ige", quantity, True) # Adj. Close: 127.64
strat.run()
self.assertEqual(round(strat.getBroker().getCash(), 2), initialCash + (127.64 - 42.09) * quantity)
self.assertEqual(strat.orderUpdatedCalls, 6)
self.assertEqual(round(stratAnalyzer.getMaxDrawDown(), 5), 0.31178)
self.assertEqual(stratAnalyzer.getLongestDrawDownDuration(), datetime.timedelta(days=623))
def testIGE_Broker(self):
self.__testIGE_BrokerImpl(1)
def testIGE_Broker2(self):
self.__testIGE_BrokerImpl(2)
def __testManualImpl(self, closingPrices, cash):
barFeed = TestBarFeed(bar.Frequency.DAY)
bars = build_bars_from_closing_prices(closingPrices)
barFeed.addBarsFromSequence("orcl", bars)
strat = strategy_test.TestStrategy(barFeed, cash)
stratAnalyzer = drawdown.DrawDown()
strat.attachAnalyzer(stratAnalyzer)
# Manually place the order to get it filled on the first bar.
order = strat.getBroker().createMarketOrder(broker.Order.Action.BUY, "orcl", 1, True)
order.setGoodTillCanceled(True)
strat.getBroker().submitOrder(order)
strat.run()
return stratAnalyzer
def testManual_NoDD(self):
# No drawdown
stratAnalyzer = self.__testManualImpl([10, 10, 10], 10)
self.assertEqual(round(stratAnalyzer.getMaxDrawDown(), 2), 0)
self.assertEqual(stratAnalyzer.getLongestDrawDownDuration(), datetime.timedelta())
def testManual_1DD(self):
stratAnalyzer = self.__testManualImpl([10, 9, 8], 10)
self.assertEqual(round(stratAnalyzer.getMaxDrawDown(), 2), 0.2)
self.assertEqual(stratAnalyzer.getLongestDrawDownDuration(), datetime.timedelta(days=2))
def testManual_2DD(self):
stratAnalyzer = self.__testManualImpl([10, 9.5, 9, 8, 11, 8], 10)
self.assertEqual(round(stratAnalyzer.getMaxDrawDown(), 2), 0.27)
self.assertEqual(stratAnalyzer.getLongestDrawDownDuration(), datetime.timedelta(days=3))
|
|
"""
CIF format file I/O operations.
"""
import shlex
from datetime import date
class CIF(object):
def __init__(self, name="structure", file=None):
self.name = name
self._data = {}
self._headings = {}
self._element_labels = {}
self.non_loops = ["data", "cell", "sym", "end"]
self.block_order = ["data", "sym", "sym_loop", "cell", "atoms", "bonds"]
if file is not None:
self.read(file)
def read(self, filename):
filestream = open(filename, 'r')
filelines = filestream.readlines()
blocks = []
loopcount = 0
loopentries = {}
loopread = False
blockread = False
self.block_order = []
for line in filelines:
            # Strip the trailing newline and any surrounding whitespace.
            line = line.strip()
if line.startswith("data_"):
self.name = line[5:]
self.insert_block_order("data")
self.add_data("data", data_=self.name)
if loopread and line.startswith("_"):
# change block names, easier for adding data to structure graph
if ('_atom_' in line and '_bond_' not in line):
self.insert_block_order('atoms', loopcount, _REPLACE=True)
elif ('_geom_bond' in line):
self.insert_block_order('bonds', loopcount, _REPLACE=True)
loopentries[loopcount].append(line)
elif loopread and not line.startswith("_"):
loopread = False
blockread = True
elif not loopread and line.startswith("_"):
block = self.get_non_loop_block(line)
self.insert_block_order(block)
                # Non-loop entries are expected to be single key/value pairs.
                # If a line carries more than two tokens, keep the first two
                # and discard the rest.
                try:
                    key, val = line.split()
                except ValueError:
                    key, val = line.split()[:2]
if val.endswith("(0)"):
val = val[:-3]
self.add_data(block, **{key.strip():self.general_label(val)})
if blockread and (line.startswith("loop_") or line.startswith("_") or not line):
blockread = False
if line == "loop_":
loopcount += 1
loopentries[loopcount] = []
loopread = True
blockread = False
self.insert_block_order(loopcount)
if blockread:
#split_line = line.strip().split()
split_line = shlex.split(line)
assert len(loopentries[loopcount]) == len(split_line)
for key, val in zip(loopentries[loopcount], split_line):
self.add_data(loopcount, **{key:self.general_label(val)})
filestream.close()
def get_time(self):
t = date.today()
return t.strftime("%A %d %B %Y")
def insert_block_order(self, name, index=None, _REPLACE=False):
"""Adds a block to the cif file in a specified order, unless index is specified,
will not override existing order"""
if index is None and name in self.block_order:
return
elif index is not None and (self.block_order[index] == name):
return
elif index is None and name not in self.block_order:
index = len(self.block_order)
elif index is not None and name in self.block_order and index < len(self.block_order) and not _REPLACE:
old = self.block_order.index(name)
self.block_order.pop(old)
elif index is not None and name not in self.block_order and index < len(self.block_order) and _REPLACE:
self.block_order.pop(index)
elif index is not None and name in self.block_order and index >= len(self.block_order):
old = self.block_order.index(name)
self.block_order.pop(old)
index = len(self.block_order)
self.block_order = self.block_order[:index] + [name] + \
self.block_order[index:]
def add_data(self, block, **kwargs):
self._headings.setdefault(block, [])
for key, val in kwargs.items():
try:
self._data[key].append(val)
except KeyError:
self._headings[block].append(key)
if block in self.non_loops:
self._data[key] = val
else:
self._data[key] = [val]
            except AttributeError:
                # The key already holds a scalar (non-loop) value; report the
                # clash rather than corrupting it silently.
                print(self._data.keys())
def get_element_label(self, el):
self._element_labels.setdefault(el, 0)
self._element_labels[el] += 1
return el + str(self._element_labels[el])
def __str__(self):
line = ""
for block in self.block_order:
            # NOTE: we should still be able to write CIFs even if the bond
            # block is not specified.
if block in self._headings:
heads = self._headings[block]
if block in self.non_loops:
vals = zip([CIF.label(i) for i in heads], [self._data[i] for i in heads])
else:
line += "loop_\n"+"\n".join([CIF.label(i) for i in heads])+"\n"
vals = zip(*[self._data[i] for i in heads])
for ll in vals:
line += "".join(ll) + "\n"
return line
def get_non_loop_block(self, line):
if line.startswith("_cell"):
return "cell"
elif line.startswith("_symmetry"):
return "sym"
elif line.startswith("_audit"):
return "data"
# terrible idea for formatting.. but oh well :)
@staticmethod
def atom_site_fract_x(x):
return "%10.5f "%(x)
@staticmethod
def atom_site_fract_y(x):
return "%10.5f "%(x)
@staticmethod
def atom_site_fract_z(x):
return "%10.5f "%(x)
@staticmethod
def atom_type_partial_charge(x):
return "%10.5f "%(x)
@staticmethod
def atom_site_label(x):
return "%-7s "%(x)
@staticmethod
def atom_site_type_symbol(x):
return "%-6s "%(x)
@staticmethod
def atom_site_description(x):
return "%-5s "%(x)
@staticmethod
def geom_bond_atom_site_label_1(x):
return "%-7s "%(x)
@staticmethod
def geom_bond_atom_site_label_2(x):
return "%-7s "%(x)
@staticmethod
def geom_bond_distance(x):
return "%7.3f "%(x)
@staticmethod
def geom_bond_site_symmetry_2(x):
return "%-5s "%(x)
@staticmethod
def ccdc_geom_bond_type(x):
return "%5s "%(x)
@staticmethod
def cell_length_a(x):
return "%-7.4f "%(x)
@staticmethod
def cell_length_b(x):
return "%-7.4f "%(x)
@staticmethod
def cell_length_c(x):
return "%-7.4f "%(x)
@staticmethod
def cell_angle_alpha(x):
return "%-7.4f "%(x)
@staticmethod
def cell_angle_beta(x):
return "%-7.4f "%(x)
@staticmethod
def cell_angle_gamma(x):
return "%-7.4f "%(x)
@staticmethod
def atom_site_fragment(x):
return "%-4i "%(x)
@staticmethod
def atom_site_constraints(x):
return "%-4i "%(x)
@staticmethod
def label(x):
"""special cases"""
if x == "data_":
return x
elif x == "_symmetry_space_group_name_H_M":
# replace H_M with H-M.
x = x[:28] + "-" + x[29:]
return "%-34s"%(x)
@staticmethod
def general_label(x):
return "%s "%(x)
def get_time():
t = date.today()
return t.strftime("%A %d %B %Y")
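# Hedged usage sketch for the CIF class above: build a minimal non-loop cell
# block and render it through __str__. The "demo" name and tag values are
# illustrative only; formatting relies on the static helpers defined on CIF.
if __name__ == "__main__":
    demo = CIF(name="demo")
    demo.insert_block_order("cell")
    demo.add_data("cell", _cell_length_a=CIF.cell_length_a(10.0))
    demo.add_data("cell", _cell_angle_alpha=CIF.cell_angle_alpha(90.0))
    # Non-loop blocks are written as "label value" lines.
    print(demo)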
|
|
"""Tests the changelog activity."""
import os
from rever import vcsutils
from rever.logger import current_logger
from rever.main import env_main
REVER_XSH = """
$ACTIVITIES = ['changelog']
$DAG['changelog'].kwargs = {
'filename': 'CHANGELOG.rst',
'ignore': ['TEMPLATE.rst'],
'news': 'nuws',
}
"""
CHANGELOG_RST = """.. current developments
v42.1.0
============
* And some other stuff happeneded.
"""
TEMPLATE_RST = """**Added:**
* <news item>
**Changed:**
* <news item>
**Deprecated:**
* <news item>
**Removed:**
* <news item>
**Fixed:**
* <news item>
**Security:**
* <news item>
"""
N0_RST = """**Added:**
* from n0
**Changed:**
* <news item>
**Deprecated:**
* <news item>
**Removed:**
* here
* and here
**Fixed:**
* <news item>
**Security:**
* <news item>
"""
N1_RST = """**Added:**
* from n1
**Changed:**
* But what martial arts are they mixing?
**Deprecated:**
* <news item>
**Removed:**
* There
**Fixed:**
* <news item>
**Security:** None
"""
CHANGELOG_42_1_1 = """.. current developments
v42.1.1
====================
**Added:**
* from n0
* from n1
**Changed:**
* But what martial arts are they mixing?
**Removed:**
* here
* and here
* There
v42.1.0
============
* And some other stuff happeneded.
"""
def test_changelog(gitrepo):
os.makedirs('nuws', exist_ok=True)
files = [('rever.xsh', REVER_XSH),
('CHANGELOG.rst', CHANGELOG_RST),
('nuws/TEMPLATE.rst', TEMPLATE_RST),
('nuws/n0.rst', N0_RST),
('nuws/n1.rst', N1_RST),
]
for filename, body in files:
with open(filename, 'w') as f:
f.write(body)
vcsutils.track('.')
vcsutils.commit('initial changelog and news')
env_main(['42.1.1'])
# now see if this worked
newsfiles = os.listdir('nuws')
assert 'TEMPLATE.rst' in newsfiles
assert 'n0.rst' not in newsfiles
assert 'n1.rst' not in newsfiles
with open('CHANGELOG.rst') as f:
cl = f.read()
assert CHANGELOG_42_1_1 == cl
    # ensure that the updates were committed
logger = current_logger()
entries = logger.load()
assert entries[-2]['rev'] != entries[-1]['rev']
SETUP_XSH = """
$PROJECT = 'castlehouse'
$ACTIVITIES = ['changelog']
$REVER_DIR = 'rvr'
$CHANGELOG_FILENAME = 'CHANGELOG.rst'
$CHANGELOG_NEWS = 'nuws'
$CHANGELOG_TEMPLATE = 'TEMPLATE.rst'
"""
def test_changelog_setup(gitrepo):
os.makedirs('nuws', exist_ok=True)
files = [('rever.xsh', SETUP_XSH),
]
for filename, body in files:
with open(filename, 'w') as f:
f.write(body)
vcsutils.track('.')
vcsutils.commit('initial changelog')
env_main(['setup'])
# now see if this worked
newsfiles = os.listdir('nuws')
assert 'TEMPLATE.rst' in newsfiles
basefiles = os.listdir('.')
assert 'CHANGELOG.rst' in basefiles
with open('CHANGELOG.rst') as f:
cl = f.read()
assert 'castlehouse' in cl
assert '.gitignore' in basefiles
with open('.gitignore') as f:
gi = f.read()
assert '\n# Rever\nrvr/\n' in gi
CONDA_BUILD_REVER_XSH = """
$ACTIVITIES = ['changelog']
$RELEASE_DATE = "2001-01-02"
$CHANGELOG_FILENAME = "CHANGELOG.txt"
$CHANGELOG_PATTERN = "# current developments"
$CHANGELOG_HEADER = '''# current developments
$RELEASE_DATE $VERSION:
------------------
'''
$CHANGELOG_CATEGORIES = (
"Enhancements",
"Bug fixes",
"Deprecations",
"Docs",
"Other",
)
def title_formatter(category):
s = category + ':\\n'
s += "-" * (len(category) + 1)
s += "\\n\\n"
return s
$CHANGELOG_CATEGORY_TITLE_FORMAT = title_formatter
$CHANGELOG_AUTHORS_TITLE = "Contributors"
"""
CONDA_BUILD_CHANGELOG_RST = """# current developments
1999-09-12 42.1.0:
------------------
* And some other stuff happeneded.
"""
CONDA_BUILD_TEMPLATE_RST = """Enhancements:
-------------
* <news item>
Bug fixes:
----------
* <news item>
Deprecations:
-------------
* <news item>
Docs:
-----
* <news item>
Other:
------
* <news item>
"""
CONDA_BUILD_N0_RST = """
Enhancements:
-------------
* from n0
Bug fixes:
----------
* <news item>
Deprecations:
-------------
* here
* and here
Docs:
-----
* <news item>
Other:
------
* <news item>
"""
CONDA_BUILD_N1_RST = """
Enhancements:
-------------
* from n1
Bug fixes:
----------
* <news item>
Deprecations:
-------------
* There
Docs:
-----
* But what martial arts are they mixing?
Other:
------
* <news item>
"""
CONDA_BUILD_CHANGELOG_42_1_1 = """# current developments
2001-01-02 42.1.1:
------------------
Enhancements:
-------------
* from n0
* from n1
Deprecations:
-------------
* here
* and here
* There
Docs:
-----
* But what martial arts are they mixing?
1999-09-12 42.1.0:
------------------
* And some other stuff happeneded.
"""
def test_changelog_conda_build_style(gitrepo):
os.makedirs('news', exist_ok=True)
files = [('rever.xsh', CONDA_BUILD_REVER_XSH),
('CHANGELOG.txt', CONDA_BUILD_CHANGELOG_RST),
('news/TEMPLATE', CONDA_BUILD_TEMPLATE_RST),
('news/n0', CONDA_BUILD_N0_RST),
('news/n1', CONDA_BUILD_N1_RST),
]
for filename, body in files:
with open(filename, 'w') as f:
f.write(body)
vcsutils.track('.')
vcsutils.commit('initial changelog and news')
env_main(['42.1.1'])
# now see if this worked
newsfiles = os.listdir('news')
assert 'TEMPLATE' in newsfiles
assert 'n0' not in newsfiles
assert 'n1' not in newsfiles
with open('CHANGELOG.txt') as f:
cl = f.read()
assert CONDA_BUILD_CHANGELOG_42_1_1 == cl
    # ensure that the updates were committed
logger = current_logger()
entries = logger.load()
assert entries[-2]['rev'] != entries[-1]['rev']
|
|
from __future__ import absolute_import, print_function, unicode_literals
from builtins import dict, str
from indra.statements import *
from indra.assemblers.cyjs import CyJSAssembler
mek = Agent('MAP2K1', db_refs={'HGNC': '6840', 'TEXT': 'mek1'})
erk = Agent('MAPK1', db_refs={'UP': 'P28482'})
dusp = Agent('DUSP4')
st_phos = Phosphorylation(mek, erk)
st_phos_Y = Phosphorylation(mek, erk, residue='Y')
st_phos_T = Phosphorylation(mek, erk, residue='T')
st_dephos = Dephosphorylation(dusp, erk)
st_complex = Complex([mek, erk, dusp])
st_act = Activation(mek, erk)
st_gef = Gef(Agent('SOS1'), Agent('HRAS'))
st_gap = Gap(Agent('RASA1'), Agent('HRAS'))
st_incamount = IncreaseAmount(Agent('TP53'), Agent('MDM2'))
st_decamount = DecreaseAmount(Agent('MDM2'), Agent('TP53'))
st_act2 = Inhibition(dusp, erk)
st_cited = Phosphorylation(mek, erk, evidence=Evidence(pmid='12345',
text='MEK phosphorylates ERK'))
st_cited2 = Phosphorylation(mek, erk, evidence=Evidence(pmid='api35',
text='MEK phosphorylates ERK'))
st_selfmod = Autophosphorylation(Agent('AKT1'), 'S', '473')
def test_act():
cja = CyJSAssembler()
cja.add_statements([st_act, st_act2])
cja.make_model()
assert len(cja._nodes) == 3
assert len(cja._edges) == 2
polarities = [edge['data']['polarity'] for edge in cja._edges]
assert len(set(polarities))==2
assert 'positive' in polarities
assert 'negative' in polarities
db_refs = [node['data']['db_refs'] for node in cja._nodes]
for node, refs in zip(cja._nodes, db_refs):
if node['data']['name'] == 'MAP2K1':
assert refs.get('HGNC') == \
'http://identifiers.org/hgnc/HGNC:6840', refs
assert refs.get('TEXT') == 'mek1', refs
if node['data']['name'] == 'MAPK1':
assert refs.get('UniProt')
if node['data']['name'] == 'DUSP4':
assert not refs
def test_regamount():
cja = CyJSAssembler()
cja.add_statements([st_incamount, st_decamount])
cja.make_model()
assert len(cja._nodes) == 2
assert len(cja._edges) == 2
polarities = [edge['data']['polarity'] for edge in cja._edges]
assert len(set(polarities))==2
assert 'positive' in polarities
assert 'negative' in polarities
def test_ras():
cja = CyJSAssembler()
cja.add_statements([st_gef, st_gap])
cja.make_model()
assert len(cja._nodes) == 3
assert len(cja._edges) == 2
polarities = [edge['data']['polarity'] for edge in cja._edges]
assert len(set(polarities))==2
assert 'positive' in polarities
assert 'negative' in polarities
def test_selfmod():
cja = CyJSAssembler()
cja.add_statements([st_selfmod])
cja.make_model()
assert len(cja._nodes) == 1
assert len(cja._edges) == 1
polarities = [edge['data']['polarity'] for edge in cja._edges]
assert len(polarities) == 1
assert polarities[0] == 'positive'
def test_complex():
cja = CyJSAssembler()
cja.add_statements([st_complex])
cja.make_model()
assert len(cja._nodes) == 3
assert len(cja._edges) == 3
polarities = [edge['data']['polarity'] for edge in cja._edges]
assert len(set(polarities))==1
assert 'none' in polarities
def test_print_cyjs_graph():
cja = CyJSAssembler()
cja.add_statements([st_act, st_act2])
cja.make_model()
cyjs_str = cja.print_cyjs_graph()
# assert output is not empty
assert len(cyjs_str) > len('{\n "edges": [],\n "nodes": []\n}')
def test_no_grouping():
st1 = Phosphorylation(Agent('A'), Agent('B'))
st2 = Phosphorylation(Agent('A'), Agent('C'))
st3 = Phosphorylation(Agent('C'), Agent('B'))
cja = CyJSAssembler()
cja.add_statements([st1, st2, st3])
cja.make_model(grouping=True)
parents = [node['data']['parent'] for node in cja._nodes]
for parent in parents:
assert parent == ''
def test_grouping_block_targeting_node():
st1 = Phosphorylation(Agent('A'), Agent('B'))
st2 = Phosphorylation(Agent('C'), Agent('B'))
cja = CyJSAssembler()
cja.add_statements([st1, st2])
cja.make_model(grouping=True)
for node in cja._nodes:
if node['data']['name'] == 'A':
parent_a = node['data']['parent']
if node['data']['name'] == 'B':
parent_b = node['data']['parent']
assert parent_b == ''
if node['data']['name'] == 'C':
parent_c = node['data']['parent']
assert_element_properties(cja)
assert parent_a == parent_c
parent_a_name = [x['data']['name'] for x in cja._nodes if
x['data']['id']==parent_a][0]
assert parent_a_name.startswith('Group')
assert len(cja._edges) == 3
virtual_edges = [x for x in cja._edges if
x['data']['i'] == 'Virtual']
assert len(virtual_edges) == 2
real_edges = [x for x in cja._edges if
x['data']['i'] != 'Virtual']
assert len(real_edges) == 1
def test_grouping_node_targeting_block():
st1 = Phosphorylation(Agent('A'), Agent('B'))
st2 = Phosphorylation(Agent('A'), Agent('C'))
cja = CyJSAssembler()
cja.add_statements([st1, st2])
cja.make_model(grouping=True)
for node in cja._nodes:
if node['data']['name'] == 'A':
parent_a = node['data']['parent']
assert parent_a == ''
if node['data']['name'] == 'B':
parent_b = node['data']['parent']
if node['data']['name'] == 'C':
parent_c = node['data']['parent']
assert_element_properties(cja)
assert parent_b == parent_c
parent_b_name = [x['data']['name'] for x in cja._nodes if
x['data']['id']==parent_b][0]
assert parent_b_name.startswith('Group')
assert len(cja._edges) == 3
virtual_edges = [x for x in cja._edges if
x['data']['i'] == 'Virtual']
assert len(virtual_edges) == 2
real_edges = [x for x in cja._edges if
x['data']['i'] != 'Virtual']
assert len(real_edges) == 1
def test_grouping_node_targeting_block_targeting_node():
st1 = Phosphorylation(Agent('A'), Agent('B'))
st2 = Phosphorylation(Agent('A'), Agent('C'))
st3 = Phosphorylation(Agent('B'), Agent('D'))
st4 = Phosphorylation(Agent('C'), Agent('D'))
cja = CyJSAssembler()
cja.add_statements([st1, st2, st3, st4])
cja.make_model(grouping=True)
for node in cja._nodes:
if node['data']['name'] == 'A':
parent_a = node['data']['parent']
assert parent_a == ''
if node['data']['name'] == 'B':
parent_b = node['data']['parent']
if node['data']['name'] == 'C':
parent_c = node['data']['parent']
if node['data']['name'] == 'D':
parent_d = node['data']['parent']
assert parent_d == ''
assert_element_properties(cja)
assert parent_b == parent_c
parent_b_name = [x['data']['name'] for x in cja._nodes if
x['data']['id']==parent_b][0]
assert parent_b_name.startswith('Group')
assert len(cja._edges) == 6
virtual_edges = [x for x in cja._edges if
x['data']['i'] == 'Virtual']
assert len(virtual_edges) == 4
real_edges = [x for x in cja._edges if
x['data']['i'] != 'Virtual']
assert len(real_edges) == 2
def test_grouping_block_targeting_block():
st1 = Phosphorylation(Agent('A'), Agent('B'))
st2 = Phosphorylation(Agent('A'), Agent('C'))
st3 = Phosphorylation(Agent('D'), Agent('B'))
st4 = Phosphorylation(Agent('D'), Agent('C'))
cja = CyJSAssembler()
cja.add_statements([st1, st2, st3, st4])
cja.make_model(grouping=True)
for node in cja._nodes:
if node['data']['name'] == 'A':
parent_a = node['data']['parent']
if node['data']['name'] == 'B':
parent_b = node['data']['parent']
if node['data']['name'] == 'C':
parent_c = node['data']['parent']
if node['data']['name'] == 'D':
parent_d = node['data']['parent']
assert_element_properties(cja)
assert parent_b == parent_c
assert parent_a == parent_d
parent_b_name = [x['data']['name'] for x in cja._nodes if
x['data']['id']==parent_b][0]
parent_a_name = [x['data']['name'] for x in cja._nodes if
x['data']['id']==parent_a][0]
assert parent_b_name.startswith('Group')
assert parent_a_name.startswith('Group')
assert len(cja._edges) == 5
virtual_edges = [x for x in cja._edges if
x['data']['i'] == 'Virtual']
assert len(virtual_edges) == 4
real_edges = [x for x in cja._edges if
x['data']['i'] != 'Virtual']
assert len(real_edges) == 1
def test_edge_aggregation_between_nongroup_nodes():
cja = CyJSAssembler()
cja.add_statements([st_phos_Y, st_phos_T])
cja.make_model(grouping=False)
assert len(cja._nodes) == 2
assert len(cja._edges) == 1
for edge in cja._edges:
assert len(edge['data']['uuid_list']) == 2
for node in cja._nodes:
assert len(node['data']['uuid_list']) == 2
cja = CyJSAssembler()
cja.add_statements([st_phos_Y, st_phos_T])
cja.make_model(grouping=True)
assert len(cja._nodes) == 2
assert len(cja._edges) == 1
for edge in cja._edges:
assert len(edge['data']['uuid_list']) == 2
for node in cja._nodes:
assert len(node['data']['uuid_list']) == 2
def assert_element_properties(cja):
# each element needs an id
elements = ([n for n in cja._nodes] + [e for e in cja._edges])
for element in elements:
assert element['data']['id'] is not None, "Element ID is none"
assert element['data']['id'] != '', "Element ID is blank string!"
# each element should also have a list of uuids with at least one uuid
assert element['data']['uuid_list'] is not None, "uuid_list is None"
assert len(element['data']['uuid_list']) >= 1, "uuid_list is empty!"
for uuid in element['data']['uuid_list']:
assert type(uuid) == type('abc'), str(uuid) + ' is not a string'
|
|
# -*- coding: utf-8 -*-
from __future__ import print_function
import os
import sys
import imp
import subprocess
## Python 2.6 subprocess.check_output compatibility. Thanks Greg Hewgill!
if 'check_output' not in dir(subprocess):
def check_output(cmd_args, *args, **kwargs):
proc = subprocess.Popen(
cmd_args, *args,
stdout=subprocess.PIPE, stderr=subprocess.PIPE, **kwargs)
out, err = proc.communicate()
        if proc.returncode != 0:
            # CalledProcessError expects the return code and the command.
            raise subprocess.CalledProcessError(proc.returncode, cmd_args)
return out
subprocess.check_output = check_output
from setuptools import setup, find_packages
from setuptools.command.test import test as TestCommand
from distutils import spawn
try:
import colorama
colorama.init() # Initialize colorama on Windows
except ImportError:
# Don't require colorama just for running paver tasks. This allows us to
# run `paver install' without requiring the user to first have colorama
# installed.
pass
# Add the current directory to the module search path.
sys.path.insert(0, os.path.abspath('.'))
## Constants
CODE_DIRECTORY = 'easy_tensorflow'
DOCS_DIRECTORY = 'docs'
TESTS_DIRECTORY = 'tests'
PYTEST_FLAGS = ['--doctest-modules']
# Import metadata. Normally this would just be:
#
# from easy_tensorflow import metadata
#
# However, when we do this, we also import `easy_tensorflow/__init__.py'. If this
# imports names from some other modules and these modules have third-party
# dependencies that need installing (which happens after this file is run), the
# script will crash. What we do instead is to load the metadata module by path
# instead, effectively side-stepping the dependency problem. Please make sure
# metadata has no dependencies, otherwise they will need to be added to
# the setup_requires keyword.
metadata = imp.load_source(
'metadata', os.path.join(CODE_DIRECTORY, 'metadata.py'))
## Miscellaneous helper functions
def get_project_files():
"""Retrieve a list of project files, ignoring hidden files.
:return: sorted list of project files
:rtype: :class:`list`
"""
if is_git_project() and has_git():
return get_git_project_files()
project_files = []
    for top, subdirs, files in os.walk('.'):
        # Prune hidden directories in place so os.walk does not descend into
        # them; removing items while iterating over the same list would skip
        # entries.
        subdirs[:] = [d for d in subdirs if not d.startswith('.')]
for f in files:
if f.startswith('.'):
continue
project_files.append(os.path.join(top, f))
return project_files
def is_git_project():
return os.path.isdir('.git')
def has_git():
return bool(spawn.find_executable("git"))
def get_git_project_files():
"""Retrieve a list of all non-ignored files, including untracked files,
excluding deleted files.
:return: sorted list of git project files
:rtype: :class:`list`
"""
cached_and_untracked_files = git_ls_files(
'--cached', # All files cached in the index
'--others', # Untracked files
# Exclude untracked files that would be excluded by .gitignore, etc.
'--exclude-standard')
uncommitted_deleted_files = git_ls_files('--deleted')
# Since sorting of files in a set is arbitrary, return a sorted list to
# provide a well-defined order to tools like flake8, etc.
return sorted(cached_and_untracked_files - uncommitted_deleted_files)
def git_ls_files(*cmd_args):
"""Run ``git ls-files`` in the top-level project directory. Arguments go
directly to execution call.
:return: set of file names
:rtype: :class:`set`
"""
cmd = ['git', 'ls-files']
cmd.extend(cmd_args)
return set(subprocess.check_output(cmd).splitlines())
def print_success_message(message):
"""Print a message indicating success in green color to STDOUT.
:param message: the message to print
:type message: :class:`str`
"""
try:
import colorama
print(colorama.Fore.GREEN + message + colorama.Fore.RESET)
except ImportError:
print(message)
def print_failure_message(message):
"""Print a message indicating failure in red color to STDERR.
:param message: the message to print
:type message: :class:`str`
"""
try:
import colorama
print(colorama.Fore.RED + message + colorama.Fore.RESET,
file=sys.stderr)
except ImportError:
print(message, file=sys.stderr)
def read(filename):
"""Return the contents of a file.
:param filename: file path
:type filename: :class:`str`
:return: the file's content
:rtype: :class:`str`
"""
with open(os.path.join(os.path.dirname(__file__), filename)) as f:
return f.read()
def _lint():
"""Run lint and return an exit code."""
# Flake8 doesn't have an easy way to run checks using a Python function, so
# just fork off another process to do it.
# Python 3 compat:
# - The result of subprocess call outputs are byte strings, meaning we need
# to pass a byte string to endswith.
project_python_files = [filename for filename in get_project_files()
if filename.endswith(b'.py')]
retcode = subprocess.call(
['flake8', '--max-complexity=10'] + project_python_files)
if retcode == 0:
print_success_message('No style errors')
return retcode
def _test():
"""Run the unit tests.
:return: exit code
"""
# Make sure to import pytest in this function. For the reason, see here:
# <http://pytest.org/latest/goodpractises.html#integration-with-setuptools-test-commands> # NOPEP8
import pytest
# This runs the unit tests.
# It also runs doctest, but only on the modules in TESTS_DIRECTORY.
return pytest.main(PYTEST_FLAGS + [TESTS_DIRECTORY])
def _test_all():
"""Run lint and tests.
:return: exit code
"""
return _lint() + _test()
# The following code is to allow tests to be run with `python setup.py test'.
# The main reason to make this possible is to allow tests to be run as part of
# Setuptools' automatic run of 2to3 on the source code. The recommended way to
# run tests is still `paver test_all'.
# See <http://pythonhosted.org/setuptools/python3.html>
# Code based on <http://pytest.org/latest/goodpractises.html#integration-with-setuptools-test-commands> # NOPEP8
class TestAllCommand(TestCommand):
def finalize_options(self):
TestCommand.finalize_options(self)
# These are fake, and just set to appease distutils and setuptools.
self.test_suite = True
self.test_args = []
def run_tests(self):
raise SystemExit(_test_all())
# define install_requires for specific Python versions
python_version_specific_requires = []
# as of Python >= 2.7 and >= 3.2, the argparse module is maintained within
# the Python standard library, otherwise we install it as a separate package
if sys.version_info < (2, 7) or (3, 0) <= sys.version_info < (3, 2):
python_version_specific_requires.append('argparse')
# See here for more options:
# <http://pythonhosted.org/setuptools/setuptools.html>
setup_dict = dict(
name=metadata.package,
version=metadata.version,
author=metadata.authors[0],
author_email=metadata.emails[0],
maintainer=metadata.authors[0],
maintainer_email=metadata.emails[0],
url=metadata.url,
description=metadata.description,
long_description=read('README.rst'),
# Find a list of classifiers here:
# <http://pypi.python.org/pypi?%3Aaction=list_classifiers>
classifiers=[
'Development Status :: 1 - Planning',
'Environment :: Console',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: Implementation :: PyPy',
'Topic :: Documentation',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: System :: Installation/Setup',
'Topic :: System :: Software Distribution',
],
packages=find_packages(exclude=(TESTS_DIRECTORY,)),
install_requires=[
# your module dependencies
] + python_version_specific_requires,
# Allow tests to be run with `python setup.py test'.
tests_require=[
'pytest==2.5.1',
'mock==1.0.1',
'flake8==2.1.0',
],
cmdclass={'test': TestAllCommand},
zip_safe=False, # don't use eggs
entry_points={
'console_scripts': [
'easy_tensorflow_cli = easy_tensorflow.main:entry_point'
],
# if you have a gui, use this
# 'gui_scripts': [
# 'easy_tensorflow_gui = easy_tensorflow.gui:entry_point'
# ]
}
)
def main():
setup(**setup_dict)
if __name__ == '__main__':
main()
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Wrappers around standard crypto data elements.
Includes root and intermediate CAs, SSH key_pairs and x509 certificates.
"""
from __future__ import absolute_import
import hashlib
import os
import string
from nova import context
from nova import db
from nova import exception
from nova.openstack.common import cfg
from nova.openstack.common import fileutils
from nova.openstack.common import log as logging
from nova.openstack.common import timeutils
from nova import utils
LOG = logging.getLogger(__name__)
crypto_opts = [
cfg.StrOpt('ca_file',
default='cacert.pem',
help=_('Filename of root CA')),
cfg.StrOpt('key_file',
default=os.path.join('private', 'cakey.pem'),
help=_('Filename of private key')),
cfg.StrOpt('crl_file',
default='crl.pem',
help=_('Filename of root Certificate Revocation List')),
cfg.StrOpt('keys_path',
default='$state_path/keys',
help=_('Where we keep our keys')),
cfg.StrOpt('ca_path',
default='$state_path/CA',
help=_('Where we keep our root CA')),
cfg.BoolOpt('use_project_ca',
default=False,
help=_('Should we use a CA for each project?')),
cfg.StrOpt('user_cert_subject',
default='/C=US/ST=California/O=OpenStack/'
'OU=NovaDev/CN=%.16s-%.16s-%s',
help=_('Subject for certificate for users, %s for '
'project, user, timestamp')),
cfg.StrOpt('project_cert_subject',
default='/C=US/ST=California/O=OpenStack/'
'OU=NovaDev/CN=project-ca-%.16s-%s',
help=_('Subject for certificate for projects, %s for '
'project, timestamp')),
]
CONF = cfg.CONF
CONF.register_opts(crypto_opts)
CONF.import_opt('state_path', 'nova.config')
def ca_folder(project_id=None):
if CONF.use_project_ca and project_id:
return os.path.join(CONF.ca_path, 'projects', project_id)
return CONF.ca_path
def ca_path(project_id=None):
return os.path.join(ca_folder(project_id), CONF.ca_file)
def key_path(project_id=None):
return os.path.join(ca_folder(project_id), CONF.key_file)
def crl_path(project_id=None):
return os.path.join(ca_folder(project_id), CONF.crl_file)
def fetch_ca(project_id=None):
if not CONF.use_project_ca:
project_id = None
ca_file_path = ca_path(project_id)
if not os.path.exists(ca_file_path):
raise exception.CryptoCAFileNotFound(project_id=project_id)
with open(ca_file_path, 'r') as cafile:
return cafile.read()
def ensure_ca_filesystem():
"""Ensure the CA filesystem exists."""
ca_dir = ca_folder()
if not os.path.exists(ca_path()):
genrootca_sh_path = os.path.join(os.path.dirname(__file__),
'CA',
'genrootca.sh')
start = os.getcwd()
fileutils.ensure_tree(ca_dir)
os.chdir(ca_dir)
utils.execute("sh", genrootca_sh_path)
os.chdir(start)
def _generate_fingerprint(public_key_file):
(out, err) = utils.execute('ssh-keygen', '-q', '-l', '-f', public_key_file)
fingerprint = out.split(' ')[1]
return fingerprint
def generate_fingerprint(public_key):
with utils.tempdir() as tmpdir:
try:
pubfile = os.path.join(tmpdir, 'temp.pub')
with open(pubfile, 'w') as f:
f.write(public_key)
return _generate_fingerprint(pubfile)
except exception.ProcessExecutionError:
raise exception.InvalidKeypair()
def generate_key_pair(bits=1024):
    # NOTE: ssh-keygen's RSA keys use the standard public exponent 65537 (F4).
with utils.tempdir() as tmpdir:
keyfile = os.path.join(tmpdir, 'temp')
utils.execute('ssh-keygen', '-q', '-b', bits, '-N', '',
'-t', 'rsa', '-f', keyfile, '-C', 'Generated by Nova')
fingerprint = _generate_fingerprint('%s.pub' % (keyfile))
if not os.path.exists(keyfile):
raise exception.FileNotFound(keyfile)
private_key = open(keyfile).read()
public_key_path = keyfile + '.pub'
if not os.path.exists(public_key_path):
raise exception.FileNotFound(public_key_path)
public_key = open(public_key_path).read()
return (private_key, public_key, fingerprint)
def fetch_crl(project_id):
"""Get crl file for project."""
if not CONF.use_project_ca:
project_id = None
crl_file_path = crl_path(project_id)
if not os.path.exists(crl_file_path):
raise exception.CryptoCRLFileNotFound(project_id)
with open(crl_file_path, 'r') as crlfile:
return crlfile.read()
def decrypt_text(project_id, text):
private_key = key_path(project_id)
if not os.path.exists(private_key):
raise exception.ProjectNotFound(project_id=project_id)
try:
dec, _err = utils.execute('openssl',
'rsautl',
'-decrypt',
'-inkey', '%s' % private_key,
process_input=text)
return dec
except exception.ProcessExecutionError:
raise exception.DecryptionFailure()
def revoke_cert(project_id, file_name):
"""Revoke a cert by file name."""
start = os.getcwd()
os.chdir(ca_folder(project_id))
# NOTE(vish): potential race condition here
utils.execute('openssl', 'ca', '-config', './openssl.cnf', '-revoke',
file_name)
utils.execute('openssl', 'ca', '-gencrl', '-config', './openssl.cnf',
'-out', CONF.crl_file)
os.chdir(start)
def revoke_certs_by_user(user_id):
"""Revoke all user certs."""
admin = context.get_admin_context()
for cert in db.certificate_get_all_by_user(admin, user_id):
revoke_cert(cert['project_id'], cert['file_name'])
def revoke_certs_by_project(project_id):
"""Revoke all project certs."""
# NOTE(vish): This is somewhat useless because we can just shut down
# the vpn.
admin = context.get_admin_context()
for cert in db.certificate_get_all_by_project(admin, project_id):
revoke_cert(cert['project_id'], cert['file_name'])
def revoke_certs_by_user_and_project(user_id, project_id):
"""Revoke certs for user in project."""
admin = context.get_admin_context()
for cert in db.certificate_get_all_by_user_and_project(admin,
user_id, project_id):
revoke_cert(cert['project_id'], cert['file_name'])
def _project_cert_subject(project_id):
"""Helper to generate user cert subject."""
return CONF.project_cert_subject % (project_id, timeutils.isotime())
def _user_cert_subject(user_id, project_id):
"""Helper to generate user cert subject."""
return CONF.user_cert_subject % (project_id, user_id, timeutils.isotime())
def generate_x509_cert(user_id, project_id, bits=1024):
"""Generate and sign a cert for user in project."""
subject = _user_cert_subject(user_id, project_id)
with utils.tempdir() as tmpdir:
keyfile = os.path.abspath(os.path.join(tmpdir, 'temp.key'))
csrfile = os.path.join(tmpdir, 'temp.csr')
utils.execute('openssl', 'genrsa', '-out', keyfile, str(bits))
utils.execute('openssl', 'req', '-new', '-key', keyfile, '-out',
csrfile, '-batch', '-subj', subject)
private_key = open(keyfile).read()
csr = open(csrfile).read()
(serial, signed_csr) = sign_csr(csr, project_id)
fname = os.path.join(ca_folder(project_id), 'newcerts/%s.pem' % serial)
cert = {'user_id': user_id,
'project_id': project_id,
'file_name': fname}
db.certificate_create(context.get_admin_context(), cert)
return (private_key, signed_csr)
def _ensure_project_folder(project_id):
if not os.path.exists(ca_path(project_id)):
geninter_sh_path = os.path.join(os.path.dirname(__file__),
'CA',
'geninter.sh')
start = os.getcwd()
os.chdir(ca_folder())
utils.execute('sh', geninter_sh_path, project_id,
_project_cert_subject(project_id))
os.chdir(start)
def generate_vpn_files(project_id):
project_folder = ca_folder(project_id)
key_fn = os.path.join(project_folder, 'server.key')
crt_fn = os.path.join(project_folder, 'server.crt')
if os.path.exists(crt_fn):
return
# NOTE(vish): The 2048 is to maintain compatibility with the old script.
# We are using "project-vpn" as the user_id for the cert
# even though that user may not really exist. Ultimately
# this will be changed to be launched by a real user. At
    #             that point we can delete this helper method.
key, csr = generate_x509_cert('project-vpn', project_id, 2048)
with open(key_fn, 'w') as keyfile:
keyfile.write(key)
with open(crt_fn, 'w') as crtfile:
crtfile.write(csr)
def sign_csr(csr_text, project_id=None):
if not CONF.use_project_ca:
project_id = None
if not project_id:
return _sign_csr(csr_text, ca_folder())
_ensure_project_folder(project_id)
    return _sign_csr(csr_text, ca_folder(project_id))
def _sign_csr(csr_text, ca_folder):
with utils.tempdir() as tmpdir:
inbound = os.path.join(tmpdir, 'inbound.csr')
outbound = os.path.join(tmpdir, 'outbound.csr')
with open(inbound, 'w') as csrfile:
csrfile.write(csr_text)
LOG.debug(_('Flags path: %s'), ca_folder)
start = os.getcwd()
# Change working dir to CA
fileutils.ensure_tree(ca_folder)
os.chdir(ca_folder)
utils.execute('openssl', 'ca', '-batch', '-out', outbound, '-config',
'./openssl.cnf', '-infiles', inbound)
out, _err = utils.execute('openssl', 'x509', '-in', outbound,
'-serial', '-noout')
serial = string.strip(out.rpartition('=')[2])
os.chdir(start)
with open(outbound, 'r') as crtfile:
return (serial, crtfile.read())
# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
# http://code.google.com/p/boto
def compute_md5(fp):
"""Compute an md5 hash.
:type fp: file
:param fp: File pointer to the file to MD5 hash. The file pointer will be
reset to the beginning of the file before the method returns.
    :rtype: str
:returns: the hex digest version of the MD5 hash
"""
m = hashlib.md5()
fp.seek(0)
s = fp.read(8192)
while s:
m.update(s)
s = fp.read(8192)
hex_md5 = m.hexdigest()
fp.seek(0)
return hex_md5
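# Hedged usage sketch for compute_md5 above: hash an in-memory file object and
# observe that the file pointer is rewound before the function returns.
if __name__ == "__main__":
    import io
    buf = io.BytesIO(b'hello world')
    print(compute_md5(buf))  # 5eb63bbbe01eeed093cb22bb8f5acdc3
    assert buf.tell() == 0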
|
|
"""
Interval Arithmetic for plotting.
This module does not implement interval arithmetic accurately and
hence cannot be used for purposes other than plotting. If you want
to use interval arithmetic, use mpmath's interval arithmetic.
The module implements interval arithmetic using numpy and
python floating points. The rounding up and down is not handled
and hence this is not an accurate implementation of interval
arithmetic.
The module uses numpy for speed which cannot be achieved with mpmath.
"""
# Q: Why use numpy? Why not simply use mpmath's interval arithmetic?
# A: mpmath's interval arithmetic simulates a floating point unit
# and hence is slow, while numpy evaluations are orders of magnitude
# faster.
# Q: Why create a separate class for intervals? Why not use sympy's
# Interval Sets?
# A: The functionalities that will be required for plotting is quite
# different from what Interval Sets implement.
# Q: Why is rounding up and down according to IEEE754 not handled?
# A: It is not possible to do it in both numpy and python. An external
# library would have to be used, which defeats the whole purpose, i.e., speed.
# Also, rounding is handled for very few functions in those libraries.
# Q: Will my plots be affected?
# A: It will not affect most of the plots. Since the module is based on
# floating point arithmetic, it suffers from the same problems as floating
# point arithmetic itself.
from __future__ import print_function, division
from sympy.external import import_module
from sympy.simplify.simplify import nsimplify
class interval(object):
""" Represents an interval containing floating points as start and
end of the interval
The is_valid variable tracks whether the interval obtained as the
result of the function is in the domain and is continuous.
- True: Represents the interval result of a function is continuous and
in the domain of the function.
- False: The interval argument of the function was not in the domain of
the function, hence the is_valid of the result interval is False
- None: The function was not continuous over the interval or
the function's argument interval is partly in the domain of the
function
The comparison of two intervals returns a tuple of two 3-valued logic
values.
The first value determines the comparison as follows:
- True: If the comparison is True throughout the intervals.
- False: If the comparison is False throughout the intervals.
- None: If the comparison is True for some part of the intervals.
The second value is determined as follows:
- True: If both the intervals in comparison are valid.
    - False: If at least one of the intervals is invalid.
    - None: Otherwise.
"""
def __init__(self, *args, **kwargs):
self.is_valid = kwargs.pop('is_valid', True)
if len(args) == 1:
if isinstance(args[0], interval):
self.start, self.end = args[0].start, args[0].end
else:
self.start = float(args[0])
self.end = float(args[0])
elif len(args) == 2:
if args[0] < args[1]:
self.start = float(args[0])
self.end = float(args[1])
else:
self.start = float(args[1])
self.end = float(args[0])
else:
raise ValueError("interval takes a maximum of two float values "
"as arguments")
@property
def mid(self):
return (self.start + self.end) / 2.0
@property
def width(self):
return self.end - self.start
def __repr__(self):
return "interval(%f, %f)" % (self.start, self.end)
def __str__(self):
return "[%f, %f]" % (self.start, self.end)
def __lt__(self, other):
if isinstance(other, (int, float)):
if self.end < other:
return (True, self.is_valid)
elif self.start > other:
return (False, self.is_valid)
else:
return (None, self.is_valid)
elif isinstance(other, interval):
if self.is_valid is False or other.is_valid is False:
valid = False
elif self.is_valid is None or other.is_valid is None:
valid = None
else:
valid = True
            if self.end < other.start:
return (True, valid)
if self.start > other.end:
return (False, valid)
return (None, valid)
else:
return NotImplemented
def __gt__(self, other):
if isinstance(other, (int, float)):
if self.start > other:
return (True, self.is_valid)
elif self.end < other:
return (False, self.is_valid)
else:
return (None, self.is_valid)
elif isinstance(other, interval):
return other.__lt__(self)
else:
return NotImplemented
def __eq__(self, other):
if isinstance(other, (int, float)):
if self.start == other and self.end == other:
return (True, self.is_valid)
if other in self:
return (None, self.is_valid)
else:
return (False, self.is_valid)
if isinstance(other, interval):
if self.is_valid is False or other.is_valid is False:
valid = False
elif self.is_valid is None or other.is_valid is None:
valid = None
else:
valid = True
if self.start == other.start and self.end == other.end:
return (True, valid)
elif self.__lt__(other)[0] is not None:
return (False, valid)
else:
return (None, valid)
else:
return NotImplemented
def __ne__(self, other):
if isinstance(other, (int, float)):
if self.start == other and self.end == other:
return (False, self.is_valid)
if other in self:
return (None, self.is_valid)
else:
return (True, self.is_valid)
if isinstance(other, interval):
if self.is_valid is False or other.is_valid is False:
valid = False
elif self.is_valid is None or other.is_valid is None:
valid = None
else:
valid = True
if self.start == other.start and self.end == other.end:
return (False, valid)
if not self.__lt__(other)[0] is None:
return (True, valid)
return (None, valid)
else:
return NotImplemented
def __le__(self, other):
if isinstance(other, (int, float)):
if self.end <= other:
return (True, self.is_valid)
if self.start > other:
return (False, self.is_valid)
else:
return (None, self.is_valid)
if isinstance(other, interval):
if self.is_valid is False or other.is_valid is False:
valid = False
elif self.is_valid is None or other.is_valid is None:
valid = None
else:
valid = True
if self.end <= other.start:
return (True, valid)
if self.start > other.end:
return (False, valid)
return (None, valid)
else:
return NotImplemented
def __ge__(self, other):
if isinstance(other, (int, float)):
if self.start >= other:
return (True, self.is_valid)
elif self.end < other:
return (False, self.is_valid)
else:
return (None, self.is_valid)
        elif isinstance(other, interval):
            return other.__le__(self)
        else:
            return NotImplemented
def __add__(self, other):
if isinstance(other, (int, float)):
if self.is_valid:
return interval(self.start + other, self.end + other)
else:
start = self.start + other
end = self.end + other
return interval(start, end, is_valid=self.is_valid)
elif isinstance(other, interval):
start = self.start + other.start
end = self.end + other.end
if self.is_valid and other.is_valid:
return interval(start, end)
elif self.is_valid is False or other.is_valid is False:
return interval(start, end, is_valid=False)
else:
return interval(start, end, is_valid=None)
else:
return NotImplemented
__radd__ = __add__
def __sub__(self, other):
if isinstance(other, (int, float)):
start = self.start - other
end = self.end - other
return interval(start, end, is_valid=self.is_valid)
elif isinstance(other, interval):
start = self.start - other.end
end = self.end - other.start
if self.is_valid and other.is_valid:
                return interval(start, end)
elif self.is_valid is False or other.is_valid is False:
return interval(start, end, is_valid=False)
else:
return interval(start, end, is_valid=None)
else:
return NotImplemented
def __rsub__(self, other):
if isinstance(other, (int, float)):
start = other - self.end
end = other - self.start
return interval(start, end, is_valid=self.is_valid)
elif isinstance(other, interval):
return other.__sub__(self)
else:
return NotImplemented
def __neg__(self):
if self.is_valid:
return interval(-self.end, -self.start)
else:
return interval(-self.end, -self.start, is_valid=self.is_valid)
def __mul__(self, other):
if isinstance(other, interval):
if self.is_valid is False or other.is_valid is False:
return interval(-float('inf'), float('inf'), is_valid=False)
elif self.is_valid is None or other.is_valid is None:
return interval(-float('inf'), float('inf'), is_valid=None)
else:
inters = []
inters.append(self.start * other.start)
inters.append(self.end * other.start)
inters.append(self.start * other.end)
inters.append(self.end * other.end)
start = min(inters)
end = max(inters)
return interval(start, end)
elif isinstance(other, (int, float)):
return interval(self.start*other, self.end*other, is_valid=self.is_valid)
else:
return NotImplemented
__rmul__ = __mul__
def __contains__(self, other):
if isinstance(other, (int, float)):
return self.start <= other and self.end >= other
else:
return self.start <= other.start and other.end <= self.end
def __rdiv__(self, other):
if isinstance(other, (int, float)):
other = interval(other)
return other.__div__(self)
elif isinstance(other, interval):
return other.__div__(self)
else:
return NotImplemented
def __div__(self, other):
# Both None and False are handled
if not self.is_valid:
# Don't divide as the value is not valid
return interval(-float('inf'), float('inf'), is_valid=self.is_valid)
if isinstance(other, (int, float)):
if other == 0:
# Divide by zero encountered. valid nowhere
return interval(-float('inf'), float('inf'), is_valid=False)
else:
return interval(self.start / other, self.end / other)
elif isinstance(other, interval):
if other.is_valid is False or self.is_valid is False:
return interval(-float('inf'), float('inf'), is_valid=False)
elif other.is_valid is None or self.is_valid is None:
return interval(-float('inf'), float('inf'), is_valid=None)
else:
# denominator contains both signs, i.e. being divided by zero
# return the whole real line with is_valid = None
if 0 in other:
return interval(-float('inf'), float('inf'), is_valid=None)
# denominator negative
if other.end < 0:
self = -self
other = -other
# denominator positive
inters = []
inters.append(self.start / other.start)
inters.append(self.end / other.start)
inters.append(self.start / other.end)
inters.append(self.end / other.end)
start = max(inters)
end = min(inters)
return interval(start, end)
else:
return NotImplemented
__truediv__ = __div__
__rtruediv__ = __rdiv__
def __pow__(self, other):
        # Handles interval, integer and float exponents; interval exponents
        # are evaluated via exp(other * log(self)).
from .lib_interval import exp, log
if not self.is_valid:
return self
if isinstance(other, interval):
return exp(other * log(self))
elif isinstance(other, (float, int)):
if other < 0:
return 1 / self.__pow__(abs(other))
else:
if int(other) == other:
return _pow_int(self, other)
else:
return _pow_float(self, other)
else:
return NotImplemented
def __rpow__(self, other):
if isinstance(other, (float, int)):
if not self.is_valid:
#Don't do anything
return self
elif other < 0:
if self.width > 0:
return interval(-float('inf'), float('inf'), is_valid=False)
else:
power_rational = nsimplify(self.start)
num, denom = power_rational.as_numer_denom()
if denom % 2 == 0:
return interval(-float('inf'), float('inf'),
is_valid=False)
else:
start = -abs(other)**self.start
end = start
return interval(start, end)
else:
return interval(other**self.start, other**self.end)
elif isinstance(other, interval):
return other.__pow__(self)
else:
return NotImplemented
def __hash__(self):
return hash((self.is_valid, self.start, self.end))
def _pow_float(inter, power):
"""Evaluates an interval raised to a floating point."""
power_rational = nsimplify(power)
num, denom = power_rational.as_numer_denom()
if num % 2 == 0:
start = abs(inter.start)**power
end = abs(inter.end)**power
        if inter.start < 0 < inter.end:
            # The interval straddles zero, so the minimum of |x|**power is 0.
            # (The original check `if start < 0` was dead code, since
            # abs(...)**power is never negative.)
            ret = interval(0, max(start, end))
else:
ret = interval(start, end)
return ret
elif denom % 2 == 0:
if inter.end < 0:
return interval(-float('inf'), float('inf'), is_valid=False)
elif inter.start < 0:
return interval(0, inter.end**power, is_valid=None)
else:
return interval(inter.start**power, inter.end**power)
else:
if inter.start < 0:
start = -abs(inter.start)**power
else:
start = inter.start**power
if inter.end < 0:
end = -abs(inter.end)**power
else:
end = inter.end**power
return interval(start, end, is_valid=inter.is_valid)
def _pow_int(inter, power):
"""Evaluates an interval raised to an integer power"""
power = int(power)
if power & 1:
return interval(inter.start**power, inter.end**power)
else:
if inter.start < 0 and inter.end > 0:
start = 0
end = max(inter.start**power, inter.end**power)
return interval(start, end)
else:
return interval(inter.start**power, inter.end**power)
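# A minimal sketch of the three-valued comparison semantics documented on the
# interval class above. Each comparison returns (result, validity); division
# by an interval containing zero yields the whole real line with
# is_valid=None.
if __name__ == "__main__":
    a = interval(1, 2)
    b = interval(3, 4)
    print(a < b)                 # (True, True): a lies entirely below b
    print(a < interval(1.5, 5))  # (None, True): True on only part of a
    print(a + b)                 # [4.000000, 6.000000]
    print(a / interval(-1, 1))   # [-inf, inf] with is_valid=None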
|
|
import collections
import copy
import heapq
import traceback
import warnings
import weakref
import numpy
import six
import chainer
from chainer import cuda
from chainer import initializers
from chainer.initializers import constant
from chainer import utils
from chainer.utils import argument
def _check_grad_type(func, x, gx):
def make_message(message):
if func:
detail = 'Function `{0}` ({1}) has a bug.\n'.format(
type(func).__name__, func.label)
stack = func.stack
if stack:
detail += 'Stacktrace of the function is below:\n'
                for line in traceback.format_list(stack):
detail += line
detail += '''
Please report this error to the issue tracker with the stack trace,
the information of your environment, and your script:
https://github.com/pfnet/chainer/issues/new.
'''
else:
detail = ''
detail += message
return detail
if x.data is None or gx is None:
# ``x.data is None`` implies that the data array is not retained
return
if not isinstance(gx, type(x.data)):
msg = ('Type of data and grad mismatch\n%s != %s' %
(type(x.data), type(gx)))
raise TypeError(make_message(msg))
if gx.dtype != x.data.dtype:
msg = ('Dtype of data and grad mismatch\n%s != %s' %
(x.data.dtype, gx.dtype))
raise TypeError(make_message(msg))
if gx.shape != x.data.shape:
msg = ('Shape of data and grad mismatch\n%s != %s' %
(x.data.shape, gx.shape))
raise ValueError(make_message(msg))
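# Hedged sketch of the checks in _check_grad_type above: a dtype mismatch
# between data and grad raises TypeError. The _Node stub is hypothetical; the
# function only needs an object exposing a ``data`` attribute here.
if __name__ == "__main__":
    class _Node(object):
        def __init__(self, data):
            self.data = data
    _x = _Node(numpy.zeros(3, dtype=numpy.float32))
    try:
        _check_grad_type(None, _x, numpy.zeros(3, dtype=numpy.float64))
    except TypeError as e:
        print(e)  # Dtype of data and grad mismatch ...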
def variable_repr(var):
"""Return the string representation of a variable.
Args:
var (~chainer.Variable): Input Variable.
.. seealso:: numpy.array_repr
"""
xp = cuda.get_array_module(var)
if xp is numpy:
arr = var.data
else:
arr = var.data.get()
if var.name:
prefix = 'variable ' + var.name
else:
prefix = 'variable'
if arr.size > 0 or arr.shape == (0,):
lst = numpy.array2string(arr, None, None, None, ', ', prefix + '(')
else: # show zero-length shape unless it is (0,)
lst = '[], shape=%s' % (repr(arr.shape),)
return '%s(%s)' % (prefix, lst)
def variable_str(var):
"""Return the string representation of a variable.
Args:
var (~chainer.Variable): Input Variable.
.. seealso:: numpy.array_str
"""
xp = cuda.get_array_module(var)
if xp is numpy:
arr = var.data
else:
arr = var.data.get()
if var.name:
prefix = 'variable ' + var.name + '('
else:
prefix = 'variable('
return (prefix + numpy.array2string(arr, None, None, None, ' ', prefix) +
')')
class VariableNode(object):
"""Node in the backward computational graph representing a variable.
This object represents a variable node in a computational graph. The node
is used in error backpropagation (a.k.a. backprop) to determine which
    gradient is to be passed to each function.
A variable node is held by the corresponding :class:`Variable` object,
which is managed by users. :class:`Function` objects that take the variable
as an input also hold references to the variable node.
Note that the node does not hold a reference to the corresponding data
array in general. The data array is actually accessible by the node in the
following cases.
1. If there exists a :class:`Variable` object that holds a reference to the
variable node, the variable node holds a weak reference to the variable
object, and thus the data array is accessible via the weak reference.
2. If :meth:`retain_data` is called, the node holds a reference to the data
array. It is mainly called by a function that needs the input or output
data array in its backprop procedure. See :meth:`Function.retain_inputs`
and :meth:`Function.retain_outputs` for more details.
Users usually do not need to touch this variable node object. The
computational graph is automatically managed by Chainer, and any interface
that is beneficial for users is also provided by :class:`Variable`.
Args:
variable (Variable): The corresponding variable object.
name (str): Name of the variable node.
Attributes:
dtype: Data type of the data array.
shape: Shape of the data array.
name (str): Name of the variable node.
"""
def __init__(self, variable, name, grad=None):
self._variable = weakref.ref(variable)
self._creator = None
self._data = None
self._rank = 0
self.name = name
self._requires_grad = variable.requires_grad
vdata = variable.data
self._set_data_type(vdata)
self.grad = grad
@property
def creator(self):
"""Function node that created this variable node."""
return self._creator
@creator.setter
def creator(self, func):
self._creator = func
if func is not None:
self._rank = func.rank + 1
@property
def data(self):
"""Data array of the corresponding variable.
If the data is not available, it returns ``None``.
"""
return self._data
@data.setter
def data(self, d):
self._data = d
self._set_data_type(d)
@property
def grad(self):
"""Gradient array of the corresponding variable."""
return self._grad
@grad.setter
def grad(self, g):
_check_grad_type(None, self, g)
self._grad = g
@property
def label(self):
"""Short text that represents the variable node."""
if self.shape == ():
return str(self.dtype)
return '(%s), %s' % (', '.join(map(str, self.shape)),
str(self.dtype))
@property
def rank(self):
return self._rank
@property
def requires_grad(self):
"""It indicates that ``grad`` will be set in backward calculation."""
return self._requires_grad
def set_creator(self, creator):
"""Sets a :class:`Function` object that created this node.
This method is equivalent to ``self.creator = creator``.
Args:
creator (Function): Function object that created this node.
"""
self.creator = creator
def unchain(self):
"""Deletes the reference to the creator of this variable node.
This method is equivalent to ``self.creator = None``.
"""
self.creator = None
def retain_data(self):
"""Lets the node hold a reference to the underlying data array.
This method gets the data array of the corresponding variable and keeps
it. If the weak reference to the corresponding variable is dead, it
raises an error.
"""
variable = self._variable()
if variable is not None:
self.data = variable.data
else:
raise RuntimeError('cannot retain variable data: the variable has '
'been already released')
def _set_data_type(self, d):
if d is None:
self.dtype = None
self.shape = None
else:
self.dtype = d.dtype
self.shape = d.shape
def _set_grad_with_check(self, g, func, var):
_check_grad_type(func, var, g)
self._grad = g
def _create_variable(data, name, grad, requires_grad):
return Variable(
data, name=name, grad=grad, requires_grad=requires_grad)
class Variable(object):
"""__init__(data=None, *, name=None, grad=None, initializer=None, update_rule=None, requires_grad=True)
Array with a structure to keep track of computation.
Every variable holds a data array of type either :class:`numpy.ndarray` or
:class:`cupy.ndarray`.
A variable object holds a data array and a :class:`VariableNode` object of
a computational graph. If the variable is constructed by the user, the node
is _root_ and does not hold any parent. If the variable is constructed by a
:class:`Function` object, the node holds a reference to its parent called
`creator`. This reference is used in backpropagation to backtrack the
graph.
Users can disable (resp. enable) this chaining behavior by calling
:func:`~chainer.no_backprop_mode` (resp.
:func:`~chainer.force_backprop_mode`).
In the former context, a variable never creates a computational graph,
    whereas in the latter context, it is forced to create one.
.. warning::
``volatile`` argument is not supported anymore since v2.
Instead, use :func:`chainer.no_backprop_mode`.
Args:
data (numpy.ndarray or cupy.ndarray): Initial data array.
name (str): Name of the variable.
grad (numpy.ndarray or cupy.ndarray): Initial gradient array.
requires_grad (bool): Boolean indicating whether ``grad`` will be set
in backward calculation.
Attributes:
data: Data array of type either :class:`numpy.ndarray` or
:class:`cupy.ndarray`. If it is None, the variable is left in an
uninitialized state.
grad: Gradient array.
creator: The function who creates this variable. It is ``None`` if the
variable is not created by any function.
""" # NOQA
def __init__(self, data=None, **kwargs):
argument.check_unexpected_kwargs(
kwargs, volatile='volatile argument is not supported anymore. '
'Use chainer.using_config')
name, grad, requires_grad \
= argument.parse_kwargs(
kwargs, ('name', None), ('grad', None),
('requires_grad', True))
if (data is not None and
not isinstance(data, (numpy.ndarray, cuda.ndarray))):
msg = '''numpy.ndarray or cuda.ndarray are expected.
Actual: {0}'''.format(type(data))
raise TypeError(msg)
# Use a list as a data structure to hold the data array indirectly to
# abstract its initialized/uninitialized state.
self._data = [data]
self._requires_grad = requires_grad
self._node = VariableNode(self, name, grad)
def __copy__(self):
return self._copy_to(Variable())
def _copy_to(self, target):
target.__dict__ = copy.copy(self.__dict__)
target._node = VariableNode(target, self.name)
return target
def __reduce__(self):
return _create_variable, (self.data, self.name, self._node._grad,
self._requires_grad)
def __repr__(self):
return variable_repr(self)
def __str__(self):
return variable_str(self)
@property
def name(self):
return self._node.name
@name.setter
def name(self, n):
self._node.name = n
def summary(self):
if self.name:
return '<variable %s>' % self.name
else:
return '<variable at 0x%x>' % id(self)
def debug_print(self):
"""Display a summary of the stored data and location of the Variable"""
msg = """{summary}
- device: {device}
- backend: {background}
- shape: {shape}
- dtype: {dtype}
- statistics: {stats}
- grad: {grad}"""
stats_msg = 'mean={0:.8f}, std={1:.8f}'
try:
device = self.data.device
except AttributeError:
device = 'CPU'
with cuda.get_device_from_array(self.data) as dev:
xp = numpy if int(dev) == -1 else cuda.cupy
if self.grad is None:
grad = None
elif xp.all(self.grad == 0):
grad = 0
else:
grad = stats_msg.format(float(xp.mean(self.grad)),
float(xp.std(self.grad)))
stats = stats_msg.format(float(xp.mean(self.data)),
float(xp.std(self.data)))
return msg.format(summary=self.summary(),
grad=grad, shape=self.data.shape,
background=type(self.data),
dtype=self.data.dtype, device=device,
stats=stats)
def __pos__(self):
return self
def __len__(self):
"""Returns the first dimension of the data array.
Returns:
int: Number of the first dimension of the data array.
"""
return len(self.data)
@property
def label(self):
"""Short text that represents the variable."""
return self._node.label
@property
def creator(self):
""":meth:`Function` object that created this variable.
This property has a setter to which ``None`` can be set. Setting
``None`` to this property is equivalent to call :meth:`unchain`;
it purges the variable from the function that created this variable.
The setter also accepts the original :meth:`Function` object that
created this variable. For example, you can once set ``None`` to this
property and then set the original value again.
.. note::
Setting an irrelevant :meth:`Function` object does not emit any
error immediately, whereas the behavior is undefined. Do not set
a :meth:`Function` object that did not create this variable object.
"""
return self._node._creator
@creator.setter
def creator(self, func):
self._node.creator = func
@property
def data(self):
return self._data[0]
@data.setter
def data(self, d):
self._data[0] = d
self._node._set_data_type(d)
@property
def grad(self):
return self._node._grad
@grad.setter
def grad(self, g):
self._node._set_grad_with_check(g, None, self)
@property
def shape(self):
return self.data.shape
@property
def ndim(self):
return self.data.ndim
@property
def size(self):
return self.data.size
@property
def dtype(self):
return self.data.dtype
@property
def rank(self):
return self._node.rank
@property
def node(self):
return self._node
@property
def requires_grad(self):
"""It indicates that ``grad`` will be set in backward calculation."""
return self._requires_grad
def to_cpu(self):
"""Copies the data and gradient arrays to CPU."""
if self.data is None:
return
self._data = [cuda.to_cpu(self.data)]
# ensure that the node tracks the device migration
node = self._node
if node._data is not None:
node.retain_data()
if node._grad is not None:
node._grad = cuda.to_cpu(node._grad)
def to_gpu(self, device=None):
"""Copies the data and gradient arrays to specified GPU.
Args:
device: Target device specifier. If omitted, the current device is
used.
"""
if self.data is None:
current = cuda.Device().id
self._initial_device = current if device is None else device
else:
self._data = [cuda.to_gpu(self.data, device)]
# ensure that the node tracks the device migration
node = self._node
if node._data is not None:
node.retain_data()
if node._grad is not None:
node._grad = cuda.to_gpu(node._grad, device)
def cleargrad(self):
"""Clears the gradient array."""
self._node._grad = None
def zerograd(self):
"""Initializes the gradient array by zeros.
.. deprecated:: v1.15
Use :meth:`cleargrad` instead.
"""
warnings.warn(
            'Variable.zerograd is deprecated. Use Variable.cleargrad instead.',
DeprecationWarning)
if self.data is None:
return
with cuda.get_device_from_array(self.data) as dev:
node = self._node
if node._grad is None:
xp = numpy if int(dev) == -1 else cuda.cupy
node._grad = xp.zeros_like(self.data)
else:
node._grad.fill(0)
def copydata(self, var):
"""Copies the data array from given source variable.
This method copies the data array from given variable to this variable.
The copy is done even if the arrays reside on different devices,
including across the host and a GPU device. If this variable has an
uninitialized data array, this method initializes it by the data array
of the given variable. Similarly, if the given variable has an
uninitialized data array, this method initializes it by the data array
of this variable (``self``). If both are uninitialized, this method
does nothing.
Args:
var (Variable): Source variable.
"""
src = var.data
dst = self.data
if src is None:
if dst is None:
return
var.initialize(self.shape)
src = var.data
elif dst is None:
self.initialize(src.shape)
dst = self.data
src_xp = cuda.get_array_module(src)
dst_xp = cuda.get_array_module(dst)
if dst_xp is src_xp:
dst_xp.copyto(dst, src)
elif dst_xp is numpy:
dst_xp.copyto(dst, src.get())
else:
dst.set(src)
def addgrad(self, var):
"""Accumulates the gradient array from given source variable.
This method adds the gradient of a given variable to the gradient of
this variable. The accumulation is even done across the host and
different devices. If this variable has uninitialized data/grad arrays,
this method initializes it with the shape of the given varaible and
then accumulates the gradient.
Args:
var (Variable): Source variable.
"""
src = var._node._grad
if src is None:
return
if self.data is None:
self.initialize(var.shape)
dst = self._node._grad
src_dev = cuda.get_device_from_array(src)
dst_dev = cuda.get_device_from_array(self.data)
if src_dev.id == dst_dev.id:
with dst_dev:
if dst is None:
xp = cuda.get_array_module(src)
self._node.grad = xp.copy(src)
else:
dst += src
return
if dst_dev.id < 0:
src_grad = cuda.to_cpu(src)
else:
src_grad = cuda.to_gpu(src, device=dst_dev)
if dst is None:
self._node.grad = src_grad
else:
with dst_dev:
dst += src_grad
def set_creator(self, gen_func):
"""Notifies the variable that the given function is its creator.
Args:
gen_func (Function): Function object that creates this variable as
one of its outputs.
"""
self._node.set_creator(gen_func)
def backward(self, retain_grad=False):
"""Runs error backpropagation (a.k.a. backprop) from this variable.
On backprop, :meth:`Function.backward` is called on each
:class:`Function` object appearing in the backward graph starting from
this variable. The backward graph is represented by backward references
from variable nodes to their creators, and from functions to their
input variable nodes. The backprop stops at all root nodes. Some
functions set ``None`` as gradients of some inputs, where further
backprop does not take place at such inputs.
        This method uses :data:`grad` as the initial error array. Users can
        manually set a gradient array before calling this method. If
        :data:`data` contains only one element (i.e., it is a scalar) and
        :data:`grad` is ``None``, then this method automatically supplies
        1.0 as the initial error. This is useful when starting backprop
        from a scalar loss value.
Args:
retain_grad (bool): If ``True``, the gradient arrays of all
                intermediate variables are kept. Otherwise, :data:`grad` of
                each intermediate variable is set to ``None`` as soon as it
                is no longer needed, which may reduce the maximum memory
                consumption. In most model-training cases, the purpose of
                backprop is to compute gradients of parameters, not of all
                variables, so it is recommended to set this flag to
                ``False``.
"""
if self.creator is None:
return
initial_device = None
if cuda.available and isinstance(self.data, cuda.cupy.ndarray):
try:
initial_device = cuda.Device()
except cuda.cupy.cuda.runtime.CUDARuntimeError as e:
if e.status != 38: # cudaErrorNoDevice
raise
is_debug = chainer.is_debug()
cand_funcs = []
seen_set = set()
seen_vars = set()
need_copy = set()
# Initialize error by 1, if this is a loss variable
if self.data.size == 1 and self.grad is None:
with cuda.get_device_from_array(self.data) as device:
if device is cuda.DummyDevice:
self.grad = numpy.ones_like(self.data)
else:
self.grad = cuda.cupy.ones_like(self.data)
def add_cand(cand):
if cand not in seen_set:
                # Negate since heapq is a min-heap
heapq.heappush(cand_funcs, (-cand.rank, len(seen_set), cand))
seen_set.add(cand)
add_cand(self.creator)
while cand_funcs:
_, _, func = heapq.heappop(cand_funcs)
outputs = [y() for y in func.outputs] # access via weak ref
in_data = tuple([x.data for x in func.inputs])
out_grad = tuple([None if y is None else y.grad for y in outputs])
hooks = chainer.get_function_hooks()
if func._n_local_function_hooks != 0:
hooks = collections.OrderedDict(hooks)
hooks.update(func.local_function_hooks)
cuda.get_device_from_array(*(in_data + out_grad)).use()
for hook in six.itervalues(hooks):
hook.backward_preprocess(func, in_data, out_grad)
func.output_data = tuple(
[None if y is None else y.data for y in outputs])
gxs = func.backward(in_data, out_grad)
assert len(gxs) == len(in_data)
if not getattr(func, '_retain_after_backward', False):
func.output_data = None
for hook in six.itervalues(hooks):
hook.backward_postprocess(func, in_data, out_grad)
if is_debug:
for gx in gxs:
if gx is None:
continue
cuda.get_device_from_array(gx).use()
if cuda.get_array_module(gx).isnan(gx).any():
msg = 'NaN is detected on backward computation'
raise RuntimeError(msg)
if not retain_grad:
for y in outputs:
if y is not None and y is not self.node:
y.grad = None
for x, gx in zip(func.inputs, gxs):
if gx is None:
continue
if not x.requires_grad:
continue
_check_grad_type(func, x, gx)
# Accumulate the gradient to x. It is a bit tricky to handle
# branches and parameter gradient accumulation correctly.
id_x = id(x)
if x.creator is None: # leaf
if x._grad is None:
x.grad = gx
need_copy.add(id_x)
else:
cuda.get_device_from_array(gx).use()
if id_x in need_copy:
x.grad = utils.force_array(x._grad + gx) # copy
need_copy.remove(id_x)
else:
x._grad += gx
else: # not a leaf
add_cand(x.creator)
if id_x not in seen_vars: # 1st visit
x.grad = gx
seen_vars.add(id_x)
need_copy.add(id_x)
else:
cuda.get_device_from_array(gx).use()
if id_x in need_copy: # 2nd visit
x.grad = utils.force_array(gx + x._grad) # copied
need_copy.remove(id_x)
else: # 3rd or later visit
x._grad += gx
del gxs # to reduce memory usage
if initial_device is not None:
initial_device.use()
def reshape(self, *shape):
"""Returns a variable of a different shape and the same content.
.. seealso::
           :func:`chainer.functions.reshape` for full documentation.
"""
if len(shape) == 1 and isinstance(shape[0], (tuple, list)):
shape = shape[0]
return chainer.functions.reshape(self, shape)
def transpose(self, *axes):
"""Permute the dimensions of an input variable without copy.
.. seealso::
:func:`chainer.functions.transpose` for full documentation.
"""
if len(axes) == 0:
axes = None
elif len(axes) == 1 and (isinstance(axes[0], (tuple, list)) or
axes[0] is None):
axes = axes[0]
return chainer.functions.transpose(self, axes)
def unchain(self):
"""Deletes the reference to the creator of this variable.
This method deletes the reference to the creator from the corresponding
variable node. Unlike :meth:`unchain_backward`, it does not backtrack
the graph.
This method is equivalent to ``self.creator = None``.
"""
self.creator = None
def unchain_backward(self):
"""Deletes references between variable nodes and functions backward.
        After this method completes, intermediate variable nodes and
        functions that are no longer referenced from anywhere are
        deallocated by reference-counting GC. This variable itself also
        deletes the reference to its creator function from the node, i.e.
        the node becomes a root in the computation graph, so backprop after
        unchaining stops at this variable. This behavior is useful for
        implementing truncated BPTT.
"""
cand_funcs = []
seen_set = set()
def add_cand(cand):
if cand is not None and cand not in seen_set:
cand_funcs.append(cand)
seen_set.add(cand)
add_cand(self.creator)
while cand_funcs:
func = cand_funcs.pop()
for var in func.inputs:
add_cand(var.creator)
func.unchain()
def retain_data(self):
"""Lets the corresponding variable node keep the underlying array."""
self._node.data = self._data[0]
def __lt__(self, other):
raise NotImplementedError()
def __le__(self, other):
raise NotImplementedError()
def __eq__(self, other):
raise NotImplementedError()
def __ne__(self, other):
raise NotImplementedError()
def __gt__(self, other):
raise NotImplementedError()
def __ge__(self, other):
raise NotImplementedError()
def __nonzero__(self):
raise NotImplementedError()
def __bool__(self):
raise NotImplementedError()
def __hash__(self):
return super(Variable, self).__hash__()
__array_priority__ = 200
class Parameter(Variable):
"""Parameter variable that can be registered to a link.
    Parameter is a subclass of :class:`Variable`. It behaves almost the
    same as a usual variable, except that a parameter can be registered to a
:class:`~chainer.Link` object just by assigning it to an attribute of
the link within an :meth:`~chainer.Link.init_scope` context.
    Parameter also supports initialization by an initializer. It can have
two initializers: one for the data array, and the other for the gradient
array. The initializer only specifies the way of filling the elements of
these arrays, and the shape information is specified at the initialization
point.
When a link that the parameter has been registered to is passed to an
:class:`~chainer.GradientMethod`, an update rule is set to the parameter.
This update rule specifies how to update the data array of the parameter
using its gradient array.
Args:
initializer (~chainer.Initializer or numpy.ndarray or cupy.ndarray):
Initializer of the data array. If ``shape`` is given, this
initializer is immediately used to initialize the data array.
Otherwise, if it is an array, it is immediately used as the data
array, and otherwise the data array is left uninitialized and will
be initialized by this initializer in :meth:`initialize`. It can
also be a scalar, in which case the data array will be filled by
this scalar. Note that float32 is used in this case.
shape (int or tuple of int or None): Shape of the parameter. If it is
``None``, the initialization is deferred to the call of
:meth:`initialize`.
name (str): Name of the parameter.
Attributes:
initializer: Initializer of the data array. It is used for
initializing the data array of an uninitialized variable.
update_rule: :class:`~chainer.optimizer.UpdateRule` instance that
updates this variable as a parameter. This argument is set to
:attr:`update_rule`.
"""
initializer = None
_grad_initializer = None
_initial_device = -1
def __init__(self, initializer=None, shape=None, name=None):
if initializer is None:
initializer = constant.NaN()
elif numpy.isscalar(initializer):
initializer = constant.Constant(initializer)
if shape is None:
if isinstance(initializer, (numpy.ndarray, cuda.ndarray)):
# parameter initialized by the initial array
super(Parameter, self).__init__(initializer, name=name)
else:
# uninitialized parameter
super(Parameter, self).__init__(name=name)
self.initializer = initializer
dtype = getattr(initializer, 'dtype', numpy.float32)
self._grad_initializer = constant.NaN(dtype)
else:
# parameter initialized with a given shape
if isinstance(initializer, (numpy.ndarray, cuda.ndarray)):
xp = cuda.get_array_module(initializer)
initializer = constant.Constant(initializer)
else:
xp = numpy
data = initializers.generate_array(initializer, shape, xp)
grad = xp.full_like(data, numpy.nan)
super(Parameter, self).__init__(data, name=name, grad=grad)
self.update_rule = None
def __copy__(self):
return self._copy_to(Parameter())
def __reduce__(self):
return _recover_parameter, (self.data, self.name, self.grad,
self.initializer, self.update_rule)
def to_cpu(self):
super(Parameter, self).to_cpu()
if self.data is None:
self._initial_device = -1
def to_gpu(self, device=None):
super(Parameter, self).to_gpu(device)
if self.data is None:
if device is None:
device = cuda.Device().id
self._initial_device = device
def cleargrad(self):
super(Parameter, self).cleargrad()
if self.data is None:
self._grad_initializer = None
def zerograd(self):
super(Parameter, self).zerograd()
if self.data is None:
dtype = getattr(self.initializer, 'dtype', None)
self._grad_initializer = initializers.Zero(dtype)
def initialize(self, shape):
"""Initializes the uninitialized variable.
Uninitialized variable is a variable created with the data array set to
None. This method creates and initializes the data array. The shape of
the variable can be left unknown until this method is called.
Args:
shape (tuple of int): Shape of the data array.
"""
data = initializers.generate_array(self.initializer, shape, numpy)
ginit = self._grad_initializer
grad = None if ginit is None else initializers.generate_array(
ginit, shape, numpy)
if self._initial_device >= 0:
data = cuda.to_gpu(data, device=self._initial_device)
if grad is not None:
grad = cuda.to_gpu(grad, device=self._initial_device)
self._data[0] = data
self._node._grad = grad
def update(self):
"""Updates the data array using the gradient and the update rule.
This method updates the parameter using the attached update rule.
"""
if self.update_rule is not None:
self.update_rule.update(self)
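# Illustrative usage sketch of the deferred initialization above (assumes the
# standard ``initializers.Normal`` initializer; not part of the original
# module):
def _example_deferred_parameter():
    p = Parameter(initializers.Normal(0.05))  # no shape yet; data stays None
    assert p.data is None
    p.initialize((3, 2))  # normally called by a Link once the shape is known
    assert p.shape == (3, 2)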
def _recover_parameter(data, name, grad, initializer, update_rule):
p = Parameter(initializer=initializer, name=name)
p.data = data
p.grad = grad
p.update_rule = update_rule
return p
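# Illustrative usage sketch of the graph bookkeeping above (assumes the
# top-level chainer package with its arithmetic overloads is importable):
def _example_backward():
    import numpy as np
    import chainer
    x = chainer.Variable(np.array([3.0], dtype=np.float32))
    y = x * x + 2 * x  # each operation records a creator on its output node
    y.backward()       # y is scalar, so grad is auto-initialized to ones
    assert float(x.grad[0]) == 8.0  # dy/dx = 2x + 2 at x = 3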
|
|
# flake8: noqa
"""
Tests for matcher base classes.
"""
import callee.base as __unit__
from tests import MatcherTestCase, TestCase
class BaseMatcherMetaclass(TestCase):
"""Tests for the BaseMatcherMetaclass."""
def test_validate_class_definition(self):
"""Test for BaseMatcherMetaclass._validate_class_definition."""
vcd = __unit__.BaseMatcherMetaclass._validate_class_definition
bases = (object,)
method = lambda self: None
vcd('BaseMatcher', bases, __unit__.BaseMatcher.__dict__)
vcd('Foo', bases, {}) # OK, empty definition
vcd('Foo', bases, {'foo': 42}) # OK, no methods at all
vcd('Foo', bases, {'__foo__': 42}) # OK, magic name but not method
vcd('Foo', bases, {'__foo__': method}) # OK, not BaseMatcher's method
vcd('Foo', bases, {'__init__': method}) # OK, explicitly allowed
with self.assertRaises(RuntimeError):
vcd('Foo', bases, {'__eq__': method}) # trying to mess up!
def test_is_base_matcher_class_definition(self):
"""Test for BaseMatcherMetaclass._is_base_matcher_class_definition."""
is_bmcd = \
__unit__.BaseMatcherMetaclass._is_base_matcher_class_definition
self.assertFalse(is_bmcd('Foo', {})) # wrong name
self.assertFalse(is_bmcd('BaseMatcher', {})) # needs members
self.assertFalse(is_bmcd('BaseMatcher', {'foo': 1})) # needs methods
self.assertFalse( # needs methods from same module
is_bmcd('BaseMatcher', {'foo': lambda self: None}))
self.assertTrue(is_bmcd('BaseMatcher', __unit__.BaseMatcher.__dict__))
def test_list_magic_methods(self):
"""Test for BaseMatcherMetaclass._list_magic_methods."""
lmm = __unit__.BaseMatcherMetaclass._list_magic_methods
class Foo(object):
pass
self.assertItemsEqual([], lmm(Foo))
class Bar(object):
def method(self):
pass
self.assertItemsEqual([], lmm(Bar))
class Baz(object):
def __init__(self):
pass
def __rdiv__(self, other):
return self
self.assertItemsEqual(['init', 'rdiv'], lmm(Baz))
class Qux(object):
def __init__(self):
pass
def foo(self):
pass
self.assertItemsEqual(['init'], lmm(Qux))
class Thud(object):
def __bool__(self):
return False
__nonzero__ = __bool__
self.assertItemsEqual(['bool', 'nonzero'], lmm(Thud))
class Matcher(TestCase):
"""Tests for the Matcher base class."""
def test_match(self):
"""Test default match() is left to be implemented by subclasses."""
class Custom(__unit__.Matcher):
pass
matcher = Custom()
with self.assertRaises(NotImplementedError):
matcher.match(None)
def test_repr__no_ctor(self):
"""Test default __repr__ of Matcher subclass without a constructor."""
class Custom(__unit__.Matcher):
pass
self.assertEquals("<Custom>", "%r" % Custom())
def test_repr__argless_ctor__no_state(self):
"""Test default __repr__ of Matcher subclass with argless ctor."""
class Custom(__unit__.Matcher):
def __init__(self):
pass
self.assertEquals("<Custom>", "%r" % Custom())
def test_repr__argless_ctor__with_state(self):
"""Test __repr__ of Matcher subclass with argless ctor & state."""
class Custom(__unit__.Matcher):
def __init__(self):
self.foo = 42
self.assertEquals("<Custom>", "%r" % Custom())
def test_repr__argful_ctor__no_state(self):
"""Test __repr__ with argful constructor but no actual fields."""
class Custom(__unit__.Matcher):
def __init__(self, unused):
pass
self.assertEquals("<Custom(...)>", "%r" % Custom('unused'))
def test_repr__argful_ctor__with_state_from_args(self):
"""Test __repr__ with argful constructor & object fields."""
class Custom(__unit__.Matcher):
def __init__(self, foo):
self.foo = foo
foo = 'bar'
self.assertEquals("<Custom(foo=%r)>" % (foo,),
"%r" % Custom(foo='bar'))
def test_repr__argful_ctor__with_unrelated_state(self):
"""Test __repr__ with argful ctor & unrelated object fields."""
class Custom(__unit__.Matcher):
def __init__(self, foo):
self.bar = 42
self.assertEquals("<Custom(bar=42)>", "%r" % Custom(foo='unused'))
class Eq(MatcherTestCase):
"""Tests for the Eq matcher."""
def test_regular_objects(self):
"""Test that Eq is a no-op for regular objects."""
self.assert_match(__unit__.Eq(None), None)
self.assert_match(__unit__.Eq(0), 0)
self.assert_match(__unit__.Eq(""), "")
self.assert_match(__unit__.Eq([]), [])
self.assert_match(__unit__.Eq(()), ())
        # Arbitrary objects are only equal in the `is` sense.
obj = object()
self.assert_match(__unit__.Eq(obj), obj)
def test_matchers(self):
"""Test that Eq allows to treat matchers as values."""
# Hypothetical objects that we want to check the equality of,
# where one is by some accident a Matcher.
eq_by_x = lambda this, other: this.x == getattr(other, 'x', object())
class RegularValue(object):
def __init__(self, x):
self.x = x
def __eq__(self, other):
return eq_by_x(self, other)
class MatcherValue(__unit__.Matcher):
def __init__(self, x):
self.x = x
def match(self, value):
return eq_by_x(self, value)
# Matching against a matcher object is an error if Eq isn't used.
with self.assertRaises(TypeError) as r:
self.assert_match(MatcherValue(42), MatcherValue(42))
self.assertIn("incorrect use of matcher object", str(r.exception))
# It's fine with Eq, though.
self.assert_match(__unit__.Eq(RegularValue(42)), MatcherValue(42))
def test_repr(self):
"""Test for the __repr__ method."""
value = 42
eq = __unit__.Eq(value)
self.assert_repr(eq, value)
class LogicalCombinators(MatcherTestCase):
"""Tests for the logical combinators (Not, And, etc.)."""
def test_not(self):
test_strings = ['', 'a', '42', 'a13', '99b', '!', '22 ?']
not_no_digits = ~self.NoDigits() # i.e. HasDigits
has_digits = self.HasDigits()
for s in test_strings:
self.assertEquals(
has_digits.match(s), not_no_digits.match(s),
msg="expected `%r` and `%r` to match %r equivalently" % (
has_digits, not_no_digits, s))
def test_not__repr(self):
not_all_digits = ~self.AllDigits()
self.assert_repr(not_all_digits)
def test_and__impossible(self):
test_strings = ['', 'a', '42', 'a13', '99b']
impossible = self.AllDigits() & self.NoDigits()
for s in test_strings:
self.assertFalse(
impossible.match(s),
msg="%r matched an impossible matcher %r" % (s, impossible))
def test_and__idempotent(self):
test_strings = ['', 'a', '42', 'a13', '99b', '!', '22 ?']
all_digits_and_all_digits = self.AllDigits() & self.AllDigits()
all_digits = self.AllDigits()
for s in test_strings:
self.assertEquals(
all_digits.match(s), all_digits_and_all_digits.match(s),
msg="expected `%r` and `%r` to match %r equivalently" % (
all_digits, all_digits_and_all_digits, s))
def test_and__regular(self):
test_strings = ['', '42', '31337', 'abcdef', 'a42', '22?']
short_and_digits = self.Short() & self.AllDigits()
short_digits = self.ShortDigits()
for s in test_strings:
self.assertEquals(
short_digits.match(s), short_and_digits.match(s),
msg="expected `%r` and `%r` to match %r equivalently" % (
short_digits, short_and_digits, s))
def test_and__repr(self):
short_and_digits = self.Short() & self.AllDigits()
self.assert_repr(short_and_digits)
def test_or__trivially_true(self):
test_strings = ['', 'abc', '123456789', 'qwerty?', '!!!!one']
true = self.Short() | self.Long()
for s in test_strings:
self.assertTrue(
true.match(s),
msg="%r didn't match a trivially true matcher %r" % (s, true))
def test_or__idempotent(self):
test_strings = ['', '42', '31337', 'abcdef', 'a42', '22?']
short_or_short = self.Short() | self.Short()
short = self.Short()
for s in test_strings:
self.assertEquals(
short.match(s), short_or_short.match(s),
msg="expected `%r` and `%r` to match %r equivalently" % (
short, short_or_short, s))
def test_or__regular(self):
test_strings = ['', '42', '31337', 'abcdef', 'qwerty55', 'a42', '22?']
has_digits_or_long = self.HasDigits() | self.Long()
long_or_has_digits = self.LongOrHasDigits()
for s in test_strings:
self.assertEquals(
long_or_has_digits.match(s), has_digits_or_long.match(s),
msg="expected `%r` and `%r` to match %r equivalently" % (
long_or_has_digits, has_digits_or_long, s))
def test_or__repr(self):
has_digits_or_short = self.HasDigits() | self.Short()
self.assert_repr(has_digits_or_short)
def test_xor__impossible(self):
test_strings = ['', 'a', '42', 'a13', '99b', '!', '22 ?']
impossible = self.HasDigits() ^ self.HasDigits() # a^a <=> ~a
for s in test_strings:
self.assertFalse(
impossible.match(s),
msg="%r matched an impossible matcher" % (s,))
def test_xor__trivially_true(self):
test_strings = ['', 'abc', '123456789', 'qwerty?', '!!!!one']
true = self.NoDigits() ^ self.HasDigits()
for s in test_strings:
self.assertTrue(
true.match(s),
msg="%r didn't match a trivially true matcher %r" % (s, true))
def test_xor__as_and_not(self):
test_strings = ['', '42', '31337', 'abcdef', 'a42', '22?']
any_xor_all_digits = self.HasDigits() ^ self.AllDigits()
only_some_digits = self.HasDigits() & ~self.AllDigits()
for s in test_strings:
            # Note that the truth of this assertion is specific to these
            # predicates: the second one implies the first.
self.assertEquals(
only_some_digits.match(s), any_xor_all_digits.match(s),
msg="expected `%r` and `%r` to match %r equivalently" % (
only_some_digits, any_xor_all_digits, s))
def test_xor__repr(self):
all_digits_xor_short = self.AllDigits() ^ self.Short()
self.assert_repr(all_digits_xor_short)
# Utility code
class NoDigits(__unit__.Matcher):
def match(self, value):
return all(not c.isdigit() for c in value)
class HasDigits(__unit__.Matcher):
def match(self, value):
return any(c.isdigit() for c in value)
class AllDigits(__unit__.Matcher):
def match(self, value):
return value.isdigit()
class Short(__unit__.Matcher):
def match(self, value):
return len(value) < 5
class ShortDigits(__unit__.Matcher):
def match(self, value):
return value.isdigit() and len(value) < 5
class Long(__unit__.Matcher):
def match(self, value):
return len(value) >= 5
class LongOrHasDigits(__unit__.Matcher):
def match(self, value):
return len(value) >= 5 or any(c.isdigit() for c in value)
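# Illustrative sketch (not part of the original test suite): the combinators
# exercised above compose via operator overloading on Matcher, e.g. with the
# helper matchers defined in LogicalCombinators:
def _example_combinators():
    short_digits = (LogicalCombinators.Short() &
                    LogicalCombinators.AllDigits())
    assert short_digits.match('123')         # short and all digits
    assert not short_digits.match('123456')  # all digits, but too long
    assert (~short_digits).match('letters')  # negated via __invert__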
|
|
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import psutil
from builtins import input
from past.builtins import basestring
from datetime import datetime
import getpass
import imp
import os
import re
import signal
import subprocess
import sys
import warnings
from airflow.exceptions import AirflowException
# When killing processes, time to wait after issuing a SIGTERM before issuing a
# SIGKILL.
TIME_TO_WAIT_AFTER_SIGTERM = 5
def validate_key(k, max_length=250):
if not isinstance(k, basestring):
raise TypeError("The key has to be a string")
elif len(k) > max_length:
raise AirflowException(
"The key has to be less than {0} characters".format(max_length))
elif not re.match(r'^[A-Za-z0-9_\-\.]+$', k):
raise AirflowException(
"The key ({k}) has to be made of alphanumeric characters, dashes, "
"dots and underscores exclusively".format(**locals()))
else:
return True
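# Illustrative sketch of the contract enforced above (not part of the
# original module):
def _example_validate_key():
    assert validate_key('my_task-1.v2')  # letters, digits, '_', '-', '.' pass
    try:
        validate_key('has space')  # disallowed character
    except AirflowException:
        pass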
def alchemy_to_dict(obj):
"""
Transforms a SQLAlchemy model instance into a dictionary
"""
if not obj:
return None
d = {}
for c in obj.__table__.columns:
value = getattr(obj, c.name)
if type(value) == datetime:
value = value.isoformat()
d[c.name] = value
return d
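# Illustrative sketch (hypothetical model and values): each mapped column is
# copied out and datetime values become ISO-8601 strings, e.g.:
#
#     >>> alchemy_to_dict(task_instance)
#     {'task_id': 'extract', 'execution_date': '2017-01-01T00:00:00'}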
def ask_yesno(question):
yes = set(['yes', 'y'])
no = set(['no', 'n'])
done = False
print(question)
while not done:
choice = input().lower()
if choice in yes:
return True
elif choice in no:
return False
else:
print("Please respond by yes or no.")
def is_in(obj, l):
"""
    Checks whether an object is one of the items in the list.
    This is different from ``in`` because ``in`` compares by equality
    (``__eq__``) when available; here we compare by object identity instead.
"""
for item in l:
if item is obj:
return True
return False
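# Illustrative sketch of the identity-vs-equality distinction above:
def _example_is_in():
    a, b = [1], [1]
    assert a in [b]           # ``in`` falls back to equality
    assert not is_in(a, [b])  # distinct objects
    assert is_in(a, [a])      # identical object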
def is_container(obj):
"""
Test if an object is a container (iterable) but not a string
"""
return hasattr(obj, '__iter__') and not isinstance(obj, basestring)
def as_tuple(obj):
"""
If obj is a container, returns obj as a tuple.
Otherwise, returns a tuple containing obj.
"""
if is_container(obj):
return tuple(obj)
else:
return tuple([obj])
def as_flattened_list(iterable):
"""
Return an iterable with one level flattened
>>> as_flattened_list((('blue', 'red'), ('green', 'yellow', 'pink')))
['blue', 'red', 'green', 'yellow', 'pink']
"""
return [e for i in iterable for e in i]
def chain(*tasks):
"""
Given a number of tasks, builds a dependency chain.
chain(task_1, task_2, task_3, task_4)
is equivalent to
task_1.set_downstream(task_2)
task_2.set_downstream(task_3)
task_3.set_downstream(task_4)
"""
for up_task, down_task in zip(tasks[:-1], tasks[1:]):
up_task.set_downstream(down_task)
def pprinttable(rows):
"""Returns a pretty ascii table from tuples
If namedtuple are used, the table will have headers
"""
if not rows:
return
if hasattr(rows[0], '_fields'): # if namedtuple
headers = rows[0]._fields
else:
headers = ["col{}".format(i) for i in range(len(rows[0]))]
lens = [len(s) for s in headers]
for row in rows:
for i in range(len(rows[0])):
            slen = len("{}".format(row[i]))
            if slen > lens[i]:
                lens[i] = slen
formats = []
hformats = []
for i in range(len(rows[0])):
if isinstance(rows[0][i], int):
formats.append("%%%dd" % lens[i])
else:
formats.append("%%-%ds" % lens[i])
hformats.append("%%-%ds" % lens[i])
pattern = " | ".join(formats)
hpattern = " | ".join(hformats)
separator = "-+-".join(['-' * n for n in lens])
s = ""
s += separator + '\n'
s += (hpattern % tuple(headers)) + '\n'
s += separator + '\n'
def f(t):
return "{}".format(t) if isinstance(t, basestring) else t
for line in rows:
s += pattern % tuple(f(t) for t in line) + '\n'
s += separator + '\n'
return s
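# Illustrative sketch of the layout produced above, using a namedtuple so the
# headers come from _fields (output believed accurate for these inputs):
#
#     >>> from collections import namedtuple
#     >>> Row = namedtuple('Row', ['name', 'count'])
#     >>> print(pprinttable([Row('alpha', 1), Row('beta', 22)]))
#     ------+------
#     name  | count
#     ------+------
#     alpha |     1
#     beta  |    22
#     ------+------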
def kill_using_shell(pid, signal=signal.SIGTERM):
process = psutil.Process(pid)
    # Use sudo only when necessary - consider the SubDagOperator and SequentialExecutor case.
if process.username() != getpass.getuser():
args = ["sudo", "kill", "-{}".format(int(signal)), str(pid)]
else:
args = ["kill", "-{}".format(int(signal)), str(pid)]
# PID may not exist and return a non-zero error code
subprocess.call(args)
def kill_process_tree(logger, pid):
"""
    Kills the process and all of its descendants. Kills using the `kill`
    shell command so that it can change users. Note: killing via PIDs has
    the potential to kill the wrong process if the process dies and the
    PID gets recycled in a narrow time window.
    :param logger: logger
    :type logger: logging.Logger
    :param pid: PID of the root process whose tree should be killed
    :type pid: int
"""
try:
root_process = psutil.Process(pid)
except psutil.NoSuchProcess:
logger.warn("PID: {} does not exist".format(pid))
return
# Check child processes to reduce cases where a child process died but
# the PID got reused.
descendant_processes = [x for x in root_process.children(recursive=True)
if x.is_running()]
if len(descendant_processes) != 0:
logger.warn("Terminating descendant processes of {} PID: {}"
.format(root_process.cmdline(),
root_process.pid))
temp_processes = descendant_processes[:]
for descendant in temp_processes:
logger.warn("Terminating descendant process {} PID: {}"
.format(descendant.cmdline(), descendant.pid))
try:
kill_using_shell(descendant.pid, signal.SIGTERM)
except psutil.NoSuchProcess:
descendant_processes.remove(descendant)
logger.warn("Waiting up to {}s for processes to exit..."
.format(TIME_TO_WAIT_AFTER_SIGTERM))
try:
psutil.wait_procs(descendant_processes, TIME_TO_WAIT_AFTER_SIGTERM)
logger.warn("Done waiting")
except psutil.TimeoutExpired:
logger.warn("Ran out of time while waiting for "
"processes to exit")
# Then SIGKILL
descendant_processes = [x for x in root_process.children(recursive=True)
if x.is_running()]
if len(descendant_processes) > 0:
temp_processes = descendant_processes[:]
for descendant in temp_processes:
logger.warn("Killing descendant process {} PID: {}"
.format(descendant.cmdline(), descendant.pid))
try:
                kill_using_shell(descendant.pid, signal.SIGKILL)
descendant.wait()
except psutil.NoSuchProcess:
descendant_processes.remove(descendant)
logger.warn("Killed all descendant processes of {} PID: {}"
.format(root_process.cmdline(),
root_process.pid))
else:
logger.debug("There are no descendant processes to kill")
def kill_descendant_processes(logger, pids_to_kill=None):
"""
Kills all descendant processes of this process.
:param logger: logger
:type logger: logging.Logger
:param pids_to_kill: if specified, kill only these PIDs
:type pids_to_kill: list[int]
"""
# First try SIGTERM
this_process = psutil.Process(os.getpid())
# Only check child processes to ensure that we don't have a case
# where a child process died but the PID got reused.
descendant_processes = [x for x in this_process.children(recursive=True)
if x.is_running()]
if pids_to_kill:
descendant_processes = [x for x in descendant_processes
if x.pid in pids_to_kill]
if len(descendant_processes) == 0:
logger.debug("There are no descendant processes that can be killed")
return
logger.warn("Terminating descendant processes of {} PID: {}"
.format(this_process.cmdline(),
this_process.pid))
temp_processes = descendant_processes[:]
for descendant in temp_processes:
try:
logger.warn("Terminating descendant process {} PID: {}"
.format(descendant.cmdline(), descendant.pid))
descendant.terminate()
except psutil.NoSuchProcess:
descendant_processes.remove(descendant)
logger.warn("Waiting up to {}s for processes to exit..."
.format(TIME_TO_WAIT_AFTER_SIGTERM))
try:
psutil.wait_procs(descendant_processes, TIME_TO_WAIT_AFTER_SIGTERM)
logger.warn("Done waiting")
except psutil.TimeoutExpired:
logger.warn("Ran out of time while waiting for "
"processes to exit")
# Then SIGKILL
descendant_processes = [x for x in this_process.children(recursive=True)
if x.is_running()]
if pids_to_kill:
descendant_processes = [x for x in descendant_processes
if x.pid in pids_to_kill]
if len(descendant_processes) > 0:
for descendant in descendant_processes:
logger.warn("Killing descendant process {} PID: {}"
.format(descendant.cmdline(), descendant.pid))
try:
descendant.kill()
descendant.wait()
except psutil.NoSuchProcess:
pass
logger.warn("Killed all descendant processes of {} PID: {}"
.format(this_process.cmdline(),
this_process.pid))
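# Illustrative sketch of the intended call pattern for the kill helpers above
# (not part of the original module):
def _example_reap_children():
    import logging
    log = logging.getLogger(__name__)
    kill_descendant_processes(log)  # reap everything this process spawned
    kill_descendant_processes(log, pids_to_kill=[1234, 5678])  # known subset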
class AirflowImporter(object):
"""
Importer that dynamically loads a class and module from its parent. This
allows Airflow to support ``from airflow.operators import BashOperator``
even though BashOperator is actually in
``airflow.operators.bash_operator``.
The importer also takes over for the parent_module by wrapping it. This is
required to support attribute-based usage:
.. code:: python
from airflow import operators
operators.BashOperator(...)
"""
def __init__(self, parent_module, module_attributes):
"""
        :param parent_module: The parent module to wrap, e.g. the
            ``airflow.operators`` module object.
        :type parent_module: module
        :param module_attributes: Mapping from submodule name to the list of
            importable class names it provides.
        :type module_attributes: dict
"""
self._parent_module = parent_module
self._attribute_modules = self._build_attribute_modules(module_attributes)
self._loaded_modules = {}
# Wrap the module so we can take over __getattr__.
sys.modules[parent_module.__name__] = self
@staticmethod
def _build_attribute_modules(module_attributes):
"""
Flips and flattens the module_attributes dictionary from:
module => [Attribute, ...]
To:
Attribute => module
This is useful so that we can find the module to use, given an
attribute.
"""
attribute_modules = {}
for module, attributes in list(module_attributes.items()):
for attribute in attributes:
attribute_modules[attribute] = module
return attribute_modules
def _load_attribute(self, attribute):
"""
Load the class attribute if it hasn't been loaded yet, and return it.
"""
module = self._attribute_modules.get(attribute, False)
if not module:
# This shouldn't happen. The check happens in find_modules, too.
raise ImportError(attribute)
elif module not in self._loaded_modules:
            # Note that it's very important to only load a given module once.
# If they are loaded more than once, the memory reference to the
# class objects changes, and Python thinks that an object of type
# Foo that was declared before Foo's module was reloaded is no
# longer the same type as Foo after it's reloaded.
path = os.path.realpath(self._parent_module.__file__)
folder = os.path.dirname(path)
f, filename, description = imp.find_module(module, [folder])
self._loaded_modules[module] = imp.load_module(module, f, filename, description)
# This functionality is deprecated, and AirflowImporter should be
# removed in 2.0.
warnings.warn(
"Importing {i} directly from {m} has been "
"deprecated. Please import from "
"'{m}.[operator_module]' instead. Support for direct "
"imports will be dropped entirely in Airflow 2.0.".format(
i=attribute, m=self._parent_module),
DeprecationWarning)
loaded_module = self._loaded_modules[module]
return getattr(loaded_module, attribute)
def __getattr__(self, attribute):
"""
Get an attribute from the wrapped module. If the attribute doesn't
        exist, try to import it as a class from a submodule.
This is a Python trick that allows the class to pretend it's a module,
so that attribute-based usage works:
from airflow import operators
operators.BashOperator(...)
It also allows normal from imports to work:
from airflow.operators.bash_operator import BashOperator
"""
if hasattr(self._parent_module, attribute):
# Always default to the parent module if the attribute exists.
return getattr(self._parent_module, attribute)
elif attribute in self._attribute_modules:
            # Try to import the attribute if it has a module defined.
loaded_attribute = self._load_attribute(attribute)
setattr(self, attribute, loaded_attribute)
return loaded_attribute
raise AttributeError
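# Illustrative sketch of how a package would wire this importer up; the
# attribute map below is hypothetical, not the real Airflow operator registry:
#
#     # in airflow/operators/__init__.py
#     import sys
#     _modules_to_attributes = {
#         'bash_operator': ['BashOperator'],
#         'python_operator': ['PythonOperator'],
#     }
#     AirflowImporter(sys.modules[__name__], _modules_to_attributes)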
|
|
# Copyright 2013 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import uuid
from oslo.config import cfg
import webob
from nova.api.openstack.compute import plugins
from nova.api.openstack.compute.plugins.v3 import multiple_create
from nova.api.openstack.compute.plugins.v3 import servers
from nova.compute import api as compute_api
from nova.compute import flavors
from nova import db
from nova.network import manager
from nova.openstack.common import jsonutils
from nova import test
from nova.tests.api.openstack import fakes
from nova.tests import fake_instance
from nova.tests.image import fake
CONF = cfg.CONF
FAKE_UUID = fakes.FAKE_UUID
def fake_gen_uuid():
return FAKE_UUID
def return_security_group(context, instance_id, security_group_id):
pass
class ServersControllerCreateTest(test.TestCase):
def setUp(self):
"""Shared implementation for tests below that create instance."""
super(ServersControllerCreateTest, self).setUp()
self.flags(verbose=True,
enable_instance_password=True)
self.instance_cache_num = 0
self.instance_cache_by_id = {}
self.instance_cache_by_uuid = {}
ext_info = plugins.LoadedExtensionInfo()
self.controller = servers.ServersController(extension_info=ext_info)
CONF.set_override('extensions_blacklist', 'os-multiple-create',
'osapi_v3')
self.no_mult_create_controller = servers.ServersController(
extension_info=ext_info)
def instance_create(context, inst):
inst_type = flavors.get_flavor_by_flavor_id(3)
image_uuid = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
def_image_ref = 'http://localhost/images/%s' % image_uuid
self.instance_cache_num += 1
instance = fake_instance.fake_db_instance(**{
'id': self.instance_cache_num,
'display_name': inst['display_name'] or 'test',
'uuid': FAKE_UUID,
'instance_type': dict(inst_type),
'access_ip_v4': '1.2.3.4',
'access_ip_v6': 'fead::1234',
'image_ref': inst.get('image_ref', def_image_ref),
'user_id': 'fake',
'project_id': 'fake',
'reservation_id': inst['reservation_id'],
"created_at": datetime.datetime(2010, 10, 10, 12, 0, 0),
"updated_at": datetime.datetime(2010, 11, 11, 11, 0, 0),
"progress": 0,
"fixed_ips": [],
"task_state": "",
"vm_state": "",
"security_groups": inst['security_groups'],
})
self.instance_cache_by_id[instance['id']] = instance
self.instance_cache_by_uuid[instance['uuid']] = instance
return instance
def instance_get(context, instance_id):
"""Stub for compute/api create() pulling in instance after
scheduling
"""
return self.instance_cache_by_id[instance_id]
def instance_update(context, uuid, values):
instance = self.instance_cache_by_uuid[uuid]
instance.update(values)
return instance
def server_update(context, instance_uuid, params, update_cells=True,
columns_to_join=None):
inst = self.instance_cache_by_uuid[instance_uuid]
inst.update(params)
return (inst, inst)
def fake_method(*args, **kwargs):
pass
def project_get_networks(context, user_id):
return dict(id='1', host='localhost')
def queue_get_for(context, *args):
return 'network_topic'
fakes.stub_out_rate_limiting(self.stubs)
fakes.stub_out_key_pair_funcs(self.stubs)
fake.stub_out_image_service(self.stubs)
fakes.stub_out_nw_api(self.stubs)
self.stubs.Set(uuid, 'uuid4', fake_gen_uuid)
self.stubs.Set(db, 'instance_add_security_group',
return_security_group)
self.stubs.Set(db, 'project_get_networks',
project_get_networks)
self.stubs.Set(db, 'instance_create', instance_create)
self.stubs.Set(db, 'instance_system_metadata_update',
fake_method)
self.stubs.Set(db, 'instance_get', instance_get)
self.stubs.Set(db, 'instance_update', instance_update)
self.stubs.Set(db, 'instance_update_and_get_original',
server_update)
self.stubs.Set(manager.VlanManager, 'allocate_fixed_ip',
fake_method)
def _test_create_extra(self, params, no_image=False,
override_controller=None):
image_uuid = 'c905cedb-7281-47e4-8a62-f26bc5fc4c77'
server = dict(name='server_test', image_ref=image_uuid, flavor_ref=2)
if no_image:
server.pop('image_ref', None)
server.update(params)
body = dict(server=server)
req = fakes.HTTPRequestV3.blank('/servers')
req.method = 'POST'
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
if override_controller:
server = override_controller.create(req, body).obj['server']
else:
server = self.controller.create(req, body).obj['server']
def test_create_instance_with_multiple_create_disabled(self):
ret_res_id = True
min_count = 2
max_count = 3
params = {
multiple_create.MIN_ATTRIBUTE_NAME: min_count,
multiple_create.MAX_ATTRIBUTE_NAME: max_count,
}
old_create = compute_api.API.create
def create(*args, **kwargs):
self.assertNotIn('min_count', kwargs)
self.assertNotIn('max_count', kwargs)
return old_create(*args, **kwargs)
self.stubs.Set(compute_api.API, 'create', create)
self._test_create_extra(
params,
override_controller=self.no_mult_create_controller)
def test_multiple_create_with_string_type_min_and_max(self):
min_count = '2'
max_count = '3'
params = {
multiple_create.MIN_ATTRIBUTE_NAME: min_count,
multiple_create.MAX_ATTRIBUTE_NAME: max_count,
}
old_create = compute_api.API.create
def create(*args, **kwargs):
self.assertIsInstance(kwargs['min_count'], int)
self.assertIsInstance(kwargs['max_count'], int)
self.assertEqual(kwargs['min_count'], 2)
self.assertEqual(kwargs['max_count'], 3)
return old_create(*args, **kwargs)
self.stubs.Set(compute_api.API, 'create', create)
self._test_create_extra(params)
def test_create_instance_with_multiple_create_enabled(self):
min_count = 2
max_count = 3
params = {
multiple_create.MIN_ATTRIBUTE_NAME: min_count,
multiple_create.MAX_ATTRIBUTE_NAME: max_count,
}
old_create = compute_api.API.create
def create(*args, **kwargs):
self.assertEqual(kwargs['min_count'], 2)
self.assertEqual(kwargs['max_count'], 3)
return old_create(*args, **kwargs)
self.stubs.Set(compute_api.API, 'create', create)
self._test_create_extra(params)
def test_create_instance_invalid_negative_min(self):
image_href = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
flavor_ref = 'http://localhost/123/flavors/3'
body = {
'server': {
multiple_create.MIN_ATTRIBUTE_NAME: -1,
'name': 'server_test',
'image_ref': image_href,
'flavor_ref': flavor_ref,
}
}
req = fakes.HTTPRequestV3.blank('/servers')
req.method = 'POST'
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create,
req,
body)
def test_create_instance_invalid_negative_max(self):
image_href = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
flavor_ref = 'http://localhost/123/flavors/3'
body = {
'server': {
multiple_create.MAX_ATTRIBUTE_NAME: -1,
'name': 'server_test',
'image_ref': image_href,
'flavor_ref': flavor_ref,
}
}
req = fakes.HTTPRequestV3.blank('/servers')
req.method = 'POST'
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create,
req,
body)
def test_create_instance_invalid_min_greater_than_max(self):
image_href = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
flavor_ref = 'http://localhost/123/flavors/3'
body = {
'server': {
multiple_create.MIN_ATTRIBUTE_NAME: 4,
multiple_create.MAX_ATTRIBUTE_NAME: 2,
'name': 'server_test',
'image_ref': image_href,
'flavor_ref': flavor_ref,
}
}
req = fakes.HTTPRequestV3.blank('/servers')
req.method = 'POST'
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create,
req,
body)
def test_create_instance_invalid_alpha_min(self):
image_href = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
flavor_ref = 'http://localhost/123/flavors/3'
body = {
'server': {
multiple_create.MIN_ATTRIBUTE_NAME: 'abcd',
'name': 'server_test',
'image_ref': image_href,
'flavor_ref': flavor_ref,
}
}
req = fakes.HTTPRequestV3.blank('/servers')
req.method = 'POST'
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create,
req,
body)
def test_create_instance_invalid_alpha_max(self):
image_href = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
flavor_ref = 'http://localhost/123/flavors/3'
body = {
'server': {
multiple_create.MAX_ATTRIBUTE_NAME: 'abcd',
'name': 'server_test',
'image_ref': image_href,
'flavor_ref': flavor_ref,
}
}
req = fakes.HTTPRequestV3.blank('/servers')
req.method = 'POST'
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create,
req,
body)
def test_create_multiple_instances(self):
"""Test creating multiple instances but not asking for
reservation_id
"""
image_href = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
flavor_ref = 'http://localhost/123/flavors/3'
body = {
'server': {
multiple_create.MIN_ATTRIBUTE_NAME: 2,
'name': 'server_test',
'image_ref': image_href,
'flavor_ref': flavor_ref,
'metadata': {'hello': 'world',
'open': 'stack'},
}
}
req = fakes.HTTPRequestV3.blank('/servers')
req.method = 'POST'
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
res = self.controller.create(req, body).obj
self.assertEqual(FAKE_UUID, res["server"]["id"])
self._check_admin_password_len(res["server"])
def test_create_multiple_instances_pass_disabled(self):
"""Test creating multiple instances but not asking for
reservation_id
"""
self.flags(enable_instance_password=False)
image_href = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
flavor_ref = 'http://localhost/123/flavors/3'
body = {
'server': {
multiple_create.MIN_ATTRIBUTE_NAME: 2,
'name': 'server_test',
'image_ref': image_href,
'flavor_ref': flavor_ref,
'metadata': {'hello': 'world',
'open': 'stack'},
}
}
req = fakes.HTTPRequestV3.blank('/servers')
req.method = 'POST'
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
res = self.controller.create(req, body).obj
self.assertEqual(FAKE_UUID, res["server"]["id"])
self._check_admin_password_missing(res["server"])
def _check_admin_password_len(self, server_dict):
"""utility function - check server_dict for admin_password length."""
self.assertEqual(CONF.password_length,
len(server_dict["admin_password"]))
def _check_admin_password_missing(self, server_dict):
"""utility function - check server_dict for admin_password absence."""
self.assertNotIn("admin_password", server_dict)
def test_create_multiple_instances_resv_id_return(self):
"""Test creating multiple instances with asking for
reservation_id
"""
image_href = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
flavor_ref = 'http://localhost/123/flavors/3'
body = {
'server': {
multiple_create.MIN_ATTRIBUTE_NAME: 2,
'name': 'server_test',
'image_ref': image_href,
'flavor_ref': flavor_ref,
'metadata': {'hello': 'world',
'open': 'stack'},
multiple_create.RRID_ATTRIBUTE_NAME: True
}
}
req = fakes.HTTPRequestV3.blank('/servers')
req.method = 'POST'
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
res = self.controller.create(req, body)
reservation_id = res.obj['servers_reservation']['reservation_id']
self.assertNotEqual(reservation_id, "")
self.assertIsNotNone(reservation_id)
self.assertTrue(len(reservation_id) > 1)
def test_create_multiple_instances_with_multiple_volume_bdm(self):
"""
Test that a BadRequest is raised if multiple instances
are requested with a list of block device mappings for volumes.
"""
min_count = 2
bdm = [{'device_name': 'foo1', 'volume_id': 'vol-xxxx'},
{'device_name': 'foo2', 'volume_id': 'vol-yyyy'}
]
params = {
'block_device_mapping': bdm,
'min_count': min_count
}
old_create = compute_api.API.create
def create(*args, **kwargs):
self.assertEqual(kwargs['min_count'], 2)
self.assertEqual(len(kwargs['block_device_mapping']), 2)
return old_create(*args, **kwargs)
self.stubs.Set(compute_api.API, 'create', create)
self.assertRaises(webob.exc.HTTPBadRequest,
self._test_create_extra, params, no_image=True)
def test_create_multiple_instances_with_single_volume_bdm(self):
"""
Test that a BadRequest is raised if multiple instances
are requested to boot from a single volume.
"""
min_count = 2
bdm = [{'device_name': 'foo1', 'volume_id': 'vol-xxxx'}]
params = {
'block_device_mapping': bdm,
multiple_create.MIN_ATTRIBUTE_NAME: min_count
}
old_create = compute_api.API.create
def create(*args, **kwargs):
self.assertEqual(kwargs['min_count'], 2)
self.assertEqual(kwargs['block_device_mapping']['volume_id'],
'vol-xxxx')
return old_create(*args, **kwargs)
self.stubs.Set(compute_api.API, 'create', create)
self.assertRaises(webob.exc.HTTPBadRequest,
self._test_create_extra, params, no_image=True)
def test_create_multiple_instance_with_non_integer_max_count(self):
image_href = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
flavor_ref = 'http://localhost/123/flavors/3'
body = {
'server': {
multiple_create.MAX_ATTRIBUTE_NAME: 2.5,
'name': 'server_test',
'image_ref': image_href,
'flavor_ref': flavor_ref,
'metadata': {'hello': 'world',
'open': 'stack'},
}
}
req = fakes.HTTPRequestV3.blank('/servers')
req.method = 'POST'
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create, req, body)
def test_create_multiple_instance_with_non_integer_min_count(self):
image_href = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
flavor_ref = 'http://localhost/123/flavors/3'
body = {
'server': {
multiple_create.MIN_ATTRIBUTE_NAME: 2.5,
'name': 'server_test',
'image_ref': image_href,
'flavor_ref': flavor_ref,
'metadata': {'hello': 'world',
'open': 'stack'},
}
}
req = fakes.HTTPRequestV3.blank('/servers')
req.method = 'POST'
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create, req, body)
|
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import os
import tempfile
from wsgiref.util import FileWrapper
from django import forms
from django.conf.urls import url
from django.contrib import admin
from django.contrib.admin import BooleanFieldListFilter
from django.contrib.admin.views.main import ChangeList
from django.contrib.auth.admin import GroupAdmin, UserAdmin
from django.contrib.auth.models import Group, User
from django.core.exceptions import ValidationError
from django.core.files.storage import FileSystemStorage
from django.core.mail import EmailMessage
from django.db import models
from django.forms.models import BaseModelFormSet
from django.http import HttpResponse, StreamingHttpResponse
from django.utils.html import format_html
from django.utils.safestring import mark_safe
from django.utils.six import StringIO
from .forms import MediaActionForm
from .models import (
Actor, AdminOrderedAdminMethod, AdminOrderedCallable, AdminOrderedField,
AdminOrderedModelMethod, Album, Answer, Article, BarAccount, Book,
Bookmark, Category, Chapter, ChapterXtra1, Child, ChildOfReferer, Choice,
City, Collector, Color, Color2, ComplexSortedPerson, CoverLetter,
CustomArticle, CyclicOne, CyclicTwo, DependentChild, DooHickey, EmptyModel,
EmptyModelHidden, EmptyModelMixin, EmptyModelVisible, ExplicitlyProvidedPK,
ExternalSubscriber, Fabric, FancyDoodad, FieldOverridePost,
FilteredManager, FooAccount, FoodDelivery, FunkyTag, Gadget, Gallery,
GenRelReference, Grommet, ImplicitlyGeneratedPK, Ingredient,
InlineReference, InlineReferer, Inquisition, Language, Link,
MainPrepopulated, ModelWithStringPrimaryKey, NotReferenced, OldSubscriber,
OtherStory, Paper, Parent, ParentWithDependentChildren, ParentWithUUIDPK,
Person, Persona, Picture, Pizza, Plot, PlotDetails, PlotProxy,
PluggableSearchPerson, Podcast, Post, PrePopulatedPost,
PrePopulatedPostLargeSlug, PrePopulatedSubPost, Promo, Question, Recipe,
Recommendation, Recommender, ReferencedByGenRel, ReferencedByInline,
ReferencedByParent, RelatedPrepopulated, RelatedWithUUIDPKModel, Report,
Reservation, Restaurant, RowLevelChangePermissionModel, Section,
ShortMessage, Simple, Sketch, State, Story, StumpJoke, Subscriber,
SuperVillain, Telegram, Thing, Topping, UnchangeableObject,
UndeletableObject, UnorderedObject, UserMessenger, Villain, Vodcast,
Whatsit, Widget, Worker, WorkHour,
)
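# Callables used in list_display can be made sortable by assigning them an
# admin_order_field, as callable_year below demonstrates.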
def callable_year(dt_value):
try:
return dt_value.year
except AttributeError:
return None
callable_year.admin_order_field = 'date'
class ArticleInline(admin.TabularInline):
model = Article
fk_name = 'section'
prepopulated_fields = {
'title': ('content',)
}
fieldsets = (
('Some fields', {
'classes': ('collapse',),
'fields': ('title', 'content')
}),
('Some other fields', {
'classes': ('wide',),
'fields': ('date', 'section')
})
)
class ChapterInline(admin.TabularInline):
model = Chapter
class ChapterXtra1Admin(admin.ModelAdmin):
list_filter = ('chap',
'chap__title',
'chap__book',
'chap__book__name',
'chap__book__promo',
'chap__book__promo__name',)
class ArticleAdmin(admin.ModelAdmin):
list_display = (
'content', 'date', callable_year, 'model_year', 'modeladmin_year',
'model_year_reversed', 'section', lambda obj: obj.title,
)
list_editable = ('section',)
list_filter = ('date', 'section')
view_on_site = False
fieldsets = (
('Some fields', {
'classes': ('collapse',),
'fields': ('title', 'content')
}),
('Some other fields', {
'classes': ('wide',),
'fields': ('date', 'section', 'sub_section')
})
)
def changelist_view(self, request):
return super(ArticleAdmin, self).changelist_view(
request, extra_context={
'extra_var': 'Hello!'
}
)
def modeladmin_year(self, obj):
return obj.date.year
modeladmin_year.admin_order_field = 'date'
modeladmin_year.short_description = None
def delete_model(self, request, obj):
EmailMessage(
'Greetings from a deleted object',
'I hereby inform you that some user deleted me',
'from@example.com',
['to@example.com']
).send()
return super(ArticleAdmin, self).delete_model(request, obj)
def save_model(self, request, obj, form, change=True):
EmailMessage(
'Greetings from a created object',
'I hereby inform you that some user created me',
'from@example.com',
['to@example.com']
).send()
return super(ArticleAdmin, self).save_model(request, obj, form, change)
class ArticleAdmin2(admin.ModelAdmin):
def has_module_permission(self, request):
return False
class RowLevelChangePermissionModelAdmin(admin.ModelAdmin):
def has_change_permission(self, request, obj=None):
""" Only allow changing objects with even id number """
return request.user.is_staff and (obj is not None) and (obj.id % 2 == 0)
class CustomArticleAdmin(admin.ModelAdmin):
"""
Tests various hooks for using custom templates and contexts.
"""
change_list_template = 'custom_admin/change_list.html'
change_form_template = 'custom_admin/change_form.html'
add_form_template = 'custom_admin/add_form.html'
object_history_template = 'custom_admin/object_history.html'
delete_confirmation_template = 'custom_admin/delete_confirmation.html'
delete_selected_confirmation_template = 'custom_admin/delete_selected_confirmation.html'
popup_response_template = 'custom_admin/popup_response.html'
def changelist_view(self, request):
return super(CustomArticleAdmin, self).changelist_view(
request, extra_context={
'extra_var': 'Hello!'
}
)
class ThingAdmin(admin.ModelAdmin):
list_filter = ('color__warm', 'color__value', 'pub_date',)
class InquisitionAdmin(admin.ModelAdmin):
list_display = ('leader', 'country', 'expected', 'sketch')
def sketch(self, obj):
# A method with the same name as a reverse accessor.
return 'list-display-sketch'
class SketchAdmin(admin.ModelAdmin):
raw_id_fields = ('inquisition', 'defendant0', 'defendant1')
class FabricAdmin(admin.ModelAdmin):
list_display = ('surface',)
list_filter = ('surface',)
class BasePersonModelFormSet(BaseModelFormSet):
def clean(self):
for person_dict in self.cleaned_data:
person = person_dict.get('id')
alive = person_dict.get('alive')
if person and alive and person.name == "Grace Hopper":
raise forms.ValidationError("Grace is not a Zombie")
class PersonAdmin(admin.ModelAdmin):
list_display = ('name', 'gender', 'alive')
list_editable = ('gender', 'alive')
list_filter = ('gender',)
search_fields = ('^name',)
save_as = True
def get_changelist_formset(self, request, **kwargs):
return super(PersonAdmin, self).get_changelist_formset(request, formset=BasePersonModelFormSet, **kwargs)
def get_queryset(self, request):
# Order by a field that isn't in list display, to be able to test
# whether ordering is preserved.
return super(PersonAdmin, self).get_queryset(request).order_by('age')
class FooAccountAdmin(admin.StackedInline):
model = FooAccount
extra = 1
class BarAccountAdmin(admin.StackedInline):
model = BarAccount
extra = 1
class PersonaAdmin(admin.ModelAdmin):
inlines = (
FooAccountAdmin,
BarAccountAdmin
)
class SubscriberAdmin(admin.ModelAdmin):
actions = ['mail_admin']
action_form = MediaActionForm
def mail_admin(self, request, selected):
EmailMessage(
'Greetings from a ModelAdmin action',
'This is the test email from an admin action',
'from@example.com',
['to@example.com']
).send()
def external_mail(modeladmin, request, selected):
EmailMessage(
'Greetings from a function action',
'This is the test email from a function action',
'from@example.com',
['to@example.com']
).send()
external_mail.short_description = 'External mail (Another awesome action)'
def redirect_to(modeladmin, request, selected):
from django.http import HttpResponseRedirect
return HttpResponseRedirect('/some-where-else/')
redirect_to.short_description = 'Redirect to (Awesome action)'
def download(modeladmin, request, selected):
buf = StringIO('This is the content of the file')
return StreamingHttpResponse(FileWrapper(buf))
download.short_description = 'Download subscription'
def no_perm(modeladmin, request, selected):
return HttpResponse(content='No permission to perform this action',
status=403)
no_perm.short_description = 'No permission to run'
class ExternalSubscriberAdmin(admin.ModelAdmin):
actions = [redirect_to, external_mail, download, no_perm]
class PodcastAdmin(admin.ModelAdmin):
list_display = ('name', 'release_date')
list_editable = ('release_date',)
date_hierarchy = 'release_date'
ordering = ('name',)
class VodcastAdmin(admin.ModelAdmin):
list_display = ('name', 'released')
list_editable = ('released',)
ordering = ('name',)
class ChildInline(admin.StackedInline):
model = Child
class ParentAdmin(admin.ModelAdmin):
model = Parent
inlines = [ChildInline]
save_as = True
list_editable = ('name',)
def save_related(self, request, form, formsets, change):
super(ParentAdmin, self).save_related(request, form, formsets, change)
first_name, last_name = form.instance.name.split()
for child in form.instance.child_set.all():
if len(child.name.split()) < 2:
child.name = child.name + ' ' + last_name
child.save()
class EmptyModelAdmin(admin.ModelAdmin):
def get_queryset(self, request):
return super(EmptyModelAdmin, self).get_queryset(request).filter(pk__gt=1)
class OldSubscriberAdmin(admin.ModelAdmin):
actions = None
temp_storage = FileSystemStorage(tempfile.mkdtemp())
UPLOAD_TO = os.path.join(temp_storage.location, 'test_upload')
class PictureInline(admin.TabularInline):
model = Picture
extra = 1
class GalleryAdmin(admin.ModelAdmin):
inlines = [PictureInline]
class PictureAdmin(admin.ModelAdmin):
pass
class LanguageAdmin(admin.ModelAdmin):
list_display = ['iso', 'shortlist', 'english_name', 'name']
list_editable = ['shortlist']
class RecommendationAdmin(admin.ModelAdmin):
show_full_result_count = False
search_fields = ('=titletranslation__text', '=the_recommender__titletranslation__text',)
class WidgetInline(admin.StackedInline):
model = Widget
class DooHickeyInline(admin.StackedInline):
model = DooHickey
class GrommetInline(admin.StackedInline):
model = Grommet
class WhatsitInline(admin.StackedInline):
model = Whatsit
class FancyDoodadInline(admin.StackedInline):
model = FancyDoodad
class CategoryAdmin(admin.ModelAdmin):
list_display = ('id', 'collector', 'order')
list_editable = ('order',)
class CategoryInline(admin.StackedInline):
model = Category
class CollectorAdmin(admin.ModelAdmin):
inlines = [
WidgetInline, DooHickeyInline, GrommetInline, WhatsitInline,
FancyDoodadInline, CategoryInline
]
class LinkInline(admin.TabularInline):
model = Link
extra = 1
readonly_fields = ("posted", "multiline", "readonly_link_content")
def multiline(self, instance):
return "InlineMultiline\ntest\nstring"
class SubPostInline(admin.TabularInline):
model = PrePopulatedSubPost
prepopulated_fields = {
'subslug': ('subtitle',)
}
def get_readonly_fields(self, request, obj=None):
if obj and obj.published:
return ('subslug',)
return self.readonly_fields
def get_prepopulated_fields(self, request, obj=None):
if obj and obj.published:
return {}
return self.prepopulated_fields
class PrePopulatedPostAdmin(admin.ModelAdmin):
list_display = ['title', 'slug']
prepopulated_fields = {
'slug': ('title',)
}
inlines = [SubPostInline]
def get_readonly_fields(self, request, obj=None):
if obj and obj.published:
return ('slug',)
return self.readonly_fields
def get_prepopulated_fields(self, request, obj=None):
if obj and obj.published:
return {}
return self.prepopulated_fields
class PostAdmin(admin.ModelAdmin):
list_display = ['title', 'public']
readonly_fields = (
'posted', 'awesomeness_level', 'coolness', 'value',
'multiline', 'multiline_html', lambda obj: "foo",
'multiline_html_allow_tags', 'readonly_content',
)
inlines = [
LinkInline
]
def coolness(self, instance):
if instance.pk:
return "%d amount of cool." % instance.pk
else:
return "Unknown coolness."
def value(self, instance):
return 1000
value.short_description = 'Value in $US'
def multiline(self, instance):
return "Multiline\ntest\nstring"
def multiline_html(self, instance):
return mark_safe("Multiline<br>\nhtml<br>\ncontent")
def multiline_html_allow_tags(self, instance):
return "Multiline<br>html<br>content<br>with allow tags"
multiline_html_allow_tags.allow_tags = True
class FieldOverridePostForm(forms.ModelForm):
model = FieldOverridePost
class Meta:
help_texts = {
'posted': 'Overridden help text for the date',
}
labels = {
'public': 'Overridden public label',
}
class FieldOverridePostAdmin(PostAdmin):
form = FieldOverridePostForm
class CustomChangeList(ChangeList):
def get_queryset(self, request):
return self.root_queryset.order_by('pk').filter(pk=9999) # Doesn't exist
class GadgetAdmin(admin.ModelAdmin):
def get_changelist(self, request, **kwargs):
return CustomChangeList
class ToppingAdmin(admin.ModelAdmin):
readonly_fields = ('pizzas',)
class PizzaAdmin(admin.ModelAdmin):
readonly_fields = ('toppings',)
class WorkHourAdmin(admin.ModelAdmin):
list_display = ('datum', 'employee')
list_filter = ('employee',)
class FoodDeliveryAdmin(admin.ModelAdmin):
list_display = ('reference', 'driver', 'restaurant')
list_editable = ('driver', 'restaurant')
class CoverLetterAdmin(admin.ModelAdmin):
"""
A ModelAdmin with a custom get_queryset() method that uses defer(), to test
verbose_name display in messages shown after adding/editing CoverLetter
instances. Note that the CoverLetter model defines a __str__ method.
For testing fix for ticket #14529.
"""
def get_queryset(self, request):
return super(CoverLetterAdmin, self).get_queryset(request).defer('date_written')
class PaperAdmin(admin.ModelAdmin):
"""
A ModelAdmin with a custom get_queryset() method that uses only(), to test
verbose_name display in messages shown after adding/editing Paper
instances.
For testing fix for ticket #14529.
"""
def get_queryset(self, request):
return super(PaperAdmin, self).get_queryset(request).only('title')
class ShortMessageAdmin(admin.ModelAdmin):
"""
A ModelAdmin with a custom get_queryset() method that uses defer(), to test
verbose_name display in messages shown after adding/editing ShortMessage
instances.
For testing fix for ticket #14529.
"""
def get_queryset(self, request):
return super(ShortMessageAdmin, self).get_queryset(request).defer('timestamp')
class TelegramAdmin(admin.ModelAdmin):
"""
A ModelAdmin with a custom get_queryset() method that uses only(), to test
verbose_name display in messages shown after adding/editing Telegram
instances. Note that the Telegram model defines a __str__ method.
For testing fix for ticket #14529.
"""
def get_queryset(self, request):
return super(TelegramAdmin, self).get_queryset(request).only('title')
class StoryForm(forms.ModelForm):
class Meta:
widgets = {'title': forms.HiddenInput}
class StoryAdmin(admin.ModelAdmin):
list_display = ('id', 'title', 'content')
list_display_links = ('title',) # 'id' not in list_display_links
list_editable = ('content', )
form = StoryForm
ordering = ["-pk"]
class OtherStoryAdmin(admin.ModelAdmin):
list_display = ('id', 'title', 'content')
list_display_links = ('title', 'id') # 'id' in list_display_links
list_editable = ('content', )
ordering = ["-pk"]
class ComplexSortedPersonAdmin(admin.ModelAdmin):
list_display = ('name', 'age', 'is_employee', 'colored_name')
ordering = ('name',)
def colored_name(self, obj):
return format_html('<span style="color: #ff00ff;">{}</span>', obj.name)
colored_name.admin_order_field = 'name'
class PluggableSearchPersonAdmin(admin.ModelAdmin):
list_display = ('name', 'age')
search_fields = ('name',)
def get_search_results(self, request, queryset, search_term):
queryset, use_distinct = super(PluggableSearchPersonAdmin, self).get_search_results(
request, queryset, search_term
)
try:
search_term_as_int = int(search_term)
except ValueError:
pass
else:
queryset |= self.model.objects.filter(age=search_term_as_int)
return queryset, use_distinct
class AlbumAdmin(admin.ModelAdmin):
list_filter = ['title']
class PrePopulatedPostLargeSlugAdmin(admin.ModelAdmin):
prepopulated_fields = {
'slug': ('title',)
}
class AdminOrderedFieldAdmin(admin.ModelAdmin):
ordering = ('order',)
list_display = ('stuff', 'order')
class AdminOrderedModelMethodAdmin(admin.ModelAdmin):
ordering = ('order',)
list_display = ('stuff', 'some_order')
class AdminOrderedAdminMethodAdmin(admin.ModelAdmin):
def some_admin_order(self, obj):
return obj.order
some_admin_order.admin_order_field = 'order'
ordering = ('order',)
list_display = ('stuff', 'some_admin_order')
def admin_ordered_callable(obj):
return obj.order
admin_ordered_callable.admin_order_field = 'order'
class AdminOrderedCallableAdmin(admin.ModelAdmin):
ordering = ('order',)
list_display = ('stuff', admin_ordered_callable)
class ReportAdmin(admin.ModelAdmin):
def extra(self, request):
return HttpResponse()
def get_urls(self):
# Corner case: Don't call parent implementation
return [
url(r'^extra/$',
self.extra,
name='cable_extra'),
]
class CustomTemplateBooleanFieldListFilter(BooleanFieldListFilter):
template = 'custom_filter_template.html'
class CustomTemplateFilterColorAdmin(admin.ModelAdmin):
list_filter = (('warm', CustomTemplateBooleanFieldListFilter),)
# For Selenium Prepopulated tests -------------------------------------
class RelatedPrepopulatedInline1(admin.StackedInline):
fieldsets = (
(None, {
'fields': (('pubdate', 'status'), ('name', 'slug1', 'slug2',),)
}),
)
formfield_overrides = {models.CharField: {'strip': False}}
model = RelatedPrepopulated
extra = 1
prepopulated_fields = {'slug1': ['name', 'pubdate'],
'slug2': ['status', 'name']}
class RelatedPrepopulatedInline2(admin.TabularInline):
model = RelatedPrepopulated
extra = 1
prepopulated_fields = {'slug1': ['name', 'pubdate'],
'slug2': ['status', 'name']}
class MainPrepopulatedAdmin(admin.ModelAdmin):
inlines = [RelatedPrepopulatedInline1, RelatedPrepopulatedInline2]
fieldsets = (
(None, {
'fields': (('pubdate', 'status'), ('name', 'slug1', 'slug2', 'slug3'))
}),
)
formfield_overrides = {models.CharField: {'strip': False}}
prepopulated_fields = {
'slug1': ['name', 'pubdate'],
'slug2': ['status', 'name'],
'slug3': ['name'],
}
class UnorderedObjectAdmin(admin.ModelAdmin):
list_display = ['name']
list_editable = ['name']
list_per_page = 2
class UndeletableObjectAdmin(admin.ModelAdmin):
def change_view(self, *args, **kwargs):
kwargs['extra_context'] = {'show_delete': False}
return super(UndeletableObjectAdmin, self).change_view(*args, **kwargs)
class UnchangeableObjectAdmin(admin.ModelAdmin):
def get_urls(self):
# Disable change_view, but leave other urls untouched
urlpatterns = super(UnchangeableObjectAdmin, self).get_urls()
return [p for p in urlpatterns if p.name and not p.name.endswith("_change")]
def callable_on_unknown(obj):
return obj.unknown
class AttributeErrorRaisingAdmin(admin.ModelAdmin):
list_display = [callable_on_unknown, ]
class CustomManagerAdmin(admin.ModelAdmin):
def get_queryset(self, request):
return FilteredManager.objects
class MessageTestingAdmin(admin.ModelAdmin):
actions = ["message_debug", "message_info", "message_success",
"message_warning", "message_error", "message_extra_tags"]
def message_debug(self, request, selected):
self.message_user(request, "Test debug", level="debug")
def message_info(self, request, selected):
self.message_user(request, "Test info", level="info")
def message_success(self, request, selected):
self.message_user(request, "Test success", level="success")
def message_warning(self, request, selected):
self.message_user(request, "Test warning", level="warning")
def message_error(self, request, selected):
self.message_user(request, "Test error", level="error")
def message_extra_tags(self, request, selected):
self.message_user(request, "Test tags", extra_tags="extra_tag")
class ChoiceList(admin.ModelAdmin):
list_display = ['choice']
readonly_fields = ['choice']
fields = ['choice']
class DependentChildAdminForm(forms.ModelForm):
"""
Issue #20522
Form to test child dependency on parent object's validation
"""
def clean(self):
parent = self.cleaned_data.get('parent')
if parent.family_name and parent.family_name != self.cleaned_data.get('family_name'):
raise ValidationError("Children must share a family name with their parents " +
"in this contrived test case")
return super(DependentChildAdminForm, self).clean()
class DependentChildInline(admin.TabularInline):
model = DependentChild
form = DependentChildAdminForm
class ParentWithDependentChildrenAdmin(admin.ModelAdmin):
inlines = [DependentChildInline]
# Tests for ticket 11277 ----------------------------------
class FormWithoutHiddenField(forms.ModelForm):
first = forms.CharField()
second = forms.CharField()
class FormWithoutVisibleField(forms.ModelForm):
first = forms.CharField(widget=forms.HiddenInput)
second = forms.CharField(widget=forms.HiddenInput)
class FormWithVisibleAndHiddenField(forms.ModelForm):
first = forms.CharField(widget=forms.HiddenInput)
second = forms.CharField()
class EmptyModelVisibleAdmin(admin.ModelAdmin):
form = FormWithoutHiddenField
fieldsets = (
(None, {
'fields': (('first', 'second'),),
}),
)
class EmptyModelHiddenAdmin(admin.ModelAdmin):
form = FormWithoutVisibleField
fieldsets = EmptyModelVisibleAdmin.fieldsets
class EmptyModelMixinAdmin(admin.ModelAdmin):
form = FormWithVisibleAndHiddenField
fieldsets = EmptyModelVisibleAdmin.fieldsets
class CityInlineAdmin(admin.TabularInline):
model = City
view_on_site = False
class StateAdmin(admin.ModelAdmin):
inlines = [CityInlineAdmin]
class RestaurantInlineAdmin(admin.TabularInline):
model = Restaurant
view_on_site = True
class CityAdmin(admin.ModelAdmin):
inlines = [RestaurantInlineAdmin]
view_on_site = True
class WorkerAdmin(admin.ModelAdmin):
def view_on_site(self, obj):
return '/worker/%s/%s/' % (obj.surname, obj.name)
class WorkerInlineAdmin(admin.TabularInline):
model = Worker
def view_on_site(self, obj):
return '/worker_inline/%s/%s/' % (obj.surname, obj.name)
class RestaurantAdmin(admin.ModelAdmin):
inlines = [WorkerInlineAdmin]
view_on_site = False
def get_changeform_initial_data(self, request):
return {'name': 'overridden_value'}
class FunkyTagAdmin(admin.ModelAdmin):
list_display = ('name', 'content_object')
class InlineReferenceInline(admin.TabularInline):
model = InlineReference
class InlineRefererAdmin(admin.ModelAdmin):
inlines = [InlineReferenceInline]
class PlotReadonlyAdmin(admin.ModelAdmin):
readonly_fields = ('plotdetails',)
class GetFormsetsArgumentCheckingAdmin(admin.ModelAdmin):
fields = ['name']
def add_view(self, request, *args, **kwargs):
request.is_add_view = True
return super(GetFormsetsArgumentCheckingAdmin, self).add_view(request, *args, **kwargs)
def change_view(self, request, *args, **kwargs):
request.is_add_view = False
return super(GetFormsetsArgumentCheckingAdmin, self).change_view(request, *args, **kwargs)
def get_formsets_with_inlines(self, request, obj=None):
if request.is_add_view and obj is not None:
raise Exception("'obj' passed to get_formsets_with_inlines wasn't None during add_view")
if not request.is_add_view and obj is None:
raise Exception("'obj' passed to get_formsets_with_inlines was None during change_view")
return super(GetFormsetsArgumentCheckingAdmin, self).get_formsets_with_inlines(request, obj)
site = admin.AdminSite(name="admin")
site.site_url = '/my-site-url/'
site.register(Article, ArticleAdmin)
site.register(CustomArticle, CustomArticleAdmin)
site.register(Section, save_as=True, inlines=[ArticleInline], readonly_fields=['name_property'])
site.register(ModelWithStringPrimaryKey)
site.register(Color)
site.register(Thing, ThingAdmin)
site.register(Actor)
site.register(Inquisition, InquisitionAdmin)
site.register(Sketch, SketchAdmin)
site.register(Person, PersonAdmin)
site.register(Persona, PersonaAdmin)
site.register(Subscriber, SubscriberAdmin)
site.register(ExternalSubscriber, ExternalSubscriberAdmin)
site.register(OldSubscriber, OldSubscriberAdmin)
site.register(Podcast, PodcastAdmin)
site.register(Vodcast, VodcastAdmin)
site.register(Parent, ParentAdmin)
site.register(EmptyModel, EmptyModelAdmin)
site.register(Fabric, FabricAdmin)
site.register(Gallery, GalleryAdmin)
site.register(Picture, PictureAdmin)
site.register(Language, LanguageAdmin)
site.register(Recommendation, RecommendationAdmin)
site.register(Recommender)
site.register(Collector, CollectorAdmin)
site.register(Category, CategoryAdmin)
site.register(Post, PostAdmin)
site.register(FieldOverridePost, FieldOverridePostAdmin)
site.register(Gadget, GadgetAdmin)
site.register(Villain)
site.register(SuperVillain)
site.register(Plot)
site.register(PlotDetails)
site.register(PlotProxy, PlotReadonlyAdmin)
site.register(Bookmark)
site.register(CyclicOne)
site.register(CyclicTwo)
site.register(WorkHour, WorkHourAdmin)
site.register(Reservation)
site.register(FoodDelivery, FoodDeliveryAdmin)
site.register(RowLevelChangePermissionModel, RowLevelChangePermissionModelAdmin)
site.register(Paper, PaperAdmin)
site.register(CoverLetter, CoverLetterAdmin)
site.register(ShortMessage, ShortMessageAdmin)
site.register(Telegram, TelegramAdmin)
site.register(Story, StoryAdmin)
site.register(OtherStory, OtherStoryAdmin)
site.register(Report, ReportAdmin)
site.register(MainPrepopulated, MainPrepopulatedAdmin)
site.register(UnorderedObject, UnorderedObjectAdmin)
site.register(UndeletableObject, UndeletableObjectAdmin)
site.register(UnchangeableObject, UnchangeableObjectAdmin)
site.register(State, StateAdmin)
site.register(City, CityAdmin)
site.register(Restaurant, RestaurantAdmin)
site.register(Worker, WorkerAdmin)
site.register(FunkyTag, FunkyTagAdmin)
site.register(ReferencedByParent)
site.register(ChildOfReferer)
site.register(ReferencedByInline)
site.register(InlineReferer, InlineRefererAdmin)
site.register(ReferencedByGenRel)
site.register(GenRelReference)
site.register(ParentWithUUIDPK)
site.register(RelatedWithUUIDPKModel)
# We intentionally register Promo and ChapterXtra1 but not Chapter nor ChapterXtra2.
# That way we cover all four cases:
# related ForeignKey object registered in admin
# related ForeignKey object not registered in admin
# related OneToOne object registered in admin
# related OneToOne object not registered in admin
# when deleting a Book, so as to exercise all four troublesome paths
# (w.r.t. escaping, and calling force_text to avoid problems on Python 2.3)
# through contrib.admin.utils's get_deleted_objects function.
site.register(Book, inlines=[ChapterInline])
site.register(Promo)
site.register(ChapterXtra1, ChapterXtra1Admin)
site.register(Pizza, PizzaAdmin)
site.register(Topping, ToppingAdmin)
site.register(Album, AlbumAdmin)
site.register(Question)
site.register(Answer, date_hierarchy='question__posted')
site.register(PrePopulatedPost, PrePopulatedPostAdmin)
site.register(ComplexSortedPerson, ComplexSortedPersonAdmin)
site.register(FilteredManager, CustomManagerAdmin)
site.register(PluggableSearchPerson, PluggableSearchPersonAdmin)
site.register(PrePopulatedPostLargeSlug, PrePopulatedPostLargeSlugAdmin)
site.register(AdminOrderedField, AdminOrderedFieldAdmin)
site.register(AdminOrderedModelMethod, AdminOrderedModelMethodAdmin)
site.register(AdminOrderedAdminMethod, AdminOrderedAdminMethodAdmin)
site.register(AdminOrderedCallable, AdminOrderedCallableAdmin)
site.register(Color2, CustomTemplateFilterColorAdmin)
site.register(Simple, AttributeErrorRaisingAdmin)
site.register(UserMessenger, MessageTestingAdmin)
site.register(Choice, ChoiceList)
site.register(ParentWithDependentChildren, ParentWithDependentChildrenAdmin)
site.register(EmptyModelHidden, EmptyModelHiddenAdmin)
site.register(EmptyModelVisible, EmptyModelVisibleAdmin)
site.register(EmptyModelMixin, EmptyModelMixinAdmin)
site.register(StumpJoke)
site.register(Recipe)
site.register(Ingredient)
site.register(NotReferenced)
site.register(ExplicitlyProvidedPK, GetFormsetsArgumentCheckingAdmin)
site.register(ImplicitlyGeneratedPK, GetFormsetsArgumentCheckingAdmin)
# Register core models we need in our tests
site.register(User, UserAdmin)
site.register(Group, GroupAdmin)
# Used to test URL namespaces
site2 = admin.AdminSite(name="namespaced_admin")
site2.register(User, UserAdmin)
site2.register(Group, GroupAdmin)
site2.register(ParentWithUUIDPK)
site2.register(
RelatedWithUUIDPKModel,
list_display=['pk', 'parent'],
list_editable=['parent'],
raw_id_fields=['parent'],
)
site2.register(Person, save_as_continue=False)
site7 = admin.AdminSite(name="admin7")
site7.register(Article, ArticleAdmin2)
site7.register(Section)
|
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import pandas as pd
import unittest
import warnings
class TestConfig(unittest.TestCase):
_multiprocess_can_split_ = True
def __init__(self, *args):
super(TestConfig, self).__init__(*args)
from copy import deepcopy
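        # Snapshot the module-level config state so tearDown can restore it
        # after each test mutates the registry.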
self.cf = pd.core.config
self.gc = deepcopy(getattr(self.cf, '_global_config'))
self.do = deepcopy(getattr(self.cf, '_deprecated_options'))
self.ro = deepcopy(getattr(self.cf, '_registered_options'))
def setUp(self):
setattr(self.cf, '_global_config', {})
setattr(
self.cf, 'options', self.cf.DictWrapper(self.cf._global_config))
setattr(self.cf, '_deprecated_options', {})
setattr(self.cf, '_registered_options', {})
def tearDown(self):
setattr(self.cf, '_global_config', self.gc)
setattr(self.cf, '_deprecated_options', self.do)
setattr(self.cf, '_registered_options', self.ro)
def test_api(self):
# the pandas object exposes the user API
self.assertTrue(hasattr(pd, 'get_option'))
self.assertTrue(hasattr(pd, 'set_option'))
self.assertTrue(hasattr(pd, 'reset_option'))
self.assertTrue(hasattr(pd, 'describe_option'))
def test_is_one_of_factory(self):
v = self.cf.is_one_of_factory([None, 12])
v(12)
v(None)
self.assertRaises(ValueError, v, 1.1)
def test_register_option(self):
self.cf.register_option('a', 1, 'doc')
# can't register an already registered option
self.assertRaises(KeyError, self.cf.register_option, 'a', 1, 'doc')
# can't register an already registered option
self.assertRaises(KeyError, self.cf.register_option, 'a.b.c.d1', 1,
'doc')
self.assertRaises(KeyError, self.cf.register_option, 'a.b.c.d2', 1,
'doc')
# no python keywords
self.assertRaises(ValueError, self.cf.register_option, 'for', 0)
self.assertRaises(ValueError, self.cf.register_option, 'a.for.b', 0)
# must be valid identifier (ensure attribute access works)
self.assertRaises(ValueError, self.cf.register_option,
'Oh my Goddess!', 0)
# we can register options several levels deep
# without predefining the intermediate steps
# and we can define differently named options
# in the same namespace
self.cf.register_option('k.b.c.d1', 1, 'doc')
self.cf.register_option('k.b.c.d2', 1, 'doc')
def test_describe_option(self):
self.cf.register_option('a', 1, 'doc')
self.cf.register_option('b', 1, 'doc2')
self.cf.deprecate_option('b')
self.cf.register_option('c.d.e1', 1, 'doc3')
self.cf.register_option('c.d.e2', 1, 'doc4')
self.cf.register_option('f', 1)
self.cf.register_option('g.h', 1)
self.cf.register_option('k', 2)
self.cf.deprecate_option('g.h', rkey="k")
self.cf.register_option('l', "foo")
# non-existent keys raise KeyError
self.assertRaises(KeyError, self.cf.describe_option, 'no.such.key')
# we can get the description for any key we registered
self.assertTrue(
'doc' in self.cf.describe_option('a', _print_desc=False))
self.assertTrue(
'doc2' in self.cf.describe_option('b', _print_desc=False))
self.assertTrue(
'precated' in self.cf.describe_option('b', _print_desc=False))
self.assertTrue(
'doc3' in self.cf.describe_option('c.d.e1', _print_desc=False))
self.assertTrue(
'doc4' in self.cf.describe_option('c.d.e2', _print_desc=False))
# if no doc is specified we get a default message
# saying "description not available"
self.assertTrue(
'vailable' in self.cf.describe_option('f', _print_desc=False))
self.assertTrue(
'vailable' in self.cf.describe_option('g.h', _print_desc=False))
self.assertTrue(
'precated' in self.cf.describe_option('g.h', _print_desc=False))
self.assertTrue(
'k' in self.cf.describe_option('g.h', _print_desc=False))
# default is reported
self.assertTrue(
'foo' in self.cf.describe_option('l', _print_desc=False))
# current value is reported
self.assertFalse(
'bar' in self.cf.describe_option('l', _print_desc=False))
self.cf.set_option("l", "bar")
self.assertTrue(
'bar' in self.cf.describe_option('l', _print_desc=False))
def test_case_insensitive(self):
self.cf.register_option('KanBAN', 1, 'doc')
self.assertTrue(
'doc' in self.cf.describe_option('kanbaN', _print_desc=False))
self.assertEqual(self.cf.get_option('kanBaN'), 1)
self.cf.set_option('KanBan', 2)
self.assertEqual(self.cf.get_option('kAnBaN'), 2)
# gets of non-existent keys fail
self.assertRaises(KeyError, self.cf.get_option, 'no_such_option')
self.cf.deprecate_option('KanBan')
self.assertTrue(self.cf._is_deprecated('kAnBaN'))
def test_get_option(self):
self.cf.register_option('a', 1, 'doc')
self.cf.register_option('b.c', 'hullo', 'doc2')
self.cf.register_option('b.b', None, 'doc2')
# gets of existing keys succeed
self.assertEqual(self.cf.get_option('a'), 1)
self.assertEqual(self.cf.get_option('b.c'), 'hullo')
self.assertTrue(self.cf.get_option('b.b') is None)
# gets of non-existent keys fail
self.assertRaises(KeyError, self.cf.get_option, 'no_such_option')
def test_set_option(self):
self.cf.register_option('a', 1, 'doc')
self.cf.register_option('b.c', 'hullo', 'doc2')
self.cf.register_option('b.b', None, 'doc2')
self.assertEqual(self.cf.get_option('a'), 1)
self.assertEqual(self.cf.get_option('b.c'), 'hullo')
self.assertTrue(self.cf.get_option('b.b') is None)
self.cf.set_option('a', 2)
self.cf.set_option('b.c', 'wurld')
self.cf.set_option('b.b', 1.1)
self.assertEqual(self.cf.get_option('a'), 2)
self.assertEqual(self.cf.get_option('b.c'), 'wurld')
self.assertEqual(self.cf.get_option('b.b'), 1.1)
self.assertRaises(KeyError, self.cf.set_option, 'no.such.key', None)
def test_set_option_empty_args(self):
self.assertRaises(ValueError, self.cf.set_option)
def test_set_option_uneven_args(self):
self.assertRaises(ValueError, self.cf.set_option, 'a.b', 2, 'b.c')
def test_set_option_invalid_single_argument_type(self):
self.assertRaises(ValueError, self.cf.set_option, 2)
def test_set_option_multiple(self):
self.cf.register_option('a', 1, 'doc')
self.cf.register_option('b.c', 'hullo', 'doc2')
self.cf.register_option('b.b', None, 'doc2')
self.assertEqual(self.cf.get_option('a'), 1)
self.assertEqual(self.cf.get_option('b.c'), 'hullo')
self.assertTrue(self.cf.get_option('b.b') is None)
self.cf.set_option('a', '2', 'b.c', None, 'b.b', 10.0)
self.assertEqual(self.cf.get_option('a'), '2')
self.assertTrue(self.cf.get_option('b.c') is None)
self.assertEqual(self.cf.get_option('b.b'), 10.0)
def test_validation(self):
self.cf.register_option('a', 1, 'doc', validator=self.cf.is_int)
self.cf.register_option('b.c', 'hullo', 'doc2',
validator=self.cf.is_text)
self.assertRaises(ValueError, self.cf.register_option, 'a.b.c.d2',
'NO', 'doc', validator=self.cf.is_int)
self.cf.set_option('a', 2) # int is_int
self.cf.set_option('b.c', 'wurld') # str is_str
self.assertRaises(
ValueError, self.cf.set_option, 'a', None) # None not is_int
self.assertRaises(ValueError, self.cf.set_option, 'a', 'ab')
self.assertRaises(ValueError, self.cf.set_option, 'b.c', 1)
validator = self.cf.is_one_of_factory([None, self.cf.is_callable])
self.cf.register_option('b', lambda: None, 'doc',
validator=validator)
self.cf.set_option('b', '%.1f'.format) # Formatter is callable
self.cf.set_option('b', None) # Formatter is none (default)
self.assertRaises(ValueError, self.cf.set_option, 'b', '%.1f')
def test_reset_option(self):
self.cf.register_option('a', 1, 'doc', validator=self.cf.is_int)
self.cf.register_option('b.c', 'hullo', 'doc2',
validator=self.cf.is_str)
self.assertEqual(self.cf.get_option('a'), 1)
self.assertEqual(self.cf.get_option('b.c'), 'hullo')
self.cf.set_option('a', 2)
self.cf.set_option('b.c', 'wurld')
self.assertEqual(self.cf.get_option('a'), 2)
self.assertEqual(self.cf.get_option('b.c'), 'wurld')
self.cf.reset_option('a')
self.assertEqual(self.cf.get_option('a'), 1)
self.assertEqual(self.cf.get_option('b.c'), 'wurld')
self.cf.reset_option('b.c')
self.assertEqual(self.cf.get_option('a'), 1)
self.assertEqual(self.cf.get_option('b.c'), 'hullo')
def test_reset_option_all(self):
self.cf.register_option('a', 1, 'doc', validator=self.cf.is_int)
self.cf.register_option('b.c', 'hullo', 'doc2',
validator=self.cf.is_str)
self.assertEqual(self.cf.get_option('a'), 1)
self.assertEqual(self.cf.get_option('b.c'), 'hullo')
self.cf.set_option('a', 2)
self.cf.set_option('b.c', 'wurld')
self.assertEqual(self.cf.get_option('a'), 2)
self.assertEqual(self.cf.get_option('b.c'), 'wurld')
self.cf.reset_option("all")
self.assertEqual(self.cf.get_option('a'), 1)
self.assertEqual(self.cf.get_option('b.c'), 'hullo')
def test_deprecate_option(self):
# we can deprecate non-existent options
self.cf.deprecate_option('foo')
self.assertTrue(self.cf._is_deprecated('foo'))
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
try:
self.cf.get_option('foo')
except KeyError:
pass
else:
self.fail("Nonexistent option didn't raise KeyError")
self.assertEqual(len(w), 1) # should have raised one warning
self.assertTrue(
'deprecated' in str(w[-1])) # we get the default message
self.cf.register_option('a', 1, 'doc', validator=self.cf.is_int)
self.cf.register_option('b.c', 'hullo', 'doc2')
self.cf.register_option('foo', 'hullo', 'doc2')
self.cf.deprecate_option('a', removal_ver='nifty_ver')
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
self.cf.get_option('a')
self.assertEqual(len(w), 1) # should have raised one warning
self.assertTrue(
'eprecated' in str(w[-1])) # we get the default message
self.assertTrue(
'nifty_ver' in str(w[-1])) # with the removal_ver quoted
self.assertRaises(
KeyError, self.cf.deprecate_option, 'a') # can't depr. twice
self.cf.deprecate_option('b.c', 'zounds!')
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
self.cf.get_option('b.c')
self.assertEqual(len(w), 1) # should have raised one warning
self.assertTrue(
'zounds!' in str(w[-1])) # we get the custom message
# test rerouting keys
self.cf.register_option('d.a', 'foo', 'doc2')
self.cf.register_option('d.dep', 'bar', 'doc2')
self.assertEqual(self.cf.get_option('d.a'), 'foo')
self.assertEqual(self.cf.get_option('d.dep'), 'bar')
self.cf.deprecate_option('d.dep', rkey='d.a') # reroute d.dep to d.a
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
self.assertEqual(self.cf.get_option('d.dep'), 'foo')
self.assertEqual(len(w), 1) # should have raised one warning
self.assertTrue(
'eprecated' in str(w[-1])) # we get the custom message
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
self.cf.set_option('d.dep', 'baz') # should overwrite "d.a"
self.assertEqual(len(w), 1) # should have raised one warning
self.assertTrue(
'eprecated' in str(w[-1])) # we get the custom message
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
self.assertEqual(self.cf.get_option('d.dep'), 'baz')
self.assertEqual(len(w), 1) # should have raised one warning
self.assertTrue(
'eprecated' in str(w[-1])) # we get the custom message
def test_config_prefix(self):
with self.cf.config_prefix("base"):
self.cf.register_option('a', 1, "doc1")
self.cf.register_option('b', 2, "doc2")
self.assertEqual(self.cf.get_option('a'), 1)
self.assertEqual(self.cf.get_option('b'), 2)
self.cf.set_option('a', 3)
self.cf.set_option('b', 4)
self.assertEqual(self.cf.get_option('a'), 3)
self.assertEqual(self.cf.get_option('b'), 4)
self.assertEqual(self.cf.get_option('base.a'), 3)
self.assertEqual(self.cf.get_option('base.b'), 4)
self.assertTrue(
'doc1' in self.cf.describe_option('base.a', _print_desc=False))
self.assertTrue(
'doc2' in self.cf.describe_option('base.b', _print_desc=False))
self.cf.reset_option('base.a')
self.cf.reset_option('base.b')
with self.cf.config_prefix("base"):
self.assertEqual(self.cf.get_option('a'), 1)
self.assertEqual(self.cf.get_option('b'), 2)
def test_callback(self):
k = [None]
v = [None]
def callback(key):
k.append(key)
v.append(self.cf.get_option(key))
self.cf.register_option('d.a', 'foo', cb=callback)
self.cf.register_option('d.b', 'foo', cb=callback)
del k[-1], v[-1]
self.cf.set_option("d.a", "fooz")
self.assertEqual(k[-1], "d.a")
self.assertEqual(v[-1], "fooz")
del k[-1], v[-1]
self.cf.set_option("d.b", "boo")
self.assertEqual(k[-1], "d.b")
self.assertEqual(v[-1], "boo")
del k[-1], v[-1]
self.cf.reset_option("d.b")
self.assertEqual(k[-1], "d.b")
def test_set_ContextManager(self):
def eq(val):
self.assertEqual(self.cf.get_option("a"), val)
self.cf.register_option('a', 0)
eq(0)
with self.cf.option_context("a", 15):
eq(15)
with self.cf.option_context("a", 25):
eq(25)
eq(15)
eq(0)
self.cf.set_option("a", 17)
eq(17)
def test_attribute_access(self):
holder = []
def f():
options.b = 1
def f2():
options.display = 1
def f3(key):
holder.append(True)
self.cf.register_option('a', 0)
self.cf.register_option('c', 0, cb=f3)
options = self.cf.options
self.assertEqual(options.a, 0)
with self.cf.option_context("a", 15):
self.assertEqual(options.a, 15)
options.a = 500
self.assertEqual(self.cf.get_option("a"), 500)
self.cf.reset_option("a")
self.assertEqual(options.a, self.cf.get_option("a", 0))
self.assertRaises(KeyError, f)
self.assertRaises(KeyError, f2)
# make sure callback kicks when using this form of setting
options.c = 1
self.assertEqual(len(holder), 1)
def test_option_context_scope(self):
# Ensure that creating a context does not affect the existing
# environment as it is supposed to be used with the `with` statement.
# See https://github.com/pydata/pandas/issues/8514
original_value = 60
context_value = 10
option_name = 'a'
self.cf.register_option(option_name, original_value)
# Ensure creating contexts didn't affect the current context.
ctx = self.cf.option_context(option_name, context_value)
self.assertEqual(self.cf.get_option(option_name), original_value)
# Ensure the correct value is available inside the context.
with ctx:
self.assertEqual(self.cf.get_option(option_name), context_value)
# Ensure the current context is reset
self.assertEqual(self.cf.get_option(option_name), original_value)
|
|
#!/usr/bin/env python3
# Copyright (c) 2015-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test processing of unrequested blocks.
Since behavior differs when receiving unrequested blocks from whitelisted peers
versus non-whitelisted peers, this tests the behavior of both (effectively two
separate tests running in parallel).
Setup: two nodes, node0 and node1, not connected to each other. Node0 does not
whitelist localhost, but node1 does. They will each be on their own chain for
this test.
We have one NodeConn connection to each, test_node and white_node respectively.
The test:
1. Generate one block on each node, to leave IBD.
2. Mine a new block on each tip, and deliver to each node from node's peer.
The tip should advance.
3. Mine a block that forks the previous block, and deliver to each node from
corresponding peer.
Node0 should not process this block (just accept the header), because it is
unrequested and doesn't have more work than the tip.
Node1 should process because this is coming from a whitelisted peer.
4. Send another block that builds on the forking block.
Node0 should process this block but be stuck on the shorter chain, because
it's missing an intermediate block.
Node1 should reorg to this longer chain.
4b. Send 288 more blocks on the longer chain.
Node0 should process all but the last block (too far ahead in height).
Send all headers to Node1, and then send the last block in that chain.
Node1 should accept the block because it's coming from a whitelisted peer.
5. Send a duplicate of the block in #3 to Node0.
Node0 should not process the block because it is unrequested, and stay on
the shorter chain.
6. Send Node0 an inv for the height 3 block produced in #4 above.
   Node0 should figure out that it is missing the height 2 block and send a
   getdata for it.
7. Send Node0 the missing block again.
Node0 should process and the tip should advance.
"""
from test_framework.mininode import *
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
import time
from test_framework.blocktools import create_block, create_coinbase
class AcceptBlockTest(BitcoinTestFramework):
def add_options(self, parser):
parser.add_option("--testbinary", dest="testbinary",
default=os.getenv("BITCOIND", "bgoldd"),
help="bgoldd binary to test")
def __init__(self):
super().__init__()
self.setup_clean_chain = True
self.num_nodes = 2
self.extra_args = [[], ["-whitelist=127.0.0.1"]]
def setup_network(self):
# Node0 will be used to test behavior of processing unrequested blocks
# from peers which are not whitelisted, while Node1 will be used for
# the whitelisted case.
self.setup_nodes()
def run_test(self):
# Setup the p2p connections and start up the network thread.
test_node = NodeConnCB() # connects to node0 (not whitelisted)
white_node = NodeConnCB() # connects to node1 (whitelisted)
connections = []
connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], test_node))
connections.append(NodeConn('127.0.0.1', p2p_port(1), self.nodes[1], white_node))
test_node.add_connection(connections[0])
white_node.add_connection(connections[1])
NetworkThread().start() # Start up network handling in another thread
# Test logic begins here
test_node.wait_for_verack()
white_node.wait_for_verack()
# 1. Have both nodes mine a block (leave IBD)
[ n.generate(1) for n in self.nodes ]
tips = [ int("0x" + n.getbestblockhash(), 0) for n in self.nodes ]
# 2. Send one block that builds on each tip.
# This should be accepted.
blocks_h2 = [] # the height 2 blocks on each node's chain
block_time = int(time.time()) + 1
for i in range(2):
blocks_h2.append(create_block(tips[i], create_coinbase(2), block_time))
blocks_h2[i].solve()
block_time += 1
test_node.send_message(msg_block(blocks_h2[0]))
white_node.send_message(msg_block(blocks_h2[1]))
[ x.sync_with_ping() for x in [test_node, white_node] ]
assert_equal(self.nodes[0].getblockcount(), 2)
assert_equal(self.nodes[1].getblockcount(), 2)
self.log.info("First height 2 block accepted by both nodes")
# 3. Send another block that builds on the original tip.
blocks_h2f = [] # Blocks at height 2 that fork off the main chain
for i in range(2):
blocks_h2f.append(create_block(tips[i], create_coinbase(2), blocks_h2[i].nTime+1))
blocks_h2f[i].solve()
test_node.send_message(msg_block(blocks_h2f[0]))
white_node.send_message(msg_block(blocks_h2f[1]))
[ x.sync_with_ping() for x in [test_node, white_node] ]
for x in self.nodes[0].getchaintips():
if x['hash'] == blocks_h2f[0].hash:
assert_equal(x['status'], "headers-only")
for x in self.nodes[1].getchaintips():
if x['hash'] == blocks_h2f[1].hash:
assert_equal(x['status'], "valid-headers")
self.log.info("Second height 2 block accepted only from whitelisted peer")
# 4. Now send another block that builds on the forking chain.
blocks_h3 = []
for i in range(2):
blocks_h3.append(create_block(blocks_h2f[i].sha256, create_coinbase(3), blocks_h2f[i].nTime+1))
blocks_h3[i].solve()
test_node.send_message(msg_block(blocks_h3[0]))
white_node.send_message(msg_block(blocks_h3[1]))
[ x.sync_with_ping() for x in [test_node, white_node] ]
# Since the earlier block was not processed by node0, the new block
# can't be fully validated.
for x in self.nodes[0].getchaintips():
if x['hash'] == blocks_h3[0].hash:
assert_equal(x['status'], "headers-only")
# But this block should be accepted by node0 since it has more work.
self.nodes[0].getblock(blocks_h3[0].hash)
self.log.info("Unrequested more-work block accepted from non-whitelisted peer")
# Node1 should have accepted and reorged.
assert_equal(self.nodes[1].getblockcount(), 3)
self.log.info("Successfully reorged to length 3 chain from whitelisted peer")
# 4b. Now mine 288 more blocks and deliver; all should be processed but
# the last (height-too-high) on node0. Node1 should process the tip if
# we give it the headers chain leading to the tip.
tips = blocks_h3
headers_message = msg_headers()
all_blocks = [] # node0's blocks
for j in range(2):
for i in range(288):
next_block = create_block(tips[j].sha256, create_coinbase(i + 4), tips[j].nTime+1)
next_block.solve()
if j==0:
test_node.send_message(msg_block(next_block))
all_blocks.append(next_block)
else:
headers_message.headers.append(CBlockHeader(next_block))
tips[j] = next_block
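        # Give node0 a moment to process the blocks delivered above before
        # querying them via getblock below.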
time.sleep(2)
# Blocks 1-287 should be accepted, block 288 should be ignored because it's too far ahead
for x in all_blocks[:-1]:
self.nodes[0].getblock(x.hash)
assert_raises_jsonrpc(-1, "Block not found on disk", self.nodes[0].getblock, all_blocks[-1].hash)
headers_message.headers.pop() # Ensure the last block is unrequested
white_node.send_message(headers_message) # Send headers leading to tip
white_node.send_message(msg_block(tips[1])) # Now deliver the tip
white_node.sync_with_ping()
self.nodes[1].getblock(tips[1].hash)
self.log.info("Unrequested block far ahead of tip accepted from whitelisted peer")
# 5. Test handling of unrequested block on the node that didn't process
# Should still not be processed (even though it has a child that has more
# work).
test_node.send_message(msg_block(blocks_h2f[0]))
# Here, if the sleep is too short, the test could falsely succeed (if the
# node hasn't processed the block by the time the sleep returns, and then
# the node processes it and incorrectly advances the tip).
# But this would be caught later on, when we verify that an inv triggers
# a getdata request for this block.
test_node.sync_with_ping()
assert_equal(self.nodes[0].getblockcount(), 2)
self.log.info("Unrequested block that would complete more-work chain was ignored")
# 6. Try to get node to request the missing block.
# Poke the node with an inv for block at height 3 and see if that
# triggers a getdata on block 2 (it should if block 2 is missing).
with mininode_lock:
# Clear state so we can check the getdata request
test_node.last_message.pop("getdata", None)
test_node.send_message(msg_inv([CInv(2, blocks_h3[0].sha256)]))
test_node.sync_with_ping()
with mininode_lock:
getdata = test_node.last_message["getdata"]
# Check that the getdata includes the right block
assert_equal(getdata.inv[0].hash, blocks_h2f[0].sha256)
self.log.info("Inv at tip triggered getdata for unprocessed block")
# 7. Send the missing block for the third time (now it is requested)
test_node.send_message(msg_block(blocks_h2f[0]))
test_node.sync_with_ping()
assert_equal(self.nodes[0].getblockcount(), 290)
self.log.info("Successfully reorged to longer chain from non-whitelisted peer")
[ c.disconnect_node() for c in connections ]
if __name__ == '__main__':
AcceptBlockTest().main()
|
|
"""
This script has been used to compute the Neumann results of the paper
On efficient Chebyshev-Galerkin methods for second-order equations
The results were computed using Python 3.9 and Shenfun 3.2.2, with the
Numba optimization enabled by calling this script with the --numba flag,
e.g.,
    python CGpaper_naumann.py --return_type 2 --numba
The generalized Chebyshev-Tau results are computed with dedalus and, as
such, are not part of this script.
"""
import sympy as sp
import numpy as np
import array_to_latex as a2l
x = sp.Symbol('x', real=True)
fe = {}
def matvec(u_hat, f_hat, A, B, alpha, method):
"""Compute matrix vector product
Parameters
----------
u_hat : Function
The solution array
f_hat : Function
The right hand side array
A : SparseMatrix
The stiffness matrix
B : SparseMatrix
The mass matrix
alpha : number
The weight of the mass matrix
method : int
The chosen method
"""
from shenfun import chebyshev
if method == 2:
if alpha == 0:
f_hat = A.matvec(-u_hat, f_hat)
else:
sol = chebyshev.la.Helmholtz(A, B, -1, alpha)
f_hat = sol.matvec(u_hat, f_hat)
else:
if alpha == 0:
f_hat[:-2] = A.diags() * u_hat[:-2]
f_hat *= -1
else:
M = alpha*B - A
f_hat[:-2] = M.diags() * u_hat[:-2]
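    # The zeroth coefficient is pinned to zero throughout this script, since
    # the Neumann solution is only determined up to a constant (see also
    # u_hat[0] = 0 in solve below).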
f_hat[0] = 0
return f_hat
def solve(f_hat, u_hat, A, B, alpha, method):
"""Solve problem for given method
Parameters
----------
f_hat : Function
The right hand side array
u_hat : Function
The solution array
A : SparseMatrix
The stiffness matrix
B : SparseMatrix
The mass matrix
alpha : number
The weight of the mass matrix
method : int
The chosen method
"""
from shenfun import chebyshev, la
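    # With alpha == 0 the Neumann problem is singular (constant nullspace),
    # so the zeroth coefficient is constrained to zero.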
constraints = ((0, 0),) if alpha == 0 else ()
if method == 2:
if alpha == 0:
sol = la.Solver(A)
f_hat *= -1
else:
sol = chebyshev.la.Helmholtz(A, B, -1, alpha)
elif method in (0, 1, 3, 4):
if alpha == 0:
sol = la.TwoDMA(A)
f_hat *= -1
else:
sol = la.FDMA(alpha*B-A)
elif method == 5:
if alpha == 0:
sol = la.Solver(A)
f_hat *= -1
else:
sol = la.TDMA(alpha*B-A)
else:
sol = la.Solver(alpha*B-A)
u_hat = sol(f_hat, u_hat, constraints=constraints)
u_hat[0] = 0
return u_hat
def main(N, method=0, alpha=0, returntype=0):
from shenfun import FunctionSpace, TrialFunction, TestFunction, \
inner, div, grad, chebyshev, SparseMatrix, Function, Array, \
jacobi, extract_diagonal_matrix
global fe
basis = {0: ('ShenNeumann', 'CombinedShenNeumann'),
1: ('ShenDirichlet', 'MikNeumann'),
2: ('ShenNeumann', 'ShenNeumann'),
3: ('Phi1', 'ShenNeumann'),
4: ('Orthogonal', 'ShenNeumann'), # Quasi-Galerkin
5: ('ShenNeumann', 'ShenNeumann'), # Legendre
}
test, trial = basis[method]
family = 'C' if method < 5 else 'L'
famtest = 'U' if method == 3 else family
#M = N if method == 3 else N
kw = {}
if method == 3:
kw['scaled'] = True
test = FunctionSpace(N, famtest, basis=test, **kw)
trial = FunctionSpace(N, family, basis=trial)
v = TestFunction(test)
u = TrialFunction(trial)
A = inner(v, div(grad(u)))
B = inner(v, u)
if method == 4:
# Quasi preconditioning
#Q2 = chebyshev.quasi.QIGmat(N)
#A = Q2*A
#B = Q2*B
Q2 = jacobi.recursions.ShiftedMatrix(jacobi.recursions.b, 2, 2, 0, N-2, N, alf=-sp.S.Half, bet=-sp.S.Half, gn=jacobi.recursions.cn)
#I2 = jacobi.recursions.Lmat(2, 0, 0, N-2, N, -sp.S.Half, -sp.S.Half, jacobi.recursions.cn)
I2 = SparseMatrix({2: 1}, (N-2, N)).diags('csr')
K = trial.stencil_matrix()
K.shape = (N-2, N)
K = K.diags('csr')
BT = inner(v, TrialFunction(test)).diags('csr')
Q2[0][0] /= 2
Q2[2][-2:] = 0
Q2[4][-2:] = 0
A = extract_diagonal_matrix(I2*BT*K.T, lowerband=0, upperband=2)
B = extract_diagonal_matrix(Q2.diags('csr')*BT*K.T, lowerband=2, upperband=4)
if returntype == 0:
if alpha == 0:
con = np.linalg.cond(A.diags().toarray()[1:, 1:])
else:
con = np.linalg.cond(alpha*B.diags().toarray()-A.diags().toarray())
elif returntype == 1:
v = Function(trial, buffer=np.random.random(N))
v[0] = 0
v[-2:] = 0
u_hat = Function(trial)
f_hat = Function(trial)
f_hat = matvec(v, f_hat, A, B, alpha, method)
u_hat = solve(f_hat, u_hat, A, B, alpha, method)
con = np.abs(u_hat-v).max()
elif returntype == 2:
ue = sp.cos(100*sp.pi*x)
fe = alpha*ue-ue.diff(x, 2)
f_hat = Function(test)
fj = Array(test, buffer=fe)
f_hat = test.scalar_product(fj, f_hat, fast_transform=True)
if method == 4:
f_hat[:-2] = Q2.diags('csc')*f_hat
u_hat = Function(trial)
u_hat = solve(f_hat, u_hat, A, B, alpha, method)
uj = Array(trial)
uj = trial.backward(u_hat, uj, fast_transform=True)
ua = Array(trial, buffer=ue)
xj, wj = trial.points_and_weights()
if family == 'C':
ua -= np.sum(ua*wj)/np.pi # normalize
uj -= np.sum(uj*wj)/np.pi # normalize
else:
ua -= np.sum(ua*wj)/2 # normalize
uj -= np.sum(uj*wj)/2 # normalize
con = np.sqrt(inner(1, (uj-ua)**2))
#con = np.max(abs(uj-ua))
return con
if __name__ == '__main__':
import matplotlib.pyplot as plt
import argparse
import os
import sys
import yaml
parser = argparse.ArgumentParser(description='Solve the Helmholtz problem with Neumann boundary conditions')
parser.add_argument('--return_type', action='store', type=int, required=True)
parser.add_argument('--include_legendre', action='store_true')
parser.add_argument('--verbose', '-v', action='count', default=0)
parser.add_argument('--plot', action='store_true')
parser.add_argument('--numba', action='store_true')
args = parser.parse_args()
if args.numba:
try:
import numba
cfg = {'optimization': {'mode': 'numba', 'verbose': False}}
            with open('shenfun.yaml', 'w') as f:
                yaml.dump(cfg, f)
except ModuleNotFoundError:
            print('Numba not found - using Cython', file=sys.stderr)
cond = []
M = 6 if args.include_legendre else 5
if args.return_type == 2:
        N = (2**4, 2**6, 2**8, 2**12)  # optionally also 2**16, 2**20
elif args.return_type == 1:
N = (2**4, 2**12, 2**20)
else:
N = (32, 64, 128, 256, 512, 1024, 2048)
alphas = (0, 1000)
if args.return_type in (0, 2):
for alpha in alphas:
cond.append([])
if args.verbose > 0:
print('alpha =', alpha)
for basis in range(M): # To include Legendre use --include_legendre (takes hours for N=2**20)
if args.verbose > 1:
print('Method =', basis)
cond[-1].append([])
for n in N:
if args.verbose > 2:
print('N =', n)
cond[-1][-1].append(main(n, basis, alpha, args.return_type))
linestyle = {0: 'solid', 1: 'dashed', 2: 'dotted'}
for i in range(len(cond)):
plt.loglog(N, cond[i][0], 'b',
N, cond[i][1], 'r',
N, cond[i][2], 'k',
N, cond[i][3], 'm',
N, cond[i][4], 'y',
linestyle=linestyle[i])
if args.include_legendre:
plt.loglog(N, cond[i][5], 'y', linestyle=linestyle[i])
a2l.to_ltx(np.array(cond)[i], frmt='{:6.2e}', print_out=True, mathform=False)
else:
for basis in range(M):
cond.append([])
if args.verbose > 1:
print('Method =', basis)
for alpha in alphas:
if args.verbose > 0:
print('alpha =', alpha)
for n in N:
if args.verbose > 2:
print('N =', n)
cond[-1].append(main(n, basis, alpha, args.return_type))
a2l.to_ltx(np.array(cond), frmt='{:6.2e}', print_out=True, mathform=False)
if os.path.exists('shenfun.yaml'):
os.remove('shenfun.yaml')
if args.plot:
plt.show()
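# Example invocations (illustrative; the flag names match the parser above,
# the script filename is a placeholder):
#   python <this_script>.py --return_type 0 --plot -vvv
#   python <this_script>.py --return_type 2 --include_legendre --numba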
|
|
"""
Provides functionality for mailboxes.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/mailbox/
"""
import asyncio
from contextlib import suppress
from datetime import timedelta
import logging
from aiohttp import web
from aiohttp.web_exceptions import HTTPNotFound
import async_timeout
from homeassistant.components.http import HomeAssistantView
from homeassistant.core import callback
from homeassistant.exceptions import HomeAssistantError
from homeassistant.helpers import config_per_platform, discovery
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.entity_component import EntityComponent
from homeassistant.setup import async_prepare_setup_platform
_LOGGER = logging.getLogger(__name__)
DEPENDENCIES = ['http']
DOMAIN = 'mailbox'
EVENT = 'mailbox_updated'
CONTENT_TYPE_MPEG = 'audio/mpeg'
CONTENT_TYPE_NONE = 'none'
SCAN_INTERVAL = timedelta(seconds=30)
async def async_setup(hass, config):
"""Track states and offer events for mailboxes."""
mailboxes = []
await hass.components.frontend.async_register_built_in_panel(
'mailbox', 'mailbox', 'mdi:mailbox')
hass.http.register_view(MailboxPlatformsView(mailboxes))
hass.http.register_view(MailboxMessageView(mailboxes))
hass.http.register_view(MailboxMediaView(mailboxes))
hass.http.register_view(MailboxDeleteView(mailboxes))
async def async_setup_platform(p_type, p_config=None, discovery_info=None):
"""Set up a mailbox platform."""
if p_config is None:
p_config = {}
if discovery_info is None:
discovery_info = {}
platform = await async_prepare_setup_platform(
hass, config, DOMAIN, p_type)
if platform is None:
_LOGGER.error("Unknown mailbox platform specified")
return
_LOGGER.info("Setting up %s.%s", DOMAIN, p_type)
mailbox = None
try:
if hasattr(platform, 'async_get_handler'):
mailbox = await \
platform.async_get_handler(hass, p_config, discovery_info)
elif hasattr(platform, 'get_handler'):
mailbox = await hass.async_add_executor_job(
platform.get_handler, hass, p_config, discovery_info)
else:
raise HomeAssistantError("Invalid mailbox platform.")
if mailbox is None:
_LOGGER.error(
"Failed to initialize mailbox platform %s", p_type)
return
except Exception: # pylint: disable=broad-except
_LOGGER.exception('Error setting up platform %s', p_type)
return
mailboxes.append(mailbox)
mailbox_entity = MailboxEntity(mailbox)
component = EntityComponent(
logging.getLogger(__name__), DOMAIN, hass, SCAN_INTERVAL)
await component.async_add_entities([mailbox_entity])
setup_tasks = [async_setup_platform(p_type, p_config) for p_type, p_config
in config_per_platform(config, DOMAIN)]
if setup_tasks:
await asyncio.wait(setup_tasks, loop=hass.loop)
async def async_platform_discovered(platform, info):
"""Handle for discovered platform."""
await async_setup_platform(platform, discovery_info=info)
discovery.async_listen_platform(hass, DOMAIN, async_platform_discovered)
return True
class MailboxEntity(Entity):
"""Entity for each mailbox platform to provide a badge display."""
def __init__(self, mailbox):
"""Initialize mailbox entity."""
self.mailbox = mailbox
self.message_count = 0
async def async_added_to_hass(self):
"""Complete entity initialization."""
@callback
def _mailbox_updated(event):
self.async_schedule_update_ha_state(True)
self.hass.bus.async_listen(EVENT, _mailbox_updated)
self.async_schedule_update_ha_state(True)
@property
def state(self):
"""Return the state of the binary sensor."""
return str(self.message_count)
@property
def name(self):
"""Return the name of the entity."""
return self.mailbox.name
async def async_update(self):
"""Retrieve messages from platform."""
messages = await self.mailbox.async_get_messages()
self.message_count = len(messages)
class Mailbox:
"""Represent a mailbox device."""
def __init__(self, hass, name):
"""Initialize mailbox object."""
self.hass = hass
self.name = name
def async_update(self):
"""Send event notification of updated mailbox."""
self.hass.bus.async_fire(EVENT)
@property
def media_type(self):
"""Return the supported media type."""
raise NotImplementedError()
@property
def can_delete(self):
"""Return if messages can be deleted."""
return False
@property
def has_media(self):
"""Return if messages have attached media files."""
return False
async def async_get_media(self, msgid):
"""Return the media blob for the msgid."""
raise NotImplementedError()
async def async_get_messages(self):
"""Return a list of the current messages."""
raise NotImplementedError()
def async_delete(self, msgid):
"""Delete the specified messages."""
raise NotImplementedError()
class StreamError(Exception):
"""Media streaming exception."""
class MailboxView(HomeAssistantView):
"""Base mailbox view."""
def __init__(self, mailboxes):
"""Initialize a basic mailbox view."""
self.mailboxes = mailboxes
def get_mailbox(self, platform):
"""Retrieve the specified mailbox."""
for mailbox in self.mailboxes:
if mailbox.name == platform:
return mailbox
raise HTTPNotFound
class MailboxPlatformsView(MailboxView):
"""View to return the list of mailbox platforms."""
url = "/api/mailbox/platforms"
name = "api:mailbox:platforms"
async def get(self, request):
"""Retrieve list of platforms."""
platforms = []
for mailbox in self.mailboxes:
platforms.append(
{
'name': mailbox.name,
'has_media': mailbox.has_media,
'can_delete': mailbox.can_delete
})
return self.json(platforms)
class MailboxMessageView(MailboxView):
"""View to return the list of messages."""
url = "/api/mailbox/messages/{platform}"
name = "api:mailbox:messages"
async def get(self, request, platform):
"""Retrieve messages."""
mailbox = self.get_mailbox(platform)
messages = await mailbox.async_get_messages()
return self.json(messages)
class MailboxDeleteView(MailboxView):
"""View to delete selected messages."""
url = "/api/mailbox/delete/{platform}/{msgid}"
name = "api:mailbox:delete"
async def delete(self, request, platform, msgid):
"""Delete items."""
mailbox = self.get_mailbox(platform)
mailbox.async_delete(msgid)
class MailboxMediaView(MailboxView):
"""View to return a media file."""
url = r"/api/mailbox/media/{platform}/{msgid}"
name = "api:asteriskmbox:media"
async def get(self, request, platform, msgid):
"""Retrieve media."""
mailbox = self.get_mailbox(platform)
hass = request.app['hass']
with suppress(asyncio.CancelledError, asyncio.TimeoutError):
with async_timeout.timeout(10, loop=hass.loop):
try:
stream = await mailbox.async_get_media(msgid)
except StreamError as err:
error_msg = "Error getting media: %s" % (err)
_LOGGER.error(error_msg)
return web.Response(status=500)
if stream:
return web.Response(body=stream,
content_type=mailbox.media_type)
return web.Response(status=500)
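# --- Illustrative sketch (not part of Home Assistant) -----------------------
# A minimal in-memory mailbox showing the handler contract expected by
# async_setup_platform above: a platform module exposes get_handler() (or
# async_get_handler()) returning a Mailbox subclass. The class name and the
# message-dict shape below are assumptions for illustration only.
class DemoMailbox(Mailbox):
    """A toy mailbox holding a single static message."""
    @property
    def media_type(self):
        """Return the supported media type."""
        return CONTENT_TYPE_MPEG
    async def async_get_messages(self):
        """Return a static message list (dict shape is an assumption)."""
        return [{"info": {"origtime": 0}, "text": "demo message", "sha": "0" * 40}]
    async def async_get_media(self, msgid):
        """The demo message carries no media."""
        raise StreamError("no media for %s" % msgid)
def get_handler(hass, config, discovery_info=None):
    """Platform entry point (sketch): return the mailbox handler."""
    return DemoMailbox(hass, "demo")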
|
|
from lib import log
from lib.log import exception, warning
from version import VERSION_FOR_BUG_REPORTS
from paths import CLIENT_LOG_PATH
log.set_version(VERSION_FOR_BUG_REPORTS)
log.add_secure_file_handler(CLIENT_LOG_PATH, "w")
log.add_http_handler("http://jlpo.free.fr/soundrts/metaserver")
log.add_console_handler()
import locale
try:
locale.setlocale(locale.LC_ALL, '')
except locale.Error:
warning("couldn't set locale")
import os
import pickle
import sys
import time
import urllib
import webbrowser
from clientmedia import voice, init_media, close_media
from clientmenu import Menu, input_string, END_LOOP
from clientserver import connect_and_play, start_server_and_connect
from clientversion import revision_checker
import config
from constants import MAIN_METASERVER_URL
from definitions import style
from game import TrainingGame, ReplayGame
from lib.msgs import nb2msg
from paths import CONFIG_DIR_PATH, REPLAYS_PATH, SAVE_PATH
import res
import stats
from version import VERSION
_ds = open("cfg/default_servers.txt").readlines()
_ds = [_x.split() for _x in _ds]
DEFAULT_SERVERS = [" ".join(["0"] + _x[:1] + [VERSION] + _x[1:]) for _x in _ds]
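# Format note (inferred from the list comprehension above and the parsing in
# choose_server_ip_in_a_list below): each line of cfg/default_servers.txt is
# "<ip> <login> <port>"; the prepended "0" stands in for the timestamp field
# that metaserver entries carry, yielding "0 <ip> <VERSION> <login> <port>".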
SERVERS_LIST_HEADER = "SERVERS_LIST"
SERVERS_LIST_URL = MAIN_METASERVER_URL + "servers.php?header=%s&include_ports=1" % SERVERS_LIST_HEADER
class Application(object):
def choose_server_ip_in_a_list(self):
servers_list = None
try:
f = urllib.urlopen(SERVERS_LIST_URL)
if f.read(len(SERVERS_LIST_HEADER)) == SERVERS_LIST_HEADER:
servers_list = f.readlines()
except:
pass
if servers_list is None:
voice.alert([1029]) # hostile sound
warning("couldn't get the servers list from the metaserver"
" => using the default servers list")
servers_list = DEFAULT_SERVERS
nb = 0
menu = Menu()
for s in servers_list:
try:
ip, version, login, port = s.split()[1:]
# ignore the first parameter (time)
except:
warning("line not recognized from the metaserver: %s", s)
continue
nb += 1
if version == VERSION:
menu.append([login, 4073, login], (connect_and_play, ip, port))
menu.title = nb2msg(len(menu.choices)) + [4078] + nb2msg(nb) + [4079]
menu.append([4075, 4076], None)
menu.run()
def enter_server_ip(self):
        host = input_string([], r"^[A-Za-z0-9\.]$")
if host:
connect_and_play(host)
def multiplayer_menu(self):
revision_checker.start_if_needed()
if config.login == "player":
voice.alert([4235]) # type your new login
self.modify_login()
menu = Menu([4030], [
([4119], self.choose_server_ip_in_a_list),
([4120], self.enter_server_ip),
([4048], None),
])
menu.run()
def restore_game(self):
n = SAVE_PATH
if not os.path.exists(n):
voice.alert([1029]) # hostile sound
return
f = open(n)
try:
i = int(stats.Stats(None, None)._get_weak_user_id())
j = int(f.readline())
except:
i = 0
j = "error"
if i == j:
try:
game_session = pickle.load(f)
except:
exception("cannot load savegame file")
voice.alert([1029]) # hostile sound
return
game_session.run_on()
else:
warning("savegame file is not from this machine")
voice.alert([1029]) # hostile sound
def training_menu_invite(self, ai_type):
self.players.append(ai_type)
self.factions.append("random_faction")
self.menu.update_menu(self.build_training_menu_after_map())
def training_menu_after_map(self, m):
style.load(res.get_text_file("ui/style", append=True, localize=True)) # XXX: won't work with factions defined in the map
self.players = [config.login]
self.factions = ["random_faction"]
self.map = m
self.menu = self.build_training_menu_after_map()
self.menu.loop()
def start_training_game(self):
game = TrainingGame(self.map, self.players)
game.factions = self.factions
game.run()
return END_LOOP
def set_faction(self, pn, r):
self.factions[pn] = r
self.menu.update_menu(self.build_training_menu_after_map())
def _add_faction_menu(self, menu, pn, p, pr):
if len(self.map.factions) > 1:
for r in ["random_faction"] + self.map.factions:
if r != pr:
menu.append([p,] + style.get(r, "title"),
(self.set_faction, pn, r))
def build_training_menu_after_map(self):
menu = Menu()
if len(self.players) < self.map.nb_players_max:
menu.append([4058, 4258], (self.training_menu_invite, "easy"))
menu.append([4058, 4257], (self.training_menu_invite,
"aggressive"))
if len(self.players) >= self.map.nb_players_min:
menu.append([4059], self.start_training_game)
for pn, (p, pr) in enumerate(zip(self.players, self.factions)):
self._add_faction_menu(menu, pn, p, pr)
menu.append([4048, 4060], END_LOOP)
return menu
def training_menu(self):
menu = Menu([4055], remember="mapmenu")
for m in res.worlds_multi():
menu.append(m.title, (self.training_menu_after_map, m))
menu.append([4041], None)
menu.run()
def replay(self, n):
ReplayGame(os.path.join(REPLAYS_PATH, n)).run()
def replay_menu(self):
menu = Menu([4315])
for n in sorted(os.listdir(REPLAYS_PATH), reverse=True):
if n.endswith(".txt"):
menu.append([time.strftime("%c", time.localtime(int(n[:-4])))], (self.replay, n))
menu.append([4041], None)
menu.run()
def modify_login(self):
login = input_string([4235, 4236], "^[a-zA-Z0-9]$") # type your new
# login ; use alphanumeric characters
        if login is None:
voice.alert([4238]) # current login kept
elif (len(login) < 1) or (len(login) > 20):
voice.alert([4237, 4238]) # incorrect login ; current login kept
else:
voice.alert([4239, login]) # new login:
config.login = login
config.save()
def main(self):
def open_user_folder():
webbrowser.open(CONFIG_DIR_PATH)
single_player_menu = Menu([4030],
[(c.title, c) for c in res.campaigns()] +
[
([4055], self.training_menu),
([4113], self.restore_game),
([4118], END_LOOP),
])
server_menu = Menu([4043], [
([4044, 4045], (start_server_and_connect, "admin_only")),
([4046, 4047], (start_server_and_connect, "")),
([4121, 4122], (start_server_and_connect,
"admin_only no_metaserver")),
([4048], None),
])
def set_and_launch_mod(mods):
config.mods = mods
config.save()
res.set_mods(config.mods)
main_menu().loop() # update the menu title
raise SystemExit
def mods_menu():
mods_menu = Menu([4341])
mods_menu.append([0], (set_and_launch_mod, ""))
for mod in res.available_mods():
mods_menu.append([mod], (set_and_launch_mod, mod))
mods_menu.append([4118], END_LOOP)
mods_menu.run()
return END_LOOP
def set_and_launch_soundpack(soundpacks):
config.soundpacks = soundpacks
config.save()
res.set_soundpacks(config.soundpacks)
main_menu().loop() # update the menu title
raise SystemExit
def soundpacks_menu():
soundpacks_menu = Menu([4342])
soundpacks_menu.append([0], (set_and_launch_soundpack, ""))
for soundpack in res.available_soundpacks():
soundpacks_menu.append([soundpack], (set_and_launch_soundpack, soundpack))
soundpacks_menu.append([4118], END_LOOP)
soundpacks_menu.run()
return END_LOOP
options_menu = Menu([4086], [
([4087], self.modify_login),
((4341, ), mods_menu),
((4342, ), soundpacks_menu),
([4336], open_user_folder),
([4118], END_LOOP),
])
def main_menu():
import version
return Menu(["SoundRTS %s %s %s," % (version.VERSION, res.mods, res.soundpacks), 4030], [
[[4031, 4032], single_player_menu.loop],
[[4033, 4034], self.multiplayer_menu],
[[4035, 4036], server_menu],
[[4315], self.replay_menu],
[[4037, 4038], options_menu.loop],
[[4337], launch_manual],
[[4041, 4042], END_LOOP],
])
def launch_manual():
webbrowser.open(os.path.realpath("doc/help-index.htm"))
if "connect_localhost" in sys.argv:
connect_and_play()
else:
main_menu().loop()
def main():
try:
try:
init_media()
revision_checker.start_if_needed()
Application().main()
except SystemExit:
raise
except:
exception("error")
finally:
close_media()
if __name__ == "__main__":
main()
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A tf.learn implementation of tensor_forest (extremely random forests)."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib import framework as contrib_framework
from tensorflow.contrib.framework import deprecated_arg_values
from tensorflow.contrib.framework.python.framework import experimental
from tensorflow.contrib.learn.python.learn import evaluable
from tensorflow.contrib.learn.python.learn import trainable
from tensorflow.contrib.learn.python.learn.estimators import estimator
from tensorflow.contrib.learn.python.learn.utils import export
from tensorflow.contrib.tensor_forest.client import eval_metrics
from tensorflow.contrib.tensor_forest.data import data_ops
from tensorflow.contrib.tensor_forest.python import tensor_forest
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import session_run_hook
KEYS_NAME = 'keys'
LOSS_NAME = 'rf_training_loss'
def _assert_float32(tensors):
"""Assert all tensors are float32.
Args:
tensors: `Tensor` or `dict` of `Tensor` objects.
Raises:
TypeError: if any tensor is not float32.
"""
if not isinstance(tensors, dict):
tensors = [tensors]
else:
tensors = tensors.values()
for tensor in tensors:
if tensor.dtype.base_dtype != dtypes.float32:
      raise TypeError('Expected dtype=float32, got %s.' % tensor)
class TensorForestLossHook(session_run_hook.SessionRunHook):
"""Monitor to request stop when loss stops decreasing."""
def __init__(self, early_stopping_rounds):
self.early_stopping_rounds = early_stopping_rounds
self.min_loss = None
self.last_step = -1
# self.steps records the number of steps for which the loss has been
# non-decreasing
self.steps = 0
def before_run(self, run_context):
return session_run_hook.SessionRunArgs(
{'global_step': contrib_framework.get_global_step(),
'current_loss': run_context.session.graph.get_operation_by_name(
LOSS_NAME).outputs[0]})
def after_run(self, run_context, run_values):
current_loss = run_values.results['current_loss']
current_step = run_values.results['global_step']
self.steps += 1
    # Guard against the global step going backwards, which might happen
    # if we recover from a checkpoint or restart.
if self.last_step == -1 or self.last_step > current_step:
logging.info('TensorForestLossHook resetting last_step.')
self.last_step = current_step
self.steps = 0
self.min_loss = None
return
self.last_step = current_step
if self.min_loss is None or current_loss < self.min_loss:
self.min_loss = current_loss
self.steps = 0
if self.steps > self.early_stopping_rounds:
logging.info('TensorForestLossHook requesting stop.')
run_context.request_stop()
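# Illustrative usage (this mirrors the default wiring that fit() performs
# below when no monitors are passed; the estimator variable is hypothetical):
#   hook = TensorForestLossHook(early_stopping_rounds=30)
#   estimator.fit(input_fn=input_fn_train, monitors=[hook])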
def get_model_fn(params, graph_builder_class, device_assigner,
weights_name=None, keys_name=None):
"""Return a model function given a way to construct a graph builder."""
def _model_fn(features, labels):
"""Function that returns predictions, training loss, and training op."""
weights = None
keys = None
if weights_name and weights_name in features:
weights = features.pop(weights_name)
if keys_name and keys_name in features:
keys = features.pop(keys_name)
processed_features, spec = data_ops.ParseDataTensorOrDict(features)
_assert_float32(processed_features)
if labels is not None:
labels = data_ops.ParseLabelTensorOrDict(labels)
_assert_float32(labels)
graph_builder = graph_builder_class(params, device_assigner=device_assigner)
inference = {eval_metrics.INFERENCE_PROB_NAME:
graph_builder.inference_graph(processed_features,
data_spec=spec)}
if not params.regression:
inference[eval_metrics.INFERENCE_PRED_NAME] = math_ops.argmax(
inference[eval_metrics.INFERENCE_PROB_NAME], 1)
    if keys is not None:
inference[KEYS_NAME] = keys
# labels might be None if we're doing prediction (which brings up the
# question of why we force everything to adhere to a single model_fn).
training_loss = None
training_graph = None
if labels is not None:
training_loss = graph_builder.training_loss(processed_features, labels,
data_spec=spec,
name=LOSS_NAME)
training_graph = control_flow_ops.group(
graph_builder.training_graph(
processed_features, labels, data_spec=spec,
input_weights=weights),
state_ops.assign_add(contrib_framework.get_global_step(), 1))
# Put weights back in
if weights is not None:
features[weights_name] = weights
return (inference, training_loss, training_graph)
return _model_fn
class TensorForestEstimator(evaluable.Evaluable, trainable.Trainable):
"""An estimator that can train and evaluate a random forest.
Example:
```python
params = tf.contrib.tensor_forest.python.tensor_forest.ForestHParams(
num_classes=2, num_features=40, num_trees=10, max_nodes=1000)
# Estimator using the default graph builder.
estimator = TensorForestEstimator(params, model_dir=model_dir)
# Or estimator using TrainingLossForest as the graph builder.
estimator = TensorForestEstimator(
params, graph_builder_class=tensor_forest.TrainingLossForest,
model_dir=model_dir)
# Input builders
  def input_fn_train():  # returns x, y
    ...
  def input_fn_eval():  # returns x, y
    ...
estimator.fit(input_fn=input_fn_train)
estimator.evaluate(input_fn=input_fn_eval)
estimator.predict(x=x)
```
"""
def __init__(self, params, device_assigner=None, model_dir=None,
graph_builder_class=tensor_forest.RandomForestGraphs,
config=None, weights_name=None, keys_name=None,
feature_engineering_fn=None, early_stopping_rounds=100):
"""Initializes a TensorForestEstimator instance.
Args:
params: ForestHParams object that holds random forest hyperparameters.
These parameters will be passed into `model_fn`.
device_assigner: An `object` instance that controls how trees get
assigned to devices. If `None`, will use
`tensor_forest.RandomForestDeviceAssigner`.
model_dir: Directory to save model parameters, graph, etc. To continue
training a previously saved model, load checkpoints saved to this
directory into an estimator.
graph_builder_class: An `object` instance that defines how TF graphs for
random forest training and inference are built. By default will use
`tensor_forest.RandomForestGraphs`.
config: `RunConfig` object to configure the runtime settings.
weights_name: A string defining feature column name representing
weights. Will be multiplied by the loss of the example. Used to
downweight or boost examples during training.
keys_name: A string defining feature column name representing example
keys. Used by `predict_with_keys` method.
feature_engineering_fn: Feature engineering function. Takes features and
labels which are the output of `input_fn` and returns features and
labels which will be fed into the model.
      early_stopping_rounds: Number of training steps without a decrease in
        training loss before a TensorForestLossHook stops training. 100 by
        default.
    """
self.params = params.fill()
self.graph_builder_class = graph_builder_class
self.early_stopping_rounds = early_stopping_rounds
self.weights_name = weights_name
self._estimator = estimator.Estimator(
model_fn=get_model_fn(params, graph_builder_class, device_assigner,
weights_name=weights_name, keys_name=keys_name),
model_dir=model_dir,
config=config,
feature_engineering_fn=feature_engineering_fn)
def evaluate(
self, x=None, y=None, input_fn=None, feed_fn=None, batch_size=None,
steps=None, metrics=None, name=None):
"""See evaluable.Evaluable."""
return self._estimator.evaluate(
input_fn=input_fn, x=x, y=y, feed_fn=feed_fn,
batch_size=batch_size, steps=steps,
metrics=metrics, name=name)
def fit(self, x=None, y=None, input_fn=None, steps=None, batch_size=None,
monitors=None, max_steps=None):
"""See trainable.Trainable."""
if not monitors:
monitors = [TensorForestLossHook(self.early_stopping_rounds)]
self._estimator.fit(input_fn=input_fn, x=x, y=y,
batch_size=batch_size, steps=steps, monitors=monitors,
max_steps=max_steps)
@deprecated_arg_values(
estimator.AS_ITERABLE_DATE, estimator.AS_ITERABLE_INSTRUCTIONS,
as_iterable=False)
def predict_proba(
self, x=None, input_fn=None, batch_size=None, outputs=None,
as_iterable=True):
"""Returns prediction probabilities for given features (classification).
Args:
x: features.
input_fn: Input function. If set, x and y must be None.
batch_size: Override default batch size.
outputs: list of `str`, name of the output to predict.
If `None`, returns all.
as_iterable: If True, return an iterable which keeps yielding predictions
for each example until inputs are exhausted. Note: The inputs must
terminate if you want the iterable to terminate (e.g. be sure to pass
num_epochs=1 if you are using something like read_batch_features).
Returns:
Numpy array of predicted probabilities (or an iterable of predicted
probabilities if as_iterable is True).
Raises:
ValueError: If both or neither of x and input_fn were given.
"""
results = self._estimator.predict(
x=x, input_fn=input_fn, batch_size=batch_size, outputs=outputs,
as_iterable=as_iterable)
if as_iterable:
return (x[eval_metrics.INFERENCE_PROB_NAME] for x in results)
else:
return results[eval_metrics.INFERENCE_PROB_NAME]
@deprecated_arg_values(
estimator.AS_ITERABLE_DATE, estimator.AS_ITERABLE_INSTRUCTIONS,
as_iterable=False)
def predict(
self, x=None, input_fn=None, axis=None, batch_size=None, outputs=None,
as_iterable=True):
"""Returns predictions for given features.
Args:
x: features.
input_fn: Input function. If set, x must be None.
axis: Axis on which to argmax (for classification).
Last axis is used by default.
batch_size: Override default batch size.
outputs: list of `str`, name of the output to predict.
If `None`, returns all.
as_iterable: If True, return an iterable which keeps yielding predictions
for each example until inputs are exhausted. Note: The inputs must
terminate if you want the iterable to terminate (e.g. be sure to pass
num_epochs=1 if you are using something like read_batch_features).
Returns:
Numpy array of predicted classes or regression values (or an iterable of
predictions if as_iterable is True).
"""
results = self._estimator.predict(
x=x, input_fn=input_fn, batch_size=batch_size, outputs=outputs,
as_iterable=as_iterable)
predict_name = (eval_metrics.INFERENCE_PROB_NAME if self.params.regression
else eval_metrics.INFERENCE_PRED_NAME)
if as_iterable:
return (x[predict_name] for x in results)
else:
return results[predict_name]
@deprecated_arg_values(
estimator.AS_ITERABLE_DATE, estimator.AS_ITERABLE_INSTRUCTIONS,
as_iterable=False)
def predict_with_keys(
self, x=None, input_fn=None, axis=None, batch_size=None, outputs=None,
as_iterable=True):
"""Same as predict but also returns the example keys."""
results = self._estimator.predict(
x=x, input_fn=input_fn, batch_size=batch_size, outputs=outputs,
as_iterable=as_iterable)
predict_name = (eval_metrics.INFERENCE_PROB_NAME if self.params.regression
else eval_metrics.INFERENCE_PRED_NAME)
if as_iterable:
return ((x[predict_name], x.get(KEYS_NAME, None)) for x in results)
else:
return results[predict_name], results.get(KEYS_NAME, None)
def export(self,
export_dir,
input_fn,
signature_fn=None,
default_batch_size=1):
"""See BaseEstimator.export."""
# Reset model function with basic device assigner.
# Servo doesn't support distributed inference
# but it will try to respect device assignments if they're there.
# pylint: disable=protected-access
orig_model_fn = self._estimator._model_fn
self._estimator._model_fn = get_model_fn(
self.params, self.graph_builder_class,
tensor_forest.RandomForestDeviceAssigner(),
weights_name=self.weights_name)
result = self._estimator.export(
export_dir=export_dir,
use_deprecated_input_fn=True,
signature_fn=(signature_fn or
(export.regression_signature_fn
if self.params.regression else
export.classification_signature_fn_with_prob)),
default_batch_size=default_batch_size,
prediction_key=eval_metrics.INFERENCE_PROB_NAME)
self._estimator._model_fn = orig_model_fn
# pylint: enable=protected-access
return result
@experimental
def export_savedmodel(self,
export_dir_base,
input_fn,
default_output_alternative_key=None,
assets_extra=None,
as_text=False,
exports_to_keep=None):
return self._estimator.export_savedmodel(
export_dir_base,
input_fn,
default_output_alternative_key=default_output_alternative_key,
assets_extra=assets_extra,
as_text=as_text,
exports_to_keep=exports_to_keep)
|
|
"""Provides functionality to interact with lights."""
from __future__ import annotations
from collections.abc import Iterable
import csv
import dataclasses
from datetime import timedelta
import logging
import os
from typing import cast, final
import voluptuous as vol
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import (
SERVICE_TOGGLE,
SERVICE_TURN_OFF,
SERVICE_TURN_ON,
STATE_ON,
)
from homeassistant.core import HomeAssistant, callback
from homeassistant.exceptions import HomeAssistantError
from homeassistant.helpers import config_validation as cv, entity_registry as er
from homeassistant.helpers.config_validation import ( # noqa: F401
PLATFORM_SCHEMA,
PLATFORM_SCHEMA_BASE,
make_entity_service_schema,
)
from homeassistant.helpers.entity import ToggleEntity, ToggleEntityDescription
from homeassistant.helpers.entity_component import EntityComponent
from homeassistant.helpers.typing import ConfigType
from homeassistant.loader import bind_hass
import homeassistant.util.color as color_util
# mypy: allow-untyped-defs, no-check-untyped-defs
DOMAIN = "light"
SCAN_INTERVAL = timedelta(seconds=30)
DATA_PROFILES = "light_profiles"
ENTITY_ID_FORMAT = DOMAIN + ".{}"
# Bitfield of features supported by the light entity
SUPPORT_BRIGHTNESS = 1 # Deprecated, replaced by color modes
SUPPORT_COLOR_TEMP = 2 # Deprecated, replaced by color modes
SUPPORT_EFFECT = 4
SUPPORT_FLASH = 8
SUPPORT_COLOR = 16 # Deprecated, replaced by color modes
SUPPORT_TRANSITION = 32
SUPPORT_WHITE_VALUE = 128 # Deprecated, replaced by color modes
# Color mode of the light
ATTR_COLOR_MODE = "color_mode"
# List of color modes supported by the light
ATTR_SUPPORTED_COLOR_MODES = "supported_color_modes"
# Possible color modes
COLOR_MODE_UNKNOWN = "unknown" # Ambiguous color mode
COLOR_MODE_ONOFF = "onoff" # Must be the only supported mode
COLOR_MODE_BRIGHTNESS = "brightness" # Must be the only supported mode
COLOR_MODE_COLOR_TEMP = "color_temp"
COLOR_MODE_HS = "hs"
COLOR_MODE_XY = "xy"
COLOR_MODE_RGB = "rgb"
COLOR_MODE_RGBW = "rgbw"
COLOR_MODE_RGBWW = "rgbww"
COLOR_MODE_WHITE = "white" # Must *NOT* be the only supported mode
VALID_COLOR_MODES = {
COLOR_MODE_ONOFF,
COLOR_MODE_BRIGHTNESS,
COLOR_MODE_COLOR_TEMP,
COLOR_MODE_HS,
COLOR_MODE_XY,
COLOR_MODE_RGB,
COLOR_MODE_RGBW,
COLOR_MODE_RGBWW,
COLOR_MODE_WHITE,
}
COLOR_MODES_BRIGHTNESS = VALID_COLOR_MODES - {COLOR_MODE_ONOFF}
COLOR_MODES_COLOR = {
COLOR_MODE_HS,
COLOR_MODE_RGB,
COLOR_MODE_RGBW,
COLOR_MODE_RGBWW,
COLOR_MODE_XY,
}
def valid_supported_color_modes(color_modes: Iterable[str]) -> set[str]:
"""Validate the given color modes."""
color_modes = set(color_modes)
if (
not color_modes
or COLOR_MODE_UNKNOWN in color_modes
or (COLOR_MODE_BRIGHTNESS in color_modes and len(color_modes) > 1)
or (COLOR_MODE_ONOFF in color_modes and len(color_modes) > 1)
or (COLOR_MODE_WHITE in color_modes and not color_supported(color_modes))
):
raise vol.Error(f"Invalid supported_color_modes {sorted(color_modes)}")
return color_modes
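# Illustrative behaviour (inputs are assumptions):
#   valid_supported_color_modes({COLOR_MODE_HS, COLOR_MODE_COLOR_TEMP})
#       -> {"hs", "color_temp"}
#   valid_supported_color_modes({COLOR_MODE_ONOFF, COLOR_MODE_HS})
#       -> raises vol.Error, since "onoff" must be the only supported mode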
def brightness_supported(color_modes: Iterable[str] | None) -> bool:
"""Test if brightness is supported."""
if not color_modes:
return False
return any(mode in COLOR_MODES_BRIGHTNESS for mode in color_modes)
def color_supported(color_modes: Iterable[str] | None) -> bool:
"""Test if color is supported."""
if not color_modes:
return False
return any(mode in COLOR_MODES_COLOR for mode in color_modes)
def color_temp_supported(color_modes: Iterable[str] | None) -> bool:
"""Test if color temperature is supported."""
if not color_modes:
return False
return COLOR_MODE_COLOR_TEMP in color_modes
def get_supported_color_modes(hass: HomeAssistant, entity_id: str) -> set | None:
"""Get supported color modes for a light entity.
First try the statemachine, then entity registry.
This is the equivalent of entity helper get_supported_features.
"""
if state := hass.states.get(entity_id):
return state.attributes.get(ATTR_SUPPORTED_COLOR_MODES)
entity_registry = er.async_get(hass)
if not (entry := entity_registry.async_get(entity_id)):
raise HomeAssistantError(f"Unknown entity {entity_id}")
if not entry.capabilities:
return None
return entry.capabilities.get(ATTR_SUPPORTED_COLOR_MODES)
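# Illustrative: get_supported_color_modes(hass, "light.kitchen") returns the
# live state attribute when the entity is in the state machine, and falls
# back to the registry capabilities otherwise; "light.kitchen" is a
# hypothetical entity id.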
# Float that represents transition time in seconds to make change.
ATTR_TRANSITION = "transition"
# Lists holding color values
ATTR_RGB_COLOR = "rgb_color"
ATTR_RGBW_COLOR = "rgbw_color"
ATTR_RGBWW_COLOR = "rgbww_color"
ATTR_XY_COLOR = "xy_color"
ATTR_HS_COLOR = "hs_color"
ATTR_COLOR_TEMP = "color_temp"
ATTR_KELVIN = "kelvin"
ATTR_MIN_MIREDS = "min_mireds"
ATTR_MAX_MIREDS = "max_mireds"
ATTR_COLOR_NAME = "color_name"
ATTR_WHITE_VALUE = "white_value"
ATTR_WHITE = "white"
# Brightness of the light, 0..255 or percentage
ATTR_BRIGHTNESS = "brightness"
ATTR_BRIGHTNESS_PCT = "brightness_pct"
ATTR_BRIGHTNESS_STEP = "brightness_step"
ATTR_BRIGHTNESS_STEP_PCT = "brightness_step_pct"
# String representing a profile (built-in ones or external defined).
ATTR_PROFILE = "profile"
# If the light should flash, can be FLASH_SHORT or FLASH_LONG.
ATTR_FLASH = "flash"
FLASH_SHORT = "short"
FLASH_LONG = "long"
# List of possible effects
ATTR_EFFECT_LIST = "effect_list"
# Apply an effect to the light, can be EFFECT_COLORLOOP.
ATTR_EFFECT = "effect"
EFFECT_COLORLOOP = "colorloop"
EFFECT_RANDOM = "random"
EFFECT_WHITE = "white"
COLOR_GROUP = "Color descriptors"
LIGHT_PROFILES_FILE = "light_profiles.csv"
# Service call validation schemas
VALID_TRANSITION = vol.All(vol.Coerce(float), vol.Clamp(min=0, max=6553))
VALID_BRIGHTNESS = vol.All(vol.Coerce(int), vol.Clamp(min=0, max=255))
VALID_BRIGHTNESS_PCT = vol.All(vol.Coerce(float), vol.Range(min=0, max=100))
VALID_BRIGHTNESS_STEP = vol.All(vol.Coerce(int), vol.Clamp(min=-255, max=255))
VALID_BRIGHTNESS_STEP_PCT = vol.All(vol.Coerce(float), vol.Clamp(min=-100, max=100))
VALID_FLASH = vol.In([FLASH_SHORT, FLASH_LONG])
LIGHT_TURN_ON_SCHEMA = {
vol.Exclusive(ATTR_PROFILE, COLOR_GROUP): cv.string,
ATTR_TRANSITION: VALID_TRANSITION,
vol.Exclusive(ATTR_BRIGHTNESS, ATTR_BRIGHTNESS): VALID_BRIGHTNESS,
vol.Exclusive(ATTR_BRIGHTNESS_PCT, ATTR_BRIGHTNESS): VALID_BRIGHTNESS_PCT,
vol.Exclusive(ATTR_BRIGHTNESS_STEP, ATTR_BRIGHTNESS): VALID_BRIGHTNESS_STEP,
vol.Exclusive(ATTR_BRIGHTNESS_STEP_PCT, ATTR_BRIGHTNESS): VALID_BRIGHTNESS_STEP_PCT,
vol.Exclusive(ATTR_COLOR_NAME, COLOR_GROUP): cv.string,
vol.Exclusive(ATTR_COLOR_TEMP, COLOR_GROUP): vol.All(
vol.Coerce(int), vol.Range(min=1)
),
vol.Exclusive(ATTR_KELVIN, COLOR_GROUP): cv.positive_int,
vol.Exclusive(ATTR_HS_COLOR, COLOR_GROUP): vol.All(
vol.Coerce(tuple),
vol.ExactSequence(
(
vol.All(vol.Coerce(float), vol.Range(min=0, max=360)),
vol.All(vol.Coerce(float), vol.Range(min=0, max=100)),
)
),
),
vol.Exclusive(ATTR_RGB_COLOR, COLOR_GROUP): vol.All(
vol.Coerce(tuple), vol.ExactSequence((cv.byte,) * 3)
),
vol.Exclusive(ATTR_RGBW_COLOR, COLOR_GROUP): vol.All(
vol.Coerce(tuple), vol.ExactSequence((cv.byte,) * 4)
),
vol.Exclusive(ATTR_RGBWW_COLOR, COLOR_GROUP): vol.All(
vol.Coerce(tuple), vol.ExactSequence((cv.byte,) * 5)
),
vol.Exclusive(ATTR_XY_COLOR, COLOR_GROUP): vol.All(
vol.Coerce(tuple), vol.ExactSequence((cv.small_float, cv.small_float))
),
vol.Exclusive(ATTR_WHITE, COLOR_GROUP): VALID_BRIGHTNESS,
ATTR_WHITE_VALUE: vol.All(vol.Coerce(int), vol.Range(min=0, max=255)),
ATTR_FLASH: VALID_FLASH,
ATTR_EFFECT: cv.string,
}
LIGHT_TURN_OFF_SCHEMA = {ATTR_TRANSITION: VALID_TRANSITION, ATTR_FLASH: VALID_FLASH}
_LOGGER = logging.getLogger(__name__)
@bind_hass
def is_on(hass: HomeAssistant, entity_id: str) -> bool:
"""Return if the lights are on based on the statemachine."""
return hass.states.is_state(entity_id, STATE_ON)
def preprocess_turn_on_alternatives(hass, params):
"""Process extra data for turn light on request.
Async friendly.
"""
# Bail out, we process this later.
if ATTR_BRIGHTNESS_STEP in params or ATTR_BRIGHTNESS_STEP_PCT in params:
return
if ATTR_PROFILE in params:
hass.data[DATA_PROFILES].apply_profile(params.pop(ATTR_PROFILE), params)
if (color_name := params.pop(ATTR_COLOR_NAME, None)) is not None:
try:
params[ATTR_RGB_COLOR] = color_util.color_name_to_rgb(color_name)
except ValueError:
_LOGGER.warning("Got unknown color %s, falling back to white", color_name)
params[ATTR_RGB_COLOR] = (255, 255, 255)
if (kelvin := params.pop(ATTR_KELVIN, None)) is not None:
mired = color_util.color_temperature_kelvin_to_mired(kelvin)
params[ATTR_COLOR_TEMP] = int(mired)
brightness_pct = params.pop(ATTR_BRIGHTNESS_PCT, None)
if brightness_pct is not None:
params[ATTR_BRIGHTNESS] = round(255 * brightness_pct / 100)
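# Illustrative conversions performed above (input values are assumptions):
#   {"kelvin": 4000}        -> {"color_temp": 250}    # mired = 1e6 / kelvin
#   {"brightness_pct": 50}  -> {"brightness": 128}    # round(255 * 50 / 100)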
def filter_turn_off_params(light, params):
"""Filter out params not used in turn off or not supported by the light."""
supported_features = light.supported_features
if not supported_features & SUPPORT_FLASH:
params.pop(ATTR_FLASH, None)
if not supported_features & SUPPORT_TRANSITION:
params.pop(ATTR_TRANSITION, None)
return {k: v for k, v in params.items() if k in (ATTR_TRANSITION, ATTR_FLASH)}
def filter_turn_on_params(light, params):
"""Filter out params not supported by the light."""
supported_features = light.supported_features
if not supported_features & SUPPORT_EFFECT:
params.pop(ATTR_EFFECT, None)
if not supported_features & SUPPORT_FLASH:
params.pop(ATTR_FLASH, None)
if not supported_features & SUPPORT_TRANSITION:
params.pop(ATTR_TRANSITION, None)
if not supported_features & SUPPORT_WHITE_VALUE:
params.pop(ATTR_WHITE_VALUE, None)
supported_color_modes = (
light._light_internal_supported_color_modes # pylint:disable=protected-access
)
if not brightness_supported(supported_color_modes):
params.pop(ATTR_BRIGHTNESS, None)
if COLOR_MODE_COLOR_TEMP not in supported_color_modes:
params.pop(ATTR_COLOR_TEMP, None)
if COLOR_MODE_HS not in supported_color_modes:
params.pop(ATTR_HS_COLOR, None)
if COLOR_MODE_RGB not in supported_color_modes:
params.pop(ATTR_RGB_COLOR, None)
if COLOR_MODE_RGBW not in supported_color_modes:
params.pop(ATTR_RGBW_COLOR, None)
if COLOR_MODE_RGBWW not in supported_color_modes:
params.pop(ATTR_RGBWW_COLOR, None)
if COLOR_MODE_WHITE not in supported_color_modes:
params.pop(ATTR_WHITE, None)
if COLOR_MODE_XY not in supported_color_modes:
params.pop(ATTR_XY_COLOR, None)
return params
async def async_setup(hass: HomeAssistant, config: ConfigType) -> bool: # noqa: C901
"""Expose light control via state machine and services."""
component = hass.data[DOMAIN] = EntityComponent(
_LOGGER, DOMAIN, hass, SCAN_INTERVAL
)
await component.async_setup(config)
profiles = hass.data[DATA_PROFILES] = Profiles(hass)
await profiles.async_initialize()
def preprocess_data(data):
"""Preprocess the service data."""
base = {
entity_field: data.pop(entity_field)
for entity_field in cv.ENTITY_SERVICE_FIELDS
if entity_field in data
}
preprocess_turn_on_alternatives(hass, data)
base["params"] = data
return base
async def async_handle_light_on_service(light, call):
"""Handle turning a light on.
If brightness is set to 0, this service will turn the light off.
"""
params = dict(call.data["params"])
# Only process params once we processed brightness step
if params and (
ATTR_BRIGHTNESS_STEP in params or ATTR_BRIGHTNESS_STEP_PCT in params
):
brightness = light.brightness if light.is_on else 0
if ATTR_BRIGHTNESS_STEP in params:
brightness += params.pop(ATTR_BRIGHTNESS_STEP)
else:
brightness += round(params.pop(ATTR_BRIGHTNESS_STEP_PCT) / 100 * 255)
params[ATTR_BRIGHTNESS] = max(0, min(255, brightness))
preprocess_turn_on_alternatives(hass, params)
if (not params or not light.is_on) or (
params and ATTR_TRANSITION not in params
):
profiles.apply_default(light.entity_id, light.is_on, params)
legacy_supported_color_modes = (
light._light_internal_supported_color_modes # pylint: disable=protected-access
)
supported_color_modes = light.supported_color_modes
        # Backwards compatibility: if an RGBW color is specified, convert it
        # to an RGB color plus a white value for legacy lights
if ATTR_RGBW_COLOR in params:
if (
COLOR_MODE_RGBW in legacy_supported_color_modes
and not supported_color_modes
):
rgbw_color = params.pop(ATTR_RGBW_COLOR)
params[ATTR_RGB_COLOR] = rgbw_color[0:3]
params[ATTR_WHITE_VALUE] = rgbw_color[3]
# If a color temperature is specified, emulate it if not supported by the light
if ATTR_COLOR_TEMP in params:
if (
supported_color_modes
and COLOR_MODE_COLOR_TEMP not in supported_color_modes
and COLOR_MODE_RGBWW in supported_color_modes
):
color_temp = params.pop(ATTR_COLOR_TEMP)
brightness = params.get(ATTR_BRIGHTNESS, light.brightness)
params[ATTR_RGBWW_COLOR] = color_util.color_temperature_to_rgbww(
color_temp, brightness, light.min_mireds, light.max_mireds
)
elif COLOR_MODE_COLOR_TEMP not in legacy_supported_color_modes:
color_temp = params.pop(ATTR_COLOR_TEMP)
if color_supported(legacy_supported_color_modes):
temp_k = color_util.color_temperature_mired_to_kelvin(color_temp)
params[ATTR_HS_COLOR] = color_util.color_temperature_to_hs(temp_k)
# If a color is specified, convert to the color space supported by the light
# Backwards compatibility: Fall back to hs color if light.supported_color_modes
# is not implemented
if not supported_color_modes:
if (rgb_color := params.pop(ATTR_RGB_COLOR, None)) is not None:
params[ATTR_HS_COLOR] = color_util.color_RGB_to_hs(*rgb_color)
elif (xy_color := params.pop(ATTR_XY_COLOR, None)) is not None:
params[ATTR_HS_COLOR] = color_util.color_xy_to_hs(*xy_color)
elif (rgbw_color := params.pop(ATTR_RGBW_COLOR, None)) is not None:
rgb_color = color_util.color_rgbw_to_rgb(*rgbw_color)
params[ATTR_HS_COLOR] = color_util.color_RGB_to_hs(*rgb_color)
elif (rgbww_color := params.pop(ATTR_RGBWW_COLOR, None)) is not None:
rgb_color = color_util.color_rgbww_to_rgb(
*rgbww_color, light.min_mireds, light.max_mireds
)
params[ATTR_HS_COLOR] = color_util.color_RGB_to_hs(*rgb_color)
elif ATTR_HS_COLOR in params and COLOR_MODE_HS not in supported_color_modes:
hs_color = params.pop(ATTR_HS_COLOR)
if COLOR_MODE_RGB in supported_color_modes:
params[ATTR_RGB_COLOR] = color_util.color_hs_to_RGB(*hs_color)
elif COLOR_MODE_RGBW in supported_color_modes:
rgb_color = color_util.color_hs_to_RGB(*hs_color)
params[ATTR_RGBW_COLOR] = color_util.color_rgb_to_rgbw(*rgb_color)
elif COLOR_MODE_RGBWW in supported_color_modes:
rgb_color = color_util.color_hs_to_RGB(*hs_color)
params[ATTR_RGBWW_COLOR] = color_util.color_rgb_to_rgbww(
*rgb_color, light.min_mireds, light.max_mireds
)
elif COLOR_MODE_XY in supported_color_modes:
params[ATTR_XY_COLOR] = color_util.color_hs_to_xy(*hs_color)
elif ATTR_RGB_COLOR in params and COLOR_MODE_RGB not in supported_color_modes:
rgb_color = params.pop(ATTR_RGB_COLOR)
if COLOR_MODE_RGBW in supported_color_modes:
params[ATTR_RGBW_COLOR] = color_util.color_rgb_to_rgbw(*rgb_color)
elif COLOR_MODE_RGBWW in supported_color_modes:
params[ATTR_RGBWW_COLOR] = color_util.color_rgb_to_rgbww(
*rgb_color, light.min_mireds, light.max_mireds
)
elif COLOR_MODE_HS in supported_color_modes:
params[ATTR_HS_COLOR] = color_util.color_RGB_to_hs(*rgb_color)
elif COLOR_MODE_XY in supported_color_modes:
params[ATTR_XY_COLOR] = color_util.color_RGB_to_xy(*rgb_color)
elif ATTR_XY_COLOR in params and COLOR_MODE_XY not in supported_color_modes:
xy_color = params.pop(ATTR_XY_COLOR)
if COLOR_MODE_HS in supported_color_modes:
params[ATTR_HS_COLOR] = color_util.color_xy_to_hs(*xy_color)
elif COLOR_MODE_RGB in supported_color_modes:
params[ATTR_RGB_COLOR] = color_util.color_xy_to_RGB(*xy_color)
elif COLOR_MODE_RGBW in supported_color_modes:
rgb_color = color_util.color_xy_to_RGB(*xy_color)
params[ATTR_RGBW_COLOR] = color_util.color_rgb_to_rgbw(*rgb_color)
elif COLOR_MODE_RGBWW in supported_color_modes:
rgb_color = color_util.color_xy_to_RGB(*xy_color)
params[ATTR_RGBWW_COLOR] = color_util.color_rgb_to_rgbww(
*rgb_color, light.min_mireds, light.max_mireds
)
elif ATTR_RGBW_COLOR in params and COLOR_MODE_RGBW not in supported_color_modes:
rgbw_color = params.pop(ATTR_RGBW_COLOR)
rgb_color = color_util.color_rgbw_to_rgb(*rgbw_color)
if COLOR_MODE_RGB in supported_color_modes:
params[ATTR_RGB_COLOR] = rgb_color
elif COLOR_MODE_RGBWW in supported_color_modes:
params[ATTR_RGBWW_COLOR] = color_util.color_rgb_to_rgbww(
*rgb_color, light.min_mireds, light.max_mireds
)
elif COLOR_MODE_HS in supported_color_modes:
params[ATTR_HS_COLOR] = color_util.color_RGB_to_hs(*rgb_color)
elif COLOR_MODE_XY in supported_color_modes:
params[ATTR_XY_COLOR] = color_util.color_RGB_to_xy(*rgb_color)
elif (
ATTR_RGBWW_COLOR in params and COLOR_MODE_RGBWW not in supported_color_modes
):
rgbww_color = params.pop(ATTR_RGBWW_COLOR)
rgb_color = color_util.color_rgbww_to_rgb(
*rgbww_color, light.min_mireds, light.max_mireds
)
if COLOR_MODE_RGB in supported_color_modes:
params[ATTR_RGB_COLOR] = rgb_color
elif COLOR_MODE_RGBW in supported_color_modes:
params[ATTR_RGBW_COLOR] = color_util.color_rgb_to_rgbw(*rgb_color)
elif COLOR_MODE_HS in supported_color_modes:
params[ATTR_HS_COLOR] = color_util.color_RGB_to_hs(*rgb_color)
elif COLOR_MODE_XY in supported_color_modes:
params[ATTR_XY_COLOR] = color_util.color_RGB_to_xy(*rgb_color)
# If both white and brightness are specified, override white
if (
supported_color_modes
and ATTR_WHITE in params
and COLOR_MODE_WHITE in supported_color_modes
):
params[ATTR_WHITE] = params.pop(ATTR_BRIGHTNESS, params[ATTR_WHITE])
# Remove deprecated white value if the light supports color mode
if supported_color_modes:
params.pop(ATTR_WHITE_VALUE, None)
if params.get(ATTR_BRIGHTNESS) == 0 or params.get(ATTR_WHITE) == 0:
await async_handle_light_off_service(light, call)
else:
await light.async_turn_on(**filter_turn_on_params(light, params))
async def async_handle_light_off_service(light, call):
"""Handle turning off a light."""
params = dict(call.data["params"])
if ATTR_TRANSITION not in params:
profiles.apply_default(light.entity_id, True, params)
await light.async_turn_off(**filter_turn_off_params(light, params))
async def async_handle_toggle_service(light, call):
"""Handle toggling a light."""
if light.is_on:
await async_handle_light_off_service(light, call)
else:
await async_handle_light_on_service(light, call)
# Listen for light on and light off service calls.
component.async_register_entity_service(
SERVICE_TURN_ON,
vol.All(cv.make_entity_service_schema(LIGHT_TURN_ON_SCHEMA), preprocess_data),
async_handle_light_on_service,
)
component.async_register_entity_service(
SERVICE_TURN_OFF,
vol.All(cv.make_entity_service_schema(LIGHT_TURN_OFF_SCHEMA), preprocess_data),
async_handle_light_off_service,
)
component.async_register_entity_service(
SERVICE_TOGGLE,
vol.All(cv.make_entity_service_schema(LIGHT_TURN_ON_SCHEMA), preprocess_data),
async_handle_toggle_service,
)
return True
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Set up a config entry."""
component = cast(EntityComponent, hass.data[DOMAIN])
return await component.async_setup_entry(entry)
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Unload a config entry."""
component = cast(EntityComponent, hass.data[DOMAIN])
return await component.async_unload_entry(entry)
def _coerce_none(value: str) -> None:
"""Coerce an empty string as None."""
if not isinstance(value, str):
raise vol.Invalid("Expected a string")
if value:
raise vol.Invalid("Not an empty string")
@dataclasses.dataclass
class Profile:
"""Representation of a profile."""
name: str
color_x: float | None = dataclasses.field(repr=False)
color_y: float | None = dataclasses.field(repr=False)
brightness: int | None
transition: int | None = None
hs_color: tuple[float, float] | None = dataclasses.field(init=False)
SCHEMA = vol.Schema(
vol.Any(
vol.ExactSequence(
(
str,
vol.Any(cv.small_float, _coerce_none),
vol.Any(cv.small_float, _coerce_none),
vol.Any(cv.byte, _coerce_none),
)
),
vol.ExactSequence(
(
str,
vol.Any(cv.small_float, _coerce_none),
vol.Any(cv.small_float, _coerce_none),
vol.Any(cv.byte, _coerce_none),
vol.Any(VALID_TRANSITION, _coerce_none),
)
),
)
)
def __post_init__(self) -> None:
"""Convert xy to hs color."""
if None in (self.color_x, self.color_y):
self.hs_color = None
return
self.hs_color = color_util.color_xy_to_hs(
cast(float, self.color_x), cast(float, self.color_y)
)
@classmethod
def from_csv_row(cls, csv_row: list[str]) -> Profile:
"""Create profile from a CSV row tuple."""
return cls(*cls.SCHEMA(csv_row))
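# Illustrative rows accepted by SCHEMA (profile names are hypothetical;
# empty fields coerce to None via _coerce_none):
#   Profile.from_csv_row(["reading", "0.323", "0.329", "254"])
#   Profile.from_csv_row(["nightlight", "", "", "30", "2"])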
class Profiles:
"""Representation of available color profiles."""
def __init__(self, hass: HomeAssistant) -> None:
"""Initialize profiles."""
self.hass = hass
self.data: dict[str, Profile] = {}
def _load_profile_data(self) -> dict[str, Profile]:
"""Load built-in profiles and custom profiles."""
profile_paths = [
os.path.join(os.path.dirname(__file__), LIGHT_PROFILES_FILE),
self.hass.config.path(LIGHT_PROFILES_FILE),
]
profiles = {}
for profile_path in profile_paths:
if not os.path.isfile(profile_path):
continue
with open(profile_path, encoding="utf8") as inp:
reader = csv.reader(inp)
# Skip the header
next(reader, None)
try:
for rec in reader:
profile = Profile.from_csv_row(rec)
profiles[profile.name] = profile
except vol.MultipleInvalid as ex:
_LOGGER.error(
"Error parsing light profile row '%s' from %s: %s",
rec,
profile_path,
ex,
)
continue
return profiles
async def async_initialize(self) -> None:
"""Load and cache profiles."""
self.data = await self.hass.async_add_executor_job(self._load_profile_data)
@callback
def apply_default(self, entity_id: str, state_on: bool, params: dict) -> None:
"""Return the default profile for the given light."""
for _entity_id in (entity_id, "group.all_lights"):
name = f"{_entity_id}.default"
if name in self.data:
if not state_on or not params:
self.apply_profile(name, params)
elif self.data[name].transition is not None:
params.setdefault(ATTR_TRANSITION, self.data[name].transition)
@callback
def apply_profile(self, name: str, params: dict) -> None:
"""Apply a profile."""
if (profile := self.data.get(name)) is None:
return
if profile.hs_color is not None:
params.setdefault(ATTR_HS_COLOR, profile.hs_color)
if profile.brightness is not None:
params.setdefault(ATTR_BRIGHTNESS, profile.brightness)
if profile.transition is not None:
params.setdefault(ATTR_TRANSITION, profile.transition)
@dataclasses.dataclass
class LightEntityDescription(ToggleEntityDescription):
"""A class that describes binary sensor entities."""
class LightEntity(ToggleEntity):
"""Base class for light entities."""
entity_description: LightEntityDescription
_attr_brightness: int | None = None
_attr_color_mode: str | None = None
_attr_color_temp: int | None = None
_attr_effect_list: list[str] | None = None
_attr_effect: str | None = None
_attr_hs_color: tuple[float, float] | None = None
_attr_max_mireds: int = 500
_attr_min_mireds: int = 153
_attr_rgb_color: tuple[int, int, int] | None = None
_attr_rgbw_color: tuple[int, int, int, int] | None = None
_attr_rgbww_color: tuple[int, int, int, int, int] | None = None
_attr_supported_color_modes: set[str] | None = None
_attr_supported_features: int = 0
_attr_xy_color: tuple[float, float] | None = None
@property
def brightness(self) -> int | None:
"""Return the brightness of this light between 0..255."""
return self._attr_brightness
@property
def color_mode(self) -> str | None:
"""Return the color mode of the light."""
return self._attr_color_mode
@property
def _light_internal_color_mode(self) -> str:
"""Return the color mode of the light with backwards compatibility."""
if (color_mode := self.color_mode) is None:
# Backwards compatibility for color_mode added in 2021.4
# Add warning in 2021.6, remove in 2021.10
supported = self._light_internal_supported_color_modes
if (
COLOR_MODE_RGBW in supported
and self.white_value is not None
and self.hs_color is not None
):
return COLOR_MODE_RGBW
if COLOR_MODE_HS in supported and self.hs_color is not None:
return COLOR_MODE_HS
if COLOR_MODE_COLOR_TEMP in supported and self.color_temp is not None:
return COLOR_MODE_COLOR_TEMP
if COLOR_MODE_BRIGHTNESS in supported and self.brightness is not None:
return COLOR_MODE_BRIGHTNESS
if COLOR_MODE_ONOFF in supported:
return COLOR_MODE_ONOFF
return COLOR_MODE_UNKNOWN
return color_mode
@property
def hs_color(self) -> tuple[float, float] | None:
"""Return the hue and saturation color value [float, float]."""
return self._attr_hs_color
@property
def xy_color(self) -> tuple[float, float] | None:
"""Return the xy color value [float, float]."""
return self._attr_xy_color
@property
def rgb_color(self) -> tuple[int, int, int] | None:
"""Return the rgb color value [int, int, int]."""
return self._attr_rgb_color
@property
def rgbw_color(self) -> tuple[int, int, int, int] | None:
"""Return the rgbw color value [int, int, int, int]."""
return self._attr_rgbw_color
@property
def _light_internal_rgbw_color(self) -> tuple[int, int, int, int] | None:
"""Return the rgbw color value [int, int, int, int]."""
rgbw_color = self.rgbw_color
if (
rgbw_color is None
and self.hs_color is not None
and self.white_value is not None
):
# Backwards compatibility for rgbw_color added in 2021.4
# Add warning in 2021.6, remove in 2021.10
r, g, b = color_util.color_hs_to_RGB( # pylint: disable=invalid-name
*self.hs_color
)
w = self.white_value # pylint: disable=invalid-name
rgbw_color = (r, g, b, w)
return rgbw_color
@property
def rgbww_color(self) -> tuple[int, int, int, int, int] | None:
"""Return the rgbww color value [int, int, int, int, int]."""
return self._attr_rgbww_color
@property
def color_temp(self) -> int | None:
"""Return the CT color value in mireds."""
return self._attr_color_temp
@property
def min_mireds(self) -> int:
"""Return the coldest color_temp that this light supports."""
# Default to the Philips Hue value that HA has always assumed
# https://developers.meethue.com/documentation/core-concepts
return self._attr_min_mireds
@property
def max_mireds(self) -> int:
"""Return the warmest color_temp that this light supports."""
# Default to the Philips Hue value that HA has always assumed
# https://developers.meethue.com/documentation/core-concepts
return self._attr_max_mireds
@property
def white_value(self) -> int | None:
"""Return the white value of this light between 0..255."""
return None
@property
def effect_list(self) -> list[str] | None:
"""Return the list of supported effects."""
return self._attr_effect_list
@property
def effect(self) -> str | None:
"""Return the current effect."""
return self._attr_effect
@property
def capability_attributes(self):
"""Return capability attributes."""
data = {}
supported_features = self.supported_features
supported_color_modes = self._light_internal_supported_color_modes
if COLOR_MODE_COLOR_TEMP in supported_color_modes:
data[ATTR_MIN_MIREDS] = self.min_mireds
data[ATTR_MAX_MIREDS] = self.max_mireds
if supported_features & SUPPORT_EFFECT:
data[ATTR_EFFECT_LIST] = self.effect_list
data[ATTR_SUPPORTED_COLOR_MODES] = sorted(supported_color_modes)
return data
def _light_internal_convert_color(self, color_mode: str) -> dict:
data: dict[str, tuple] = {}
if color_mode == COLOR_MODE_HS and self.hs_color:
hs_color = self.hs_color
data[ATTR_HS_COLOR] = (round(hs_color[0], 3), round(hs_color[1], 3))
data[ATTR_RGB_COLOR] = color_util.color_hs_to_RGB(*hs_color)
data[ATTR_XY_COLOR] = color_util.color_hs_to_xy(*hs_color)
elif color_mode == COLOR_MODE_XY and self.xy_color:
xy_color = self.xy_color
data[ATTR_HS_COLOR] = color_util.color_xy_to_hs(*xy_color)
data[ATTR_RGB_COLOR] = color_util.color_xy_to_RGB(*xy_color)
data[ATTR_XY_COLOR] = (round(xy_color[0], 6), round(xy_color[1], 6))
elif color_mode == COLOR_MODE_RGB and self.rgb_color:
rgb_color = self.rgb_color
data[ATTR_HS_COLOR] = color_util.color_RGB_to_hs(*rgb_color)
data[ATTR_RGB_COLOR] = tuple(int(x) for x in rgb_color[0:3])
data[ATTR_XY_COLOR] = color_util.color_RGB_to_xy(*rgb_color)
elif color_mode == COLOR_MODE_RGBW and self._light_internal_rgbw_color:
rgbw_color = self._light_internal_rgbw_color
rgb_color = color_util.color_rgbw_to_rgb(*rgbw_color)
data[ATTR_HS_COLOR] = color_util.color_RGB_to_hs(*rgb_color)
data[ATTR_RGB_COLOR] = tuple(int(x) for x in rgb_color[0:3])
data[ATTR_RGBW_COLOR] = tuple(int(x) for x in rgbw_color[0:4])
data[ATTR_XY_COLOR] = color_util.color_RGB_to_xy(*rgb_color)
elif color_mode == COLOR_MODE_RGBWW and self.rgbww_color:
rgbww_color = self.rgbww_color
rgb_color = color_util.color_rgbww_to_rgb(
*rgbww_color, self.min_mireds, self.max_mireds
)
data[ATTR_HS_COLOR] = color_util.color_RGB_to_hs(*rgb_color)
data[ATTR_RGB_COLOR] = tuple(int(x) for x in rgb_color[0:3])
data[ATTR_RGBWW_COLOR] = tuple(int(x) for x in rgbww_color[0:5])
data[ATTR_XY_COLOR] = color_util.color_RGB_to_xy(*rgb_color)
elif color_mode == COLOR_MODE_COLOR_TEMP and self.color_temp:
hs_color = color_util.color_temperature_to_hs(
color_util.color_temperature_mired_to_kelvin(self.color_temp)
)
data[ATTR_HS_COLOR] = (round(hs_color[0], 3), round(hs_color[1], 3))
data[ATTR_RGB_COLOR] = color_util.color_hs_to_RGB(*hs_color)
data[ATTR_XY_COLOR] = color_util.color_hs_to_xy(*hs_color)
return data
@final
@property
def state_attributes(self):
"""Return state attributes."""
if not self.is_on:
return None
data = {}
supported_features = self.supported_features
color_mode = self._light_internal_color_mode
if color_mode not in self._light_internal_supported_color_modes:
# Increase severity to warning in 2021.6, reject in 2021.10
_LOGGER.debug(
"%s: set to unsupported color_mode: %s, supported_color_modes: %s",
self.entity_id,
color_mode,
self._light_internal_supported_color_modes,
)
data[ATTR_COLOR_MODE] = color_mode
if color_mode in COLOR_MODES_BRIGHTNESS:
data[ATTR_BRIGHTNESS] = self.brightness
elif supported_features & SUPPORT_BRIGHTNESS:
# Backwards compatibility for ambiguous / incomplete states
# Add warning in 2021.6, remove in 2021.10
data[ATTR_BRIGHTNESS] = self.brightness
if color_mode == COLOR_MODE_COLOR_TEMP:
data[ATTR_COLOR_TEMP] = self.color_temp
if color_mode in COLOR_MODES_COLOR or color_mode == COLOR_MODE_COLOR_TEMP:
data.update(self._light_internal_convert_color(color_mode))
if supported_features & SUPPORT_COLOR_TEMP and not self.supported_color_modes:
# Backwards compatibility
# Add warning in 2021.6, remove in 2021.10
data[ATTR_COLOR_TEMP] = self.color_temp
if supported_features & SUPPORT_WHITE_VALUE and not self.supported_color_modes:
# Backwards compatibility
# Add warning in 2021.6, remove in 2021.10
data[ATTR_WHITE_VALUE] = self.white_value
if self.hs_color is not None:
data.update(self._light_internal_convert_color(COLOR_MODE_HS))
if supported_features & SUPPORT_EFFECT:
data[ATTR_EFFECT] = self.effect
return {key: val for key, val in data.items() if val is not None}
@property
def _light_internal_supported_color_modes(self) -> set:
"""Calculate supported color modes with backwards compatibility."""
supported_color_modes = self.supported_color_modes
if supported_color_modes is None:
# Backwards compatibility for supported_color_modes added in 2021.4
# Add warning in 2021.6, remove in 2021.10
supported_features = self.supported_features
supported_color_modes = set()
if supported_features & SUPPORT_COLOR_TEMP:
supported_color_modes.add(COLOR_MODE_COLOR_TEMP)
if supported_features & SUPPORT_COLOR:
supported_color_modes.add(COLOR_MODE_HS)
if supported_features & SUPPORT_WHITE_VALUE:
supported_color_modes.add(COLOR_MODE_RGBW)
if supported_features & SUPPORT_BRIGHTNESS and not supported_color_modes:
supported_color_modes = {COLOR_MODE_BRIGHTNESS}
if not supported_color_modes:
supported_color_modes = {COLOR_MODE_ONOFF}
return supported_color_modes
@property
def supported_color_modes(self) -> set[str] | None:
"""Flag supported color modes."""
return self._attr_supported_color_modes
@property
def supported_features(self) -> int:
"""Flag supported features."""
return self._attr_supported_features
def legacy_supported_features(
supported_features: int, supported_color_modes: list[str] | None
) -> int:
"""Calculate supported features with backwards compatibility."""
# Backwards compatibility for supported_color_modes added in 2021.4
if supported_color_modes is None:
return supported_features
if any(mode in supported_color_modes for mode in COLOR_MODES_COLOR):
supported_features |= SUPPORT_COLOR
if any(mode in supported_color_modes for mode in COLOR_MODES_BRIGHTNESS):
supported_features |= SUPPORT_BRIGHTNESS
if COLOR_MODE_COLOR_TEMP in supported_color_modes:
supported_features |= SUPPORT_COLOR_TEMP
return supported_features
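# --- Illustrative aside ------------------------------------------------------
# A minimal sketch of the legacy feature-bitmask fallback used by
# _light_internal_supported_color_modes above. The constant values below are
# redefined here so the snippet is self-contained and mirror the ones in
# homeassistant.components.light; treat them as illustrative assumptions.
SUPPORT_BRIGHTNESS_SKETCH = 1
SUPPORT_COLOR_TEMP_SKETCH = 2
SUPPORT_COLOR_SKETCH = 16
SUPPORT_WHITE_VALUE_SKETCH = 128

def derive_color_modes(supported_features: int) -> set:
    """Replicate the backwards-compatibility mapping for a legacy light."""
    modes = set()
    if supported_features & SUPPORT_COLOR_TEMP_SKETCH:
        modes.add("color_temp")
    if supported_features & SUPPORT_COLOR_SKETCH:
        modes.add("hs")
    if supported_features & SUPPORT_WHITE_VALUE_SKETCH:
        modes.add("rgbw")
    # brightness only counts as a mode when no color mode was derived
    if supported_features & SUPPORT_BRIGHTNESS_SKETCH and not modes:
        modes = {"brightness"}
    if not modes:
        modes = {"onoff"}
    return modes

# A dimmable-only legacy light ends up with just the brightness mode:
assert derive_color_modes(SUPPORT_BRIGHTNESS_SKETCH) == {"brightness"}
# ------------------------------------------------------------------------------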
|
|
###############################################################################
##
## Copyright (C) 2014-2015, New York University.
## Copyright (C) 2011-2014, NYU-Poly.
## Copyright (C) 2006-2011, University of Utah.
## All rights reserved.
## Contact: contact@vistrails.org
##
## This file is part of VisTrails.
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are met:
##
## - Redistributions of source code must retain the above copyright notice,
## this list of conditions and the following disclaimer.
## - Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in the
## documentation and/or other materials provided with the distribution.
## - Neither the name of the New York University nor the names of its
## contributors may be used to endorse or promote products derived from
## this software without specific prior written permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
## AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
## THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
## PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
## CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
## EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
## PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
## OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
## WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
## OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
## ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
##
###############################################################################
""" Contains classes for persisting running executions to disk
"""
from __future__ import division
from vistrails.core.configuration import get_vistrails_configuration
from vistrails.core import debug
from vistrails.core.modules.vistrails_module import NotCacheable, \
ModuleError, ModuleSuspended
from uuid import uuid1
import datetime
import getpass
import json
import time
import unittest
import weakref
class JobMixin(object):
""" Mixin for suspendable modules.
This provides the base behavior for modules that submit jobs by handling
the serialization & JobMonitor interaction for you.
The module developer need only implement the following methods:
job_read_inputs()
job_set_results()
job_start()
job_get_handle()
job_finish()
"""
def compute(self):
""" Base behavior for job-submitting modules.
This provides the base code and calls the methods that the module
developer should provide.
"""
debug.log("%s compute() starting\n"
"signature = %r" % (self.__class__.__name__,
self.signature))
jm = self.job_monitor()
cache = jm.getCache(self.signature)
if cache is not None:
# Result is available from cache
jm.setCache(self.signature, cache.parameters)
debug.log("Cached results found; calling job_set_results()")
self.job_set_results(cache.parameters)
return
else:
debug.log("Cache miss")
job = jm.getJob(self.signature)
if job is None:
debug.log("Job doesn't exist")
params = self.job_read_inputs()
params = self.job_start(params)
else:
debug.log("Got job from JobMonitor")
params = job.parameters
jm.addJob(self.signature, params, self.job_name())
# Might raise ModuleSuspended
debug.log("Calling checkJob()")
try:
jm.checkJob(self, self.signature, self.job_get_handle(params))
except ModuleSuspended, e:
debug.log("checkJob() raised ModuleSuspended, job handle is %r" %
e.handle)
raise
# Didn't raise: job is finished
debug.log("Calling job_finish()")
params = self.job_finish(params)
debug.log("Filling cache")
jm.setCache(self.signature, params)
debug.log("Calling job_set_results()")
self.job_set_results(params)
def update_upstream(self):
""" Decides whether or not to run the upstream.
If a job has already been submitted and the local JobMonitor knows of
it, we don't need to run upstream modules to check the status.
If the status check indicates that the job no longer exists, we should
run the upstream modules and then submit again.
"""
if not hasattr(self, 'signature'):
raise ModuleError(self, "Module has no signature")
jm = self.job_monitor()
if not (jm.getCache(self.signature) or jm.getJob(self.signature)):
# We need to submit a new job
# Update upstream, compute() will need it
super(JobMixin, self).update_upstream()
def job_read_inputs(self):
""" Implemented by modules to read job parameters from input ports.
Returns the `params` dictionary used by subsequent methods.
"""
raise NotImplementedError
def job_start(self, params):
""" Implemented by modules to submit the job.
Gets the `params` dictionary and returns a new dictionary, for example
with additional info necessary to check the status later.
"""
raise NotImplementedError
def job_finish(self, params):
""" Implemented by modules to get info from the finished job.
This is called once the job is finished to get the results. These can
be added to the `params` dictionary that this method returns.
This is the right place to clean up the job from the server if they are
not supposed to persist.
"""
raise NotImplementedError
def job_set_results(self, params):
""" Implemented by modules to set the output ports.
This is called after job_finished() or after getting the cached results
to set the output ports on this module, from the `params` dictionary.
"""
raise NotImplementedError
def job_get_handle(self, params):
""" Implemented by modules to return the JobHandle object.
This returns an object following the JobHandle interface. The
JobMonitor will use it to check the status of the job and call back
this module once the job is done.
JobHandle needs the following method:
* finished(): returns True if the job is finished
"""
return None
def job_name(self):
""" Readable name for the job.
Modules needn't override this, in which case a default will be
provided.
"""
# use module description if it exists
if 'pipeline' in self.moduleInfo and self.moduleInfo['pipeline']:
p_modules = self.moduleInfo['pipeline'].modules
p_module = p_modules[self.moduleInfo['moduleId']]
if p_module.has_annotation_with_key('__desc__'):
return p_module.get_annotation_by_key('__desc__').value
return self.__class__.__name__
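# --- Illustrative aside ------------------------------------------------------
# A minimal, hypothetical JobMixin subclass showing how the five hooks fit
# together. "SleepJob" and its fake handle exist only for illustration; a real
# module would also inherit the package's Module class and would talk to an
# actual batch system in job_start()/job_get_handle(). The port-access calls
# (get_input/set_output) are shown schematically.
class _FakeHandle(object):
    def __init__(self, end_time):
        self.end_time = end_time

    def finished(self):
        # the only method the JobMonitor requires of a handle
        return time.time() >= self.end_time

class SleepJob(JobMixin, NotCacheable):
    def job_read_inputs(self):
        return {'duration': self.get_input('duration')}

    def job_start(self, params):
        # "submit" the job: here it is just a wall-clock deadline
        params['end_time'] = time.time() + params['duration']
        return params

    def job_get_handle(self, params):
        return _FakeHandle(params['end_time'])

    def job_finish(self, params):
        return params

    def job_set_results(self, params):
        self.set_output('finished_at', params['end_time'])
# ------------------------------------------------------------------------------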
class Workflow(object):
""" Represents a workflow that has jobs.
It can have one or several suspended modules.
It can be serialized to disk.
"""
def __init__(self, version, name='untitled', id=None, user=None,
start=None, jobs=None):
""" __init__(version: str/int, name: str, id: str,
user: str, start: str, jobs: list) -> None
version - workflow version
name - a human readable name for the job
id - persistent identifier
user - who started the job
start - start time
jobs - a dict with jobs
"""
self.version = version
self.name = name
self.id = id if id else str(uuid1())
self.user = user if user else getpass.getuser()
self.start = start if start else datetime.datetime.now().isoformat()
self.jobs = jobs if jobs else {}
# parent modules are stored as temporary exceptions
self.parents = {}
def to_dict(self):
wf = dict()
wf['version'] = self.version
wf['id'] = self.id
wf['name'] = self.name
wf['user'] = self.user
wf['start'] = self.start
wf['jobs'] = self.jobs.keys()
return wf
@staticmethod
def from_dict(wf):
return Workflow(wf['version'], wf['name'], wf['id'],
wf['user'], wf['start'], wf['jobs'])
def __eq__(self, other):
if self.version != other.version:
return False
if self.name != other.name:
return False
if self.id != other.id:
return False
if self.user != other.user:
return False
if self.start != other.start:
return False
if len(self.jobs) != len(other.jobs):
return False
if set(self.jobs) != set(other.jobs):
return False
return True
def reset(self):
for job in self.jobs.itervalues():
job.reset()
self.parents = {}
def completed(self):
""" Returns true if there are no suspended jobs
"""
for job in self.jobs.itervalues():
if not job.finished:
return False
return True
class Job(object):
"""A suspended module.
"""
def __init__(self, id, parameters, name='', start=None, finished=False):
""" __init__(id: str, parameters: dict, name: str, start: str,
finished: bool)
id - persistent identifier
parameters - either output values or job parameters
start - start time
finished - is it finished or running?
"""
self.id = id
self.parameters = parameters
self.name = name
self.start = start if start else datetime.datetime.now().isoformat()
self.finished = finished
# A ready job is ready to output its result
self.ready = False
self.updated = True
def reset(self):
self.updated = False
self.ready = False
def mark(self):
""" Mark job as updated and not finished
"""
self.updated = True
# Old-style jobs may need to reset finished status
self.finished = False
self.ready = False
def finish(self, params=None):
self.parameters = params if params else {}
self.finished = True
def description(self):
return self.parameters.get('__desc__', '')
def to_dict(self):
m = dict()
m['id'] = self.id
m['parameters'] = self.parameters
m['name'] = self.name
m['start'] = self.start
m['finished'] = self.finished
return m
@staticmethod
def from_dict(m):
return Job(m['id'],
m['parameters'],
m['name'],
m['start'],
m['finished'])
def __eq__(self, other):
if self.id != other.id:
return False
if self.parameters != other.parameters:
return False
if self.start != other.start:
return False
if self.finished != other.finished:
return False
return True
class JobMonitor(object):
""" Keeps a list of running jobs and the current job for a vistrail.
Jobs added by the interpreter are saved with the vistrail.
A callback mechanism is used to interact with the associated GUI component.
"""
def __init__(self, json_string=None):
self._current_workflow = None
self.workflows = {}
self.jobs = {}
self.callback = None
if json_string is not None:
self.unserialize(json_string)
def setCallback(self, callback=None):
""" setCallback(callback: class) -> None
Sets the callback used to notify the associated GUI component
"""
self.callback = weakref.ref(callback)
###########################################################################
# Running Workflows
def serialize(self):
""" serialize() -> None
serializes the running jobs to json
"""
_dict = {}
jobs = dict()
for id, job in self.jobs.items():
jobs[id] = job.to_dict()
_dict['jobs'] = jobs
workflows = dict()
for id, workflow in self.workflows.items():
workflows[id] = workflow.to_dict()
_dict['workflows'] = workflows
return json.dumps(_dict)
def unserialize(self, s):
""" unserialize(s: str) -> None
unserializes the running jobs from json
"""
_dict = json.loads(s)
jobs = _dict.get('jobs', {})
self.jobs = {}
for id, job in jobs.iteritems():
self.jobs[id] = Job.from_dict(job)
workflows = _dict.get('workflows', {})
self.workflows = {}
for id, workflow in workflows.iteritems():
workflow['jobs'] = dict([(i, self.jobs[i])
for i in workflow['jobs']
if i in self.jobs])
wf = Workflow.from_dict(workflow)
self.workflows[id] = wf
return self.workflows
def addWorkflow(self, workflow):
""" addWorkflow(workflow: Workflow) -> None
"""
self.workflows[workflow.id] = workflow
for id, job in workflow.jobs.iteritems():
self.jobs[id] = job
def getWorkflow(self, id):
""" getWorkflow(id: str) -> Workflow
Checks if a workflow exists using its id and returns it
"""
return self.workflows.get(id, None)
def deleteWorkflow(self, id):
""" deleteWorkflow(id: str) -> None
deletes a workflow
"""
workflow = self.workflows[id]
del self.workflows[id]
# delete jobs that only occur in this workflow
for job_id in workflow.jobs:
delete = True
for wf in self.workflows.values():
if job_id in wf.jobs:
delete = False
if delete:
del self.jobs[job_id]
if self.callback is not None and self.callback() is not None:
self.callback().deleteWorkflow(id)
def deleteJob(self, id):
""" deleteJob(id: str, parent_id: str) -> None
deletes a job from all workflows
"""
del self.jobs[id]
for wf in self.workflows.itervalues():
if id in wf.jobs:
del wf.jobs[id]
if self.callback is not None and self.callback() is not None:
self.callback().deleteJob(id)
###########################################################################
# _current_workflow methods
def currentWorkflow(self):
""" currentWorkflow() -> Workflow
"""
return self._current_workflow
def startWorkflow(self, workflow):
""" startWorkflow(workflow: Workflow) -> None
"""
if self._current_workflow:
raise Exception("A workflow is still running: %s!" %
self._current_workflow)
workflow.reset()
self._current_workflow = workflow
if self.callback is not None and self.callback() is not None:
self.callback().startWorkflow(workflow)
def addJobRec(self, obj, parent_id=None):
workflow = self.currentWorkflow()
id = obj.module.signature
if obj.children:
for child in obj.children:
self.addJobRec(child, id)
return
if id in workflow.jobs:
# this is an already existing job
# that has been suspended
workflow.jobs[id].mark()
# trigger job update
self.addJob(id, workflow.jobs[id].parameters)
return
# this is a new old-style job that we need to add
self.addJob(id, {'__message__': obj.msg}, obj.name)
def finishWorkflow(self):
""" finish_job() -> None
Finishes the running workflow
"""
workflow = self._current_workflow
# untangle parents
# only keep the top item
c = set()
for exception in workflow.parents.itervalues():
if exception.children:
c.update([id(child) for child in exception.children])
for child in c:
if child in workflow.parents:
del workflow.parents[child]
for parent in workflow.parents.itervalues():
self.addJobRec(parent)
# Assume all unfinished jobs that were not updated are now finished
for job in workflow.jobs.values():
if not job.finished and not job.updated:
job.finish()
if self.callback is not None and self.callback() is not None:
self.callback().finishWorkflow(workflow)
self._current_workflow = None
def addJob(self, id, params=None, name='', finished=False):
""" addJob(id: str, params: dict, name: str, finished: bool) -> uuid
Adds a job to the currently running workflow
"""
params = params if params is not None else {}
if self.hasJob(id):
# update job attributes
job = self.getJob(id)
job.parameters = params
if name:
job.name = name
job.finished = finished
# we want to keep the start date
else:
job = Job(id, params, name, finished=finished)
self.jobs[id] = job
workflow = self.currentWorkflow()
if workflow:
workflow.jobs[id] = job
# we add workflows permanently if they have at least one job
self.workflows[workflow.id] = workflow
if self.callback is not None and self.callback() is not None:
self.callback().addJob(self.getJob(id))
def addParent(self, error):
""" addParent(id: str, name: str, finished: bool) -> None
Adds an exception to be used later
"""
workflow = self.currentWorkflow()
if not workflow:
return # ignore non-monitored jobs
workflow.parents[id(error)] = error
def setCache(self, id, params, name=''):
self.addJob(id, params, name, True)
def checkJob(self, module, id, handle):
""" checkJob(module: VistrailsModule, id: str, handle: object) -> None
Starts monitoring the job for the current running workflow
module - the module to suspend
id - the job identifier
handle - an object following the JobHandle interface, i.e. with a
finished method for checking if the job has completed
"""
if not self.currentWorkflow():
if not handle or not self.isDone(handle):
raise ModuleSuspended(module, 'Job is running',
handle=handle)
job = self.getJob(id)
if self.callback is not None and self.callback() is not None:
self.callback().checkJob(module, id, handle)
return
conf = get_vistrails_configuration()
interval = conf.jobCheckInterval
if interval and not conf.jobAutorun:
if handle:
# wait for module to complete
try:
while not self.isDone(handle):
time.sleep(interval)
print ("Waiting for job: %s,"
"press Ctrl+C to suspend") % job.name
except KeyboardInterrupt:
raise ModuleSuspended(module, 'Interrupted by user, job'
' is still running', handle=handle)
else:
if not handle or not self.isDone(handle):
raise ModuleSuspended(module, 'Job is running',
handle=handle)
def getJob(self, id):
""" getJob(id: str) -> Job
"""
return self.jobs.get(id, None)
def getCache(self, id):
""" getCache(id: str) -> Job
Checks if a completed module exists using its id and returns it
"""
job = self.jobs.get(id, None)
return job if job and job.finished else None
def hasJob(self, id):
""" hasJob(id: str) -> bool
Checks if a job exists
"""
return id in self.jobs
def updateUrl(self, new, old):
for workflow in self.workflows.values():
if workflow.vistrail == old:
workflow.vistrail = new
def isDone(self, handle):
""" isDone(self, monitor) -> bool
A job is done when it reaches finished or failed state
val() is used by stable batchq branch
"""
finished = handle.finished()
if hasattr(finished, 'val'):
finished = finished.val()
if finished:
return True
# FIXME : deprecate this, remove from RemoteQ
# finished should just return True here too
if hasattr(handle, 'failed'):
failed = handle.failed()
if hasattr(failed, 'val'):
failed = failed.val()
if failed:
return True
return False
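# --- Illustrative aside ------------------------------------------------------
# Persistence round-trip (the file name is hypothetical): the JobMonitor state
# is a plain JSON string, so saving and restoring running executions to disk
# is just writing and re-reading that string.
def save_job_state(jm, path='jobs.json'):
    with open(path, 'w') as f:
        f.write(jm.serialize())

def load_job_state(path='jobs.json'):
    with open(path) as f:
        return JobMonitor(f.read())
# ------------------------------------------------------------------------------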
###############################################################################
# Testing
class TestJob(unittest.TestCase):
def test_job(self):
jm = JobMonitor()
job1 = Job('`13/5', {'a': 3, 'b': '7'})
job2 = Job('3', {'a': 6}, 'my_name', 'a_string_date', True)
# test module to/from dict
job3 = Job.from_dict(job2.to_dict())
self.assertEqual(job2, job3)
workflow1 = Workflow(26)
workflow2 = Workflow('tagname', 'myjob', 'myid', 'tommy',
'2013-10-07 13:06',
{job1.id: job1, job2.id: job2})
# test workflow to/from dict
workflow3 = Workflow.from_dict(workflow2.to_dict())
self.assertEqual(workflow2, workflow3)
# test start/finish job
jm.startWorkflow(workflow2)
self.assertEqual(workflow2, jm._current_workflow)
jm.finishWorkflow()
self.assertEqual(None, jm._current_workflow)
# test add job
jm.startWorkflow(workflow2)
jm.addJob('my_uuid_id', {'myparam': 0})
self.assertIn('my_uuid_id', workflow2.jobs)
jm.finishWorkflow()
# test serialization
jm.addWorkflow(workflow1)
jm.addWorkflow(workflow2)
jm.unserialize(jm.serialize())
self.assertIn(workflow1.id, jm.workflows)
self.assertIn(workflow2.id, jm.workflows)
self.assertEqual(workflow1, jm.workflows[workflow1.id])
self.assertEqual(workflow2, jm.workflows[workflow2.id])
|
|
# -*- coding: utf-8 -*-
#MIT License
#Copyright (c) 2017 Marton Kelemen
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
#SOFTWARE.
import numpy as np
import sys
from scipy import stats
# performs the combined QC all at once in a single pass over the columns, which is more efficient
# individual QC filters can be disabled by setting their threshold to -1
def genoQC_all(X, rsIds = None, replaceMissing = True, minObserved = 0.95, minMAF = 0.01, minVariance = 0.02) :
indicesToRemove = list()
indicesKept = np.asarray( range(X.shape[1]) )
#print("orig number of indices " + str(len(indicesKept) ) )
numIndividuals = X.shape[0]
MAFs = np.zeros(X.shape[1] ) # create a sparse array that will hold the MAFs
for i in range(0, X.shape[1]): # go through each col
alreadyRemoved = False # only want to remove SNPs once
snpId = str(i)
if rsIds is not None : snpId = rsIds[i] # if we have supplied the Rs Ids, then we want to display the name of the ones we have removed
if alreadyRemoved is False and minObserved != -1 : # if we do want to filter for missingness
count = np.sum(X[:,i] == -1)
percMissing = count / numIndividuals
if percMissing >= (1 - minObserved) :
indicesToRemove.append(i)
alreadyRemoved = True
print("SNP " + snpId + " has too many missing(" + str(percMissing)+ ")")
if replaceMissing : X[:,i][ X[:,i] ==-1] = 0 # replace no calls with 0
if minMAF != -1 : # if we do want to filter for minimum MAF,
count = np.sum(X[:,i])
MAF = count / ( numIndividuals * 2) # dont forget we are Diploids!!! so *2
MAFs[i] = MAF # save away MAF
if alreadyRemoved is False and MAF < minMAF : # the MAF itself is always computed above, even for removed SNPs; we only flag a SNP for removal once
indicesToRemove.append(i)
alreadyRemoved = True
print("SNP " + snpId + " has too low MAF(" + str(MAF)+ "), count: " + str(count) + " / " + str(numIndividuals))
if alreadyRemoved is False and minVariance != -1 :
SNP_var = np.var(X[:,i]) # this is the Population variance,
# print("variance for SNP",i , " is: " , SNP_var)
if(SNP_var < minVariance ) :
indicesToRemove.append(i)
alreadyRemoved = True
print("SNP " + snpId + " has too low variance(" + str(SNP_var) + ")" )
rsIds_qc = None
if rsIds is not None :
rsIds_qc = np.delete(rsIds, indicesToRemove)
MAFs_qc = np.delete(MAFs, indicesToRemove)
X_qc = np.delete(X, indicesToRemove, axis=1)
indicesKept = np.delete(indicesKept, indicesToRemove)
print("QC removed" , len(indicesToRemove), " SNPs out of" , X.shape[1], " / indicesKept: (" , len(indicesKept), ")" )
return ( {"X":X_qc, "rsIds":rsIds_qc, "MAFs":MAFs_qc, "indicesToRemove": indicesToRemove, "indicesKept": indicesKept } )
def computeMAFScore(MAFs, alpha = -0.25) : # computes the MAF Score as suggested by Speed 2017
# vectorised: the previous per-element loop just recomputed the whole array on every iteration
# e.g. MAF = 0.5 and alpha = -0.25 gives (0.25)**0.75, roughly 0.354
MAFScore = (MAFs * (1-MAFs) )**(1+alpha )
return(MAFScore)
def removeList(X, indicesToRemove, rsIds = None) :
rsIds_qc = None
if rsIds is not None :
rsIds_qc = np.delete(rsIds, indicesToRemove)
X_qc = np.delete(X, indicesToRemove, axis=1)
return ( {"X":X_qc, "rsIds":rsIds_qc} )
def removeRareVariants_quick(X, MAFs, rsIds = None, minMAF = 0.01) :
indicesToRemove = list()
for i in range(0, len(MAFs)): # go through SNPs MAF
snpId = str(i)
if rsIds is not None : snpId = rsIds[i] # if we have supplied the Rs Ids, then we want to display the name of the ones we have removed
MAF = MAFs[i]
if( MAF < minMAF ) :
indicesToRemove.append(i)
print("SNP " + snpId + " is has too low MAF(" + str(MAF)+ ")")
print("QC removed " , len(indicesToRemove), " SNPs out of " , X.shape[1])
return ( np.delete(X, indicesToRemove, axis=1) )
def replaceMissing(X) : # this modifies the ORIGINAL data
for i in range(0, X.shape[1]): # go through each col
X[:,i][ X[:,i] ==-1] = 0 # replace no calls with 0
return (X )
def removeMissing(X, rsIds = None, minObserved = 0.95) :
indicesToRemove = list()
for i in range(0, X.shape[1]): # go through each col
snpId = str(i)
if rsIds is not None : snpId = rsIds[i] # if we have supplied the Rs Ids, then we want to display the name of the ones we have removed
count = np.sum(X[:,i] == -1)
percMissing = count / X.shape[0]
##print("number of missing for SNP",i , " is: " , count)
if( percMissing >= (1 - minObserved) ) :
indicesToRemove.append(i)
print("SNP " + snpId + " has too many missing(" + str(percMissing)+ ")")
print("QC removed " , len(indicesToRemove), " SNPs out of " , X.shape[1])
return ( np.delete(X, indicesToRemove, axis=1) )
def removeRareVariants(X, rsIds = None, minMAF = 0.01) :
indicesToRemove = list()
numIndividuals = X.shape[0]
print("numIndividuals:" ,numIndividuals)
for i in range(0, X.shape[1]): # go through each col
snpId = str(i)
if rsIds is not None : snpId = rsIds[i] # if we have supplied the Rs Ids, then we want to display the name of the ones we have removed
count = np.sum(X[:,i])
MAF = count / ( numIndividuals * 2) # dont forget we are Diploids!!! so *2
if( MAF < minMAF ) :
indicesToRemove.append(i)
print("SNP " + snpId + " is has too low MAF(" + str(MAF)+ "), count: " + str(count) + " / " + str(numIndividuals))
print("QC removed " , len(indicesToRemove), " SNPs out of " , X.shape[1])
return ( np.delete(X, indicesToRemove, axis=1) )
def removeMonomorphicVariants(X, rsIds = None) :
colSDs = np.zeros(X.shape[1])
indicesToRemove = list()
for i in range(0, X.shape[1]): # go through each col
snpId = str(i)
if rsIds is not None : snpId = rsIds[i] # if we have supplied the Rs Ids, then we want to display the name of the ones we have removed
colSDs[i] = np.std(X[:,i]) # this is the Population variance,
if(colSDs[i] == 0 ) :
indicesToRemove.append(i)
print("SNP " + snpId + " is monomorphic")
print("QC removed " , len(indicesToRemove), " SNPs out of " , X.shape[1])
return ( np.delete(X, indicesToRemove, axis=1) )
def removeLowVarianceSNPs(X, rsIds = None, minVariance = 0.02) :
indicesToRemove = list()
for i in range(0, X.shape[1]): # go through each col
snpId = str(i)
if rsIds is not None : snpId = rsIds[i] # if we have supplied the Rs Ids, then we want to display the name of the ones we have removed
SNP_var = np.var(X[:,i]) # this is the Population variance,
print("variance for SNP",i , " is: " , SNP_var)
if(SNP_var < minVariance ) :
indicesToRemove.append(i)
print("SNP " +snpId + " has too low variance" + str(SNP_var) + " )" )
print("QC removed " , len(indicesToRemove), " SNPs out of " , X.shape[1])
return ( np.delete(X, indicesToRemove, axis=1) )
def standardise_Genotypes(X, rsIds = None) :
return(zscore(X)) # returns (X standardised, column means, column SDs); zscore casts to float32 so nothing gets upconverted to float64
# Sum over an axis is a reduction operation so the specified axis disappears.
def zscore(a, axis=0, EPSILON = -1):
a = a.astype('float32') # if we don't cast this then this would upconvert everything to float64
mns = a.mean(axis=axis)
sstd = a.std(axis=axis)
mns = mns.astype('float32')
sstd = sstd.astype('float32')
if EPSILON != -1 : sstd += EPSILON # for numerical stability
sstd[sstd==0] = 1 # dont want division by zero
#a = (a - mns) / sstd
a -= mns
a /= sstd
return a, mns, sstd
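# --- Illustrative aside ------------------------------------------------------
# Sketch (names hypothetical): reuse the training-fold means/SDs returned by
# zscore() to standardise a validation fold consistently; this sidesteps the
# monomorphic-SNP division-by-zero issue described in the comment below.
def apply_standardisation(X_new, mns, sstd):
    X_new = X_new.astype('float32')
    X_new -= mns
    X_new /= sstd
    return X_new
# ------------------------------------------------------------------------------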
# this can throw an error:
# /BSU/Cluster_Apps/Python/3.6.0/lib/python3.6/site-packages/scipy/stats/stats.py:2247: RuntimeWarning: invalid value encountered in true_divide return (a - mns) / sstd
# if a variant is monomorphic... can happen for the validation fold when the sample size is small
# i.e. the SNP passed QC for M_training, but then fails for M_valid... as the std is 0
# https://fossies.org/linux/misc/scipy-0.19.1.tar.xz/scipy-0.19.1/scipy/stats/stats.py
def standardise_Genotypes_slow(X, rsIds = None) : # this is ~80x slower than the above
# calculate col SD and means for SNPs
colMeans = np.zeros(X.shape[1])
colSDs = np.zeros(X.shape[1])
for i in range(0, X.shape[1]): # go through each col
snpId = str(i)
if rsIds is not None : snpId = rsIds[i] # if we have supplied the Rs Ids, then we want to display the name of the ones we have removed
colMeans[i] = np.mean(X[:,i])
colSDs[i] = np.std(X[:,i]) # this is the Population variance, IE no correction DFs lost
if(colSDs[i] == 0 ) : raise ValueError("SNP " + snpId + " is monomorphic")
## Standardise SNPs: calculate Zscores
X_zScore = np.zeros((X.shape[0], X.shape[1]))
for col in range(0, X.shape[1]): # go through all columns
for row in range(0, X.shape[0]): # go through all rows
# zScore = difference from mean, divided by SD
X_zScore[row,col] = (X[row,col] - colMeans[col] ) / colSDs[col]
return(X_zScore)
# standardises this in place, if it doesn't need to convert datatypes
def standardise_Genotypes_01(X, convertDataType = -1) :
print("standardise Genotypes to be within 0-1, rather than Z-scores")
if convertDataType != -1 :
print("converting datatype to " , convertDataType)
X = X.astype(convertDataType)
for i in range(0, X.shape[1]): # go through each col
x = X[:,i]
max_x = np.max(x)
min_x = np.min(x)
denominator = (max_x - min_x)
if denominator != 0 : X[:,i] = (x - min_x) / denominator # watch out for division by 0
return(X)
def getSizeInMBs(myObject) :
if myObject is None : return 0.
return ( np.round( myObject.nbytes / 1024/ 1024 ) )
def getSizeInGBs(myObject) :
if myObject is None : return 0.
return ( np.round( myObject.nbytes * 10 / 1024/ 1024 / 1024 ) / 10 )
#def getSizeInMBs(myObject) :
# return ( np.round( sys.getsizeof(myObject) / 1024/ 1024 ) )
#
#def getSizeInGBs(myObject) :
# return ( np.round( sys.getsizeof(myObject) * 10 / 1024/ 1024 / 1024 ) / 10 )
# z-scale data
# from sklearn.preprocessing import StandardScaler
#sc = StandardScaler()
#X_train_s = sc.fit_transform(xorInput)
#X_test_s = sc.transform(X_test)
|
|
import demistomock as demisto
from CommonServerPython import *
from datetime import timezone
from typing import List, Dict, Any
import json
import requests
# Disable insecure warnings
requests.packages.urllib3.disable_warnings()
class Client(BaseClient):
"""
Client will implement the service API, and should not contain any Demisto logic.
Should only do requests and return data.
"""
def __init__(self, base_url, verify, client_id, token, proxy):
"""
Constructor which adds the authentication headers required by IllusionBLACK external API
Args:
base_url: IllusionBLACK URL. For example: https://experience.illusionblack.com
verify: Whether to verify SSL certificates
client_id: IllusionBLACK API client id (sent as the x-client-id header)
token: IllusionBLACK API auth token (sent as the x-client-auth header)
proxy: Whether to use system proxy settings
"""
headers = {"x-client-id": client_id, "x-client-auth": token}
super().__init__(base_url=base_url, verify=verify, proxy=proxy, headers=headers)
def ping(self):
"""
Initiates an HTTP request to the IllusionBLACK test endpoint /ping
"""
response = self._http_request(
method="GET",
url_suffix="/ping",
ok_codes=(200,)
)
return response.get("message", "error")
def get_ad_decoys(self):
"""
Gets a list of Active Directory (AD) user decoys from IllusionBLACK
Returns: A tuple containing the response in human readable, context data and raw response formats
"""
response = self._http_request(
method="GET",
url_suffix="/decoy/users",
ok_codes=(200,)
)
users = response["items"]
return (
tableToMarkdown(
"IllusionBLACK AD Decoys",
users,
headerTransform=lambda s: " ".join([w.capitalize() for w in s.split("_")])
),
{"IllusionBlack.AdDecoy(val.user_name==obj.user_name)": users},
users
)
def get_network_decoys(self):
"""
Gets a list of network decoys from IllusionBLACK and enriches the data with the list of services enabled.
Returns: A tuple containing the response in human readable, context data and raw response formats
"""
response = self._http_request(
method="GET",
url_suffix="/decoy/hosts",
ok_codes=(200,)
)
hosts: List = response["items"]
for h in hosts:
h["services"] = ", ".join(h["services"])
return (
tableToMarkdown("IllusionBLACK Network Decoys", hosts, headerTransform=lambda s: s.capitalize()),
{"IllusionBlack.NetworkDecoy(val.name==obj.name)": hosts},
hosts
)
def get_ti_decoys(self):
"""
Gets a list of Threat Intelligence decoys from IllusionBLACK.
Returns: A tuple containing the response in human readable, context data and raw response formats
"""
response = self._http_request(
method="GET",
url_suffix="/decoy/recon",
ok_codes=(200,)
)
recon_decoys = response["items"]
return (
tableToMarkdown(
"IllusionBLACK TI Decoys",
recon_decoys,
headerTransform=lambda s: " ".join([w.capitalize() for w in s.split("_")])
),
{"IllusionBlack.TIDecoy(val.name==obj.name)": recon_decoys},
recon_decoys
)
def is_host_decoy(self, host):
"""
Checks if the host is an IllusionBLACK network decoy
Args:
host: The name of the entity. For example: SMB-12
Returns: True if host is a decoy else False
"""
response = self._http_request(
method="GET",
url_suffix="/decoy/hosts",
ok_codes=(200,)
)
hosts: List = response["items"]
for decoy_host in hosts:
if host == decoy_host["name"]:
return "True", {"IllusionBlack.IsHostDecoy": {"Host": host, "Value": True}}
return "False", {"IllusionBlack.IsHostDecoy": {"Host": host, "Value": False}}
def is_user_decoy(self, user):
"""
Checks if the user is an IllusionBLACK AD user decoy
Args:
user: The user name of the AD user to check
Returns: True if user is a decoy else False
"""
response = self._http_request(
method="GET",
url_suffix="/decoy/users",
ok_codes=(200,)
)
users: List = response["items"]
for decoy_user in users:
if user.lower() == decoy_user["user_name"].lower():
return "True", {"IllusionBlack.IsUserDecoy": {"User": user, "Value": True}}
return "False", {"IllusionBlack.IsUserDecoy": {"User": user, "Value": False}}
def is_subdomain_decoy(self, subdomain):
"""
Checks if the subdomain is an IllusionBLACK TI decoy
Args:
subdomain: The subdomain to check. For example: experience.illusionblack.com
Returns: True if subdomain is a decoy else False
"""
response = self._http_request(
method="GET",
url_suffix="/decoy/recon",
ok_codes=(200,)
)
ti_decoys: List = response["items"]
for ti_decoy in ti_decoys:
if subdomain == ti_decoy["name"]:
return "True", {"IllusionBlack.IsSubdomainDecoy": {"Subdomain": subdomain, "Value": True}}
return "False", {"IllusionBlack.IsSubdomainDecoy": {"Subdomain": subdomain, "Value": False}}
def get_events(self, limit=None, query=None, from_time=None, to_time=None):
"""
Gets Events and corresponding Threat Parse data from IllusionBLACK based on the filtering parameters.
Args:
limit: Number of events to return per API call. Defaults to 10.
query: IllusionBLACK orchestrate engine query string. Refer to IllusionBLACK doc for reference.
from_time: ISO-8601 formatted datetime string of the starting time in the filter
to_time: ISO-8601 formatted datetime string of the ending time in the filter
Returns: A tuple with raw events and threat parse data corresponding to the events
"""
raw_events, raw_threat_parse, offset = [], {}, 0 # type: ignore
while True:
response = self._http_request(
method="GET",
url_suffix="/events",
params={"limit": limit, "expfilter": query, "from": from_time, "to": to_time, "offset": offset},
ok_codes=(200,)
)
meta: Dict = response["meta"]
amount = meta["paging"]["amount"]
raw_events.extend(response["events"])
for tp in response.get("threat_parse", []):
tp_id = tp["id"]
if tp_id not in raw_threat_parse:
tp.pop("id", None)
raw_threat_parse[tp_id] = tp
offset += 1000
if amount < 1000:
break
return raw_events, raw_threat_parse
def test_module(client):
"""
Returning "ok" indicates that the integration works like it is supposed to. Connection to the service is successful.
Args:
client: IllusionBLACK client
Returns:
"ok" if test passed, anything else will fail the test.
"""
try:
message = client.ping()
if message == "pong":
return "ok"
except DemistoException as e:
if e.args[0] == "Error in API call [401] - Unauthorized":
return_error("Failed to connect to IllusionBLACK. External API Token or Client Id might be invalid.")
else:
raise e
def convert_to_demisto_severity(ib_severity="medium", tp_score_based=False, score=0):
"""
Converts the IllusionBLACK Threat Parse score for an attacker to demisto incident severity
Args:
ib_severity: IllusionBLACK severity. Some events do not have threat parse score.
tp_score_based: If score is based on Threat Parse cumulative score
score: The cumulative Threat Parse score
Returns: The demisto incident severity ranging from 1 to 4
"""
severity = 1
if tp_score_based:
severity = score // 25
severity = 1 if severity < 1 else severity
severity = 4 if severity > 4 else severity
else:
if ib_severity == "low":
severity = 2
elif ib_severity == "medium":
severity = 3
elif ib_severity == "high":
severity = 4
return severity
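# --- Illustrative aside ------------------------------------------------------
# Worked example of the mapping above, safe to run standalone: a cumulative
# Threat Parse score of 130 gives 130 // 25 = 5, which is clamped to the
# maximum demisto severity of 4; a plain "low" IllusionBLACK severity maps to 2.
def _severity_examples():
    assert convert_to_demisto_severity(tp_score_based=True, score=130) == 4
    assert convert_to_demisto_severity(ib_severity="low") == 2
# ------------------------------------------------------------------------------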
def process_events(events, threat_parse):
"""
Converts raw events and raw threat parse to demisto incidents based on common parameters.
Args:
events: Raw events from IllusionBLACK
threat_parse: Raw Threat Parse from IllusionBLACK
Returns: A list of raw incidents with data pertinent to demisto incident format.
"""
raw_incident_data: Dict[str, Any] = {}
for event in events:
attacker_id = event.get("attacker.id", "")
decoy_id = event.get("decoy.id", "")
attack_type = event.get("type", "")
ib_severity = event.get("severity")
incident_id = "-".join(filter(None, [attacker_id, decoy_id, attack_type])).rstrip("-")
title = f"{attack_type} activity by {attacker_id} on {decoy_id} decoy"
tps = event.get("threat_parse_ids", [])
score, is_tp = 0, False
for tp in tps:
is_tp = True
score += threat_parse[tp]["score"]
severity = convert_to_demisto_severity(ib_severity=ib_severity, tp_score_based=is_tp, score=score)
raw_incident = raw_incident_data.setdefault(
incident_id,
{
"events": [],
"threat_parse_ids": [],
"title": "",
"severity": 1,
"attack_type": "illusionblack_event",
"attacker_id": "",
"decoy_id": "",
"source": "IllusionBLACK"
}
)
raw_incident["events"].append(event["id"])
raw_incident["threat_parse_ids"].extend(tps)
raw_incident["threat_parse_ids"] = list(set(raw_incident["threat_parse_ids"]))
raw_incident["title"] = title
raw_incident["severity"] = severity
raw_incident["attack_type"] = attack_type
raw_incident["attacker_id"] = attacker_id
raw_incident["decoy_id"] = decoy_id
return raw_incident_data
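# --- Illustrative aside ------------------------------------------------------
# Example input/output for process_events, using the event fields read above
# (attacker.id, decoy.id, type, severity, threat_parse_ids) with made-up
# values: one rdp event collapses into a single incident keyed
# "10.0.0.5-SMB-12-rdp".
def _process_events_example():
    events = [{"id": "e1", "attacker.id": "10.0.0.5", "decoy.id": "SMB-12",
               "type": "rdp", "severity": "high", "threat_parse_ids": []}]
    incidents = process_events(events, {})
    assert "10.0.0.5-SMB-12-rdp" in incidents
    return incidents
# ------------------------------------------------------------------------------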
def create_incident(raw_incident):
"""
Creates a demisto incident from a raw incident.
Args:
raw_incident: The data in the raw incident processed from raw events and Threat Parse from IllusionBLACK
Returns: Demisto incident dict
"""
demisto.info(f"Severity is {raw_incident['severity']}")
return {
"name": raw_incident["title"],
"severity": raw_incident["severity"],
"rawJSON": json.dumps(raw_incident)
}
def fetch_incidents(first_fetch, client):
"""
Automated fetching of incidents from IllusionBLACK. On the first run, the first_fetch timespan determines how far back events are fetched.
Args:
first_fetch: For the first fetch, the timespan to look back for incidents. Example: 2 days, 5 weeks etc.
client: IllusionBLACK client
Returns: Demisto Incidents
"""
now = datetime.now(tz=timezone.utc)
demisto.info(f"IllusionBLACK: Fetching incidents at {now}")
demisto_last_run = demisto.getLastRun()
if "last_run" in demisto_last_run:
last_run = datetime.fromisoformat(demisto_last_run["last_run"])
else:
last_run, _ = parse_date_range(first_fetch)
last_run = last_run.replace(tzinfo=timezone.utc)
if now - last_run < timedelta(minutes=5):
return []
from_time = last_run.replace(microsecond=0).isoformat()
to_time = now.replace(microsecond=0).isoformat()
demisto.debug(f"IllusionBLACK: Getting raw events from {from_time} to {to_time}")
events, all_threat_parse = client.get_events(limit=1000, from_time=from_time, to_time=to_time)
raw_incidents = process_events(events, all_threat_parse)
incidents = []
for incident_id, raw_incident in raw_incidents.items():
incidents.append(create_incident(raw_incident))
demisto.setLastRun({"last_run": to_time})
return incidents
def main():
client_id = demisto.params().get("client_id")
token = demisto.params().get("token")
base_url = urljoin(demisto.params()["url"], "/apiv1")
verify_certificate = not demisto.params().get("insecure", False)
proxy = demisto.params().get("proxy", False)
LOG(f"Command being called is {demisto.command()}")
try:
client = Client(
base_url=base_url,
verify=verify_certificate,
client_id=client_id,
token=token,
proxy=proxy
)
if demisto.command() == "test-module":
# This is the call made when pressing the integration Test button.
result = test_module(client)
demisto.results(result)
elif demisto.command() == "illusionblack-get-ad-decoys":
return_outputs(*client.get_ad_decoys())
elif demisto.command() == "illusionblack-get-network-decoys":
return_outputs(*client.get_network_decoys())
elif demisto.command() == "illusionblack-get-ti-decoys":
return_outputs(*client.get_ti_decoys())
elif demisto.command() == "illusionblack-is-host-decoy":
return_outputs(*client.is_host_decoy(demisto.args()["host"]))
elif demisto.command() == "illusionblack-is-user-decoy":
return_outputs(*client.is_user_decoy(demisto.args()["user"]))
elif demisto.command() == "illusionblack-is-subdomain-decoy":
return_outputs(*client.is_subdomain_decoy(demisto.args()["subdomain"]))
elif demisto.command() == "illusionblack-get-events":
args = demisto.args()
events, _ = client.get_events(args.get("limit"), args.get("query"), args.get("from"), args.get("to"))
return_outputs(
tableToMarkdown("IllusionBLACK Events", events),
{"IllusionBlack.Event(val.id==obj.id)": events},
events
)
elif demisto.command() == "illusionblack-get-event-by-id":
events, _ = client.get_events(query=f"id == \"{demisto.args()['id']}\"")
if len(events) != 1:
return_error("Invalid event ID")
event = events[0]
return_outputs(
tableToMarkdown("IllusionBLACK Single Event", event),
{"IllusionBlack.Event(val.id==obj.id)": event},
event
)
elif demisto.command() == "fetch-incidents":
demisto.incidents(fetch_incidents(demisto.params().get("first_fetch", "2 days"), client=client))
# Log exceptions
except Exception as e:
return_error(f"Failed to execute {demisto.command()} command. Error: {str(e)}")
if __name__ in ("__main__", "builtins"):
main()
|
|
# -*- coding: utf-8 -*-
# Copyright 2014 Google Inc. All Rights Reserved.
"""Table format resource printer."""
import cStringIO
import json
import operator
import os
from googlecloudsdk.core import log
from googlecloudsdk.core.console import console_attr
from googlecloudsdk.core.resource import resource_printer_base
# Table output column padding.
_TABLE_COLUMN_PAD = 2
def _Stringify(value):
"""Dumps value to JSON if it's not a string."""
if value is None:
return ''
elif isinstance(value, (basestring, console_attr.Colorizer)):
return value
elif hasattr(value, '__str__'):
return str(value)
else:
return json.dumps(value, sort_keys=True)
class TablePrinter(resource_printer_base.ResourcePrinter):
"""A printer for printing human-readable tables.
Printer attributes:
box: Adds table and column border lines if True. The box drawing characters
match the capabilities of the standard output. If UTF-8 is supported
then Unicode character are used, else if the output is a windows console
then the console code page characters are used, otherwise ASCII art is
used. The bool attributes [ascii, utf8, win] override the default.
empty-legend=SENTENCES: Prints SENTENCES after the table if the table has no
rows.
noempty-legend: Disables the default empty table diagnostic to log.status.
noheading: Does not print column headings.
legend=SENTENCES: Prints SENTENCES after the table if the table has at least
one row.
pad=N: The table cell horizontal padding space count. The default is 1 for
box, 2 otherwise.
title=TITLE: Prints a centered TITLE at the top of the table. Included
inside the box if box is also specified.
Attributes:
_rows: The list of all resource columns indexed by row.
"""
_WRITERS = {
'status': lambda x: log.status.write(x + os.linesep),
'debug': log.debug,
'info': log.info,
'warn': log.warn,
'error': log.error,
}
def __init__(self, *args, **kwargs):
"""Creates a new TablePrinter."""
self._rows = []
super(TablePrinter, self).__init__(*args, by_columns=True, **kwargs)
encoding = None
for name in ['ascii', 'utf8', 'win']:
if name in self._attributes:
encoding = name
break
self._console_attr = console_attr.GetConsoleAttr(encoding=encoding,
out=self._out)
def _AddRecord(self, record, delimit=True):
"""Adds a list of columns. Output delayed until Finish().
Args:
record: A JSON-serializable object.
delimit: Prints resource delimiters if True.
"""
self._rows.append(record)
def _Legend(self):
"""Prints the table legend if it was specified.
The legend is one or more lines of text printed after the table data.
"""
writer = self._WRITERS.get(self._attributes.get('log'),
lambda x: self._out.write(x + os.linesep))
if self._rows:
legend = self._attributes.get('legend')
if legend and 'log' not in self._attributes:
legend = os.linesep + legend
else:
legend = self._attributes.get('empty-legend')
if legend is None and 'noempty-legend' not in self._attributes:
legend = 'Listed 0 items.'
writer = self._WRITERS['status']
if legend is not None:
writer(legend)
def Finish(self):
"""Prints the actual table."""
if not self._rows:
# Table is empty but there might be an empty legend.
self._Legend()
return
# Border box decorations.
if 'box' in self._attributes:
box = self._console_attr.GetBoxLineCharacters()
table_column_pad = 1
else:
box = None
table_column_pad = self._attributes.get('pad', _TABLE_COLUMN_PAD)
# Determine the max column widths of heading + rows
rows = [[_Stringify(cell) for cell in row] for row in self._rows]
heading = []
if 'noheading' not in self._attributes:
labels = self._heading or self._column_attributes.Labels()
if labels:
heading = [[_Stringify(cell) for cell in labels]]
col_widths = [0] * max(len(x) for x in rows + heading)
for row in rows + heading:
for i in range(len(row)):
col_widths[i] = max(col_widths[i], len(row[i]))
# Print the title if specified.
title = self._attributes.get('title')
if title is not None:
if box:
line = box.dr
width = 0
sep = 2
for i in range(len(col_widths)):
width += col_widths[i]
if box:
line += box.h * (col_widths[i] + sep)
sep = 3
if width < len(title):
# Title is wider than the table => pad each column to make room.
pad = (len(title) + len(col_widths) - 1) / len(col_widths)
width += len(col_widths) * pad
if box:
line += box.h * len(col_widths) * pad
for i in range(len(col_widths)):
col_widths[i] += pad
if box:
width += 3 * len(col_widths) - 1
line += box.dl
self._out.write(line)
self._out.write(os.linesep)
line = box.v + title.center(width) + box.v
else:
line = title.center(width)
self._out.write(line)
self._out.write(os.linesep)
# Set up box borders.
if box:
t_sep = box.vr if title else box.dr
m_sep = box.vr
b_sep = box.ur
t_rule = ''
m_rule = ''
b_rule = ''
for i in range(len(col_widths)):
cell = box.h * (col_widths[i] + 2)
t_rule += t_sep + cell
t_sep = box.hd
m_rule += m_sep + cell
m_sep = box.vh
b_rule += b_sep + cell
b_sep = box.hu
t_rule += box.vl if title else box.dl
m_rule += box.vl
b_rule += box.ul
self._out.write(t_rule)
self._out.write(os.linesep)
if heading:
line = cStringIO.StringIO()
row = heading[0]
heading = []
for i in range(len(row)):
line.write(box.v + ' ')
line.write(row[i].center(col_widths[i]))
line.write(' ')
line.write(box.v)
self._out.write(line.getvalue())
self._out.write(os.linesep)
self._out.write(m_rule)
self._out.write(os.linesep)
# Sort by columns if requested.
if self._column_attributes:
order = self._column_attributes.Order()
if order:
rows = sorted(rows, key=operator.itemgetter(*order))
align = self._column_attributes.Alignments()
else:
align = None
# Print the left-adjusted columns with space stripped from rightmost column.
# We must flush directly to the output just in case there is a Windows-like
# colorizer. This complicates the trailing space logic.
for row in heading + rows:
pad = 0
for i in range(len(row)):
if box:
self._out.write(box.v + ' ')
width = col_widths[i]
elif i < len(row) - 1:
width = col_widths[i]
else:
width = 0
justify = align[i] if align else lambda s, w: s.ljust(w)
cell = row[i]
if isinstance(cell, console_attr.Colorizer):
if pad:
self._out.write(' ' * pad)
pad = 0
# pylint: disable=cell-var-from-loop
cell.Render(justify=lambda s: justify(s, width))
if box:
self._out.write(' ' * table_column_pad)
else:
pad = table_column_pad
else:
value = justify(cell, width)
if box:
self._out.write(value)
self._out.write(' ' * table_column_pad)
elif value.strip():
if pad:
self._out.write(' ' * pad)
pad = 0
stripped = value.rstrip()
self._out.write(stripped)
pad = table_column_pad + len(value) - len(stripped)
else:
pad += table_column_pad + len(value)
if box:
self._out.write(box.v)
self._out.write(os.linesep)
if box:
self._out.write(b_rule)
self._out.write(os.linesep)
# Print the legend if any.
self._Legend()
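# --- Illustrative aside ------------------------------------------------------
# A standalone sketch (not using the gcloud classes) of the column-width
# computation in Finish(): every column is sized to its widest cell across
# the heading and all rows. The sample values are hypothetical.
def _column_widths(rows):
    widths = [0] * max(len(r) for r in rows)
    for row in rows:
        for i, cell in enumerate(row):
            widths[i] = max(widths[i], len(cell))
    return widths

# _column_widths([['NAME', 'ZONE'], ['instance-1', 'us-central1-a']])
# -> [10, 13]
# ------------------------------------------------------------------------------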
|
|
# Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
from selenium.webdriver.common.by import By
from selenium.common.exceptions import (
InvalidSelectorException,
NoSuchElementException,
WebDriverException)
# By.id positive
def test_Should_Be_Able_To_Find_ASingle_Element_By_Id(driver, pages):
pages.load("xhtmlTest.html")
element = driver.find_element(By.ID, "linkId")
assert element.get_attribute("id") == "linkId"
def test_Should_Be_Able_To_Find_ASingle_Element_By_Numeric_Id(driver, pages):
pages.load("nestedElements.html")
element = driver.find_element(By.ID, "2")
assert element.get_attribute("id") == "2"
def test_should_be_able_to_find_an_element_with_css_escape(driver, pages):
pages.load("idElements.html")
element = driver.find_element(By.ID, "with.dots")
assert element.get_attribute("id") == "with.dots"
def test_Should_Be_Able_To_Find_Multiple_Elements_By_Id(driver, pages):
pages.load("nestedElements.html")
elements = driver.find_elements(By.ID, "test_id")
assert len(elements) == 2
def test_Should_Be_Able_To_Find_Multiple_Elements_By_Numeric_Id(driver, pages):
pages.load("nestedElements.html")
elements = driver.find_elements(By.ID, "2")
assert len(elements) == 8
# By.id negative
def test_Should_Not_Be_Able_To_Locate_By_Id_ASingle_Element_That_Does_Not_Exist(driver, pages):
pages.load("formPage.html")
with pytest.raises(NoSuchElementException):
driver.find_element(By.ID, "non_Existent_Button")
def test_Should_Not_Be_Able_To_Locate_By_Id_Multiple_Elements_That_Do_Not_Exist(driver, pages):
pages.load("formPage.html")
elements = driver.find_elements(By.ID, "non_Existent_Button")
assert len(elements) == 0
def test_Finding_ASingle_Element_By_Empty_Id_Should_Throw(driver, pages):
pages.load("formPage.html")
with pytest.raises(NoSuchElementException):
driver.find_element(By.ID, "")
def test_Finding_Multiple_Elements_By_Empty_Id_Should_Return_Empty_List(driver, pages):
pages.load("formPage.html")
elements = driver.find_elements(By.ID, "")
assert len(elements) == 0
def test_Finding_ASingle_Element_By_Id_With_Space_Should_Throw(driver, pages):
pages.load("formPage.html")
with pytest.raises(NoSuchElementException):
driver.find_element(By.ID, "nonexistent button")
def test_Finding_Multiple_Elements_By_Id_With_Space_Should_Return_Empty_List(driver, pages):
pages.load("formPage.html")
elements = driver.find_elements(By.ID, "nonexistent button")
assert len(elements) == 0
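# --- Illustrative aside ------------------------------------------------------
# The contract the tests above exercise: find_element raises
# NoSuchElementException when nothing matches, while find_elements returns an
# empty list. A small helper (illustrative only, not part of the suite) that
# normalises the two behaviours:
def find_optional(driver, by, value):
    matches = driver.find_elements(by, value)
    return matches[0] if matches else None
# ------------------------------------------------------------------------------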
# By.name positive
def test_Should_Be_Able_To_Find_ASingle_Element_By_Name(driver, pages):
pages.load("formPage.html")
element = driver.find_element(By.NAME, "checky")
assert element.get_attribute("value") == "furrfu"
def test_Should_Be_Able_To_Find_Multiple_Elements_By_Name(driver, pages):
pages.load("nestedElements.html")
elements = driver.find_elements(By.NAME, "checky")
assert len(elements) > 1
def test_Should_Be_Able_To_Find_An_Element_That_Does_Not_Support_The_Name_Property(driver, pages):
pages.load("nestedElements.html")
element = driver.find_element(By.NAME, "div1")
assert element.get_attribute("name") == "div1"
# By.name negative
def test_Should_Not_Be_Able_To_Locate_By_Name_ASingle_Element_That_Does_Not_Exist(driver, pages):
pages.load("formPage.html")
with pytest.raises(NoSuchElementException):
driver.find_element(By.NAME, "non_Existent_Button")
def test_Should_Not_Be_Able_To_Locate_By_Name_Multiple_Elements_That_Do_Not_Exist(driver, pages):
pages.load("formPage.html")
elements = driver.find_elements(By.NAME, "non_Existent_Button")
assert len(elements) == 0
def test_Finding_ASingle_Element_By_Empty_Name_Should_Throw(driver, pages):
pages.load("formPage.html")
with pytest.raises(NoSuchElementException):
driver.find_element(By.NAME, "")
def test_Finding_Multiple_Elements_By_Empty_Name_Should_Return_Empty_List(driver, pages):
pages.load("formPage.html")
elements = driver.find_elements(By.NAME, "")
assert len(elements) == 0
def test_Finding_ASingle_Element_By_Name_With_Space_Should_Throw(driver, pages):
pages.load("formPage.html")
with pytest.raises(NoSuchElementException):
driver.find_element(By.NAME, "nonexistent button")
def test_Finding_Multiple_Elements_By_Name_With_Space_Should_Return_Empty_List(driver, pages):
pages.load("formPage.html")
elements = driver.find_elements(By.NAME, "nonexistent button")
assert len(elements) == 0
# By.tag_Name positive
def test_Should_Be_Able_To_Find_ASingle_Element_By_Tag_Name(driver, pages):
pages.load("formPage.html")
element = driver.find_element(By.TAG_NAME, "input")
assert element.tag_name.lower() == "input"
def test_Should_Be_Able_To_Find_Multiple_Elements_By_Tag_Name(driver, pages):
pages.load("formPage.html")
elements = driver.find_elements(By.TAG_NAME, "input")
assert len(elements) > 1
# By.tag_Name negative
def test_Should_Not_Be_Able_To_Locate_By_Tag_Name_ASingle_Element_That_Does_Not_Exist(driver, pages):
pages.load("formPage.html")
with pytest.raises(NoSuchElementException):
driver.find_element(By.TAG_NAME, "non_Existent_Button")
def test_Should_Not_Be_Able_To_Locate_By_Tag_Name_Multiple_Elements_That_Do_Not_Exist(driver, pages):
pages.load("formPage.html")
elements = driver.find_elements(By.TAG_NAME, "non_Existent_Button")
assert len(elements) == 0
def test_Finding_ASingle_Element_By_Empty_Tag_Name_Should_Throw(driver, pages):
pages.load("formPage.html")
with pytest.raises(InvalidSelectorException):
driver.find_element(By.TAG_NAME, "")
def test_Finding_Multiple_Elements_By_Empty_Tag_Name_Should_Throw(driver, pages):
pages.load("formPage.html")
with pytest.raises(InvalidSelectorException):
driver.find_elements(By.TAG_NAME, "")
def test_Finding_ASingle_Element_By_Tag_Name_With_Space_Should_Throw(driver, pages):
pages.load("formPage.html")
with pytest.raises(NoSuchElementException):
driver.find_element(By.TAG_NAME, "nonexistent button")
def test_Finding_Multiple_Elements_By_Tag_Name_With_Space_Should_Return_Empty_List(driver, pages):
pages.load("formPage.html")
elements = driver.find_elements(By.TAG_NAME, "nonexistent button")
assert len(elements) == 0
# By.class_Name positive
def test_Should_Be_Able_To_Find_ASingle_Element_By_Class(driver, pages):
pages.load("xhtmlTest.html")
element = driver.find_element(By.CLASS_NAME, "extraDiv")
assert "Another div starts here." in element.text
def test_Should_Be_Able_To_Find_Multiple_Elements_By_Class_Name(driver, pages):
pages.load("xhtmlTest.html")
elements = driver.find_elements(By.CLASS_NAME, "nameC")
assert len(elements) > 1
def test_Should_Find_Element_By_Class_When_It_Is_The_First_Name_Among_Many(driver, pages):
pages.load("xhtmlTest.html")
element = driver.find_element(By.CLASS_NAME, "nameA")
assert element.text == "An H2 title"
def test_Should_Find_Element_By_Class_When_It_Is_The_Last_Name_Among_Many(driver, pages):
pages.load("xhtmlTest.html")
element = driver.find_element(By.CLASS_NAME, "nameC")
assert element.text == "An H2 title"
def test_Should_Find_Element_By_Class_When_It_Is_In_The_Middle_Among_Many(driver, pages):
pages.load("xhtmlTest.html")
element = driver.find_element(By.CLASS_NAME, "nameBnoise")
assert element.text == "An H2 title"
def test_Should_Find_Element_By_Class_When_Its_Name_Is_Surrounded_By_Whitespace(driver, pages):
pages.load("xhtmlTest.html")
element = driver.find_element(By.CLASS_NAME, "spaceAround")
assert element.text == "Spaced out"
def test_Should_Find_Elements_By_Class_When_Its_Name_Is_Surrounded_By_Whitespace(driver, pages):
pages.load("xhtmlTest.html")
elements = driver.find_elements(By.CLASS_NAME, "spaceAround")
assert len(elements) == 1
assert elements[0].text == "Spaced out"
# By.class_Name negative
def test_Should_Not_Find_Element_By_Class_When_The_Name_Queried_Is_Shorter_Than_Candidate_Name(driver, pages):
pages.load("xhtmlTest.html")
with pytest.raises(NoSuchElementException):
driver.find_element(By.CLASS_NAME, "name_B")
def test_Finding_ASingle_Element_By_Empty_Class_Name_Should_Throw(driver, pages):
pages.load("xhtmlTest.html")
with pytest.raises(NoSuchElementException):
driver.find_element(By.CLASS_NAME, "")
def test_Finding_Multiple_Elements_By_Empty_Class_Name_Should_Throw(driver, pages):
pages.load("xhtmlTest.html")
with pytest.raises(NoSuchElementException):
driver.find_elements(By.CLASS_NAME, "")
def test_Finding_ASingle_Element_By_Compound_Class_Name_Should_Throw(driver, pages):
pages.load("xhtmlTest.html")
with pytest.raises(NoSuchElementException):
driver.find_element(By.CLASS_NAME, "a b")
def test_Finding_ASingle_Element_By_Invalid_Class_Name_Should_Throw(driver, pages):
pages.load("xhtmlTest.html")
with pytest.raises(NoSuchElementException):
driver.find_element(By.CLASS_NAME, "!@#$%^&*")
def test_Finding_Multiple_Elements_By_Invalid_Class_Name_Should_Throw(driver, pages):
pages.load("xhtmlTest.html")
with pytest.raises(NoSuchElementException):
driver.find_elements(By.CLASS_NAME, "!@#$%^&*")
# By.xpath positive
def test_Should_Be_Able_To_Find_ASingle_Element_By_XPath(driver, pages):
pages.load("xhtmlTest.html")
element = driver.find_element(By.XPATH, "//h1")
assert element.text == "XHTML Might Be The Future"
def test_Should_Be_Able_To_Find_Multiple_Elements_By_XPath(driver, pages):
pages.load("xhtmlTest.html")
elements = driver.find_elements(By.XPATH, "//div")
assert len(elements) == 13
def test_Should_Be_Able_To_Find_Many_Elements_Repeatedly_By_XPath(driver, pages):
pages.load("xhtmlTest.html")
xpath = "//node()[contains(@id,'id')]"
assert len(driver.find_elements(By.XPATH, xpath)) == 3
xpath = "//node()[contains(@id,'nope')]"
assert len(driver.find_elements(By.XPATH, xpath)) == 0
def test_Should_Be_Able_To_Identify_Elements_By_Class(driver, pages):
pages.load("xhtmlTest.html")
header = driver.find_element(By.XPATH, "//h1[@class='header']")
assert header.text == "XHTML Might Be The Future"
def test_Should_Be_Able_To_Find_An_Element_By_XPath_With_Multiple_Attributes(driver, pages):
pages.load("formPage.html")
element = driver.find_element(
By.XPATH, "//form[@name='optional']/input[@type='submit' and @value='Click!']")
assert element.tag_name.lower() == "input"
assert element.get_attribute("value") == "Click!"
def test_Finding_ALink_By_Xpath_Should_Locate_An_Element_With_The_Given_Text(driver, pages):
pages.load("xhtmlTest.html")
element = driver.find_element(By.XPATH, "//a[text()='click me']")
assert element.text == "click me"
def test_Finding_ALink_By_Xpath_Using_Contains_Keyword_Should_Work(driver, pages):
pages.load("nestedElements.html")
element = driver.find_element(By.XPATH, "//a[contains(.,'hello world')]")
assert "hello world" in element.text
@pytest.mark.xfail_chrome(raises=InvalidSelectorException)
@pytest.mark.xfail_chromiumedge(raises=InvalidSelectorException)
@pytest.mark.xfail_firefox(raises=InvalidSelectorException)
@pytest.mark.xfail_remote(raises=InvalidSelectorException)
@pytest.mark.xfail_marionette(raises=WebDriverException)
@pytest.mark.xfail_safari(raises=NoSuchElementException)
@pytest.mark.xfail_webkitgtk(raises=InvalidSelectorException)
def test_Should_Be_Able_To_Find_Element_By_XPath_With_Namespace(driver, pages):
pages.load("svgPage.html")
element = driver.find_element(By.XPATH, "//svg:svg//svg:text")
assert element.text == "Test Chart"
def test_Should_Be_Able_To_Find_Element_By_XPath_In_Xml_Document(driver, pages):
pages.load("simple.xml")
element = driver.find_element(By.XPATH, "//foo")
assert "baz" in element.text
# By.xpath negative
def test_Should_Throw_An_Exception_When_There_Is_No_Link_To_Click(driver, pages):
pages.load("xhtmlTest.html")
with pytest.raises(NoSuchElementException):
driver.find_element(By.XPATH, "//a[@id='Not here']")
def test_Should_Throw_InvalidSelectorException_When_XPath_Is_Syntactically_Invalid_In_Driver_Find_Element(driver, pages):
pages.load("formPage.html")
with pytest.raises(InvalidSelectorException):
driver.find_element(By.XPATH, "this][isnot][valid")
def test_Should_Throw_InvalidSelectorException_When_XPath_Is_Syntactically_Invalid_In_Driver_Find_Elements(driver, pages):
pages.load("formPage.html")
with pytest.raises(InvalidSelectorException):
driver.find_elements(By.XPATH, "this][isnot][valid")
def test_Should_Throw_InvalidSelectorException_When_XPath_Is_Syntactically_Invalid_In_Element_Find_Element(driver, pages):
pages.load("formPage.html")
body = driver.find_element(By.TAG_NAME, "body")
with pytest.raises(InvalidSelectorException):
body.find_element(By.XPATH, "this][isnot][valid")
def test_Should_Throw_InvalidSelectorException_When_XPath_Is_Syntactically_Invalid_In_Element_Find_Elements(driver, pages):
pages.load("formPage.html")
body = driver.find_element(By.TAG_NAME, "body")
with pytest.raises(InvalidSelectorException):
body.find_elements(By.XPATH, "this][isnot][valid")
def test_Should_Throw_InvalidSelectorException_When_XPath_Returns_Wrong_Type_In_Driver_Find_Element(driver, pages):
pages.load("formPage.html")
with pytest.raises(InvalidSelectorException):
driver.find_element(By.XPATH, "count(//input)")
def test_Should_Throw_InvalidSelectorException_When_XPath_Returns_Wrong_Type_In_Driver_Find_Elements(driver, pages):
pages.load("formPage.html")
with pytest.raises(InvalidSelectorException):
driver.find_elements(By.XPATH, "count(//input)")
def test_Should_Throw_InvalidSelectorException_When_XPath_Returns_Wrong_Type_In_Element_Find_Element(driver, pages):
pages.load("formPage.html")
body = driver.find_element(By.TAG_NAME, "body")
with pytest.raises(InvalidSelectorException):
body.find_element(By.XPATH, "count(//input)")
def test_Should_Throw_InvalidSelectorException_When_XPath_Returns_Wrong_Type_In_Element_Find_Elements(driver, pages):
pages.load("formPage.html")
body = driver.find_element(By.TAG_NAME, "body")
with pytest.raises(InvalidSelectorException):
body.find_elements(By.XPATH, "count(//input)")
# By.css_Selector positive
def test_Should_Be_Able_To_Find_ASingle_Element_By_Css_Selector(driver, pages):
pages.load("xhtmlTest.html")
element = driver.find_element(By.CSS_SELECTOR, "div.content")
assert element.tag_name.lower() == "div"
assert element.get_attribute("class") == "content"
def test_Should_Be_Able_To_Find_Multiple_Elements_By_Css_Selector(driver, pages):
pages.load("xhtmlTest.html")
elements = driver.find_elements(By.CSS_SELECTOR, "p")
assert len(elements) > 1
def test_Should_Be_Able_To_Find_ASingle_Element_By_Compound_Css_Selector(driver, pages):
pages.load("xhtmlTest.html")
element = driver.find_element(By.CSS_SELECTOR, "div.extraDiv, div.content")
assert element.tag_name.lower() == "div"
assert element.get_attribute("class") == "content"
def test_Should_Be_Able_To_Find_Multiple_Elements_By_Compound_Css_Selector(driver, pages):
pages.load("xhtmlTest.html")
elements = driver.find_elements(By.CSS_SELECTOR, "div.extraDiv, div.content")
assert len(elements) > 1
assert elements[0].get_attribute("class") == "content"
assert elements[1].get_attribute("class") == "extraDiv"
def test_Should_Be_Able_To_Find_An_Element_By_Boolean_Attribute_Using_Css_Selector(driver, pages):
pages.load("locators_tests/boolean_attribute_selected.html")
element = driver.find_element(By.CSS_SELECTOR, "option[selected='selected']")
assert element.get_attribute("value") == "two"
def test_Should_Be_Able_To_Find_An_Element_By_Boolean_Attribute_Using_Short_Css_Selector(driver, pages):
pages.load("locators_tests/boolean_attribute_selected.html")
element = driver.find_element(By.CSS_SELECTOR, "option[selected]")
assert element.get_attribute("value") == "two"
def test_Should_Be_Able_To_Find_An_Element_By_Boolean_Attribute_Using_Short_Css_Selector_On_Html4Page(driver, pages):
pages.load("locators_tests/boolean_attribute_selected_html4.html")
element = driver.find_element(By.CSS_SELECTOR, "option[selected]")
assert element.get_attribute("value") == "two"
# By.css_Selector negative
def test_Should_Not_Find_Element_By_Css_Selector_When_There_Is_No_Such_Element(driver, pages):
pages.load("xhtmlTest.html")
with pytest.raises(NoSuchElementException):
driver.find_element(By.CSS_SELECTOR, ".there-is-no-such-class")
def test_Should_Not_Find_Elements_By_Css_Selector_When_There_Is_No_Such_Element(driver, pages):
pages.load("xhtmlTest.html")
elements = driver.find_elements(By.CSS_SELECTOR, ".there-is-no-such-class")
assert len(elements) == 0
def test_Finding_ASingle_Element_By_Empty_Css_Selector_Should_Throw(driver, pages):
pages.load("xhtmlTest.html")
with pytest.raises(NoSuchElementException):
driver.find_element(By.CSS_SELECTOR, "")
def test_Finding_Multiple_Elements_By_Empty_Css_Selector_Should_Throw(driver, pages):
pages.load("xhtmlTest.html")
with pytest.raises(NoSuchElementException):
driver.find_elements(By.CSS_SELECTOR, "")
def test_Finding_ASingle_Element_By_Invalid_Css_Selector_Should_Throw(driver, pages):
pages.load("xhtmlTest.html")
with pytest.raises(NoSuchElementException):
driver.find_element(By.CSS_SELECTOR, "//a/b/c[@id='1']")
def test_Finding_Multiple_Elements_By_Invalid_Css_Selector_Should_Throw(driver, pages):
pages.load("xhtmlTest.html")
with pytest.raises(NoSuchElementException):
driver.find_elements(By.CSS_SELECTOR, "//a/b/c[@id='1']")
# By.link_Text positive
def test_Should_Be_Able_To_Find_ALink_By_Text(driver, pages):
pages.load("xhtmlTest.html")
link = driver.find_element(By.LINK_TEXT, "click me")
assert link.text == "click me"
def test_Should_Be_Able_To_Find_Multiple_Links_By_Text(driver, pages):
pages.load("xhtmlTest.html")
elements = driver.find_elements(By.LINK_TEXT, "click me")
assert len(elements) == 2
def test_Should_Find_Element_By_Link_Text_Containing_Equals_Sign(driver, pages):
pages.load("xhtmlTest.html")
element = driver.find_element(By.LINK_TEXT, "Link=equalssign")
assert element.get_attribute("id") == "linkWithEqualsSign"
def test_Should_Find_Multiple_Elements_By_Link_Text_Containing_Equals_Sign(driver, pages):
pages.load("xhtmlTest.html")
elements = driver.find_elements(By.LINK_TEXT, "Link=equalssign")
    assert len(elements) == 1
assert elements[0].get_attribute("id") == "linkWithEqualsSign"
def test_finds_By_Link_Text_On_Xhtml_Page(driver, pages):
pages.load("actualXhtmlPage.xhtml")
link_Text = "Foo"
element = driver.find_element(By.LINK_TEXT, link_Text)
assert element.text == link_Text
def test_Link_With_Formatting_Tags(driver, pages):
pages.load("simpleTest.html")
elem = driver.find_element(By.ID, "links")
res = elem.find_element(By.PARTIAL_LINK_TEXT, "link with formatting tags")
assert res.text == "link with formatting tags"
def test_Driver_Can_Get_Link_By_Link_Text_Ignoring_Trailing_Whitespace(driver, pages):
pages.load("simpleTest.html")
link = driver.find_element(By.LINK_TEXT, "link with trailing space")
assert link.get_attribute("id") == "linkWithTrailingSpace"
assert link.text == "link with trailing space"
# By.link_Text negative
def test_Should_Not_Be_Able_To_Locate_By_Link_Text_ASingle_Element_That_Does_Not_Exist(driver, pages):
pages.load("xhtmlTest.html")
with pytest.raises(NoSuchElementException):
driver.find_element(By.LINK_TEXT, "Not here either")
def test_Should_Not_Be_Able_To_Locate_By_Link_Text_Multiple_Elements_That_Do_Not_Exist(driver, pages):
pages.load("xhtmlTest.html")
elements = driver.find_elements(By.LINK_TEXT, "Not here either")
assert len(elements) == 0
# By.partial_Link_Text positive
def test_Should_Be_Able_To_Find_Multiple_Elements_By_Partial_Link_Text(driver, pages):
pages.load("xhtmlTest.html")
elements = driver.find_elements(By.PARTIAL_LINK_TEXT, "ick me")
assert len(elements) == 2
def test_Should_Be_Able_To_Find_ASingle_Element_By_Partial_Link_Text(driver, pages):
pages.load("xhtmlTest.html")
element = driver.find_element(By.PARTIAL_LINK_TEXT, "anon")
assert "anon" in element.text
def test_Should_Find_Element_By_Partial_Link_Text_Containing_Equals_Sign(driver, pages):
pages.load("xhtmlTest.html")
element = driver.find_element(By.PARTIAL_LINK_TEXT, "Link=")
assert element.get_attribute("id") == "linkWithEqualsSign"
def test_Should_Find_Multiple_Elements_By_Partial_Link_Text_Containing_Equals_Sign(driver, pages):
pages.load("xhtmlTest.html")
elements = driver.find_elements(By.PARTIAL_LINK_TEXT, "Link=")
assert len(elements) == 1
assert elements[0].get_attribute("id") == "linkWithEqualsSign"
# Misc tests
def test_Driver_Should_Be_Able_To_Find_Elements_After_Loading_More_Than_One_Page_At_ATime(driver, pages):
pages.load("formPage.html")
pages.load("xhtmlTest.html")
link = driver.find_element(By.LINK_TEXT, "click me")
assert link.text == "click me"
# You don't want to ask why this is here
def test_When_Finding_By_Name_Should_Not_Return_By_Id(driver, pages):
pages.load("formPage.html")
element = driver.find_element(By.NAME, "id-name1")
assert element.get_attribute("value") == "name"
element = driver.find_element(By.ID, "id-name1")
assert element.get_attribute("value") == "id"
element = driver.find_element(By.NAME, "id-name2")
assert element.get_attribute("value") == "name"
element = driver.find_element(By.ID, "id-name2")
assert element.get_attribute("value") == "id"
def test_Should_Be_Able_To_Find_AHidden_Elements_By_Name(driver, pages):
pages.load("formPage.html")
element = driver.find_element(By.NAME, "hidden")
assert element.get_attribute("name") == "hidden"
def test_Should_Not_Be_Able_To_Find_An_Element_On_ABlank_Page(driver, pages):
driver.get("about:blank")
with pytest.raises(NoSuchElementException):
driver.find_element(By.TAG_NAME, "a")
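# A minimal standalone sketch of the locator API exercised above, for use
# outside this test harness (the driver choice and URL are illustrative
# assumptions, not part of the suite):
#
#     from selenium import webdriver
#     from selenium.webdriver.common.by import By
#
#     driver = webdriver.Firefox()
#     driver.get("http://localhost/formPage.html")
#     checkbox = driver.find_element(By.NAME, "checky")     # first match, or raises NoSuchElementException
#     inputs = driver.find_elements(By.TAG_NAME, "input")   # list, possibly empty
#     driver.quit()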
|
|
import pytest
from api.base.settings.defaults import API_BASE
from osf_tests.factories import (
ProjectFactory,
AuthUserFactory,
PrivateLinkFactory,
NodeFactory,
)
from website.util import permissions
@pytest.fixture()
def user():
return AuthUserFactory()
@pytest.fixture()
def read_only_user():
return AuthUserFactory()
@pytest.fixture()
def read_write_user():
return AuthUserFactory()
@pytest.fixture()
def non_contributor():
return AuthUserFactory()
@pytest.fixture()
def public_project(user, read_only_user, read_write_user):
public_project = ProjectFactory(is_public=True, creator=user)
public_project.add_contributor(read_only_user, permissions=[permissions.READ])
public_project.add_contributor(read_write_user, permissions=[permissions.WRITE])
public_project.save()
return public_project
@pytest.fixture()
def view_only_link(public_project):
view_only_link = PrivateLinkFactory(name='testlink')
view_only_link.nodes.add(public_project)
view_only_link.save()
return view_only_link
@pytest.fixture()
def component_one(user, public_project):
return NodeFactory(creator=user, parent=public_project, is_public=True)
@pytest.fixture()
def component_two(user, public_project):
return NodeFactory(creator=user, parent=public_project, is_public=False)
@pytest.fixture()
def project_two(user):
return NodeFactory(creator=user)
@pytest.fixture()
def first_level_component(user, public_project):
return NodeFactory(creator=user, parent=public_project)
@pytest.fixture()
def second_level_component(user, first_level_component):
return NodeFactory(creator=user, parent=first_level_component)
@pytest.fixture()
def component_one_payload(component_one):
return {
'data': [
{
'type': 'nodes',
'id': component_one._id
}
]
}
@pytest.mark.django_db
class TestViewOnlyLinksNodes:
@pytest.fixture()
def url(self, view_only_link):
return '/{}view_only_links/{}/nodes/'.format(API_BASE, view_only_link._id)
def test_view_only_links_nodes(self, app, user, read_only_user, read_write_user, non_contributor, url):
# test_admin_can_view_vol_nodes_detail
res = app.get(url, auth=user.auth)
assert res.status_code == 200
# test_read_write_cannot_view_vol_detail
res = app.get(url, auth=read_write_user.auth, expect_errors=True)
assert res.status_code == 403
# test_read_only_cannot_view_vol_detail
res = app.get(url, auth=read_only_user.auth, expect_errors=True)
assert res.status_code == 403
# test_logged_in_user_cannot_view_vol_detail
res = app.get(url, auth=non_contributor.auth, expect_errors=True)
assert res.status_code == 403
# test_unauthenticated_user_cannot_view_vol_detail
res = app.get(url, expect_errors=True)
assert res.status_code == 403
@pytest.mark.django_db
class TestViewOnlyLinkNodesSet:
@pytest.fixture()
def url(self, view_only_link):
return '/{}view_only_links/{}/relationships/nodes/'.format(API_BASE, view_only_link._id)
def test_admin_can_set_single_node(self, app, user, public_project, component_one, component_one_payload, view_only_link, url):
res = app.post_json_api(url, component_one_payload, auth=user.auth)
view_only_link.reload()
assert res.status_code == 201
assert public_project in view_only_link.nodes.all()
assert component_one in view_only_link.nodes.all()
def test_admin_can_set_multiple_nodes(self, app, user, public_project, component_one, component_two, view_only_link, url):
payload = {
'data': [
{
'type': 'nodes',
'id': component_one._id
}, {
'type': 'nodes',
'id': component_two._id
}
]
}
res = app.post_json_api(url, payload, auth=user.auth)
view_only_link.reload()
assert res.status_code == 201
assert public_project in view_only_link.nodes.all()
assert component_one in view_only_link.nodes.all()
assert component_two in view_only_link.nodes.all()
def test_set_nodes_does_not_duplicate_nodes(self, app, user, public_project, component_one, view_only_link, url):
payload = {
'data': [
{
'type': 'nodes',
'id': public_project._id
}, {
'type': 'nodes',
'id': component_one._id
}, {
'type': 'nodes',
'id': component_one._id
}
]
}
res = app.post_json_api(url, payload, auth=user.auth)
view_only_link.reload()
assert res.status_code == 201
assert view_only_link.nodes.count() == 2
assert public_project in view_only_link.nodes.all()
assert component_one in view_only_link.nodes.all()
def test_set_node_not_component(self, app, user, project_two, url):
"""
Project One (already associated with VOL)
-> Level One Component (can be associated with VOL)
Project Two (CANNOT be associated with this VOL)
"""
payload = {
'data': [
{
'type': 'nodes',
'id': project_two._id
},
]
}
res = app.post_json_api(url, payload, auth=user.auth, expect_errors=True)
assert res.status_code == 400
assert res.json['errors'][0]['detail'] == 'The node {0} cannot be affiliated with this View Only Link because the node you\'re trying to affiliate is not descended from the node that the View Only Link is attached to.'.format(project_two._id)
def test_set_node_second_level_component_without_first_level_parent(self, app, user, public_project, second_level_component, view_only_link, url):
"""
Parent Project (already associated with VOL)
-> First Level Component (NOT included)
-> Second Level Component (included -- OK)
"""
payload = {
'data': [
{
'type': 'nodes',
'id': second_level_component._id
},
]
}
        res = app.post_json_api(url, payload, auth=user.auth)
view_only_link.reload()
assert res.status_code == 201
assert len(res.json['data']) == 2
assert public_project in view_only_link.nodes.all()
assert second_level_component in view_only_link.nodes.all()
def test_set_node_second_level_component_with_first_level_parent(self, app, user, first_level_component, second_level_component, view_only_link, url):
"""
Parent Project (already associated with VOL)
-> First Level Component (included)
-> Second Level Component (included -- OK)
"""
payload = {
'data': [
{
'type': 'nodes',
'id': first_level_component._id
},
{
'type': 'nodes',
'id': second_level_component._id
}
]
}
res = app.post_json_api(url, payload, auth=user.auth)
view_only_link.reload()
assert res.status_code == 201
assert first_level_component in view_only_link.nodes.all()
assert second_level_component in view_only_link.nodes.all()
def test_view_only_link_nodes_set_errors(self, app, user, read_write_user, read_only_user, non_contributor, component_one_payload, component_one, url):
# test_invalid_nodes_in_payload
payload = {
'data': [{
'type': 'nodes',
'id': 'abcde'
}]
}
res = app.post_json_api(url, payload, auth=user.auth, expect_errors=True)
assert res.status_code == 404
# test_type_required_in_payload
payload = {
'data': [{
'id': component_one._id
}]
}
res = app.post_json_api(url, payload, auth=user.auth, expect_errors=True)
assert res.status_code == 400
# test_id_required_in_payload
payload = {
'data': [{
'type': 'nodes',
}]
}
res = app.post_json_api(url, payload, auth=user.auth, expect_errors=True)
assert res.status_code == 400
# test_read_write_contributor_cannot_set_nodes
res = app.post_json_api(url, component_one_payload, auth=read_write_user.auth, expect_errors=True)
assert res.status_code == 403
# test_read_only_contributor_cannot_set_nodes
res = app.post_json_api(url, component_one_payload, auth=read_only_user.auth, expect_errors=True)
assert res.status_code == 403
# test_logged_in_user_cannot_set_nodes
res = app.post_json_api(url, component_one_payload, auth=non_contributor.auth, expect_errors=True)
assert res.status_code == 403
# test_unauthenticated_user_cannot_set_nodes
res = app.post_json_api(url, component_one_payload, expect_errors=True)
assert res.status_code == 401
@pytest.mark.django_db
class TestViewOnlyLinkNodesUpdate:
@pytest.fixture()
def url(self, view_only_link):
return '/{}view_only_links/{}/relationships/nodes/'.format(API_BASE, view_only_link._id)
@pytest.fixture()
def update_payload(self, public_project, component_one):
return {
'data': [{
'type': 'nodes',
'id': public_project._id
}, {
'type': 'nodes',
'id': component_one._id
}]
}
def test_admin_can_update_nodes_single_node_to_add(self, app, user, url, public_project, component_one, view_only_link, update_payload):
res = app.put_json_api(url, update_payload, auth=user.auth)
view_only_link.reload()
assert res.status_code == 200
assert len(res.json['data']) == 2
assert public_project in view_only_link.nodes.all()
assert component_one in view_only_link.nodes.all()
def test_admin_can_update_nodes_multiple_nodes_to_add(self, app, user, public_project, component_one, component_two, view_only_link, url, update_payload):
update_payload['data'].append({
'type': 'nodes',
'id': component_two._id
})
res = app.put_json_api(url, update_payload, auth=user.auth)
view_only_link.reload()
assert res.status_code == 200
assert len(res.json['data']) == 3
assert public_project in view_only_link.nodes.all()
assert component_one in view_only_link.nodes.all()
assert component_two in view_only_link.nodes.all()
def test_admin_can_update_nodes_single_node_to_remove(self, app, user, public_project, component_one, view_only_link, update_payload, url):
view_only_link.nodes.add(component_one)
view_only_link.save()
update_payload['data'].pop()
res = app.put_json_api(url, update_payload, auth=user.auth)
view_only_link.reload()
assert res.status_code == 200
assert len(res.json['data']) == 1
assert public_project in view_only_link.nodes.all()
assert component_one not in view_only_link.nodes.all()
def test_admin_can_update_nodes_multiple_nodes_to_remove(self, app, user, public_project, component_one, component_two, view_only_link, update_payload, url,):
view_only_link.nodes.add(component_one)
view_only_link.nodes.add(component_two)
view_only_link.save()
update_payload['data'].pop()
res = app.put_json_api(url, update_payload, auth=user.auth)
view_only_link.reload()
assert res.status_code == 200
assert len(res.json['data']) == 1
assert public_project in view_only_link.nodes.all()
assert component_one not in view_only_link.nodes.all()
assert component_two not in view_only_link.nodes.all()
def test_admin_can_update_nodes_single_add_single_remove(self, app, user, public_project, component_one, component_two, view_only_link, update_payload, url):
view_only_link.nodes.add(component_two)
view_only_link.save()
res = app.put_json_api(url, update_payload, auth=user.auth)
view_only_link.reload()
assert res.status_code == 200
assert len(res.json['data']) == 2
assert public_project in view_only_link.nodes.all()
assert component_one in view_only_link.nodes.all()
assert component_two not in view_only_link.nodes.all()
def test_admin_can_update_nodes_multiple_add_multiple_remove(self, app, user, public_project, component_one, component_two, view_only_link, url):
view_only_link.nodes.add(component_one)
view_only_link.nodes.add(component_two)
view_only_link.save()
component_three = NodeFactory(creator=user, parent=public_project)
component_four = NodeFactory(creator=user, parent=public_project)
payload = {
'data': [{
'type': 'nodes',
'id': public_project._id,
}, {
'type': 'nodes',
'id': component_three._id
}, {
'type': 'nodes',
'id': component_four._id
}]
}
res = app.put_json_api(url, payload, auth=user.auth)
view_only_link.reload()
assert res.status_code == 200
assert len(res.json['data']) == 3
assert public_project in view_only_link.nodes.all()
assert component_one not in view_only_link.nodes.all()
assert component_two not in view_only_link.nodes.all()
assert component_three in view_only_link.nodes.all()
assert component_four in view_only_link.nodes.all()
def test_update_nodes_no_changes(self, app, user, public_project, view_only_link, url):
payload = {
'data': [{
'type': 'nodes',
'id': public_project._id,
}]
}
        res = app.put_json_api(url, payload, auth=user.auth)
view_only_link.reload()
assert res.status_code == 200
assert len(res.json['data']) == 1
assert public_project in view_only_link.nodes.all()
def test_update_nodes_top_level_node_not_included(self, app, user, component_one, url):
"""
Parent Project (NOT included)
-> First Level Component (included) -- NOT ALLOWED
"""
payload = {
'data': [{
'type': 'nodes',
'id': component_one._id
}]
}
res = app.put_json_api(url, payload, auth=user.auth, expect_errors=True)
assert res.status_code == 400
assert res.json['errors'][0]['detail'] == 'The node {0} cannot be affiliated with this View Only Link because the node you\'re trying to affiliate is not descended from the node that the View Only Link is attached to.'.format(component_one._id)
def test_update_node_not_component(self, app, user, project_two, component_two, url):
payload = {
'data': [{
'type': 'nodes',
'id': project_two._id
}]
}
res = app.put_json_api(url, payload, auth=user.auth, expect_errors=True)
assert res.status_code == 400
assert res.json['errors'][0]['detail'] == 'The node {0} cannot be affiliated with this View Only Link because the node you\'re trying to affiliate is not descended from the node that the View Only Link is attached to.'.format(project_two._id)
def test_update_node_second_level_component_without_first_level_parent(self, app, user, public_project, second_level_component, view_only_link, url):
"""
Parent Project (included)
-> First Level Component (NOT included)
-> Second Level Component (included) -- OK
"""
payload = {
'data': [{
'type': 'nodes',
'id': public_project._id
}, {
'type': 'nodes',
'id': second_level_component._id
}]
}
        res = app.put_json_api(url, payload, auth=user.auth)
view_only_link.reload()
assert res.status_code == 200
assert len(res.json['data']) == 2
assert public_project in view_only_link.nodes.all()
assert second_level_component in view_only_link.nodes.all()
def test_update_node_second_level_component_with_first_level_parent(self, app, user, public_project, first_level_component, second_level_component, view_only_link, url):
"""
Parent Project (included)
-> First Level Component (included)
-> Second Level Component (included) -- OK
"""
payload = {
'data': [{
'type': 'nodes',
'id': public_project._id
}, {
'type': 'nodes',
'id': first_level_component._id
}, {
'type': 'nodes',
'id': second_level_component._id
}]
}
        res = app.put_json_api(url, payload, auth=user.auth)
view_only_link.reload()
assert res.status_code == 200
assert len(res.json['data']) == 3
assert public_project in view_only_link.nodes.all()
assert first_level_component in view_only_link.nodes.all()
assert second_level_component in view_only_link.nodes.all()
def test_view_only_link_nodes_update_errors(self, app, user, read_write_user, read_only_user, non_contributor, public_project, component_one, update_payload, url):
# test_invalid_nodes_in_payload
payload = {
'data': [{
'type': 'nodes',
'id': public_project._id
}, {
'type': 'nodes',
'id': 'abcde'
}]
}
res = app.put_json_api(url, payload, auth=user.auth, expect_errors=True)
assert res.status_code == 404
# test_type_required_in_payload
payload = {
'data': [{
'type': 'nodes',
'id': public_project._id
}, {
'id': component_one._id
}]
}
res = app.put_json_api(url, payload, auth=user.auth, expect_errors=True)
assert res.status_code == 400
# test_id_required_in_payload
payload = {
'data': [{
'type': 'nodes',
'id': public_project._id
}, {
'type': 'nodes'
}]
}
res = app.put_json_api(url, payload, auth=user.auth, expect_errors=True)
assert res.status_code == 400
# test_read_write_contributor_cannot_update_nodes
res = app.put_json_api(url, update_payload, auth=read_write_user.auth, expect_errors=True)
assert res.status_code == 403
# test_read_only_contributor_cannot_update_nodes
res = app.put_json_api(url, update_payload, auth=read_only_user.auth, expect_errors=True)
assert res.status_code == 403
# test_logged_in_user_cannot_update_nodes
res = app.put_json_api(url, update_payload, auth=non_contributor.auth, expect_errors=True)
assert res.status_code == 403
# test_unauthenticated_user_cannot_update_nodes
res = app.put_json_api(url, update_payload, expect_errors=True)
assert res.status_code == 401
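# Taken together, the two classes above pin down the JSON:API relationship
# semantics of /view_only_links/<id>/relationships/nodes/: POST adds nodes to
# the existing set (201), while PUT replaces the set wholesale (200),
# implicitly removing any node omitted from the payload.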
|
|
#!/usr/bin/env python
import sys
# Usage:
#
# #Create object
# your_map = EECSMap()
#
# #Use Object (examples)
# your_map.printObstacleMap()
# your_map.clearObstacleMap()
# your_map.printCostMap()
# your_map.setObstacle(3, 4, 1, DIRECTION.North)
# isBlocked = your_map.getObstacle(3, 4, DIRECTION.North)
# cell_cost = your_map.getCost(3, 4)
def enum(**enums):
return type('Enum', (), enums)
DIRECTION = enum(North=1, South=2, East=3, West=4)
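# enum() is the classic pre-`enum`-module idiom: it builds a throwaway class
# whose attributes are the given keyword arguments, so DIRECTION.North == 1
# and DIRECTION.West == 4.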
class EECSMap():
def __init__(self):
        # Wall layout, row-major: horizontalWalls[i][j] is the wall on the
        # north edge of cell (i, j) (row 8 is the south boundary), and
        # verticalWalls[i][j] is the wall on the west edge of cell (i, j)
        # (column 8 is the east boundary).
        self.horizontalWalls = [
            [1, 1, 1, 1, 1, 1, 1, 1],
            [0, 0, 0, 0, 0, 0, 1, 0],
            [0, 0, 1, 1, 1, 0, 0, 0],
            [0, 1, 1, 0, 0, 0, 1, 1],
            [0, 1, 1, 1, 1, 0, 1, 1],
            [0, 0, 1, 1, 1, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0],
            [0, 1, 1, 0, 1, 1, 1, 0],
            [1, 1, 1, 1, 1, 1, 1, 1],
        ]
        self.verticalWalls = [
            [1, 0, 1, 0, 0, 0, 1, 0, 1],
            [1, 1, 1, 0, 0, 1, 0, 1, 1],
            [1, 1, 0, 0, 0, 0, 0, 0, 1],
            [1, 0, 0, 0, 0, 0, 1, 0, 1],
            [1, 1, 0, 0, 0, 0, 1, 0, 1],
            [1, 1, 1, 0, 0, 1, 1, 1, 1],
            [1, 1, 0, 1, 1, 1, 0, 1, 1],
            [1, 0, 0, 0, 0, 0, 0, 1, 1],
        ]
        self.costMap = [[0 for x in xrange(8)] for x in xrange(8)]
self.obstacle_size_x = 8
self.obstacle_size_y = 8
self.costmap_size_x = 8
self.costmap_size_y = 8
# ***********************************************************************
# Function Name : getNeighborObstacle
# Description : Checks if the neighboring cell is blocked on the map.
# Input : i: The row coordinate of the current cell on the map.
# : j: The column coordinate of the current cell on the map
# : dir: A Direction enumeration (North, South, East, West)
# : indicating which neighboring cell to check for
# : obstacles
# Output : None
# Return : 1 if neighboring cell is blocked, 0 if neighboring cell
# : is clear, -1 if index i or j is out of bounds
# ***********************************************************************/
def getNeighborObstacle(self, i, j, dir):
        if (((i < 0 or i > 7 or j < 0 or j > 8) and (dir == DIRECTION.West or dir == DIRECTION.East)) or ((j < 0 or j > 7 or i < 0 or i > 8) and (dir == DIRECTION.North or dir == DIRECTION.South))):
print "ERROR (getNeighborObstacle): index out of range"
return -1
isBlocked = 0
if dir == DIRECTION.North:
isBlocked = self.horizontalWalls[i][j]
elif dir == DIRECTION.South:
isBlocked = self.horizontalWalls[i+1][j]
elif dir == DIRECTION.West:
isBlocked = self.verticalWalls[i][j]
elif dir == DIRECTION.East:
isBlocked = self.verticalWalls[i][j+1]
return isBlocked
# ******************************************************************************
# Function Name : setObstacle
# Description : Used for map building, sets the obstacle status of a given map cell
# Input : i: The row coordinate of the current cell on the map.
# : j: The column coordinate of the current cell on the map
    # : isBlocked: A boolean (0 or 1) value indicating if the cell is blocked
# : dir: A Direction enumeration (North, South, East, West) indicating
# : which neighboring cell to set for obstacles
# Output : None
    # Return : 0 if successful, -1 if i or j is out of map bounds, -2 if isBlocked is not 0 or 1
# *****************************************************************************/
def setObstacle(self, i, j, isBlocked, dir):
if (((i < 0 or i > 7 or j < 0 or j > 8) and (dir == DIRECTION.West or dir == DIRECTION.East)) or ((j < 0 or j > 7 or i < 0 or i > 8) and (dir == DIRECTION.North or dir == DIRECTION.South))):
print "ERROR (setObstacle): index out of range, obstacle not set"
return -1
        if isBlocked not in (0, 1):
print "ERROR (setObstacle): isBlocked not a valid input, obstacle not set"
return -2
if dir == DIRECTION.North:
self.horizontalWalls[i][j] = isBlocked
elif dir == DIRECTION.South:
self.horizontalWalls[i+1][j] = isBlocked
elif dir == DIRECTION.West:
self.verticalWalls[i][j] = isBlocked
elif dir == DIRECTION.East:
self.verticalWalls[i][j+1] = isBlocked
return 0
# ******************************************************************************
# Function Name : getNeighborCost
# Description : Retrieves the calculated cost of a neighboring cell on the map.
# Input : i: The row coordinate of the current cell on the map.
# : j: The column coordinate of the current cell on the map
# : dir: A Direction enumeration (North, South, East, West) indicating
# : which neighboring cell to retrieve the cost.
# Output : None
# Return : Positive float valued cost for the neighboring cell, -1 on error
# *****************************************************************************/
def getNeighborCost(self, i, j, dir):
if (i < 0 or i > 7 or j < 0 or j > 7):
print "ERROR (getNeighborCost): index out of range"
return -1
cellValue = 0
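        # Cells beyond the map edge are treated as prohibitively expensive
        # (sentinel cost 1000).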
if dir == DIRECTION.North:
if (i == 0):
cellValue = 1000
else:
cellValue = self.costMap[i-1][j]
elif dir == DIRECTION.South:
if(i == 7):
cellValue = 1000
else:
cellValue = self.costMap[i+1][j]
elif dir == DIRECTION.West:
if (j == 0):
cellValue = 1000
else:
cellValue = self.costMap[i][j-1]
elif dir == DIRECTION.East:
if (j == 7):
cellValue = 1000
else:
cellValue = self.costMap[i][j+1]
return cellValue
# ******************************************************************************
# Function Name : setNeighborCost
# Description : Sets the calculated cost of a neighboring cell on the map.
# Input : i: The row coordinate of the current cell on the map.
# : j: The column coordinate of the current cell on the map
# : dir: A Direction enumeration (North, South, East, West) indicating
# : which neighboring cell to retrieve the cost.
# : val: Positive float valued cost for the neighboring cell
# Output : None
# Return : None
# *****************************************************************************/
def setNeighborCost(self, i, j, dir, val):
if (i < 0 or i > 7 or j < 0 or j > 7):
print "ERROR (setNeighborCost): index out of range, value not set"
return
if dir == DIRECTION.North:
if (i > 0):
self.costMap[i-1][j] = val
elif dir == DIRECTION.South:
if (i < 7):
self.costMap[i+1][j] = val
elif dir == DIRECTION.West:
if (j > 0):
self.costMap[i][j-1] = val
elif dir == DIRECTION.East:
if (j < 7):
self.costMap[i][j+1] = val
# ******************************************************************************
# Function Name : setCost
# Description : Used for map building, sets the calculated cost of a given map cell
# Input : i: The row coordinate of the current cell on the map.
# : j: The column coordinate of the current cell on the map
    # : val: An integer value (0 to 1023) indicating the cost of a map cell
# Output : None
# Return : 0 if successful, -1 if i or j is out of map bounds
# *****************************************************************************/
def setCost(self, i, j, val):
if (i < 0 or i > 7 or j < 0 or j > 7):
print "ERROR (setCost): index out of range"
return -1
self.costMap[i][j] = val
return 0
# ******************************************************************************
# Function Name : getCost
# Description : Used for map building, gets the calculated cost of a given map cell
# Input : i: The row coordinate of the current cell on the map.
# : j: The column coordinate of the current cell on the map
# Output : None
# Return : cost >= 0 if successful, -1 if i or j is out of map bounds
# *****************************************************************************/
def getCost(self, i, j):
if (i < 0 or i > 7 or j < 0 or j > 7):
print "ERROR (getCost): index out of range"
return -1
return self.costMap[i][j]
# ******************************************************************************
# Function Name : clearCostMap
# Description : Sets all of the values in the cost map to 0
# Input : None
# Output : None
# Return : None
# *****************************************************************************/
def clearCostMap(self):
for i in xrange(8):
for j in xrange(8):
self.costMap[i][j] = 0
# ******************************************************************************
# Function Name : clearObstacleMap
# Description : Sets all of the values in the obstacle map to 0
# Input : None
# Output : None
# Return : None
# *****************************************************************************/
def clearObstacleMap(self):
for i in xrange(8):
for j in xrange(9):
self.verticalWalls[i][j] = 0
for i in xrange(9):
for j in xrange(8):
self.horizontalWalls[i][j] = 0
# ******************************************************************************
# Function Name : printCostMap
# Description : When connected to a terminal, will print out the 8x8 cost map
# Input : None
# Output : None
# Return : None
# *****************************************************************************/
def printCostMap(self):
print "Cost Map:"
for i in xrange(8):
for j in xrange(8):
print(str(self.costMap[i][j])),
print " "
# ******************************************************************************
# Function Name : printObstacleMap
# Description : When connected to a terminal, will print out the 8x8 obstacle map
# Input : None
# Output : None
# Return : None
# *****************************************************************************/
def printObstacleMap(self):
print "Obstacle Map: "
for i in xrange(8):
for j in xrange(8):
                if (self.horizontalWalls[i][j] == 0):
                    if i == 0:
                        sys.stdout.write(" ---")
                    else:
                        sys.stdout.write("    ")
                else:
                    sys.stdout.write(" ---")
            print " "
            for j in xrange(8):
                if (self.verticalWalls[i][j] == 0):
                    if j == 7:
                        sys.stdout.write("  O |")
                    elif j == 0:
                        sys.stdout.write("| O ")
                    else:
                        sys.stdout.write("  O ")
                else:
                    if j == 7:
                        sys.stdout.write("| O |")
                    else:
                        sys.stdout.write("| O ")
print " "
for j in xrange(8):
sys.stdout.write(" ---")
print " "
# ******************************************************************************
# Function Name : getCostmapSize
# Description : Retrieve the size of a given dimension of the costmap
# Input : bool xDim (true for x dimension, false for y dimension)
# Output : None
# Return : costmap size in the requested dimension
# *****************************************************************************/
def getCostmapSize(self, xDim):
if (xDim):
return self.costmap_size_x
else:
return self.costmap_size_y
# ******************************************************************************
# Function Name : getObstacleMapSize
# Description : Retrieve the size of a given dimension of the Obstacle Map
# Input : bool xDim (true for x dimension, false for y dimension)
# Output : None
# Return : obstacle map size in the requested dimension
# *****************************************************************************/
def getObstacleMapSize(self, xDim):
if xDim:
return self.obstacle_size_x
else:
return self.obstacle_size_y
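# Minimal usage sketch (Python 2, matching the xrange/print syntax above):
if __name__ == '__main__':
    your_map = EECSMap()
    your_map.printObstacleMap()
    your_map.setCost(3, 4, 12)
    print your_map.getCost(3, 4)                               # -> 12
    print your_map.getNeighborObstacle(0, 0, DIRECTION.North)  # boundary wall -> 1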
|
|
# Copyright (c) 2006-2021 Andrey Golovizin
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
r"""(simple but) rich text formatting tools
Usage:
>>> t = Text('this ', 'is a ', Tag('em', 'very'), Text(' rich', ' text'))
>>> print(t.render_as('latex'))
this is a \emph{very} rich text
>>> print(str(t))
this is a very rich text
>>> t = t.capitalize().add_period()
>>> print(t.render_as('latex'))
This is a \emph{very} rich text.
>>> print(str(t))
This is a very rich text.
>>> print(Symbol('ndash').render_as('latex'))
--
>>> t = Text('Some ', Tag('em', Text('nested ', Tag('tt', 'Text', Text(' objects')))), '.')
>>> print(t.render_as('latex'))
Some \emph{nested \texttt{Text objects}}.
>>> print(str(t))
Some nested Text objects.
>>> t = t.upper()
>>> print(t.render_as('latex'))
SOME \emph{NESTED \texttt{TEXT OBJECTS}}.
>>> print(str(t))
SOME NESTED TEXT OBJECTS.
>>> t = Text(', ').join(['one', 'two', Tag('em', 'three')])
>>> print(t.render_as('latex'))
one, two, \emph{three}
>>> print(str(t))
one, two, three
>>> t = Text(Symbol('nbsp')).join(['one', 'two', Tag('em', 'three')])
>>> print(t.render_as('latex'))
one~two~\emph{three}
>>> print(str(t))
one<nbsp>two<nbsp>three
"""
from __future__ import absolute_import, unicode_literals
import itertools
import warnings
from abc import ABCMeta, abstractmethod
from pybtex import textutils
from pybtex.utils import collect_iterable, deprecated
# workaround for doctests in Python 2/3
def str_repr(string):
"""
>>> print(str_repr('test'))
'test'
>>> print(str_repr(u'test'))
'test'
"""
result = repr(string)
if result.startswith('u'):
return result[1:]
else:
return result
def ensure_text(value):
if isinstance(value, str):
return String(value)
elif isinstance(value, BaseText):
return value
else:
bad_type = type(value).__name__
raise ValueError('parts must be strings or BaseText instances, not ' + bad_type)
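# For example: ensure_text('abc') returns String('abc'), ensure_text(Tag('em',
# 'x')) returns the Tag unchanged, and ensure_text(42) raises ValueError.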
class BaseText(object):
__metaclass__ = ABCMeta
@abstractmethod
def __str__(self):
raise NotImplementedError
@abstractmethod
def __eq__(self, other):
raise NotImplementedError
def __ne__(self, other):
return not self == other
@abstractmethod
def __len__(self):
raise NotImplementedError
@abstractmethod
def __contains__(self, item):
raise NotImplementedError
@abstractmethod
def __getitem__(self, key):
raise NotImplementedError
def __add__(self, other):
"""
Concatenate this Text with another Text or string.
>>> Text('Longcat is ') + Tag('em', 'long')
Text('Longcat is ', Tag('em', 'long'))
"""
return Text(self, other)
def append(self, text):
"""
Append text to the end of this text.
Normally, this is the same as concatenating texts with +,
but for tags and similar objects the appended text is placed _inside_ the tag.
>>> text = Tag('em', 'Look here')
>>> print((text + '!').render_as('html'))
<em>Look here</em>!
>>> print(text.append('!').render_as('html'))
<em>Look here!</em>
"""
return self + text
def join(self, parts):
"""Join a list using this text (like string.join)
>>> letters = ['a', 'b', 'c']
>>> print(str(String('-').join(letters)))
a-b-c
>>> print(str(String('-').join(iter(letters))))
a-b-c
"""
if not parts:
return Text()
joined = []
for part in parts:
if joined:
joined.append(self)
joined.append(part)
return Text(*joined)
@abstractmethod
def split(self, sep=None, keep_empty_parts=None):
raise NotImplementedError
@abstractmethod
def startswith(self, prefix):
"""
Return True if string starts with the prefix,
otherwise return False.
prefix can also be a tuple of suffixes to look for.
"""
raise NotImplementedError
@abstractmethod
def endswith(self, suffix):
"""
Return True if the string ends with the specified suffix,
otherwise return False.
suffix can also be a tuple of suffixes to look for.
"""
raise NotImplementedError
@abstractmethod
def isalpha(self):
raise NotImplementedError
def add_period(self, period='.'):
"""
Add a period to the end of text, if the last character is not ".", "!" or "?".
>>> text = Text("That's all, folks")
>>> print(str(text.add_period()))
That's all, folks.
>>> text = Text("That's all, folks!")
>>> print(str(text.add_period()))
That's all, folks!
"""
if self and not textutils.is_terminated(self):
return self.append(period)
else:
return self
def abbreviate(self):
def abbreviate_word(word):
if word.isalpha():
return word[0].add_period()
else:
return word
parts = self.split(textutils.delimiter_re)
return String('').join(abbreviate_word(part) for part in parts)
def capfirst(self):
"""
Capitalize the first letter of the text.
>>> Text(Tag('em', 'long Cat')).capfirst()
Text(Tag('em', 'Long Cat'))
"""
return self[:1].upper() + self[1:]
def capitalize(self):
"""
Capitalize the first letter of the text and lowercase the rest.
>>> Text(Tag('em', 'LONG CAT')).capitalize()
Text(Tag('em', 'Long cat'))
"""
return self[:1].upper() + self[1:].lower()
@abstractmethod
def lower(self):
raise NotImplementedError
@abstractmethod
def upper(self):
raise NotImplementedError
@abstractmethod
def render(self, backend):
raise NotImplementedError
def render_as(self, backend_name):
r"""
Render this :py:class:`Text` into markup.
This is a wrapper method that loads a formatting backend plugin
and calls :py:meth:`Text.render`.
>>> text = Text('Longcat is ', Tag('em', 'looooooong'), '!')
>>> print(text.render_as('html'))
Longcat is <em>looooooong</em>!
>>> print(text.render_as('latex'))
Longcat is \emph{looooooong}!
>>> print(text.render_as('text'))
Longcat is looooooong!
:param backend_name: The name of the output backend (like ``"latex"`` or
``"html"``).
"""
from pybtex.plugin import find_plugin
backend_cls = find_plugin('pybtex.backends', backend_name)
return self.render(backend_cls())
def _unpack(self):
"""
For Text object, iterate over all text parts.
Else, yield the object itself.
Used for unpacking Text objects passed as children to another Text object.
"""
yield self
def _typeinfo(self):
"""
Return the type of this object and its parameters
(not including the actual text content).
Used for:
- merging similar tags together (<em>A</em><em>B</em> -> <em>AB</em>),
- creating similar text objects with different text content.
"""
return None, ()
class BaseMultipartText(BaseText):
info = ()
def __init__(self, *parts):
"""Create a text object consisting of one or more parts.
Empty parts are ignored:
>>> Text() == Text('') == Text('', '', '')
True
>>> Text('Word', '') == Text('Word')
True
Text() objects are unpacked and their children are included directly:
>>> Text(Text('Multi', ' '), Tag('em', 'part'), Text(' ', Text('text!')))
Text('Multi ', Tag('em', 'part'), ' text!')
>>> Tag('strong', Text('Multi', ' '), Tag('em', 'part'), Text(' ', 'text!'))
Tag('strong', 'Multi ', Tag('em', 'part'), ' text!')
Similar objects are merged together:
>>> Text('Multi', Tag('em', 'part'), Text(Tag('em', ' ', 'text!')))
Text('Multi', Tag('em', 'part text!'))
>>> Text('Please ', HRef('/', 'click'), HRef('/', ' here'), '.')
Text('Please ', HRef('/', 'click here'), '.')
"""
parts = (ensure_text(part) for part in parts)
nonempty_parts = (part for part in parts if part)
unpacked_parts = itertools.chain(*[part._unpack() for part in nonempty_parts])
merged_parts = self._merge_similar(unpacked_parts)
self.parts = list(merged_parts)
self.length = sum(len(part) for part in self.parts)
def __str__(self):
return ''.join(str(part) for part in self.parts)
def __eq__(self, other):
"""
Rich text objects support equality comparison:
>>> Text('Cat') == Text('cat')
False
>>> Text('Cat') == Text('Cat')
True
"""
return (
isinstance(other, BaseText) and
self._typeinfo() == other._typeinfo() and
self.parts == other.parts
)
def __len__(self):
"""
``len(text)`` returns the number of characters in the text, ignoring
the markup:
>>> len(Text('Long cat'))
8
>>> len(Text(Tag('em', 'Long'), ' cat'))
8
>>> len(Text(HRef('http://example.com/', 'Long'), ' cat'))
8
"""
return self.length
def __contains__(self, item):
"""
``value in text`` returns ``True`` if any part of the ``text``
contains the substring ``value``:
>>> 'Long cat' in Text('Long cat!')
True
        Substrings split across multiple text parts are not matched:
>>> 'Long cat' in Text(Tag('em', 'Long'), 'cat!')
False
"""
if not isinstance(item, str):
raise TypeError(item)
return not item or any(part.__contains__(item) for part in self.parts)
def __getitem__(self, key):
"""
Slicing and extracting characters works like with regular strings,
formatting is preserved.
>>> Text('Longcat is ', Tag('em', 'looooooong!'))[:15]
Text('Longcat is ', Tag('em', 'looo'))
>>> Text('Longcat is ', Tag('em', 'looooooong!'))[-1]
Text(Tag('em', '!'))
"""
if isinstance(key, int):
start = key
end = None
elif isinstance(key, slice):
start, end, step = key.indices(len(self))
if step != 1:
raise NotImplementedError
else:
raise TypeError(key, type(key))
if start < 0:
start = len(self) + start
if end is None:
end = start + 1
if end < 0:
end = len(self) + end
return self._slice_end(len(self) - start)._slice_beginning(end - start)
def _slice_beginning(self, slice_length):
"""
        Return a text consisting of the first slice_length characters
of this text (with formatting preserved).
"""
parts = []
length = 0
for part in self.parts:
if length + len(part) > slice_length:
parts.append(part[:slice_length - length])
break
else:
parts.append(part)
length += len(part)
return self._create_similar(parts)
def _slice_end(self, slice_length):
"""
        Return a text consisting of the last slice_length characters
of this text (with formatting preserved).
"""
parts = []
length = 0
for part in reversed(self.parts):
if length + len(part) > slice_length:
parts.append(part[len(part) - (slice_length - length):])
break
else:
parts.append(part)
length += len(part)
return self._create_similar(reversed(parts))
def append(self, text):
"""
Append text to the end of this text.
For Tags, HRefs, etc. the appended text is placed *inside* the tag.
>>> text = Tag('strong', 'Chuck Norris')
>>> print((text + ' wins!').render_as('html'))
<strong>Chuck Norris</strong> wins!
>>> print(text.append(' wins!').render_as('html'))
<strong>Chuck Norris wins!</strong>
"""
return self._create_similar(self.parts + [text])
@collect_iterable
def split(self, sep=None, keep_empty_parts=None):
"""
>>> Text('a + b').split()
[Text('a'), Text('+'), Text('b')]
>>> Text('a, b').split(', ')
[Text('a'), Text('b')]
"""
if keep_empty_parts is None:
keep_empty_parts = sep is not None
tail = [''] if keep_empty_parts else []
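# `tail` carries the trailing fragment of the previous part so it can merge
# with the first item produced by the next part.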
for part in self.parts:
split_part = part.split(sep, keep_empty_parts=True)
if not split_part:
continue
for item in split_part[:-1]:
if tail:
yield self._create_similar(tail + [item])
tail = []
else:
if item or keep_empty_parts:
yield self._create_similar([item])
tail.append(split_part[-1])
if tail:
tail_text = self._create_similar(tail)
if tail_text or keep_empty_parts:
yield tail_text
def startswith(self, prefix):
"""
Return True if the text starts with the given prefix.
>>> Text('Longcat!').startswith('Longcat')
True
Prefixes split across multiple parts are not matched:
>>> Text(Tag('em', 'Long'), 'cat!').startswith('Longcat')
False
"""
if not self.parts:
return False
else:
return self.parts[0].startswith(prefix)
def endswith(self, suffix):
"""
Return True if the text ends with the given suffix.
>>> Text('Longcat!').endswith('cat!')
True
Suffixes split across multiple parts are not matched:
>>> Text('Long', Tag('em', 'cat'), '!').endswith('cat!')
False
"""
if not self.parts:
return False
else:
return self.parts[-1].endswith(suffix)
def isalpha(self):
"""
Return True if all characters in the string are alphabetic and there is
at least one character, False otherwise.
"""
return bool(self) and all(part.isalpha() for part in self.parts)
def lower(self):
"""
Convert rich text to lowercase.
>>> Text(Tag('em', 'Long cat')).lower()
Text(Tag('em', 'long cat'))
"""
return self._create_similar(part.lower() for part in self.parts)
def upper(self):
"""
Convert rich text to uppercase.
>>> Text(Tag('em', 'Long cat')).upper()
Text(Tag('em', 'LONG CAT'))
"""
return self._create_similar(part.upper() for part in self.parts)
def render(self, backend):
"""
Render this :py:class:`Text` into markup.
:param backend: The formatting backend (an instance of
:py:class:`pybtex.backends.BaseBackend`).
"""
rendered_list = [part.render(backend) for part in self.parts]
assert all(isinstance(item, backend.RenderType)
for item in rendered_list)
return backend.render_sequence(rendered_list)
def _typeinfo(self):
"""Return the type and the parameters used to create this text object.
>>> text = Tag('strong', 'Heavy rain!')
>>> text._typeinfo() == (Tag, ('strong',))
True
"""
return type(self), self.info
def _create_similar(self, parts):
"""
Create a new text object of the same type with the same parameters,
with different text content.
>>> text = Tag('strong', 'Bananas!')
>>> text._create_similar(['Apples!'])
Tag('strong', 'Apples!')
"""
cls, cls_args = self._typeinfo()
args = list(cls_args) + list(parts)
return cls(*args)
def _merge_similar(self, parts):
"""Merge adjacent text objects with the same type and parameters together.
>>> text = Text()
>>> parts = [Tag('em', 'Breaking'), Tag('em', ' '), Tag('em', 'news!')]
>>> list(text._merge_similar(parts))
[Tag('em', 'Breaking news!')]
"""
groups = itertools.groupby(parts, lambda value: value._typeinfo())
for typeinfo, group in groups:
cls, info = typeinfo
group = list(group)
if cls and len(group) > 1:
group_parts = itertools.chain(*(text.parts for text in group))
args = list(info) + list(group_parts)
yield cls(*args)
else:
for text in group:
yield text
@deprecated('0.19', 'use __str__() instead')
def plaintext(self):
return str(self)
@deprecated('0.19')
def enumerate(self):
for n, child in enumerate(self.parts):
try:
for p in child.enumerate():
yield p
except AttributeError:
yield self, n
@deprecated('0.19')
def reversed(self):
for n, child in reversed(list(enumerate(self.parts))):
try:
for p in child.reversed():
yield p
except AttributeError:
yield self, n
@deprecated('0.19', 'use slicing instead')
def get_beginning(self):
try:
l, i = next(self.enumerate())
except StopIteration:
pass
else:
return l.parts[i]
@deprecated('0.19', 'use slicing instead')
def get_end(self):
try:
l, i = next(self.reversed())
except StopIteration:
pass
else:
return l.parts[i]
@deprecated('0.19', 'use slicing instead')
def apply_to_start(self, f):
return self.map(f, lambda index, length: index == 0)
@deprecated('0.19', 'use slicing instead')
def apply_to_end(self, f):
return self.map(f, lambda index, length: index == length - 1)
@deprecated('0.19')
def map(self, f, condition=None):
if condition is None:
condition = lambda index, length: True
def iter_map_with_condition():
length = len(self)
for index, child in enumerate(self.parts):
if hasattr(child, 'map'):
yield child.map(f, condition) if condition(index, length) else child
else:
yield f(child) if condition(index, length) else child
return self._create_similar(iter_map_with_condition())
class String(BaseText):
"""
A :py:class:`String` is a wrapper for a plain Python string.
>>> from pybtex.richtext import String
>>> print(String('Crime & Punishment').render_as('text'))
Crime & Punishment
>>> print(String('Crime & Punishment').render_as('html'))
Crime & Punishment
:py:class:`String` supports the same methods as :py:class:`Text`.
"""
def __init__(self, *parts):
"""
All arguments must be plain unicode strings.
Arguments are concatenated together.
>>> print(str(String('November', ', ', 'December', '.')))
November, December.
"""
self.value = ''.join(parts)
def __repr__(self):
return str_repr(self.value)
def __str__(self):
return str(self.value)
def __eq__(self, other):
"""
Compare two :py:class:`.String` objects.
"""
return type(other) == type(self) and self.value == other.value
def __len__(self):
return self.value.__len__()
def __contains__(self, item):
return self.value.__contains__(item)
def __getitem__(self, index):
return String(self.value.__getitem__(index))
def __add__(self, other):
return BaseText.__add__(self, other)
def split(self, sep=None, keep_empty_parts=None):
if keep_empty_parts is None:
keep_empty_parts = sep is not None
if sep is None:
from .textutils import whitespace_re
parts = whitespace_re.split(self.value)
elif isinstance(sep, str):
parts = self.value.split(sep)
else:
try:
split_method = sep.split
except AttributeError:
raise TypeError('sep must be None, string or compiled regular expression')
else:
parts = split_method(self.value)
return [String(part) for part in parts if part or keep_empty_parts]
def startswith(self, prefix):
"""
Return True if string starts with the prefix,
otherwise return False.
prefix can also be a tuple of prefixes to look for.
"""
return self.value.startswith(prefix)
def endswith(self, suffix):
"""
Return True if the string ends with the specified suffix,
otherwise return False.
suffix can also be a tuple of suffixes to look for.
"""
return self.value.endswith(suffix)
def isalpha(self):
return self.value.isalpha()
def lower(self):
return String(self.value.lower())
def upper(self):
return String(self.value.upper())
@property
def parts(self):
return [str(self)]
def _typeinfo(self):
return String, ()
def render(self, backend):
return backend.format_str(self.value)
class Text(BaseMultipartText):
"""
The :py:class:`Text` class is the top level container that may contain
:py:class:`String`, :py:class:`Tag` or :py:class:`HRef` objects.
"""
def __repr__(self):
return 'Text({})'.format(', '.join(repr(part) for part in self.parts))
def _unpack(self):
for part in self.parts:
yield part
@classmethod
def from_latex(cls, latex):
import codecs
import latexcodec # noqa
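# importing latexcodec registers the 'ulatex' codec used by codecs.decode() below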
from pybtex.markup import LaTeXParser
return LaTeXParser(codecs.decode(latex, 'ulatex')).parse()
class Tag(BaseMultipartText):
r"""
A :py:class:`Tag` represents something like an HTML tag
or a LaTeX formatting command:
>>> from pybtex.richtext import Tag
>>> tag = Tag('em', 'The TeXbook')
>>> print(tag.render_as('html'))
<em>The TeXbook</em>
>>> print(tag.render_as('latex'))
\emph{The TeXbook}
:py:class:`Tag` supports the same methods as :py:class:`Text`.
"""
def __check_name(self, name):
depr_map = {}
depr_map[u'emph'] = u'em'
if name in depr_map:
msg = u"The tag '%s' is deprecated" % name
msg += u", use '%s' instead." % depr_map[name]
warnings.warn(msg, DeprecationWarning, stacklevel=3)
return depr_map[name]
return name
def __init__(self, name, *args):
if not isinstance(name, (str, Text)):
raise ValueError(
"name must be str or Text (got %s)" % name.__class__.__name__)
self.name = self.__check_name(str(name))
self.info = self.name,
super(Tag, self).__init__(*args)
def __repr__(self):
if self.parts:
reprparts = ', '.join(repr(part) for part in self.parts)
return 'Tag({}, {})'.format(str_repr(self.name), reprparts)
else:
return 'Tag({})'.format(str_repr(self.name))
def render(self, backend):
text = super(Tag, self).render(backend)
return backend.format_tag(self.name, text)
class HRef(BaseMultipartText):
"""
A :py:class:`HRef` represents a hyperlink:
>>> from pybtex.richtext import HRef
>>> href = HRef('http://ctan.org/', 'CTAN')
>>> print(href.render_as('html'))
<a href="http://ctan.org/">CTAN</a>
>>> print(href.render_as('latex'))
\\href{http://ctan.org/}{CTAN}
>>> href = HRef(String('http://ctan.org/'), String('http://ctan.org/'))
>>> print(href.render_as('latex'))
\\url{http://ctan.org/}
:py:class:`HRef` supports the same methods as :py:class:`Text`.
"""
def __init__(self, url, *args, external=False):
if not isinstance(url, (str, BaseText)):
raise ValueError(
"url must be str or Text (got %s)" % url.__class__.__name__)
self.url = str(url)
self.info = self.url,
self.external = external
super(HRef, self).__init__(*args)
def __repr__(self):
reprparts = ', '.join(repr(part) for part in self.parts)
return 'HRef({}, {})'.format(str_repr(self.url), reprparts)
def render(self, backend):
text = super(HRef, self).render(backend)
return backend.format_href(self.url, text, self.external)
class Protected(BaseMultipartText):
r"""
A :py:class:`Protected` represents a "protected" piece of text.
- :py:meth:`Protected.lower`, :py:meth:`Protected.upper`,
:py:meth:`Protected.capfirst`, and :py:meth:`Protected.capitalize`
are no-ops and just return the :py:class:`Protected` object itself.
- :py:meth:`Protected.split` never splits the text. It always returns a
one-element list containing the :py:class:`Protected` object itself.
- In LaTeX output, :py:class:`Protected` is {surrounded by braces}. HTML
and plain text backends just output the text as-is.
>>> from pybtex.richtext import Protected
>>> text = Protected('The CTAN archive')
>>> text.lower()
Protected('The CTAN archive')
>>> text.split()
[Protected('The CTAN archive')]
>>> print(text.render_as('latex'))
{The CTAN archive}
>>> print(text.render_as('html'))
<span class="bibtex-protected">The CTAN archive</span>
.. versionadded:: 0.20
"""
def __init__(self, *args):
super(Protected, self).__init__(*args)
def __repr__(self):
reprparts = ', '.join(repr(part) for part in self.parts)
return 'Protected({})'.format(reprparts)
def capfirst(self):
return self
def capitalize(self):
return self
def lower(self):
return self
def upper(self):
return self
def split(self, sep=None, keep_empty_parts=None):
return [self]
def render(self, backend):
text = super(Protected, self).render(backend)
return backend.format_protected(text)
class Symbol(BaseText):
"""A special symbol. This class is rarely used and may be removed in
future versions.
Examples of special symbols are non-breaking spaces and dashes.
:py:class:`Symbol` supports the same methods as :py:class:`Text`.
"""
def __init__(self, name):
self.name = name
self.info = self.name,
def __len__(self):
return 1
def __repr__(self):
return "Symbol(%s)" % str_repr(self.name)
def __str__(self):
# XXX
return u'<%s>' % self.name
def __eq__(self, other):
return self.name == other.name
def __contains__(self, item):
return False
def __getitem__(self, index):
# mimic the behavior of a 1-element string
try:
result = 'a'[index]
except IndexError:
raise IndexError('richtext.Symbol index out of range')
else:
return self if result else String()
def split(self, sep=None, keep_empty_parts=None):
return [self]
def startswith(self, text):
return False
def endswith(self, text):
return False
def isalpha(self):
return False
def render(self, backend):
return backend.symbols[self.name]
def upper(self):
return self
def lower(self):
return self
nbsp = Symbol('nbsp')
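# A minimal usage sketch (not part of the original module); the output shown
# assumes an 'html' backend consistent with the doctests above:
#
# >>> text = Text('See ', HRef('http://ctan.org/', 'CTAN'), ' for ', Tag('em', 'more'), '.')
# >>> print(text.render_as('html'))
# See <a href="http://ctan.org/">CTAN</a> for <em>more</em>.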
|
|
"""Test an Edition that tracks LSST document releases (``lsst_doc``)."""
from __future__ import annotations
from typing import TYPE_CHECKING
from keeper.testutils import MockTaskQueue
if TYPE_CHECKING:
from unittest.mock import Mock
from keeper.testutils import TestClient
def test_lsst_doc_edition(client: TestClient, mocker: Mock) -> None:
"""Test an edition that tracks LSST Doc semantic versions.
1. Create a build on `master`; it should be tracked because the LSST_DOC
mode tracks master if a semantic version tag hasn't been pushed yet.
2. Create a ticket branch; it isn't tracked.
3. Create a v1.0 build; it is tracked.
4. Create another build on `master`; it isn't tracked because we already
have the v1.0 build.
5. Create a v0.9 build that is not tracked because it's older.
6. Create a v1.1 build that **is** tracked because it's newer.
"""
mocker.patch(
"keeper.taskrunner.inspect_task_queue", return_value=None
)
task_queue = MockTaskQueue(mocker)
# Create default organization
from keeper.models import Organization, db
org = Organization(
slug="test",
title="Test",
root_domain="lsst.io",
fastly_domain="global.ssl.fastly.net",
bucket_name="bucket-name",
)
db.session.add(org)
db.session.commit()
# ========================================================================
# Add product /products/ldm-151
mocker.resetall()
p1_data = {
"slug": "ldm-151",
"doc_repo": "https://github.com/lsst/LDM-151",
"main_mode": "lsst_doc",
"title": "Applications Design",
"root_domain": "lsst.io",
"root_fastly_domain": "global.ssl.fastly.net",
"bucket_name": "bucket-name",
}
r = client.post("/products/", p1_data)
task_queue.apply_task_side_effects()
p1_url = r.headers["Location"]
assert r.status == 201
# ========================================================================
# Get the URL for the default edition
r = client.get(p1_url + "/editions/")
main_edition_url = sorted(r.json["editions"])[0]
assert main_edition_url == "http://example.test/editions/1"
# ========================================================================
# Create a build on 'master'
mocker.resetall()
b1_data = {
"slug": "b1",
"github_requester": "jonathansick",
"git_refs": ["master"],
}
r = client.post("/products/ldm-151/builds/", b1_data)
task_queue.apply_task_side_effects()
b1_url = r.headers["Location"]
# ========================================================================
# Confirm build on 'master'
mocker.resetall()
r = client.patch(b1_url, {"uploaded": True})
task_queue.apply_task_side_effects()
task_queue.assert_edition_build_v1(main_edition_url, b1_url)
# The 'master' edition was also automatically created to track master.
r = client.get(p1_url + "/editions/")
master_edition_url = sorted(r.json["editions"])[1]
task_queue.assert_edition_build_v1(master_edition_url, b1_url)
# Check that it's tracking the master branch
r = client.get(master_edition_url)
assert r.json["mode"] == "git_refs"
assert r.json["slug"] == "master"
assert r.json["title"] == "master"
assert r.json["tracked_refs"] == ["master"]
# Test that the main edition updated because there are no builds yet
# with semantic versions
r = client.get(main_edition_url)
assert r.json["build_url"] == b1_url
assert r.json["pending_rebuild"] is False
# ========================================================================
# Create a ticket branch build
mocker.resetall()
b2_data = {
"slug": "b2",
"github_requester": "jonathansick",
"git_refs": ["tickets/DM-1"],
}
r = client.post("/products/ldm-151/builds/", b2_data)
task_queue.apply_task_side_effects()
b2_url = r.headers["Location"]
# ========================================================================
# Confirm ticket branch build
mocker.resetall()
r = client.patch(b2_url, {"uploaded": True})
task_queue.apply_task_side_effects()
task_queue.assert_edition_build_v1(
"http://example.test/editions/3", b2_url
)
# Test that the main edition *did not* update because this build is
# neither for master nor a semantic version.
r = client.get(main_edition_url)
assert r.json["build_url"] == b1_url
# ========================================================================
# Create a build with a semantic version tag.
mocker.resetall()
b3_data = {
"slug": "b3",
"github_requester": "jonathansick",
"git_refs": ["v1.0"],
}
r = client.post("/products/ldm-151/builds/", b3_data)
task_queue.apply_task_side_effects()
b3_url = r.headers["Location"]
# ========================================================================
# Confirm v1.0 build
mocker.resetall()
r = client.patch(b3_url, {"uploaded": True})
task_queue.apply_task_side_effects()
task_queue.assert_edition_build_v1(
"http://example.test/editions/1", b3_url
)
task_queue.assert_edition_build_v1(
"http://example.test/editions/4", b3_url
)
# Test that the main edition updated
r = client.get(main_edition_url)
assert r.json["build_url"] == b3_url
# Test that the v1-0 edition updated
r = client.get("http://example.test/editions/4")
assert r.json["build_url"] == b3_url
# ========================================================================
# Create another build on 'master'
mocker.resetall()
b4_data = {
"slug": "b4",
"github_requester": "jonathansick",
"git_refs": ["master"],
}
r = client.post("/products/ldm-151/builds/", b4_data)
task_queue.apply_task_side_effects()
b4_url = r.headers["Location"]
# ========================================================================
# Confirm master build
mocker.resetall()
r = client.patch(b4_url, {"uploaded": True})
task_queue.apply_task_side_effects()
# Test that the main edition *did not* update because now it's sticking
# to only show semantic versions.
r = client.get(main_edition_url)
assert r.json["build_url"] == b3_url
# Test that the **master** edition did update, though
r = client.get(master_edition_url)
assert r.json["build_url"] == b4_url
# ========================================================================
# Create a build with an **older** semantic version tag.
mocker.resetall()
b5_data = {
"slug": "b5",
"github_requester": "jonathansick",
"git_refs": ["v0.9"],
}
r = client.post("/products/ldm-151/builds/", b5_data)
task_queue.apply_task_side_effects()
b5_url = r.headers["Location"]
# ========================================================================
# Confirm v0.9 build
mocker.resetall()
r = client.patch(b5_url, {"uploaded": True})
task_queue.apply_task_side_effects()
# Test that the main edition *did not* update b/c it's older
r = client.get(main_edition_url)
assert r.json["build_url"] == b3_url
# ========================================================================
# Create a build with a **newer** semantic version tag.
mocker.resetall()
b6_data = {
"slug": "b6",
"github_requester": "jonathansick",
"git_refs": ["v1.1"],
}
r = client.post("/products/ldm-151/builds/", b6_data)
task_queue.apply_task_side_effects()
b6_url = r.headers["Location"]
mocker.resetall()
r = client.patch(b6_url, {"uploaded": True})
task_queue.apply_task_side_effects()
# Test that the main edition updated
r = client.get(main_edition_url)
assert r.json["build_url"] == b6_url
task_queue.assert_edition_build_v1(
"http://example.test/editions/1", b6_url
)
task_queue.assert_edition_build_v1(
"http://example.test/editions/6", b6_url
)
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class WebApplicationFirewallPoliciesOperations:
"""WebApplicationFirewallPoliciesOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2019_04_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
resource_group_name: str,
**kwargs: Any
) -> AsyncIterable["_models.WebApplicationFirewallPolicyListResult"]:
"""Lists all of the protection policies within a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either WebApplicationFirewallPolicyListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2019_04_01.models.WebApplicationFirewallPolicyListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.WebApplicationFirewallPolicyListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-04-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('WebApplicationFirewallPolicyListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ApplicationGatewayWebApplicationFirewallPolicies'} # type: ignore
def list_all(
self,
**kwargs: Any
) -> AsyncIterable["_models.WebApplicationFirewallPolicyListResult"]:
"""Gets all the WAF policies in a subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either WebApplicationFirewallPolicyListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2019_04_01.models.WebApplicationFirewallPolicyListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.WebApplicationFirewallPolicyListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-04-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_all.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('WebApplicationFirewallPolicyListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_all.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/ApplicationGatewayWebApplicationFirewallPolicies'} # type: ignore
async def get(
self,
resource_group_name: str,
policy_name: str,
**kwargs: Any
) -> "_models.WebApplicationFirewallPolicy":
"""Retrieve protection policy with specified name within a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param policy_name: The name of the policy.
:type policy_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: WebApplicationFirewallPolicy, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2019_04_01.models.WebApplicationFirewallPolicy
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.WebApplicationFirewallPolicy"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-04-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'policyName': self._serialize.url("policy_name", policy_name, 'str', max_length=128, min_length=0),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('WebApplicationFirewallPolicy', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ApplicationGatewayWebApplicationFirewallPolicies/{policyName}'} # type: ignore
async def create_or_update(
self,
resource_group_name: str,
policy_name: str,
parameters: "_models.WebApplicationFirewallPolicy",
**kwargs: Any
) -> "_models.WebApplicationFirewallPolicy":
"""Creates or update policy with specified rule set name within a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param policy_name: The name of the policy.
:type policy_name: str
:param parameters: Policy to be created.
:type parameters: ~azure.mgmt.network.v2019_04_01.models.WebApplicationFirewallPolicy
:keyword callable cls: A custom type or function that will be passed the direct response
:return: WebApplicationFirewallPolicy, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2019_04_01.models.WebApplicationFirewallPolicy
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.WebApplicationFirewallPolicy"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-04-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.create_or_update.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'policyName': self._serialize.url("policy_name", policy_name, 'str', max_length=128, min_length=0),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'WebApplicationFirewallPolicy')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('WebApplicationFirewallPolicy', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('WebApplicationFirewallPolicy', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ApplicationGatewayWebApplicationFirewallPolicies/{policyName}'} # type: ignore
async def _delete_initial(
self,
resource_group_name: str,
policy_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-04-01"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'policyName': self._serialize.url("policy_name", policy_name, 'str', max_length=128, min_length=0),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ApplicationGatewayWebApplicationFirewallPolicies/{policyName}'} # type: ignore
async def begin_delete(
self,
resource_group_name: str,
policy_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Deletes Policy.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param policy_name: The name of the policy.
:type policy_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
policy_name=policy_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'policyName': self._serialize.url("policy_name", policy_name, 'str', max_length=128, min_length=0),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ApplicationGatewayWebApplicationFirewallPolicies/{policyName}'} # type: ignore
|
|
import os
import base64
from cryptography.hazmat.backends import default_backend
import cryptography.hazmat.primitives.hashes as c_hashes
import cryptography.hazmat.primitives.asymmetric.rsa as c_rsa
import cryptography.hazmat.primitives.asymmetric.padding as c_padding
import cryptography.hazmat.primitives.serialization as c_serialization
from cryptography.exceptions import InvalidSignature
import synapse.glob as s_glob
import synapse.cortex as s_cortex
import synapse.lib.cache as c_cache
from synapse.common import *
from synapse.eventbus import EventBus
'''
Custom PKI API / objects for use in synapse.
Glossary:
* Token - {
'iden':<guid>,
'name':<name>, # human readable name (aka "who")
'root':<int>, # set to 1 for root cert
'pubkey':<byts>, # DER encoded public key bytes
'can':{ <tag>:True, ... }, # rights granted to the token
'issued':<time> # epoch time when embedded in cert and signed
}
* Certificate - ( <byts>, { # msgpack bytes (most often a token)
'signs':( (<iden>,<sign>), ... ),
'certs':( <cert>, ... ),
}
)
'''
backend = default_backend()
c_sha1 = c_hashes.SHA1()
c_sha256 = c_hashes.SHA256()
c_oaep_sha1 = c_padding.OAEP(mgf=c_padding.MGF1(algorithm=c_sha1), algorithm=c_sha1, label=None)
c_pss_sha256 = c_padding.PSS(mgf=c_padding.MGF1(c_sha256),salt_length=c_padding.PSS.MAX_LENGTH)
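# OAEP(SHA-1) padding is used for public-key encrypt/decrypt below;
# PSS(SHA-256) is used for sign/verify.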
homedir = os.path.expanduser('~')
pkipath = os.path.join(homedir,'.synpki')
pkicore = 'sqlite:///%s' % (pkipath,)
def getUserPki():
# TODO env var
# TODO agent?
'''
Return the current/default PkiStor for the current user.
'''
with s_glob.lock:
if s_glob.pki == None:
core = s_cortex.openurl(pkicore)
s_glob.pki = PkiStor(core)
return s_glob.pki
def obj2b64(obj):
return base64.b64encode( msgenpack(obj) )
def b642obj(txt):
return msgunpack( base64.b64decode(txt) )
def initTokenTufo(iden, pubkey, root=False, can=(), **props):
props['root'] = root
props['pubkey'] = pubkey
props['can'] = { c:True for c in can }
return (iden,props)
def initTokenCert(token):
byts = msgenpack(token)
return tufo(byts,signs=(),certs=())
def pubToDer(pub):
'''
DER encode an RSA public key
'''
return pub.public_bytes(
encoding=c_serialization.Encoding.DER,
format=c_serialization.PublicFormat.SubjectPublicKeyInfo,
)
def keyToDer(key):
'''
DER encode an RSA key
'''
return key.private_bytes(
encoding=c_serialization.Encoding.DER,
format=c_serialization.PrivateFormat.TraditionalOpenSSL,
encryption_algorithm=c_serialization.NoEncryption()
)
def pubEncBytes(pub, byts):
return pub.encrypt(byts,c_oaep_sha1)
def keyDecBytes(key, byts):
return key.decrypt(byts,c_oaep_sha1)
def genRsaKey(bits=4096):
'''
Generate a new RSA key pair.
'''
return c_rsa.generate_private_key(public_exponent=65537, key_size=bits, backend=backend)
class PkiStor(EventBus):
'''
A PkiStor models public key authentication tokens using a cortex
and provides APIs for creating, verifying, and using tokens for AAA.
'''
def __init__(self, core):
EventBus.__init__(self)
self.core = core
self.keys = c_cache.Cache()
self.keys.setOnMiss( self._getRsaKey )
self.pubs = c_cache.Cache()
self.pubs.setOnMiss( self._getPubKey )
self.certs = c_cache.Cache()
self.certs.setOnMiss( self._getTokenCert )
self.tokens = c_cache.Cache()
self.tokens.setOnMiss( self._getTokenTufo )
core.onfini( self.keys.fini )
core.onfini( self.pubs.fini )
core.onfini( self.certs.fini )
core.onfini( self.tokens.fini )
model = core.genDataModel()
model.addTufoForm('syn:token', ptype='str', doc='synapse identity token (user/host)')
model.addTufoProp('syn:token', 'user', doc='human readable user name for this token')
model.addTufoProp('syn:token', 'host', doc='human readable host name for this token')
model.addTufoProp('syn:token', 'blob', doc='Base64 encoded token blob')
model.addTufoProp('syn:token', 'cert', doc='Base64 encoded certificate blob')
model.addTufoProp('syn:token', 'rsakey', doc='base64( der( rsa.private ) )')
def setTokenTufo(self, token, save=False):
'''
Add a trusted token tufo and optionally save.
Example:
pki.setTokenTufo(token,save=True)
'''
iden = token[0]
host = token[1].get('host')
user = token[1].get('user')
self.tokens.put(iden,token)
if save:
tokn = self.core.formTufoByProp('syn:token',iden)
b64blob = base64.b64encode( msgenpack( token ) )
props = dict(blob=b64blob)
if host != None:
props['host'] = host
if user != None:
props['user'] = user
self.core.setTufoProps(tokn, **props)
def getIdenByUser(self, user):
'''
Get user token iden by name.
'''
tokn = self.core.getTufoByProp('syn:token:user', user)
if tokn == None:
return None
return tokn[1].get('syn:token')
def getIdenByHost(self, host):
'''
Get host token iden by name.
'''
# FIXME cache
tokn = self.core.getTufoByProp('syn:token:host', host)
if tokn == None:
return None
return tokn[1].get('syn:token')
def getTokenTufo(self, iden):
'''
Return the tufo for the given token iden.
Example:
tokn = pki.getTokenTufo(iden)
'''
return self.tokens.get(iden)
def _getTokenTufo(self, iden):
tokn = self.core.getTufoByProp('syn:token',iden)
if tokn == None:
return None
blob = tokn[1].get('syn:token:blob')
if blob == None:
return None
return msgunpack( base64.b64decode( blob ) )
def getPubKey(self, iden):
'''
Retrieve the RSA public key for the given iden (or None).
'''
return self.pubs.get(iden)
def _getPubKey(self, iden):
# load all pubkeys from tokens
token = self.getTokenTufo(iden)
pubder = token[1].get('pubkey')
return c_serialization.load_der_public_key(pubder, backend)
def getRsaKey(self, iden):
'''
Retrieve the RSA private key for the given iden (or None).
Example:
rsakey = pki.getRsaKey(iden)
'''
return self.keys.get(iden)
def _getRsaKey(self, iden):
tokn = self.core.getTufoByProp('syn:token',iden)
if tokn == None:
return None
keyb64 = tokn[1].get('syn:token:rsakey')
if keyb64 == None:
return None
rsader = base64.b64decode(keyb64)
return c_serialization.load_der_private_key(rsader, password=None, backend=backend)
def setRsaKey(self, iden, key, save=False):
'''
Set the RSA private key for an iden and optionally save.
Example:
rsakey = genRsaKey(bits=4096)
pki.setRsaKey(iden,rsakey)
'''
self.keys.put(iden,key)
if not save:
return
tokn = self.core.formTufoByProp('syn:token',iden)
rsab64 = base64.b64encode( keyToDer( key ) )
props = {'syn:token:rsakey':rsab64}
self.core.setTufoProps(tokn,**props)
def genRootToken(self, bits=4096, save=False):
'''
Generate a new root token and optionally save.
Example:
tokn = self.genRootToken()
'''
key = genRsaKey(bits=bits)
pub = key.public_key()
iden = guid()
pubder = pubToDer(pub)
token = initTokenTufo(iden, pubder, root=True)
self.setRsaKey(iden,key,save=save)
self.setTokenTufo(token,save=save)
return token
def genHostToken(self, host, can=(), bits=4096, save=True):
'''
Generate a new host token with the specified capabilities.
Example:
tokn = pki.genHostToken('visi.kenshoto.com')
'''
iden = guid()
key = genRsaKey(bits=bits)
self.setRsaKey(iden, key, save=save)
pubder = pubToDer( key.public_key() )
token = initTokenTufo(iden, pubder, host=host, can=can)
self.setTokenTufo(token, save=save)
return token
def genUserToken(self, user, root=False, can=(), bits=4096, save=True):
'''
Generate a new user token with the specified capabilities.
Example:
tokn = pki.genUserToken('visi@kenshoto.com', can=('sign:cert','mesh:join'))
skey = pki.getRsaKey( tokn[0] )
'''
iden = guid()
key = genRsaKey(bits=bits)
self.setRsaKey(iden, key, save=save)
pubder = pubToDer( key.public_key() )
token = initTokenTufo(iden, pubder, user=user, can=can, root=root)
self.setTokenTufo(token, save=save)
return token
def genTokenCert(self, token, signas=None, save=True):
'''
Generate and optionally sign a cert tuple for the given token.
Example:
cert = pki.genTokenCert(tokn, signas=iden)
Notes:
* See docs for synapse.lib.pki module for cert structure
'''
token[1]['cert:issued:at'] = int(time.time())
if signas != None:
token[1]['cert:issued:by'] = signas
cert = initTokenCert(token)
if signas != None:
cert = self.signTokenCert(signas,cert)
self.setTokenCert(token[0], cert, save=save)
return cert
def signTokenCert(self, iden, cert, save=True):
'''
Add a signature to the given certificate tuple.
Example:
cert = pki.signTokenCert(iden,cert)
'''
signs = cert[1].get('signs',())
certs = cert[1].get('certs',())
sign = self.genByteSign(iden,cert[0])
mcrt = self.getTokenCert(iden)
if mcrt != None:
cert[1]['certs'] = certs + (mcrt,)
cert[1]['signs'] = signs + ( (iden,sign), )
self.setTokenCert(iden, cert, save=save)
return cert
def genByteSign(self, iden, byts):
'''
Generate a signature for the given bytes by the specified iden.
Example:
sign = pki.genByteSign(iden,byts)
Notes:
If no RSA key exists for iden, return None.
'''
key = self.getRsaKey(iden)
if key == None:
return None
signer = key.signer(c_pss_sha256,c_sha256)
signer.update(byts)
return signer.finalize()
def delTokenTufo(self, iden):
'''
Delete an entire token tufo by iden.
'''
tokn = self.core.formTufoByProp('syn:token', iden)
self.keys.pop(iden)
self.pubs.pop(iden)
self.certs.pop(iden)
self.tokens.pop(iden)
def loadCertToken(self, cert, save=False, force=False):
'''
Verify and load the token within a certificate.
'''
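# Load any embedded parent certs first so their tokens are available
# for the signature checks below.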
for subcert in cert[1].get('certs'):
self.loadCertToken(subcert, save=save)
if force:
token = msgunpack(cert[0])
self.setTokenTufo(token, save=save)
return token
for iden,sign in cert[1].get('signs'):
token = self.getTokenTufo(iden)
if token == None:
continue
if not token[1].get('root') and not token[1].get('can',{}).get('sign:cert'):
continue
if not self.isValidSign(iden,sign,cert[0]):
continue
# it's a totally valid cert!
token = msgunpack(cert[0])
self.setTokenTufo(token, save=save)
return token
return None
def setTokenCert(self, iden, cert, save=True):
'''
Set a token cert in the PkiStor and optionally persist.
Example:
pki.setTokenCert(cert)
'''
self.certs.put(iden,cert)
if save:
b64bytes = base64.b64encode( msgenpack( cert ) )
tokn = self.core.formTufoByProp('syn:token', iden)
self.core.setTufoProps(tokn,cert=b64bytes)
def getTokenCert(self, iden):
'''
Retrieve a cert for the given iden.
Example:
cert = pki.getTokenCert(iden)
'''
return self.certs.get(iden)
def _getTokenCert(self, iden):
tokn = self.core.getTufoByProp('syn:token',iden)
if tokn == None:
return None
b64c = tokn[1].get('syn:token:cert')
if not b64c:
return None
byts = base64.b64decode(b64c)
cert = msgunpack(byts)
return cert
def isValidSign(self, iden, sign, byts):
'''
Check if the given signature is valid for the given bytes.
Example:
if not pki.isValidSign(iden, sign, byts):
bail()
'''
token = self.getTokenTufo(iden)
if token == None:
return False
# FIXME sign / optional can args
pub = self.getPubKey(iden)
verifier = pub.verifier(sign,c_pss_sha256,c_sha256)
verifier.update(byts)
try:
verifier.verify()
return True
except InvalidSignature as e:
return False
def iterTokenTufos(self):
'''
Yield each of the known token dictionaries in the PkiStor.
Example:
for tokn in stor.iterTokenTufos():
dostuff(tokn)
'''
for tokn in self.core.getTufosByProp('syn:token:blob'):
blob = tokn[1].get('syn:token:blob')
if not blob:
continue
yield b642obj(blob)
def encToIden(self, iden, byts):
'''
Encrypt the given bytes to the target iden's public key.
Notes:
* as usual, this should be used to generate/exchange a symmetric key...
'''
pub = self.pubs.get(iden)
if pub == None:
return None
return pubEncBytes(pub,byts)
def decToIden(self, iden, byts):
'''
Decrypt the given bytes which were sent to using iden's public key.
'''
key = self.keys.get(iden)
if key == None:
return None
return keyDecBytes(key,byts)
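# A minimal usage sketch (not part of the original module; names are the APIs defined above):
#
#   pki = getUserPki()
#   root = pki.genRootToken(save=True)
#   user = pki.genUserToken('visi@kenshoto.com', can=('sign:cert',), save=True)
#   cert = pki.genTokenCert(user, signas=root[0])
#   sign = pki.genByteSign(user[0], b'some bytes')
#   assert pki.isValidSign(user[0], sign, b'some bytes')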
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
__metaclass__ = type
# Copyright 2018 Palo Alto Networks, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: panos_bgp_policy_filter
short_description: Configures a BGP Policy Import/Export Rule
description:
- Use BGP to publish and consume routes from disparate networks.
author:
- Joshua Colson (@freakinhippie)
- Garfield Lee Freeman (@shinmog)
version_added: "2.9"
requirements:
- pan-python can be obtained from PyPI U(https://pypi.python.org/pypi/pan-python)
- pandevice can be obtained from PyPI U(https://pypi.python.org/pypi/pandevice)
notes:
- Checkmode is supported.
- Panorama is supported.
extends_documentation_fragment:
- panos.transitional_provider
- panos.full_template_support
options:
state:
description:
- Add or remove BGP Policy Filter.
- I(state=return-object) is deprecated and will be removed in 2.12.
choices:
- present
- absent
- return-object
default: 'present'
commit:
description:
- Commit configuration if changed.
default: True
type: bool
filter_type:
description:
- The type of filter.
choices:
- non-exist
- advertise
- suppress
required: True
policy_name:
description:
- The name of the policy object.
policy_type:
description:
- The type of policy object.
choices:
- conditional-advertisement
- aggregate
required: True
name:
description:
- Name of filter.
required: True
enable:
description:
- Enable filter.
default: True
type: bool
address_prefix:
description:
- List of address prefix strings or dicts with "name"/"exact" keys.
- Using the dict form for address prefixes should only be used with
I(policy_type=aggregate).
type: list
match_afi:
description:
- Address Family Identifier.
choices:
- ip
- ipv6
match_as_path_regex:
description:
- AS-path regular expression.
match_community_regex:
description:
- Community AS-path regular expression.
match_extended_community_regex:
description:
- Extended Community AS-path regular expression.
match_from_peer:
description:
- Filter by peer that sent this route.
match_med:
description:
- Multi-Exit Discriminator.
match_nexthop:
description:
- Next-hop attributes.
match_route_table:
description:
- Route table to match rule.
choices:
- unicast
- multicast
- both
match_safi:
description:
- Subsequent Address Family Identifier.
choices:
- ip
- ipv6
vr_name:
description:
- Name of the virtual router; it must already exist and have BGP configured.
- See M(panos_virtual_router).
default: default
'''
EXAMPLES = '''
'''
RETURN = '''
# Default return values
panos_obj:
description: a serialized policy filter is returned when state == 'return-object'
returned: success
type: string
sample: "LUFRPT14MW5xOEo1R09KVlBZNnpnemh0VHRBOWl6TGM9bXcwM3JHUGVhRlNiY0dCR0srNERUQT09"
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.panos.panos import get_connection
from ansible.module_utils._text import to_text
try:
from pandevice.errors import PanDeviceError
from pandevice.network import VirtualRouter
from pandevice.network import Bgp
from pandevice.network import BgpPolicyAggregationAddress
from pandevice.network import BgpPolicyConditionalAdvertisement
from pandevice.network import BgpPolicyNonExistFilter
from pandevice.network import BgpPolicyAdvertiseFilter
from pandevice.network import BgpPolicySuppressFilter
from pandevice.network import BgpPolicyAddressPrefix
except ImportError:
pass
def purge_stale_prefixes(cur_filter, new_prefixes):
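# Remove address-prefix children that exist on the current filter but are
# absent from the desired prefix list.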
if cur_filter is None:
return
new_names = set(p.get('name') for p in new_prefixes if 'name' in p)
cur_names = set(p.name for p in cur_filter.findall(BgpPolicyAddressPrefix))
stale_prefixes = cur_names - new_names
for name in stale_prefixes:
cur_filter.find(name, BgpPolicyAddressPrefix).delete()
def setup_args():
return dict(
# TODO(gfreeman) - remove this later on and use the default state.
state=dict(
default='present', choices=['present', 'absent', 'return-object'],
help='Add or remove BGP Policy Filter'),
commit=dict(
type='bool', default=True,
help='Commit configuration if changed'),
vr_name=dict(
default='default',
help='Name of the virtual router; it must already exist; see panos_virtual_router'),
policy_type=dict(
type='str', required=True, choices=['conditional-advertisement', 'aggregate'],
help='The type of policy object'),
policy_name=dict(
type='str',
help='The name of the policy object'),
filter_type=dict(
type='str', required=True, choices=['non-exist', 'advertise', 'suppress'],
help='The type of filter'),
name=dict(
type='str', required=True,
help='Name of filter'),
enable=dict(
default=True, type='bool',
help='Enable filter'),
match_afi=dict(
type='str', choices=['ip', 'ipv6'],
help='Address Family Identifier'),
match_safi=dict(
type='str', choices=['ip', 'ipv6'],
help='Subsequent Address Family Identifier'),
match_route_table=dict(
type='str', default='unicast', choices=['unicast', 'multicast', 'both'],
help='Route table to match rule'),
match_nexthop=dict(
type='list',
help='Next-hop attributes'),
match_from_peer=dict(
type='list',
help='Filter by peer that sent this route'),
match_med=dict(
type='int',
help='Multi-Exit Discriminator'),
match_as_path_regex=dict(
type='str',
help='AS-path regular expression'),
match_community_regex=dict(
type='str',
help='Community AS-path regular expression'),
match_extended_community_regex=dict(
type='str',
help='Extended Community AS-path regular expression'),
address_prefix=dict(
type='list',
help='List of Address Prefix objects'),
)
def main():
helper = get_connection(
template=True,
template_stack=True,
with_classic_provider_spec=True,
argument_spec=setup_args(),
)
module = AnsibleModule(
argument_spec=helper.argument_spec,
supports_check_mode=True,
required_one_of=helper.required_one_of,
)
parent = helper.get_pandevice_parent(module)
vr = VirtualRouter(module.params['vr_name'])
parent.add(vr)
try:
vr.refresh()
except PanDeviceError as e:
module.fail_json(msg='Failed refresh: {0}'.format(e))
bgp = vr.find('', Bgp)
if bgp is None:
module.fail_json(msg='BGP is not configured for virtual router {0}'.format(vr.name))
policy = None
if module.params['policy_type'] == 'conditional-advertisement':
policy_cls = BgpPolicyConditionalAdvertisement
else:
policy_cls = BgpPolicyAggregationAddress
policy = bgp.find_or_create(module.params['policy_name'], policy_cls)
obj_type = None
if module.params['filter_type'] == 'non-exist':
obj_type = BgpPolicyNonExistFilter
elif module.params['filter_type'] == 'advertise':
obj_type = BgpPolicyAdvertiseFilter
elif module.params['filter_type'] == 'suppress':
obj_type = BgpPolicySuppressFilter
else:
module.fail_json(msg='Unknown filter_type: {0}'.format(module.params['filter_type']))
listing = policy.findall(obj_type)
spec = {
'name': module.params['name'],
'enable': module.params['enable'],
'match_afi': module.params['match_afi'],
'match_safi': module.params['match_safi'],
'match_route_table': module.params['match_route_table'],
'match_nexthop': module.params['match_nexthop'],
'match_from_peer': module.params['match_from_peer'],
'match_med': module.params['match_med'],
'match_as_path_regex': module.params['match_as_path_regex'],
'match_community_regex': module.params['match_community_regex'],
'match_extended_community_regex': module.params['match_extended_community_regex'],
}
obj = obj_type(**spec)
policy.add(obj)
# Handle address prefixes.
for x in module.params['address_prefix'] or []:
if isinstance(x, dict):
if 'name' not in x:
module.fail_json(msg='Address prefix dict requires "name": {0}'.format(x))
obj.add(BgpPolicyAddressPrefix(
to_text(x['name'], encoding='utf-8', errors='surrogate_or_strict'),
None if x.get('exact') is None else module.boolean(x['exact']),
))
else:
obj.add(BgpPolicyAddressPrefix(to_text(x, encoding='utf-8', errors='surrogate_or_strict')))
if module.params['state'] == 'return-object':
module.deprecate('state=return-object is deprecated', '2.12')
import pickle
from base64 import b64encode
obj.parent = None
panos_obj = b64encode(pickle.dumps(obj, protocol=pickle.HIGHEST_PROTOCOL))
module.exit_json(msg='returning serialized object', panos_obj=panos_obj)
changed = helper.apply_state(obj, listing, module)
if changed and module.params['commit']:
helper.commit(module)
module.exit_json(changed=changed, msg='done')
if __name__ == '__main__':
main()
|