repo_name stringlengths 5 100 | path stringlengths 4 375 | copies stringclasses 991 values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15 values |
|---|---|---|---|---|---|
JupiterLikeThePlanet/Quoter | vendor/bundle/ruby/2.3.0/gems/twilio-ruby-4.2.1/docs/_themes/flask_theme_support.py | 2228 | 4875 | # flasky extensions. flasky pygments style based on tango style
from pygments.style import Style
from pygments.token import Keyword, Name, Comment, String, Error, \
Number, Operator, Generic, Whitespace, Punctuation, Other, Literal
class FlaskyStyle(Style):
    """Pygments style for the "flasky" Sphinx theme.

    Based on the Tango style; maps pygments token types to color/format
    strings (the trailing comments give the emitted CSS class names).
    """
    background_color = "#f8f8f8"
    default_style = ""

    styles = {
        # No corresponding class for the following:
        #Text:                     "", # class:  ''
        Whitespace:                "underline #f8f8f8",      # class: 'w'
        Error:                     "#a40000 border:#ef2929", # class: 'err'
        Other:                     "#000000",                # class 'x'

        Comment:                   "italic #8f5902",         # class: 'c'
        Comment.Preproc:           "noitalic",               # class: 'cp'

        Keyword:                   "bold #004461",           # class: 'k'
        Keyword.Constant:          "bold #004461",           # class: 'kc'
        Keyword.Declaration:       "bold #004461",           # class: 'kd'
        Keyword.Namespace:         "bold #004461",           # class: 'kn'
        Keyword.Pseudo:            "bold #004461",           # class: 'kp'
        Keyword.Reserved:          "bold #004461",           # class: 'kr'
        Keyword.Type:              "bold #004461",           # class: 'kt'

        Operator:                  "#582800",                # class: 'o'
        Operator.Word:             "bold #004461",           # class: 'ow' - like keywords

        Punctuation:               "bold #000000",           # class: 'p'

        # because special names such as Name.Class, Name.Function, etc.
        # are not recognized as such later in the parsing, we choose them
        # to look the same as ordinary variables.
        Name:                      "#000000",                # class: 'n'
        Name.Attribute:            "#c4a000",                # class: 'na' - to be revised
        Name.Builtin:              "#004461",                # class: 'nb'
        Name.Builtin.Pseudo:       "#3465a4",                # class: 'bp'
        Name.Class:                "#000000",                # class: 'nc' - to be revised
        Name.Constant:             "#000000",                # class: 'no' - to be revised
        Name.Decorator:            "#888",                   # class: 'nd' - to be revised
        Name.Entity:               "#ce5c00",                # class: 'ni'
        Name.Exception:            "bold #cc0000",           # class: 'ne'
        Name.Function:             "#000000",                # class: 'nf'
        Name.Property:             "#000000",                # class: 'py'
        Name.Label:                "#f57900",                # class: 'nl'
        Name.Namespace:            "#000000",                # class: 'nn' - to be revised
        Name.Other:                "#000000",                # class: 'nx'
        Name.Tag:                  "bold #004461",           # class: 'nt' - like a keyword
        Name.Variable:             "#000000",                # class: 'nv' - to be revised
        Name.Variable.Class:       "#000000",                # class: 'vc' - to be revised
        Name.Variable.Global:      "#000000",                # class: 'vg' - to be revised
        Name.Variable.Instance:    "#000000",                # class: 'vi' - to be revised

        Number:                    "#990000",                # class: 'm'

        Literal:                   "#000000",                # class: 'l'
        Literal.Date:              "#000000",                # class: 'ld'

        String:                    "#4e9a06",                # class: 's'
        String.Backtick:           "#4e9a06",                # class: 'sb'
        String.Char:               "#4e9a06",                # class: 'sc'
        String.Doc:                "italic #8f5902",         # class: 'sd' - like a comment
        String.Double:             "#4e9a06",                # class: 's2'
        String.Escape:             "#4e9a06",                # class: 'se'
        String.Heredoc:            "#4e9a06",                # class: 'sh'
        String.Interpol:           "#4e9a06",                # class: 'si'
        String.Other:              "#4e9a06",                # class: 'sx'
        String.Regex:              "#4e9a06",                # class: 'sr'
        String.Single:             "#4e9a06",                # class: 's1'
        String.Symbol:             "#4e9a06",                # class: 'ss'

        Generic:                   "#000000",                # class: 'g'
        Generic.Deleted:           "#a40000",                # class: 'gd'
        Generic.Emph:              "italic #000000",         # class: 'ge'
        Generic.Error:             "#ef2929",                # class: 'gr'
        Generic.Heading:           "bold #000080",           # class: 'gh'
        Generic.Inserted:          "#00A000",                # class: 'gi'
        Generic.Output:            "#888",                   # class: 'go'
        Generic.Prompt:            "#745334",                # class: 'gp'
        Generic.Strong:            "bold #000000",           # class: 'gs'
        Generic.Subheading:        "bold #800080",           # class: 'gu'
        Generic.Traceback:         "bold #a40000",           # class: 'gt'
    }
| mit |
tomasreimers/tensorflow-emscripten | tensorflow/contrib/learn/python/learn/estimators/estimator.py | 5 | 55320 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Base Estimator class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import copy
import inspect
import os
import tempfile
import numpy as np
import six
from tensorflow.contrib import framework as contrib_framework
from tensorflow.contrib import layers
from tensorflow.contrib import metrics as metrics_lib
from tensorflow.contrib.framework import deprecated
from tensorflow.contrib.framework import deprecated_arg_values
from tensorflow.contrib.framework import deprecated_args
from tensorflow.contrib.framework import list_variables
from tensorflow.contrib.framework import load_variable
from tensorflow.contrib.framework.python.framework import experimental
from tensorflow.contrib.framework.python.ops import variables as contrib_variables
from tensorflow.contrib.learn.python.learn import evaluable
from tensorflow.contrib.learn.python.learn import metric_spec
from tensorflow.contrib.learn.python.learn import monitors as monitor_lib
from tensorflow.contrib.learn.python.learn import trainable
from tensorflow.contrib.learn.python.learn.estimators import _sklearn as sklearn
from tensorflow.contrib.learn.python.learn.estimators import metric_key
from tensorflow.contrib.learn.python.learn.estimators import model_fn as model_fn_lib
from tensorflow.contrib.learn.python.learn.estimators import run_config
from tensorflow.contrib.learn.python.learn.estimators import tensor_signature
from tensorflow.contrib.learn.python.learn.estimators._sklearn import NotFittedError
from tensorflow.contrib.learn.python.learn.learn_io import data_feeder
from tensorflow.contrib.learn.python.learn.utils import export
from tensorflow.contrib.learn.python.learn.utils import saved_model_export_utils
from tensorflow.contrib.training.python.training import evaluation
from tensorflow.core.framework import summary_pb2
from tensorflow.python.client import session as tf_session
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import gfile
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.saved_model import builder as saved_model_builder
from tensorflow.python.saved_model import tag_constants
from tensorflow.python.training import basic_session_run_hooks
from tensorflow.python.training import device_setter
from tensorflow.python.training import monitored_session
from tensorflow.python.training import saver
from tensorflow.python.training import session_run_hook
from tensorflow.python.training import summary_io
from tensorflow.python.util import compat
AS_ITERABLE_DATE = '2016-09-15'
AS_ITERABLE_INSTRUCTIONS = (
'The default behavior of predict() is changing. The default value for\n'
'as_iterable will change to True, and then the flag will be removed\n'
'altogether. The behavior of this flag is described below.')
SCIKIT_DECOUPLE_DATE = '2016-12-01'
SCIKIT_DECOUPLE_INSTRUCTIONS = (
'Estimator is decoupled from Scikit Learn interface by moving into\n'
'separate class SKCompat. Arguments x, y and batch_size are only\n'
'available in the SKCompat class, Estimator will only accept input_fn.\n'
'Example conversion:\n'
' est = Estimator(...) -> est = SKCompat(Estimator(...))')
def _verify_input_args(x, y, input_fn, feed_fn, batch_size):
"""Verifies validity of co-existance of input arguments."""
if input_fn is None:
if x is None:
raise ValueError('Either x or input_fn must be provided.')
if contrib_framework.is_tensor(x) or (y is not None and
contrib_framework.is_tensor(y)):
raise ValueError('Inputs cannot be tensors. Please provide input_fn.')
if feed_fn is not None:
raise ValueError('Can not provide both feed_fn and x or y.')
else:
if (x is not None) or (y is not None):
raise ValueError('Can not provide both input_fn and x or y.')
if batch_size is not None:
raise ValueError('Can not provide both input_fn and batch_size.')
def _get_input_fn(x, y, input_fn, feed_fn, batch_size, shuffle=False, epochs=1):
  """Make inputs into input and feed functions.

  Args:
    x: Numpy, Pandas or Dask matrix or iterable.
    y: Numpy, Pandas or Dask matrix or iterable.
    input_fn: Pre-defined input function for training data.
    feed_fn: Pre-defined data feeder function.
    batch_size: Size to split data into parts. Must be >= 1.
    shuffle: Whether to shuffle the inputs.
    epochs: Number of epochs to run.

  Returns:
    Data input and feeder function based on training data.

  Raises:
    ValueError: Only one of `(x & y)` or `input_fn` must be provided.
  """
  _verify_input_args(x, y, input_fn, feed_fn, batch_size)
  if input_fn is not None:
    # Caller supplied an input_fn directly; pass it (and feed_fn) through.
    return input_fn, feed_fn

  # Build a data feeder around the in-memory x/y arrays.
  feeder = data_feeder.setup_train_data_feeder(
      x,
      y,
      n_classes=None,
      batch_size=batch_size,
      shuffle=shuffle,
      epochs=epochs)
  return feeder.input_builder, feeder.get_feed_dict_fn()
def infer_real_valued_columns_from_input_fn(input_fn):
  """Creates `FeatureColumn` objects for inputs defined by `input_fn`.

  This interprets all inputs as dense, fixed-length float values. This creates
  a local graph in which it calls `input_fn` to build the tensors, then discards
  it.

  Args:
    input_fn: Input function returning a tuple of:
        features - Dictionary of string feature name to `Tensor` or `Tensor`.
        labels - `Tensor` of label values.

  Returns:
    List of `FeatureColumn` objects.
  """
  # Build the tensors in a throwaway graph so the caller's default graph
  # is not polluted.
  with ops.Graph().as_default():
    features, _ = input_fn()
    return layers.infer_real_valued_columns(features)
def infer_real_valued_columns_from_input(x):
  """Creates `FeatureColumn` objects for inputs defined by input `x`.

  This interprets all inputs as dense, fixed-length float values.

  Args:
    x: Real-valued matrix of shape [n_samples, n_features...]. Can be
       iterator that returns arrays of features.

  Returns:
    List of `FeatureColumn` objects.
  """
  # Wrap x in an input_fn, then reuse the input_fn-based inference.
  input_fn, _ = _get_input_fn(
      x=x, y=None, input_fn=None, feed_fn=None, batch_size=None)
  return infer_real_valued_columns_from_input_fn(input_fn)
def _get_arguments(func):
"""Returns list of arguments this function has."""
if hasattr(func, '__code__'):
# Regular function.
return inspect.getargspec(func).args
elif hasattr(func, '__call__'):
# Callable object.
return _get_arguments(func.__call__)
elif hasattr(func, 'func'):
# Partial function.
return _get_arguments(func.func)
def _get_replica_device_setter(config):
"""Creates a replica device setter if required.
Args:
config: A RunConfig instance.
Returns:
A replica device setter, or None.
"""
ps_ops = [
'Variable', 'VariableV2', 'AutoReloadVariable', 'MutableHashTable',
'MutableHashTableOfTensors', 'MutableDenseHashTable'
]
if config.task_type:
worker_device = '/job:%s/task:%d' % (config.task_type, config.task_id)
else:
worker_device = '/job:worker'
if config.num_ps_replicas > 0:
return device_setter.replica_device_setter(
ps_tasks=config.num_ps_replicas, worker_device=worker_device,
merge_devices=True, ps_ops=ps_ops, cluster=config.cluster_spec)
else:
return None
def _make_metrics_ops(metrics, features, labels, predictions):
  """Add metrics based on `features`, `labels`, and `predictions`.

  `metrics` contains a specification for how to run metrics. It is a dict
  mapping friendly names to either `MetricSpec` objects, or directly to a metric
  function (assuming that `predictions` and `labels` are single tensors), or to
  `(pred_name, metric)` `tuple`, which passes `predictions[pred_name]` and
  `labels` to `metric` (assuming `labels` is a single tensor).

  Users are encouraged to use `MetricSpec` objects, which are more flexible and
  cleaner. They also lead to clearer errors.

  Args:
    metrics: A dict mapping names to metrics specification, for example
      `MetricSpec` objects.
    features: A dict of tensors returned from an input_fn as features/inputs.
    labels: A single tensor or a dict of tensors returned from an input_fn as
      labels.
    predictions: A single tensor or a dict of tensors output from a model as
      predictions.

  Returns:
    A dict mapping the friendly given in `metrics` to the result of calling the
    given metric function.

  Raises:
    ValueError: If metrics specifications do not work with the type of
      `features`, `labels`, or `predictions` provided. Mostly, a dict is given
      but no pred_name specified.
  """
  metrics = metrics or {}

  # If labels is a dict with a single key, unpack into a single tensor.
  labels_tensor_or_dict = labels
  if isinstance(labels, dict) and len(labels) == 1:
    labels_tensor_or_dict = labels[list(labels.keys())[0]]

  result = {}
  # Iterate in lexicographic order, so the graph is identical among runs.
  for name, metric in sorted(six.iteritems(metrics)):
    if isinstance(metric, metric_spec.MetricSpec):
      # Preferred path: MetricSpec knows how to wire itself up.
      result[name] = metric.create_metric_ops(features, labels, predictions)
      continue

    # TODO(b/31229024): Remove the rest of this loop
    logging.warning('Please specify metrics using MetricSpec. Using bare '
                    'functions or (key, fn) tuples is deprecated and support '
                    'for it will be removed on Oct 1, 2016.')

    if isinstance(name, tuple):
      # Multi-head metrics: name is (friendly_name, prediction_key).
      if len(name) != 2:
        raise ValueError('Invalid metric for {}. It returned a tuple with '
                         'len {}, expected 2.'.format(name, len(name)))
      if not isinstance(predictions, dict):
        raise ValueError(
            'Metrics passed provide (name, prediction), '
            'but predictions are not dict. '
            'Metrics: %s, Predictions: %s.' % (metrics, predictions))
      # Here are two options: labels are single Tensor or a dict.
      if isinstance(labels, dict) and name[1] in labels:
        # If labels are dict and the prediction name is in it, apply metric.
        result[name[0]] = metric(predictions[name[1]], labels[name[1]])
      else:
        # Otherwise pass the labels to the metric.
        result[name[0]] = metric(predictions[name[1]], labels_tensor_or_dict)
    else:
      # Single head metrics.
      if isinstance(predictions, dict):
        raise ValueError(
            'Metrics passed provide only name, no prediction, '
            'but predictions are dict. '
            'Metrics: %s, Labels: %s.' % (metrics, labels_tensor_or_dict))
      result[name] = metric(predictions, labels_tensor_or_dict)
  return result
def _dict_to_str(dictionary):
"""Get a `str` representation of a `dict`.
Args:
dictionary: The `dict` to be represented as `str`.
Returns:
A `str` representing the `dictionary`.
"""
return ', '.join('%s = %s' % (k, v) for k, v in sorted(dictionary.items()))
def _write_dict_to_summary(output_dir,
                           dictionary,
                           current_global_step):
  """Writes a `dict` into summary file in given output directory.

  Only float / np.float32 values are exported; `None` values are skipped
  silently and other types are skipped with a warning.

  Args:
    output_dir: `str`, directory to write the summary file in.
    dictionary: the `dict` to be written to summary file.
    current_global_step: `int`, the current global step.
  """
  logging.info('Saving dict for global step %d: %s', current_global_step,
               _dict_to_str(dictionary))
  summary_writer = summary_io.SummaryWriterCache.get(output_dir)
  summary_proto = summary_pb2.Summary()
  for key in dictionary:
    if dictionary[key] is None:
      continue
    # NOTE(review): the value entry (with its tag) is added before the type
    # check, so skipped non-float keys still leave an empty-valued entry in
    # the proto — confirm whether that is intended.
    value = summary_proto.value.add()
    value.tag = key
    if (isinstance(dictionary[key], np.float32) or
        isinstance(dictionary[key], float)):
      value.simple_value = float(dictionary[key])
    else:
      logging.warn('Skipping summary for %s, must be a float or np.float32.',
                   key)
  summary_writer.add_summary(summary_proto, current_global_step)
  summary_writer.flush()
class BaseEstimator(
    sklearn.BaseEstimator, evaluable.Evaluable, trainable.Trainable):
  """Abstract BaseEstimator class to train and evaluate TensorFlow models.

  Concrete implementation of this class should provide the following functions:

    * _get_train_ops
    * _get_eval_ops
    * _get_predict_ops

  `Estimator` implemented below is a good example of how to use this class.
  """
  # NOTE(review): `__metaclass__` is the Python 2 spelling; under Python 3 it
  # has no effect (the class statement would need `metaclass=abc.ABCMeta`).
  __metaclass__ = abc.ABCMeta

  # Note that for Google users, this is overriden with
  # learn_runner.EstimatorConfig.
  # TODO(wicke): Remove this once launcher takes over config functionality

  _Config = run_config.RunConfig  # pylint: disable=invalid-name
def __init__(self, model_dir=None, config=None):
  """Initializes a BaseEstimator instance.

  Args:
    model_dir: Directory to save model parameters, graph and etc. This can
      also be used to load checkpoints from the directory into a estimator to
      continue training a previously saved model.
    config: A RunConfig instance.
  """
  # Model directory: fall back to a fresh temporary directory when the
  # caller did not supply one.
  self._model_dir = model_dir
  if self._model_dir is None:
    self._model_dir = tempfile.mkdtemp()
    logging.warning('Using temporary folder as model directory: %s',
                    self._model_dir)

  # Create a run configuration.
  if config is None:
    self._config = BaseEstimator._Config()
    logging.info('Using default config.')
  else:
    self._config = config
  logging.info('Using config: %s', str(vars(self._config)))

  # Set device function depending if there are replicas or not.
  self._device_fn = _get_replica_device_setter(self._config)

  # Features and labels TensorSignature objects.
  # TODO(wicke): Rename these to something more descriptive
  self._features_info = None
  self._labels_info = None

  self._graph = None
@property
def config(self):
  """Returns a deep copy of the RunConfig (callers cannot mutate ours)."""
  # TODO(wicke): make RunConfig immutable, and then return it without a copy.
  return copy.deepcopy(self._config)
@deprecated_args(
    SCIKIT_DECOUPLE_DATE, SCIKIT_DECOUPLE_INSTRUCTIONS, ('x', None),
    ('y', None), ('batch_size', None)
)
def fit(self, x=None, y=None, input_fn=None, steps=None, batch_size=None,
        monitors=None, max_steps=None):
  # pylint: disable=g-doc-args,g-doc-return-or-yield
  """See `Trainable`.

  Raises:
    ValueError: If `x` or `y` are not `None` while `input_fn` is not `None`.
    ValueError: If both `steps` and `max_steps` are not `None`.
  """
  if (steps is not None) and (max_steps is not None):
    raise ValueError('Can not provide both steps and max_steps.')

  _verify_input_args(x, y, input_fn, None, batch_size)
  if x is not None:
    # Legacy scikit-learn style inputs are delegated to SKCompat.
    SKCompat(self).fit(x, y, batch_size, steps, max_steps, monitors)
    return self

  if max_steps is not None:
    try:
      # If the checkpointed global step already reached max_steps, skip
      # training entirely.
      start_step = load_variable(self._model_dir, ops.GraphKeys.GLOBAL_STEP)
      if max_steps <= start_step:
        logging.info('Skipping training since max_steps has already saved.')
        return self
    except:  # pylint: disable=bare-except
      # No readable checkpoint yet; proceed with training.
      pass

  hooks = monitor_lib.replace_monitors_with_hooks(monitors, self)
  if steps is not None or max_steps is not None:
    hooks.append(basic_session_run_hooks.StopAtStepHook(steps, max_steps))

  loss = self._train_model(input_fn=input_fn, hooks=hooks)
  logging.info('Loss for final step: %s.', loss)
  return self
@deprecated_args(
    SCIKIT_DECOUPLE_DATE, SCIKIT_DECOUPLE_INSTRUCTIONS, ('x', None),
    ('y', None), ('batch_size', None)
)
def partial_fit(
    self, x=None, y=None, input_fn=None, steps=1, batch_size=None,
    monitors=None):
  """Incremental fit on a batch of samples.

  This method is expected to be called several times consecutively
  on different or the same chunks of the dataset. This either can
  implement iterative training or out-of-core/online training.

  This is especially useful when the whole dataset is too big to
  fit in memory at the same time. Or when model is taking long time
  to converge, and you want to split up training into subparts.

  Args:
    x: Matrix of shape [n_samples, n_features...]. Can be iterator that
      returns arrays of features. The training input samples for fitting the
      model. If set, `input_fn` must be `None`.
    y: Vector or matrix [n_samples] or [n_samples, n_outputs]. Can be
      iterator that returns array of labels. The training label values
      (class labels in classification, real numbers in regression). If set,
      `input_fn` must be `None`.
    input_fn: Input function. If set, `x`, `y`, and `batch_size` must be
      `None`.
    steps: Number of steps for which to train model. If `None`, train forever.
    batch_size: minibatch size to use on the input, defaults to first
      dimension of `x`. Must be `None` if `input_fn` is provided.
    monitors: List of `BaseMonitor` subclass instances. Used for callbacks
      inside the training loop.

  Returns:
    `self`, for chaining.

  Raises:
    ValueError: If at least one of `x` and `y` is provided, and `input_fn` is
      provided.
  """
  logging.warning('The current implementation of partial_fit is not optimized'
                  ' for use in a loop. Consider using fit() instead.')
  # Simply delegates to fit(); each call runs `steps` more training steps.
  return self.fit(x=x, y=y, input_fn=input_fn, steps=steps,
                  batch_size=batch_size, monitors=monitors)
@deprecated_args(
    SCIKIT_DECOUPLE_DATE, SCIKIT_DECOUPLE_INSTRUCTIONS, ('x', None),
    ('y', None), ('batch_size', None)
)
def evaluate(self,
             x=None,
             y=None,
             input_fn=None,
             feed_fn=None,
             batch_size=None,
             steps=None,
             metrics=None,
             name=None,
             checkpoint_path=None,
             hooks=None,
             log_progress=True):
  # pylint: disable=g-doc-args,g-doc-return-or-yield
  """See `Evaluable`.

  Raises:
    ValueError: If at least one of `x` or `y` is provided, and at least one of
      `input_fn` or `feed_fn` is provided.
      Or if `metrics` is not `None` or `dict`.
  """
  _verify_input_args(x, y, input_fn, feed_fn, batch_size)
  if x is not None:
    # Legacy scikit-learn style inputs are delegated to SKCompat.
    return SKCompat(self).score(x, y, batch_size, steps, metrics)
  if metrics is not None and not isinstance(metrics, dict):
    raise ValueError('Metrics argument should be None or dict. '
                     'Got %s.' % metrics)
  eval_results, global_step = self._evaluate_model(
      input_fn=input_fn,
      feed_fn=feed_fn,
      steps=steps,
      metrics=metrics,
      name=name,
      checkpoint_path=checkpoint_path,
      hooks=hooks,
      log_progress=log_progress)

  if eval_results is not None:
    # Surface the evaluated checkpoint's global step with the metrics.
    eval_results.update({'global_step': global_step})
  return eval_results
@deprecated_args(
    SCIKIT_DECOUPLE_DATE, SCIKIT_DECOUPLE_INSTRUCTIONS, ('x', None),
    ('batch_size', None), ('as_iterable', True)
)
def predict(
    self, x=None, input_fn=None, batch_size=None, outputs=None,
    as_iterable=True):
  """Returns predictions for given features.

  Args:
    x: Matrix of shape [n_samples, n_features...]. Can be iterator that
      returns arrays of features. The training input samples for fitting the
      model. If set, `input_fn` must be `None`.
    input_fn: Input function. If set, `x` and 'batch_size' must be `None`.
    batch_size: Override default batch size. If set, 'input_fn' must be
      'None'.
    outputs: list of `str`, name of the output to predict.
      If `None`, returns all.
    as_iterable: If True, return an iterable which keeps yielding predictions
      for each example until inputs are exhausted. Note: The inputs must
      terminate if you want the iterable to terminate (e.g. be sure to pass
      num_epochs=1 if you are using something like read_batch_features).

  Returns:
    A numpy array of predicted classes or regression values if the
    constructor's `model_fn` returns a `Tensor` for `predictions` or a `dict`
    of numpy arrays if `model_fn` returns a `dict`. Returns an iterable of
    predictions if as_iterable is True.

  Raises:
    ValueError: If x and input_fn are both provided or both `None`.
  """
  _verify_input_args(x, None, input_fn, None, batch_size)
  if x is not None and not as_iterable:
    # Legacy scikit-learn style inputs are delegated to SKCompat.
    return SKCompat(self).predict(x, batch_size)

  input_fn, feed_fn = _get_input_fn(x, None, input_fn, None, batch_size)
  return self._infer_model(
      input_fn=input_fn,
      feed_fn=feed_fn,
      outputs=outputs,
      as_iterable=as_iterable)
def get_variable_value(self, name):
  """Returns value of the variable given by name.

  Args:
    name: string, name of the tensor.

  Returns:
    Numpy array - value of the tensor.
  """
  # Reads directly from the latest checkpoint in model_dir.
  return load_variable(self.model_dir, name)
def get_variable_names(self):
  """Returns list of all variable names in this model.

  Returns:
    List of names.
  """
  # list_variables yields (name, shape) pairs; keep the names only.
  return [name for name, _ in list_variables(self.model_dir)]
@property
def model_dir(self):
  """Directory where checkpoints and graphs are written (may be a temp dir)."""
  return self._model_dir
@deprecated_arg_values(
    '2016-09-23',
    'The signature of the input_fn accepted by export is changing to be '
    'consistent with what\'s used by tf.Learn Estimator\'s train/evaluate. '
    'input_fn (and in most cases, input_feature_key) will become required '
    'args, and use_deprecated_input_fn will default to False and be removed '
    'altogether.',
    use_deprecated_input_fn=True,
    input_fn=None)
def export(self,
           export_dir,
           # Default refers to the module-level `export` helper, evaluated
           # before this method binds the name inside the class body.
           input_fn=export._default_input_fn,  # pylint: disable=protected-access
           input_feature_key=None,
           use_deprecated_input_fn=True,
           signature_fn=None,
           prediction_key=None,
           default_batch_size=1,
           exports_to_keep=None):
  """Exports inference graph into given dir.

  Args:
    export_dir: A string containing a directory to write the exported graph
      and checkpoints.
    input_fn: If `use_deprecated_input_fn` is true, then a function that given
      `Tensor` of `Example` strings, parses it into features that are then
      passed to the model. Otherwise, a function that takes no argument and
      returns a tuple of (features, labels), where features is a dict of
      string key to `Tensor` and labels is a `Tensor` that's currently not
      used (and so can be `None`).
    input_feature_key: Only used if `use_deprecated_input_fn` is false. String
      key into the features dict returned by `input_fn` that corresponds to a
      the raw `Example` strings `Tensor` that the exported model will take as
      input. Can only be `None` if you're using a custom `signature_fn` that
      does not use the first arg (examples).
    use_deprecated_input_fn: Determines the signature format of `input_fn`.
    signature_fn: Function that returns a default signature and a named
      signature map, given `Tensor` of `Example` strings, `dict` of `Tensor`s
      for features and `Tensor` or `dict` of `Tensor`s for predictions.
    prediction_key: The key for a tensor in the `predictions` dict (output
      from the `model_fn`) to use as the `predictions` input to the
      `signature_fn`. Optional. If `None`, predictions will pass to
      `signature_fn` without filtering.
    default_batch_size: Default batch size of the `Example` placeholder.
    exports_to_keep: Number of exports to keep.

  Returns:
    The string path to the exported directory. NB: this functionality was
    added ca. 2016/09/25; clients that depend on the return value may need
    to handle the case where this function returns None because subclasses
    are not returning a value.
  """
  # pylint: disable=protected-access
  return export._export_estimator(
      estimator=self,
      export_dir=export_dir,
      signature_fn=signature_fn,
      prediction_key=prediction_key,
      input_fn=input_fn,
      input_feature_key=input_feature_key,
      use_deprecated_input_fn=use_deprecated_input_fn,
      default_batch_size=default_batch_size,
      exports_to_keep=exports_to_keep)
# Fixed: `abc.abstractproperty` (deprecated since Python 3.3) wraps this
# argument-taking method in a property descriptor; `abc.abstractmethod` is
# the correct abstract-method marker and keeps the same override contract.
@abc.abstractmethod
def _get_train_ops(self, features, labels):
  """Method that builds model graph and returns trainer ops.

  Expected to be overridden by sub-classes that require custom support.

  Args:
    features: `Tensor` or `dict` of `Tensor` objects.
    labels: `Tensor` or `dict` of `Tensor` objects.

  Returns:
    A `ModelFnOps` object.
  """
  pass
# Fixed: `abc.abstractproperty` (deprecated since Python 3.3) wraps this
# argument-taking method in a property descriptor; `abc.abstractmethod` is
# the correct abstract-method marker and keeps the same override contract.
@abc.abstractmethod
def _get_predict_ops(self, features):
  """Method that builds model graph and returns prediction ops.

  Args:
    features: `Tensor` or `dict` of `Tensor` objects.

  Returns:
    A `ModelFnOps` object.
  """
  pass
def _get_eval_ops(self, features, labels, metrics):
  """Method that builds model graph and returns evaluation ops.

  Expected to be overriden by sub-classes that require custom support.

  Args:
    features: `Tensor` or `dict` of `Tensor` objects.
    labels: `Tensor` or `dict` of `Tensor` objects.
    metrics: Dict of metrics to run. If None, the default metric functions
      are used; if {}, no metrics are used. Otherwise, `metrics` should map
      friendly names for the metric to a `MetricSpec` object defining which
      model outputs to evaluate against which labels with which metric
      function. Metric ops should support streaming, e.g., returning
      update_op and value tensors. See more details in
      `../../../../metrics/python/metrics/ops/streaming_metrics.py` and
      `../metric_spec.py`.

  Returns:
    A `ModelFnOps` object.

  Raises:
    NotImplementedError: Always, unless overridden by a subclass.
  """
  raise NotImplementedError('_get_eval_ops not implemented in BaseEstimator')
@deprecated(
    '2016-09-23',
    'The signature of the input_fn accepted by export is changing to be '
    'consistent with what\'s used by tf.Learn Estimator\'s train/evaluate, '
    'which makes this function useless. This will be removed after the '
    'deprecation date.')
def _get_feature_ops_from_example(self, examples_batch):
  """Returns feature parser for given example batch using features info.

  This function requires `fit()` has been called.

  Args:
    examples_batch: batch of tf.Example

  Returns:
    features: `Tensor` or `dict` of `Tensor` objects.

  Raises:
    ValueError: If `_features_info` attribute is not available (usually
      because `fit()` has not been called).
  """
  if self._features_info is None:
    raise ValueError('Features information missing, was fit() ever called?')
  # Build a parser matching the signatures recorded during fit().
  return tensor_signature.create_example_parser_from_signatures(
      self._features_info, examples_batch)
def _check_inputs(self, features, labels):
  """Checks `features`/`labels` against the signatures recorded earlier.

  On the first call records TensorSignatures for `features` (and `labels`,
  when provided); on subsequent calls raises if the new tensors are
  incompatible with the recorded signatures.

  Args:
    features: `Tensor` or `dict` of `Tensor` objects.
    labels: `Tensor` or `dict` of `Tensor` objects, or `None`.

  Raises:
    ValueError: If `features` or `labels` are incompatible with previously
      recorded signatures.
  """
  if self._features_info is not None:
    logging.debug('Given features: %s, required signatures: %s.',
                  str(features), str(self._features_info))
    if not tensor_signature.tensors_compatible(features, self._features_info):
      raise ValueError('Features are incompatible with given information. '
                       'Given features: %s, required signatures: %s.' %
                       (str(features), str(self._features_info)))
  else:
    # First call: record the signatures for later compatibility checks.
    self._features_info = tensor_signature.create_signatures(features)
    logging.debug('Setting feature info to %s.', str(self._features_info))
  if labels is not None:
    if self._labels_info is not None:
      logging.debug('Given labels: %s, required signatures: %s.',
                    str(labels), str(self._labels_info))
      if not tensor_signature.tensors_compatible(labels, self._labels_info):
        raise ValueError('Labels are incompatible with given information. '
                         'Given labels: %s, required signatures: %s.' %
                         (str(labels), str(self._labels_info)))
    else:
      self._labels_info = tensor_signature.create_signatures(labels)
      logging.debug('Setting labels info to %s', str(self._labels_info))
def _extract_metric_update_ops(self, eval_dict):
  """Separate update operations from metric value operations.

  Args:
    eval_dict: Dict mapping metric names to either a bare op/tensor or a
      `(value_op, update_op)` pair.

  Returns:
    A `(update_op, value_ops)` tuple where `update_op` groups all update
    ops (or is `None` when there are none) and `value_ops` maps names to
    value ops.
  """
  update_ops = []
  value_ops = {}
  for name, metric_ops in six.iteritems(eval_dict):
    if isinstance(metric_ops, (list, tuple)):
      if len(metric_ops) == 2:
        # Standard streaming-metric pair: (value_op, update_op).
        value_ops[name] = metric_ops[0]
        update_ops.append(metric_ops[1])
      else:
        # Malformed tuple: keep it as the value but run no update for it.
        logging.warning(
            'Ignoring metric {}. It returned a list|tuple with len {}, '
            'expected 2'.format(name, len(metric_ops)))
        value_ops[name] = metric_ops
    else:
      # Bare op/tensor: treat it as the value directly.
      value_ops[name] = metric_ops

  if update_ops:
    # Group all updates into a single op so they run together.
    update_ops = control_flow_ops.group(*update_ops)
  else:
    update_ops = None

  return update_ops, value_ops
def _evaluate_model(self,
                    input_fn,
                    steps,
                    feed_fn=None,
                    metrics=None,
                    name='',
                    checkpoint_path=None,
                    hooks=None,
                    log_progress=True):
  """Evaluates the model on data produced by `input_fn`.

  Args:
    input_fn: Input function returning `(features, labels)`.
    steps: Number of eval batches to run, or falsy to run until the input
      is exhausted.
    feed_fn: Optional function returning a feed dict per step.
    metrics: Optional dict of metric specifications passed to
      `_get_eval_ops`.
    name: Optional eval name; results go to `<model_dir>/eval_<name>`
      (or `<model_dir>/eval` when empty).
    checkpoint_path: Optional explicit checkpoint; defaults to the latest
      checkpoint in the model directory.
    hooks: Optional list of `SessionRunHook`s.
    log_progress: Whether to log evaluation progress.

  Returns:
    A `(eval_results, global_step)` tuple; `(None, None)` when the legacy
    execution-mode config disables evaluation.

  Raises:
    NotFittedError: If no trained model (checkpoint) is found.
  """
  # TODO(wicke): Remove this once Model and associated code are gone.
  if (hasattr(self._config, 'execution_mode') and
      self._config.execution_mode not in ('all', 'evaluate', 'eval_evalset')):
    return None, None

  # Check that model has been trained (if nothing has been set explicitly).
  if not checkpoint_path:
    latest_path = saver.latest_checkpoint(self._model_dir)
    if not latest_path:
      raise NotFittedError("Couldn't find trained model at %s."
                           % self._model_dir)
    checkpoint_path = latest_path

  # Setup output directory.
  eval_dir = os.path.join(self._model_dir, 'eval' if not name else
                          'eval_' + name)

  with ops.Graph().as_default() as g:
    random_seed.set_random_seed(self._config.tf_random_seed)
    global_step = contrib_framework.create_global_step(g)
    features, labels = input_fn()
    self._check_inputs(features, labels)

    # The default return type of _get_eval_ops is ModelFnOps. But there are
    # some subclasses of tf.contrib.learn.Estimator which override this
    # method and use the legacy signature, namely _get_eval_ops returns an
    # `eval_dict` dictionary of Tensors. The following else-statement code
    # covers these cases, but will soon be deleted after the subclasses are
    # updated.
    # TODO(b/32664904): Update subclasses and delete the else-statement.
    eval_ops = self._get_eval_ops(features, labels, metrics)
    if isinstance(eval_ops, model_fn_lib.ModelFnOps):  # Default signature
      eval_dict = eval_ops.eval_metric_ops
    else:  # Legacy signature
      eval_dict = eval_ops

    update_op, eval_dict = self._extract_metric_update_ops(eval_dict)

    hooks = hooks or []
    if feed_fn:
      hooks.append(_FeedFnHook(feed_fn))
    if steps:
      hooks.append(
          evaluation.StopAfterNEvalsHook(
              steps, log_progress=log_progress))

    # Pick a global-step key that cannot collide with a user metric name.
    global_step_key = 'global_step'
    while global_step_key in eval_dict:
      global_step_key = '_' + global_step_key
    eval_dict[global_step_key] = global_step

    eval_results = evaluation.evaluate_once(
        checkpoint_path=checkpoint_path,
        master=self._config.evaluation_master,
        eval_ops=update_op,
        final_ops=eval_dict,
        hooks=hooks)
    current_global_step = eval_results[global_step_key]

    _write_dict_to_summary(eval_dir, eval_results, current_global_step)

  return eval_results, current_global_step
def _get_features_from_input_fn(self, input_fn):
result = input_fn()
if isinstance(result, (list, tuple)):
return result[0]
return result
def _infer_model(self,
                 input_fn,
                 feed_fn=None,
                 outputs=None,
                 as_iterable=True,
                 iterate_batches=False):
  """Restores the latest checkpoint and computes predictions.
  Args:
    input_fn: Input function returning features (or a `(features, labels)`
      tuple, in which case only the features part is used).
    feed_fn: Optional function returning a `feed_dict` for each run call.
    outputs: Optional list of prediction keys to keep; only valid when the
      model's predictions are a dict.
    as_iterable: If True, return a generator yielding predictions; if
      False, run the graph once and return the raw run result.
    iterate_batches: If True (with `as_iterable`), yield whole batches
      instead of unpacking them into individual examples.
  Returns:
    The predictions: a single `session.run` result, or a generator.
  Raises:
    NotFittedError: If no checkpoint exists in `self._model_dir`.
  """
  # Check that model has been trained.
  checkpoint_path = saver.latest_checkpoint(self._model_dir)
  if not checkpoint_path:
    raise NotFittedError("Couldn't find trained model at %s."
                         % self._model_dir)
  with ops.Graph().as_default() as g:
    random_seed.set_random_seed(self._config.tf_random_seed)
    contrib_framework.create_global_step(g)
    features = self._get_features_from_input_fn(input_fn)
    infer_ops = self._call_legacy_get_predict_ops(features)
    predictions = self._filter_predictions(infer_ops.predictions, outputs)
    mon_sess = monitored_session.MonitoredSession(
        session_creator=monitored_session.ChiefSessionCreator(
            checkpoint_filename_with_path=checkpoint_path))
    if not as_iterable:
      # One-shot mode: run once inside the session and return the result.
      with mon_sess:
        if not mon_sess.should_stop():
          return mon_sess.run(predictions, feed_fn() if feed_fn else None)
    else:
      # The generator takes ownership of mon_sess and closes it when done.
      return self._predict_generator(mon_sess, predictions, feed_fn,
                                     iterate_batches)
def _predict_generator(self, mon_sess, predictions, feed_fn, iterate_batches):
  """Generator yielding prediction results from `mon_sess`.
  Yields whole batches (`iterate_batches=True`), individual rows (non-dict
  predictions), or per-example dicts (dict predictions).  The session is
  closed when the generator finishes.
  """
  with mon_sess:
    while not mon_sess.should_stop():
      preds = mon_sess.run(predictions, feed_fn() if feed_fn else None)
      if iterate_batches:
        yield preds
      elif not isinstance(predictions, dict):
        for pred in preds:
          yield pred
      else:
        # Determine the batch length from the first output so the batch can
        # be unpacked into per-example dicts.
        first_tensor = list(preds.values())[0]
        if isinstance(first_tensor, sparse_tensor.SparseTensorValue):
          # Sparse values carry their batch size in dense_shape, not shape.
          batch_length = first_tensor.dense_shape[0]
        else:
          batch_length = first_tensor.shape[0]
        for i in range(batch_length):
          yield {key: value[i] for key, value in six.iteritems(preds)}
      if self._is_input_constant(feed_fn, mon_sess.graph):
        # A constant input would repeat forever; stop after one batch.
        return
def _is_input_constant(self, feed_fn, graph):
  """Returns True iff the input pipeline yields the same data forever.
  If there are no queue runners and no feed_fn, nothing will ever raise
  `OutOfRangeError`, so the caller must stop after the first epoch.
  """
  # Queue runners eventually raise `OutOfRangeError`, ending iteration.
  has_queue_runners = bool(graph.get_collection(ops.GraphKeys.QUEUE_RUNNERS))
  # data_feeder uses feed_fn to generate `OutOfRangeError`.
  has_feed_fn = feed_fn is not None
  return not (has_queue_runners or has_feed_fn)
def _filter_predictions(self, predictions, outputs):
  """Restricts dict predictions to the requested `outputs` keys.
  Passes predictions through untouched when `outputs` is falsy; raises
  `ValueError` for non-dict predictions or when no requested key exists.
  """
  if not outputs:
    # Nothing requested: keep every prediction.
    return predictions
  if not isinstance(predictions, dict):
    raise ValueError(
        'outputs argument is not valid in case of non-dict predictions.')
  existing_keys = predictions.keys()
  filtered = {}
  for key, value in six.iteritems(predictions):
    if key in outputs:
      filtered[key] = value
  if not filtered:
    raise ValueError('Expected to run at least one output from %s, '
                     'provided %s.' % (existing_keys, outputs))
  return filtered
def _train_model(self, input_fn, hooks):
  """Builds the training graph and runs the training loop.
  Args:
    input_fn: Input function returning a `(features, labels)` tuple.
    hooks: List of `SessionRunHook`s to attach to the training session.
  Returns:
    The final loss value, or None if no training step ran.
  """
  all_hooks = []
  self._graph = ops.Graph()
  with self._graph.as_default() as g, g.device(self._device_fn):
    random_seed.set_random_seed(self._config.tf_random_seed)
    global_step = contrib_framework.create_global_step(g)
    features, labels = input_fn()
    self._check_inputs(features, labels)
    model_fn_ops = self._call_legacy_get_train_ops(features, labels)
    ops.add_to_collection(ops.GraphKeys.LOSSES, model_fn_ops.loss)
    # Always watch the loss for NaNs and log loss/step periodically.
    all_hooks.extend([
        basic_session_run_hooks.NanTensorHook(model_fn_ops.loss),
        basic_session_run_hooks.LoggingTensorHook(
            {
                'loss': model_fn_ops.loss,
                'step': global_step
            },
            every_n_iter=100)
    ])
    all_hooks.extend(hooks)
    scaffold = model_fn_ops.training_scaffold or monitored_session.Scaffold()
    # Ensure a sharded saver exists so checkpointing works even when the
    # model_fn did not provide one.
    if not (scaffold.saver or ops.get_collection(ops.GraphKeys.SAVERS)):
      ops.add_to_collection(
          ops.GraphKeys.SAVERS,
          saver.Saver(
              sharded=True,
              max_to_keep=self._config.keep_checkpoint_max,
              defer_build=True))
    chief_hooks = []
    if (self._config.save_checkpoints_secs or
        self._config.save_checkpoints_steps):
      # Only add a CheckpointSaverHook when no hook list already has one.
      saver_hook_exists = any([
          isinstance(h, basic_session_run_hooks.CheckpointSaverHook)
          for h in (all_hooks + model_fn_ops.training_hooks + chief_hooks +
                    model_fn_ops.training_chief_hooks)
      ])
      if not saver_hook_exists:
        chief_hooks = [
            basic_session_run_hooks.CheckpointSaverHook(
                self._model_dir,
                save_secs=self._config.save_checkpoints_secs,
                save_steps=self._config.save_checkpoints_steps,
                scaffold=scaffold)
        ]
    with monitored_session.MonitoredTrainingSession(
        master=self._config.master,
        is_chief=self._config.is_chief,
        checkpoint_dir=self._model_dir,
        scaffold=scaffold,
        hooks=all_hooks + model_fn_ops.training_hooks,
        chief_only_hooks=chief_hooks + model_fn_ops.training_chief_hooks,
        save_checkpoint_secs=0,  # Saving is handled by a hook.
        save_summaries_steps=self._config.save_summary_steps,
        config=self.config.tf_config) as mon_sess:
      loss = None
      while not mon_sess.should_stop():
        _, loss = mon_sess.run([model_fn_ops.train_op, model_fn_ops.loss])
    summary_io.SummaryWriterCache.clear()
    return loss
def _call_legacy_get_predict_ops(self, features):
  """Calls `_get_predict_ops` and normalizes its result to `ModelFnOps`.
  Some subclasses override `_get_predict_ops` with the legacy signature and
  return a raw `predictions` Tensor (or dict of Tensors); wrap those here.
  """
  # TODO(b/32664904): Update subclasses and delete the legacy branch.
  infer_ops = self._get_predict_ops(features)
  if not isinstance(infer_ops, model_fn_lib.ModelFnOps):
    # Legacy signature: wrap the bare predictions in a ModelFnOps.
    infer_ops = model_fn_lib.ModelFnOps(
        mode=model_fn_lib.ModeKeys.INFER, predictions=infer_ops)
  return infer_ops
def _call_legacy_get_train_ops(self, features, labels):
  """Calls `_get_train_ops` and normalizes its result to `ModelFnOps`.
  Legacy subclasses return a `(train_op, loss)` pair instead of
  `ModelFnOps`; wrap those here.
  """
  train_ops = self._get_train_ops(features, labels)
  if not isinstance(train_ops, model_fn_lib.ModelFnOps):
    # Legacy signature: element 0 is the train_op, element 1 the loss.
    train_op, loss = train_ops[0], train_ops[1]
    train_ops = model_fn_lib.ModelFnOps(
        mode=model_fn_lib.ModeKeys.TRAIN,
        predictions=None,
        loss=loss,
        train_op=train_op)
  return train_ops
def _identity_feature_engineering_fn(features, labels):
return features, labels
class Estimator(BaseEstimator):
  """Estimator class is the basic TensorFlow model trainer/evaluator.
  """
  def __init__(self,
               model_fn=None,
               model_dir=None,
               config=None,
               params=None,
               feature_engineering_fn=None):
    """Constructs an `Estimator` instance.
    Args:
      model_fn: Model function. Follows the signature:
        * Args:
          * `features`: single `Tensor` or `dict` of `Tensor`s
                 (depending on data passed to `fit`),
          * `labels`: `Tensor` or `dict` of `Tensor`s (for multi-head
                 models). If mode is `ModeKeys.INFER`, `labels=None` will be
                 passed. If the `model_fn`'s signature does not accept
                 `mode`, the `model_fn` must still be able to handle
                 `labels=None`.
          * `mode`: Optional. Specifies if this training, evaluation or
                 prediction. See `ModeKeys`.
          * `params`: Optional `dict` of hyperparameters. Will receive what
                 is passed to Estimator in `params` parameter. This allows
                 to configure Estimators from hyper parameter tuning.
          * `config`: Optional configuration object. Will receive what is passed
                 to Estimator in `config` parameter, or the default `config`.
                 Allows updating things in your model_fn based on configuration
                 such as `num_ps_replicas`.
          * `model_dir`: Optional directory where model parameters, graph etc
                 are saved. Will receive what is passed to Estimator in
                 `model_dir` parameter, or the default `model_dir`. Allows
                 updating things in your model_fn that expect model_dir, such as
                 training hooks.
        * Returns:
          `ModelFnOps`
        Also supports a legacy signature which returns tuple of:
          * predictions: `Tensor`, `SparseTensor` or dictionary of same.
              Can also be any type that is convertible to a `Tensor` or
              `SparseTensor`, or dictionary of same.
          * loss: Scalar loss `Tensor`.
          * train_op: Training update `Tensor` or `Operation`.
        Supports next three signatures for the function:
          * `(features, labels) -> (predictions, loss, train_op)`
          * `(features, labels, mode) -> (predictions, loss, train_op)`
          * `(features, labels, mode, params) -> (predictions, loss, train_op)`
          * `(features, labels, mode, params, config) ->
              (predictions, loss, train_op)`
          * `(features, labels, mode, params, config, model_dir) ->
              (predictions, loss, train_op)`
      model_dir: Directory to save model parameters, graph and etc. This can
        also be used to load checkpoints from the directory into a estimator to
        continue training a previously saved model.
      config: Configuration object.
      params: `dict` of hyper parameters that will be passed into `model_fn`.
        Keys are names of parameters, values are basic python types.
      feature_engineering_fn: Feature engineering function. Takes features and
        labels which are the output of `input_fn` and returns features and
        labels which will be fed into `model_fn`. Please check `model_fn` for
        a definition of features and labels.
    Raises:
      ValueError: parameters of `model_fn` don't match `params`.
    """
    super(Estimator, self).__init__(model_dir=model_dir, config=config)
    if model_fn is not None:
      # Check number of arguments of the given function matches requirements.
      model_fn_args = _get_arguments(model_fn)
      # `params` supplied but the model_fn cannot receive them: hard error.
      if params is not None and 'params' not in model_fn_args:
        raise ValueError('Estimator\'s model_fn (%s) has less than 4 '
                         'arguments, but not None params (%s) are passed.' %
                         (model_fn, params))
      # model_fn accepts `params` but none were supplied: warn only.
      if params is None and 'params' in model_fn_args:
        logging.warning('Estimator\'s model_fn (%s) includes params '
                        'argument, but params are not passed to Estimator.',
                        model_fn)
    self._model_fn = model_fn
    self.params = params
    # Fall back to the identity function when no feature engineering is given.
    self._feature_engineering_fn = (
        feature_engineering_fn or _identity_feature_engineering_fn)
def _call_model_fn(self, features, labels, mode):
  """Calls model function with support of 2, 3 or 4 arguments.
  Args:
    features: features dict.
    labels: labels dict.
    mode: ModeKeys
  Returns:
    A `ModelFnOps` object. If model_fn returns a tuple, wraps them up in a
    `ModelFnOps` object.
  Raises:
    ValueError: if model_fn returns invalid objects.
  """
  features, labels = self._feature_engineering_fn(features, labels)
  model_fn_args = _get_arguments(self._model_fn)
  # Only pass the optional keyword arguments the model_fn actually declares.
  kwargs = {}
  if 'mode' in model_fn_args:
    kwargs['mode'] = mode
  if 'params' in model_fn_args:
    kwargs['params'] = self.params
  if 'config' in model_fn_args:
    kwargs['config'] = self.config
  if 'model_dir' in model_fn_args:
    kwargs['model_dir'] = self.model_dir
  model_fn_results = self._model_fn(features, labels, **kwargs)
  if isinstance(model_fn_results, model_fn_lib.ModelFnOps):
    return model_fn_results
  # Here model_fn_ops should be a tuple with 3 elements.
  if len(model_fn_results) != 3:
    raise ValueError('Unrecognized value returned by model_fn, '
                     'please return ModelFnOps.')
  # Legacy (predictions, loss, train_op) tuple; wrap it in ModelFnOps.
  return model_fn_lib.ModelFnOps(
      mode=mode,
      predictions=model_fn_results[0],
      loss=model_fn_results[1],
      train_op=model_fn_results[2])
def _get_train_ops(self, features, labels):
  """Method that builds model graph and returns trainer ops.
  Expected to be overriden by sub-classes that require custom support.
  This implementation uses `model_fn` passed as parameter to constructor to
  build model.
  Args:
    features: `Tensor` or `dict` of `Tensor` objects.
    labels: `Tensor` or `dict` of `Tensor` objects.
  Returns:
    `ModelFnOps` object.
  """
  # Delegate to the user-supplied model_fn in TRAIN mode.
  return self._call_model_fn(features, labels, model_fn_lib.ModeKeys.TRAIN)
def _get_eval_ops(self, features, labels, metrics):
  """Method that builds model graph and returns evaluation ops.
  Expected to be overriden by sub-classes that require custom support.
  This implementation uses `model_fn` passed as parameter to constructor to
  build model.
  Args:
    features: `Tensor` or `dict` of `Tensor` objects.
    labels: `Tensor` or `dict` of `Tensor` objects.
    metrics: Dict of metrics to run. If None, the default metric functions
      are used; if {}, no metrics are used. Otherwise, `metrics` should map
      friendly names for the metric to a `MetricSpec` object defining which
      model outputs to evaluate against which labels with which metric
      function. Metric ops should support streaming, e.g., returning
      update_op and value tensors. See more details in
      `../../../../metrics/python/metrics/ops/streaming_metrics.py` and
      `../metric_spec.py`.
  Returns:
    `ModelFnOps` object.
  Raises:
    ValueError: if `metrics` don't match `labels`.
  """
  model_fn_ops = self._call_model_fn(
      features, labels, model_fn_lib.ModeKeys.EVAL)
  # Custom metrics should overwrite defaults.
  if metrics:
    model_fn_ops.eval_metric_ops.update(_make_metrics_ops(
        metrics, features, labels, model_fn_ops.predictions))
  # Always report a streaming mean of the loss unless one is already present.
  if metric_key.MetricKey.LOSS not in model_fn_ops.eval_metric_ops:
    model_fn_ops.eval_metric_ops[metric_key.MetricKey.LOSS] = (
        metrics_lib.streaming_mean(model_fn_ops.loss))
  return model_fn_ops
def _get_predict_ops(self, features):
  """Method that builds model graph and returns prediction ops.
  Expected to be overriden by sub-classes that require custom support.
  This implementation uses `model_fn` passed as parameter to constructor to
  build model.
  Args:
    features: `Tensor` or `dict` of `Tensor` objects.
  Returns:
    `ModelFnOps` object.
  """
  # At prediction time there are no real labels; feed placeholders that match
  # the label signatures recorded during training (self._labels_info).
  labels = tensor_signature.create_placeholders_from_signatures(
      self._labels_info)
  return self._call_model_fn(features, labels, model_fn_lib.ModeKeys.INFER)
@experimental
def export_savedmodel(
    self, export_dir_base, input_fn,
    default_output_alternative_key=None,
    assets_extra=None,
    as_text=False,
    exports_to_keep=None):
  """Exports inference graph as a SavedModel into given dir.
  Args:
    export_dir_base: A string containing a directory to write the exported
      graph and checkpoints.
    input_fn: A function that takes no argument and
      returns an `InputFnOps`.
    default_output_alternative_key: the name of the head to serve when none is
      specified.
    assets_extra: A dict specifying how to populate the assets.extra directory
      within the exported SavedModel. Each key should give the destination
      path (including the filename) relative to the assets.extra directory.
      The corresponding value gives the full path of the source file to be
      copied. For example, the simple case of copying a single file without
      renaming it is specified as
      `{'my_asset_file.txt': '/path/to/my_asset_file.txt'}`.
    as_text: whether to write the SavedModel proto in text format.
    exports_to_keep: Number of exports to keep.
  Returns:
    The string path to the exported directory.
  Raises:
    ValueError: if an unrecognized export_type is requested.
  """
  if input_fn is None:
    raise ValueError('input_fn must be defined.')
  with ops.Graph().as_default() as g:
    contrib_variables.create_global_step(g)
    # Call the input_fn and collect the input alternatives.
    input_ops = input_fn()
    input_alternatives, features = (
        saved_model_export_utils.get_input_alternatives(input_ops))
    # Call the model_fn and collect the output alternatives.
    model_fn_ops = self._call_model_fn(features, None,
                                       model_fn_lib.ModeKeys.INFER)
    output_alternatives, actual_default_output_alternative_key = (
        saved_model_export_utils.get_output_alternatives(
            model_fn_ops, default_output_alternative_key))
    # Build the SignatureDefs from all pairs of input and output signatures
    signature_def_map = saved_model_export_utils.build_all_signature_defs(
        input_alternatives, output_alternatives,
        actual_default_output_alternative_key)
    # Locate the latest checkpoint
    # TODO(soergel): does it help that we know we have one from this step?
    checkpoint_path = saver.latest_checkpoint(self._model_dir)
    if not checkpoint_path:
      raise NotFittedError("Couldn't find trained model at %s."
                           % self._model_dir)
    export_dir = saved_model_export_utils.get_timestamped_export_dir(
        export_dir_base)
    with tf_session.Session('') as session:
      variables.initialize_local_variables()
      data_flow_ops.tables_initializer()
      # Restore trained variables into this fresh session before exporting.
      saver_for_restore = saver.Saver(
          variables.global_variables(),
          sharded=True)
      saver_for_restore.restore(session, checkpoint_path)
      # init_op re-initializes local variables and tables when the exported
      # model is loaded for serving.
      init_op = control_flow_ops.group(
          variables.local_variables_initializer(),
          data_flow_ops.tables_initializer())
      # Perform the export
      builder = saved_model_builder.SavedModelBuilder(export_dir)
      builder.add_meta_graph_and_variables(
          session, [tag_constants.SERVING],
          signature_def_map=signature_def_map,
          assets_collection=ops.get_collection(
              ops.GraphKeys.ASSET_FILEPATHS),
          legacy_init_op=init_op)
      builder.save(as_text)
    # Add the extra assets
    if assets_extra:
      assets_extra_path = os.path.join(compat.as_bytes(export_dir),
                                       compat.as_bytes('assets.extra'))
      for dest_relative, source in assets_extra.items():
        dest_absolute = os.path.join(compat.as_bytes(assets_extra_path),
                                     compat.as_bytes(dest_relative))
        dest_path = os.path.dirname(dest_absolute)
        gfile.MakeDirs(dest_path)
        gfile.Copy(source, dest_absolute)
    return export_dir
class _FeedFnHook(session_run_hook.SessionRunHook):
  """Runs feed_fn and sets the feed_dict accordingly."""
  def __init__(self, feed_fn):
    # feed_fn: zero-argument callable returning a feed_dict for each step.
    self.feed_fn = feed_fn
  def before_run(self, run_context):  # pylint: disable=unused-argument
    # Supply a fresh feed_dict before every session.run call.
    return session_run_hook.SessionRunArgs(
        fetches=None, feed_dict=self.feed_fn())
# For time of deprecation x,y from Estimator allow direct access.
# pylint: disable=protected-access
class SKCompat(sklearn.BaseEstimator):
  """Scikit learn wrapper for TensorFlow Learn Estimator."""
  def __init__(self, estimator):
    # The wrapped tf.contrib.learn Estimator that does the actual work.
    self._estimator = estimator
  def fit(self, x, y, batch_size=128, steps=None, max_steps=None,
          monitors=None):
    """Trains the wrapped estimator on arrays `x`, `y`; returns self."""
    input_fn, feed_fn = _get_input_fn(x, y, input_fn=None, feed_fn=None,
                                      batch_size=batch_size, shuffle=True,
                                      epochs=None)
    all_monitors = []
    if feed_fn:
      # The feed hook pushes each batch into the graph via feed_dict.
      all_monitors = [_FeedFnHook(feed_fn)]
    if monitors:
      all_monitors.extend(monitors)
    self._estimator.fit(input_fn=input_fn,
                        steps=steps,
                        max_steps=max_steps,
                        monitors=all_monitors)
    # Return self for sklearn-style chaining.
    return self
  def score(self, x, y, batch_size=128, steps=None, metrics=None):
    """Evaluates the wrapped estimator; returns a dict of metric results."""
    input_fn, feed_fn = _get_input_fn(x, y, input_fn=None,
                                      feed_fn=None, batch_size=batch_size,
                                      shuffle=False, epochs=1)
    if metrics is not None and not isinstance(metrics, dict):
      raise ValueError('Metrics argument should be None or dict. '
                       'Got %s.' % metrics)
    eval_results, global_step = self._estimator._evaluate_model(
        input_fn=input_fn,
        feed_fn=feed_fn,
        steps=steps,
        metrics=metrics,
        name='score')
    if eval_results is not None:
      eval_results.update({'global_step': global_step})
    return eval_results
  def predict(self, x, batch_size=128, outputs=None):
    """Runs inference on `x`; returns one array or a dict of arrays."""
    input_fn, feed_fn = _get_input_fn(
        x, None, input_fn=None, feed_fn=None, batch_size=batch_size,
        shuffle=False, epochs=1)
    results = list(
        self._estimator._infer_model(
            input_fn=input_fn,
            feed_fn=feed_fn,
            outputs=outputs,
            as_iterable=True,
            iterate_batches=True))
    if not isinstance(results[0], dict):
      # Single-output model: concatenate the per-batch arrays.
      return np.concatenate([output for output in results], axis=0)
    # Dict output: concatenate the batches key by key.
    return {
        key: np.concatenate(
            [output[key] for output in results], axis=0)
        for key in results[0]
    }
| apache-2.0 |
40123210/w17b_exam | static/Brython3.1.1-20150328-091302/Lib/site-packages/pygame/base.py | 603 | 4652 | #!/usr/bin/env python
## https://bitbucket.org/pygame/pygame/raw/2383b8ab0e2273bc83c545ab9c18fee1f3459c64/pygame/base.py
'''Pygame core routines
Contains the core routines that are used by the rest of the
pygame modules. Its routines are merged directly into the pygame
namespace. This mainly includes the auto-initialization `init` and
`quit` routines.
There is a small module named `locals` that also gets merged into
this namespace. This contains all the constants needed by pygame.
Object constructors also get placed into this namespace, you can
call functions like `Rect` and `Surface` to create objects of
that type. As a convenience, you can import the members of
pygame.locals directly into your module's namespace with::
from pygame.locals import *
Most of the pygame examples do this if you'd like to take a look.
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id$'
import atexit
import sys
#import SDL
_quitfunctions = []
class error(RuntimeError):
    """Exception raised by pygame routines (lowercase name is part of the
    classic pygame API)."""
    pass
def init():
    '''Autoinitialize all imported pygame modules.
    Initialize all imported pygame modules. Includes pygame modules
    that are not part of the base modules (like font and image).
    It does not raise exceptions, but instead silently counts which
    modules have failed to init. The return argument contains a count
    of the number of modules initialized, and the number of modules
    that failed to initialize.
    You can always initialize the modules you want by hand. The
    modules that need it have an `init` and `quit` routine built in,
    which you can call directly. They also have a `get_init` routine
    which you can use to doublecheck the initialization. Note that
    the manual `init` routines will raise an exception on error. Be
    aware that most platforms require the display module to be
    initialized before others. This `init` will handle that for you,
    but if you initialize by hand, be aware of this constraint.
    As with the manual `init` routines. It is safe to call this
    `init` as often as you like.
    :rtype: int, int
    :return: (count_passed, count_failed)
    '''
    success = 0
    fail = 0
    #SDL.SDL_Init(SDL.SDL_INIT_EVENTTHREAD | SDL.SDL_INIT_TIMER)
    if _video_autoinit():
        success += 1
    else:
        fail += 1
    # Snapshot the module list: a module's __PYGAMEinit__ may import further
    # modules, and mutating sys.modules while iterating it raises RuntimeError.
    for mod in list(sys.modules.values()):
        if hasattr(mod, '__PYGAMEinit__') and callable(mod.__PYGAMEinit__):
            try:
                mod.__PYGAMEinit__()
                success += 1
            except Exception:
                # Count the failure but keep going: one broken module must not
                # abort initialization of the rest.  (The previous bare
                # `except:` also swallowed SystemExit/KeyboardInterrupt.)
                fail += 1
    return success, fail
def register_quit(func):
    '''Routine to call when pygame quits.
    The given callback routine will be called when pygame is
    quitting. Quit callbacks are served on a 'last in, first out'
    basis.
    '''
    # _atexit_quit pops from the end of this module-level list, which gives
    # the documented LIFO ordering.
    _quitfunctions.append(func)
def _video_autoquit():
if SDL.SDL_WasInit(SDL.SDL_INIT_VIDEO):
SDL.SDL_QuitSubSystem(SDL.SDL_INIT_VIDEO)
def _video_autoinit():
return 1
#if not SDL.SDL_WasInit(SDL.SDL_INIT_VIDEO):
# SDL.SDL_InitSubSystem(SDL.SDL_INIT_VIDEO)
# SDL.SDL_EnableUNICODE(1)
#return 1
def _atexit_quit():
    '''Run all registered quit callbacks, then shut down the video stub.
    Callbacks are served LIFO; popping inside the loop also covers callbacks
    that register further callbacks while running.
    '''
    while _quitfunctions:
        _quitfunctions.pop()()
    _video_autoquit()
    #SDL.SDL_Quit()
def get_sdl_version():
    '''Get the version of the linked SDL runtime.
    :rtype: int, int, int
    :return: major, minor, patch
    '''
    # No SDL runtime is linked in this port, so no version is available.
    #v = SDL.SDL_Linked_Version()
    #return v.major, v.minor, v.patch
    return (None, None, None)
def quit():
    '''Uninitialize all pygame modules.
    Uninitialize all pygame modules that have been initialized. Even
    if you initialized the module by hand, this `quit` will
    uninitialize it for you.
    All the pygame modules are uninitialized automatically when your
    program exits, so you will usually not need this routine. If you
    program plans to keep running after it is done with pygame, then
    would be a good time to make this call.
    '''
    # Delegates to the same teardown path the atexit handler uses.
    _atexit_quit()
def get_error():
    '''Get current error message.
    SDL maintains an internal current error message. This message is
    usually given to you when an SDL related exception occurs, but
    sometimes you may want to call this directly yourself.
    :rtype: str
    '''
    # No SDL runtime is linked in this port; there is never a pending error.
    #return SDL.SDL_GetError()
    return ''
def _rgba_from_obj(obj):
if not type(obj) in (tuple, list):
return None
if len(obj) == 1:
return _rgba_from_obj(obj[0])
elif len(obj) == 3:
return (int(obj[0]), int(obj[1]), int(obj[2]), 255)
elif len(obj) == 4:
return obj
else:
return None
atexit.register(_atexit_quit)
| agpl-3.0 |
vipins/ccccms | env/Lib/site-packages/django/contrib/gis/geos/base.py | 86 | 1682 | from ctypes import c_void_p
from types import NoneType
from django.contrib.gis.geos.error import GEOSException
# Trying to import GDAL libraries, if available. Have to place in
# try/except since this package may be used outside GeoDjango.
try:
from django.contrib.gis import gdal
except ImportError:
# A 'dummy' gdal module.
class GDALInfo(object):
HAS_GDAL = False
GEOJSON = False
gdal = GDALInfo()
# NumPy supported?
try:
import numpy
except ImportError:
numpy = False
class GEOSBase(object):
"""
Base object for GEOS objects that has a pointer access property
that controls access to the underlying C pointer.
"""
# Initially the pointer is NULL.
_ptr = None
# Default allowed pointer type.
ptr_type = c_void_p
# Pointer access property.
def _get_ptr(self):
# Raise an exception if the pointer isn't valid don't
# want to be passing NULL pointers to routines --
# that's very bad.
if self._ptr: return self._ptr
else: raise GEOSException('NULL GEOS %s pointer encountered.' % self.__class__.__name__)
def _set_ptr(self, ptr):
# Only allow the pointer to be set with pointers of the
# compatible type or None (NULL).
if isinstance(ptr, (self.ptr_type, NoneType)):
self._ptr = ptr
else:
raise TypeError('Incompatible pointer type')
# Property for controlling access to the GEOS object pointers. Using
# this raises an exception when the pointer is NULL, thus preventing
# the C library from attempting to access an invalid memory location.
ptr = property(_get_ptr, _set_ptr)
| bsd-3-clause |
heladio/my-blog | pelica-env/lib/python2.7/site-packages/pip/_vendor/requests/packages/urllib3/connection.py | 371 | 8967 | import datetime
import sys
import socket
from socket import timeout as SocketTimeout
import warnings
from .packages import six
try: # Python 3
from http.client import HTTPConnection as _HTTPConnection, HTTPException
except ImportError:
from httplib import HTTPConnection as _HTTPConnection, HTTPException
class DummyConnection(object):
    """Sentinel connection class used to detect a failed ConnectionCls import."""
    pass
try: # Compiled with SSL?
HTTPSConnection = DummyConnection
import ssl
BaseSSLError = ssl.SSLError
except (ImportError, AttributeError): # Platform-specific: No SSL.
ssl = None
class BaseSSLError(BaseException):
pass
try: # Python 3:
# Not a no-op, we're adding this to the namespace so it can be imported.
ConnectionError = ConnectionError
except NameError: # Python 2:
class ConnectionError(Exception):
pass
from .exceptions import (
ConnectTimeoutError,
SystemTimeWarning,
SecurityWarning,
)
from .packages.ssl_match_hostname import match_hostname
from .util.ssl_ import (
resolve_cert_reqs,
resolve_ssl_version,
ssl_wrap_socket,
assert_fingerprint,
)
from .util import connection
port_by_scheme = {
'http': 80,
'https': 443,
}
RECENT_DATE = datetime.date(2014, 1, 1)
class HTTPConnection(_HTTPConnection, object):
    """
    Based on httplib.HTTPConnection but provides an extra constructor
    backwards-compatibility layer between older and newer Pythons.
    Additional keyword parameters are used to configure attributes of the connection.
    Accepted parameters include:
      - ``strict``: See the documentation on :class:`urllib3.connectionpool.HTTPConnectionPool`
      - ``source_address``: Set the source address for the current connection.
        .. note:: This is ignored for Python 2.6. It is only applied for 2.7 and 3.x
      - ``socket_options``: Set specific options on the underlying socket. If not specified, then
        defaults are loaded from ``HTTPConnection.default_socket_options`` which includes disabling
        Nagle's algorithm (sets TCP_NODELAY to 1) unless the connection is behind a proxy.
        For example, if you wish to enable TCP Keep Alive in addition to the defaults,
        you might pass::
            HTTPConnection.default_socket_options + [
                (socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1),
            ]
        Or you may want to disable the defaults by passing an empty list (e.g., ``[]``).
    """
    default_port = port_by_scheme['http']
    #: Disable Nagle's algorithm by default.
    #: ``[(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)]``
    default_socket_options = [(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)]
    #: Whether this connection verifies the host's certificate.
    is_verified = False
    def __init__(self, *args, **kw):
        if six.PY3:  # Python 3
            kw.pop('strict', None)
        # Pre-set source_address in case we have an older Python like 2.6.
        self.source_address = kw.get('source_address')
        if sys.version_info < (2, 7):  # Python 2.6
            # _HTTPConnection on Python 2.6 will balk at this keyword arg, but
            # not newer versions. We can still use it when creating a
            # connection though, so we pop it *after* we have saved it as
            # self.source_address.
            kw.pop('source_address', None)
        #: The socket options provided by the user. If no options are
        #: provided, we use the default options.
        self.socket_options = kw.pop('socket_options', self.default_socket_options)
        # Superclass also sets self.source_address in Python 2.7+.
        _HTTPConnection.__init__(self, *args, **kw)
    def _new_conn(self):
        """ Establish a socket connection and set nodelay settings on it.
        :return: New socket connection.
        """
        extra_kw = {}
        if self.source_address:
            extra_kw['source_address'] = self.source_address
        if self.socket_options:
            extra_kw['socket_options'] = self.socket_options
        try:
            conn = connection.create_connection(
                (self.host, self.port), self.timeout, **extra_kw)
        except SocketTimeout:
            # Re-raise as urllib3's own timeout type so callers can catch a
            # single exception class.
            raise ConnectTimeoutError(
                self, "Connection to %s timed out. (connect timeout=%s)" %
                (self.host, self.timeout))
        return conn
    def _prepare_conn(self, conn):
        # Adopt the raw socket; set up CONNECT tunneling when proxying.
        self.sock = conn
        # the _tunnel_host attribute was added in python 2.6.3 (via
        # http://hg.python.org/cpython/rev/0f57b30a152f) so pythons 2.6(0-2) do
        # not have them.
        if getattr(self, '_tunnel_host', None):
            # TODO: Fix tunnel so it doesn't depend on self.sock state.
            self._tunnel()
            # Mark this connection as not reusable
            self.auto_open = 0
    def connect(self):
        # Open a fresh socket and attach it to this connection.
        conn = self._new_conn()
        self._prepare_conn(conn)
class HTTPSConnection(HTTPConnection):
    """HTTPS connection without certificate verification.
    The socket is wrapped with ``ssl.wrap_socket`` and no ``cert_reqs``, so
    the server certificate is not validated; ``VerifiedHTTPSConnection``
    below provides the verifying variant.
    """
    default_port = port_by_scheme['https']
    def __init__(self, host, port=None, key_file=None, cert_file=None,
                 strict=None, timeout=socket._GLOBAL_DEFAULT_TIMEOUT, **kw):
        HTTPConnection.__init__(self, host, port, strict=strict,
                                timeout=timeout, **kw)
        self.key_file = key_file
        self.cert_file = cert_file
        # Required property for Google AppEngine 1.9.0 which otherwise causes
        # HTTPS requests to go out as HTTP. (See Issue #356)
        self._protocol = 'https'
    def connect(self):
        conn = self._new_conn()
        self._prepare_conn(conn)
        # Wrap without cert_reqs: any certificate is accepted.
        self.sock = ssl.wrap_socket(conn, self.key_file, self.cert_file)
class VerifiedHTTPSConnection(HTTPSConnection):
    """
    Based on httplib.HTTPSConnection but wraps the socket with
    SSL certification.
    """
    cert_reqs = None
    ca_certs = None
    ssl_version = None
    assert_fingerprint = None
    def set_cert(self, key_file=None, cert_file=None,
                 cert_reqs=None, ca_certs=None,
                 assert_hostname=None, assert_fingerprint=None):
        # Stash certificate/verification settings; connect() consumes them.
        self.key_file = key_file
        self.cert_file = cert_file
        self.cert_reqs = cert_reqs
        self.ca_certs = ca_certs
        self.assert_hostname = assert_hostname
        self.assert_fingerprint = assert_fingerprint
    def connect(self):
        # Add certificate verification
        conn = self._new_conn()
        resolved_cert_reqs = resolve_cert_reqs(self.cert_reqs)
        resolved_ssl_version = resolve_ssl_version(self.ssl_version)
        hostname = self.host
        if getattr(self, '_tunnel_host', None):
            # _tunnel_host was added in Python 2.6.3
            # (See: http://hg.python.org/cpython/rev/0f57b30a152f)
            self.sock = conn
            # Calls self._set_hostport(), so self.host is
            # self._tunnel_host below.
            self._tunnel()
            # Mark this connection as not reusable
            self.auto_open = 0
            # Override the host with the one we're requesting data from.
            hostname = self._tunnel_host
        is_time_off = datetime.date.today() < RECENT_DATE
        if is_time_off:
            # A clock set before RECENT_DATE makes certificates look
            # not-yet-valid; warn so verification failures are explicable.
            warnings.warn((
                'System time is way off (before {0}). This will probably '
                'lead to SSL verification errors').format(RECENT_DATE),
                SystemTimeWarning
            )
        # Wrap socket using verification with the root certs in
        # trusted_root_certs
        self.sock = ssl_wrap_socket(conn, self.key_file, self.cert_file,
                                    cert_reqs=resolved_cert_reqs,
                                    ca_certs=self.ca_certs,
                                    server_hostname=hostname,
                                    ssl_version=resolved_ssl_version)
        if self.assert_fingerprint:
            # Pin the exact server certificate by digest.
            assert_fingerprint(self.sock.getpeercert(binary_form=True),
                               self.assert_fingerprint)
        elif resolved_cert_reqs != ssl.CERT_NONE \
                and self.assert_hostname is not False:
            # Hostname verification (skipped only when explicitly disabled).
            cert = self.sock.getpeercert()
            if not cert.get('subjectAltName', ()):
                warnings.warn((
                    'Certificate has no `subjectAltName`, falling back to check for a `commonName` for now. '
                    'This feature is being removed by major browsers and deprecated by RFC 2818. '
                    '(See https://github.com/shazow/urllib3/issues/497 for details.)'),
                    SecurityWarning
                )
            match_hostname(cert, self.assert_hostname or hostname)
        self.is_verified = (resolved_cert_reqs == ssl.CERT_REQUIRED
                            or self.assert_fingerprint is not None)
if ssl:
# Make a copy for testing.
UnverifiedHTTPSConnection = HTTPSConnection
HTTPSConnection = VerifiedHTTPSConnection
| mit |
idwanglu2010/git-repo | subcmds/init.py | 48 | 11402 | #
# Copyright (C) 2008 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import platform
import re
import shutil
import sys
from color import Coloring
from command import InteractiveCommand, MirrorSafeCommand
from error import ManifestParseError
from project import SyncBuffer
from git_config import GitConfig
from git_command import git_require, MIN_GIT_VERSION
class Init(InteractiveCommand, MirrorSafeCommand):
common = True
helpSummary = "Initialize repo in the current directory"
helpUsage = """
%prog [options]
"""
helpDescription = """
The '%prog' command is run once to install and initialize repo.
The latest repo source code and manifest collection is downloaded
from the server and is installed in the .repo/ directory in the
current working directory.
The optional -b argument can be used to select the manifest branch
to checkout and use. If no branch is specified, master is assumed.
The optional -m argument can be used to specify an alternate manifest
to be used. If no manifest is specified, the manifest default.xml
will be used.
The --reference option can be used to point to a directory that
has the content of a --mirror sync. This will make the working
directory use as much data as possible from the local reference
directory when fetching from the server. This will make the sync
go a lot faster by reducing data traffic on the network.
Switching Manifest Branches
---------------------------
To switch to another manifest branch, `repo init -b otherbranch`
may be used in an existing client. However, as this only updates the
manifest, a subsequent `repo sync` (or `repo sync -d`) is necessary
to update the working directory files.
"""
def _Options(self, p):
# Logging
g = p.add_option_group('Logging options')
g.add_option('-q', '--quiet',
dest="quiet", action="store_true", default=False,
help="be quiet")
# Manifest
g = p.add_option_group('Manifest options')
g.add_option('-u', '--manifest-url',
dest='manifest_url',
help='manifest repository location', metavar='URL')
g.add_option('-b', '--manifest-branch',
dest='manifest_branch',
help='manifest branch or revision', metavar='REVISION')
g.add_option('-m', '--manifest-name',
dest='manifest_name', default='default.xml',
help='initial manifest file', metavar='NAME.xml')
g.add_option('--mirror',
dest='mirror', action='store_true',
help='create a replica of the remote repositories '
'rather than a client working directory')
g.add_option('--reference',
dest='reference',
help='location of mirror directory', metavar='DIR')
g.add_option('--depth', type='int', default=None,
dest='depth',
help='create a shallow clone with given depth; see git clone')
g.add_option('-g', '--groups',
dest='groups', default='all,-notdefault',
help='restrict manifest projects to ones with a specified group',
metavar='GROUP')
g.add_option('-p', '--platform',
dest='platform', default='auto',
help='restrict manifest projects to ones with a specified '
'platform group [auto|all|none|linux|darwin|...]',
metavar='PLATFORM')
# Tool
g = p.add_option_group('repo Version options')
g.add_option('--repo-url',
dest='repo_url',
help='repo repository location', metavar='URL')
g.add_option('--repo-branch',
dest='repo_branch',
help='repo branch or revision', metavar='REVISION')
g.add_option('--no-repo-verify',
dest='no_repo_verify', action='store_true',
help='do not verify repo source code')
# Other
g = p.add_option_group('Other options')
g.add_option('--config-name',
dest='config_name', action="store_true", default=False,
help='Always prompt for name/e-mail')
def _SyncManifest(self, opt):
m = self.manifest.manifestProject
is_new = not m.Exists
if is_new:
if not opt.manifest_url:
print >>sys.stderr, 'fatal: manifest url (-u) is required.'
sys.exit(1)
if not opt.quiet:
print >>sys.stderr, 'Get %s' \
% GitConfig.ForUser().UrlInsteadOf(opt.manifest_url)
m._InitGitDir()
if opt.manifest_branch:
m.revisionExpr = opt.manifest_branch
else:
m.revisionExpr = 'refs/heads/master'
else:
if opt.manifest_branch:
m.revisionExpr = opt.manifest_branch
else:
m.PreSync()
if opt.manifest_url:
r = m.GetRemote(m.remote.name)
r.url = opt.manifest_url
r.ResetFetch()
r.Save()
groups = re.split('[,\s]+', opt.groups)
all_platforms = ['linux', 'darwin']
platformize = lambda x: 'platform-' + x
if opt.platform == 'auto':
if (not opt.mirror and
not m.config.GetString('repo.mirror') == 'true'):
groups.append(platformize(platform.system().lower()))
elif opt.platform == 'all':
groups.extend(map(platformize, all_platforms))
elif opt.platform in all_platforms:
groups.extend(platformize(opt.platform))
elif opt.platform != 'none':
print >>sys.stderr, 'fatal: invalid platform flag'
sys.exit(1)
groups = [x for x in groups if x]
groupstr = ','.join(groups)
if opt.platform == 'auto' and groupstr == 'all,-notdefault,platform-' + platform.system().lower():
groupstr = None
m.config.SetString('manifest.groups', groupstr)
if opt.reference:
m.config.SetString('repo.reference', opt.reference)
if opt.mirror:
if is_new:
m.config.SetString('repo.mirror', 'true')
else:
print >>sys.stderr, 'fatal: --mirror not supported on existing client'
sys.exit(1)
if not m.Sync_NetworkHalf(is_new=is_new):
r = m.GetRemote(m.remote.name)
print >>sys.stderr, 'fatal: cannot obtain manifest %s' % r.url
# Better delete the manifest git dir if we created it; otherwise next
# time (when user fixes problems) we won't go through the "is_new" logic.
if is_new:
shutil.rmtree(m.gitdir)
sys.exit(1)
if opt.manifest_branch:
m.MetaBranchSwitch(opt.manifest_branch)
syncbuf = SyncBuffer(m.config)
m.Sync_LocalHalf(syncbuf)
syncbuf.Finish()
if is_new or m.CurrentBranch is None:
if not m.StartBranch('default'):
print >>sys.stderr, 'fatal: cannot create default in manifest'
sys.exit(1)
def _LinkManifest(self, name):
if not name:
print >>sys.stderr, 'fatal: manifest name (-m) is required.'
sys.exit(1)
try:
self.manifest.Link(name)
except ManifestParseError as e:
print >>sys.stderr, "fatal: manifest '%s' not available" % name
print >>sys.stderr, 'fatal: %s' % str(e)
sys.exit(1)
def _Prompt(self, prompt, value):
sys.stdout.write('%-10s [%s]: ' % (prompt, value))
a = sys.stdin.readline().strip()
if a == '':
return value
return a
def _ShouldConfigureUser(self):
gc = self.manifest.globalConfig
mp = self.manifest.manifestProject
# If we don't have local settings, get from global.
if not mp.config.Has('user.name') or not mp.config.Has('user.email'):
if not gc.Has('user.name') or not gc.Has('user.email'):
return True
mp.config.SetString('user.name', gc.GetString('user.name'))
mp.config.SetString('user.email', gc.GetString('user.email'))
print ''
print 'Your identity is: %s <%s>' % (mp.config.GetString('user.name'),
mp.config.GetString('user.email'))
print 'If you want to change this, please re-run \'repo init\' with --config-name'
return False
def _ConfigureUser(self):
mp = self.manifest.manifestProject
while True:
print ''
name = self._Prompt('Your Name', mp.UserName)
email = self._Prompt('Your Email', mp.UserEmail)
print ''
print 'Your identity is: %s <%s>' % (name, email)
sys.stdout.write('is this correct [y/N]? ')
a = sys.stdin.readline().strip()
if a in ('yes', 'y', 't', 'true'):
break
if name != mp.UserName:
mp.config.SetString('user.name', name)
if email != mp.UserEmail:
mp.config.SetString('user.email', email)
def _HasColorSet(self, gc):
for n in ['ui', 'diff', 'status']:
if gc.Has('color.%s' % n):
return True
return False
def _ConfigureColor(self):
gc = self.manifest.globalConfig
if self._HasColorSet(gc):
return
class _Test(Coloring):
def __init__(self):
Coloring.__init__(self, gc, 'test color display')
self._on = True
out = _Test()
print ''
print "Testing colorized output (for 'repo diff', 'repo status'):"
for c in ['black','red','green','yellow','blue','magenta','cyan']:
out.write(' ')
out.printer(fg=c)(' %-6s ', c)
out.write(' ')
out.printer(fg='white', bg='black')(' %s ' % 'white')
out.nl()
for c in ['bold','dim','ul','reverse']:
out.write(' ')
out.printer(fg='black', attr=c)(' %-6s ', c)
out.nl()
sys.stdout.write('Enable color display in this user account (y/N)? ')
a = sys.stdin.readline().strip().lower()
if a in ('y', 'yes', 't', 'true', 'on'):
gc.SetString('color.ui', 'auto')
def _ConfigureDepth(self, opt):
"""Configure the depth we'll sync down.
Args:
opt: Options from optparse. We care about opt.depth.
"""
# Opt.depth will be non-None if user actually passed --depth to repo init.
if opt.depth is not None:
if opt.depth > 0:
# Positive values will set the depth.
depth = str(opt.depth)
else:
# Negative numbers will clear the depth; passing None to SetString
# will do that.
depth = None
# We store the depth in the main manifest project.
self.manifest.manifestProject.config.SetString('repo.depth', depth)
def Execute(self, opt, args):
git_require(MIN_GIT_VERSION, fail=True)
if opt.reference:
opt.reference = os.path.expanduser(opt.reference)
self._SyncManifest(opt)
self._LinkManifest(opt.manifest_name)
if os.isatty(0) and os.isatty(1) and not self.manifest.IsMirror:
if opt.config_name or self._ShouldConfigureUser():
self._ConfigureUser()
self._ConfigureColor()
self._ConfigureDepth(opt)
if self.manifest.IsMirror:
init_type = 'mirror '
else:
init_type = ''
print ''
print 'repo %sinitialized in %s' % (init_type, self.manifest.topdir)
| apache-2.0 |
balamurugan01/foundation6 | node_modules/node-gyp/gyp/pylib/gyp/easy_xml_test.py | 2698 | 3270 | #!/usr/bin/env python
# Copyright (c) 2011 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
""" Unit tests for the easy_xml.py file. """
import gyp.easy_xml as easy_xml
import unittest
import StringIO
class TestSequenceFunctions(unittest.TestCase):
def setUp(self):
self.stderr = StringIO.StringIO()
def test_EasyXml_simple(self):
self.assertEqual(
easy_xml.XmlToString(['test']),
'<?xml version="1.0" encoding="utf-8"?><test/>')
self.assertEqual(
easy_xml.XmlToString(['test'], encoding='Windows-1252'),
'<?xml version="1.0" encoding="Windows-1252"?><test/>')
def test_EasyXml_simple_with_attributes(self):
self.assertEqual(
easy_xml.XmlToString(['test2', {'a': 'value1', 'b': 'value2'}]),
'<?xml version="1.0" encoding="utf-8"?><test2 a="value1" b="value2"/>')
def test_EasyXml_escaping(self):
original = '<test>\'"\r&\nfoo'
converted = '<test>\'"
&
foo'
converted_apos = converted.replace("'", ''')
self.assertEqual(
easy_xml.XmlToString(['test3', {'a': original}, original]),
'<?xml version="1.0" encoding="utf-8"?><test3 a="%s">%s</test3>' %
(converted, converted_apos))
def test_EasyXml_pretty(self):
self.assertEqual(
easy_xml.XmlToString(
['test3',
['GrandParent',
['Parent1',
['Child']
],
['Parent2']
]
],
pretty=True),
'<?xml version="1.0" encoding="utf-8"?>\n'
'<test3>\n'
' <GrandParent>\n'
' <Parent1>\n'
' <Child/>\n'
' </Parent1>\n'
' <Parent2/>\n'
' </GrandParent>\n'
'</test3>\n')
def test_EasyXml_complex(self):
# We want to create:
target = (
'<?xml version="1.0" encoding="utf-8"?>'
'<Project>'
'<PropertyGroup Label="Globals">'
'<ProjectGuid>{D2250C20-3A94-4FB9-AF73-11BC5B73884B}</ProjectGuid>'
'<Keyword>Win32Proj</Keyword>'
'<RootNamespace>automated_ui_tests</RootNamespace>'
'</PropertyGroup>'
'<Import Project="$(VCTargetsPath)\\Microsoft.Cpp.props"/>'
'<PropertyGroup '
'Condition="\'$(Configuration)|$(Platform)\'=='
'\'Debug|Win32\'" Label="Configuration">'
'<ConfigurationType>Application</ConfigurationType>'
'<CharacterSet>Unicode</CharacterSet>'
'</PropertyGroup>'
'</Project>')
xml = easy_xml.XmlToString(
['Project',
['PropertyGroup', {'Label': 'Globals'},
['ProjectGuid', '{D2250C20-3A94-4FB9-AF73-11BC5B73884B}'],
['Keyword', 'Win32Proj'],
['RootNamespace', 'automated_ui_tests']
],
['Import', {'Project': '$(VCTargetsPath)\\Microsoft.Cpp.props'}],
['PropertyGroup',
{'Condition': "'$(Configuration)|$(Platform)'=='Debug|Win32'",
'Label': 'Configuration'},
['ConfigurationType', 'Application'],
['CharacterSet', 'Unicode']
]
])
self.assertEqual(xml, target)
# Allow running this test file directly: `python easy_xml_test.py`.
if __name__ == '__main__':
  unittest.main()
| mit |
oppia/oppia | core/controllers/skill_mastery_test.py | 4 | 12773 | # Copyright 2019 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the Question Player controller."""
from __future__ import absolute_import # pylint: disable=import-only-modules
from __future__ import unicode_literals # pylint: disable=import-only-modules
from core.domain import skill_services
from core.tests import test_utils
import feconf
class SkillMasteryDataHandlerTest(test_utils.GenericTestBase):
    """Tests for the skill mastery data handler (GET and PUT of a user's
    degree-of-mastery values per skill)."""

    def setUp(self):
        """Completes the setup for SkillMasteryDataHandler."""
        super(SkillMasteryDataHandlerTest, self).setUp()
        self.signup(self.NEW_USER_EMAIL, self.NEW_USER_USERNAME)
        self.user_id = self.get_user_id_from_email(self.NEW_USER_EMAIL)
        # Two skills with fixed initial mastery degrees shared by the tests.
        self.skill_id_1 = skill_services.get_new_skill_id()
        self.save_new_skill(
            self.skill_id_1, self.user_id, description='Skill Description 1')
        self.skill_id_2 = skill_services.get_new_skill_id()
        self.save_new_skill(
            self.skill_id_2, self.user_id, description='Skill Description 2')
        self.degree_of_mastery_1 = 0.3
        self.degree_of_mastery_2 = 0.5

    def test_get_with_valid_skill_ids_list(self):
        """GET returns the stored mastery degree for every requested skill."""
        skill_services.create_user_skill_mastery(
            self.user_id, self.skill_id_1, self.degree_of_mastery_1)
        skill_services.create_user_skill_mastery(
            self.user_id, self.skill_id_2, self.degree_of_mastery_2)
        skill_ids = [self.skill_id_1, self.skill_id_2]
        self.login(self.NEW_USER_EMAIL)
        response_json = self.get_json(
            '%s' % feconf.SKILL_MASTERY_DATA_URL,
            params={
                'comma_separated_skill_ids': ','.join(skill_ids)
            })
        degrees_of_mastery = {
            self.skill_id_1: self.degree_of_mastery_1,
            self.skill_id_2: self.degree_of_mastery_2
        }
        self.assertEqual(
            response_json['degrees_of_mastery'], degrees_of_mastery)
        self.logout()

    def test_get_with_skill_without_skill_mastery(self):
        """Skills with no stored mastery come back as None, not an error."""
        skill_services.create_user_skill_mastery(
            self.user_id, self.skill_id_1, self.degree_of_mastery_1)
        skill_ids = [self.skill_id_1, self.skill_id_2]
        self.login(self.NEW_USER_EMAIL)
        response_json = self.get_json(
            '%s' % feconf.SKILL_MASTERY_DATA_URL,
            params={
                'comma_separated_skill_ids': ','.join(skill_ids)
            })
        degrees_of_mastery = {
            self.skill_id_1: self.degree_of_mastery_1,
            self.skill_id_2: None
        }
        self.assertEqual(
            response_json['degrees_of_mastery'], degrees_of_mastery)
        self.logout()

    def test_get_with_no_skill_ids_returns_400(self):
        """Missing comma_separated_skill_ids parameter is a 400."""
        self.login(self.NEW_USER_EMAIL)
        json_response = self.get_json(
            '%s' % feconf.SKILL_MASTERY_DATA_URL,
            expected_status_int=400)
        self.assertEqual(
            json_response['error'],
            'Expected request to contain parameter comma_separated_skill_ids.')
        self.logout()

    def test_get_with_invalid_skill_ids_returns_400(self):
        """A malformed skill id is a 400 with the offending id in the error."""
        skill_ids = ['invalid_skill_id']
        self.login(self.NEW_USER_EMAIL)
        json_response = self.get_json(
            '%s' % feconf.SKILL_MASTERY_DATA_URL,
            params={
                'comma_separated_skill_ids': ','.join(skill_ids)
            }, expected_status_int=400)
        self.assertEqual(
            json_response['error'],
            'Invalid skill ID invalid_skill_id')
        self.logout()

    def test_get_with_nonexistent_skill_ids_returns_404(self):
        """A well-formed but unknown skill id is a 404."""
        skill_id_3 = skill_services.get_new_skill_id()
        skill_ids = [self.skill_id_1, skill_id_3]
        self.login(self.NEW_USER_EMAIL)
        self.get_json(
            '%s' % feconf.SKILL_MASTERY_DATA_URL,
            params={
                'comma_separated_skill_ids': ','.join(skill_ids)
            }, expected_status_int=404)
        self.logout()

    def test_put_with_valid_skill_mastery_dict(self):
        """PUT applies per-skill deltas to the stored mastery degrees."""
        skill_services.create_user_skill_mastery(
            self.user_id, self.skill_id_1, self.degree_of_mastery_1)
        skill_services.create_user_skill_mastery(
            self.user_id, self.skill_id_2, self.degree_of_mastery_2)
        payload = {}
        mastery_change_per_skill = {
            self.skill_id_1: 0.3,
            self.skill_id_2: -0.3
        }
        payload['mastery_change_per_skill'] = mastery_change_per_skill
        self.login(self.NEW_USER_EMAIL)
        csrf_token = self.get_new_csrf_token()
        self.put_json(
            '%s' % feconf.SKILL_MASTERY_DATA_URL,
            payload, csrf_token=csrf_token)
        # 0.3 + 0.3 and 0.5 - 0.3 respectively.
        degrees_of_mastery = {
            self.skill_id_1: 0.6,
            self.skill_id_2: 0.2
        }
        self.assertEqual(
            skill_services.get_multi_user_skill_mastery(
                self.user_id, [self.skill_id_1, self.skill_id_2]),
            degrees_of_mastery)
        self.logout()

    def test_put_with_skill_with_no_skill_mastery(self):
        """A delta for a skill with no prior mastery starts from zero."""
        skill_services.create_user_skill_mastery(
            self.user_id, self.skill_id_1, self.degree_of_mastery_1)
        payload = {}
        mastery_change_per_skill = {
            self.skill_id_1: 0.3,
            self.skill_id_2: 0.3
        }
        payload['mastery_change_per_skill'] = mastery_change_per_skill
        self.login(self.NEW_USER_EMAIL)
        csrf_token = self.get_new_csrf_token()
        self.put_json(
            '%s' % feconf.SKILL_MASTERY_DATA_URL,
            payload, csrf_token=csrf_token)
        degrees_of_mastery = {
            self.skill_id_1: 0.6,
            self.skill_id_2: 0.3
        }
        self.assertEqual(
            skill_services.get_multi_user_skill_mastery(
                self.user_id, [self.skill_id_1, self.skill_id_2]),
            degrees_of_mastery)
        self.logout()

    def test_put_with_skill_mastery_lower_than_zero(self):
        """Resulting mastery is clamped at the lower bound 0.0."""
        skill_services.create_user_skill_mastery(
            self.user_id, self.skill_id_1, self.degree_of_mastery_1)
        skill_services.create_user_skill_mastery(
            self.user_id, self.skill_id_2, self.degree_of_mastery_2)
        payload = {}
        mastery_change_per_skill = {
            self.skill_id_1: -0.5,
            self.skill_id_2: 0.3
        }
        payload['mastery_change_per_skill'] = mastery_change_per_skill
        self.login(self.NEW_USER_EMAIL)
        csrf_token = self.get_new_csrf_token()
        self.put_json(
            '%s' % feconf.SKILL_MASTERY_DATA_URL,
            payload, csrf_token=csrf_token)
        degrees_of_mastery = {
            self.skill_id_1: 0.0,
            self.skill_id_2: 0.8
        }
        self.assertEqual(
            skill_services.get_multi_user_skill_mastery(
                self.user_id, [self.skill_id_1, self.skill_id_2]),
            degrees_of_mastery)
        self.logout()

    def test_put_with_skill_mastery_higher_than_one(self):
        """Resulting mastery is clamped at the upper bound 1.0."""
        skill_services.create_user_skill_mastery(
            self.user_id, self.skill_id_1, self.degree_of_mastery_1)
        skill_services.create_user_skill_mastery(
            self.user_id, self.skill_id_2, self.degree_of_mastery_2)
        payload = {}
        mastery_change_per_skill = {
            self.skill_id_1: 0.9,
            self.skill_id_2: 0.3
        }
        payload['mastery_change_per_skill'] = mastery_change_per_skill
        self.login(self.NEW_USER_EMAIL)
        csrf_token = self.get_new_csrf_token()
        self.put_json(
            '%s' % feconf.SKILL_MASTERY_DATA_URL,
            payload, csrf_token=csrf_token)
        degrees_of_mastery = {
            self.skill_id_1: 1.0,
            self.skill_id_2: 0.8
        }
        self.assertEqual(
            skill_services.get_multi_user_skill_mastery(
                self.user_id, [self.skill_id_1, self.skill_id_2]),
            degrees_of_mastery)
        self.logout()

    def test_put_with_invalid_type_returns_400(self):
        """A non-dict mastery_change_per_skill payload is a 400."""
        payload = {}
        mastery_change_per_skill = [self.skill_id_1, self.skill_id_2]
        payload['mastery_change_per_skill'] = mastery_change_per_skill
        self.login(self.NEW_USER_EMAIL)
        csrf_token = self.get_new_csrf_token()
        json_response = self.put_json(
            '%s' % feconf.SKILL_MASTERY_DATA_URL,
            payload, csrf_token=csrf_token, expected_status_int=400)
        self.assertEqual(
            json_response['error'],
            'Expected payload to contain mastery_change_per_skill as a dict.'
        )
        self.logout()

    def test_put_with_no_mastery_change_per_skill_returns_400(self):
        """A payload missing mastery_change_per_skill entirely is a 400."""
        payload = {}
        self.login(self.NEW_USER_EMAIL)
        csrf_token = self.get_new_csrf_token()
        json_response = self.put_json(
            '%s' % feconf.SKILL_MASTERY_DATA_URL,
            payload, csrf_token=csrf_token, expected_status_int=400)
        self.assertEqual(
            json_response['error'],
            'Expected payload to contain mastery_change_per_skill as a dict.'
        )
        self.logout()

    def test_put_with_invalid_skill_ids_returns_400(self):
        """A malformed skill id key in the payload is a 400."""
        payload = {}
        mastery_change_per_skill = {
            'invalid_skill_id': 0.3
        }
        payload['mastery_change_per_skill'] = mastery_change_per_skill
        self.login(self.NEW_USER_EMAIL)
        csrf_token = self.get_new_csrf_token()
        json_response = self.put_json(
            '%s' % feconf.SKILL_MASTERY_DATA_URL,
            payload, csrf_token=csrf_token, expected_status_int=400)
        self.assertEqual(
            json_response['error'], 'Invalid skill ID invalid_skill_id')
        self.logout()

    def test_put_with_nonexistent_skill_ids_returns_404(self):
        """A well-formed but unknown skill id key in the payload is a 404."""
        skill_id_3 = skill_services.get_new_skill_id()
        payload = {}
        mastery_change_per_skill = {
            self.skill_id_1: 0.3,
            self.skill_id_2: 0.5,
            skill_id_3: 0.6
        }
        payload['mastery_change_per_skill'] = mastery_change_per_skill
        self.login(self.NEW_USER_EMAIL)
        csrf_token = self.get_new_csrf_token()
        self.put_json(
            '%s' % feconf.SKILL_MASTERY_DATA_URL,
            payload, csrf_token=csrf_token, expected_status_int=404)
        self.logout()

    def test_put_with_invalid_type_of_degree_of_mastery_returns_400(self):
        """Non-numeric (dict or bool) delta values are rejected with a 400."""
        payload = {}
        mastery_change_per_skill = {
            self.skill_id_1: 0.1,
            self.skill_id_2: {}
        }
        payload['mastery_change_per_skill'] = mastery_change_per_skill
        self.login(self.NEW_USER_EMAIL)
        csrf_token = self.get_new_csrf_token()
        json_response = self.put_json(
            '%s' % feconf.SKILL_MASTERY_DATA_URL,
            payload, csrf_token=csrf_token, expected_status_int=400)
        self.assertEqual(
            json_response['error'],
            'Expected degree of mastery of skill %s to be a number, '
            'received %s.' % (self.skill_id_2, '{}'))
        # Booleans are ints in Python, so the handler must reject them
        # explicitly as well.
        mastery_change_per_skill = {
            self.skill_id_1: 0.1,
            self.skill_id_2: True
        }
        payload['mastery_change_per_skill'] = mastery_change_per_skill
        json_response = self.put_json(
            '%s' % feconf.SKILL_MASTERY_DATA_URL,
            payload, csrf_token=csrf_token, expected_status_int=400)
        self.assertEqual(
            json_response['error'],
            'Expected degree of mastery of skill %s to be a number, '
            'received %s.' % (self.skill_id_2, 'True'))
        self.logout()

    def test_put_with_no_logged_in_user_returns_401(self):
        """A PUT without an authenticated session is a 401."""
        payload = {}
        mastery_change_per_skill = {
            self.skill_id_1: 0.3,
            self.skill_id_2: 0.5
        }
        payload['mastery_change_per_skill'] = mastery_change_per_skill
        csrf_token = self.get_new_csrf_token()
        json_response = self.put_json(
            '%s' % feconf.SKILL_MASTERY_DATA_URL,
            payload, csrf_token=csrf_token, expected_status_int=401)
        self.assertEqual(
            json_response['error'],
            'You must be logged in to access this resource.')
| apache-2.0 |
msumit/qds-sdk-py | qds_sdk/commands.py | 1 | 56029 | """
The commands module contains the base definition for
a generic Qubole command and the implementation of all
the specific commands
"""
from qds_sdk.qubole import Qubole
from qds_sdk.resource import Resource
from qds_sdk.exception import ParseError
from qds_sdk.account import Account
from qds_sdk.util import GentleOptionParser
from qds_sdk.util import OptionParsingError
from qds_sdk.util import OptionParsingExit
from optparse import SUPPRESS_HELP
import boto
import time
import logging
import sys
import re
import pipes
import os
import json
# Module-level logger for all command classes in this file.
log = logging.getLogger("qds_commands")

# Pattern matcher for s3 path: group 1 is the bucket, group 2 the key prefix.
_URI_RE = re.compile(r's3://([^/]+)/?(.*)')
class Command(Resource):

    """
    qds_sdk.Command is the base Qubole command class. Different types of Qubole
    commands can subclass this.
    """

    """all commands use the /commands endpoint"""
    rest_entity_path = "commands"

    @staticmethod
    def is_done(status):
        """
        Does the status represent a completed command
        Args:
            `status`: a status string
        Returns:
            True/False
        """
        return status == "cancelled" or status == "done" or status == "error"

    @staticmethod
    def is_success(status):
        # A command succeeded only when it finished with status "done".
        return status == "done"

    @classmethod
    def create(cls, **kwargs):
        """
        Create a command object by issuing a POST request to the /command endpoint
        Note - this does not wait for the command to complete
        Args:
            `**kwargs`: keyword arguments specific to command type
        Returns:
            Command object
        """
        conn = Qubole.agent()
        if kwargs.get('command_type') is None:
            # Default the command type to the subclass name (e.g. "HiveCommand").
            kwargs['command_type'] = cls.__name__

        if kwargs.get('tags') is not None:
            # The API expects tags as a list, not a comma-separated string.
            kwargs['tags'] = kwargs['tags'].split(',')

        return cls(conn.post(cls.rest_entity_path, data=kwargs))

    @classmethod
    def run(cls, **kwargs):
        """
        Create a command object by issuing a POST request to the /command endpoint
        Waits until the command is complete. Repeatedly polls to check status
        Args:
            `**kwargs`: keyword arguments specific to command type
        Returns:
            Command object
        """
        # err_pointer/tmp_pointer track how many bytes of the error and
        # temporary log streams have already been printed, so each poll only
        # emits the newly appended tail.
        err_pointer, tmp_pointer, new_bytes = 0, 0, 0
        cmd = cls.create(**kwargs)
        while not Command.is_done(cmd.status):
            time.sleep(Qubole.poll_interval)
            cmd = cls.find(cmd.id)
            if kwargs.get('print_logs_live', False):
                log, err_length, tmp_length = cmd.get_log_partial(err_pointer, tmp_pointer)
                # err_length is returned as a string by the API headers.
                if err_length != "0":
                    # New bytes were written to the error stream; the tmp
                    # stream counter restarts from the new tmp_length.
                    err_pointer += int(err_length)
                    new_bytes = int(err_length) + int(tmp_length) - tmp_pointer
                    tmp_pointer = int(tmp_length)
                else:
                    tmp_pointer += int(tmp_length)
                    new_bytes = int(tmp_length)
                if len(log) > 0 and new_bytes > 0:
                    # Python 2 print-to-stream syntax; emits only the unseen tail.
                    print >>sys.stderr, log[-new_bytes:]
        #
        return cmd

    @classmethod
    def cancel_id(cls, id):
        """
        Cancels command denoted by this id
        Args:
            `id`: command id
        """
        conn = Qubole.agent()
        data = {"status": "kill"}
        return conn.put(cls.element_path(id), data)

    def cancel(self):
        """
        Cancels command represented by this object
        """
        self.__class__.cancel_id(self.id)

    @classmethod
    def get_log_id(cls, id):
        """
        Fetches log for the command represented by this id
        Args:
            `id`: command id
        """
        conn = Qubole.agent()
        r = conn.get_raw(cls.element_path(id) + "/logs")
        return r.text

    def get_log(self):
        """
        Fetches log for the command represented by this object
        Returns:
            The log as a string
        """
        log_path = self.meta_data['logs_resource']
        conn = Qubole.agent()
        r = conn.get_raw(log_path)
        return r.text

    def get_log_partial(self, err_pointer=0, tmp_pointer=0):
        """
        Fetches log for the command represented by this object

        Returns:
            A 3-element list: [log text, err stream length, tmp stream
            length]. Lengths come from response headers when present,
            otherwise 0.
        """
        log_path = self.meta_data['logs_resource']
        conn = Qubole.agent()
        r = conn.get_raw(log_path, params={'err_file_processed':err_pointer, 'tmp_file_processed':tmp_pointer})
        if 'err_length' in r.headers.keys() and 'tmp_length' in r.headers.keys():
            return [r.text, r.headers['err_length'], r.headers['tmp_length']]
        return [r.text, 0, 0]

    @classmethod
    def get_jobs_id(cls, id):
        """
        Fetches information about the hadoop jobs which were started by this
        command id. This information is only available for commands which have
        completed (i.e. Status = 'done', 'cancelled' or 'error'.) Also, the
        cluster which ran this command should be running for this information
        to be available. Otherwise only the URL and job_id is shown.
        Args:
            `id`: command id
        """
        conn = Qubole.agent()
        r = conn.get_raw(cls.element_path(id) + "/jobs")
        return r.text

    def get_results(self, fp=sys.stdout, inline=True, delim=None, fetch=True):
        """
        Fetches the result for the command represented by this object
        get_results will retrieve results of the command and write to stdout by default.
        Optionally one can write to a filestream specified in `fp`. The `inline` argument
        decides whether the result can be returned as a CRLF separated string. In cases where
        the results are greater than 20MB, get_results will attempt to read from s3 and write
        to fp. The retrieval of results from s3 can be turned off by the `fetch` argument
        Args:
            `fp`: a file object to write the results to directly
            `inline`: whether or not results are returned inline as CRLF separated string
            `fetch`: True to fetch the result even if it is greater than 20MB, False to
                     only get the result location on s3
        """
        result_path = self.meta_data['results_resource']
        conn = Qubole.agent()
        r = conn.get(result_path, {'inline': inline})
        if r.get('inline'):
            # Small results arrive inline in the response body.
            if sys.version_info < (3, 0, 0):
                fp.write(r['results'].encode('utf8'))
            else:
                import io
                # On Python 3, pick the write path that matches the stream
                # type, since encoded bytes cannot go to a text stream.
                if isinstance(fp, io.TextIOBase):
                    fp.buffer.write(r['results'].encode('utf8'))
                elif isinstance(fp, io.BufferedIOBase) or isinstance(fp, io.RawIOBase):
                    fp.write(r['results'].encode('utf8'))
                else:
                    # Can this happen? Don't know what's the right thing to do in this case.
                    pass
        else:
            # Large results live on S3; either download them or just report
            # their locations depending on `fetch`.
            if fetch:
                storage_credentials = conn.get(Account.credentials_rest_entity_path)
                boto_conn = boto.connect_s3(aws_access_key_id=storage_credentials['storage_access_key'],
                                            aws_secret_access_key=storage_credentials['storage_secret_key'],
                                            security_token = storage_credentials['session_token'])
                log.info("Starting download from result locations: [%s]" % ",".join(r['result_location']))
                #fetch latest value of num_result_dir
                num_result_dir = Command.find(self.id).num_result_dir
                for s3_path in r['result_location']:
                    # In Python 3,
                    # If the delim is None, fp should be in binary mode because
                    # boto expects it to be.
                    # If the delim is not None, then both text and binary modes
                    # work.
                    _download_to_local(boto_conn, s3_path, fp, num_result_dir, delim=delim,
                                       skip_data_avail_check=isinstance(self, PrestoCommand))
            else:
                fp.write(",".join(r['result_location']))
class HiveCommand(Command):
    """Qubole Hive query command: declares the `hivecmd` CLI options and
    parses them into the keyword arguments used by Command.create/run."""

    usage = ("hivecmd <submit|run> [options]")

    optparser = GentleOptionParser(usage=usage)
    optparser.add_option("-q", "--query", dest="query", help="query string")

    optparser.add_option("-f", "--script_location", dest="script_location",
                         help="Path where hive query to run is stored. Can be S3 URI or local file path")

    optparser.add_option("--macros", dest="macros",
                         help="expressions to expand macros used in query")

    optparser.add_option("--tags", dest="tags",
                         help="comma-separated list of tags to be associated with the query ( e.g., tag1 tag1,tag2 )")

    optparser.add_option("--sample_size", dest="sample_size",
                         help="size of sample in bytes on which to run query")

    optparser.add_option("--cluster-label", dest="label",
                         help="the label of the cluster to run the command on")

    optparser.add_option("--notify", action="store_true", dest="can_notify",
                         default=False, help="sends an email on command completion")

    optparser.add_option("--name", dest="name",
                         help="Assign a name to this query")

    optparser.add_option("--hive-version", dest="hive_version",
                         help="Specifies the hive version to be used. eg: 0.13,1.2,etc.")

    optparser.add_option("--print-logs", action="store_true", dest="print_logs",
                         default=False, help="Fetch logs and print them to stderr.")

    optparser.add_option("--print-logs-live", action="store_true", dest="print_logs_live",
                         default=False, help="Fetch logs and print them to stderr while command is running.")

    # NOTE(review): optparse coerces an option with `choices` to type
    # 'choice' and compares the *string* value from the command line against
    # the choices list, so integer choices [1,2,3] can never match any user
    # input -- confirm whether these should be the strings '1','2','3'.
    optparser.add_option("--retry", dest="retry", default=0, choices=[1,2,3], help="Number of retries for a job")

    @classmethod
    def parse(cls, args):
        """
        Parse command line arguments to construct a dictionary of command
        parameters that can be used to create a command
        Args:
            `args`: sequence of arguments
        Returns:
            Dictionary that can be used in create method
        Raises:
            ParseError: when the arguments are not correct
        """
        try:
            (options, args) = cls.optparser.parse_args(args)
            if options.query is None and options.script_location is None:
                raise ParseError("One of query or script location"
                                 " must be specified",
                                 cls.optparser.format_help())
        except OptionParsingError as e:
            raise ParseError(e.msg, cls.optparser.format_help())
        except OptionParsingExit as e:
            return None

        if options.script_location is not None:
            if options.query is not None:
                raise ParseError(
                    "Both query and script_location cannot be specified",
                    cls.optparser.format_help())

            if ((options.script_location.find("s3://") != 0) and
                (options.script_location.find("s3n://") != 0)):

                # script location is local file
                # Read it here and submit its contents as the query instead.
                try:
                    q = open(options.script_location).read()
                except IOError as e:
                    raise ParseError("Unable to open script location: %s" %
                                     str(e),
                                     cls.optparser.format_help())
                options.script_location = None
                options.query = q

        if options.macros is not None:
            # Macros are passed on the command line as a JSON document.
            options.macros = json.loads(options.macros)
        v = vars(options)
        v["command_type"] = "HiveCommand"
        return v
class SqlCommand(Command):
    """Qubole SQL command: declares the `sqlcmd` CLI options and parses
    them into the keyword arguments used by Command.create/run.  Mirrors
    HiveCommand's option handling."""

    usage = ("sqlcmd <submit|run> [options]")

    optparser = GentleOptionParser(usage=usage)
    optparser.add_option("-q", "--query", dest="query", help="query string")

    optparser.add_option("-f", "--script_location", dest="script_location",
                         help="Path where hive query to run is stored. Can be S3 URI or local file path")

    optparser.add_option("--macros", dest="macros",
                         help="expressions to expand macros used in query")

    optparser.add_option("--tags", dest="tags",
                         help="comma-separated list of tags to be associated with the query ( e.g., tag1 tag1,tag2 )")

    optparser.add_option("--sample_size", dest="sample_size",
                         help="size of sample in bytes on which to run query")

    optparser.add_option("--cluster-label", dest="label",
                         help="the label of the cluster to run the command on")

    optparser.add_option("--notify", action="store_true", dest="can_notify",
                         default=False, help="sends an email on command completion")

    optparser.add_option("--name", dest="name",
                         help="Assign a name to this query")

    optparser.add_option("--print-logs", action="store_true", dest="print_logs",
                         default=False, help="Fetch logs and print them to stderr.")

    optparser.add_option("--print-logs-live", action="store_true", dest="print_logs_live",
                         default=False, help="Fetch logs and print them to stderr while command is running.")

    @classmethod
    def parse(cls, args):
        """
        Parse command line arguments to construct a dictionary of command
        parameters that can be used to create a command
        Args:
            `args`: sequence of arguments
        Returns:
            Dictionary that can be used in create method
        Raises:
            ParseError: when the arguments are not correct
        """
        try:
            (options, args) = cls.optparser.parse_args(args)
            if options.query is None and options.script_location is None:
                raise ParseError("One of query or script location"
                                 " must be specified",
                                 cls.optparser.format_help())
        except OptionParsingError as e:
            raise ParseError(e.msg, cls.optparser.format_help())
        except OptionParsingExit as e:
            return None

        if options.script_location is not None:
            if options.query is not None:
                raise ParseError(
                    "Both query and script_location cannot be specified",
                    cls.optparser.format_help())

            if ((options.script_location.find("s3://") != 0) and
                (options.script_location.find("s3n://") != 0)):

                # script location is local file
                # Read it here and submit its contents as the query instead.
                try:
                    q = open(options.script_location).read()
                except IOError as e:
                    raise ParseError("Unable to open script location: %s" %
                                     str(e),
                                     cls.optparser.format_help())
                options.script_location = None
                options.query = q

        if options.macros is not None:
            # Macros are passed on the command line as a JSON document.
            options.macros = json.loads(options.macros)
        v = vars(options)
        v["command_type"] = "SqlCommand"
        return v
class SparkCommand(Command):
    """Qubole Spark command.

    Exactly one of --program, --cmdline, --sql or --script_location must be
    supplied; the validate_* helpers below enforce that mutual exclusion.
    When a script file is given, its contents (and its language, from the
    file extension) are folded into options.program / options.sql.
    """
    usage = ("sparkcmd <submit|run> [options]")
    # Languages accepted for --language and inferable from a script extension.
    allowedlanglist = ["python", "scala","R"]
    optparser = GentleOptionParser(usage=usage)
    optparser.add_option("--program", dest="program",help=SUPPRESS_HELP)
    optparser.add_option("--cmdline", dest="cmdline", help="command line for Spark")
    optparser.add_option("--sql", dest="sql", help="sql for Spark")
    optparser.add_option("-f", "--script_location", dest="script_location",
                         help="Path where spark program to run is stored. Has to be a local file path")
    optparser.add_option("--macros", dest="macros",
                         help="expressions to expand macros used in query")
    optparser.add_option("--tags", dest="tags",
                         help="comma-separated list of tags to be associated with the query ( e.g., tag1 tag1,tag2 )")
    optparser.add_option("--cluster-label", dest="label", help="the label of the cluster to run the command on")
    optparser.add_option("--language", dest="language", choices = allowedlanglist, help=SUPPRESS_HELP)
    optparser.add_option("--app-id", dest="app_id", type=int, help="The Spark Job Server app id to submit this snippet to.")
    optparser.add_option("--notify", action="store_true", dest="can_notify", default=False, help="sends an email on command completion")
    optparser.add_option("--name", dest="name", help="Assign a name to this query")
    optparser.add_option("--arguments", dest = "arguments", help = "Spark Submit Command Line Options")
    optparser.add_option("--user_program_arguments", dest = "user_program_arguments", help = "Arguments for User Program")
    optparser.add_option("--print-logs", action="store_true", dest="print_logs",
                         default=False, help="Fetch logs and print them to stderr.")
    optparser.add_option("--print-logs-live", action="store_true", dest="print_logs_live",
                         default=False, help="Fetch logs and print them to stderr while command is running.")
    # NOTE(review): unlike the other command classes, --retry here has no
    # choices restriction; the raw (string) value is forwarded as-is.
    optparser.add_option("--retry", dest="retry", default=0, help="Number of retries")
    @classmethod
    def validate_program(cls, options):
        """Ensure --program is given exactly when no other source option is,
        and that --language accompanies it.

        Raises:
            ParseError: on a missing/conflicting source option or language.
        """
        bool_program = options.program is not None
        bool_other_options = options.script_location is not None or options.cmdline is not None or options.sql is not None
        # if both are false then no option is specified ==> raise ParseError
        # if both are true then atleast two option specified ==> raise ParseError
        if bool_program == bool_other_options:
            raise ParseError("Exactly One of script location or program or cmdline or sql should be specified", cls.optparser.format_help())
        if bool_program:
            if options.language is None:
                raise ParseError("Unspecified language for Program", cls.optparser.format_help())
    @classmethod
    def validate_cmdline(cls, options):
        """Ensure --cmdline excludes every other source option, as well as
        --language and --app-id (neither applies to a raw command line).

        Raises:
            ParseError: on conflicting options.
        """
        bool_cmdline = options.cmdline is not None
        bool_other_options = options.script_location is not None or options.program is not None or options.sql is not None
        # if both are false then no option is specified ==> raise ParseError
        # if both are true then atleast two option specified ==> raise ParseError
        if bool_cmdline == bool_other_options:
            raise ParseError("Exactly One of script location or program or cmdline or sql should be specified", cls.optparser.format_help())
        if bool_cmdline:
            if options.language is not None:
                raise ParseError("Language cannot be specified with the commandline option", cls.optparser.format_help())
            if options.app_id is not None:
                raise ParseError("app_id cannot be specified with the commandline option", cls.optparser.format_help())
    @classmethod
    def validate_sql(cls, options):
        """Ensure --sql excludes every other source option and --language.

        Raises:
            ParseError: on conflicting options.
        """
        bool_sql = options.sql is not None
        bool_other_options = options.script_location is not None or options.program is not None or options.cmdline is not None
        # if both are false then no option is specified => raise PraseError
        # if both are true then atleast two option specified => raise ParseError
        if bool_sql == bool_other_options:
            raise ParseError("Exactly One of script location or program or cmdline or sql should be specified", cls.optparser.format_help())
        if bool_sql:
            if options.language is not None:
                raise ParseError("Language cannot be specified with the 'sql' option", cls.optparser.format_help())
    @classmethod
    def validate_script_location(cls, options):
        """Validate --script_location and fold the script into the options.

        Only a local file is accepted (S3 URIs are rejected). The script text
        is loaded and stored in options.sql (for .sql files) or
        options.program (all other accepted extensions), the language is
        inferred from the extension, and options.script_location is cleared.
        Because of this rewriting, the validate_* call order in parse()
        matters: later validators see the rewritten options.

        Raises:
            ParseError: on conflicting options, unreadable files, S3 paths,
                or an unrecognized extension.
        """
        bool_script_location = options.script_location is not None
        bool_other_options = options.program is not None or options.cmdline is not None or options.sql is not None
        # if both are false then no option is specified ==> raise ParseError
        # if both are true then atleast two option specified ==> raise ParseError
        if bool_script_location == bool_other_options:
            raise ParseError("Exactly One of script location or program or cmdline or sql should be specified", cls.optparser.format_help())
        if bool_script_location:
            if options.language is not None:
                raise ParseError("Both script location and language cannot be specified together", cls.optparser.format_help())
            # for now, aws script_location is not supported and throws an error
            if ((options.script_location.find("s3://") != 0) and
                (options.script_location.find("s3n://") != 0)):
                # script location is local file so set the program as the text from the file
                try:
                    q = open(options.script_location).read()
                except IOError as e:
                    raise ParseError("Unable to open script location: %s" %
                                     str(e),
                                     cls.optparser.format_help())
                fileName, fileExtension = os.path.splitext(options.script_location)
                # getting the language of the program from the file extension
                if fileExtension == ".py":
                    options.language = "python"
                elif fileExtension == ".scala":
                    options.language = "scala"
                elif fileExtension == ".R":
                    options.language = "R"
                elif fileExtension == ".sql":
                    options.language = "sql"
                else:
                    raise ParseError("Invalid program type %s. Please choose one from python, scala, R or sql." % str(fileExtension),
                                     cls.optparser.format_help())
            else:
                raise ParseError("Invalid location, Please choose a local file location",
                                 cls.optparser.format_help())
            options.script_location = None
            # "sql" is not a real program language here: route the text to
            # options.sql instead and clear the language marker.
            if options.language == "sql":
                options.sql = q
                options.language = None
            else:
                options.program = q
    @classmethod
    def parse(cls, args):
        """
        Parse command line arguments to construct a dictionary of command
        parameters that can be used to create a command

        Args:
            `args`: sequence of arguments

        Returns:
            Dictionary that can be used in create method

        Raises:
            ParseError: when the arguments are not correct
        """
        try:
            (options, args) = cls.optparser.parse_args(args)
        except OptionParsingError as e:
            raise ParseError(e.msg, cls.optparser.format_help())
        except OptionParsingExit as e:
            return None
        # Order matters: validate_script_location may rewrite options
        # (clearing script_location, filling program/sql), and the later
        # validators run against the rewritten options.
        SparkCommand.validate_program(options)
        SparkCommand.validate_script_location(options)
        SparkCommand.validate_cmdline(options)
        SparkCommand.validate_sql(options)
        if options.macros is not None:
            options.macros = json.loads(options.macros)
        v = vars(options)
        v["command_type"] = "SparkCommand"
        return v
class PrestoCommand(Command):
    """Qubole Presto command: run a Presto query supplied inline or read
    from a script file (local path or S3 URI)."""
    usage = ("prestocmd <submit|run> [options]")
    optparser = GentleOptionParser(usage=usage)
    optparser.add_option("-q", "--query", dest="query", help="query string")
    optparser.add_option("-f", "--script_location", dest="script_location",
                         help="Path where presto query to run is stored. Can be S3 URI or local file path")
    optparser.add_option("--macros", dest="macros",
                         help="expressions to expand macros used in query")
    optparser.add_option("--tags", dest="tags",
                         help="comma-separated list of tags to be associated with the query ( e.g., tag1 tag1,tag2 )")
    optparser.add_option("--cluster-label", dest="label",
                         help="the label of the cluster to run the command on")
    optparser.add_option("--notify", action="store_true", dest="can_notify",
                         default=False, help="sends an email on command completion")
    optparser.add_option("--name", dest="name",
                         help="Assign a name to this query")
    optparser.add_option("--print-logs", action="store_true", dest="print_logs",
                         default=False, help="Fetch logs and print them to stderr.")
    optparser.add_option("--print-logs-live", action="store_true", dest="print_logs_live",
                         default=False, help="Fetch logs and print them to stderr while command is running.")
    # optparse compares the raw command-line token against `choices`, so the
    # choices must be strings; with the previous [1, 2, 3] every use of
    # --retry failed with "invalid choice". The value is normalized back to
    # int in parse().
    optparser.add_option("--retry", dest="retry", default=0, choices=["1", "2", "3"],
                         help="Number of retries for a job")
    @classmethod
    def parse(cls, args):
        """
        Parse command line arguments to construct a dictionary of command
        parameters that can be used to create a command

        Args:
            `args`: sequence of arguments

        Returns:
            Dictionary that can be used in create method

        Raises:
            ParseError: when the arguments are not correct
        """
        try:
            (options, args) = cls.optparser.parse_args(args)
            if options.query is None and options.script_location is None:
                raise ParseError("One of query or script location"
                                 " must be specified",
                                 cls.optparser.format_help())
        except OptionParsingError as e:
            raise ParseError(e.msg, cls.optparser.format_help())
        except OptionParsingExit as e:
            return None
        if options.script_location is not None:
            if options.query is not None:
                raise ParseError(
                    "Both query and script_location cannot be specified",
                    cls.optparser.format_help())
            if ((options.script_location.find("s3://") != 0) and
                (options.script_location.find("s3n://") != 0)):
                # script location is a local file: inline its contents as
                # the query text
                try:
                    q = open(options.script_location).read()
                except IOError as e:
                    raise ParseError("Unable to open script location: %s" %
                                     str(e),
                                     cls.optparser.format_help())
                options.script_location = None
                options.query = q
        if options.macros is not None:
            options.macros = json.loads(options.macros)
        v = vars(options)
        # --retry arrives as a string when given on the command line (see
        # choices above); keep the public value an int like the default 0.
        v["retry"] = int(v["retry"])
        v["command_type"] = "PrestoCommand"
        return v
class HadoopCommand(Command):
    """Qubole Hadoop command: runs one of the jar, s3distcp or streaming
    sub-commands with its positional arguments."""
    subcmdlist = ["jar", "s3distcp", "streaming"]
    usage = "hadoopcmd <submit|run> [options] <%s> <arg1> [arg2] ..." % "|".join(subcmdlist)
    optparser = GentleOptionParser(usage=usage)
    optparser.add_option("--cluster-label", dest="label",
                         help="the label of the cluster to run the command on")
    optparser.add_option("--notify", action="store_true", dest="can_notify",
                         default=False, help="sends an email on command completion")
    optparser.add_option("--name", dest="name",
                         help="Assign a name to this command")
    optparser.add_option("--tags", dest="tags",
                         help="comma-separated list of tags to be associated with the query ( e.g., tag1 tag1,tag2 )")
    optparser.add_option("--print-logs", action="store_true", dest="print_logs",
                         default=False, help="Fetch logs and print them to stderr.")
    optparser.add_option("--print-logs-live", action="store_true", dest="print_logs_live",
                         default=False, help="Fetch logs and print them to stderr while command is running.")
    # optparse compares the raw command-line token against `choices`, so the
    # choices must be strings; with the previous [1, 2, 3] every use of
    # --retry failed with "invalid choice".
    optparser.add_option("--retry", dest="retry", default=0, choices=["1", "2", "3"],
                         help="Number of retries for a job")
    # positional sub-command arguments must not be parsed as options
    optparser.disable_interspersed_args()
    @classmethod
    def parse(cls, args):
        """
        Parse command line arguments to construct a dictionary of command
        parameters that can be used to create a command

        Args:
            `args`: sequence of arguments

        Returns:
            Dictionary that can be used in create method

        Raises:
            ParseError: when the arguments are not correct
        """
        parsed = {}
        try:
            (options, args) = cls.optparser.parse_args(args)
        except OptionParsingError as e:
            raise ParseError(e.msg, cls.optparser.format_help())
        except OptionParsingExit as e:
            return None
        parsed['label'] = options.label
        parsed['can_notify'] = options.can_notify
        parsed['name'] = options.name
        parsed['tags'] = options.tags
        parsed["command_type"] = "HadoopCommand"
        parsed['print_logs'] = options.print_logs
        parsed['print_logs_live'] = options.print_logs_live
        # --retry was previously parsed but silently dropped; forward it,
        # normalized to int (the CLI value is a string, the default is 0).
        parsed['retry'] = int(options.retry)
        if len(args) < 2:
            raise ParseError("Need at least two arguments", cls.usage)
        subcmd = args.pop(0)
        if subcmd not in cls.subcmdlist:
            # include the usage text, consistent with the other ParseErrors
            raise ParseError("First argument must be one of <%s>" %
                             "|".join(cls.subcmdlist),
                             cls.usage)
        parsed["sub_command"] = subcmd
        parsed["sub_command_args"] = " ".join("'" + str(a) + "'" for a in args)
        return parsed
class ShellCommand(Command):
    """Qubole shell command: run a bash script supplied inline or read from
    a script file (local path or S3 URI), with optional positional args."""

    usage = ("shellcmd <submit|run> [options] [arg1] [arg2] ...")

    optparser = GentleOptionParser(usage=usage)
    optparser.add_option("-s", "--script", dest="inline", help="inline script that can be executed by bash")
    optparser.add_option("-f", "--script_location", dest="script_location",
                         help="Path where bash script to run is stored. Can be S3 URI or local file path")
    optparser.add_option("-i", "--files", dest="files",
                         help="List of files [optional] Format : file1,file2 (files in s3 bucket) These files will be copied to the working directory where the command is executed")
    optparser.add_option("-a", "--archives", dest="archives",
                         help="List of archives [optional] Format : archive1,archive2 (archives in s3 bucket) These are unarchived in the working directory where the command is executed")
    optparser.add_option("--cluster-label", dest="label",
                         help="the label of the cluster to run the command on")
    optparser.add_option("--notify", action="store_true", dest="can_notify",
                         default=False, help="sends an email on command completion")
    optparser.add_option("--tags", dest="tags",
                         help="comma-separated list of tags to be associated with the query ( e.g., tag1 tag1,tag2 )")
    optparser.add_option("--name", dest="name",
                         help="Assign a name to this command")
    optparser.add_option("--print-logs", action="store_true", dest="print_logs",
                         default=False, help="Fetch logs and print them to stderr.")
    optparser.add_option("--print-logs-live", action="store_true", dest="print_logs_live",
                         default=False, help="Fetch logs and print them to stderr while command is running.")

    @classmethod
    def parse(cls, args):
        """
        Parse command line arguments to construct a dictionary of command
        parameters that can be used to create a command

        Args:
            `args`: sequence of arguments

        Returns:
            Dictionary that can be used in create method

        Raises:
            ParseError: when the arguments are not correct
        """
        try:
            options, extra_args = cls.optparser.parse_args(args)
            if options.inline is None and options.script_location is None:
                raise ParseError("One of script or it's location"
                                 " must be specified",
                                 cls.optparser.format_help())
        except OptionParsingError as err:
            raise ParseError(err.msg, cls.optparser.format_help())
        except OptionParsingExit:
            return None

        if options.script_location is None:
            # Inline script only: positional arguments are not allowed.
            if extra_args:
                raise ParseError(
                    "Extra arguments can only be supplied with a script_location",
                    cls.optparser.format_help())
        else:
            if options.inline is not None:
                raise ParseError(
                    "Both script and script_location cannot be specified",
                    cls.optparser.format_help())

            is_s3_uri = (options.script_location.startswith("s3://") or
                         options.script_location.startswith("s3n://"))
            if not is_s3_uri:
                # Local file: inline its contents as the script text.
                try:
                    script_text = open(options.script_location).read()
                except IOError as err:
                    raise ParseError("Unable to open script location: %s" %
                                     str(err),
                                     cls.optparser.format_help())
                options.script_location = None
                options.inline = script_text

            if extra_args:
                if options.inline is not None:
                    raise ParseError(
                        "Extra arguments can only be "
                        "supplied with a script_location in S3 right now",
                        cls.optparser.format_help())
                setattr(options, 'parameters',
                        " ".join(pipes.quote(arg) for arg in extra_args))

        params = vars(options)
        params["command_type"] = "ShellCommand"
        return params
class PigCommand(Command):
    """Qubole Pig command: run Pig Latin statements supplied inline or read
    from a script file, with optional k=v parameter substitution."""
    usage = ("pigcmd <submit|run> [options] [key1=value1] [key2=value2] ...")
    optparser = GentleOptionParser(usage=usage)
    optparser.add_option("-s", "--script", dest="latin_statements",
                         help="latin statements that has to be executed")
    optparser.add_option("-f", "--script_location", dest="script_location",
                         help="Path where bash script to run is stored. Can be S3 URI or local file path")
    optparser.add_option("--cluster-label", dest="label",
                         help="the label of the cluster to run the command on")
    optparser.add_option("--notify", action="store_true", dest="can_notify",
                         default=False, help="sends an email on command completion")
    optparser.add_option("--tags", dest="tags",
                         help="comma-separated list of tags to be associated with the query ( e.g., tag1 tag1,tag2 )")
    optparser.add_option("--name", dest="name",
                         help="Assign a name to this command")
    optparser.add_option("--print-logs", action="store_true", dest="print_logs",
                         default=False, help="Fetch logs and print them to stderr.")
    optparser.add_option("--print-logs-live", action="store_true", dest="print_logs_live",
                         default=False, help="Fetch logs and print them to stderr while command is running.")
    # optparse compares the raw command-line token against `choices`, so the
    # choices must be strings; with the previous [1, 2, 3] every use of
    # --retry failed with "invalid choice". Normalized back to int in parse().
    optparser.add_option("--retry", dest="retry", choices=["1", "2", "3"], default=0,
                         help="Number of retries for a job")
    @classmethod
    def parse(cls, args):
        """
        Parse command line arguments to construct a dictionary of command
        parameters that can be used to create a command

        Args:
            `args`: sequence of arguments

        Returns:
            Dictionary that can be used in create method

        Raises:
            ParseError: when the arguments are not correct
        """
        try:
            (options, args) = cls.optparser.parse_args(args)
            if options.latin_statements is None and options.script_location is None:
                raise ParseError("One of script or it's location"
                                 " must be specified",
                                 cls.optparser.format_help())
        except OptionParsingError as e:
            raise ParseError(e.msg, cls.optparser.format_help())
        except OptionParsingExit as e:
            return None
        if options.script_location is not None:
            if options.latin_statements is not None:
                raise ParseError(
                    "Both script and script_location cannot be specified",
                    cls.optparser.format_help())
            if ((options.script_location.find("s3://") != 0) and
                (options.script_location.find("s3n://") != 0)):
                # script location is a local file: inline its contents
                try:
                    s = open(options.script_location).read()
                except IOError as e:
                    raise ParseError("Unable to open script location: %s" %
                                     str(e),
                                     cls.optparser.format_help())
                options.script_location = None
                options.latin_statements = s
            if (args is not None) and (len(args) > 0):
                if options.latin_statements is not None:
                    raise ParseError(
                        "Extra arguments can only be "
                        "supplied with a script_location in S3 right now",
                        cls.optparser.format_help())
                # positional args are pig parameters of the exact form k=v
                p = {}
                for a in args:
                    kv = a.split('=')
                    if len(kv) != 2:
                        raise ParseError("Arguments to pig script must be of this format k1=v1 k2=v2 k3=v3...")
                    p[kv[0]] = kv[1]
                setattr(options, 'parameters', p)
        else:
            if (args is not None) and (len(args) > 0):
                raise ParseError(
                    "Extra arguments can only be supplied with a script_location",
                    cls.optparser.format_help())
        v = vars(options)
        # --retry arrives as a string when given (see choices above); keep
        # the public value an int like the default 0.
        v["retry"] = int(v["retry"])
        v["command_type"] = "PigCommand"
        return v
class DbExportCommand(Command):
    """Qubole DbExport command: export data into an external database table,
    either from a Hive table (mode 1) or from an HDFS/S3 dir (mode 2)."""
    usage = ("dbexportcmd <submit|run> [options]")
    optparser = GentleOptionParser(usage=usage)
    optparser.add_option("-m", "--mode", dest="mode",
                         help="Can be 1 for Hive export or 2 for HDFS/S3 export")
    optparser.add_option("--hive_table", dest="hive_table",
                         help="Mode 1: Name of the Hive Table from which data will be exported")
    optparser.add_option("--partition_spec", dest="partition_spec",
                         help="Mode 1: (optional) Partition specification for Hive table")
    optparser.add_option("--dbtap_id", dest="dbtap_id",
                         help="Modes 1 and 2: DbTap Id of the target database in Qubole")
    optparser.add_option("--db_table", dest="db_table",
                         help="Modes 1 and 2: Table to export to in the target database")
    optparser.add_option("--db_update_mode", dest="db_update_mode",
                         help="Modes 1 and 2: (optional) can be 'allowinsert' or "
                              "'updateonly'. If updateonly is "
                              "specified - only existing rows are updated. If allowinsert "
                              "is specified - then existing rows are updated and non existing "
                              "rows are inserted. If this option is not specified - then the "
                              "given the data will be appended to the table")
    optparser.add_option("--db_update_keys", dest="db_update_keys",
                         help="Modes 1 and 2: Columns used to determine the uniqueness of rows for "
                              "'updateonly' mode")
    optparser.add_option("--export_dir", dest="export_dir",
                         help="Mode 2: HDFS/S3 location from which data will be exported")
    optparser.add_option("--fields_terminated_by", dest="fields_terminated_by",
                         help="Mode 2: Hex of the char used as column separator "
                              "in the dataset, for eg. \0x20 for space")
    optparser.add_option("--notify", action="store_true", dest="can_notify",
                         default=False, help="sends an email on command completion")
    optparser.add_option("--tags", dest="tags",
                         help="comma-separated list of tags to be associated with the query ( e.g., tag1 tag1,tag2 )")
    optparser.add_option("--name", dest="name",
                         help="Assign a name to this command")
    optparser.add_option("--print-logs", action="store_true", dest="print_logs",
                         default=False, help="Fetch logs and print them to stderr.")
    optparser.add_option("--print-logs-live", action="store_true", dest="print_logs_live",
                         default=False, help="Fetch logs and print them to stderr while command is running.")
    # optparse compares the raw command-line token against `choices`, so the
    # choices must be strings; with the previous [1, 2, 3] every use of
    # --retry failed with "invalid choice". Normalized back to int in parse().
    optparser.add_option("--retry", dest="retry", default=0, choices=["1", "2", "3"],
                         help="Number of retries for a job")
    @classmethod
    def parse(cls, args):
        """
        Parse command line arguments to construct a dictionary of command
        parameters that can be used to create a command

        Args:
            `args`: sequence of arguments

        Returns:
            Dictionary that can be used in create method

        Raises:
            ParseError: when the arguments are not correct
        """
        try:
            (options, args) = cls.optparser.parse_args(args)
            if options.mode not in ["1", "2"]:
                raise ParseError("mode must be either '1' or '2'",
                                 cls.optparser.format_help())
            if (options.dbtap_id is None) or (options.db_table is None):
                raise ParseError("dbtap_id and db_table are required",
                                 cls.optparser.format_help())
            # Compare with ==, not `is`: the values come from the command
            # line and are not guaranteed to be the interned literal objects,
            # so the previous identity checks could silently misfire.
            if options.mode == "1":
                if options.hive_table is None:
                    raise ParseError("hive_table is required for mode 1",
                                     cls.optparser.format_help())
            elif options.export_dir is None:  # mode 2
                raise ParseError("export_dir is required for mode 2",
                                 cls.optparser.format_help())
            if options.db_update_mode is not None:
                if options.db_update_mode not in ["allowinsert", "updateonly"]:
                    raise ParseError("db_update_mode should either be left blank for append "
                                     "mode or be 'updateonly' or 'allowinsert'",
                                     cls.optparser.format_help())
                if options.db_update_mode == "updateonly":
                    if options.db_update_keys is None:
                        raise ParseError("db_update_keys is required when db_update_mode "
                                         "is 'updateonly'",
                                         cls.optparser.format_help())
                elif options.db_update_keys is not None:
                    raise ParseError("db_update_keys is used only when db_update_mode "
                                     "is 'updateonly'",
                                     cls.optparser.format_help())
        except OptionParsingError as e:
            raise ParseError(e.msg, cls.optparser.format_help())
        except OptionParsingExit as e:
            return None
        v = vars(options)
        # --retry arrives as a string when given (see choices above); keep
        # the public value an int like the default 0.
        v["retry"] = int(v["retry"])
        v["command_type"] = "DbExportCommand"
        return v
# Alias with the alternate capitalization. NOTE(review): presumably kept for
# backward compatibility with older callers - verify before removing.
# It inherits parse() unchanged, so the resulting dict still carries
# command_type "DbExportCommand".
class DbexportCommand(DbExportCommand):
    pass
class DbImportCommand(Command):
    """Qubole DbImport command: import data from an external database, either
    into Hive (mode 1) or via a direct extract query (mode 2)."""
    usage = "dbimportcmd <submit|run> [options]"
    optparser = GentleOptionParser(usage=usage)
    optparser.add_option("-m", "--mode", dest="mode",
                         help="Can be 1 for Hive export or 2 for HDFS/S3 export")
    optparser.add_option("--hive_table", dest="hive_table",
                         help="Mode 1: Name of the Hive Table from which data will be exported")
    optparser.add_option("--dbtap_id", dest="dbtap_id",
                         help="Modes 1 and 2: DbTap Id of the target database in Qubole")
    optparser.add_option("--db_table", dest="db_table",
                         help="Modes 1 and 2: Table to export to in the target database")
    optparser.add_option("--where_clause", dest="db_where",
                         help="Mode 1: where clause to be applied to the table before extracting rows to be imported")
    optparser.add_option("--parallelism", dest="db_parallelism",
                         help="Mode 1 and 2: Number of parallel threads to use for extracting data")
    optparser.add_option("--extract_query", dest="db_extract_query",
                         help="Modes 2: SQL query to be applied at the source database for extracting data. "
                              "$CONDITIONS must be part of the where clause")
    optparser.add_option("--boundary_query", dest="db_boundary_query",
                         help="Mode 2: query to be used get range of rowids to be extracted")
    optparser.add_option("--split_column", dest="db_split_column",
                         help="column used as rowid to split data into range")
    optparser.add_option("--notify", action="store_true", dest="can_notify",
                         default=False, help="sends an email on command completion")
    optparser.add_option("--tags", dest="tags",
                         help="comma-separated list of tags to be associated with the query ( e.g., tag1 tag1,tag2 )")
    optparser.add_option("--name", dest="name",
                         help="Assign a name to this command")
    optparser.add_option("--print-logs", action="store_true", dest="print_logs",
                         default=False, help="Fetch logs and print them to stderr.")
    optparser.add_option("--print-logs-live", action="store_true", dest="print_logs_live",
                         default=False, help="Fetch logs and print them to stderr while command is running.")
    # optparse compares the raw command-line token against `choices`, so the
    # choices must be strings; with the previous [1, 2, 3] every use of
    # --retry failed with "invalid choice". Normalized back to int in parse().
    optparser.add_option("--retry", dest="retry", default=0, choices=["1", "2", "3"],
                         help="Number of retries for a job")
    @classmethod
    def parse(cls, args):
        """
        Parse command line arguments to construct a dictionary of command
        parameters that can be used to create a command

        Args:
            `args`: sequence of arguments

        Returns:
            Dictionary that can be used in create method

        Raises:
            ParseError: when the arguments are not correct
        """
        try:
            (options, args) = cls.optparser.parse_args(args)
            if options.mode not in ["1", "2"]:
                raise ParseError("mode must be either '1' or '2'",
                                 cls.optparser.format_help())
            if (options.dbtap_id is None) or (options.db_table is None):
                raise ParseError("dbtap_id and db_table are required",
                                 cls.optparser.format_help())
            # TODO: Semantic checks for parameters in mode 1 and 2
        except OptionParsingError as e:
            raise ParseError(e.msg, cls.optparser.format_help())
        except OptionParsingExit as e:
            return None
        v = vars(options)
        # --retry arrives as a string when given (see choices above); keep
        # the public value an int like the default 0.
        v["retry"] = int(v["retry"])
        v["command_type"] = "DbImportCommand"
        return v
class CompositeCommand(Command):

    @classmethod
    def compose(cls, sub_commands, macros=None, cluster_label=None, notify=False, name=None, tags=None):
        """
        Build the parameter dictionary for a composite command.

        Args:
            `sub_commands`: list of sub-command dicts

        Returns:
            Dictionary that can be used in create method

        Example Usage:
            cmd1 = HiveCommand.parse(['--query', "show tables"])
            cmd2 = PigCommand.parse(['--script_location', "s3://paid-qubole/PigAPIDemo/scripts/script1-hadoop-s3-small.pig"])
            composite = CompositeCommand.compose([cmd1, cmd2])
            cmd = CompositeCommand.run(**composite)
        """
        # Macros arrive as a JSON string; decode them (or leave as None).
        decoded_macros = None if macros is None else json.loads(macros)
        return dict(sub_commands=sub_commands,
                    command_type="CompositeCommand",
                    macros=decoded_macros,
                    label=cluster_label,
                    tags=tags,
                    can_notify=notify,
                    name=name)
class DbTapQueryCommand(Command):
    """Runs a query directly against a database registered in Qubole as a
    DbTap."""

    usage = "dbtapquerycmd <submit|run> [options]"

    optparser = GentleOptionParser(usage=usage)
    optparser.add_option("--db_tap_id", dest="db_tap_id",
                         help="dbTap Id of the target database in Qubole")
    optparser.add_option("-q", "--query", dest="query", help="query string")
    optparser.add_option("--notify", action="store_true", dest="can_notify",
                         default=False, help="sends an email on command completion")
    optparser.add_option("--macros", dest="macros",
                         help="expressions to expand macros used in query")
    optparser.add_option("--tags", dest="tags",
                         help="comma-separated list of tags to be associated with the query ( e.g., tag1 tag1,tag2 )")
    optparser.add_option("--name", dest="name",
                         help="Assign a name to this command")
    optparser.add_option("--print-logs", action="store_true", dest="print_logs",
                         default=False, help="Fetch logs and print them to stderr.")
    optparser.add_option("--print-logs-live", action="store_true", dest="print_logs_live",
                         default=False, help="Fetch logs and print them to stderr while command is running.")

    @classmethod
    def parse(cls, args):
        """
        Parse command line arguments to construct a dictionary of command
        parameters that can be used to create a command

        Args:
            `args`: sequence of arguments

        Returns:
            Dictionary that can be used in create method

        Raises:
            ParseError: when the arguments are not correct
        """
        try:
            options, args = cls.optparser.parse_args(args)
            # both the target DbTap and the query text are mandatory
            for value, label in ((options.db_tap_id, "db_tap_id"),
                                 (options.query, "query")):
                if value is None:
                    raise ParseError("%s is required" % label,
                                     cls.optparser.format_help())
        except OptionParsingError as err:
            raise ParseError(err.msg, cls.optparser.format_help())
        except OptionParsingExit:
            return None

        if options.macros is not None:
            options.macros = json.loads(options.macros)
        payload = vars(options)
        payload["command_type"] = "DbTapQueryCommand"
        return payload
def _read_iteratively(key_instance, fp, delim):
key_instance.open_read()
while True:
try:
# Default buffer size is 8192 bytes
data = next(key_instance)
if sys.version_info < (3, 0, 0):
fp.write(str(data).replace(chr(1), delim))
else:
import io
if isinstance(fp, io.TextIOBase):
fp.buffer.write(data.decode('utf-8').replace(chr(1), delim).encode('utf8'))
elif isinstance(fp, io.BufferedIOBase) or isinstance(fp, io.RawIOBase):
fp.write(data.decode('utf8').replace(chr(1), delim).encode('utf8'))
else:
# Can this happen? Don't know what's the right thing to do in this case.
pass
except StopIteration:
# Stream closes itself when the exception is raised
return
def _download_to_local(boto_conn, s3_path, fp, num_result_dir, delim=None, skip_data_avail_check=False):
    '''
    Downloads the contents of all objects in s3_path into fp

    Args:
        `boto_conn`: S3 connection object

        `s3_path`: S3 path to be downloaded; a trailing '/' marks a folder

        `fp`: The file object where data is to be downloaded

        `num_result_dir`: number of result directories expected, or -1 to
            skip the completeness check

        `delim`: optional output delimiter; when given, the Hive column
            separator (0x01) in the data is replaced with it

        `skip_data_avail_check`: when True, do not wait for all result
            directories to appear before downloading
    '''
    #Progress bar to display download progress
    def _callback(downloaded, total):
        '''
        Call function for upload.

        `downloaded`: File size already downloaded (int)

        `total`: Total file size to be downloaded (int)

        NOTE: currently unused - the get_contents_to_file calls below have
        the cb argument commented out.
        '''
        # == rather than `is`: identity comparison with an int literal is
        # fragile and only works because CPython caches small ints
        if (total == 0) or (downloaded == total):
            return
        # floor division keeps progress an int; true division would yield a
        # float on Python 3 and '#' * progress would raise TypeError
        progress = downloaded * 100 // total
        sys.stderr.write('\r[{0}] {1}%'.format('#'*progress, progress))
        sys.stderr.flush()

    def _is_complete_data_available(bucket_paths, num_result_dir):
        """Return True once every expected result directory is listed and no
        numbered part file is missing from any of them."""
        if num_result_dir == -1:
            return True
        unique_paths = set()
        files = {}
        for one_path in bucket_paths:
            name = one_path.name.replace(key_prefix, "", 1)
            if name.startswith('_tmp.'):
                continue
            path = name.split("/")
            dir = path[0].replace("_$folder$", "", 1)
            unique_paths.add(dir)
            if len(path) > 1:
                file = int(path[1])
                if dir not in files:
                    files[dir] = []
                files[dir].append(file)
        if len(unique_paths) < num_result_dir:
            return False
        for k in files:
            v = files.get(k)
            # part files are numbered consecutively from 0; a gap means some
            # files have not appeared yet
            if len(v) > 0 and max(v) + 1 > len(v):
                return False
        return True

    m = _URI_RE.match(s3_path)
    bucket_name = m.group(1)
    bucket = boto_conn.get_bucket(bucket_name)
    retries = 6
    if not s3_path.endswith('/'):
        #It is a file
        key_name = m.group(2)
        key_instance = bucket.get_key(key_name)
        # S3 listing is eventually consistent; poll for up to a minute
        while key_instance is None and retries > 0:
            retries = retries - 1
            log.info("Results file is not available on s3. Retry: " + str(6-retries))
            time.sleep(10)
            key_instance = bucket.get_key(key_name)
        if key_instance is None:
            raise Exception("Results file not available on s3 yet. This can be because of s3 eventual consistency issues.")
        log.info("Downloading file from %s" % s3_path)
        if delim is None:
            key_instance.get_contents_to_file(fp)  # cb=_callback
        else:
            # Get contents as string. Replace parameters and write to file.
            _read_iteratively(key_instance, fp, delim=delim)
    else:
        #It is a folder
        key_prefix = m.group(2)
        bucket_paths = bucket.list(key_prefix)
        if not skip_data_avail_check:
            complete_data_available = _is_complete_data_available(bucket_paths, num_result_dir)
            while complete_data_available is False and retries > 0:
                retries = retries - 1
                log.info("Results dir is not available on s3. Retry: " + str(6-retries))
                time.sleep(10)
                complete_data_available = _is_complete_data_available(bucket_paths, num_result_dir)
            if complete_data_available is False:
                raise Exception("Results file not available on s3 yet. This can be because of s3 eventual consistency issues.")

        for one_path in bucket_paths:
            name = one_path.name
            # Eliminate _tmp_ files which ends with $folder$
            if name.endswith('$folder$'):
                continue
            log.info("Downloading file from %s" % name)
            if delim is None:
                one_path.get_contents_to_file(fp)  # cb=_callback
            else:
                _read_iteratively(one_path, fp, delim=delim)
| apache-2.0 |
joeyinbox/pointing-gesture-recognition | src/relevancy.py | 1 | 9582 | #! /usr/bin/python
from classes.Dataset import *
from classes.DatasetManager import *
from classes.Settings import *
from classes.Utils import *
import numpy as np
# Definition of the Relevancy class
class Relevancy():
    # Computes and prints training/testing/validating repartition statistics
    # for the positive and negative gesture datasets.

    # Load required classes
    datasetManager = DatasetManager()
    settings = Settings()
    utils = Utils()

    # Dataset split names
    repartition = [
        "training",
        "testing",
        "validating"
    ]

    # Pointing directions featured in the datasets
    direction = [
        "back-right",
        "right",
        "front-right",
        "front",
        "front-left",
        "left",
        "back-left"
    ]

    # Hand orientations (positive samples only)
    orientation = [
        "up",
        "lateral",
        "down"
    ]

    # Hand poses used as negative samples
    negativeType = [
        "closed",
        "opened",
        "four",
        "three",
        "peace",
        "rock"
    ]

    # Returns the repartition between positive and negative files
    #
    # @param None
    # @return tuple Tuple of the repartition for positive and negative files
    def getRepartition(self):
        # Get detailed counts; renamed loop variables so the class attributes
        # (self.repartition etc.) are not shadowed.
        positive = {}
        negative = {}
        for split in self.repartition:
            positive[split] = {}
            negative[split] = {}
            for direction in self.direction:
                positive[split][direction] = {}
                negative[split][direction] = {}
                for orientation in self.orientation:
                    positive[split][direction][orientation] = self.getDetailedPositiveRepartition(split, direction, orientation)
                for negativeType in self.negativeType:
                    negative[split][direction][negativeType] = self.getDetailedNegativeRepartition(split, direction, negativeType)
        return (positive, negative)

    # Returns the number of files in a given positive folder
    #
    # @param type Type of dataset
    # @param direction Direction featured in the dataset
    # @param orientation Orientation featured in the dataset
    # @return numeric Number of files in a given positive folder
    def getDetailedPositiveRepartition(self, type, direction, orientation=""):
        return self.utils.getFileNumberInFolder(self.settings.getPositiveFolder() + type + "/" + direction + "/" + orientation + "/")

    # Returns the number of files in a given negative folder
    #
    # @param type Type of dataset
    # @param direction Direction featured in the dataset
    # @param orientation Orientation featured in the dataset
    # @return numeric Number of files in a given negative folder
    def getDetailedNegativeRepartition(self, type, direction, orientation=""):
        return self.utils.getFileNumberInFolder(self.settings.getNegativeFolder() + type + "/" + direction + "/" + orientation + "/")

    # Display the general repartition
    #
    # @param None
    # @return None
    def showRepartition(self):
        positive, negative = self.getRepartition()
        # print(...) with a single argument behaves identically on
        # Python 2 and 3 (the original mixed py2 print statements and calls).
        print("\n\nPositive repartition\n")
        positive = self.showPositiveRepartition(positive)
        print("\n\nNegative repartition\n")
        negative = self.showNegativeRepartition(negative)
        print("\n\nTotal repartition\n")
        self.showTotalRepartition(positive, negative)

    # Display and returns the positive repartition
    #
    # @param positive Array of all positive file repartition
    # @return dict Informations about the repartition of the positive dataset
    def showPositiveRepartition(self, positive):
        # Positive samples are broken down by hand orientation; the column
        # shift threshold (10) matches the original layout.
        return self._showDetailedRepartition(positive, self.orientation, 10)

    # Display and returns the negative repartition
    #
    # @param negative Array of all negative file repartition
    # @return dict Informations about the repartition of the negative dataset
    def showNegativeRepartition(self, negative):
        # Negative samples are broken down by hand pose; threshold 11 as in
        # the original layout.
        return self._showDetailedRepartition(negative, self.negativeType, 11)

    # Shared implementation of the positive/negative repartition tables
    # (the two original methods were copy-paste duplicates).
    #
    # @param data Nested counts: data[split][direction][category]
    # @param categories Second-level categories (orientations or poses)
    # @param shiftThreshold Label length below which a tab is inserted
    # @return dict Totals for the dataset
    def _showDetailedRepartition(self, data, categories, shiftThreshold):
        totalTraining = 0
        totalTesting = 0
        totalValidating = 0
        for direction in self.direction:
            training = 0
            testing = 0
            validating = 0
            for category in categories:
                if len(direction + category) < shiftThreshold:
                    shift = "\t"
                else:
                    shift = ""
                print("--- {0} {1}{2}\tTraining: {3} \t\tTesting: {4} \t\tValidating: {5}".format(direction, category, shift, data["training"][direction][category], data["testing"][direction][category], data["validating"][direction][category]))
                training += data["training"][direction][category]
                testing += data["testing"][direction][category]
                validating += data["validating"][direction][category]
            # NOTE(review): tmp == 0 (no files at all for a direction) raises
            # ZeroDivisionError -- unchanged from the original behaviour.
            tmp = training + testing + validating
            totalTraining += training
            totalTesting += testing
            totalValidating += validating
            print("--- {0}\t\tTraining: {1} ({2:0.0f}%) \tTesting: {3} ({4:0.0f}%) \tValidating: {5} ({6:0.0f}%)\n---".format(direction,
                                                                                                                              training,
                                                                                                                              (training / float(tmp)) * 100,
                                                                                                                              testing,
                                                                                                                              (testing / float(tmp)) * 100,
                                                                                                                              validating,
                                                                                                                              (validating / float(tmp)) * 100))
        total = totalTraining + totalTesting + totalValidating
        print("--- Total: {0} \t\tTraining: {1} ({2:0.0f}%) \tTesting: {3} ({4:0.0f}%) \tValidating: {5} ({6:0.0f}%)".format(total,
                                                                                                                             totalTraining,
                                                                                                                             (totalTraining / float(total)) * 100,
                                                                                                                             totalTesting,
                                                                                                                             (totalTesting / float(total)) * 100,
                                                                                                                             totalValidating,
                                                                                                                             (totalValidating / float(total)) * 100))
        return {"total": total, "totalTraining": totalTraining, "totalTesting": totalTesting, "totalValidating": totalValidating}

    # Display the general repartition
    #
    # @param positive Array of all positive file repartition informations
    # @param negative Array of all negative file repartition informations
    # @return None
    def showTotalRepartition(self, positive, negative):
        total = positive["total"] + negative["total"]
        totalTraining = positive["totalTraining"] + negative["totalTraining"]
        totalTesting = positive["totalTesting"] + negative["totalTesting"]
        totalValidating = positive["totalValidating"] + negative["totalValidating"]
        print("--- Positive:\t{0} \tTraining: {1} ({2:0.0f}%) \tTesting: {3} ({4:0.0f}%) \tValidating: {5} ({6:0.0f}%)".format(positive["total"],
                                                                                                                               positive["totalTraining"],
                                                                                                                               (positive["totalTraining"] / float(positive["total"])) * 100,
                                                                                                                               positive["totalTesting"],
                                                                                                                               (positive["totalTesting"] / float(positive["total"])) * 100,
                                                                                                                               positive["totalValidating"],
                                                                                                                               (positive["totalValidating"] / float(positive["total"])) * 100))
        print("--- Negative:\t{0} \tTraining: {1} ({2:0.0f}%) \tTesting: {3} ({4:0.0f}%) \tValidating: {5} ({6:0.0f}%)".format(negative["total"],
                                                                                                                               negative["totalTraining"],
                                                                                                                               (negative["totalTraining"] / float(negative["total"])) * 100,
                                                                                                                               negative["totalTesting"],
                                                                                                                               (negative["totalTesting"] / float(negative["total"])) * 100,
                                                                                                                               negative["totalValidating"],
                                                                                                                               (negative["totalValidating"] / float(negative["total"])) * 100))
        print("--- Total:\t{0} \tTraining: {1} ({2:0.0f}%) \tTesting: {3} ({4:0.0f}%) \tValidating: {5} ({6:0.0f}%)".format(total,
                                                                                                                            totalTraining,
                                                                                                                            (totalTraining / float(total)) * 100,
                                                                                                                            totalTesting,
                                                                                                                            (totalTesting / float(total)) * 100,
                                                                                                                            totalValidating,
                                                                                                                            (totalValidating / float(total)) * 100))
# Entry point: build the app and print the dataset repartition tables.
if __name__ == "__main__":
    app = Relevancy()
    app.showRepartition()
jmalacho/ansible-examples | filter_plugins/etc_hosts_filters.py | 1 | 1086 | # Jinja2 filters for use in building dictionary with /etc/hosts to template
# Initialize a dictionary's keys with a default value (useful for presetting each value to an array)
def initDict(in_default, keyarray):
    """Return a dict mapping each key in *keyarray* to *in_default*.

    Mutable defaults (e.g. the empty list this filter is documented to be
    used with) are shallow-copied per key: the original assigned the SAME
    object to every key, so appending to one host's list mutated all of
    them (mergeInventory appends per host).
    """
    import copy
    result = {}
    for key in keyarray:
        result[key] = copy.copy(in_default)
    return result
# Appends the hostname with subdomain first, then its shortname
def mergeInventory(in_dict, domain, groups, hostvars):
    """Append each host's FQDN (hostname.domain) first, then its short
    hostname, to that host's alias list in *in_dict*.

    The original body read ``groups[group]`` with ``group`` never defined
    (a guaranteed NameError); iterating every group in *groups* is the
    evident intent given the plural parameter name.
    NOTE(review): a host present in several groups gets duplicate aliases
    -- confirm whether callers ever put one host in multiple groups.
    """
    for group in groups:
        for host in groups[group]:
            in_dict[host].append(hostvars[host]["hostname"] + "." + domain)
            in_dict[host].append(hostvars[host]["hostname"])
    return in_dict
# Merges a dictionary of arrays by appending to each array.
# Used to merge in the dictionary of host aliases.
def mergeDictOfArrays(in_dict, update):
    """Merge *update* into *in_dict* by extending each key's list in place.

    Missing keys are created with an empty list first. Uses ``.items()``
    instead of the Python-2-only ``.iteritems()`` so the filter works on
    both interpreters.
    """
    for key, values in update.items():
        in_dict.setdefault(key, [])
        in_dict[key] += values
    return in_dict
# Boilerplate code to add filter to Jinja2
class FilterModule(object):
    """Boilerplate that registers the /etc/hosts helpers as Jinja2 filters."""

    def filters(self):
        """Return the mapping of filter name to implementation."""
        filter_map = {
            'initDict': initDict,
            'mergeInventory': mergeInventory,
            'mergeDictOfArrays': mergeDictOfArrays,
        }
        return filter_map
| apache-2.0 |
atareao/calendar-indicator | src/rfc3339.py | 1 | 17200 | #rfc3339.py -- Implementation of the majority of RFC 3339 for python.
# Copyright (c) 2008, 2009, 2010 LShift Ltd. <query@lshift.net>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
Implementation of the majority of http://www.ietf.org/rfc/rfc3339.txt.
Use datetime.datetime.isoformat() as an inverse of the various parsing
routines in this module.
Limitations, with respect to RFC 3339:
- Section 4.3, "Unknown Local Offset Convention", is not implemented.
- Section 5.6, "Internet Date/Time Format", is the ONLY supported format
implemented by the various parsers in this module. (Section 5.6 is
reproduced in its entirety below.)
- Section 5.7, "Restrictions", is left to the datetime.datetime constructor
to implement, with the exception of limits on timezone
minutes-east-of-UTC magnitude. In particular, leap seconds are not
addressed by this module. (And it appears that they are not supported
by datetime, either.)
Potential Improvements:
- Support for leap seconds. (There's a table of them in RFC 3339 itself,
and http://tf.nist.gov/pubs/bulletin/leapsecond.htm updates monthly.)
Extensions beyond the RFC:
- Accepts (but will not generate) dates formatted with a time-offset
missing a colon. (Implemented because Facebook are generating
broken RFC 3339 timestamps.)
Here's an excerpt from RFC 3339 itself:
5.6. Internet Date/Time Format
The following profile of ISO 8601 [ISO8601] dates SHOULD be used in
new protocols on the Internet. This is specified using the syntax
description notation defined in [ABNF].
date-fullyear = 4DIGIT
date-month = 2DIGIT ; 01-12
date-mday = 2DIGIT ; 01-28, 01-29, 01-30, 01-31 based on
; month/year
time-hour = 2DIGIT ; 00-23
time-minute = 2DIGIT ; 00-59
time-second = 2DIGIT ; 00-58, 00-59, 00-60 based on leap second
; rules
time-secfrac = "." 1*DIGIT
time-numoffset = ("+" / "-") time-hour ":" time-minute
time-offset = "Z" / time-numoffset
partial-time = time-hour ":" time-minute ":" time-second
[time-secfrac]
full-date = date-fullyear "-" date-month "-" date-mday
full-time = partial-time time-offset
date-time = full-date "T" full-time
NOTE: Per [ABNF] and ISO8601, the "T" and "Z" characters in this
syntax may alternatively be lower case "t" or "z" respectively.
This date/time format may be used in some environments or contexts
that distinguish between the upper- and lower-case letters 'A'-'Z'
and 'a'-'z' (e.g. XML). Specifications that use this format in
such environments MAY further limit the date/time syntax so that
the letters 'T' and 'Z' used in the date/time syntax must always
be upper case. Applications that generate this format SHOULD use
upper case letters.
NOTE: ISO 8601 defines date and time separated by "T".
Applications using this syntax may choose, for the sake of
readability, to specify a full-date and full-time separated by
(say) a space character.
"""
import datetime, time, calendar
import re
# Public API: names exported by `from rfc3339 import *`.
__all__ = [
    'tzinfo',
    'UTC_TZ',
    'parse_date',
    'parse_datetime',
    'parse_time',
    'now',
    'utcfromtimestamp',
    'utctotimestamp',
    'datetimetostr',
    'timestamptostr',
    'strtotimestamp',
    'timetostr',
]
# Zero-length interval reused by dst().
ZERO = datetime.timedelta(0)


class tzinfo(datetime.tzinfo):
    """A fixed-offset implementation of datetime.tzinfo."""

    def __init__(self, minutesEast=0, name='Z'):
        """
        minutesEast -> minutes east of UTC represented by this tzinfo.
        name -> symbolic (but uninterpreted) name of this tzinfo.
        """
        self.minutesEast = minutesEast
        self.offset = datetime.timedelta(minutes=minutesEast)
        self.name = name

    def utcoffset(self, dt):
        """The fixed offset supplied at construction, as a timedelta."""
        return self.offset

    def dst(self, dt):
        """Fixed offset means no daylight saving: always a zero timedelta."""
        return ZERO

    def tzname(self, dt):
        """The symbolic name supplied at construction."""
        return self.name

    def __repr__(self):
        """Non-zero offsets show offset and name; UTC prints as the module singleton."""
        if self.minutesEast != 0:
            return "rfc3339.tzinfo({0},{1})".format(self.minutesEast, repr(self.name))
        return "rfc3339.UTC_TZ"
# Module-level UTC singleton (repr() of tzinfo special-cases it).
UTC_TZ = tzinfo(0, 'Z')

# Raw sub-patterns for the RFC 3339 'full-date' and 'full-time' productions.
# The ':?' in the offset tolerates the colon-less offsets some producers emit.
date_re_str = r'(\d\d\d\d)-(\d\d)-(\d\d)'
time_re_str = r'(\d\d):(\d\d):(\d\d)(\.(\d+))?([zZ]|(([-+])(\d\d):?(\d\d)))'


def make_re(*parts):
    """Join *parts* into one pattern anchored at both ends, tolerating
    surrounding whitespace, and return it compiled."""
    pattern = r'^\s*' + ''.join(parts) + r'\s*$'
    return re.compile(pattern)


date_re = make_re(date_re_str)
datetime_re = make_re(date_re_str, r'[ tT]', time_re_str)
time_re = make_re(time_re_str)
def parse_date(s):
    """
    Parse a string matching the RFC 3339 'full-date' production into a
    datetime.date. Any deviation from the allowed format raises
    ValueError (either from the format check here or from the
    datetime.date constructor's range checks).

    >>> parse_date("2008-08-24")
    datetime.date(2008, 8, 24)
    >>> parse_date(" 2008-08-24 ")
    datetime.date(2008, 8, 24)
    >>> parse_date("2008-08-24").isoformat()
    '2008-08-24'
    """
    match = date_re.match(s)
    if not match:
        raise ValueError('Invalid RFC 3339 date string', s)
    year, month, day = match.groups()
    return datetime.date(int(year), int(month), int(day))
def _offset_to_tzname(offset):
"""
Converts an offset in minutes to an RFC 3339 "time-offset" string.
>>> _offset_to_tzname(0)
'+00:00'
>>> _offset_to_tzname(-1)
'-00:01'
>>> _offset_to_tzname(-60)
'-01:00'
>>> _offset_to_tzname(-779)
'-12:59'
>>> _offset_to_tzname(1)
'+00:01'
>>> _offset_to_tzname(60)
'+01:00'
>>> _offset_to_tzname(779)
'+12:59'
"""
offset = int(offset)
if offset < 0:
tzsign = '-'
else:
tzsign = '+'
offset = abs(offset)
tzhour = offset / 60
tzmin = offset % 60
return '%s%02d:%02d' % (tzsign, tzhour, tzmin)
def _parse_time_components(s, hour, min, sec, frac_sec, wholetz, tzsign, tzhour, tzmin):
    """Convert raw regex captures for an RFC 3339 'full-time' into typed
    components.

    Returns (hour, minute, second, microsecond, tzinfo); *s* and *wholetz*
    are used for error reporting and timezone classification only.
    """
    frac = float('0.' + frac_sec) if frac_sec else 0
    # Round half up to the nearest microsecond.
    microsec = int((frac * 1000000) + 0.5)
    if wholetz in ('z', 'Z'):
        tz = UTC_TZ
    else:
        offset = int(tzhour) * 60 + int(tzmin)
        if offset == 0:
            tz = UTC_TZ
        elif int(tzhour) > 24 or int(tzmin) > 60 or offset > 1439:
            # 1439 is the largest offset datetime.tzinfo permits.
            raise ValueError('Invalid timezone offset', s, wholetz)
        else:
            if tzsign == '-':
                offset = -offset
            tz = tzinfo(offset, _offset_to_tzname(offset))
    return int(hour), int(min), int(sec), microsec, tz
def parse_time(s):
    """
    Parse a string matching the RFC 3339 'full-time' production into a
    datetime.time. Any deviation from the allowed format raises ValueError.

    >>> parse_time("00:00:00Z")
    datetime.time(0, 0, tzinfo=rfc3339.UTC_TZ)
    >>> parse_time("00:00:00+00:00")
    datetime.time(0, 0, tzinfo=rfc3339.UTC_TZ)
    >>> parse_time("00:00:00-01:23")
    datetime.time(0, 0, tzinfo=rfc3339.tzinfo(-83,'-01:23'))
    """
    match = time_re.match(s)
    if match is None:
        raise ValueError('Invalid RFC 3339 time string', s)
    (hour, min, sec, _unused1, frac_sec, wholetz, _unused2,
     tzsign, tzhour, tzmin) = match.groups()
    hour, min, sec, microsec, tz = _parse_time_components(
        s, hour, min, sec, frac_sec, wholetz, tzsign, tzhour, tzmin)
    return datetime.time(hour, min, sec, microsec, tz)
def parse_datetime(s):
    """
    Parse a string matching the RFC 3339 'date-time' production into a
    datetime.datetime. Any deviation from the allowed format raises
    ValueError.

    As an extension beyond the RFC, time-offsets missing the colon
    (e.g. "+0100", as generated by Facebook) are accepted on input but
    never produced on output.

    >>> parse_datetime("2008-08-24T00:00:00Z")
    datetime.datetime(2008, 8, 24, 0, 0, tzinfo=rfc3339.UTC_TZ)
    >>> parse_datetime("2008-08-24T00:00:00-01:23")
    datetime.datetime(2008, 8, 24, 0, 0, tzinfo=rfc3339.tzinfo(-83,'-01:23'))
    >>> parse_datetime("2008-08-24T00:00:11.25Z")
    datetime.datetime(2008, 8, 24, 0, 0, 11, 250000, tzinfo=rfc3339.UTC_TZ)
    >>> parse_datetime("2008-08-24T00:00:00+0100").isoformat()
    '2008-08-24T00:00:00+01:00'
    """
    match = datetime_re.match(s)
    if match is None:
        raise ValueError('Invalid RFC 3339 datetime string', s)
    (y, m, d, hour, min, sec, _unused1, frac_sec, wholetz, _unused2,
     tzsign, tzhour, tzmin) = match.groups()
    hour, min, sec, microsec, tz = _parse_time_components(
        s, hour, min, sec, frac_sec, wholetz, tzsign, tzhour, tzmin)
    return datetime.datetime(
        int(y), int(m), int(d), hour, min, sec, microsec, tz)
def now():
    """Return the current moment (time.time()) as a timezone-aware
    datetime.datetime in rfc3339.UTC_TZ -- an aware replacement for the
    naive datetime.datetime.now()."""
    current = time.time()
    return utcfromtimestamp(current)
def utcfromtimestamp(unix_epoch_timestamp):
    """Interpret *unix_epoch_timestamp* as seconds since the Unix epoch and
    return the corresponding datetime.datetime in rfc3339.UTC_TZ."""
    year, month, day, hour, minute, second = time.gmtime(unix_epoch_timestamp)[:6]
    return datetime.datetime(year, month, day, hour, minute, second, 0, UTC_TZ)
def utctotimestamp(dt):
    """Return the seconds elapsed between the Unix epoch and *dt*,
    computed from its UTC time tuple."""
    utc_tuple = dt.utctimetuple()
    return calendar.timegm(utc_tuple)
def timetostr(t):
    """
    Return an RFC 3339 time string for the given datetime.time.
    Naive times and zero offsets are rendered with the 'Z' suffix;
    non-zero offsets use isoformat()'s '+HH:MM' form.

    >>> timetostr(datetime.time(0, 0, tzinfo=UTC_TZ))
    '00:00:00Z'
    >>> timetostr(datetime.time(0, 0, tzinfo=tzinfo(60, '+01:00')))
    '00:00:00+01:00'
    """
    offset = t.utcoffset()
    if offset is None:
        return "%sZ" % t.isoformat()
    if offset != ZERO:
        return t.isoformat()
    return "%sZ" % t.replace(tzinfo=None).isoformat()
def datetimetostr(dt):
    """
    Return an RFC 3339 date-time string for the given datetime object.
    Both a missing timezone and a zero offset are special-cased to 'Z'
    instead of '+00:00'.

    >>> datetimetostr(datetime.datetime(2008, 8, 24, 0, 0, tzinfo=UTC_TZ))
    '2008-08-24T00:00:00Z'
    >>> datetimetostr(datetime.datetime(2008, 8, 24, 0, 0))
    '2008-08-24T00:00:00Z'
    >>> datetimetostr(datetime.datetime(2008, 8, 24, 0, 0, tzinfo=tzinfo(60, '+01:00')))
    '2008-08-24T00:00:00+01:00'
    """
    offset = dt.utcoffset()
    if offset is None:
        return "%sZ" % dt.isoformat()
    if offset == ZERO:
        return "%sZ" % dt.replace(tzinfo=None).isoformat()
    return dt.isoformat()
def timestamptostr(ts):
    """Render the Unix-epoch timestamp *ts* as an RFC 3339 date-time string."""
    return datetimetostr(utcfromtimestamp(ts))


def strtotimestamp(s):
    """Parse the RFC 3339 date-time string *s* into a Unix-epoch timestamp."""
    parsed = parse_datetime(s)
    return utctotimestamp(parsed)
zlfben/gem5 | src/arch/sparc/SparcISA.py | 71 | 2254 | # Copyright (c) 2012 ARM Limited
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Andreas Sandberg
from m5.SimObject import SimObject
# Python-side SimObject declaration binding the SPARC ISA model to its
# C++ implementation (SparcISA::ISA, declared in arch/sparc/isa.hh).
class SparcISA(SimObject):
    type = 'SparcISA'
    cxx_class = 'SparcISA::ISA'
    cxx_header = "arch/sparc/isa.hh"
| bsd-3-clause |
aequitas/home-assistant | homeassistant/components/rpi_pfio/switch.py | 7 | 2354 | """Support for switches using the PiFace Digital I/O module on a RPi."""
import logging
import voluptuous as vol
from homeassistant.components import rpi_pfio
from homeassistant.components.switch import PLATFORM_SCHEMA
from homeassistant.const import ATTR_NAME, DEVICE_DEFAULT_NAME
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import ToggleEntity
_LOGGER = logging.getLogger(__name__)

# Keys used in the platform's YAML configuration.
ATTR_INVERT_LOGIC = 'invert_logic'
CONF_PORTS = 'ports'

DEFAULT_INVERT_LOGIC = False

# Schema for a single output port: optional display name plus invert flag.
PORT_SCHEMA = vol.Schema({
    vol.Optional(ATTR_NAME): cv.string,
    vol.Optional(ATTR_INVERT_LOGIC, default=DEFAULT_INVERT_LOGIC): cv.boolean,
})

# Platform schema: mapping of port number -> PORT_SCHEMA.
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Optional(CONF_PORTS, default={}): vol.Schema({
        cv.positive_int: PORT_SCHEMA,
    })
})
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up one switch entity per configured PiFace Digital output port."""
    entities = []
    for port, port_config in config.get(CONF_PORTS).items():
        entities.append(RPiPFIOSwitch(port,
                                      port_config.get(ATTR_NAME),
                                      port_config[ATTR_INVERT_LOGIC]))
    add_entities(entities)
class RPiPFIOSwitch(ToggleEntity):
    """A single PiFace Digital output port exposed as a switch."""

    def __init__(self, port, name, invert_logic):
        """Initialize the pin and drive it to the 'off' level."""
        self._port = port
        self._name = name if name else DEVICE_DEFAULT_NAME
        self._invert_logic = invert_logic
        self._state = False
        self._write(False)

    def _write(self, on):
        """Drive the physical output, honouring inverted logic."""
        if self._invert_logic:
            rpi_pfio.write_output(self._port, 0 if on else 1)
        else:
            rpi_pfio.write_output(self._port, 1 if on else 0)

    @property
    def name(self):
        """Name of the switch."""
        return self._name

    @property
    def should_poll(self):
        """This entity does not require polling."""
        return False

    @property
    def is_on(self):
        """True if the output is driven on."""
        return self._state

    def turn_on(self, **kwargs):
        """Turn the device on."""
        self._write(True)
        self._state = True
        self.schedule_update_ha_state()

    def turn_off(self, **kwargs):
        """Turn the device off."""
        self._write(False)
        self._state = False
        self.schedule_update_ha_state()
| apache-2.0 |
nelsonsar/ansible | contrib/inventory/vbox.py | 55 | 3084 | #!/usr/bin/env python
# This file is part of Ansible,
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
import sys
from subprocess import Popen,PIPE
# Prefer the stdlib json module; fall back to simplejson on old interpreters.
try:
    import json
except ImportError:
    import simplejson as json

# Name of the VirtualBox command-line management binary.
VBOX="VBoxManage"
def get_hosts(host=None):
    """Build an Ansible inventory from VBoxManage output.

    When *host* is given, runs `VBoxManage showvminfo <host>` and returns
    that machine's hostvars dict; otherwise runs `VBoxManage list -l vms`
    and returns a mapping of group name -> set of VM names, plus a
    '_metadata' entry carrying all hostvars.

    The original bare ``except:`` clauses (which also swallowed
    SystemExit/KeyboardInterrupt) are narrowed to the exceptions the
    guarded statements can actually raise.
    """
    returned = {}
    try:
        if host:
            p = Popen([VBOX, 'showvminfo', host], stdout=PIPE)
        else:
            returned = {'all': set(), '_metadata': {}}
            p = Popen([VBOX, 'list', '-l', 'vms'], stdout=PIPE)
    except OSError:
        # VBoxManage binary missing or not executable.
        sys.exit(1)

    hostvars = {}
    prevkey = pref_k = ''

    for line in p.stdout.readlines():
        try:
            k, v = line.split(':', 1)
        except ValueError:
            # Not a "key: value" line; skip it.
            continue
        if k == '':
            continue
        v = v.strip()

        if k.startswith('Name'):
            if v not in hostvars:
                curname = v
                hostvars[curname] = {}
                try:  # best effort: ask the guest for its IPv4 address
                    x = Popen([VBOX, 'guestproperty', 'get', curname, "/VirtualBox/GuestInfo/Net/0/V4/IP"], stdout=PIPE)
                    ipinfo = x.stdout.read()
                    if 'Value' in ipinfo:
                        a, ip = ipinfo.split(':', 1)
                        hostvars[curname]['ansible_ssh_host'] = ip.strip()
                except (OSError, ValueError):
                    pass
            continue

        if not host:
            if k == 'Groups':
                for group in v.split('/'):
                    if group:
                        if group not in returned:
                            returned[group] = set()
                        returned[group].add(curname)
                returned['all'].add(curname)
                continue

        pref_k = 'vbox_' + k.strip().replace(' ', '_')
        if k.startswith(' '):
            # Indented keys are sub-entries of the previous top-level key.
            if prevkey not in hostvars[curname]:
                hostvars[curname][prevkey] = {}
            hostvars[curname][prevkey][pref_k] = v
        else:
            if v != '':
                hostvars[curname][pref_k] = v
            prevkey = pref_k

    if not host:
        returned['_metadata']['hostvars'] = hostvars
    else:
        # NOTE(review): raises KeyError if the requested host was not in the
        # VBoxManage output -- unchanged from the original behaviour.
        returned = hostvars[host]
    return returned
if __name__ == '__main__':
    # Minimal Ansible dynamic-inventory CLI: `--host <name>` prints a single
    # host's variables, otherwise the full inventory is printed.
    hostname = None
    if len(sys.argv) > 1 and sys.argv[1] == "--host":
        hostname = sys.argv[2]
    inventory = get_hosts(hostname) if hostname else get_hosts()
    import pprint
    pprint.pprint(inventory)
larsw/guessit | guessit/transfo/guess_episode_special.py | 11 | 2935 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# GuessIt - A library for guessing information from filenames
# Copyright (c) 2013 Nicolas Wack <wackou@gmail.com>
#
# GuessIt is free software; you can redistribute it and/or modify it under
# the terms of the Lesser GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# GuessIt is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Lesser GNU General Public License for more details.
#
# You should have received a copy of the Lesser GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import absolute_import, division, print_function, unicode_literals
from guessit.plugins.transformers import Transformer
from guessit.matcher import found_guess
from guessit.containers import PropertiesContainer
class GuessEpisodeSpecial(Transformer):
    """Detect "special" episodes (pilots, OVAs, extras, bonuses, ...).

    Registered with a late priority (-205) so that it runs after the main
    episode transformers have populated the match tree.
    """
    def __init__(self):
        Transformer.__init__(self, -205)
        container = PropertiesContainer()
        container.register_property('special', 'Special', 'Bonus', 'Omake', 'Ova', 'Oav', 'Pilot', 'Unaired')
        container.register_property('special', 'Extras?', canonical_form='Extras')
        self.container = container

    def guess_special(self, string, node=None, options=None):
        """Return every 'special' guess found in *string*."""
        found = self.container.find_properties(string, node, 'special', multiple=True)
        return self.container.as_guess(found, multiple=True)

    def second_pass_options(self, mtree, options=None):
        """Force the 'episode' type when an unidentified leaf is a special."""
        if mtree.guess.get('type', '').startswith('episode'):
            return None
        for leaf in mtree.unidentified_leaves():
            found = self.container.find_properties(leaf.value, leaf, 'special')
            if self.container.as_guess(found):
                return {'type': 'episode'}
        return None

    def supported_properties(self):
        return self.container.get_supported_properties()

    def process(self, mtree, options=None):
        """Tag 'special' markers on title/unidentified leaves of episodes
        that have no episode number (or season 0)."""
        looks_special = (mtree.guess.get('type', '').startswith('episode')
                         and (not mtree.info.get('episodeNumber')
                              or mtree.info.get('season') == 0))
        if not looks_special:
            return None
        for leaf in mtree.leaves_containing('title'):
            for guess in self.guess_special(leaf.value, leaf, options):
                found_guess(leaf, guess, update_guess=False)
        for leaf in mtree.unidentified_leaves():
            for guess in self.guess_special(leaf.value, leaf, options):
                found_guess(leaf, guess, update_guess=False)
        return None
| lgpl-3.0 |
flyher/pymo | symbian/PythonForS60/module-repo/standard-modules/email/charset.py | 93 | 15684 | # Copyright (C) 2001-2006 Python Software Foundation
# Author: Ben Gertzfield, Barry Warsaw
# Contact: email-sig@python.org
# Public API of this module.
__all__ = [
    'Charset',
    'add_alias',
    'add_charset',
    'add_codec',
    ]
import email.base64mime
import email.quoprimime
from email import errors
from email.encoders import encode_7or8bit
# Flags for types of header encodings
QP = 1 # Quoted-Printable
BASE64 = 2 # Base64
SHORTEST = 3 # the shorter of QP and base64, but only for headers
# In "=?charset?q?hello_world?=", the =?, ?q?, and ?= add up to 7
MISC_LEN = 7
# RFC 2045 default charset for text without an explicit one.
DEFAULT_CHARSET = 'us-ascii'
# Default registry.  Maps canonical charset name to a triple:
#   (header encoding, body encoding, output conversion charset)
# where an encoding of None means "no encoding needed" and a conversion
# charset of None means "output in the input charset".
CHARSETS = {
    # input header enc body enc output conv
    'iso-8859-1': (QP, QP, None),
    'iso-8859-2': (QP, QP, None),
    'iso-8859-3': (QP, QP, None),
    'iso-8859-4': (QP, QP, None),
    # iso-8859-5 is Cyrillic, and not especially used
    # iso-8859-6 is Arabic, also not particularly used
    # iso-8859-7 is Greek, QP will not make it readable
    # iso-8859-8 is Hebrew, QP will not make it readable
    'iso-8859-9': (QP, QP, None),
    'iso-8859-10': (QP, QP, None),
    # iso-8859-11 is Thai, QP will not make it readable
    'iso-8859-13': (QP, QP, None),
    'iso-8859-14': (QP, QP, None),
    'iso-8859-15': (QP, QP, None),
    'windows-1252':(QP, QP, None),
    'viscii': (QP, QP, None),
    'us-ascii': (None, None, None),
    'big5': (BASE64, BASE64, None),
    'gb2312': (BASE64, BASE64, None),
    'euc-jp': (BASE64, None, 'iso-2022-jp'),
    'shift_jis': (BASE64, None, 'iso-2022-jp'),
    'iso-2022-jp': (BASE64, None, None),
    'koi8-r': (BASE64, BASE64, None),
    'utf-8': (SHORTEST, BASE64, 'utf-8'),
    # We're making this one up to represent raw unencoded 8-bit
    '8bit': (None, BASE64, 'utf-8'),
    }
# Aliases for other commonly-used names for character sets.  Map
# them to the real ones used in email.
ALIASES = {
    'latin_1': 'iso-8859-1',
    'latin-1': 'iso-8859-1',
    'latin_2': 'iso-8859-2',
    'latin-2': 'iso-8859-2',
    'latin_3': 'iso-8859-3',
    'latin-3': 'iso-8859-3',
    'latin_4': 'iso-8859-4',
    'latin-4': 'iso-8859-4',
    'latin_5': 'iso-8859-9',
    'latin-5': 'iso-8859-9',
    'latin_6': 'iso-8859-10',
    'latin-6': 'iso-8859-10',
    'latin_7': 'iso-8859-13',
    'latin-7': 'iso-8859-13',
    'latin_8': 'iso-8859-14',
    'latin-8': 'iso-8859-14',
    'latin_9': 'iso-8859-15',
    'latin-9': 'iso-8859-15',
    'cp949': 'ks_c_5601-1987',
    'euc_jp': 'euc-jp',
    'euc_kr': 'euc-kr',
    'ascii': 'us-ascii',
    }
# Map charsets to their Unicode codec strings, where the codec name
# differs from the charset name.  A value of None means "do not convert".
CODEC_MAP = {
    'gb2312': 'eucgb2312_cn',
    'big5': 'big5_tw',
    # Hack: We don't want *any* conversion for stuff marked us-ascii, as all
    # sorts of garbage might be sent to us in the guise of 7-bit us-ascii.
    # Let that stuff pass through without conversion to/from Unicode.
    'us-ascii': None,
    }
# Convenience functions for extending the above mappings
def add_charset(charset, header_enc=None, body_enc=None, output_charset=None):
    """Register (or override) the email properties of a character set.

    charset must be the canonical name of the input character set.

    header_enc and body_enc describe how headers and bodies in this
    charset are encoded: Charset.QP for quoted-printable, Charset.BASE64
    for base64, Charset.SHORTEST for whichever of QP/base64 is shorter
    (headers only), or None for no encoding at all (the default).

    output_charset is the character set the text should be converted to
    on output (conversion happens input charset -> Unicode ->
    output charset when Charset.convert() is called); the default is to
    output in the input charset.

    Both the input and the output charset must have Unicode codec entries
    in this module's charset-to-codec mapping; register unknown codecs
    with add_codec(charset, codecname) first.  See the codecs module's
    documentation for details.
    """
    # SHORTEST only makes sense for headers, where QP and base64 lengths
    # can be compared per header line.
    if body_enc == SHORTEST:
        raise ValueError('SHORTEST not allowed for body_enc')
    CHARSETS[charset] = (header_enc, body_enc, output_charset)
def add_alias(alias, canonical):
    """Register *alias* as an alternative spelling of a character set.

    alias is the alternate name (e.g. 'latin-1'); canonical is the
    character set's canonical email name (e.g. 'iso-8859-1').
    """
    ALIASES[alias] = canonical
def add_codec(charset, codecname):
    """Register the Python codec that converts *charset* to/from Unicode.

    charset is the canonical name of a character set; codecname is the
    name of a Python codec, suitable as the second argument to the
    unicode() built-in or to the encode() method of a Unicode string.
    """
    CODEC_MAP[charset] = codecname
class Charset:
    """Map character sets to their email properties.

    This class provides information about the requirements imposed on email
    for a specific character set.  It also provides convenience routines for
    converting between character sets, given the availability of the
    applicable codecs.  Given a character set, it will do its best to provide
    information on how to use that character set in an email in an
    RFC-compliant way.

    Certain character sets must be encoded with quoted-printable or base64
    when used in email headers or bodies.  Certain character sets must be
    converted outright, and are not allowed in email.  Instances of this
    class expose the following information about a character set:

    input_charset: The initial character set specified.  Common aliases
        are converted to their `official' email names (e.g. latin_1
        is converted to iso-8859-1).  Defaults to 7-bit us-ascii.

    header_encoding: If the character set must be encoded before it can be
        used in an email header, this attribute will be set to
        Charset.QP (for quoted-printable), Charset.BASE64 (for
        base64 encoding), or Charset.SHORTEST for the shortest of
        QP or BASE64 encoding.  Otherwise, it will be None.

    body_encoding: Same as header_encoding, but describes the encoding for
        the mail message's body, which indeed may be different than the
        header encoding.  Charset.SHORTEST is not allowed for body_encoding.

    output_charset: Some character sets must be converted before they can
        be used in email headers or bodies.  If the input_charset is
        one of them, this attribute will contain the name of the
        charset output will be converted to.  Otherwise, it will be None.

    input_codec: The name of the Python codec used to convert the
        input_charset to Unicode.  If no conversion codec is
        necessary, this attribute will be None.

    output_codec: The name of the Python codec used to convert Unicode
        to the output_charset.  If no conversion codec is necessary,
        this attribute will have the same value as the input_codec.

    NOTE: this module is Python 2 code -- it relies on the `unicode`
    built-in and the `<>` inequality operator below.
    """
    def __init__(self, input_charset=DEFAULT_CHARSET):
        # RFC 2046, $4.1.2 says charsets are not case sensitive.  We coerce to
        # unicode because its .lower() is locale insensitive.  If the argument
        # is already a unicode, we leave it at that, but ensure that the
        # charset is ASCII, as the standard (RFC XXX) requires.
        try:
            if isinstance(input_charset, unicode):
                input_charset.encode('ascii')
            else:
                input_charset = unicode(input_charset, 'ascii')
        except UnicodeError:
            raise errors.CharsetError(input_charset)
        input_charset = input_charset.lower()
        # Set the input charset after filtering through the aliases
        self.input_charset = ALIASES.get(input_charset, input_charset)
        # We can try to guess which encoding and conversion to use by the
        # charset_map dictionary.  Try that first, but let the user override
        # it.  Unknown charsets conservatively get (SHORTEST, BASE64).
        henc, benc, conv = CHARSETS.get(self.input_charset,
                                        (SHORTEST, BASE64, None))
        if not conv:
            conv = self.input_charset
        # Set the attributes, allowing the arguments to override the default.
        self.header_encoding = henc
        self.body_encoding = benc
        self.output_charset = ALIASES.get(conv, conv)
        # Now set the codecs.  If one isn't defined for input_charset,
        # guess and try a Unicode codec with the same name as input_codec.
        self.input_codec = CODEC_MAP.get(self.input_charset,
                                         self.input_charset)
        self.output_codec = CODEC_MAP.get(self.output_charset,
                                          self.output_charset)
    def __str__(self):
        # The canonical (lower-cased) charset name doubles as the repr.
        return self.input_charset.lower()
    __repr__ = __str__
    def __eq__(self, other):
        # Compare case-insensitively against any stringifiable object.
        return str(self) == str(other).lower()
    def __ne__(self, other):
        return not self.__eq__(other)
    def get_body_encoding(self):
        """Return the content-transfer-encoding used for body encoding.

        This is either the string `quoted-printable' or `base64' depending on
        the encoding used, or it is a function in which case you should call
        the function with a single argument, the Message object being
        encoded.  The function should then set the Content-Transfer-Encoding
        header itself to whatever is appropriate.

        Returns "quoted-printable" if self.body_encoding is QP.
        Returns "base64" if self.body_encoding is BASE64.
        Returns "7bit" otherwise.
        """
        # `<>` is the Python 2 spelling of `!=`; SHORTEST is header-only.
        assert self.body_encoding <> SHORTEST
        if self.body_encoding == QP:
            return 'quoted-printable'
        elif self.body_encoding == BASE64:
            return 'base64'
        else:
            # encode_7or8bit is a function: it sets the CTE header itself.
            return encode_7or8bit
    def convert(self, s):
        """Convert a string from the input_codec to the output_codec."""
        if self.input_codec <> self.output_codec:
            return unicode(s, self.input_codec).encode(self.output_codec)
        else:
            return s
    def to_splittable(self, s):
        """Convert a possibly multibyte string to a safely splittable format.

        Uses the input_codec to try and convert the string to Unicode, so it
        can be safely split on character boundaries (even for multibyte
        characters).

        Returns the string as-is if it isn't known how to convert it to
        Unicode with the input_charset.

        Characters that could not be converted to Unicode will be replaced
        with the Unicode replacement character U+FFFD.
        """
        if isinstance(s, unicode) or self.input_codec is None:
            return s
        try:
            return unicode(s, self.input_codec, 'replace')
        except LookupError:
            # Input codec not installed on system, so return the original
            # string unchanged.
            return s
    def from_splittable(self, ustr, to_output=True):
        """Convert a splittable string back into an encoded string.

        Uses the proper codec to try and convert the string from Unicode back
        into an encoded format.  Return the string as-is if it is not Unicode,
        or if it could not be converted from Unicode.

        Characters that could not be converted from Unicode will be replaced
        with an appropriate character (usually '?').

        If to_output is True (the default), uses output_codec to convert to an
        encoded format.  If to_output is False, uses input_codec.
        """
        if to_output:
            codec = self.output_codec
        else:
            codec = self.input_codec
        if not isinstance(ustr, unicode) or codec is None:
            return ustr
        try:
            return ustr.encode(codec, 'replace')
        except LookupError:
            # Output codec not installed
            return ustr
    def get_output_charset(self):
        """Return the output character set.

        This is self.output_charset if that is not None, otherwise it is
        self.input_charset.
        """
        return self.output_charset or self.input_charset
    def encoded_header_len(self, s):
        """Return the length of the encoded header string."""
        cset = self.get_output_charset()
        # The len(s) of a 7bit encoding is len(s); MISC_LEN accounts for
        # the "=?...?x?...?=" framing around an encoded word.
        if self.header_encoding == BASE64:
            return email.base64mime.base64_len(s) + len(cset) + MISC_LEN
        elif self.header_encoding == QP:
            return email.quoprimime.header_quopri_len(s) + len(cset) + MISC_LEN
        elif self.header_encoding == SHORTEST:
            lenb64 = email.base64mime.base64_len(s)
            lenqp = email.quoprimime.header_quopri_len(s)
            return min(lenb64, lenqp) + len(cset) + MISC_LEN
        else:
            return len(s)
    def header_encode(self, s, convert=False):
        """Header-encode a string, optionally converting it to output_charset.

        If convert is True, the string will be converted from the input
        charset to the output charset automatically.  This is not useful for
        multibyte character sets, which have line length issues (multibyte
        characters must be split on a character, not a byte boundary); use the
        high-level Header class to deal with these issues.  convert defaults
        to False.

        The type of encoding (base64 or quoted-printable) will be based on
        self.header_encoding.
        """
        cset = self.get_output_charset()
        if convert:
            s = self.convert(s)
        # 7bit/8bit encodings return the string unchanged (modulo conversions)
        if self.header_encoding == BASE64:
            return email.base64mime.header_encode(s, cset)
        elif self.header_encoding == QP:
            return email.quoprimime.header_encode(s, cset, maxlinelen=None)
        elif self.header_encoding == SHORTEST:
            # Compute both lengths and emit whichever encoding is smaller.
            lenb64 = email.base64mime.base64_len(s)
            lenqp = email.quoprimime.header_quopri_len(s)
            if lenb64 < lenqp:
                return email.base64mime.header_encode(s, cset)
            else:
                return email.quoprimime.header_encode(s, cset, maxlinelen=None)
        else:
            return s
    def body_encode(self, s, convert=True):
        """Body-encode a string and convert it to output_charset.

        If convert is True (the default), the string will be converted from
        the input charset to output charset automatically.  Unlike
        header_encode(), there are no issues with byte boundaries and
        multibyte charsets in email bodies, so this is usually pretty safe.

        The type of encoding (base64 or quoted-printable) will be based on
        self.body_encoding.
        """
        if convert:
            s = self.convert(s)
        # 7bit/8bit encodings return the string unchanged (modulo conversions)
        if self.body_encoding is BASE64:
            return email.base64mime.body_encode(s)
        elif self.body_encoding is QP:
            return email.quoprimime.body_encode(s)
        else:
            return s
| mit |
ttm/oscEmRede | venv/lib/python2.7/site-packages/jinja2/testsuite/regression.py | 414 | 8382 | # -*- coding: utf-8 -*-
"""
jinja2.testsuite.regression
~~~~~~~~~~~~~~~~~~~~~~~~~~~
Tests corner cases and bugs.
:copyright: (c) 2010 by the Jinja Team.
:license: BSD, see LICENSE for more details.
"""
import unittest
from jinja2.testsuite import JinjaTestCase
from jinja2 import Template, Environment, DictLoader, TemplateSyntaxError, \
TemplateNotFound, PrefixLoader
from jinja2._compat import text_type
# Module-level environment shared by the test cases below.
env = Environment()
class CornerTestCase(JinjaTestCase):
    """Corner cases of name scoping with loops, `set` and macros."""
    def test_assigned_scoping(self):
        """A `for` target must not leak: after the loop, `item` resolves to
        the context value or to a sibling `set`, never to the loop value."""
        t = env.from_string('''
        {%- for item in (1, 2, 3, 4) -%}
            [{{ item }}]
        {%- endfor %}
        {{- item -}}
        ''')
        assert t.render(item=42) == '[1][2][3][4]42'
        t = env.from_string('''
        {%- for item in (1, 2, 3, 4) -%}
            [{{ item }}]
        {%- endfor %}
        {%- set item = 42 %}
        {{- item -}}
        ''')
        assert t.render() == '[1][2][3][4]42'
        t = env.from_string('''
        {%- set item = 42 %}
        {%- for item in (1, 2, 3, 4) -%}
            [{{ item }}]
        {%- endfor %}
        {{- item -}}
        ''')
        assert t.render() == '[1][2][3][4]42'
    def test_closure_scoping(self):
        """A macro defined inside a loop closes over the loop variable and
        shadows an outer name only within the loop body."""
        t = env.from_string('''
        {%- set wrapper = "<FOO>" %}
        {%- for item in (1, 2, 3, 4) %}
            {%- macro wrapper() %}[{{ item }}]{% endmacro %}
            {{- wrapper() }}
        {%- endfor %}
        {{- wrapper -}}
        ''')
        assert t.render() == '[1][2][3][4]<FOO>'
        t = env.from_string('''
        {%- for item in (1, 2, 3, 4) %}
            {%- macro wrapper() %}[{{ item }}]{% endmacro %}
            {{- wrapper() }}
        {%- endfor %}
        {%- set wrapper = "<FOO>" %}
        {{- wrapper -}}
        ''')
        assert t.render() == '[1][2][3][4]<FOO>'
        t = env.from_string('''
        {%- for item in (1, 2, 3, 4) %}
            {%- macro wrapper() %}[{{ item }}]{% endmacro %}
            {{- wrapper() }}
        {%- endfor %}
        {{- wrapper -}}
        ''')
        assert t.render(wrapper=23) == '[1][2][3][4]23'
class BugTestCase(JinjaTestCase):
    """Regression tests for individually reported Jinja2 bugs; ticket
    numbers, where known, are cited in the method docstrings/comments."""
    def test_keyword_folding(self):
        # Keyword arguments to filters must survive constant folding.
        env = Environment()
        env.filters['testing'] = lambda value, some: value + some
        assert env.from_string("{{ 'test'|testing(some='stuff') }}") \
               .render() == 'teststuff'
    def test_extends_output_bugs(self):
        # A conditional {% extends %} must suppress the child's own output
        # only when the extends actually triggers.
        env = Environment(loader=DictLoader({
            'parent.html': '(({% block title %}{% endblock %}))'
        }))
        t = env.from_string('{% if expr %}{% extends "parent.html" %}{% endif %}'
                            '[[{% block title %}title{% endblock %}]]'
                            '{% for item in [1, 2, 3] %}({{ item }}){% endfor %}')
        assert t.render(expr=False) == '[[title]](1)(2)(3)'
        assert t.render(expr=True) == '((title))'
    def test_urlize_filter_escaping(self):
        # urlize must not produce broken markup for special characters.
        tmpl = env.from_string('{{ "http://www.example.org/<foo"|urlize }}')
        assert tmpl.render() == '<a href="http://www.example.org/<foo">http://www.example.org/<foo</a>'
    def test_loop_call_loop(self):
        # `loop` inside a {% call %} body must refer to the inner loop.
        tmpl = env.from_string('''
        {% macro test() %}
            {{ caller() }}
        {% endmacro %}
        {% for num1 in range(5) %}
            {% call test() %}
                {% for num2 in range(10) %}
                    {{ loop.index }}
                {% endfor %}
            {% endcall %}
        {% endfor %}
        ''')
        assert tmpl.render().split() == [text_type(x) for x in range(1, 11)] * 5
    def test_weird_inline_comment(self):
        # An inline comment inside a line statement is a syntax error,
        # not a crash.
        env = Environment(line_statement_prefix='%')
        self.assert_raises(TemplateSyntaxError, env.from_string,
                           '% for item in seq {# missing #}\n...% endfor')
    def test_old_macro_loop_scoping_bug(self):
        # A macro named like an earlier loop variable must not clash.
        tmpl = env.from_string('{% for i in (1, 2) %}{{ i }}{% endfor %}'
                               '{% macro i() %}3{% endmacro %}{{ i() }}')
        assert tmpl.render() == '123'
    def test_partial_conditional_assignments(self):
        # A `set` in an untaken branch must leave the context value visible.
        tmpl = env.from_string('{% if b %}{% set a = 42 %}{% endif %}{{ a }}')
        assert tmpl.render(a=23) == '23'
        assert tmpl.render(b=True) == '42'
    def test_stacked_locals_scoping_bug(self):
        # Reassignments in nested frames (line-statement syntax) must be
        # tracked correctly across stacked local scopes.
        env = Environment(line_statement_prefix='#')
        t = env.from_string('''\
        # for j in [1, 2]:
        # set x = 1
        # for i in [1, 2]:
        # print x
        # if i % 2 == 0:
        # set x = x + 1
        # endif
        # endfor
        # endfor
        # if a
        # print 'A'
        # elif b
        # print 'B'
        # elif c == d
        # print 'C'
        # else
        # print 'D'
        # endif
        ''')
        assert t.render(a=0, b=False, c=42, d=42.0) == '1111C'
    def test_stacked_locals_scoping_bug_twoframe(self):
        # Same class of bug, but across exactly two frames.
        t = Template('''
            {% set x = 1 %}
            {% for item in foo %}
                {% if item == 1 %}
                    {% set x = 2 %}
                {% endif %}
            {% endfor %}
            {{ x }}
        ''')
        rv = t.render(foo=[1]).strip()
        assert rv == u'1'
    def test_call_with_args(self):
        # {% call(user) ... %} must pass arguments through to the caller.
        t = Template("""{% macro dump_users(users) -%}
        <ul>
        {%- for user in users -%}
        <li><p>{{ user.username|e }}</p>{{ caller(user) }}</li>
        {%- endfor -%}
        </ul>
        {%- endmacro -%}
        {% call(user) dump_users(list_of_user) -%}
        <dl>
        <dl>Realname</dl>
        <dd>{{ user.realname|e }}</dd>
        <dl>Description</dl>
        <dd>{{ user.description }}</dd>
        </dl>
        {% endcall %}""")
        assert [x.strip() for x in t.render(list_of_user=[{
            'username':'apo',
            'realname':'something else',
            'description':'test'
        }]).splitlines()] == [
            u'<ul><li><p>apo</p><dl>',
            u'<dl>Realname</dl>',
            u'<dd>something else</dd>',
            u'<dl>Description</dl>',
            u'<dd>test</dd>',
            u'</dl>',
            u'</li></ul>'
        ]
    def test_empty_if_condition_fails(self):
        # Empty conditions/iterables in tags are syntax errors.
        self.assert_raises(TemplateSyntaxError, Template, '{% if %}....{% endif %}')
        self.assert_raises(TemplateSyntaxError, Template, '{% if foo %}...{% elif %}...{% endif %}')
        self.assert_raises(TemplateSyntaxError, Template, '{% for x in %}..{% endfor %}')
    def test_recursive_loop_bug(self):
        # These two templates merely have to compile without errors
        # (recursive loops nested inside/alongside plain loops).
        tpl1 = Template("""
        {% for p in foo recursive%}
            {{p.bar}}
            {% for f in p.fields recursive%}
                {{f.baz}}
                {{p.bar}}
                {% if f.rec %}
                    {{ loop(f.sub) }}
                {% endif %}
            {% endfor %}
        {% endfor %}
        """)
        tpl2 = Template("""
        {% for p in foo%}
            {{p.bar}}
            {% for f in p.fields recursive%}
                {{f.baz}}
                {{p.bar}}
                {% if f.rec %}
                    {{ loop(f.sub) }}
                {% endif %}
            {% endfor %}
        {% endfor %}
        """)
    def test_else_loop_bug(self):
        # `loop` must be usable inside the {% else %} branch's nested loop.
        t = Template('''
            {% for x in y %}
                {{ loop.index0 }}
            {% else %}
                {% for i in range(3) %}{{ i }}{% endfor %}
            {% endfor %}
        ''')
        self.assertEqual(t.render(y=[]).strip(), '012')
    def test_correct_prefix_loader_name(self):
        # TemplateNotFound must carry the full prefixed name.
        env = Environment(loader=PrefixLoader({
            'foo': DictLoader({})
        }))
        try:
            env.get_template('foo/bar.html')
        except TemplateNotFound as e:
            assert e.name == 'foo/bar.html'
        else:
            assert False, 'expected error here'
    def test_contextfunction_callable_classes(self):
        # @contextfunction must also work on callable class instances.
        from jinja2.utils import contextfunction
        class CallableClass(object):
            @contextfunction
            def __call__(self, ctx):
                return ctx.resolve('hello')
        tpl = Template("""{{ callableclass() }}""")
        output = tpl.render(callableclass = CallableClass(), hello = 'TEST')
        expected = 'TEST'
        self.assert_equal(output, expected)
def suite():
    """Collect every regression test case into a single test suite."""
    s = unittest.TestSuite()
    for case in (CornerTestCase, BugTestCase):
        s.addTest(unittest.makeSuite(case))
    return s
| gpl-3.0 |
dcroc16/skunk_works | google_appengine/lib/django-1.2/django/contrib/gis/tests/relatedapp/tests.py | 123 | 13948 | from django.test import TestCase
from django.contrib.gis.geos import GEOSGeometry, Point, MultiPoint
from django.contrib.gis.db.models import Collect, Count, Extent, F, Union
from django.contrib.gis.geometry.backend import Geometry
from django.contrib.gis.tests.utils import mysql, oracle, no_mysql, no_oracle, no_spatialite
from models import City, Location, DirectoryEntry, Parcel, Book, Author, Article
class RelatedGeoModelTest(TestCase):
def test02_select_related(self):
"Testing `select_related` on geographic models (see #7126)."
qs1 = City.objects.all()
qs2 = City.objects.select_related()
qs3 = City.objects.select_related('location')
# Reference data for what's in the fixtures.
cities = (
('Aurora', 'TX', -97.516111, 33.058333),
('Roswell', 'NM', -104.528056, 33.387222),
('Kecksburg', 'PA', -79.460734, 40.18476),
)
for qs in (qs1, qs2, qs3):
for ref, c in zip(cities, qs):
nm, st, lon, lat = ref
self.assertEqual(nm, c.name)
self.assertEqual(st, c.state)
self.assertEqual(Point(lon, lat), c.location.point)
@no_mysql
def test03_transform_related(self):
"Testing the `transform` GeoQuerySet method on related geographic models."
# All the transformations are to state plane coordinate systems using
# US Survey Feet (thus a tolerance of 0 implies error w/in 1 survey foot).
tol = 0
def check_pnt(ref, pnt):
self.assertAlmostEqual(ref.x, pnt.x, tol)
self.assertAlmostEqual(ref.y, pnt.y, tol)
self.assertEqual(ref.srid, pnt.srid)
# Each city transformed to the SRID of their state plane coordinate system.
transformed = (('Kecksburg', 2272, 'POINT(1490553.98959621 314792.131023984)'),
('Roswell', 2257, 'POINT(481902.189077221 868477.766629735)'),
('Aurora', 2276, 'POINT(2269923.2484839 7069381.28722222)'),
)
for name, srid, wkt in transformed:
# Doing this implicitly sets `select_related` select the location.
# TODO: Fix why this breaks on Oracle.
qs = list(City.objects.filter(name=name).transform(srid, field_name='location__point'))
check_pnt(GEOSGeometry(wkt, srid), qs[0].location.point)
@no_mysql
@no_spatialite
def test04a_related_extent_aggregate(self):
"Testing the `extent` GeoQuerySet aggregates on related geographic models."
# This combines the Extent and Union aggregates into one query
aggs = City.objects.aggregate(Extent('location__point'))
# One for all locations, one that excludes New Mexico (Roswell).
all_extent = (-104.528056, 29.763374, -79.460734, 40.18476)
txpa_extent = (-97.516111, 29.763374, -79.460734, 40.18476)
e1 = City.objects.extent(field_name='location__point')
e2 = City.objects.exclude(state='NM').extent(field_name='location__point')
e3 = aggs['location__point__extent']
# The tolerance value is to four decimal places because of differences
# between the Oracle and PostGIS spatial backends on the extent calculation.
tol = 4
for ref, e in [(all_extent, e1), (txpa_extent, e2), (all_extent, e3)]:
for ref_val, e_val in zip(ref, e): self.assertAlmostEqual(ref_val, e_val, tol)
@no_mysql
def test04b_related_union_aggregate(self):
"Testing the `unionagg` GeoQuerySet aggregates on related geographic models."
# This combines the Extent and Union aggregates into one query
aggs = City.objects.aggregate(Union('location__point'))
# These are the points that are components of the aggregate geographic
# union that is returned. Each point # corresponds to City PK.
p1 = Point(-104.528056, 33.387222)
p2 = Point(-97.516111, 33.058333)
p3 = Point(-79.460734, 40.18476)
p4 = Point(-96.801611, 32.782057)
p5 = Point(-95.363151, 29.763374)
# Creating the reference union geometry depending on the spatial backend,
# as Oracle will have a different internal ordering of the component
# geometries than PostGIS. The second union aggregate is for a union
# query that includes limiting information in the WHERE clause (in other
# words a `.filter()` precedes the call to `.unionagg()`).
if oracle:
ref_u1 = MultiPoint(p4, p5, p3, p1, p2, srid=4326)
ref_u2 = MultiPoint(p3, p2, srid=4326)
else:
# Looks like PostGIS points by longitude value.
ref_u1 = MultiPoint(p1, p2, p4, p5, p3, srid=4326)
ref_u2 = MultiPoint(p2, p3, srid=4326)
u1 = City.objects.unionagg(field_name='location__point')
u2 = City.objects.exclude(name__in=('Roswell', 'Houston', 'Dallas', 'Fort Worth')).unionagg(field_name='location__point')
u3 = aggs['location__point__union']
self.assertEqual(ref_u1, u1)
self.assertEqual(ref_u2, u2)
self.assertEqual(ref_u1, u3)
def test05_select_related_fk_to_subclass(self):
"Testing that calling select_related on a query over a model with an FK to a model subclass works"
# Regression test for #9752.
l = list(DirectoryEntry.objects.all().select_related())
def test06_f_expressions(self):
"Testing F() expressions on GeometryFields."
# Constructing a dummy parcel border and getting the City instance for
# assigning the FK.
b1 = GEOSGeometry('POLYGON((-97.501205 33.052520,-97.501205 33.052576,-97.501150 33.052576,-97.501150 33.052520,-97.501205 33.052520))', srid=4326)
pcity = City.objects.get(name='Aurora')
# First parcel has incorrect center point that is equal to the City;
# it also has a second border that is different from the first as a
# 100ft buffer around the City.
c1 = pcity.location.point
c2 = c1.transform(2276, clone=True)
b2 = c2.buffer(100)
p1 = Parcel.objects.create(name='P1', city=pcity, center1=c1, center2=c2, border1=b1, border2=b2)
# Now creating a second Parcel where the borders are the same, just
# in different coordinate systems. The center points are also the
# the same (but in different coordinate systems), and this time they
# actually correspond to the centroid of the border.
c1 = b1.centroid
c2 = c1.transform(2276, clone=True)
p2 = Parcel.objects.create(name='P2', city=pcity, center1=c1, center2=c2, border1=b1, border2=b1)
# Should return the second Parcel, which has the center within the
# border.
qs = Parcel.objects.filter(center1__within=F('border1'))
self.assertEqual(1, len(qs))
self.assertEqual('P2', qs[0].name)
if not mysql:
# This time center2 is in a different coordinate system and needs
# to be wrapped in transformation SQL.
qs = Parcel.objects.filter(center2__within=F('border1'))
self.assertEqual(1, len(qs))
self.assertEqual('P2', qs[0].name)
# Should return the first Parcel, which has the center point equal
# to the point in the City ForeignKey.
qs = Parcel.objects.filter(center1=F('city__location__point'))
self.assertEqual(1, len(qs))
self.assertEqual('P1', qs[0].name)
if not mysql:
# This time the city column should be wrapped in transformation SQL.
qs = Parcel.objects.filter(border2__contains=F('city__location__point'))
self.assertEqual(1, len(qs))
self.assertEqual('P1', qs[0].name)
def test07_values(self):
"Testing values() and values_list() and GeoQuerySets."
# GeoQuerySet and GeoValuesQuerySet, and GeoValuesListQuerySet respectively.
gqs = Location.objects.all()
gvqs = Location.objects.values()
gvlqs = Location.objects.values_list()
# Incrementing through each of the models, dictionaries, and tuples
# returned by the different types of GeoQuerySets.
for m, d, t in zip(gqs, gvqs, gvlqs):
# The values should be Geometry objects and not raw strings returned
# by the spatial database.
self.failUnless(isinstance(d['point'], Geometry))
self.failUnless(isinstance(t[1], Geometry))
self.assertEqual(m.point, d['point'])
self.assertEqual(m.point, t[1])
def test08_defer_only(self):
"Testing defer() and only() on Geographic models."
qs = Location.objects.all()
def_qs = Location.objects.defer('point')
for loc, def_loc in zip(qs, def_qs):
self.assertEqual(loc.point, def_loc.point)
def test09_pk_relations(self):
"Ensuring correct primary key column is selected across relations. See #10757."
# The expected ID values -- notice the last two location IDs
# are out of order. Dallas and Houston have location IDs that differ
# from their PKs -- this is done to ensure that the related location
# ID column is selected instead of ID column for the city.
city_ids = (1, 2, 3, 4, 5)
loc_ids = (1, 2, 3, 5, 4)
ids_qs = City.objects.order_by('id').values('id', 'location__id')
for val_dict, c_id, l_id in zip(ids_qs, city_ids, loc_ids):
self.assertEqual(val_dict['id'], c_id)
self.assertEqual(val_dict['location__id'], l_id)
def test10_combine(self):
"Testing the combination of two GeoQuerySets. See #10807."
buf1 = City.objects.get(name='Aurora').location.point.buffer(0.1)
buf2 = City.objects.get(name='Kecksburg').location.point.buffer(0.1)
qs1 = City.objects.filter(location__point__within=buf1)
qs2 = City.objects.filter(location__point__within=buf2)
combined = qs1 | qs2
names = [c.name for c in combined]
self.assertEqual(2, len(names))
self.failUnless('Aurora' in names)
self.failUnless('Kecksburg' in names)
def test11_geoquery_pickle(self):
"Ensuring GeoQuery objects are unpickled correctly. See #10839."
import pickle
from django.contrib.gis.db.models.sql import GeoQuery
qs = City.objects.all()
q_str = pickle.dumps(qs.query)
q = pickle.loads(q_str)
self.assertEqual(GeoQuery, q.__class__)
# TODO: fix on Oracle -- get the following error because the SQL is ordered
# by a geometry object, which Oracle apparently doesn't like:
# ORA-22901: cannot compare nested table or VARRAY or LOB attributes of an object type
@no_oracle
def test12a_count(self):
"Testing `Count` aggregate use with the `GeoManager` on geo-fields."
# The City, 'Fort Worth' uses the same location as Dallas.
dallas = City.objects.get(name='Dallas')
# Count annotation should be 2 for the Dallas location now.
loc = Location.objects.annotate(num_cities=Count('city')).get(id=dallas.location.id)
self.assertEqual(2, loc.num_cities)
def test12b_count(self):
    "Testing `Count` aggregate use with the `GeoManager` on non geo-fields. See #11087."
    # Exactly one author (Trevor Paglen) has more than one book, with an
    # annotated count of 3 (see #11087).  The same check is then repeated
    # through a `GeoValuesQuerySet` (see #11489).
    annotated = Author.objects.annotate(num_books=Count('books'))
    qs = annotated.filter(num_books__gt=1)
    vqs = Author.objects.values('name').annotate(num_books=Count('books')).filter(num_books__gt=1)
    self.assertEqual(1, len(qs))
    self.assertEqual(3, qs[0].num_books)
    self.assertEqual(1, len(vqs))
    self.assertEqual(3, vqs[0]['num_books'])
# TODO: The phantom model does appear on Oracle.
@no_oracle
def test13_select_related_null_fk(self):
    "Testing `select_related` on a nullable ForeignKey via `GeoManager`. See #11381."
    # Fix: the created instance was bound to an unused local (`no_author`);
    # the object is re-fetched through select_related below anyway.
    Book.objects.create(title='Without Author')
    b = Book.objects.select_related('author').get(title='Without Author')
    # Should be `None`, and not a 'dummy' model.
    self.assertEqual(None, b.author)
# collect()/Collect are not supported on these backends, hence the skips.
@no_mysql
@no_oracle
@no_spatialite
def test14_collect(self):
"Testing the `collect` GeoQuerySet method and `Collect` aggregate."
# Reference query:
# SELECT AsText(ST_Collect("relatedapp_location"."point")) FROM "relatedapp_city" LEFT OUTER JOIN
# "relatedapp_location" ON ("relatedapp_city"."location_id" = "relatedapp_location"."id")
# WHERE "relatedapp_city"."state" = 'TX';
ref_geom = GEOSGeometry('MULTIPOINT(-97.516111 33.058333,-96.801611 32.782057,-95.363151 29.763374,-96.801611 32.782057)')
# Exercise both spellings: the GeoQuerySet method and the Collect aggregate
# (whose result key is '<lookup>__collect'); both must yield the reference.
c1 = City.objects.filter(state='TX').collect(field_name='location__point')
c2 = City.objects.filter(state='TX').aggregate(Collect('location__point'))['location__point__collect']
for coll in (c1, c2):
# Even though Dallas and Ft. Worth share same point, Collect doesn't
# consolidate -- that's why 4 points in MultiPoint.
self.assertEqual(4, len(coll))
self.assertEqual(ref_geom, coll)
def test15_invalid_select_related(self):
"Testing doing select_related on the related name manager of a unique FK. See #13934."
qs = Article.objects.select_related('author__article')
# This triggers TypeError when `get_default_columns` has no `local_only`
# keyword. The TypeError is swallowed if QuerySet is actually
# evaluated as list generation swallows TypeError in CPython.
# NOTE: `sql` is intentionally unused -- str(qs.query) alone compiles the
# SQL, which is what would raise under the regression being guarded.
sql = str(qs.query)
# TODO: Related tests for KML, GML, and distance lookups.
| mit |
lukovnikov/qelos | test/test_word.py | 1 | 17269 | from __future__ import print_function
from unittest import TestCase
import qelos as q
from torch.autograd import Variable
import torch
from torch import nn
import numpy as np
class TestWordEmb(TestCase):
    """Unit tests for the basic ``q.WordEmb`` embedding block."""

    def test_creation_simple(self):
        """An unmasked embedder returns one 10-dim vector per input id."""
        worddic = dict(zip(map(chr, range(97, 122)), range(122 - 97)))
        emb = q.WordEmb(10, worddic=worddic)
        vectors, _ = emb(Variable(torch.LongTensor([0, 1, 2])))
        self.assertEqual((3, 10), vectors.size())
        # Row 0 of the output must equal row 0 of the embedding matrix.
        expected = emb.embedding.weight.data.cpu().numpy()[0]
        self.assertTrue(np.allclose(expected, vectors[0].data.numpy()))

    def test_creation_masked(self):
        """Id 0 (the mask token) embeds to zeros and is masked out."""
        worddic = dict(zip(map(chr, range(97, 122)), range(1, 122 - 97 + 1)))
        worddic[q.WordEmb.masktoken] = 0
        emb = q.WordEmb(10, worddic=worddic)
        vectors, mask = emb(Variable(torch.LongTensor([0, 1, 2])))
        self.assertEqual((3, 10), vectors.size())
        expected = emb.embedding.weight.data.cpu().numpy()[1]
        self.assertTrue(np.allclose(expected, vectors[1].data.numpy()))
        # The masked row must come out as all zeros ...
        self.assertTrue(np.allclose(vectors[0].data.numpy(), np.zeros((10,))))
        print(mask)
        # ... and the mask must flag exactly the nonzero input ids.
        self.assertTrue(np.allclose(mask.data.numpy(), [0, 1, 1]))
# Tests for WordEmb.adapt(): remapping the word ids of an existing embedder
# to a new dictionary while reusing the original weight matrix.
class TestAdaptedWordEmb(TestCase):
def setUp(self):
# Original dictionary (sparse ids) and the adaptation target (dense ids).
wdic = {"<MASK>": 0, "<RARE>": 1, "the": 10, "a": 5, "his": 50, "abracadabrqmsd--qsdfmqgf-": 6}
wdic2 = {"<MASK>": 0, "<RARE>": 1, "the": 2, "a": 3, "his": 4, "abracadabrqmsd--qsdfmqgf-": 5, "qsdfqsdf": 7}
self.adapted = q.WordEmb(50, worddic=wdic)
# `vanilla` shares the exact same weight values so vectors can be compared.
self.vanilla = q.WordEmb(50, worddic=wdic, value=self.adapted.embedding.weight.data.numpy())
self.adapted = self.adapted.adapt(wdic2)
# `emb * word` maps a word to its id; `emb % word` retrieves its vector.
def test_map(self):
self.assertEqual(self.adapted * "a", 3)
self.assertEqual(self.adapted * "the", 2)
self.assertEqual(self.adapted * "his", 4)
# Unknown words map to the <RARE> id (1).
self.assertEqual(self.adapted * "her", 1)
self.assertEqual(self.vanilla * "a", 5)
self.assertEqual(self.vanilla * "the", 10)
self.assertEqual(self.vanilla * "her", 1)
self.assertEqual(self.adapted * "qsdfqlmkdsjfmqlsdkjgmqlsjdf", 1)
print(self.vanilla * "the", self.adapted * "the")
# Same word must yield the same vector through either dictionary.
self.assertTrue(np.allclose(self.vanilla % "the", self.adapted % "the"))
self.assertTrue(np.allclose(self.vanilla % "his", self.adapted % "his"))
def test_adapted_block(self):
pred, mask = self.adapted(Variable(torch.LongTensor([self.adapted * x for x in "the a his".split()])))
l = pred.sum()
l.backward()
# Gradients must flow back into the wrapped (inner) embedding's weights.
grad = self.adapted.inner.embedding.weight.grad
self.assertTrue(grad.norm().data.numpy()[0] > 0)
vpred = np.asarray([self.vanilla % x for x in "the a his".split()])
self.assertTrue(np.allclose(pred.data.numpy(), vpred))
oovpred, mask = self.adapted(Variable(torch.LongTensor([6, 7]))) # two different kinds of OOV
print(self.adapted % 6)
print(self.vanilla % self.vanilla.raretoken)
# TODO self.assertTrue(np.allclose(oovpred.datasets.numpy(), np.zeros_like(oovpred.datasets.numpy())))
def test_adapted_prediction_shape(self):
# Batched 2D input: output adds the embedding dim, mask keeps input shape.
xval = np.random.randint(0, 3, (5, 4))
x = Variable(torch.from_numpy(xval))
pred, mask = self.adapted(x)
self.assertEqual(pred.size(), (5, 4, 50))
self.assertEqual(mask.size(), (5, 4))
self.assertTrue(np.allclose(mask.data.numpy(), xval != 0))
class TestWordEmbOverriding(TestCase):
    """Tests for ``WordEmb.override``: words present in the override embedder
    must take its vectors, all other words fall back to the base embedder."""

    def setUp(self):
        base_words = "<MASK> <RARE> the a his monkey inception key earlgrey"
        base_dic = dict(zip(base_words.split(), range(0, len(base_words.split()))))
        over_words = "he his her mine cat monkey the interstellar grey key"
        over_dic = dict(zip(over_words.split(), range(0, len(over_words.split()))))
        self.baseemb = q.WordEmb(dim=50, worddic=base_dic)
        self.overemb = q.WordEmb(dim=50, worddic=over_dic)
        self.emb = self.baseemb.override(self.overemb)
        # Fix: dropped a stray trailing `pass` (dead statement).

    def test_embed_masker(self):
        """The output mask is 1 exactly where the input id is nonzero."""
        ids = Variable(torch.from_numpy(np.random.randint(0, 5, (4, 3))))
        _, mask = self.emb(ids)
        self.assertTrue(np.all((ids.data.numpy() != 0) == mask.data.numpy()))

    def test_sameasover(self):
        """Words known to the override embedder use its vectors."""
        words = "the his monkey key"
        pred, _ = self.emb(q.var(torch.LongTensor([self.emb * w for w in words.split()])).v)
        gpred, _ = self.overemb(q.var(torch.LongTensor([self.overemb * w for w in words.split()])).v)
        self.assertTrue(np.allclose(pred.data.numpy(), gpred.data.numpy()))

    def test_sameasbase(self):
        """Words unknown to the override embedder fall back to the base."""
        words = "inception earlgrey <MASK>"
        pred, _ = self.emb(q.var(torch.LongTensor([self.emb * w for w in words.split()])).v)
        gpred, _ = self.baseemb(q.var(torch.LongTensor([self.baseemb * w for w in words.split()])).v)
        self.assertTrue(np.allclose(pred.data.numpy(), gpred.data.numpy()))

    def test_notasover(self):
        """Fallback words must NOT match the override embedder's vectors."""
        words = "inception earlgrey"
        pred, _ = self.emb(q.var(torch.LongTensor([self.emb * w for w in words.split()])).v)
        gpred, _ = self.overemb(q.var(torch.LongTensor([self.baseemb * w for w in words.split()])).v)
        self.assertFalse(np.allclose(pred.data.numpy(), gpred.data.numpy()))

    def test_notasbase(self):
        """Overridden words must NOT match the base embedder's vectors."""
        words = "the his monkey key"
        pred, _ = self.emb(q.var(torch.LongTensor([self.emb * w for w in words.split()])).v)
        gpred, _ = self.baseemb(q.var(torch.LongTensor([self.baseemb * w for w in words.split()])).v)
        self.assertFalse(np.allclose(pred.data.numpy(), gpred.data.numpy()))
# Tests loading a pretrained (mini) GloVe embedding from the repo's data dir.
class TestGlove(TestCase):
def setUp(self):
# Path is relative to the test working directory; %d is the dimension.
q.PretrainedWordEmb.defaultpath = "../data/glove/miniglove.%dd"
self.glove = q.PretrainedWordEmb(50)
print(self.glove.defaultpath)
def test_loaded(self):
# `% "the"` retrieves the stored vector for the word "the".
thevector = self.glove % "the"
# Expected 50-dim vector for "the", copied from the GloVe 6B.50d data.
truevector = np.asarray([ 4.18000013e-01, 2.49679998e-01, -4.12420005e-01,
1.21699996e-01, 3.45270008e-01, -4.44569997e-02,
-4.96879995e-01, -1.78619996e-01, -6.60229998e-04,
-6.56599998e-01, 2.78430015e-01, -1.47670001e-01,
-5.56770027e-01, 1.46579996e-01, -9.50950012e-03,
1.16579998e-02, 1.02040000e-01, -1.27920002e-01,
-8.44299972e-01, -1.21809997e-01, -1.68009996e-02,
-3.32789987e-01, -1.55200005e-01, -2.31309995e-01,
-1.91809997e-01, -1.88230002e+00, -7.67459989e-01,
9.90509987e-02, -4.21249986e-01, -1.95260003e-01,
4.00710011e+00, -1.85939997e-01, -5.22870004e-01,
-3.16810012e-01, 5.92130003e-04, 7.44489999e-03,
1.77780002e-01, -1.58969998e-01, 1.20409997e-02,
-5.42230010e-02, -2.98709989e-01, -1.57490000e-01,
-3.47579986e-01, -4.56370004e-02, -4.42510009e-01,
1.87849998e-01, 2.78489990e-03, -1.84110001e-01,
-1.15139998e-01, -7.85809994e-01])
# "the" is expected at id 2 (after the mask/rare tokens).
self.assertEqual(self.glove * "the", 2)
self.assertTrue(np.allclose(thevector, truevector))
# Mini-glove vocabulary: 4000 words + 2 special tokens.
self.assertEqual(self.glove.embedding.weight.size(), (4002, 50))
class TestComputedWordEmb(TestCase):
    """Tests for ``q.ComputedWordEmb``, whose output vectors are produced by
    running stored data through a computer module."""

    def setUp(self):
        vectors = np.random.random((7, 10)).astype("float32")
        projection = nn.Linear(10, 15)
        vocab = "<MASK> <RARE> first second third fourth fifth"
        vocab = dict(zip(vocab.split(), range(len(vocab.split()))))
        self.emb = q.ComputedWordEmb(data=vectors, computer=projection, worddic=vocab)

    def test_shape(self):
        """Output uses the computer's output dimension; id 0 is masked out."""
        ids = Variable(torch.LongTensor([0, 1, 2]))
        out, mask = self.emb(ids)
        print(mask)
        self.assertEqual((3, 15), out.size())
        self.assertTrue(np.allclose(mask.data.numpy(), [[0, 1, 1]]))
class TestMergedWordEmb(TestCase):
    """Tests for merging two ``WordEmb`` objects by sum or concatenation."""

    def setUp(self):
        vocab = "<MASK> <RARE> first second third fourth fifth"
        vocab = dict(zip(vocab.split(), range(len(vocab.split()))))
        self.emb1 = q.WordEmb(100, worddic=vocab)
        self.emb2 = q.WordEmb(100, worddic=vocab)

    def test_sum_merge(self):
        """mode="sum" adds the two embedding outputs elementwise."""
        merged = self.emb1.merge(self.emb2, mode="sum")
        ids = Variable(torch.LongTensor([0, 1, 2]))
        out1, mask1 = self.emb1(ids)
        print(mask1)
        out2, _ = self.emb2(ids)
        out, _ = merged(ids)
        self.assertTrue(np.allclose(out.data.numpy(), out1.data.numpy() + out2.data.numpy()))

    def test_cat_merge(self):
        """mode="cat" concatenates the two embedding outputs along axis 1."""
        merged = self.emb1.merge(self.emb2, mode="cat")
        ids = Variable(torch.LongTensor([0, 1, 2]))
        out1, mask1 = self.emb1(ids)
        print(mask1)
        out2, _ = self.emb2(ids)
        out, _ = merged(ids)
        self.assertTrue(np.allclose(out.data.numpy(), np.concatenate([out1.data.numpy(), out2.data.numpy()], axis=1)))
# TODO: test WordLinouts
class TestWordLinout(TestCase):
    """Tests for the ``q.WordLinout`` output-projection block."""

    def setUp(self):
        vocab = "<MASK> <RARE> first second third fourth fifth"
        vocab = dict(zip(vocab.split(), range(len(vocab.split()))))
        self.linout = q.WordLinout(10, worddic=vocab)

    def test_shape(self):
        """One score per vocabulary entry is produced for every input row."""
        inp = Variable(torch.randn(7, 10))
        mask = Variable(torch.FloatTensor([[1, 0, 1, 1, 0, 1, 0]] * 5 + [[0, 1, 0, 0, 1, 0, 1]] * 2))
        scores = self.linout(inp, mask=mask)
        print(scores)
        self.assertEqual((7, 7), scores.size())
# Tests loading a pretrained (mini) GloVe matrix as an output projection.
class TestPretrainedWordLinout(TestCase):
def setUp(self):
# Path is relative to the test working directory; %d is the dimension.
q.PretrainedWordLinout.defaultpath = "../data/glove/miniglove.%dd"
self.glove = q.PretrainedWordLinout(50)
print(self.glove.defaultpath)
def test_loaded(self):
# `% "the"` retrieves the stored (output-weight) vector for "the".
thevector = self.glove % "the"
# Expected 50-dim vector for "the", copied from the GloVe 6B.50d data.
truevector = np.asarray([
4.18000013e-01, 2.49679998e-01, -4.12420005e-01,
1.21699996e-01, 3.45270008e-01, -4.44569997e-02,
-4.96879995e-01, -1.78619996e-01, -6.60229998e-04,
-6.56599998e-01, 2.78430015e-01, -1.47670001e-01,
-5.56770027e-01, 1.46579996e-01, -9.50950012e-03,
1.16579998e-02, 1.02040000e-01, -1.27920002e-01,
-8.44299972e-01, -1.21809997e-01, -1.68009996e-02,
-3.32789987e-01, -1.55200005e-01, -2.31309995e-01,
-1.91809997e-01, -1.88230002e+00, -7.67459989e-01,
9.90509987e-02, -4.21249986e-01, -1.95260003e-01,
4.00710011e+00, -1.85939997e-01, -5.22870004e-01,
-3.16810012e-01, 5.92130003e-04, 7.44489999e-03,
1.77780002e-01, -1.58969998e-01, 1.20409997e-02,
-5.42230010e-02, -2.98709989e-01, -1.57490000e-01,
-3.47579986e-01, -4.56370004e-02, -4.42510009e-01,
1.87849998e-01, 2.78489990e-03, -1.84110001e-01,
-1.15139998e-01, -7.85809994e-01])
# "the" is expected at id 2 (after the mask/rare tokens).
self.assertEqual(self.glove * "the", 2)
self.assertTrue(np.allclose(thevector, truevector))
# Linear layer weight: 4000 words + 2 special tokens, 50-dim input.
self.assertEqual(self.glove.lin.weight.size(), (4002, 50))
# Tests for WordLinout.adapt(): remapping output ids to a new dictionary
# while reusing the original projection weights.
class TestAdaptedWordLinout(TestCase):
def setUp(self):
wdic = {"<MASK>": 0, "<RARE>": 1, "the": 10, "a": 5, "his": 50, "abracadabrqmsd--qsdfmqgf-": 6}
wdic2 = {"<MASK>": 0, "<RARE>": 1, "the": 2, "a": 3, "his": 4, "abracadabrqmsd--qsdfmqgf-": 5, "qsdfqsdf": 7}
self.adapted = q.WordLinout(10, worddic=wdic)
# `vanilla` shares the same weights so per-word vectors can be compared.
self.vanilla = q.WordLinout(10, worddic=wdic, weight=self.adapted.lin.weight.data.numpy())
self.adapted = self.adapted.adapt(wdic2)
# `linout * word` maps word -> id; `linout % word` -> its weight vector.
def test_map(self):
self.assertEqual(self.adapted * "a", 3)
self.assertEqual(self.adapted * "the", 2)
self.assertEqual(self.adapted * "his", 4)
# Unknown words map to the <RARE> id (1).
self.assertEqual(self.adapted * "her", 1)
self.assertEqual(self.vanilla * "a", 5)
self.assertEqual(self.vanilla * "the", 10)
self.assertEqual(self.vanilla * "her", 1)
self.assertEqual(self.adapted * "qsdfqlmkdsjfmqlsdkjgmqlsjdf", 1)
print(self.vanilla * "the", self.adapted * "the")
print(self.vanilla % "the", self.adapted % "the")
# Same word must yield the same weight vector through either dictionary.
self.assertTrue(np.allclose(self.vanilla % "the", self.adapted % "the"))
self.assertTrue(np.allclose(self.vanilla % "his", self.adapted % "his"))
def test_adapted_block(self):
pred = self.adapted(Variable(torch.FloatTensor(np.stack([self.adapted % x for x in "the a his".split()], axis=0))))
l = pred.sum()
l.backward()
# Gradients must flow back into the wrapped (inner) linear layer.
grad = self.adapted.inner.lin.weight.grad
self.assertTrue(grad.norm().data.numpy()[0] > 0)
def test_adapted_prediction_shape(self):
# Output has one score per entry of the adapted dictionary (8 ids).
xval = np.stack([self.adapted % "the", self.adapted % "a"], axis=0)
x = Variable(torch.from_numpy(xval))
pred = self.adapted(x)
self.assertEqual(pred.size(), (2, 8))
# Tests for WordLinout.override(): scores for words present in the override
# linout come from it, all others from the base linout.
class TestOverriddenWordLinout(TestCase):
def setUp(self):
wdic = {"<MASK>": 0, "<RARE>": 1, "the": 10, "a": 5, "his": 50, "monkey": 6}
wdic2 = {"<MASK>": 0, "<RARE>": 1, "the": 2, "a": 3, "his": 4, "abracadabrqmsd--qsdfmqgf-": 5, "qsdfqsdf": 7}
self.base = q.WordLinout(10, worddic=wdic)
self.over = q.WordLinout(10, worddic=wdic2)
self.overridden = self.base.override(self.over)
def test_shapes(self):
x = Variable(torch.FloatTensor(np.stack([self.base % x for x in "the a his".split()], axis=0)))
pred = self.overridden(x)
# Output width follows the base dictionary's max id (50) + 1.
self.assertEqual(pred.size(), (3, 51))
basepred = self.base(x)
overpred = self.over(x)
l = pred.sum()
l.backward()
# Both the base and the override layers must receive gradients.
self.assertTrue(self.base.lin.weight.grad.norm().data[0] > 0)
# NOTE(review): the asymmetric thresholds (> 0 above, > 1 here) look
# accidental -- confirm whether both should be > 0.
self.assertTrue(self.over.lin.weight.grad.norm().data[0] > 1)
basepred = basepred.data.numpy()
overpred = overpred.data.numpy()
pred = pred.data.numpy()
# Overridden words ("the" -> base id 10 / over id 2, "a" -> 5/3) take the
# override's scores; "monkey" (base id 6) keeps the base score.
self.assertTrue(np.allclose(pred[:, 10], overpred[:, 2]))
self.assertTrue(np.allclose(pred[:, 5], overpred[:, 3]))
self.assertTrue(np.allclose(pred[:, 6], basepred[:, 6]))
# Tests for q.ComputedWordLinout: the output projection matrix is computed
# on the fly by running stored per-word data through a computer module.
class TestComputedWordLinout(TestCase):
def setUp(self):
data = np.random.random((7, 10)).astype("float32")
computer = nn.Linear(10, 15)
worddic = "<MASK> <RARE> first second third fourth fifth"
worddic = dict(zip(worddic.split(), range(len(worddic.split()))))
self.linout = q.ComputedWordLinout(data=data, computer=computer, worddic=worddic)
def test_basic(self):
# Output must equal x @ computer(data)^T.
x = Variable(torch.randn(3, 15)).float()
out = self.linout(x)
self.assertEqual(out.size(), (3, 7))
data = self.linout.data
computer = self.linout.computer
cout = torch.matmul(x, computer(data).t())
self.assertTrue(np.allclose(cout.data.numpy(), out.data.numpy()))
def test_masked(self):
# A per-(row, word) 0/1 mask must zero out the corresponding scores.
x = Variable(torch.randn(3, 15)).float()
msk_nonzero_batches = [0,0,0,1,1,2]
msk_nonzero_values = [0,2,3,2,6,5]
msk = np.zeros((3, 7)).astype("int32")
msk[msk_nonzero_batches, msk_nonzero_values] = 1
print(msk)
msk = Variable(torch.from_numpy(msk))
out = self.linout(x, mask=msk)
self.assertEqual(out.size(), (3, 7))
data = self.linout.data
computer = self.linout.computer
cout = torch.matmul(x, computer(data).t())
cout = cout * msk.float()
self.assertTrue(np.allclose(cout.data.numpy(), out.data.numpy()))
def test_all_masked(self):
# Degenerate case: an all-zero mask must produce an all-zero output.
x = Variable(torch.randn(3, 15)).float()
msk = np.zeros((3, 7)).astype("int32")
print(msk)
msk = Variable(torch.from_numpy(msk))
out = self.linout(x, mask=msk)
self.assertEqual(out.size(), (3, 7))
data = self.linout.data
computer = self.linout.computer
cout = torch.matmul(x, computer(data).t())
cout = cout * msk.float()
self.assertTrue(np.allclose(cout.data.numpy(), out.data.numpy()))
def test_masked_3D_data(self):
# With 3D per-word data, a recurrent computer (GRU, final state only)
# produces the projection vectors instead of a linear layer.
self.linout.data = q.val(np.random.random((7, 10, 3)).astype(dtype="float32")).v
self.linout.computer = q.GRULayer(3, 15).return_final("only")
x = Variable(torch.randn(3, 15)).float()
msk_nonzero_batches = [0, 0, 0, 1, 1, 2]
msk_nonzero_values = [0, 2, 3, 2, 6, 5]
msk = np.zeros((3, 7)).astype("int32")
msk[msk_nonzero_batches, msk_nonzero_values] = 1
print(msk)
msk = Variable(torch.from_numpy(msk))
out = self.linout(x, mask=msk)
self.assertEqual(out.size(), (3, 7))
data = self.linout.data
computer = self.linout.computer
cout = torch.matmul(x, computer(data).t())
cout = cout * msk.float()
self.assertTrue(np.allclose(cout.data.numpy(), out.data.numpy()))
def test_basic_grad(self):
# NOTE(review): agrads/bgrads are collected but never compared; as
# written this test only verifies that backward() runs without error.
x = Variable(torch.randn(3, 15)).float()
y = Variable(torch.randn(3, 15)).float()
out = self.linout(x)
loss = out.sum()
loss.backward()
agrads = []
for p in self.linout.parameters():
if p.requires_grad:
agrads.append(p.grad.data.numpy() + 0)
out = self.linout(y)
loss = out.sum()
loss.backward()
bgrads = []
for p in self.linout.parameters():
if p.requires_grad:
bgrads.append(p.grad.data.numpy() + 0)
pass
| mit |
lcunquei/AliPhysics | PWGJE/EMCALJetTasks/Tracks/analysis/base/Graphics.py | 41 | 22080 | #**************************************************************************
#* Copyright(c) 1998-2014, ALICE Experiment at CERN, All rights reserved. *
#* *
#* Author: The ALICE Off-line Project. *
#* Contributors are mentioned in the code where appropriate. *
#* *
#* Permission to use, copy, modify and distribute this software and its *
#* documentation strictly for non-commercial purposes is hereby granted *
#* without fee, provided that the above copyright notice appears in all *
#* copies and that both the copyright notice and this permission notice *
#* appear in the supporting documentation. The authors make no claims *
#* about the suitability of this software for any purpose. It is *
#* provided "as is" without express or implied warranty. *
#**************************************************************************
"""
Graphics module, containing basic ROOT plot helper functionality and
base classes for specific kinds of plots
@author: Markus Fasel ,
@contact: <markus.fasel@cern.ch>
@organization: Lawrence Berkeley National Laboratory
@organization: ALICE Collaboration
@copyright: 1998-2014, ALICE Experiment at CERN, All rights reserved
"""
from ROOT import TCanvas,TH1F,TLegend,TPad,TPaveText,TF1, TGraph, TH1
from ROOT import kBlack
class Frame:
    """
    Helper class handling frame drawing in plots.

    Wraps an invisible histogram whose axes define the coordinate frame.
    """

    def __init__(self, name, xmin, xmax, ymin, ymax):
        """
        Construct a named frame spanning the given coordinate ranges.
        @param name: Name of the frame
        @param xmin: Min. value of the x-coordinate
        @param xmax: Max. value of the x-coordinate
        @param ymin: Min. value of the y-coordinate
        @param ymax: Max. value of the y-coordinate
        """
        self.__framehist = TH1F(name, "", 100, xmin, xmax)
        self.__framehist.SetStats(False)
        self.__framehist.GetYaxis().SetRangeUser(ymin, ymax)

    def SetXtitle(self, title):
        """
        Set the title of the x axis.
        @param title: Title of the x-axis
        """
        self.__framehist.GetXaxis().SetTitle(title)

    def SetYtitle(self, title):
        """
        Set the title of the y axis.
        @param title: Title of the y-axis
        """
        self.__framehist.GetYaxis().SetTitle(title)

    def Draw(self):
        """
        Draw the frame (axes only, no histogram content).
        """
        self.__framehist.Draw("axis")
class Style:
    """
    Plot style container: marker color and style, plus optional line and
    fill attributes, applicable to ROOT graphics objects.
    """

    def __init__(self, color, marker, options = None):
        """
        Constructor
        @param color: Color definition of the style
        @param marker: Marker definition of the style
        @param options: Optional dict with any of the keys "fillstyle",
                        "fillcolor", "linestyle", "linewidth"
        """
        self.__color = color
        self.__marker = marker
        self.__linestyle = None
        self.__linewidth = None
        self.__fillstyle = None
        self.__fillcolor = None
        if options:
            # Fix: replaced the repetitive `key in options.keys()` chain with
            # a dispatch over the recognized option keys; unknown keys are
            # ignored, as before.
            setters = {"fillstyle": self.SetFillStyle,
                       "fillcolor": self.SetFillColor,
                       "linestyle": self.SetLineStyle,
                       "linewidth": self.SetLineWidth}
            for key, value in options.items():
                if key in setters:
                    setters[key](value)

    def SetColor(self, color):
        """
        Change color of the graphics object
        @param color: The color of the object
        """
        self.__color = color

    def SetMarker(self, marker):
        """
        Change marker style of the graphics object
        @param marker: The marker style
        """
        self.__marker = marker

    def SetLineStyle(self, linestyle):
        """
        Change the line style
        @param linestyle: New line style
        """
        self.__linestyle = linestyle

    def SetLineWidth(self, linewidth):
        """
        Change the line width
        @param linewidth: New line width
        """
        self.__linewidth = linewidth

    def SetFillStyle(self, fillstyle):
        """
        Change the fill style
        @param fillstyle: New fill style
        """
        self.__fillstyle = fillstyle

    def SetFillColor(self, fillcolor):
        """
        Change the fill color
        @param fillcolor: the new fill color
        """
        self.__fillcolor = fillcolor

    def GetColor(self):
        """
        Access color of the graphics object
        @return: Marker color
        """
        return self.__color

    def GetMarker(self):
        """
        Access marker style
        @return: Marker style
        """
        return self.__marker

    def GetLineStyle(self):
        """
        Get the line style (if defined)
        @return: The line style or None
        """
        return self.__linestyle

    def GetLineWidth(self):
        """
        Get the line width (if defined)
        @return: The line width or None
        """
        return self.__linewidth

    def GetFillStyle(self):
        """
        Get the fill style (if defined)
        @return: The fill style or None
        """
        return self.__fillstyle

    def GetFillColor(self):
        """
        Get the fill color (if defined)
        @return: The fill color or None
        """
        return self.__fillcolor

    def DefineROOTPlotObject(self, rootobject):
        """
        Apply this style to a ROOT graphics object.
        @param rootobject: The ROOT graphics object to be styled
        """
        rootobject.SetMarkerColor(self.__color)
        if self.__linestyle is not None:
            rootobject.SetLineStyle(self.__linestyle)
        if self.__linewidth is not None:
            rootobject.SetLineWidth(self.__linewidth)
        # TF1 objects carry no marker and keep their own line color
        if not type(rootobject) is TF1:
            rootobject.SetMarkerStyle(self.__marker)
            rootobject.SetLineColor(self.__color)
        if self.__fillstyle is not None:
            rootobject.SetFillStyle(self.__fillstyle)
        if self.__fillcolor is not None:
            rootobject.SetFillColor(self.__fillcolor)
class GraphicsObject:
    """
    Container pairing a drawable ROOT object (TH1, TGraph(Errors) or TF1)
    with a Style and a draw option.
    """

    def __init__(self, data, style = None, drawoption = "epsame"):
        """
        Initialise a new graphics object.
        @param data: Underlying data as root object
        @param style: Plot style applied; defaults to black, filled circles
        @param drawoption: Draw option; "same" is always enforced, and TF1
                           objects are always drawn with "lsame"
        """
        self.__data = data
        # Fix: the original built the default style and the default draw
        # option and then immediately overwrote them; fold both into single
        # expressions (falsy arguments fall back to the defaults, as before).
        self.SetStyle(style or Style(kBlack, 20))
        self.__drawoption = drawoption or "epsame"
        if not "same" in self.__drawoption:
            self.__drawoption += "same"
        if type(self.__data) is TF1:
            # Functions are always drawn as a line on top of the pad
            self.__drawoption = "lsame"

    def SetStyle(self, style):
        """
        Apply the given style to the underlying object
        @param style: The plot style used
        """
        style.DefineROOTPlotObject(self.__data)

    def GetData(self):
        """
        Provide access to underlying data
        @return: The underlying root object
        """
        return self.__data

    def Draw(self):
        """
        Draw graphics object with its stored draw option (always contains
        "same" so objects stack on the current pad).
        """
        self.__data.Draw(self.__drawoption)

    def AddToLegend(self, legend, title):
        """
        Add graphics object to a legend provided from outside
        @param legend: The legend the object is added to
        @param title: Legend entry title
        """
        if type(self.__data) is TF1:
            option = "l"
        elif self.__IsBoxStyle():
            option = "f"
        else:
            option = "lep"
        legend.AddEntry(self.__data, title, option)

    def __IsBoxStyle(self):
        """
        Check whether the stored object is drawn in a box (filled) style.
        Fix: dropped the original's unused `plotobject` parameter -- the
        check always inspected the stored data object anyway (private
        method, only called internally).
        @return: True if in box style, False otherwise
        """
        if type(self.__data) is TF1:
            return False
        elif issubclass(type(self.__data), TGraph):
            # TGraph fill draw options are the numeric variants 2-5
            return any("%d" % (i) in self.__drawoption.lower() for i in range(2, 6))
        elif issubclass(type(self.__data), TH1):
            return "b" in self.__drawoption.lower()
class PlotBase:
    """
    Base class for plot objects: owns a canvas and a container of framed
    pads.
    """

    class _FramedPad:
        """
        Pad structure inside the canvas: a frame with axes definition, and
        optionally a legend and one or several label(s).
        """

        class GraphicsEntry:
            """
            A graphics object together with its legend title and a flag
            stating whether it should appear in the legend.
            """

            def __init__(self, graphobject, title = None, addToLegend = False):
                self.__object = graphobject
                self.__title = title
                self.__addToLegend = addToLegend

            def __cmp__(self, other):
                """
                Comparison is done according to the object title (Python 2
                ordering protocol; entries without titles sort first).
                @param other: object to compare with
                @return: 0 if objects are equal, 1 if this object is larger, -1 if smaller
                NOTE(review): when both titles are missing this returns
                None, which is not a valid cmp result -- kept as-is to
                preserve the established sort behaviour; confirm intended.
                """
                # 1st case: either or both of the titles missing
                if not self.__title and not other.GetTitle():
                    return None
                if not self.__title and other.GetTitle():
                    return -1
                if self.__title and not other.GetTitle():
                    return 1
                # second case: both of the titles available
                if self.__title == other.GetTitle():
                    return 0
                if self.__title < other.GetTitle():
                    return -1
                return 1

            def GetObject(self):
                """
                Accessor to graphics object
                @return: Underlying object
                """
                return self.__object

            def GetTitle(self):
                """
                Get the title of the object
                @return: Title of the object
                """
                return self.__title

            def IsAddToLegend(self):
                """
                Check whether graphics is foreseen to be added to legend
                @return: True if the object is added to the legend
                """
                return self.__addToLegend

            def SetTitle(self, title):
                """
                Change title of the graphics object
                @param title: Title of the object
                """
                self.__title = title

            def SetAddToLegend(self, doAdd):
                """
                Define whether object should be added to a legend
                @param doAdd: Switch for adding object to a legend
                """
                self.__addToLegend = doAdd

        def __init__(self, pad):
            """
            Constructor, creating a framed pad structure for a TPad
            @param pad: Underlying ROOT pad
            """
            self.__pad = pad
            # Fix: the original initialised `__Frame` (capital F) here while
            # DrawFrame and all users access `__frame`, leaving a dead
            # attribute and an uninitialised one.
            self.__frame = None
            self.__legend = None
            self.__graphicsObjects = []
            self.__labels = []

        def DrawFrame(self, frame):
            """
            Draw a frame, defined from outside, within the pad.
            The pad becomes owner of the frame.
            @param frame: Frame of the pad
            """
            self.__frame = frame
            self.__frame.Draw()

        def DrawGraphicsObject(self, graphics, addToLegend = False, title = None):
            """
            Draw a graphics object into the pad and remember it. If
            addToLegend is set, the object later enters the legend.
            """
            self.__graphicsObjects.append(self.GraphicsEntry(graphics, title, addToLegend))
            graphics.Draw()

        def DefineLegend(self, xmin, ymin, xmax, ymax):
            """
            Create a new (empty) legend within the frame with the given
            boundary coordinates; no-op if a legend already exists.
            @param xmin: Min. x value of the legend
            @param ymin: Min. y value of the legend
            @param xmax: Max. x value of the legend
            @param ymax: Max. y value of the legend
            """
            if not self.__legend:
                self.__legend = TLegend(xmin, ymin, xmax, ymax)
                self.__legend.SetBorderSize(0)
                self.__legend.SetFillStyle(0)
                self.__legend.SetTextFont(42)

        def CreateLegend(self, xmin, ymin, xmax, ymax):
            """
            Create the legend from all stored graphics entries and draw it.
            @param xmin: Min. x value of the legend
            @param ymin: Min. y value of the legend
            @param xmax: Max. x value of the legend
            @param ymax: Max. y value of the legend
            """
            if not self.__legend:
                self.DefineLegend(xmin, ymin, xmax, ymax)
            for entry in sorted(self.__graphicsObjects):
                if entry.IsAddToLegend():
                    self.AddToLegend(entry.GetObject(), entry.GetTitle())
            self.DrawLegend()

        def GetLegend(self):
            """
            Provide access to legend
            @return: the legend (None if not defined)
            """
            return self.__legend

        def AddToLegend(self, graphicsObject, title):
            """
            Add a graphics object to the legend, if one exists.
            @param graphicsObject: graphics object to be added to the legend
            @param title: Legend entry title
            """
            if self.__legend:
                graphicsObject.AddToLegend(self.__legend, title)

        def DrawLegend(self):
            """
            Draw the legend, if one exists.
            """
            if self.__legend:
                self.__legend.Draw()

        def DrawLabel(self, xmin, ymin, xmax, ymax, text):
            """
            Add a new text label to the pad and draw it (the pad keeps
            ownership so ROOT does not garbage-collect it).
            @param xmin: Min. x value of the label
            @param ymin: Min. y value of the label
            @param xmax: Max. x value of the label
            @param ymax: Max. y value of the label
            @param text: Label text
            """
            label = TPaveText(xmin, ymin, xmax, ymax, "NDC")
            label.SetBorderSize(0)
            label.SetFillStyle(0)
            label.SetTextFont(42)
            label.AddText(text)
            label.Draw()
            self.__labels.append(label)

        def GetPad(self):
            """
            Provide direct access to the pad
            @return: Underlying ROOT pad
            """
            return self.__pad

    class _FrameContainer:
        """
        Container mapping pad IDs to framed pad objects.
        """

        def __init__(self):
            """
            Create new empty frame container
            """
            self.__Frames = {}

        def AddFrame(self, frameID, frame):
            """
            Register a framed pad under the given ID.
            @param frameID: ID of the frame
            @param frame: Frame to be added for pad with ID
            """
            self.__Frames[frameID] = frame

        def GetFrame(self, frameID):
            """
            Look up a framed pad; returns None for unknown IDs.
            Fix: `dict.has_key` was removed in Python 3 -- replaced with the
            `in` operator, which is also valid Python 2.
            @param frameID: ID of the frame
            @return: The frame for the pad, or None
            """
            if frameID not in self.__Frames:
                return None
            return self.__Frames[frameID]

    def __init__(self):
        """
        Initialise new plot: no canvas yet, empty frame container.
        """
        self._canvas = None
        self._frames = self._FrameContainer()

    def _OpenCanvas(self, canvasname, canvastitle, xsize = 1000, ysize = 800):
        """
        Initialise the canvas with name, title and size and make it current.
        @param canvasname: Name of the canvas
        @param canvastitle: Title of the canvas
        @param xsize: Canvas size in x-direction
        @param ysize: Canvas size in y-direction
        """
        self._canvas = TCanvas(canvasname, canvastitle, xsize, ysize)
        self._canvas.cd()

    def SaveAs(self, filenamebase):
        """
        Save the plot as <filenamebase>.<ext> for each of the formats
        eps, pdf, jpeg, gif and png.
        @param filenamebase: Basic part of the filename (without endings)
        """
        for t in ["eps", "pdf", "jpeg", "gif", "png"]:
            self._canvas.SaveAs("%s.%s" %(filenamebase, t))
class SinglePanelPlot(PlotBase):
    """
    A plot consisting of a single framed pad covering the whole canvas.
    """

    def __init__(self):
        """
        Initialise the underlying plot base.
        """
        PlotBase.__init__(self)

    def _OpenCanvas(self, canvasname, canvastitle):
        """
        Create a 1000x800 canvas and register it as framed pad 0.
        @param canvasname: Name of the canvas
        @param canvastitle: Title of the canvas
        """
        PlotBase._OpenCanvas(self, canvasname, canvastitle, 1000, 800)
        self._frames.AddFrame(0, self._FramedPad(self._canvas))

    def _GetFramedPad(self):
        """
        Access the single framed pad of this plot.
        @return: The underlying framed pad
        """
        return self._frames.GetFrame(0)
class MultipanelPlot(PlotBase):
"""
Base Class For multiple panel plots
"""
def __init__(self, nrow, ncol):
"""
Create new Multi-panel plot with a given number of row and cols
"""
PlotBase.__init__(self)
# Grid dimensions used by Divide() and the pad-ID arithmetic below.
self.__nrow = nrow
self.__ncol = ncol
def _OpenCanvas(self, canvasname, canvastitle, xsize, ysize):
"""
Create new canvas and split it into the amount of pads as defined
@param canvasname: Name of the canvas
@param canvastitle: Title of the canvas
@param xsize: Canvas size in x-direction
@param ysize: Canvas size in y-direction
"""
PlotBase._OpenCanvas(self, canvasname, canvastitle, xsize, ysize)
self._canvas.Divide(self.__ncol, self.__nrow)
def _OpenPad(self, padID):
"""
Create new framed pad in a multi-panel plot for a given pad ID
@param padID: ID number of the pad
@return: The framed pad, or None for an out-of-range ID
"""
if padID < 0 or padID > self.__GetMaxPadID():
return None
mypad = self._GetPad(padID)
if not mypad:
# NOTE(review): __GetPadID already yields 1-based IDs (1 + row*ncol + col),
# so cd(padID+1) addresses canvas sub-pad 2 for cell (0,0) and a
# nonexistent sub-pad for the last cell -- looks like an off-by-one;
# confirm against actual callers before changing.
mypad = self._FramedPad(self._canvas.cd(padID+1))
self._frames.AddFrame(padID, mypad)
return mypad
def _OpenPadByRowCol(self, row, col):
"""
Create new framed pad in a multi-panel plot for a given row an col
@param row: row of the pad
@param col: column of the pad
@return: The new pad at this position
"""
return self._OpenPad(self.__GetPadID(row, col))
def _GetPad(self, padID):
"""
Access to Pads by pad ID
@param padID: ID number of the pad
@return: The framed pad, or None if not opened yet
"""
return self._frames.GetFrame(padID)
def _GetPadByRowCol(self, row, col):
"""
Access Pad by row and col
@param row: row of the pad
@param col: column of the pad
@return: The pad at this position
"""
return self._frames.GetFrame(self.__GetPadID(row, col))
def __GetPadID(self, row, col):
"""
Calculate ID of the pad (1-based: 1 + row*ncol + col); -1 when the
row/col indices are outside the grid.
@param row: row of the pad
@param col: column of the pad
@return: The pad ID for this combination
"""
if (row < 0 or row >= self.__nrow) or (col < 0 or col >= self.__ncol):
return -1
return 1 + row * self.__ncol + col
def __GetMaxPadID(self):
"""
Calculate the maximum allowed pad ID
NOTE(review): this returns nrow*ncol + 1, one more than the largest ID
__GetPadID can produce (nrow*ncol) -- the range check in _OpenPad thus
admits one ID past the grid; confirm intended.
@return: The maximum pad ID
"""
return 1 + self.__ncol * self.__nrow
class TwoPanelPlot(MultipanelPlot):
    """
    A plot with two panels (1 row x 2 columns)
    """
    def __init__(self):
        """
        Initialise two-panel plot
        """
        MultipanelPlot.__init__(self, 1, 2)
    def _CreateCanvas(self, canvasname, canvastitle):
        """
        Create Canvas with the dimensions of a two-panel plot (1000x500)
        @param canvasname: Name of the canvas
        @param canvastitle: Title of the canvas
        """
        # NOTE(review): the sibling FourPanelPlot overrides _OpenCanvas
        # instead; this method name (_CreateCanvas) looks inconsistent --
        # confirm against callers before renaming.
        MultipanelPlot._OpenCanvas(self, canvasname, canvastitle, 1000, 500)
class FourPanelPlot(MultipanelPlot):
    """
    A plot with four (2x2) panels
    """
    def __init__(self):
        """
        Initialise four-panel plot
        """
        MultipanelPlot.__init__(self, 2, 2)
    def _OpenCanvas(self, canvasname, canvastitle):
        """
        Create Canvas with the dimensions of a four-panel plot (1000x1000)
        @param canvasname: Name of the canvas
        @param canvastitle: Title of the canvas
        """
        MultipanelPlot._OpenCanvas(self, canvasname, canvastitle, 1000, 1000)
dirtycold/git-cola | cola/widgets/dag.py | 2 | 52694 | from __future__ import division, absolute_import, unicode_literals
import collections
import math
from qtpy import QtCore
from qtpy import QtGui
from qtpy import QtWidgets
from qtpy.QtCore import Qt
from qtpy.QtCore import Signal
from qtpy.QtCore import QPointF
from qtpy.QtCore import QRectF
from .. import core
from .. import cmds
from .. import difftool
from .. import hotkeys
from .. import icons
from .. import observable
from .. import qtcompat
from .. import qtutils
from ..compat import maxsize
from ..i18n import N_
from ..models import dag
from . import archive
from . import browse
from . import completion
from . import createbranch
from . import createtag
from . import defs
from . import diff
from . import filelist
from . import standard
def git_dag(model, args=None, settings=None):
    """Return a pre-populated git DAG widget.

    :param model: main model providing ``currentbranch``
    :param args: optional argument list forwarded to the DAG context
    :param settings: optional settings passed through to the view
    :returns: a GitDAG window; already displaying history when a ref is set
    """
    branch = model.currentbranch
    # disambiguate between branch names and filenames by using '--'
    # (conditional expression instead of the fragile `and/or` idiom)
    branch_doubledash = (branch + ' --') if branch else ''
    ctx = dag.DAG(branch_doubledash, 1000)
    ctx.set_arguments(args)
    view = GitDAG(model, ctx, settings=settings)
    if ctx.ref:
        view.display()
    return view
class FocusRedirectProxy(object):
    """Redirect actions from the main widget to child widgets.

    Attribute access returns a callable that forwards to whichever
    proxied widget currently has focus, falling back to the default.
    """

    def __init__(self, *widgets):
        """Provide proxied widgets; the default widget must be first"""
        self.widgets = widgets
        self.default = widgets[0]

    def __getattr__(self, name):
        def forward(*args, **kwargs):
            return self._forward_action(name, *args, **kwargs)
        return forward

    def _forward_action(self, name, *args, **kwargs):
        """Forward the captured action to the focused or default widget"""
        focused = QtWidgets.QApplication.focusWidget()
        if focused in self.widgets and hasattr(focused, name):
            handler = getattr(focused, name)
        else:
            handler = getattr(self.default, name)
        return handler(*args, **kwargs)
class ViewerMixin(object):
    """Implementations must provide selected_items()"""
    def __init__(self):
        self.selected = None  # commit paired against `clicked` in diffs
        self.clicked = None  # commit under the cursor on context-menu
        self.menu_actions = None # provided by implementation
    def selected_item(self):
        """Return the currently selected item"""
        selected_items = self.selected_items()
        if not selected_items:
            return None
        return selected_items[0]
    def selected_sha1(self):
        """Return the SHA-1 of the selected commit, or None"""
        item = self.selected_item()
        if item is None:
            result = None
        else:
            result = item.commit.sha1
        return result
    def selected_sha1s(self):
        # NOTE(review): despite the name, this returns commit objects,
        # not SHA-1 strings -- confirm against callers before renaming.
        return [i.commit for i in self.selected_items()]
    def with_oid(self, fn):
        """Call fn(oid) when a commit is selected; return its result or None"""
        oid = self.selected_sha1()
        if oid:
            result = fn(oid)
        else:
            result = None
        return result
    def diff_selected_this(self):
        """Emit a diff request: selected commit against the clicked one"""
        clicked_sha1 = self.clicked.sha1
        selected_sha1 = self.selected.sha1
        self.diff_commits.emit(selected_sha1, clicked_sha1)
    def diff_this_selected(self):
        """Emit a diff request: clicked commit against the selected one"""
        clicked_sha1 = self.clicked.sha1
        selected_sha1 = self.selected.sha1
        self.diff_commits.emit(clicked_sha1, selected_sha1)
    def cherry_pick(self):
        """Cherry-pick the selected commit onto the current branch"""
        self.with_oid(lambda oid: cmds.do(cmds.CherryPick, [oid]))
    def copy_to_clipboard(self):
        """Copy the selected commit's SHA-1 to the clipboard"""
        self.with_oid(lambda oid: qtutils.set_clipboard(oid))
    def create_branch(self):
        """Open the create-branch dialog at the selected commit"""
        self.with_oid(lambda oid: createbranch.create_new_branch(revision=oid))
    def create_tag(self):
        """Open the create-tag dialog at the selected commit"""
        self.with_oid(lambda oid: createtag.create_tag(ref=oid))
    def create_tarball(self):
        """Open the save-archive dialog for the selected commit"""
        self.with_oid(lambda oid: archive.show_save_dialog(oid, parent=self))
    def show_diff(self):
        """Launch the external diff tool on the selected commit (oid^!)"""
        self.with_oid(lambda oid:
            difftool.diff_expression(self, oid + '^!',
                hide_expr=False, focus_tree=True))
    def show_dir_diff(self):
        """Launch a directory diff against the selected commit"""
        self.with_oid(lambda oid:
            difftool.launch(left=oid, left_take_magic=True, dir_diff=True))
    def reset_branch_head(self):
        """Reset the branch head to the selected commit"""
        self.with_oid(lambda oid: cmds.do(cmds.ResetBranchHead, ref=oid))
    def reset_worktree(self):
        """Reset the worktree to the selected commit"""
        self.with_oid(lambda oid: cmds.do(cmds.ResetWorktree, ref=oid))
    def save_blob_dialog(self):
        """Open the file browser to grab a file from the selected commit"""
        self.with_oid(lambda oid: browse.BrowseDialog.browse(oid))
    def update_menu_actions(self, event):
        """Enable/disable menu actions based on the current selection

        Also records `clicked` (the commit under the cursor) and
        `selected` (the single selected commit, when it differs from
        the clicked one) for the diff actions.
        """
        selected_items = self.selected_items()
        item = self.itemAt(event.pos())
        if item is None:
            self.clicked = commit = None
        else:
            self.clicked = commit = item.commit
        has_single_selection = len(selected_items) == 1
        has_selection = bool(selected_items)
        # Diffing two commits requires one selected commit plus a
        # different commit under the cursor
        can_diff = bool(commit and has_single_selection and
                        commit is not selected_items[0].commit)
        if can_diff:
            self.selected = selected_items[0].commit
        else:
            self.selected = None
        self.menu_actions['diff_this_selected'].setEnabled(can_diff)
        self.menu_actions['diff_selected_this'].setEnabled(can_diff)
        self.menu_actions['diff_commit'].setEnabled(has_single_selection)
        self.menu_actions['diff_commit_all'].setEnabled(has_single_selection)
        self.menu_actions['cherry_pick'].setEnabled(has_single_selection)
        self.menu_actions['copy'].setEnabled(has_single_selection)
        self.menu_actions['create_branch'].setEnabled(has_single_selection)
        self.menu_actions['create_patch'].setEnabled(has_selection)
        self.menu_actions['create_tag'].setEnabled(has_single_selection)
        self.menu_actions['create_tarball'].setEnabled(has_single_selection)
        self.menu_actions['reset_branch_head'].setEnabled(has_single_selection)
        self.menu_actions['reset_worktree'].setEnabled(has_single_selection)
        self.menu_actions['save_blob'].setEnabled(has_single_selection)
    def context_menu_event(self, event):
        """Build and show the context menu for the commit under the cursor"""
        self.update_menu_actions(event)
        menu = qtutils.create_menu(N_('Actions'), self)
        menu.addAction(self.menu_actions['diff_this_selected'])
        menu.addAction(self.menu_actions['diff_selected_this'])
        menu.addAction(self.menu_actions['diff_commit'])
        menu.addAction(self.menu_actions['diff_commit_all'])
        menu.addSeparator()
        menu.addAction(self.menu_actions['create_branch'])
        menu.addAction(self.menu_actions['create_tag'])
        menu.addSeparator()
        menu.addAction(self.menu_actions['cherry_pick'])
        menu.addAction(self.menu_actions['create_patch'])
        menu.addAction(self.menu_actions['create_tarball'])
        menu.addSeparator()
        reset_menu = menu.addMenu(N_('Reset'))
        reset_menu.addAction(self.menu_actions['reset_branch_head'])
        reset_menu.addAction(self.menu_actions['reset_worktree'])
        menu.addSeparator()
        menu.addAction(self.menu_actions['save_blob'])
        menu.addAction(self.menu_actions['copy'])
        menu.exec_(self.mapToGlobal(event.pos()))
def viewer_actions(widget):
    """Return the action map shared by the tree and graph viewers

    Each action forwards through widget.proxy so it applies to whichever
    viewer currently has focus.
    """
    return {
        'diff_this_selected':
            qtutils.add_action(widget, N_('Diff this -> selected'),
                               widget.proxy.diff_this_selected),
        'diff_selected_this':
            qtutils.add_action(widget, N_('Diff selected -> this'),
                               widget.proxy.diff_selected_this),
        'create_branch':
            qtutils.add_action(widget, N_('Create Branch'),
                               widget.proxy.create_branch),
        'create_patch':
            qtutils.add_action(widget, N_('Create Patch'),
                               widget.proxy.create_patch),
        'create_tag':
            qtutils.add_action(widget, N_('Create Tag'),
                               widget.proxy.create_tag),
        'create_tarball':
            qtutils.add_action(widget, N_('Save As Tarball/Zip...'),
                               widget.proxy.create_tarball),
        'cherry_pick':
            qtutils.add_action(widget, N_('Cherry Pick'),
                               widget.proxy.cherry_pick),
        'diff_commit':
            qtutils.add_action(widget, N_('Launch Diff Tool'),
                               widget.proxy.show_diff, hotkeys.DIFF),
        'diff_commit_all':
            qtutils.add_action(widget, N_('Launch Directory Diff Tool'),
                               widget.proxy.show_dir_diff, hotkeys.DIFF_SECONDARY),
        'reset_branch_head':
            qtutils.add_action(widget, N_('Reset Branch Head'),
                               widget.proxy.reset_branch_head),
        'reset_worktree':
            qtutils.add_action(widget, N_('Reset Worktree'),
                               widget.proxy.reset_worktree),
        'save_blob':
            qtutils.add_action(widget, N_('Grab File...'),
                               widget.proxy.save_blob_dialog),
        'copy':
            qtutils.add_action(widget, N_('Copy SHA-1'),
                               widget.proxy.copy_to_clipboard,
                               QtGui.QKeySequence.Copy),
    }
class CommitTreeWidgetItem(QtWidgets.QTreeWidgetItem):
    """Tree row displaying a commit's summary, author, and date"""

    def __init__(self, commit, parent=None):
        QtWidgets.QTreeWidgetItem.__init__(self, parent)
        self.commit = commit
        # Columns match the header labels set by CommitTreeWidget
        for column, text in enumerate(
                (commit.summary, commit.author, commit.authdate)):
            self.setText(column, text)
class CommitTreeWidget(standard.TreeWidget, ViewerMixin):
    """List view of commits with summary/author/date columns.

    Keeps its selection in sync with the graph view through the shared
    notifier's COMMITS_SELECTED channel.
    """

    # Emitted with (left_oid, right_oid) when a two-commit diff is requested
    diff_commits = Signal(object, object)

    def __init__(self, notifier, parent):
        standard.TreeWidget.__init__(self, parent=parent)
        ViewerMixin.__init__(self)
        self.setSelectionMode(self.ExtendedSelection)
        self.setHeaderLabels([N_('Summary'), N_('Author'), N_('Date, Time')])
        self.sha1map = {}  # sha1 (and tag) -> tree item, for select()
        self.menu_actions = None
        self.notifier = notifier
        self.selecting = False  # guards against selection feedback loops
        self.commits = []
        self.action_up = qtutils.add_action(self, N_('Go Up'),
                                            self.go_up, hotkeys.MOVE_UP)
        self.action_down = qtutils.add_action(self, N_('Go Down'),
                                              self.go_down, hotkeys.MOVE_DOWN)
        notifier.add_observer(diff.COMMITS_SELECTED, self.commits_selected)
        self.itemSelectionChanged.connect(self.selection_changed)

    # ViewerMixin
    def go_up(self):
        """Select the commit above the current one"""
        self.goto(self.itemAbove)

    def go_down(self):
        """Select the commit below the current one"""
        self.goto(self.itemBelow)

    def goto(self, finder):
        """Select the commit found by applying `finder` to the selection"""
        items = self.selected_items()
        # Conditional expression instead of the fragile `and/or` idiom
        item = items[0] if items else None
        if item is None:
            return
        found = finder(item)
        if found:
            self.select([found.commit.sha1])

    def selected_commit_range(self):
        """Return the (oldest, newest) selected SHA-1s, or (None, None)"""
        selected_items = self.selected_items()
        if not selected_items:
            return None, None
        return selected_items[-1].commit.sha1, selected_items[0].commit.sha1

    def set_selecting(self, selecting):
        self.selecting = selecting

    def selection_changed(self):
        """Broadcast the new selection to the other widgets"""
        items = self.selected_items()
        if not items:
            return
        self.set_selecting(True)
        self.notifier.notify_observers(diff.COMMITS_SELECTED,
                                       [i.commit for i in items])
        self.set_selecting(False)

    def commits_selected(self, commits):
        """Apply a selection that originated from another widget"""
        if self.selecting:
            return
        with qtutils.BlockSignals(self):
            self.select([commit.sha1 for commit in commits])

    def select(self, sha1s):
        """Select and scroll to the items for the given SHA-1s"""
        if not sha1s:
            return
        self.clearSelection()
        for idx, sha1 in enumerate(sha1s):
            try:
                item = self.sha1map[sha1]
            except KeyError:
                continue
            self.scrollToItem(item)
            item.setSelected(True)

    def adjust_columns(self):
        """Give 2/3 of the width to the summary; split the rest evenly"""
        width = self.width()-20
        zero = width*2//3
        onetwo = width//6
        self.setColumnWidth(0, zero)
        self.setColumnWidth(1, onetwo)
        self.setColumnWidth(2, onetwo)

    def clear(self):
        QtWidgets.QTreeWidget.clear(self)
        self.sha1map.clear()
        self.commits = []

    def add_commits(self, commits):
        """Append commits and index them (and their tags) for selection"""
        self.commits.extend(commits)
        items = []
        for c in reversed(commits):
            item = CommitTreeWidgetItem(c)
            items.append(item)
            self.sha1map[c.sha1] = item
            for tag in c.tags:
                self.sha1map[tag] = item
        self.insertTopLevelItems(0, items)

    def create_patch(self):
        """Format the selected commits as patches, oldest first"""
        items = self.selectedItems()
        if not items:
            return
        sha1s = [item.commit.sha1 for item in reversed(items)]
        all_sha1s = [c.sha1 for c in self.commits]
        cmds.do(cmds.FormatPatch, sha1s, all_sha1s)

    # Qt overrides
    def contextMenuEvent(self, event):
        self.context_menu_event(event)

    def mousePressEvent(self, event):
        # Right-click is reserved for the context menu; do not let it
        # alter the current selection.
        if event.button() == Qt.RightButton:
            event.accept()
            return
        QtWidgets.QTreeWidget.mousePressEvent(self, event)
class GitDAG(standard.MainWindow):
    """The git-dag widget.

    Hosts four dock widgets (log tree, graph, diff, file list) that share
    selection state through an Observable notifier, and a background
    ReaderThread that streams commits into the views.
    """

    # Re-emitted on the GUI thread when the model reports an update
    updated = Signal()

    def __init__(self, model, ctx, parent=None, settings=None):
        super(GitDAG, self).__init__(parent)
        self.setAttribute(Qt.WA_MacMetalStyle)
        self.setMinimumSize(420, 420)
        # change when widgets are added/removed
        self.widget_version = 2
        self.model = model
        self.ctx = ctx
        self.settings = settings
        self.commits = {}  # sha1 (and tag) -> commit object
        self.commit_list = []
        self.selection = []
        self.thread = ReaderThread(ctx, self)
        self.revtext = completion.GitLogLineEdit()
        self.maxresults = standard.SpinBox()
        self.zoom_out = qtutils.create_action_button(
                tooltip=N_('Zoom Out'), icon=icons.zoom_out())
        self.zoom_in = qtutils.create_action_button(
                tooltip=N_('Zoom In'), icon=icons.zoom_in())
        self.zoom_to_fit = qtutils.create_action_button(
                tooltip=N_('Zoom to Fit'), icon=icons.zoom_fit_best())
        self.notifier = notifier = observable.Observable()
        self.notifier.refs_updated = refs_updated = 'refs_updated'
        self.notifier.add_observer(refs_updated, self.display)
        self.notifier.add_observer(filelist.HISTORIES_SELECTED,
                                   self.histories_selected)
        self.notifier.add_observer(filelist.DIFFTOOL_SELECTED,
                                   self.difftool_selected)
        self.notifier.add_observer(diff.COMMITS_SELECTED, self.commits_selected)
        self.treewidget = CommitTreeWidget(notifier, self)
        self.diffwidget = diff.DiffWidget(notifier, self)
        self.filewidget = filelist.FileWidget(notifier, self)
        self.graphview = GraphView(notifier, self)
        # Keyboard/menu actions are forwarded to the focused viewer
        self.proxy = FocusRedirectProxy(self.treewidget,
                                        self.graphview,
                                        self.filewidget)
        self.viewer_actions = actions = viewer_actions(self)
        self.treewidget.menu_actions = actions
        self.graphview.menu_actions = actions
        self.controls_layout = qtutils.hbox(defs.no_margin, defs.spacing,
                                            self.revtext, self.maxresults)
        self.controls_widget = QtWidgets.QWidget()
        self.controls_widget.setLayout(self.controls_layout)
        self.log_dock = qtutils.create_dock(N_('Log'), self, stretch=False)
        self.log_dock.setWidget(self.treewidget)
        log_dock_titlebar = self.log_dock.titleBarWidget()
        log_dock_titlebar.add_corner_widget(self.controls_widget)
        self.file_dock = qtutils.create_dock(N_('Files'), self)
        self.file_dock.setWidget(self.filewidget)
        self.diff_dock = qtutils.create_dock(N_('Diff'), self)
        self.diff_dock.setWidget(self.diffwidget)
        self.graph_controls_layout = qtutils.hbox(
                defs.no_margin, defs.button_spacing,
                self.zoom_out, self.zoom_in, self.zoom_to_fit,
                defs.spacing)
        self.graph_controls_widget = QtWidgets.QWidget()
        self.graph_controls_widget.setLayout(self.graph_controls_layout)
        self.graphview_dock = qtutils.create_dock(N_('Graph'), self)
        self.graphview_dock.setWidget(self.graphview)
        graph_titlebar = self.graphview_dock.titleBarWidget()
        graph_titlebar.add_corner_widget(self.graph_controls_widget)
        self.lock_layout_action = qtutils.add_action_bool(
                self, N_('Lock Layout'), self.set_lock_layout, False)
        self.refresh_action = qtutils.add_action(
                self, N_('Refresh'), self.refresh, hotkeys.REFRESH)
        # Create the application menu
        self.menubar = QtWidgets.QMenuBar(self)
        # View Menu
        self.view_menu = qtutils.create_menu(N_('View'), self.menubar)
        self.view_menu.addAction(self.refresh_action)
        self.view_menu.addAction(self.log_dock.toggleViewAction())
        self.view_menu.addAction(self.graphview_dock.toggleViewAction())
        self.view_menu.addAction(self.diff_dock.toggleViewAction())
        self.view_menu.addAction(self.file_dock.toggleViewAction())
        self.view_menu.addSeparator()
        self.view_menu.addAction(self.lock_layout_action)
        self.menubar.addAction(self.view_menu.menuAction())
        self.setMenuBar(self.menubar)
        left = Qt.LeftDockWidgetArea
        right = Qt.RightDockWidgetArea
        self.addDockWidget(left, self.log_dock)
        self.addDockWidget(left, self.diff_dock)
        self.addDockWidget(right, self.graphview_dock)
        self.addDockWidget(right, self.file_dock)
        # Update fields affected by model
        self.revtext.setText(ctx.ref)
        self.maxresults.setValue(ctx.count)
        self.update_window_title()
        # Also re-loads dag.* from the saved state
        self.init_state(settings, self.resize_to_desktop)
        qtutils.connect_button(self.zoom_out, self.graphview.zoom_out)
        qtutils.connect_button(self.zoom_in, self.graphview.zoom_in)
        qtutils.connect_button(self.zoom_to_fit,
                               self.graphview.zoom_to_fit)
        thread = self.thread
        thread.begin.connect(self.thread_begin, type=Qt.QueuedConnection)
        thread.status.connect(self.thread_status, type=Qt.QueuedConnection)
        thread.add.connect(self.add_commits, type=Qt.QueuedConnection)
        thread.end.connect(self.thread_end, type=Qt.QueuedConnection)
        self.treewidget.diff_commits.connect(self.diff_commits)
        self.graphview.diff_commits.connect(self.diff_commits)
        self.maxresults.editingFinished.connect(self.display)
        self.revtext.textChanged.connect(self.text_changed)
        self.revtext.activated.connect(self.display)
        self.revtext.enter.connect(self.display)
        self.revtext.down.connect(self.focus_tree)
        # The model is updated in another thread so use
        # signals/slots to bring control back to the main GUI thread
        self.model.add_observer(self.model.message_updated, self.updated.emit)
        self.updated.connect(self.model_updated, type=Qt.QueuedConnection)
        qtutils.add_action(self, 'Focus Input', self.focus_input, hotkeys.FOCUS)
        qtutils.add_close_action(self)

    def focus_input(self):
        """Give keyboard focus to the revision text input"""
        self.revtext.setFocus()

    def focus_tree(self):
        """Give keyboard focus to the commit tree"""
        self.treewidget.setFocus()

    def text_changed(self, txt):
        self.ctx.ref = txt
        self.update_window_title()

    def update_window_title(self):
        """Include the project and current ref in the window title"""
        project = self.model.project
        if self.ctx.ref:
            self.setWindowTitle(N_('%(project)s: %(ref)s - DAG')
                                % dict(project=project, ref=self.ctx.ref))
        else:
            self.setWindowTitle(project + N_(' - DAG'))

    def export_state(self):
        """Persist the commit count alongside the window state"""
        state = standard.MainWindow.export_state(self)
        state['count'] = self.ctx.count
        return state

    def apply_state(self, state):
        """Restore window state; fall back to the context's count"""
        result = standard.MainWindow.apply_state(self, state)
        try:
            count = state['count']
            if self.ctx.overridden('count'):
                count = self.ctx.count
        except Exception:
            # Narrowed from a bare `except:` so SystemExit and
            # KeyboardInterrupt are no longer swallowed; missing or
            # malformed saved state still falls back gracefully.
            count = self.ctx.count
            result = False
        self.ctx.set_count(count)
        self.lock_layout_action.setChecked(state.get('lock_layout', False))
        return result

    def model_updated(self):
        self.display()

    def refresh(self):
        cmds.do(cmds.Refresh)

    def display(self):
        """Restart the reader thread with the current ref and count"""
        new_ref = self.revtext.value()
        new_count = self.maxresults.value()
        self.thread.stop()
        self.ctx.set_ref(new_ref)
        self.ctx.set_count(new_count)
        self.thread.start()

    def show(self):
        standard.MainWindow.show(self)
        self.treewidget.adjust_columns()

    def commits_selected(self, commits):
        if commits:
            self.selection = commits

    def clear(self):
        self.commits.clear()
        self.commit_list = []
        self.graphview.clear()
        self.treewidget.clear()

    def add_commits(self, commits):
        """Index incoming commits and feed them to the graph and tree"""
        self.commit_list.extend(commits)
        # Keep track of commits
        for commit_obj in commits:
            self.commits[commit_obj.sha1] = commit_obj
            for tag in commit_obj.tags:
                self.commits[tag] = commit_obj
        self.graphview.add_commits(commits)
        self.treewidget.add_commits(commits)

    def thread_begin(self):
        self.clear()

    def thread_end(self):
        self.focus_tree()
        self.restore_selection()

    def thread_status(self, successful):
        self.revtext.hint.set_error(not successful)

    def restore_selection(self):
        """Re-apply the previous selection after a reload"""
        selection = self.selection
        try:
            commit_obj = self.commit_list[-1]
        except IndexError:
            # No commits exist; early-out
            return
        new_commits = [self.commits.get(s.sha1, None) for s in selection]
        new_commits = [c for c in new_commits if c is not None]
        if new_commits:
            # The old selection exists in the new state
            self.notifier.notify_observers(diff.COMMITS_SELECTED, new_commits)
        else:
            # The old selection is now empty. Select the top-most commit
            self.notifier.notify_observers(diff.COMMITS_SELECTED, [commit_obj])
        self.graphview.update_scene_rect()
        self.graphview.set_initial_view()

    def diff_commits(self, a, b):
        """Diff two commits, honoring any path filter from the context"""
        paths = self.ctx.paths()
        if paths:
            difftool.launch(left=a, right=b, paths=paths)
        else:
            difftool.diff_commits(self, a, b)

    # Qt overrides
    def closeEvent(self, event):
        self.revtext.close_popup()
        self.thread.stop()
        standard.MainWindow.closeEvent(self, event)

    def resizeEvent(self, e):
        standard.MainWindow.resizeEvent(self, e)
        self.treewidget.adjust_columns()

    def histories_selected(self, histories):
        """Restrict the DAG to the selected file histories"""
        argv = [self.model.currentbranch, '--']
        argv.extend(histories)
        text = core.list2cmdline(argv)
        self.revtext.setText(text)
        self.display()

    def difftool_selected(self, files):
        """Launch difftool over the selected commit range for `files`"""
        bottom, top = self.treewidget.selected_commit_range()
        if not top:
            return
        difftool.launch(left=bottom, left_take_parent=True,
                        right=top, paths=files)
class ReaderThread(QtCore.QThread):
    """Background thread that streams commits from a dag.RepoReader.

    Emits `begin` before reading, `add` with batches of commits,
    `status` with the reader's success flag, and `end` when finished.
    Supports pause/resume via a mutex-protected condition variable.
    """

    begin = Signal()
    add = Signal(object)
    end = Signal()
    status = Signal(object)

    def __init__(self, ctx, parent):
        QtCore.QThread.__init__(self, parent)
        self.ctx = ctx
        self._abort = False
        self._stop = False
        self._mutex = QtCore.QMutex()
        self._condition = QtCore.QWaitCondition()

    def run(self):
        repo = dag.RepoReader(self.ctx)
        repo.reset()
        self.begin.emit()
        commits = []
        for c in repo:
            self._mutex.lock()
            if self._stop:
                # Paused: block until resume()/stop() wakes us
                self._condition.wait(self._mutex)
            self._mutex.unlock()
            if self._abort:
                repo.reset()
                return
            commits.append(c)
            # Emit in batches to keep the GUI responsive
            if len(commits) >= 512:
                self.add.emit(commits)
                commits = []
        self.status.emit(repo.returncode == 0)
        if commits:
            self.add.emit(commits)
        self.end.emit()

    def start(self):
        self._abort = False
        self._stop = False
        QtCore.QThread.start(self)

    def pause(self):
        self._mutex.lock()
        self._stop = True
        self._mutex.unlock()

    def resume(self):
        self._mutex.lock()
        self._stop = False
        self._mutex.unlock()
        self._condition.wakeOne()

    def stop(self):
        # Clear the pause flag and wake the thread before joining it;
        # otherwise stop() deadlocks in wait() when run() is blocked in
        # the paused-state condition wait above.
        self._mutex.lock()
        self._abort = True
        self._stop = False
        self._mutex.unlock()
        self._condition.wakeOne()
        self.wait()
class Cache(object):
    """Attribute namespace used as a paint-time cache (see Label.paint)"""
    pass
class Edge(QtWidgets.QGraphicsItem):
    """Connecting line between a commit item and one of its parents.

    Edges in the same column are straight vertical lines; edges that
    change column are drawn with small rounded corners in paint().
    """

    item_type = QtWidgets.QGraphicsItem.UserType + 1

    def __init__(self, source, dest):
        QtWidgets.QGraphicsItem.__init__(self)
        self.setAcceptedMouseButtons(Qt.NoButton)
        self.source = source
        self.dest = dest
        self.commit = source.commit
        self.setZValue(-2)  # draw underneath commit dots and labels
        # Endpoints are the centers of the two commit items, mapped into
        # this item's coordinates; `bound` is their enclosing rectangle.
        dest_pt = Commit.item_bbox.center()
        self.source_pt = self.mapFromItem(self.source, dest_pt)
        self.dest_pt = self.mapFromItem(self.dest, dest_pt)
        self.line = QtCore.QLineF(self.source_pt, self.dest_pt)
        width = self.dest_pt.x() - self.source_pt.x()
        height = self.dest_pt.y() - self.source_pt.y()
        rect = QtCore.QRectF(self.source_pt, QtCore.QSizeF(width, height))
        self.bound = rect.normalized()
        # Choose a new color for new branch edges (source left of dest);
        # every other edge reuses the current color.  The original
        # elif/else branches were byte-identical and have been merged.
        if self.source.x() < self.dest.x():
            color = EdgeColor.cycle()
        else:
            color = EdgeColor.current()
        line = Qt.SolidLine
        self.pen = QtGui.QPen(color, 4.0, line, Qt.SquareCap, Qt.RoundJoin)

    # Qt overrides
    def type(self):
        return self.item_type

    def boundingRect(self):
        return self.bound

    def paint(self, painter, option, widget):
        """Draw the edge, rounding the corners when it changes column"""
        arc_rect = 10
        connector_length = 5
        painter.setPen(self.pen)
        path = QtGui.QPainterPath()
        if self.source.x() == self.dest.x():
            # Same column: a straight vertical segment
            path.moveTo(self.source.x(), self.source.y())
            path.lineTo(self.dest.x(), self.dest.y())
            painter.drawPath(path)
        else:
            # Define points starting from source
            point1 = QPointF(self.source.x(), self.source.y())
            point2 = QPointF(point1.x(), point1.y() - connector_length)
            point3 = QPointF(point2.x() + arc_rect, point2.y() - arc_rect)
            # Define points starting from dest
            point4 = QPointF(self.dest.x(), self.dest.y())
            point5 = QPointF(point4.x(), point3.y() - arc_rect)
            point6 = QPointF(point5.x() - arc_rect, point5.y() + arc_rect)
            start_angle_arc1 = 180
            span_angle_arc1 = 90
            start_angle_arc2 = 90
            span_angle_arc2 = -90
            # If the dest is at the left of the source, then we
            # need to reverse some values
            if self.source.x() > self.dest.x():
                point5 = QPointF(point4.x(), point4.y() + connector_length)
                point6 = QPointF(point5.x() + arc_rect, point5.y() + arc_rect)
                point3 = QPointF(self.source.x() - arc_rect, point6.y())
                point2 = QPointF(self.source.x(), point3.y() + arc_rect)
                span_angle_arc1 = 90
            path.moveTo(point1)
            path.lineTo(point2)
            path.arcTo(QRectF(point2, point3),
                       start_angle_arc1, span_angle_arc1)
            path.lineTo(point6)
            path.arcTo(QRectF(point6, point5),
                       start_angle_arc2, span_angle_arc2)
            path.lineTo(point4)
            painter.drawPath(path)
class EdgeColor(object):
    """An edge color factory"""

    current_color_index = 0
    colors = [
        QtGui.QColor(Qt.red),
        QtGui.QColor(Qt.green),
        QtGui.QColor(Qt.blue),
        QtGui.QColor(Qt.black),
        QtGui.QColor(Qt.darkRed),
        QtGui.QColor(Qt.darkGreen),
        QtGui.QColor(Qt.darkBlue),
        QtGui.QColor(Qt.cyan),
        QtGui.QColor(Qt.magenta),
        # Orange; Qt.yellow is too low-contrast
        qtutils.rgba(0xff, 0x66, 0x00),
        QtGui.QColor(Qt.gray),
        QtGui.QColor(Qt.darkCyan),
        QtGui.QColor(Qt.darkMagenta),
        QtGui.QColor(Qt.darkYellow),
        QtGui.QColor(Qt.darkGray),
    ]

    @classmethod
    def cycle(cls):
        """Advance to the next color and return it with alpha applied"""
        cls.current_color_index = (
            (cls.current_color_index + 1) % len(cls.colors))
        color = cls.colors[cls.current_color_index]
        color.setAlpha(128)
        return color

    @classmethod
    def current(cls):
        """Return the color at the current cycle position"""
        return cls.colors[cls.current_color_index]

    @classmethod
    def reset(cls):
        """Restart the color cycle from the beginning"""
        cls.current_color_index = 0
class Commit(QtWidgets.QGraphicsItem):
    """Graphics item representing a single commit dot in the graph"""
    item_type = QtWidgets.QGraphicsItem.UserType + 2
    commit_radius = 12.0
    merge_radius = 18.0
    # Square item shape/bounding box centered on the origin
    item_shape = QtGui.QPainterPath()
    item_shape.addRect(commit_radius/-2.0,
                       commit_radius/-2.0,
                       commit_radius, commit_radius)
    item_bbox = item_shape.boundingRect()
    # Slightly smaller rect actually painted (leaves a 2px margin)
    inner_rect = QtGui.QPainterPath()
    inner_rect.addRect(commit_radius/-2.0 + 2.0,
                       commit_radius/-2.0 + 2.0,
                       commit_radius - 4.0,
                       commit_radius - 4.0)
    inner_rect = inner_rect.boundingRect()
    commit_color = QtGui.QColor(Qt.white)
    outline_color = commit_color.darker()
    merge_color = QtGui.QColor(Qt.lightGray)
    # Overridden with the palette highlight color by GraphView.__init__
    commit_selected_color = QtGui.QColor(Qt.green)
    selected_outline_color = commit_selected_color.darker()
    commit_pen = QtGui.QPen()
    commit_pen.setWidth(1.0)
    commit_pen.setColor(outline_color)
    def __init__(self, commit,
                 notifier,
                 selectable=QtWidgets.QGraphicsItem.ItemIsSelectable,
                 cursor=Qt.PointingHandCursor,
                 xpos=commit_radius/2.0 + 1.0,
                 cached_commit_color=commit_color,
                 cached_merge_color=merge_color):
        QtWidgets.QGraphicsItem.__init__(self)
        self.commit = commit
        self.notifier = notifier
        self.setZValue(0)
        self.setFlag(selectable)
        self.setCursor(cursor)
        self.setToolTip(commit.sha1[:7] + ': ' + commit.summary)
        # Tagged commits get a Label child item next to the dot
        if commit.tags:
            self.label = label = Label(commit)
            label.setParentItem(self)
            label.setPos(xpos, -self.commit_radius/2.0)
        else:
            self.label = None
        # Merge commits (more than one parent) are shaded differently
        if len(commit.parents) > 1:
            self.brush = cached_merge_color
        else:
            self.brush = cached_commit_color
        self.pressed = False
        self.dragged = False
    def blockSignals(self, blocked):
        # Not Qt signal blocking: toggles the shared notifier instead,
        # so itemChange() does not re-broadcast programmatic selection
        self.notifier.notification_enabled = not blocked
    def itemChange(self, change, value):
        """Broadcast selection changes and update the cached pen/brush"""
        if change == QtWidgets.QGraphicsItem.ItemSelectedHasChanged:
            # Broadcast selection to other widgets
            selected_items = self.scene().selectedItems()
            commits = [item.commit for item in selected_items]
            self.scene().parent().set_selecting(True)
            self.notifier.notify_observers(diff.COMMITS_SELECTED, commits)
            self.scene().parent().set_selecting(False)
            # Cache the pen for use in paint()
            if value:
                self.brush = self.commit_selected_color
                color = self.selected_outline_color
            else:
                if len(self.commit.parents) > 1:
                    self.brush = self.merge_color
                else:
                    self.brush = self.commit_color
                color = self.outline_color
            commit_pen = QtGui.QPen()
            commit_pen.setWidth(1.0)
            commit_pen.setColor(color)
            self.commit_pen = commit_pen
        return QtWidgets.QGraphicsItem.itemChange(self, change, value)
    def type(self):
        return self.item_type
    def boundingRect(self, rect=item_bbox):
        return rect
    def shape(self):
        return self.item_shape
    def paint(self, painter, option, widget,
              inner=inner_rect,
              cache=Cache):
        # Do not draw outside the exposed rect
        painter.setClipRect(option.exposedRect)
        # Draw ellipse
        painter.setPen(self.commit_pen)
        painter.setBrush(self.brush)
        painter.drawEllipse(inner)
    def mousePressEvent(self, event):
        QtWidgets.QGraphicsItem.mousePressEvent(self, event)
        self.pressed = True
        self.selected = self.isSelected()
    def mouseMoveEvent(self, event):
        if self.pressed:
            self.dragged = True
        QtWidgets.QGraphicsItem.mouseMoveEvent(self, event)
    def mouseReleaseEvent(self, event):
        QtWidgets.QGraphicsItem.mouseReleaseEvent(self, event)
        # NOTE(review): this early return leaves `pressed`/`dragged` set
        # after a plain left-click on an already-selected item --
        # presumably intentional to preserve the selection; confirm.
        if (not self.dragged and
                self.selected and
                event.button() == Qt.LeftButton):
            return
        self.pressed = False
        self.dragged = False
class Label(QtWidgets.QGraphicsItem):
    """Graphics item that renders a commit's tag/ref names as badges"""

    item_type = QtWidgets.QGraphicsItem.UserType + 3
    width = 72
    height = 18
    item_shape = QtGui.QPainterPath()
    item_shape.addRect(0, 0, width, height)
    item_bbox = item_shape.boundingRect()
    text_options = QtGui.QTextOption()
    text_options.setAlignment(Qt.AlignCenter)
    text_options.setAlignment(Qt.AlignVCenter)

    def __init__(self, commit,
                 other_color=QtGui.QColor(Qt.white),
                 head_color=QtGui.QColor(Qt.green)):
        QtWidgets.QGraphicsItem.__init__(self)
        self.setZValue(-1)
        # Starts with enough space for two tags. Any more and the commit
        # needs to be taller to accommodate.
        self.commit = commit
        # Copy the default color so setAlpha()/darker() below operate on
        # a per-instance object rather than mutating the shared default
        # argument (mutable-default fix; rendered colors are unchanged).
        if 'HEAD' in commit.tags:
            self.color = QtGui.QColor(head_color)
        else:
            self.color = QtGui.QColor(other_color)
        self.color.setAlpha(180)
        self.pen = QtGui.QPen()
        self.pen.setColor(self.color.darker())
        self.pen.setWidth(1.0)

    def type(self):
        return self.item_type

    def boundingRect(self, rect=item_bbox):
        return rect

    def shape(self):
        return self.item_shape

    def paint(self, painter, option, widget,
              text_opts=text_options,
              black=Qt.black,
              cache=Cache):
        # The application font is cached on the Cache namespace so it is
        # only created and resized once across all labels
        try:
            font = cache.label_font
        except AttributeError:
            font = cache.label_font = QtWidgets.QApplication.font()
            font.setPointSize(6)
        # Draw tags
        painter.setBrush(self.color)
        painter.setPen(self.pen)
        painter.setFont(font)
        current_width = 0
        for tag in self.commit.tags:
            text_rect = painter.boundingRect(
                QRectF(current_width, 0, 0, 0), Qt.TextSingleLine, tag)
            box_rect = text_rect.adjusted(-1, -1, 1, 1)
            painter.drawRoundedRect(box_rect, 2, 2)
            painter.drawText(text_rect, Qt.TextSingleLine, tag)
            current_width += text_rect.width() + 5
class GraphView(QtWidgets.QGraphicsView, ViewerMixin):
diff_commits = Signal(object, object)
x_min = 24
x_max = 0
y_min = 24
x_adjust = Commit.commit_radius*4/3
y_adjust = Commit.commit_radius*4/3
x_off = 18
y_off = 24
def __init__(self, notifier, parent):
QtWidgets.QGraphicsView.__init__(self, parent)
ViewerMixin.__init__(self)
highlight = self.palette().color(QtGui.QPalette.Highlight)
Commit.commit_selected_color = highlight
Commit.selected_outline_color = highlight.darker()
self.selection_list = []
self.menu_actions = None
self.notifier = notifier
self.commits = []
self.items = {}
self.saved_matrix = self.transform()
self.x_offsets = collections.defaultdict(lambda: self.x_min)
self.is_panning = False
self.pressed = False
self.selecting = False
self.last_mouse = [0, 0]
self.zoom = 2
self.setDragMode(self.RubberBandDrag)
scene = QtWidgets.QGraphicsScene(self)
scene.setItemIndexMethod(QtWidgets.QGraphicsScene.NoIndex)
self.setScene(scene)
self.setRenderHint(QtGui.QPainter.Antialiasing)
self.setViewportUpdateMode(self.BoundingRectViewportUpdate)
self.setCacheMode(QtWidgets.QGraphicsView.CacheBackground)
self.setTransformationAnchor(QtWidgets.QGraphicsView.AnchorUnderMouse)
self.setResizeAnchor(QtWidgets.QGraphicsView.NoAnchor)
self.setBackgroundBrush(QtGui.QColor(Qt.white))
qtutils.add_action(self, N_('Zoom In'), self.zoom_in,
hotkeys.ZOOM_IN, hotkeys.ZOOM_IN_SECONDARY)
qtutils.add_action(self, N_('Zoom Out'), self.zoom_out,
hotkeys.ZOOM_OUT)
qtutils.add_action(self, N_('Zoom to Fit'),
self.zoom_to_fit, hotkeys.FIT)
qtutils.add_action(self, N_('Select Parent'),
self.select_parent, hotkeys.MOVE_DOWN_TERTIARY)
qtutils.add_action(self, N_('Select Oldest Parent'),
self.select_oldest_parent, hotkeys.MOVE_DOWN)
qtutils.add_action(self, N_('Select Child'),
self.select_child, hotkeys.MOVE_UP_TERTIARY)
qtutils.add_action(self, N_('Select Newest Child'),
self.select_newest_child, hotkeys.MOVE_UP)
notifier.add_observer(diff.COMMITS_SELECTED, self.commits_selected)
def clear(self):
EdgeColor.reset()
self.scene().clear()
self.selection_list = []
self.items.clear()
self.x_offsets.clear()
self.x_max = 24
self.y_min = 24
self.commits = []
# ViewerMixin interface
def selected_items(self):
"""Return the currently selected items"""
return self.scene().selectedItems()
def zoom_in(self):
self.scale_view(1.5)
def zoom_out(self):
self.scale_view(1.0/1.5)
def commits_selected(self, commits):
if self.selecting:
return
self.select([commit.sha1 for commit in commits])
def select(self, sha1s):
"""Select the item for the SHA-1"""
self.scene().clearSelection()
for sha1 in sha1s:
try:
item = self.items[sha1]
except KeyError:
continue
item.blockSignals(True)
item.setSelected(True)
item.blockSignals(False)
item_rect = item.sceneTransform().mapRect(item.boundingRect())
self.ensureVisible(item_rect)
def get_item_by_generation(self, commits, criteria_fn):
"""Return the item for the commit matching criteria"""
if not commits:
return None
generation = None
for commit in commits:
if (generation is None or
criteria_fn(generation, commit.generation)):
sha1 = commit.sha1
generation = commit.generation
try:
return self.items[sha1]
except KeyError:
return None
def oldest_item(self, commits):
"""Return the item for the commit with the oldest generation number"""
return self.get_item_by_generation(commits, lambda a, b: a > b)
def newest_item(self, commits):
"""Return the item for the commit with the newest generation number"""
return self.get_item_by_generation(commits, lambda a, b: a < b)
    def create_patch(self):
        """Run the FormatPatch command over the currently selected commits."""
        items = self.selected_items()
        if not items:
            return
        # Commits are ordered oldest-first before formatting.
        selected_commits = self.sort_by_generation([n.commit for n in items])
        sha1s = [c.sha1 for c in selected_commits]
        all_sha1s = [c.sha1 for c in self.commits]
        cmds.do(cmds.FormatPatch, sha1s, all_sha1s)
def select_parent(self):
"""Select the parent with the newest generation number"""
selected_item = self.selected_item()
if selected_item is None:
return
parent_item = self.newest_item(selected_item.commit.parents)
if parent_item is None:
return
selected_item.setSelected(False)
parent_item.setSelected(True)
self.ensureVisible(
parent_item.mapRectToScene(parent_item.boundingRect()))
def select_oldest_parent(self):
"""Select the parent with the oldest generation number"""
selected_item = self.selected_item()
if selected_item is None:
return
parent_item = self.oldest_item(selected_item.commit.parents)
if parent_item is None:
return
selected_item.setSelected(False)
parent_item.setSelected(True)
scene_rect = parent_item.mapRectToScene(parent_item.boundingRect())
self.ensureVisible(scene_rect)
def select_child(self):
"""Select the child with the oldest generation number"""
selected_item = self.selected_item()
if selected_item is None:
return
child_item = self.oldest_item(selected_item.commit.children)
if child_item is None:
return
selected_item.setSelected(False)
child_item.setSelected(True)
scene_rect = child_item.mapRectToScene(child_item.boundingRect())
self.ensureVisible(scene_rect)
def select_newest_child(self):
"""Select the Nth child with the newest generation number (N > 1)"""
selected_item = self.selected_item()
if selected_item is None:
return
if len(selected_item.commit.children) > 1:
children = selected_item.commit.children[1:]
else:
children = selected_item.commit.children
child_item = self.newest_item(children)
if child_item is None:
return
selected_item.setSelected(False)
child_item.setSelected(True)
scene_rect = child_item.mapRectToScene(child_item.boundingRect())
self.ensureVisible(scene_rect)
def set_initial_view(self):
self_commits = self.commits
self_items = self.items
items = self.selected_items()
if not items:
commits = self_commits[-8:]
items = [self_items[c.sha1] for c in commits]
self.fit_view_to_items(items)
def zoom_to_fit(self):
"""Fit selected items into the viewport"""
items = self.selected_items()
self.fit_view_to_items(items)
    def fit_view_to_items(self, items):
        """Fit the viewport around `items`, or everything when empty."""
        if not items:
            rect = self.scene().itemsBoundingRect()
        else:
            # Bounding box over item positions, padded by multiples of the
            # item size so neighboring commits remain visible.
            x_min = y_min = maxsize
            x_max = y_max = -maxsize
            for item in items:
                pos = item.pos()
                item_rect = item.boundingRect()
                x_off = item_rect.width() * 5
                y_off = item_rect.height() * 10
                x_min = min(x_min, pos.x())
                y_min = min(y_min, pos.y()-y_off)
                x_max = max(x_max, pos.x()+x_off)
                y_max = max(y_max, pos.y())
            rect = QtCore.QRectF(x_min, y_min, x_max-x_min, y_max-y_min)
        # Grow the rectangle by the class-level margins on every side.
        x_adjust = GraphView.x_adjust
        y_adjust = GraphView.y_adjust
        rect.setX(rect.x() - x_adjust)
        rect.setY(rect.y() - y_adjust)
        rect.setHeight(rect.height() + y_adjust*2)
        rect.setWidth(rect.width() + x_adjust*2)
        self.fitInView(rect, Qt.KeepAspectRatio)
        self.scene().invalidate()
def save_selection(self, event):
if event.button() != Qt.LeftButton:
return
elif Qt.ShiftModifier != event.modifiers():
return
self.selection_list = self.selected_items()
def restore_selection(self, event):
if Qt.ShiftModifier != event.modifiers():
return
for item in self.selection_list:
item.setSelected(True)
    def handle_event(self, event_handler, event):
        """Invoke a QGraphicsView event handler while preserving any
        shift-click selection across the call, then repaint."""
        self.save_selection(event)
        event_handler(self, event)
        self.restore_selection(event)
        self.update()
    def set_selecting(self, selecting):
        """Record whether a selection operation is currently in progress."""
        self.selecting = selecting
    def pan(self, event):
        """Translate the view while a middle-button drag is active."""
        pos = event.pos()
        dx = pos.x() - self.mouse_start[0]
        dy = pos.y() - self.mouse_start[1]
        if dx == 0 and dy == 0:
            return
        # Convert the pixel delta into scene units.
        rect = QtCore.QRect(0, 0, abs(dx), abs(dy))
        delta = self.mapToScene(rect).boundingRect()
        tx = delta.width()
        if dx < 0.0:
            tx = -tx
        ty = delta.height()
        if dy < 0.0:
            ty = -ty
        # Rebuild the transform from the matrix saved when the drag began,
        # so the translation is absolute rather than cumulative per event.
        matrix = self.transform()
        matrix.reset()
        matrix *= self.saved_matrix
        matrix.translate(tx, ty)
        self.setTransformationAnchor(QtWidgets.QGraphicsView.NoAnchor)
        self.setTransform(matrix)
    def wheel_zoom(self, event):
        """Handle mouse wheel zooming."""
        delta = qtcompat.wheel_delta(event)
        zoom = math.pow(2.0, delta/512.0)
        # Clamp the cumulative scale factor: measure what a unit square
        # would become under the new transform and bail outside the range.
        factor = (self.transform()
                  .scale(zoom, zoom)
                  .mapRect(QtCore.QRectF(0.0, 0.0, 1.0, 1.0))
                  .width())
        if factor < 0.014 or factor > 42.0:
            return
        self.setTransformationAnchor(QtWidgets.QGraphicsView.AnchorUnderMouse)
        self.zoom = zoom
        self.scale(zoom, zoom)
    def wheel_pan(self, event):
        """Handle mouse wheel panning."""
        # Scale the translation by the inverse zoom so panning speed stays
        # constant in screen space regardless of the current zoom level.
        unit = QtCore.QRectF(0.0, 0.0, 1.0, 1.0)
        factor = 1.0 / self.transform().mapRect(unit).width()
        tx, ty = qtcompat.wheel_translation(event)
        matrix = self.transform().translate(tx * factor, ty * factor)
        self.setTransformationAnchor(QtWidgets.QGraphicsView.NoAnchor)
        self.setTransform(matrix)
def scale_view(self, scale):
factor = (self.transform()
.scale(scale, scale)
.mapRect(QtCore.QRectF(0, 0, 1, 1))
.width())
if factor < 0.07 or factor > 100.0:
return
self.zoom = scale
adjust_scrollbars = True
scrollbar = self.verticalScrollBar()
if scrollbar:
value = scrollbar.value()
min_ = scrollbar.minimum()
max_ = scrollbar.maximum()
range_ = max_ - min_
distance = value - min_
nonzero_range = range_ > 0.1
if nonzero_range:
scrolloffset = distance/range_
else:
adjust_scrollbars = False
self.setTransformationAnchor(QtWidgets.QGraphicsView.NoAnchor)
self.scale(scale, scale)
scrollbar = self.verticalScrollBar()
if scrollbar and adjust_scrollbars:
min_ = scrollbar.minimum()
max_ = scrollbar.maximum()
range_ = max_ - min_
value = min_ + int(float(range_) * scrolloffset)
scrollbar.setValue(value)
    def add_commits(self, commits):
        """Traverse commits and add them to the view."""
        self.commits.extend(commits)
        scene = self.scene()
        for commit in commits:
            item = Commit(commit, self.notifier)
            self.items[commit.sha1] = item
            # Tag refs resolve to the same item as the commit they point at.
            for ref in commit.tags:
                self.items[ref] = item
            scene.addItem(item)
        self.layout_commits(commits)
        self.link(commits)
    def link(self, commits):
        """Create edges linking commits with their parents"""
        scene = self.scene()
        for commit in commits:
            try:
                commit_item = self.items[commit.sha1]
            except KeyError:
                # TODO - Handle truncated history viewing
                continue
            for parent in reversed(commit.parents):
                try:
                    parent_item = self.items[parent.sha1]
                except KeyError:
                    # TODO - Handle truncated history viewing
                    continue
                edge = Edge(parent_item, commit_item)
                scene.addItem(edge)
def layout_commits(self, nodes):
positions = self.position_nodes(nodes)
for sha1, (x, y) in positions.items():
item = self.items[sha1]
item.setPos(x, y)
    def position_nodes(self, nodes):
        """Compute (x, y) scene positions for each node.

        Returns a dict mapping sha1 -> (x, y).  Layout grows upward:
        y decreases with generation; per-generation x offsets are tracked
        in self.x_offsets so columns do not overlap.  Also updates
        self.x_max and self.y_min for update_scene_rect().
        """
        positions = {}
        x_max = self.x_max
        y_min = self.y_min
        x_off = self.x_off
        y_off = self.y_off
        x_offsets = self.x_offsets
        for node in nodes:
            generation = node.generation
            sha1 = node.sha1
            if node.is_fork():
                # This is a fan-out so sweep over child generations and
                # shift them to the right to avoid overlapping edges
                child_gens = [c.generation for c in node.children]
                maxgen = max(child_gens)
                for g in range(generation + 1, maxgen):
                    x_offsets[g] += x_off
            if len(node.parents) == 1:
                # Align nodes relative to their parents
                parent_gen = node.parents[0].generation
                parent_off = x_offsets[parent_gen]
                x_offsets[generation] = max(parent_off-x_off,
                                            x_offsets[generation])
            # Claim the current column and advance the offset for the next
            # node in this generation.
            cur_xoff = x_offsets[generation]
            next_xoff = cur_xoff
            next_xoff += x_off
            x_offsets[generation] = next_xoff
            x_pos = cur_xoff
            y_pos = -generation * y_off
            y_pos = min(y_pos, y_min - y_off)
            # y_pos = y_off
            positions[sha1] = (x_pos, y_pos)
            x_max = max(x_max, x_pos)
            y_min = y_pos
        self.x_max = x_max
        self.y_min = y_min
        return positions
    def update_scene_rect(self):
        """Resize the scene rect to cover the laid-out commits plus the
        class-level margins."""
        y_min = self.y_min
        x_max = self.x_max
        self.scene().setSceneRect(-GraphView.x_adjust,
                                  y_min-GraphView.y_adjust,
                                  x_max + GraphView.x_adjust,
                                  abs(y_min) + GraphView.y_adjust)
def sort_by_generation(self, commits):
if len(commits) < 2:
return commits
commits.sort(key=lambda x: x.generation)
return commits
    # Qt overrides
    def contextMenuEvent(self, event):
        """Delegate context-menu events to context_menu_event()."""
        self.context_menu_event(event)
def mousePressEvent(self, event):
if event.button() == Qt.MidButton:
pos = event.pos()
self.mouse_start = [pos.x(), pos.y()]
self.saved_matrix = self.transform()
self.is_panning = True
return
if event.button() == Qt.RightButton:
event.ignore()
return
if event.button() == Qt.LeftButton:
self.pressed = True
self.handle_event(QtWidgets.QGraphicsView.mousePressEvent, event)
    def mouseMoveEvent(self, event):
        """Track the cursor in scene coordinates; pan when middle-dragging."""
        pos = self.mapToScene(event.pos())
        if self.is_panning:
            self.pan(event)
            return
        self.last_mouse[0] = pos.x()
        self.last_mouse[1] = pos.y()
        self.handle_event(QtWidgets.QGraphicsView.mouseMoveEvent, event)
        if self.pressed:
            # Repaint while the left button is held so the rubber-band
            # selection stays visually current.
            self.viewport().repaint()
    def mouseReleaseEvent(self, event):
        """Finish panning (middle button) or a click/selection gesture."""
        self.pressed = False
        if event.button() == Qt.MidButton:
            self.is_panning = False
            return
        self.handle_event(QtWidgets.QGraphicsView.mouseReleaseEvent, event)
        # The saved shift-selection is only needed for the gesture duration.
        self.selection_list = []
        self.viewport().repaint()
def wheelEvent(self, event):
"""Handle Qt mouse wheel events."""
if event.modifiers() & Qt.ControlModifier:
self.wheel_zoom(event)
else:
self.wheel_pan(event)
# Glossary
# ========
# oid -- Git objects IDs (i.e. SHA-1 IDs)
# ref -- Git references that resolve to a commit-ish (HEAD, branches, tags)
| gpl-2.0 |
kohnle-lernmodule/palama | twisted/trial/test/test_util.py | 14 | 7278 | from twisted.python import log
from twisted.internet import utils
from twisted.internet import defer, reactor, threads, interfaces
from twisted.trial import unittest, util
from twisted.trial.test import packages
import sys, os, time
# Suppress the deprecation warning emitted when tests exercise util.wait().
suppress = [(['ignore', 'Do NOT use wait.*'], {})]
class WaitReentrancyTest(unittest.TestCase):
    """Exercise the non-reentrancy guarantees of trial's util.wait()."""
    # Class-level skip: trial honors a `skip` attribute when the reactor
    # lacks thread support.
    if interfaces.IReactorThreads(reactor, None) is None:
        skip = ("This test probably doesn't really need threads "
                "but hell if I can figure out how to rewrite it "
                "without them. Skipping in the absence of "
                "thread-support.")
    def _returnedDeferredThenWait(self):
        # Fire a Deferred from a worker thread, then call wait() inside
        # its callback chain.
        def threadedOperation():
            time.sleep(0.1)
            return "Beginning"
        d = threads.deferToThread(threadedOperation)
        return d.addCallback(self._cbDoWait)
    def _cbDoWait(self, result):
        self.assertEquals(result, "Beginning")
        d = defer.succeed("End")
        self.assertEquals(util.wait(d), "End")
    def testReturnedDeferredThenWait(self):
        d = self._returnedDeferredThenWait()
        self.assertRaises(util.WaitIsNotReentrantError, util.wait, d)
    def _reentrantWait(self):
        # Nested wait(): the callback of the outer wait()ed Deferred calls
        # wait() again, which must raise WaitIsNotReentrantError.
        def threadedOperation(n):
            time.sleep(n)
            return n
        d1 = threads.deferToThread(threadedOperation, 0.125)
        d2 = threads.deferToThread(threadedOperation, 0.250)
        d1.addCallback(lambda ignored: util.wait(d2))
        util.wait(d1)
    def testReentrantWait(self):
        self.assertRaises(util.WaitIsNotReentrantError, self._reentrantWait)
    def test_twoWaitImplementations(self):
        # If this test times out, then wait is being re-entered.
        tc = TestMktemp('test_name')
        tc._timedOut = False # whitebox
        d = defer.Deferred()
        def _runsInsideWait(r):
            d = defer.Deferred()
            self.assertRaises(util.WaitIsNotReentrantError, util.wait, d)
        d.addCallback(utils.suppressWarnings(_runsInsideWait, *suppress))
        reactor.callLater(0, d.callback, 'yo')
        tc._wait(d)
    test_twoWaitImplementations.timeout = 4
class TestMktemp(unittest.TestCase):
    """Tests for TestCase.mktemp(): naming, uniqueness, and location."""
    def test_name(self):
        # The path encodes module, class, and test method names.
        name = self.mktemp()
        dirs = os.path.dirname(name).split(os.sep)[:-1]
        self.failUnlessEqual(
            dirs, ['twisted.trial.test.test_util', 'TestMktemp', 'test_name'])
    def test_unique(self):
        name = self.mktemp()
        self.failIfEqual(name, self.mktemp())
    def test_created(self):
        # The parent directory exists; the leaf path itself does not.
        name = self.mktemp()
        dirname = os.path.dirname(name)
        self.failUnless(os.path.exists(dirname))
        self.failIf(os.path.exists(name))
    def test_location(self):
        path = os.path.abspath(self.mktemp())
        self.failUnless(path.startswith(os.getcwd()))
class TestWaitInterrupt(unittest.TestCase):
    """Verify that KeyboardInterrupt propagates through util.wait()."""
    def raiseKeyInt(self, ignored):
        # XXX Abstraction violation, I suppose.  However: signals are
        # unreliable, so using them to simulate a KeyboardInterrupt
        # would be sketchy too; os.kill() is not available on Windows,
        # so we can't use that and let this run on Win32; raising
        # KeyboardInterrupt itself is wholely unrealistic, as the
        # reactor would normally block SIGINT for its own purposes and
        # not allow a KeyboardInterrupt to happen at all!
        if interfaces.IReactorThreads.providedBy(reactor):
            reactor.callInThread(reactor.sigInt)
        else:
            reactor.callLater(0, reactor.sigInt)
        return defer.Deferred()
    def setUp(self):
        self.shutdownCalled = False
    def testKeyboardInterrupt(self):
        # Test the KeyboardInterrupt is *not* caught by wait -- we
        # want to allow users to Ctrl-C test runs. And the use of the
        # useWaitError should not matter in this case.
        d = defer.Deferred()
        d.addCallback(self.raiseKeyInt)
        reactor.callLater(0, d.callback, None)
        self.assertRaises(KeyboardInterrupt, util.wait, d, useWaitError=False)
    def _shutdownCalled(self):
        self.shutdownCalled = True
    def test_interruptDoesntShutdown(self):
        # A Ctrl-C during wait() must not trigger reactor shutdown.
        reactor.addSystemEventTrigger('after', 'shutdown',
                                      self._shutdownCalled)
        d = defer.Deferred()
        d.addCallback(self.raiseKeyInt)
        reactor.callLater(0, d.callback, None)
        try:
            util.wait(d, useWaitError=False)
        except KeyboardInterrupt:
            self.failIf(self.shutdownCalled,
                        "System shutdown triggered")
        else:
            self.fail("KeyboardInterrupt wasn't raised")
# glyph's contributed test
# http://twistedmatrix.com/bugs/file317/failing.py
class FakeException(Exception):
    """Marker exception used to exercise error-log flushing below."""
    pass
def die():
    """Raise a FakeException and log it, leaving an error in the log."""
    try:
        raise FakeException()
    except:
        # Bare except is deliberate in this helper: log whatever was raised.
        log.err()
class MyTest(unittest.TestCase):
    """Logged errors can be flushed both after wait() and on their own."""
    def testFlushAfterWait(self):
        die()
        util.wait(defer.succeed(''))
        log.flushErrors(FakeException)
    def testFlushByItself(self):
        die()
        log.flushErrors(FakeException)
class TestIntrospection(unittest.TestCase):
    """Tests for util.getPythonContainers."""
    def test_containers(self):
        import suppression
        # A method's containers should be its class followed by its module.
        parents = util.getPythonContainers(
            suppression.TestSuppression2.testSuppressModule)
        expected = [ suppression.TestSuppression2,
                     suppression ]
        for a, b in zip(parents, expected):
            self.failUnlessEqual(a, b)
class TestFindObject(packages.PackageTest):
    """Tests for util.findObject, which returns (found, object) pairs."""
    def setUp(self):
        # Make the fixture packages importable for the duration of the test.
        packages.PackageTest.setUp(self, '_TestFindObject')
        self.oldPath = sys.path[:]
        sys.path.append('_TestFindObject')
    def tearDown(self):
        sys.path = self.oldPath
        packages.PackageTest.tearDown(self, '_TestFindObject')
    def test_importPackage(self):
        package1 = util.findObject('package')
        import package as package2
        self.failUnlessEqual(package1, (True, package2))
    def test_importModule(self):
        test_sample2 = util.findObject('goodpackage.test_sample')
        from goodpackage import test_sample
        self.failUnlessEqual((True, test_sample), test_sample2)
    def test_importError(self):
        # Errors raised at import time propagate out of findObject.
        self.failUnlessRaises(ZeroDivisionError,
                              util.findObject, 'package.test_bad_module')
    def test_sophisticatedImportError(self):
        self.failUnlessRaises(ImportError,
                              util.findObject, 'package2.test_module')
    def test_importNonexistentPackage(self):
        self.failUnlessEqual(util.findObject('doesntexist')[0], False)
    def test_findNonexistentModule(self):
        self.failUnlessEqual(util.findObject('package.doesntexist')[0], False)
    def test_findNonexistentObject(self):
        self.failUnlessEqual(util.findObject(
            'goodpackage.test_sample.doesnt')[0], False)
        self.failUnlessEqual(util.findObject(
            'goodpackage.test_sample.AlphabetTest.doesntexist')[0], False)
    def test_findObjectExist(self):
        alpha1 = util.findObject('goodpackage.test_sample.AlphabetTest')
        from goodpackage import test_sample
        self.failUnlessEqual(alpha1, (True, test_sample.AlphabetTest))
| gpl-2.0 |
iuscommunity/ius-tools | src/iustools.version_tracker/iustools/bootstrap/version_tracker.py | 1 | 1771 | """
This bootstrap module should be used to setup parts of the version_tracker plugin
that need to exist before all controllers are loaded. It is best used to
define/register hooks, setup namespaces, and the like.
"""
from pkg_resources import get_distribution
from cement.core.namespace import CementNamespace, register_namespace
VERSION = get_distribution('iustools.version_tracker').version
# Setup the 'version_tracker' namespace object
version_tracker = CementNamespace(
    label='version_tracker',
    description='Version Tracker Plugin for IUS Tools',
    version=VERSION,
    controller='VersionTrackerController',
    provider='iustools'
    )
# Directory where Package Configuration is kept
version_tracker.config['pkg_dir'] = '/usr/share/ius-tools/version_tracker/pkgs/'
version_tracker.config['ius_baseurl'] = 'http://dl.iuscommunity.org/pub/ius'
# Layout for output
version_tracker.config['layout'] = '%-30s %-15s %-15s %s'
version_tracker.config['layout_titles'] = ('name', 'ius ver', 'upstream ver', 'status')
# Officialize and register the namespace
register_namespace(version_tracker)
# Plugin options
# --launchpad: also file Launchpad tickets (off by default).
version_tracker.config['launchpad'] = None
version_tracker.options.add_option('--launchpad', action='store_true', dest='launchpad',
    help='if you wish the tool to add Launchpad tickets', default=None)
# --email: send the report to the recipients configured below.
version_tracker.config['email'] = None
version_tracker.options.add_option('--email', action='store_true', dest='email',
    help='send output in email to configured recipients', default=None)
# Configuration for --email Email notifications
version_tracker.config['fromaddr'] = 'nobody@example.com'
version_tracker.config['toaddr'] = 'nobody@example.com'
version_tracker.config['subject'] = '[ius-community] IUS Version Tracker'
| gpl-2.0 |
diogocs1/comps | web/addons/account/wizard/account_period_close.py | 341 | 2646 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp.tools.translate import _
class account_period_close(osv.osv_memory):
    """
    close period
    """
    _name = "account.period.close"
    _description = "period close"
    _columns = {
        'sure': fields.boolean('Check this box'),
    }
    def data_save(self, cr, uid, ids, context=None):
        """
        This function close period
        @param cr: the current row, from the database cursor,
        @param uid: the current user’s ID for security checks,
        @param ids: account period close’s ID or list of IDs
        """
        journal_period_pool = self.pool.get('account.journal.period')
        period_pool = self.pool.get('account.period')
        account_move_obj = self.pool.get('account.move')
        mode = 'done'
        for form in self.read(cr, uid, ids, context=context):
            if form['sure']:
                for id in context['active_ids']:
                    # Refuse to close a period that still has draft entries.
                    account_move_ids = account_move_obj.search(cr, uid, [('period_id', '=', id), ('state', '=', "draft")], context=context)
                    if account_move_ids:
                        raise osv.except_osv(_('Invalid Action!'), _('In order to close a period, you must first post related journal entries.'))
                    # Update states via SQL, then drop the ORM cache so the
                    # stale records are re-read.
                    cr.execute('update account_journal_period set state=%s where period_id=%s', (mode, id))
                    cr.execute('update account_period set state=%s where id=%s', (mode, id))
                    self.invalidate_cache(cr, uid, context=context)
        return {'type': 'ir.actions.act_window_close'}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| apache-2.0 |
xifle/home-assistant | homeassistant/components/light/lifx.py | 15 | 8350 | """
Support for the LIFX platform that implements lights.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/light.lifx/
"""
import colorsys
import logging
import voluptuous as vol
from homeassistant.components.light import (
ATTR_BRIGHTNESS, ATTR_COLOR_TEMP, ATTR_RGB_COLOR, ATTR_TRANSITION,
SUPPORT_BRIGHTNESS, SUPPORT_COLOR_TEMP, SUPPORT_RGB_COLOR,
SUPPORT_TRANSITION, Light, PLATFORM_SCHEMA)
from homeassistant.helpers.event import track_time_change
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
REQUIREMENTS = ['liffylights==0.9.4']
# Value ranges used to convert between HA units and LIFX wire units.
BYTE_MAX = 255
CONF_BROADCAST = 'broadcast'
CONF_SERVER = 'server'
SHORT_MAX = 65535
# Color temperature bounds: TEMP_* in Kelvin (LIFX), TEMP_*_HASS in the
# Home Assistant color_temp scale (see the color_temp property below).
TEMP_MAX = 9000
TEMP_MAX_HASS = 500
TEMP_MIN = 2500
TEMP_MIN_HASS = 154
SUPPORT_LIFX = (SUPPORT_BRIGHTNESS | SUPPORT_COLOR_TEMP | SUPPORT_RGB_COLOR |
                SUPPORT_TRANSITION)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Optional(CONF_SERVER, default=None): cv.string,
    vol.Optional(CONF_BROADCAST, default=None): cv.string,
})
# pylint: disable=unused-argument
def setup_platform(hass, config, add_devices, discovery_info=None):
    """Setup the LIFX platform."""
    server_addr = config.get(CONF_SERVER)
    broadcast_addr = config.get(CONF_BROADCAST)
    lifx_library = LIFX(add_devices, server_addr, broadcast_addr)
    # Register our poll service: probe the network twice a minute,
    # at seconds :10 and :40.
    track_time_change(hass, lifx_library.poll, second=[10, 40])
    lifx_library.probe()
class LIFX(object):
    """Bridge between liffylights callbacks and Home Assistant entities."""
    def __init__(self, add_devices_callback, server_addr=None,
                 broadcast_addr=None):
        """Wire up the liffylights client with our callbacks."""
        import liffylights
        self._devices = []
        self._add_devices_callback = add_devices_callback
        self._liffylights = liffylights.LiffyLights(
            self.on_device, self.on_power, self.on_color, server_addr,
            broadcast_addr)
    def find_bulb(self, ipaddr):
        """Return the known bulb with the given IP address, or None."""
        bulb = None
        for device in self._devices:
            if device.ipaddr == ipaddr:
                bulb = device
                break
        return bulb
    def on_device(self, ipaddr, name, power, hue, sat, bri, kel):
        """Register a newly discovered bulb or refresh a known one."""
        bulb = self.find_bulb(ipaddr)
        if bulb is None:
            _LOGGER.debug("new bulb %s %s %d %d %d %d %d",
                          ipaddr, name, power, hue, sat, bri, kel)
            bulb = LIFXLight(
                self._liffylights, ipaddr, name, power, hue, sat, bri, kel)
            self._devices.append(bulb)
            self._add_devices_callback([bulb])
        else:
            _LOGGER.debug("update bulb %s %s %d %d %d %d %d",
                          ipaddr, name, power, hue, sat, bri, kel)
            bulb.set_power(power)
            bulb.set_color(hue, sat, bri, kel)
            bulb.update_ha_state()
    def on_color(self, ipaddr, hue, sat, bri, kel):
        """Handle a color-change report from a bulb."""
        bulb = self.find_bulb(ipaddr)
        if bulb is not None:
            bulb.set_color(hue, sat, bri, kel)
            bulb.update_ha_state()
    def on_power(self, ipaddr, power):
        """Handle a power-change report from a bulb."""
        bulb = self.find_bulb(ipaddr)
        if bulb is not None:
            bulb.set_power(power)
            bulb.update_ha_state()
    # pylint: disable=unused-argument
    def poll(self, now):
        """Periodic poll callback: re-probe the network."""
        self.probe()
    def probe(self, address=None):
        """Probe for bulbs, optionally at a specific address."""
        self._liffylights.probe(address)
def convert_rgb_to_hsv(rgb):
    """Convert Home Assistant RGB values (0-255) to LIFX HSV shorts."""
    red, green, blue = (channel / BYTE_MAX for channel in rgb)
    hue, saturation, value = colorsys.rgb_to_hsv(red, green, blue)
    return [int(component * SHORT_MAX)
            for component in (hue, saturation, value)]
class LIFXLight(Light):
    """Representation of a LIFX light."""
    def __init__(self, liffy, ipaddr, name, power, hue, saturation, brightness,
                 kelvin):
        """Initialize the light from its discovery report."""
        _LOGGER.debug("LIFXLight: %s %s", ipaddr, name)
        self._liffylights = liffy
        self._ip = ipaddr
        self.set_name(name)
        self.set_power(power)
        self.set_color(hue, saturation, brightness, kelvin)
    @property
    def should_poll(self):
        """No polling needed for LIFX light."""
        return False
    @property
    def name(self):
        """Return the name of the device."""
        return self._name
    @property
    def ipaddr(self):
        """Return the IP address of the device."""
        return self._ip
    @property
    def rgb_color(self):
        """Return the RGB value."""
        _LOGGER.debug(
            "rgb_color: [%d %d %d]", self._rgb[0], self._rgb[1], self._rgb[2])
        return self._rgb
    @property
    def brightness(self):
        """Return the brightness of this light between 0..255."""
        # Internal brightness is a 16-bit value; scale down to a byte.
        brightness = int(self._bri / (BYTE_MAX + 1))
        _LOGGER.debug("brightness: %d", brightness)
        return brightness
    @property
    def color_temp(self):
        """Return the color temperature mapped from Kelvin to the HA scale."""
        temperature = int(TEMP_MIN_HASS + (TEMP_MAX_HASS - TEMP_MIN_HASS) *
                          (self._kel - TEMP_MIN) / (TEMP_MAX - TEMP_MIN))
        _LOGGER.debug("color_temp: %d", temperature)
        return temperature
    @property
    def is_on(self):
        """Return true if device is on."""
        _LOGGER.debug("is_on: %d", self._power)
        return self._power != 0
    @property
    def supported_features(self):
        """Flag supported features."""
        return SUPPORT_LIFX
    def turn_on(self, **kwargs):
        """Turn the device on, applying any requested color/brightness."""
        if ATTR_TRANSITION in kwargs:
            fade = kwargs[ATTR_TRANSITION] * 1000
        else:
            fade = 0
        if ATTR_RGB_COLOR in kwargs:
            hue, saturation, brightness = \
                convert_rgb_to_hsv(kwargs[ATTR_RGB_COLOR])
        else:
            hue = self._hue
            saturation = self._sat
            brightness = self._bri
        if ATTR_BRIGHTNESS in kwargs:
            brightness = kwargs[ATTR_BRIGHTNESS] * (BYTE_MAX + 1)
        else:
            # NOTE(review): this overwrites any brightness derived from
            # ATTR_RGB_COLOR above with the stored value -- confirm intended.
            brightness = self._bri
        if ATTR_COLOR_TEMP in kwargs:
            # pylint: disable=fixme
            # TODO: Use color_temperature_mired_to_kelvin from util.color
            kelvin = int(((TEMP_MAX - TEMP_MIN) *
                          (kwargs[ATTR_COLOR_TEMP] - TEMP_MIN_HASS) /
                          (TEMP_MAX_HASS - TEMP_MIN_HASS)) + TEMP_MIN)
        else:
            kelvin = self._kel
        _LOGGER.debug("turn_on: %s (%d) %d %d %d %d %d",
                      self._ip, self._power,
                      hue, saturation, brightness, kelvin, fade)
        if self._power == 0:
            self._liffylights.set_power(self._ip, 65535, fade)
        self._liffylights.set_color(self._ip, hue, saturation,
                                    brightness, kelvin, fade)
    def turn_off(self, **kwargs):
        """Turn the device off."""
        if ATTR_TRANSITION in kwargs:
            fade = kwargs[ATTR_TRANSITION] * 1000
        else:
            fade = 0
        _LOGGER.debug("turn_off: %s %d", self._ip, fade)
        self._liffylights.set_power(self._ip, 0, fade)
    def set_name(self, name):
        """Set name of the light."""
        self._name = name
    def set_power(self, power):
        """Set power state value."""
        _LOGGER.debug("set_power: %d", power)
        # Stored as a boolean: any non-zero report means "on".
        self._power = (power != 0)
    def set_color(self, hue, sat, bri, kel):
        """Set color state values and derive the cached RGB triple."""
        self._hue = hue
        self._sat = sat
        self._bri = bri
        self._kel = kel
        red, green, blue = colorsys.hsv_to_rgb(hue / SHORT_MAX,
                                               sat / SHORT_MAX,
                                               bri / SHORT_MAX)
        red = int(red * BYTE_MAX)
        green = int(green * BYTE_MAX)
        blue = int(blue * BYTE_MAX)
        _LOGGER.debug("set_color: %d %d %d %d [%d %d %d]",
                      hue, sat, bri, kel, red, green, blue)
        self._rgb = [red, green, blue]
| mit |
google-code/android-scripting | python/src/Mac/Modules/te/tescan.py | 34 | 1975 | # Scan an Apple header file, generating a Python file of generator calls.
import sys
from bgenlocations import TOOLBOXDIR, BGENDIR
sys.path.append(BGENDIR)
from scantools import Scanner
# Toolbox module names and the handle type treated as the method receiver.
LONG = "TextEdit"
SHORT = "te"
OBJECT = "TEHandle"
def main():
    # Scan TextEdit.h, emit tegen.py plus the TextEdit constants module,
    # then import the generated support code as a smoke test.  (Python 2.)
    input = LONG + ".h"
    output = SHORT + "gen.py"
    defsoutput = TOOLBOXDIR + LONG + ".py"
    scanner = MyScanner(input, output, defsoutput)
    scanner.scan()
    scanner.close()
    print "=== Testing definitions output code ==="
    execfile(defsoutput, {}, {})
    print "=== Done scanning and generating, now importing the generated code... ==="
    exec "import " + SHORT + "support"
    print "=== Done. It's up to you to compile it now! ==="
class MyScanner(Scanner):
    """TextEdit-specific scanner configuration for bgen."""
    def destination(self, type, name, arglist):
        # Functions whose last in-argument is a TEHandle become methods.
        classname = "Function"
        listname = "functions"
        if arglist:
            t, n, m = arglist[-1]
            # This is non-functional today
            if t == OBJECT and m == "InMode":
                classname = "Method"
                listname = "methods"
        return classname, listname
    def makeblacklistnames(self):
        # Functions that must not be wrapped.
        return [
                "TEDispose",
                "TEInit",
##                "TEGetHiliteRgn",
                ]
    def makeblacklisttypes(self):
        # Argument/return types that cannot be marshalled.
        return [
                "TEClickLoopUPP",
                "UniversalProcPtr",
                "WordBreakUPP",
                "TEDoTextUPP",
                "TERecalcUPP",
                "TEFindWordUPP",
                ]
    def makerepairinstructions(self):
        # Signature rewrites applied to mis-scanned prototypes.
        return [
                ([("void_ptr", "*", "InMode"), ("long", "*", "InMode")],
                 [("InBuffer", "*", "*")]),

                # TEContinuousStyle
                ([("short", "mode", "OutMode"), ("TextStyle", "aStyle", "OutMode")],
                 [("short", "mode", "InOutMode"), ("TextStyle", "aStyle", "InOutMode")])
                ]
if __name__ == "__main__":
main()
| apache-2.0 |
AutorestCI/azure-sdk-for-python | azure-mgmt-web/azure/mgmt/web/models/top_level_domain.py | 2 | 1850 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .proxy_only_resource import ProxyOnlyResource
class TopLevelDomain(ProxyOnlyResource):
    """A top level domain object.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    :ivar id: Resource Id.
    :vartype id: str
    :ivar name: Resource Name.
    :vartype name: str
    :param kind: Kind of resource.
    :type kind: str
    :ivar type: Resource type.
    :vartype type: str
    :ivar domain_name: Name of the top level domain.
    :vartype domain_name: str
    :param privacy: If <code>true</code>, then the top level domain supports
     domain privacy; otherwise, <code>false</code>.
    :type privacy: bool
    """

    # Server-populated fields are marked read-only for serialization.
    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
        'domain_name': {'readonly': True},
    }

    # Maps Python attribute names to their wire-format (JSON) paths.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'kind': {'key': 'kind', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'domain_name': {'key': 'properties.name', 'type': 'str'},
        'privacy': {'key': 'properties.privacy', 'type': 'bool'},
    }

    def __init__(self, kind=None, privacy=None):
        """Initialize; read-only fields default to None until deserialized."""
        super(TopLevelDomain, self).__init__(kind=kind)
        self.domain_name = None
        self.privacy = privacy
rll/lfd | lfd/rapprentice/kinematics_utils.py | 2 | 2519 | import numpy as np
import scipy.interpolate as si
from numpy import pi
from lfd.rapprentice import math_utils as mu
def smaller_ang(x):
    """Return the angle equivalent to x (mod 2*pi) lying in [-pi, pi)."""
    return (x + pi)%(2*pi) - pi
def closer_ang(x,a,dir=0):
    """Return angle y with y == x (mod 2*pi), chosen relative to a.

    dir ==  0: minimize |y - a|
    dir ==  1: y in [a, a + 2*pi)
    dir == -1: y in [a - 2*pi, a)

    Returns None for any other dir value.  (The previous docstring
    described a non-existent "dir == 2" branch and stated the ranges
    relative to x; the code compares against -1 and anchors on a.)
    Note: the parameter name `dir` shadows the builtin but is kept for
    interface compatibility with existing callers.
    """
    if dir == 0:
        return a + smaller_ang(x-a)
    elif dir == 1:
        return a + (x-a)%(2*pi)
    elif dir == -1:
        return a + (x-a)%(2*pi) - 2*pi
def closer_joint_angles(pos,seed):
    """Return pos with the roll joints (indices 2, 4, 6) remapped so each
    angle is the representative closest to the corresponding seed value."""
    result = np.array(pos)
    for joint_index in (2, 4, 6):
        result[joint_index] = closer_ang(pos[joint_index], seed[joint_index], 0)
    return result
def get_velocities(positions, times, tol):
    """Estimate velocities by differentiating a smoothing spline fit.

    positions: (n, d) samples; times: length-n timestamps; tol: per-sample
    smoothing tolerance passed through to scipy's splprep.
    """
    positions = np.atleast_2d(positions)
    n = len(positions)
    deg = min(3, n - 1)
    # Drop samples whose timestamp (nearly) repeats the previous one so the
    # spline parameterization stays usable.
    good_inds = np.r_[True,(abs(times[1:] - times[:-1]) >= 1e-6)]
    good_positions = positions[good_inds]
    good_times = times[good_inds]
    # NOTE(review): this checks the length of the mask (== n), not the count
    # of good samples, so it only triggers for single-sample input -- confirm.
    if len(good_inds) == 1:
        return np.zeros(positions[0:1].shape)
    (tck, _) = si.splprep(good_positions.T,s = tol**2*(n+1), u=good_times, k=deg)
    #smooth_positions = np.r_[si.splev(times,tck,der=0)].T
    velocities = np.r_[si.splev(times,tck,der=1)].T
    return velocities
def smooth_positions(positions, tol):
    """Smooth a position trajectory with a spline fit (unit-spaced samples).

    Mirrors get_velocities() but evaluates the spline itself (der=0)
    instead of its derivative.
    """
    times = np.arange(len(positions))
    positions = np.atleast_2d(positions)
    n = len(positions)
    deg = min(3, n - 1)
    good_inds = np.r_[True,(abs(times[1:] - times[:-1]) >= 1e-6)]
    good_positions = positions[good_inds]
    good_times = times[good_inds]
    # NOTE(review): as in get_velocities, len(good_inds) == n -- this guard
    # only fires for single-sample input; confirm intent.
    if len(good_inds) == 1:
        return np.zeros(positions[0:1].shape)
    (tck, _) = si.splprep(good_positions.T,s = tol**2*(n+1), u=good_times, k=deg)
    smooth_positions = np.r_[si.splev(times,tck,der=0)].T
    return smooth_positions
def unif_resample(x,n,weights,tol=.001,deg=3):
    # Resample curve x (m points x d dims) to n points uniformly spaced in
    # *weighted* arc length, via a smoothing B-spline fit.
    x = np.atleast_2d(x)
    weights = np.atleast_2d(weights)
    x = mu.remove_duplicate_rows(x)
    x_scaled = x * weights  # per-dimension weighting before measuring length
    dl = mu.norms(x_scaled[1:] - x_scaled[:-1],1)  # segment lengths
    l = np.cumsum(np.r_[0,dl])                     # cumulative arc length
    (tck,_) = si.splprep(x_scaled.T,k=deg,s = tol**2*len(x),u=l)
    newu = np.linspace(0,l[-1],n)  # n uniform stations along the curve
    out_scaled = np.array(si.splev(newu,tck)).T
    out = out_scaled/weights       # undo the weighting
    # NOTE(review): the trailing "| bsd-2-clause |" below is dataset-dump
    # residue (license column of the source table), not part of the code.
    return out | bsd-2-clause |
cheminfo/Visualizer-grunt | src/components/three.js/utils/converters/fbx/convert_to_threejs.py | 50 | 67563 | # @author zfedoran / http://github.com/zfedoran
import os
import sys
import math
import operator
# #####################################################
# Globals
# #####################################################
# Conversion switches — presumably overridden by the CLI entry point
# (not in view); read throughout the exporter below.
option_triangulate = True      # tessellate mesh/NURBS/patch attributes into triangles
option_textures = True         # export texture definitions and material bindings
option_prefix = True           # prefix generated names with the FBX unique id
option_geometry = False        # geometry-only output (bakes node transforms in)
option_default_camera = False  # TODO confirm: inject a default camera downstream
option_default_light = False   # TODO confirm: inject a default light downstream
converter = None               # FBX geometry converter, set after SDK init (not in view)
global_up_vector = None        # scene up axis, set from get_up_vector() presumably
# #####################################################
# Templates
# #####################################################
def Vector2String(v, no_brackets = False):
    """Format a 2-vector with %g: '[ x, y ]', or compact 'x,y' when no_brackets."""
    if no_brackets:
        return '%g,%g' % (v[0], v[1])
    return '[ %g, %g ]' % (v[0], v[1])

def Vector3String(v, no_brackets = False):
    """Format a 3-vector with %g: '[ x, y, z ]', or compact 'x,y,z' when no_brackets."""
    if no_brackets:
        return '%g,%g,%g' % (v[0], v[1], v[2])
    return '[ %g, %g, %g ]' % (v[0], v[1], v[2])

def ColorString(c, no_brackets = False):
    """Format an rgb triple; NB the bare form keeps ', ' separators."""
    if no_brackets:
        return '%g, %g, %g' % (c[0], c[1], c[2])
    return '[ %g, %g, %g ]' % (c[0], c[1], c[2])

def LabelString(s):
    """Wrap a value in double quotes."""
    return '"%s"' % (s,)

def ArrayString(s):
    """Wrap pre-serialized content in space-padded square brackets."""
    return '[ %s ]' % (s,)

def PaddingString(n):
    """Return n tab characters."""
    return "\t" * n

def BoolString(value):
    """JSON-style boolean literal for a Python truth value."""
    return "true" if value else "false"
# #####################################################
# Helpers
# #####################################################
# Each helper below derives a stable, human-readable id for an FBX object.
# When the module-level option_prefix flag (or force_prefix) is set, the FBX
# unique id is baked into the name to avoid collisions between objects that
# share a display name.

def getObjectName(o, force_prefix = False):
    # Scene-node id; empty string for a missing node.
    if not o:
        return ""
    prefix = ""
    if option_prefix or force_prefix:
        prefix = "Object_%s_" % o.GetUniqueID()
    return prefix + o.GetName()

def getGeometryName(g, force_prefix = False):
    prefix = ""
    if option_prefix or force_prefix:
        prefix = "Geometry_%s_" % g.GetUniqueID()
    return prefix + g.GetName()

def getEmbedName(e, force_prefix = False):
    prefix = ""
    if option_prefix or force_prefix:
        prefix = "Embed_%s_" % e.GetUniqueID()
    return prefix + e.GetName()

def getMaterialName(m, force_prefix = False):
    prefix = ""
    if option_prefix or force_prefix:
        prefix = "Material_%s_" % m.GetUniqueID()
    return prefix + m.GetName()

def getTextureName(t, force_prefix = False):
    # Textures are identified by their file's base name (sans extension),
    # not by GetName().
    texture_file = t.GetFileName()
    texture_id = os.path.splitext(os.path.basename(texture_file))[0]
    prefix = ""
    if option_prefix or force_prefix:
        prefix = "Texture_%s_" % t.GetUniqueID()
    return prefix + texture_id

def getFogName(f, force_prefix = False):
    prefix = ""
    if option_prefix or force_prefix:
        prefix = "Fog_%s_" % f.GetUniqueID()
    return prefix + f.GetName()

def getObjectVisible(n):
    # Visibility is currently hard-coded to "true"; n is ignored.
    return BoolString(True)
def getRadians(v):
    """Convert a 3-tuple of degrees to radians."""
    scale = math.pi / 180
    return (v[0] * scale, v[1] * scale, v[2] * scale)

def getHex(c):
    """Pack an rgb color (float components in [0, 1]) into a 24-bit int."""
    r = int(c[0] * 255)
    g = int(c[1] * 255)
    b = int(c[2] * 255)
    return (r << 16) + (g << 8) + b

def setBit(value, position, on):
    """Return value with the bit at `position` set (on=True) or cleared."""
    mask = 1 << position
    return value | mask if on else value & ~mask

def convert_fbx_color(color):
    """FbxColor -> plain [r, g, b, a] list."""
    return [getattr(color, attr) for attr in ("mRed", "mGreen", "mBlue", "mAlpha")]

def convert_fbx_vec2(v):
    """FbxVector2-like -> plain [x, y] list."""
    return [v[i] for i in range(2)]

def convert_fbx_vec3(v):
    """FbxVector3/4-like -> plain [x, y, z] list."""
    return [v[i] for i in range(3)]
def generate_uvs(uv_layers):
    """Serialize UV layers as comma-joined bracketed arrays: '[u,v,...],[...]'."""
    serialized_layers = [
        ",".join(Vector2String(uv, True) for uv in layer)
        for layer in uv_layers
    ]
    return ",".join("[%s]" % layer for layer in serialized_layers)
def generateMultiLineString(lines, separator, padding):
    """Join `lines` with `separator`, prefixing each line with `padding` tabs.

    Rewritten from an index-based append loop; the tab prefix (what
    PaddingString(padding) returns) is computed once instead of per line.
    """
    pad = "\t" * padding
    return separator.join(pad + line for line in lines)
def get_up_vector(scene):
    # Return the scene's up axis as an FbxVector4 unit vector.
    global_settings = scene.GetGlobalSettings()
    axis_system = global_settings.GetAxisSystem()
    # GetUpVector() returns (axis, sign); axis is 1-based (X=1, Y=2, Z=3),
    # sign is +1/-1 — TODO confirm against the FBX SDK version in use.
    up_vector = axis_system.GetUpVector()
    tmp = [0,0,0]
    tmp[up_vector[0] - 1] = up_vector[1] * 1
    # NOTE(review): w component is set to 1 here — confirm consumers expect
    # a point-style rather than direction-style (w=0) vector.
    return FbxVector4(tmp[0], tmp[1], tmp[2], 1)
def generate_bounding_box(vertices):
    """Return ([minx, miny, minz], [maxx, maxy, maxz]) over `vertices`.

    Bug fix: the extremes were initialized to 0, so the reported box always
    contained the origin — wrong for meshes lying entirely in positive (or
    entirely in negative) coordinates.  Extremes now start from the first
    vertex; an empty list keeps the old degenerate ([0,0,0], [0,0,0]).
    """
    if len(vertices) == 0:
        return [0, 0, 0], [0, 0, 0]
    first = vertices[0]
    minx = maxx = first[0]
    miny = maxy = first[1]
    minz = maxz = first[2]
    for vertex in vertices:
        if vertex[0] < minx:
            minx = vertex[0]
        if vertex[1] < miny:
            miny = vertex[1]
        if vertex[2] < minz:
            minz = vertex[2]
        if vertex[0] > maxx:
            maxx = vertex[0]
        if vertex[1] > maxy:
            maxy = vertex[1]
        if vertex[2] > maxz:
            maxz = vertex[2]
    return [minx, miny, minz], [maxx, maxy, maxz]
# #####################################################
# Generate - Triangles
# #####################################################
def triangulate_node_hierarchy(node):
    # Depth-first walk: tessellate any mesh/NURBS/patch attribute in place
    # (via the module-level `converter`) so the exporter only sees triangles.
    node_attribute = node.GetNodeAttribute();
    if node_attribute:
        if node_attribute.GetAttributeType() == FbxNodeAttribute.eMesh or \
            node_attribute.GetAttributeType() == FbxNodeAttribute.eNurbs or \
            node_attribute.GetAttributeType() == FbxNodeAttribute.eNurbsSurface or \
            node_attribute.GetAttributeType() == FbxNodeAttribute.ePatch:
            converter.TriangulateInPlace(node);
    child_count = node.GetChildCount()
    for i in range(child_count):
        triangulate_node_hierarchy(node.GetChild(i))

def triangulate_scene(scene):
    # Kick off the recursive triangulation from the scene root's children
    # (the root node itself carries no geometry attribute to convert).
    node = scene.GetRootNode()
    if node:
        for i in range(node.GetChildCount()):
            triangulate_node_hierarchy(node.GetChild(i))
# #####################################################
# Generate - Material String
# #####################################################
def generate_texture_bindings(material_property, texture_list):
    # Append ' "<slot>": "<texture id>",' binding lines for every texture
    # attached to the given material property.  Keys are FBX property
    # names; values are the corresponding three.js material slots.
    binding_types = {
        "DiffuseColor": "map", "DiffuseFactor": "diffuseFactor", "EmissiveColor": "emissiveMap",
        "EmissiveFactor": "emissiveFactor", "AmbientColor": "ambientMap", "AmbientFactor": "ambientFactor",
        "SpecularColor": "specularMap", "SpecularFactor": "specularFactor", "ShininessExponent": "shininessExponent",
        "NormalMap": "normalMap", "Bump": "bumpMap", "TransparentColor": "transparentMap",
        "TransparencyFactor": "transparentFactor", "ReflectionColor": "reflectionMap",
        "ReflectionFactor": "reflectionFactor", "DisplacementColor": "displacementMap",
        "VectorDisplacementColor": "vectorDisplacementMap"
    }
    if material_property.IsValid():
        #Here we have to check if it's layeredtextures, or just textures:
        layered_texture_count = material_property.GetSrcObjectCount(FbxLayeredTexture.ClassId)
        if layered_texture_count > 0:
            # Layered textures: flatten every texture of every layer.
            for j in range(layered_texture_count):
                layered_texture = material_property.GetSrcObject(FbxLayeredTexture.ClassId, j)
                texture_count = layered_texture.GetSrcObjectCount(FbxTexture.ClassId)
                for k in range(texture_count):
                    texture = layered_texture.GetSrcObject(FbxTexture.ClassId,k)
                    if texture:
                        texture_id = getTextureName(texture, True)
                        texture_binding = ' "%s": "%s",' % (binding_types[str(material_property.GetName())], texture_id)
                        texture_list.append(texture_binding)
        else:
            # no layered texture simply get on the property
            texture_count = material_property.GetSrcObjectCount(FbxTexture.ClassId)
            for j in range(texture_count):
                texture = material_property.GetSrcObject(FbxTexture.ClassId,j)
                if texture:
                    texture_id = getTextureName(texture, True)
                    texture_binding = ' "%s": "%s",' % (binding_types[str(material_property.GetName())], texture_id)
                    texture_list.append(texture_binding)
def generate_material_string(material):
    # Serialize one FBX surface material as a three.js material entry.
    # Hardware-shader (HLSL/CGFX) materials are unsupported and yield ''.
    #Get the implementation to see if it's a hardware shader.
    implementation = GetImplementation(material, "ImplementationHLSL")
    implementation_type = "HLSL"
    if not implementation:
        implementation = GetImplementation(material, "ImplementationCGFX")
        implementation_type = "CGFX"
    output = []
    if implementation:
        # This material is a hardware shader, skip it
        print("Shader materials are not supported")
        return ''
    elif material.GetClassId().Is(FbxSurfaceLambert.ClassId):
        # Lambert surface -> MeshLambertMaterial
        ambient = str(getHex(material.Ambient.Get()))
        diffuse = str(getHex(material.Diffuse.Get()))
        emissive = str(getHex(material.Emissive.Get()))
        # FBX stores a transparency factor; three.js wants opacity.
        # A resulting opacity of 0 is treated as fully opaque.
        opacity = 1.0 - material.TransparencyFactor.Get()
        opacity = 1.0 if opacity == 0 else opacity
        opacity = str(opacity)
        transparent = BoolString(False)
        reflectivity = "1"
        output = [
        '\t' + LabelString( getMaterialName( material ) ) + ': {',
        ' "type" : "MeshLambertMaterial",',
        ' "parameters" : {',
        ' "color" : ' + diffuse + ',',
        ' "ambient" : ' + ambient + ',',
        ' "emissive" : ' + emissive + ',',
        ' "reflectivity" : ' + reflectivity + ',',
        ' "transparent" : ' + transparent + ',',
        ' "opacity" : ' + opacity + ',',
        ]
    elif material.GetClassId().Is(FbxSurfacePhong.ClassId):
        # Phong surface -> MeshPhongMaterial (adds specular/shininess/bump)
        ambient = str(getHex(material.Ambient.Get()))
        diffuse = str(getHex(material.Diffuse.Get()))
        emissive = str(getHex(material.Emissive.Get()))
        specular = str(getHex(material.Specular.Get()))
        opacity = 1.0 - material.TransparencyFactor.Get()
        opacity = 1.0 if opacity == 0 else opacity
        opacity = str(opacity)
        shininess = str(material.Shininess.Get())
        transparent = BoolString(False)
        reflectivity = "1"
        bumpScale = "1"
        output = [
        '\t' + LabelString( getMaterialName( material ) ) + ': {',
        ' "type" : "MeshPhongMaterial",',
        ' "parameters" : {',
        ' "color" : ' + diffuse + ',',
        ' "ambient" : ' + ambient + ',',
        ' "emissive" : ' + emissive + ',',
        ' "specular" : ' + specular + ',',
        ' "shininess" : ' + shininess + ',',
        ' "bumpScale" : ' + bumpScale + ',',
        ' "reflectivity" : ' + reflectivity + ',',
        ' "transparent" : ' + transparent + ',',
        ' "opacity" : ' + opacity + ',',
        ]
    else:
        print("Unknown type of Material")
        return ''
    if option_textures:
        # Append the texture slot bindings for this material.
        texture_list = []
        texture_count = FbxLayerElement.sTypeTextureCount()
        for texture_index in range(texture_count):
            material_property = material.FindProperty(FbxLayerElement.sTextureChannelNames(texture_index))
            generate_texture_bindings(material_property, texture_list)
        output += texture_list
    # Common trailing properties and closing braces.
    wireframe = BoolString(False)
    wireframeLinewidth = "1"
    output.append(' "wireframe" : ' + wireframe + ',')
    output.append(' "wireframeLinewidth" : ' + wireframeLinewidth)
    output.append(' }')
    output.append('}')
    return generateMultiLineString( output, '\n\t\t', 0 )
def generate_proxy_material_string(node, material_names):
    # Multi-material meshes get a MeshFaceMaterial wrapper whose
    # "materials" array is indexed by per-face material index.
    output = [
    '\t' + LabelString( getMaterialName( node, True ) ) + ': {',
    ' "type" : "MeshFaceMaterial",',
    ' "parameters" : {',
    ' "materials" : ' + ArrayString( ",".join(LabelString(m) for m in material_names) ),
    ' }',
    '}'
    ]
    return generateMultiLineString( output, '\n\t\t', 0 )
# #####################################################
# Parse - Materials
# #####################################################
def extract_materials_from_node(node, material_list):
    # Serialize every material of this mesh node into material_list, plus a
    # proxy MeshFaceMaterial entry when the node has more than one material.
    name = node.GetName()  # NOTE(review): unused
    mesh = node.GetNodeAttribute()
    # Re-bind `node` to the mesh's owning node (same object, via the mesh).
    node = None
    if mesh:
        node = mesh.GetNode()
        if node:
            material_count = node.GetMaterialCount()
    material_names = []
    for l in range(mesh.GetLayerCount()):
        materials = mesh.GetLayer(l).GetMaterials()
        if materials:
            if materials.GetReferenceMode() == FbxLayerElement.eIndex:
                #Materials are in an undefined external table
                continue
            for i in range(material_count):
                material = node.GetMaterial(i)
                material_names.append(getMaterialName(material))
                material_string = generate_material_string(material)
                material_list.append(material_string)
    if material_count > 1:
        proxy_material = generate_proxy_material_string(node, material_names)
        material_list.append(proxy_material)

def generate_materials_from_hierarchy(node, material_list):
    # Recursive walk: collect materials from every mesh node in the subtree.
    if node.GetNodeAttribute() == None:
        pass
    else:
        attribute_type = (node.GetNodeAttribute().GetAttributeType())
        if attribute_type == FbxNodeAttribute.eMesh:
            extract_materials_from_node(node, material_list)
    for i in range(node.GetChildCount()):
        generate_materials_from_hierarchy(node.GetChild(i), material_list)

def generate_material_list(scene):
    # Entry point: serialized material strings for the whole scene.
    material_list = []
    node = scene.GetRootNode()
    if node:
        for i in range(node.GetChildCount()):
            generate_materials_from_hierarchy(node.GetChild(i), material_list)
    return material_list
# #####################################################
# Generate - Texture String
# #####################################################
def generate_texture_string(texture):
    # Emit one texture entry keyed by its prefixed file-basename id.
    #TODO: extract more texture properties
    wrap_u = texture.GetWrapModeU()       # NOTE(review): queried but not yet emitted (see TODO)
    wrap_v = texture.GetWrapModeV()       # NOTE(review): queried but not yet emitted (see TODO)
    offset = texture.GetUVTranslation()   # NOTE(review): unused; recomputed inline below
    output = [
    '\t' + LabelString( getTextureName( texture, True ) ) + ': {',
    ' "url" : "' + texture.GetFileName() + '",',
    ' "repeat" : ' + Vector2String( (1,1) ) + ',',
    ' "offset" : ' + Vector2String( texture.GetUVTranslation() ) + ',',
    ' "magFilter" : ' + LabelString( "LinearFilter" ) + ',',
    ' "minFilter" : ' + LabelString( "LinearMipMapLinearFilter" ) + ',',
    ' "anisotropy" : ' + BoolString( True ),
    '}'
    ]
    return generateMultiLineString( output, '\n\t\t', 0 )
# #####################################################
# Parse - Textures
# #####################################################
def extract_material_textures(material_property, texture_list):
    # Append serialized texture entries for every texture (layered or
    # direct) connected to the given material property.  Mirrors the
    # traversal in generate_texture_bindings.
    if material_property.IsValid():
        #Here we have to check if it's layeredtextures, or just textures:
        layered_texture_count = material_property.GetSrcObjectCount(FbxLayeredTexture.ClassId)
        if layered_texture_count > 0:
            for j in range(layered_texture_count):
                layered_texture = material_property.GetSrcObject(FbxLayeredTexture.ClassId, j)
                texture_count = layered_texture.GetSrcObjectCount(FbxTexture.ClassId)
                for k in range(texture_count):
                    texture = layered_texture.GetSrcObject(FbxTexture.ClassId,k)
                    if texture:
                        texture_string = generate_texture_string(texture)
                        texture_list.append(texture_string)
        else:
            # no layered texture simply get on the property
            texture_count = material_property.GetSrcObjectCount(FbxTexture.ClassId)
            for j in range(texture_count):
                texture = material_property.GetSrcObject(FbxTexture.ClassId,j)
                if texture:
                    texture_string = generate_texture_string(texture)
                    texture_list.append(texture_string)

def extract_textures_from_node(node, texture_list):
    # Collect textures from every material attached to this mesh node,
    # scanning every known texture channel of each material.
    name = node.GetName()  # NOTE(review): unused
    mesh = node.GetNodeAttribute()
    #for all materials attached to this mesh
    material_count = mesh.GetNode().GetSrcObjectCount(FbxSurfaceMaterial.ClassId)
    for material_index in range(material_count):
        material = mesh.GetNode().GetSrcObject(FbxSurfaceMaterial.ClassId, material_index)
        #go through all the possible textures types
        if material:
            texture_count = FbxLayerElement.sTypeTextureCount()
            for texture_index in range(texture_count):
                material_property = material.FindProperty(FbxLayerElement.sTextureChannelNames(texture_index))
                extract_material_textures(material_property, texture_list)

def generate_textures_from_hierarchy(node, texture_list):
    # Recursive walk: collect textures from every mesh node in the subtree.
    if node.GetNodeAttribute() == None:
        pass
    else:
        attribute_type = (node.GetNodeAttribute().GetAttributeType())
        if attribute_type == FbxNodeAttribute.eMesh:
            extract_textures_from_node(node, texture_list)
    for i in range(node.GetChildCount()):
        generate_textures_from_hierarchy(node.GetChild(i), texture_list)

def generate_texture_list(scene):
    # Entry point: serialized texture strings for the whole scene
    # (empty when texture export is disabled).
    if not option_textures:
        return []
    texture_list = []
    node = scene.GetRootNode()
    if node:
        for i in range(node.GetChildCount()):
            generate_textures_from_hierarchy(node.GetChild(i), texture_list)
    return texture_list
# #####################################################
# Extract - Fbx Mesh data
# #####################################################
def extract_fbx_vertex_positions(mesh):
    # Return the mesh's control points (vertex positions) as [x, y, z] lists.
    control_points_count = mesh.GetControlPointsCount()
    control_points = mesh.GetControlPoints()
    positions = []
    for i in range(control_points_count):
        positions.append(convert_fbx_vec3(control_points[i]))
    node = mesh.GetNode()
    if node and option_geometry:
        # FbxMeshes are local to their node, we need the vertices in global space
        # when scene nodes are not exported
        transform = node.EvaluateGlobalTransform()
        transform = FbxMatrix(transform)
        for i in range(len(positions)):
            v = positions[i]
            position = FbxVector4(v[0], v[1], v[2])
            position = transform.MultNormalize(position)
            positions[i] = convert_fbx_vec3(position)
    return positions
def extract_fbx_vertex_normals(mesh):
    # Returns (normal_values, normal_indices) for the FIRST normal layer:
    #   normal_values  - list of [x, y, z] normals
    #   normal_indices - one list per polygon of indices into normal_values
    #
    # FBX mapping-mode reference:
    # eNone             The mapping is undetermined.
    # eByControlPoint   There will be one mapping coordinate for each surface control point/vertex.
    # eByPolygonVertex  There will be one mapping coordinate for each vertex, for every polygon of which it is a part. This means that a vertex will have as many mapping coordinates as polygons of which it is a part.
    # eByPolygon        There can be only one mapping coordinate for the whole polygon.
    # eByEdge           There will be one mapping coordinate for each unique edge in the mesh. This is meant to be used with smoothing layer elements.
    # eAllSame          There can be only one mapping coordinate for the whole surface.
    layered_normal_indices = []
    layered_normal_values = []
    poly_count = mesh.GetPolygonCount()
    control_points = mesh.GetControlPoints()
    for l in range(mesh.GetLayerCount()):
        mesh_normals = mesh.GetLayer(l).GetNormals()
        if not mesh_normals:
            continue
        normals_array = mesh_normals.GetDirectArray()
        normals_count = normals_array.GetCount()
        if normals_count == 0:
            continue
        normal_indices = []
        normal_values = []
        # values
        for i in range(normals_count):
            normal = convert_fbx_vec3(normals_array.GetAt(i))
            normal_values.append(normal)
        node = mesh.GetNode()
        if node and option_geometry:
            # FbxMeshes are local to their node, we need the normals in global space
            # when scene nodes are not exported
            transform = node.EvaluateGlobalTransform()
            # Zero the translation: normals are directions, only rotate/scale.
            transform.SetT(FbxVector4(0,0,0,0))
            transform = FbxMatrix(transform)
            for i in range(len(normal_values)):
                n = normal_values[i]
                normal = FbxVector4(n[0], n[1], n[2])
                normal = transform.MultNormalize(normal)
                normal_values[i] = convert_fbx_vec3(normal)
        # indices — resolve each polygon corner to an index into normal_values,
        # depending on the layer's mapping and reference modes.
        vertexId = 0
        for p in range(poly_count):
            poly_size = mesh.GetPolygonSize(p)
            poly_normals = []
            for v in range(poly_size):
                control_point_index = mesh.GetPolygonVertex(p, v)
                if mesh_normals.GetMappingMode() == FbxLayerElement.eByControlPoint:
                    if mesh_normals.GetReferenceMode() == FbxLayerElement.eDirect:
                        poly_normals.append(control_point_index)
                    elif mesh_normals.GetReferenceMode() == FbxLayerElement.eIndexToDirect:
                        index = mesh_normals.GetIndexArray().GetAt(control_point_index)
                        poly_normals.append(index)
                elif mesh_normals.GetMappingMode() == FbxLayerElement.eByPolygonVertex:
                    if mesh_normals.GetReferenceMode() == FbxLayerElement.eDirect:
                        poly_normals.append(vertexId)
                    elif mesh_normals.GetReferenceMode() == FbxLayerElement.eIndexToDirect:
                        index = mesh_normals.GetIndexArray().GetAt(vertexId)
                        poly_normals.append(index)
                elif mesh_normals.GetMappingMode() == FbxLayerElement.eByPolygon or \
                    mesh_normals.GetMappingMode() == FbxLayerElement.eAllSame or \
                    mesh_normals.GetMappingMode() == FbxLayerElement.eNone:
                    print("unsupported normal mapping mode for polygon vertex")
                vertexId += 1
            normal_indices.append(poly_normals)
        layered_normal_values.append(normal_values)
        layered_normal_indices.append(normal_indices)
    normal_values = []
    normal_indices = []
    # Three.js only supports one layer of normals
    if len(layered_normal_values) > 0:
        normal_values = layered_normal_values[0]
        normal_indices = layered_normal_indices[0]
    return normal_values, normal_indices
def extract_fbx_vertex_colors(mesh):
    """Extract per-layer vertex colors and return (values, indices) of the
    first layer (three.js supports a single color layer).

    Bug fix: the per-layer results were never appended to the layered_*
    accumulators (unlike extract_fbx_vertex_normals, which appends before
    resetting), so this function always returned ([], []) and vertex
    colors were silently dropped.  The appends are restored below.
    """
    # Mapping/reference-mode handling mirrors extract_fbx_vertex_normals:
    # eByControlPoint -> one color per vertex, eByPolygonVertex -> one per
    # polygon corner; other modes are reported as unsupported.
    layered_color_indices = []
    layered_color_values = []
    poly_count = mesh.GetPolygonCount()
    control_points = mesh.GetControlPoints()
    for l in range(mesh.GetLayerCount()):
        mesh_colors = mesh.GetLayer(l).GetVertexColors()
        if not mesh_colors:
            continue
        colors_array = mesh_colors.GetDirectArray()
        colors_count = colors_array.GetCount()
        if colors_count == 0:
            continue
        color_indices = []
        color_values = []
        # values
        for i in range(colors_count):
            color = convert_fbx_color(colors_array.GetAt(i))
            color_values.append(color)
        # indices
        vertexId = 0
        for p in range(poly_count):
            poly_size = mesh.GetPolygonSize(p)
            poly_colors = []
            for v in range(poly_size):
                control_point_index = mesh.GetPolygonVertex(p, v)
                if mesh_colors.GetMappingMode() == FbxLayerElement.eByControlPoint:
                    if mesh_colors.GetReferenceMode() == FbxLayerElement.eDirect:
                        poly_colors.append(control_point_index)
                    elif mesh_colors.GetReferenceMode() == FbxLayerElement.eIndexToDirect:
                        index = mesh_colors.GetIndexArray().GetAt(control_point_index)
                        poly_colors.append(index)
                elif mesh_colors.GetMappingMode() == FbxLayerElement.eByPolygonVertex:
                    if mesh_colors.GetReferenceMode() == FbxLayerElement.eDirect:
                        poly_colors.append(vertexId)
                    elif mesh_colors.GetReferenceMode() == FbxLayerElement.eIndexToDirect:
                        index = mesh_colors.GetIndexArray().GetAt(vertexId)
                        poly_colors.append(index)
                elif mesh_colors.GetMappingMode() == FbxLayerElement.eByPolygon or \
                    mesh_colors.GetMappingMode() == FbxLayerElement.eAllSame or \
                    mesh_colors.GetMappingMode() == FbxLayerElement.eNone:
                    print("unsupported color mapping mode for polygon vertex")
                vertexId += 1
            color_indices.append(poly_colors)
        # Fix: keep this layer's results (previously missing — see docstring).
        layered_color_values.append(color_values)
        layered_color_indices.append(color_indices)
    color_values = []
    color_indices = []
    # Three.js only supports one layer of colors
    if len(layered_color_values) > 0:
        color_values = layered_color_values[0]
        color_indices = layered_color_indices[0]
    return color_values, color_indices
def extract_fbx_vertex_uvs(mesh):
    # Returns (layered_uv_values, layered_uv_indices) for ALL uv layers —
    # unlike the normals/colors extractors, every layer is kept.
    #
    # FBX mapping-mode reference:
    # eNone             The mapping is undetermined.
    # eByControlPoint   There will be one mapping coordinate for each surface control point/vertex.
    # eByPolygonVertex  There will be one mapping coordinate for each vertex, for every polygon of which it is a part. This means that a vertex will have as many mapping coordinates as polygons of which it is a part.
    # eByPolygon        There can be only one mapping coordinate for the whole polygon.
    # eByEdge           There will be one mapping coordinate for each unique edge in the mesh. This is meant to be used with smoothing layer elements.
    # eAllSame          There can be only one mapping coordinate for the whole surface.
    layered_uv_indices = []
    layered_uv_values = []
    poly_count = mesh.GetPolygonCount()
    control_points = mesh.GetControlPoints()
    for l in range(mesh.GetLayerCount()):
        mesh_uvs = mesh.GetLayer(l).GetUVs()
        if not mesh_uvs:
            continue
        uvs_array = mesh_uvs.GetDirectArray()
        uvs_count = uvs_array.GetCount()
        if uvs_count == 0:
            continue
        uv_indices = []
        uv_values = []
        # values
        for i in range(uvs_count):
            uv = convert_fbx_vec2(uvs_array.GetAt(i))
            uv_values.append(uv)
        # indices
        vertexId = 0
        for p in range(poly_count):
            poly_size = mesh.GetPolygonSize(p)
            poly_uvs = []
            for v in range(poly_size):
                control_point_index = mesh.GetPolygonVertex(p, v)
                if mesh_uvs.GetMappingMode() == FbxLayerElement.eByControlPoint:
                    if mesh_uvs.GetReferenceMode() == FbxLayerElement.eDirect:
                        poly_uvs.append(control_point_index)
                    elif mesh_uvs.GetReferenceMode() == FbxLayerElement.eIndexToDirect:
                        index = mesh_uvs.GetIndexArray().GetAt(control_point_index)
                        poly_uvs.append(index)
                elif mesh_uvs.GetMappingMode() == FbxLayerElement.eByPolygonVertex:
                    # For polygon-vertex mapping the UV index comes from the
                    # mesh's per-corner texture UV table.
                    uv_texture_index = mesh.GetTextureUVIndex(p, v)
                    if mesh_uvs.GetReferenceMode() == FbxLayerElement.eDirect or \
                        mesh_uvs.GetReferenceMode() == FbxLayerElement.eIndexToDirect:
                        poly_uvs.append(uv_texture_index)
                elif mesh_uvs.GetMappingMode() == FbxLayerElement.eByPolygon or \
                    mesh_uvs.GetMappingMode() == FbxLayerElement.eAllSame or \
                    mesh_uvs.GetMappingMode() == FbxLayerElement.eNone:
                    print("unsupported uv mapping mode for polygon vertex")
                vertexId += 1
            uv_indices.append(poly_uvs)
        layered_uv_values.append(uv_values)
        layered_uv_indices.append(uv_indices)
    return layered_uv_values, layered_uv_indices
# #####################################################
# Generate - Mesh String (for scene output)
# #####################################################
def generate_mesh_string_for_scene_output(node):
    # Serialize a single node's mesh as an embedded geometry entry for the
    # scene-style output (keyed by the node's embed name).
    mesh = node.GetNodeAttribute()
    mesh_list = [ mesh ]
    # Merge/deduplicate vertex attributes, then build face strings that
    # index into the merged tables.
    vertices, vertex_offsets = process_mesh_vertices(mesh_list)
    materials, material_offsets = process_mesh_materials(mesh_list)
    normals_to_indices = generate_unique_normals_dictionary(mesh_list)
    colors_to_indices = generate_unique_colors_dictionary(mesh_list)
    uvs_to_indices_list = generate_unique_uvs_dictionary_layers(mesh_list)
    normal_values = generate_normals_from_dictionary(normals_to_indices)
    color_values = generate_colors_from_dictionary(colors_to_indices)
    uv_values = generate_uvs_from_dictionary_layers(uvs_to_indices_list)
    faces = process_mesh_polygons(mesh_list,
        normals_to_indices,
        colors_to_indices,
        uvs_to_indices_list,
        vertex_offsets,
        material_offsets)
    # Per-layer uv counts for the metadata header.
    nuvs = []
    for layer_index, uvs in enumerate(uv_values):
        nuvs.append(str(len(uvs)))
    nvertices = len(vertices)
    nnormals = len(normal_values)
    ncolors = len(color_values)
    nfaces = len(faces)
    nuvs = ",".join(nuvs)
    aabb_min, aabb_max = generate_bounding_box(vertices)
    aabb_min = ",".join(str(f) for f in aabb_min)
    aabb_max = ",".join(str(f) for f in aabb_max)
    # Flatten everything into comma-separated strings for the template.
    vertices = ",".join(Vector3String(v, True) for v in vertices)
    normals = ",".join(Vector3String(v, True) for v in normal_values)
    colors = ",".join(Vector3String(v, True) for v in color_values)
    faces = ",".join(faces)
    uvs = generate_uvs(uv_values)
    output = [
    '\t' + LabelString( getEmbedName( node, True ) ) + ' : {',
    ' "metadata" : {',
    ' "vertices" : ' + str(nvertices) + ',',
    ' "normals" : ' + str(nnormals) + ',',
    ' "colors" : ' + str(ncolors) + ',',
    ' "faces" : ' + str(nfaces) + ',',
    ' "uvs" : ' + ArrayString(nuvs),
    ' },',
    ' "boundingBox" : {',
    ' "min" : ' + ArrayString(aabb_min) + ',',
    ' "max" : ' + ArrayString(aabb_max),
    ' },',
    ' "scale" : ' + str( 1 ) + ',',
    ' "materials" : ' + ArrayString("") + ',',
    ' "vertices" : ' + ArrayString(vertices) + ',',
    ' "normals" : ' + ArrayString(normals) + ',',
    ' "colors" : ' + ArrayString(colors) + ',',
    ' "uvs" : ' + ArrayString(uvs) + ',',
    ' "faces" : ' + ArrayString(faces),
    '}'
    ]
    return generateMultiLineString( output, '\n\t\t', 0 )
# #####################################################
# Generate - Mesh String (for non-scene output)
# #####################################################
def generate_mesh_string_for_non_scene_output(scene):
    # Serialize ALL scene meshes merged into one standalone three.js
    # geometry document (formatVersion 3.2); same pipeline as the
    # scene-output variant but over generate_mesh_list(scene).
    mesh_list = generate_mesh_list(scene)
    vertices, vertex_offsets = process_mesh_vertices(mesh_list)
    materials, material_offsets = process_mesh_materials(mesh_list)
    normals_to_indices = generate_unique_normals_dictionary(mesh_list)
    colors_to_indices = generate_unique_colors_dictionary(mesh_list)
    uvs_to_indices_list = generate_unique_uvs_dictionary_layers(mesh_list)
    normal_values = generate_normals_from_dictionary(normals_to_indices)
    color_values = generate_colors_from_dictionary(colors_to_indices)
    uv_values = generate_uvs_from_dictionary_layers(uvs_to_indices_list)
    faces = process_mesh_polygons(mesh_list,
        normals_to_indices,
        colors_to_indices,
        uvs_to_indices_list,
        vertex_offsets,
        material_offsets)
    # Per-layer uv counts for the metadata header.
    nuvs = []
    for layer_index, uvs in enumerate(uv_values):
        nuvs.append(str(len(uvs)))
    nvertices = len(vertices)
    nnormals = len(normal_values)
    ncolors = len(color_values)
    nfaces = len(faces)
    nuvs = ",".join(nuvs)
    aabb_min, aabb_max = generate_bounding_box(vertices)
    aabb_min = ",".join(str(f) for f in aabb_min)
    aabb_max = ",".join(str(f) for f in aabb_max)
    vertices = ",".join(Vector3String(v, True) for v in vertices)
    normals = ",".join(Vector3String(v, True) for v in normal_values)
    colors = ",".join(Vector3String(v, True) for v in color_values)
    faces = ",".join(faces)
    uvs = generate_uvs(uv_values)
    output = [
    '{',
    ' "metadata" : {',
    ' "formatVersion" : 3.2,',
    ' "type" : "geometry",',
    ' "generatedBy" : "convert-to-threejs.py"' + ',',
    ' "vertices" : ' + str(nvertices) + ',',
    ' "normals" : ' + str(nnormals) + ',',
    ' "colors" : ' + str(ncolors) + ',',
    ' "faces" : ' + str(nfaces) + ',',
    ' "uvs" : ' + ArrayString(nuvs),
    ' },',
    ' "boundingBox" : {',
    ' "min" : ' + ArrayString(aabb_min) + ',',
    ' "max" : ' + ArrayString(aabb_max),
    ' },',
    ' "scale" : ' + str( 1 ) + ',',
    ' "materials" : ' + ArrayString("") + ',',
    ' "vertices" : ' + ArrayString(vertices) + ',',
    ' "normals" : ' + ArrayString(normals) + ',',
    ' "colors" : ' + ArrayString(colors) + ',',
    ' "uvs" : ' + ArrayString(uvs) + ',',
    ' "faces" : ' + ArrayString(faces),
    '}'
    ]
    return generateMultiLineString( output, '\n', 0 )
# #####################################################
# Process - Mesh Geometry
# #####################################################
def generate_normal_key(normal):
    """Quantize a normal to 6 decimals so near-equal normals deduplicate."""
    return (round(normal[0], 6), round(normal[1], 6), round(normal[2], 6))

def generate_color_key(color):
    """Colors are de-duplicated by their packed 24-bit hex value."""
    return getHex(color)

def generate_uv_key(uv):
    """Quantize a uv pair to 6 decimals for de-duplication."""
    return (round(uv[0], 6), round(uv[1], 6))

def append_non_duplicate_uvs(source_uvs, dest_uvs, counts):
    """Merge each source uv layer into dest_uvs (list of key->index dicts),
    assigning sequential indices to unseen uvs; returns updated counts."""
    for layer_index, source_layer in enumerate(source_uvs):
        if layer_index >= len(dest_uvs):
            # First time this layer is seen: start an empty dictionary.
            dest_uvs.append({})
            counts.append(0)
        dest_layer = dest_uvs[layer_index]
        next_index = counts[layer_index]
        for uv in source_layer:
            key = generate_uv_key(uv)
            if key not in dest_layer:
                dest_layer[key] = next_index
                next_index += 1
        counts[layer_index] = next_index
    return counts
def generate_unique_normals_dictionary(mesh_list):
    # Map rounded normal key -> sequential index, merged across all meshes.
    normals_dictionary = {}
    nnormals = 0
    # Merge meshes, remove duplicate data
    for mesh in mesh_list:
        node = mesh.GetNode()  # NOTE(review): unused here
        normal_values, normal_indices = extract_fbx_vertex_normals(mesh)
        if len(normal_values) > 0:
            for normal in normal_values:
                key = generate_normal_key(normal)
                if key not in normals_dictionary:
                    normals_dictionary[key] = nnormals
                    nnormals += 1
    return normals_dictionary
def generate_unique_colors_dictionary(mesh_list):
    """Map packed color key -> sequential index, merged across all meshes.

    Bug fix: the counter was referenced as ``count``/``count += 1`` but
    never initialized — a NameError on the first unseen color.  It is now
    the initialized ``ncolors``, mirroring
    generate_unique_normals_dictionary.
    """
    colors_dictionary = {}
    ncolors = 0
    # Merge meshes, remove duplicate data
    for mesh in mesh_list:
        color_values, color_indices = extract_fbx_vertex_colors(mesh)
        if len(color_values) > 0:
            for color in color_values:
                key = generate_color_key(color)
                if key not in colors_dictionary:
                    colors_dictionary[key] = ncolors
                    ncolors += 1
    return colors_dictionary
def generate_unique_uvs_dictionary_layers(mesh_list):
    # Build one key->index dictionary per uv layer, merged across meshes.
    uvs_dictionary_layers = []
    nuvs_list = []
    # Merge meshes, remove duplicate data
    for mesh in mesh_list:
        uv_values, uv_indices = extract_fbx_vertex_uvs(mesh)
        if len(uv_values) > 0:
            nuvs_list = append_non_duplicate_uvs(uv_values, uvs_dictionary_layers, nuvs_list)
    return uvs_dictionary_layers
def generate_normals_from_dictionary(normals_dictionary):
    """Return the normal keys ordered by their assigned indices."""
    ordered = sorted(normals_dictionary.items(), key=operator.itemgetter(1))
    return [key for key, _ in ordered]

def generate_colors_from_dictionary(colors_dictionary):
    """Return the color keys ordered by their assigned indices."""
    ordered = sorted(colors_dictionary.items(), key=operator.itemgetter(1))
    return [key for key, _ in ordered]

def generate_uvs_from_dictionary_layers(uvs_dictionary_layers):
    """Per-layer lists of uv keys, each ordered by assigned index."""
    return [
        [key for key, _ in sorted(layer.items(), key=operator.itemgetter(1))]
        for layer in uvs_dictionary_layers
    ]
def generate_normal_indices_for_poly(poly_index, mesh_normal_values, mesh_normal_indices, normals_to_indices):
if len(mesh_normal_indices) <= 0:
return []
poly_normal_indices = mesh_normal_indices[poly_index]
poly_size = len(poly_normal_indices)
output_poly_normal_indices = []
for v in range(poly_size):
normal_index = poly_normal_indices[v]
normal_value = mesh_normal_values[normal_index]
key = generate_normal_key(normal_value)
output_index = normals_to_indices[key]
output_poly_normal_indices.append(output_index)
return output_poly_normal_indices
def generate_color_indices_for_poly(poly_index, mesh_color_values, mesh_color_indices, colors_to_indices):
if len(mesh_color_indices) <= 0:
return []
poly_color_indices = mesh_color_indices[poly_index]
poly_size = len(poly_color_indices)
output_poly_color_indices = []
for v in range(poly_size):
color_index = poly_color_indices[v]
color_value = mesh_color_values[color_index]
key = generate_color_key(color_value)
output_index = colors_to_indices[key]
output_poly_color_indices.append(output_index)
return output_poly_color_indices
def generate_uv_indices_for_poly(poly_index, mesh_uv_values, mesh_uv_indices, uvs_to_indices):
    """Translate one polygon's per-vertex uv indices (for a single uv layer)
    from the mesh-local uv table into indices in the merged output uv table.

    Returns [] when the layer carries no uv index data at all.
    """
    if not mesh_uv_indices:
        return []
    output_indices = []
    for local_index in mesh_uv_indices[poly_index]:
        key = generate_uv_key(mesh_uv_values[local_index])
        output_indices.append(uvs_to_indices[key])
    return output_indices
def process_mesh_vertices(mesh_list):
    """Concatenate the control-point positions of every mesh.

    Returns (vertices, vertex_offset_list):
      vertices           -- all meshes' vertex positions, concatenated.
      vertex_offset_list -- vertex_offset_list[i] is the index of mesh i's
                            first vertex in the combined list; a trailing
                            entry holds the grand total.
    """
    vertex_offset = 0
    vertex_offset_list = [0]
    vertices = []
    for mesh in mesh_list:
        mesh_vertices = extract_fbx_vertex_positions(mesh)
        # extend() copies the elements itself; the original's defensive
        # slice (mesh_vertices[:]) and unused mesh.GetNode() call are dropped.
        vertices.extend(mesh_vertices)
        vertex_offset += len(mesh_vertices)
        vertex_offset_list.append(vertex_offset)
    return vertices, vertex_offset_list
def process_mesh_materials(mesh_list):
    """Collect every mesh's materials plus per-mesh offsets into the combined
    material list (mirrors the bookkeeping of process_mesh_vertices)."""
    material_offset = 0
    material_offset_list = [0]
    materials_list = []
    #TODO: remove duplicate mesh references
    for mesh in mesh_list:
        node = mesh.GetNode()
        material_count = node.GetMaterialCount()
        if material_count > 0:
            for l in range(mesh.GetLayerCount()):
                materials = mesh.GetLayer(l).GetMaterials()
                if materials:
                    if materials.GetReferenceMode() == FbxLayerElement.eIndex:
                        #Materials are in an undefined external table
                        continue
                    # NOTE(review): materials are appended once per layer that
                    # carries a material element, so a multi-layer mesh can add
                    # duplicates while material_offset advances only once per
                    # mesh — confirm this is intended.
                    for i in range(material_count):
                        material = node.GetMaterial(i)
                        materials_list.append( material )
        # Offsets are recorded for every mesh, even material-less ones, so
        # material_offset_list[mesh_index] stays valid downstream.
        material_offset += material_count
        material_offset_list.append(material_offset)
    return materials_list, material_offset_list
def process_mesh_polygons(mesh_list, normals_to_indices, colors_to_indices, uvs_to_indices_list, vertex_offset_list, material_offset_list):
    """Serialize every polygon of every mesh into face-record strings.

    The *_to_indices mappings translate mesh-local attribute indices into the
    merged global tables; the offset lists shift vertex/material indices by
    each mesh's position in the combined arrays.
    """
    faces = []
    for mesh_index in range(len(mesh_list)):
        mesh = mesh_list[mesh_index]
        poly_count = mesh.GetPolygonCount()
        control_points = mesh.GetControlPoints()
        # Per-mesh attribute tables, extracted once and reused per polygon.
        normal_values, normal_indices = extract_fbx_vertex_normals(mesh)
        color_values, color_indices = extract_fbx_vertex_colors(mesh)
        uv_values_layers, uv_indices_layers = extract_fbx_vertex_uvs(mesh)
        for poly_index in range(poly_count):
            poly_size = mesh.GetPolygonSize(poly_index)
            face_normals = generate_normal_indices_for_poly(poly_index, normal_values, normal_indices, normals_to_indices)
            face_colors = generate_color_indices_for_poly(poly_index, color_values, color_indices, colors_to_indices)
            # One index list per uv layer.
            face_uv_layers = []
            for l in range(len(uv_indices_layers)):
                uv_values = uv_values_layers[l]
                uv_indices = uv_indices_layers[l]
                face_uv_indices = generate_uv_indices_for_poly(poly_index, uv_values, uv_indices, uvs_to_indices_list[l])
                face_uv_layers.append(face_uv_indices)
            face_vertices = []
            for vertex_index in range(poly_size):
                control_point_index = mesh.GetPolygonVertex(poly_index, vertex_index)
                face_vertices.append(control_point_index)
            #TODO: assign a default material to any mesh without one
            if len(material_offset_list) <= mesh_index:
                material_offset = 0
            else:
                material_offset = material_offset_list[mesh_index]
            vertex_offset = vertex_offset_list[mesh_index]
            face = generate_mesh_face(mesh,
                poly_index,
                face_vertices,
                face_normals,
                face_colors,
                face_uv_layers,
                vertex_offset,
                material_offset)
            faces.append(face)
    return faces
def generate_mesh_face(mesh, polygon_index, vertex_indices, normals, colors, uv_layers, vertex_offset, material_offset):
    """Serialize one polygon into the comma-separated face record used by the
    three.js JSONLoader (format-3 "faces" array)."""
    isTriangle = ( len(vertex_indices) == 3 )
    nVertices = 3 if isTriangle else 4
    # The mesh "has materials" when any layer carries a material element.
    hasMaterial = False
    for l in range(mesh.GetLayerCount()):
        materials = mesh.GetLayer(l).GetMaterials()
        if materials:
            hasMaterial = True
            break
    # Per-face (as opposed to per-vertex) attributes are never exported here.
    hasFaceUvs = False
    hasFaceVertexUvs = len(uv_layers) > 0
    hasFaceNormals = False
    hasFaceVertexNormals = len(normals) > 0
    hasFaceColors = False
    hasFaceVertexColors = len(colors) > 0
    # faceType is a bitmask telling the loader which fields follow; the bit
    # layout is fixed by the JSONLoader format.
    faceType = 0
    faceType = setBit(faceType, 0, not isTriangle)
    faceType = setBit(faceType, 1, hasMaterial)
    faceType = setBit(faceType, 2, hasFaceUvs)
    faceType = setBit(faceType, 3, hasFaceVertexUvs)
    faceType = setBit(faceType, 4, hasFaceNormals)
    faceType = setBit(faceType, 5, hasFaceVertexNormals)
    faceType = setBit(faceType, 6, hasFaceColors)
    faceType = setBit(faceType, 7, hasFaceVertexColors)
    faceData = []
    # order is important, must match order in JSONLoader
    # face type
    # vertex indices
    # material index
    # face uvs index
    # face vertex uvs indices
    # face color index
    # face vertex colors indices
    faceData.append(faceType)
    tmp = []  # NOTE(review): tmp is filled but never read — dead code.
    for i in range(nVertices):
        tmp.append(vertex_indices[i])
        # Shift mesh-local vertex indices into the combined vertex array.
        index = vertex_indices[i] + vertex_offset
        faceData.append(index)
    if hasMaterial:
        material_id = 0
        # First layer with materials wins; its index array maps polygon ->
        # mesh-local material index.
        for l in range(mesh.GetLayerCount()):
            materials = mesh.GetLayer(l).GetMaterials()
            if materials:
                material_id = materials.GetIndexArray().GetAt(polygon_index)
                break
        material_id += material_offset
        faceData.append( material_id )
    if hasFaceVertexUvs:
        for polygon_uvs in uv_layers:
            for i in range(nVertices):
                index = polygon_uvs[i]
                faceData.append(index)
    if hasFaceVertexNormals:
        for i in range(nVertices):
            index = normals[i]
            faceData.append(index)
    if hasFaceVertexColors:
        for i in range(nVertices):
            index = colors[i]
            faceData.append(index)
    return ",".join( map(str, faceData) )
# #####################################################
# Generate - Mesh List
# #####################################################
def generate_mesh_list_from_hierarchy(node, mesh_list):
    """Depth-first collect mesh-like node attributes under *node* into
    mesh_list, triangulating non-mesh surfaces in place first."""
    attribute = node.GetNodeAttribute()
    if attribute is not None:
        attribute_type = attribute.GetAttributeType()
        mesh_like = (FbxNodeAttribute.eMesh,
                     FbxNodeAttribute.eNurbs,
                     FbxNodeAttribute.eNurbsSurface,
                     FbxNodeAttribute.ePatch)
        if attribute_type in mesh_like:
            if attribute_type != FbxNodeAttribute.eMesh:
                # Nurbs/patch surfaces must be converted to triangle meshes
                # before they can be exported as geometry.
                converter.TriangulateInPlace(node)
            # Re-fetch: triangulation may have replaced the node attribute.
            mesh_list.append(node.GetNodeAttribute())
    for child_index in range(node.GetChildCount()):
        generate_mesh_list_from_hierarchy(node.GetChild(child_index), mesh_list)
def generate_mesh_list(scene):
    """Return every mesh-like attribute reachable from the scene root."""
    mesh_list = []
    root = scene.GetRootNode()
    if root:
        for child_index in range(root.GetChildCount()):
            generate_mesh_list_from_hierarchy(root.GetChild(child_index), mesh_list)
    return mesh_list
# #####################################################
# Generate - Embeds
# #####################################################
def generate_embed_list_from_hierarchy(node, embed_list):
    """Depth-first collect embedded-mesh JSON strings for every mesh-like
    node under *node*, triangulating non-mesh surfaces in place first."""
    attribute = node.GetNodeAttribute()
    if attribute is not None:
        attribute_type = attribute.GetAttributeType()
        mesh_like = (FbxNodeAttribute.eMesh,
                     FbxNodeAttribute.eNurbs,
                     FbxNodeAttribute.eNurbsSurface,
                     FbxNodeAttribute.ePatch)
        if attribute_type in mesh_like:
            if attribute_type != FbxNodeAttribute.eMesh:
                # Convert parametric surfaces to triangle meshes first.
                converter.TriangulateInPlace(node)
            embed_list.append(generate_mesh_string_for_scene_output(node))
    for child_index in range(node.GetChildCount()):
        generate_embed_list_from_hierarchy(node.GetChild(child_index), embed_list)
def generate_embed_list(scene):
    """Return the embedded-geometry JSON strings for the whole scene."""
    embed_list = []
    root = scene.GetRootNode()
    if root:
        for child_index in range(root.GetChildCount()):
            generate_embed_list_from_hierarchy(root.GetChild(child_index), embed_list)
    return embed_list
# #####################################################
# Generate - Geometries
# #####################################################
def generate_geometry_string(node):
    """Emit one "geometries" entry: the geometry data itself lives in the
    "embeds" section and is referenced here by id."""
    output = [
        '\t' + LabelString( getGeometryName( node, True ) ) + ' : {',
        ' "type" : "embedded",',
        ' "id" : ' + LabelString( getEmbedName( node, True ) ),
        '}'
        ]
    return generateMultiLineString( output, '\n\t\t', 0 )
def generate_geometry_list_from_hierarchy(node, geometry_list):
    """Depth-first collect "geometries" entries for every mesh node under
    *node* (only eMesh; parametric surfaces were triangulated earlier)."""
    attribute = node.GetNodeAttribute()
    if attribute is not None:
        if attribute.GetAttributeType() == FbxNodeAttribute.eMesh:
            geometry_list.append(generate_geometry_string(node))
    for child_index in range(node.GetChildCount()):
        generate_geometry_list_from_hierarchy(node.GetChild(child_index), geometry_list)
def generate_geometry_list(scene):
    """Return the "geometries" section entries for the whole scene."""
    geometry_list = []
    root = scene.GetRootNode()
    if root:
        for child_index in range(root.GetChildCount()):
            generate_geometry_list_from_hierarchy(root.GetChild(child_index), geometry_list)
    return geometry_list
# #####################################################
# Generate - Camera Names
# #####################################################
def generate_camera_name_list_from_hierarchy(node, camera_list):
    """Depth-first collect the object names of every camera node under *node*."""
    attribute = node.GetNodeAttribute()
    if attribute is not None:
        if attribute.GetAttributeType() == FbxNodeAttribute.eCamera:
            camera_list.append(getObjectName(node))
    for child_index in range(node.GetChildCount()):
        generate_camera_name_list_from_hierarchy(node.GetChild(child_index), camera_list)
def generate_camera_name_list(scene):
    """Return the names of all cameras in the scene (used to pick a default)."""
    camera_list = []
    root = scene.GetRootNode()
    if root:
        for child_index in range(root.GetChildCount()):
            generate_camera_name_list_from_hierarchy(root.GetChild(child_index), camera_list)
    return camera_list
# #####################################################
# Generate - Light Object
# #####################################################
def generate_default_light_string(padding):
    """Emit a hard-coded DirectionalLight entry, used when --default-light
    was requested on the command line."""
    direction = (1,1,1)
    color = (1,1,1)
    # FBX intensities are percentages; three.js expects 0..1, hence the /100
    # below.
    intensity = 80.0
    output = [
        '\t\t' + LabelString( 'default_light' ) + ' : {',
        ' "type" : "DirectionalLight",',
        ' "color" : ' + str(getHex(color)) + ',',
        ' "intensity" : ' + str(intensity/100.0) + ',',
        ' "direction" : ' + Vector3String( direction ) + ',',
        # A None target makes getObjectName emit the shared "no object" label.
        ' "target" : ' + LabelString( getObjectName( None ) ),
        ' }'
        ]
    return generateMultiLineString( output, '\n\t\t', padding )
def generate_light_string(node, padding):
    """Serialize an FBX light node into a three.js scene-format light entry.

    Only directional, point and spot lights are handled; area/volume lights
    fall through and produce an empty string.
    """
    light = node.GetNodeAttribute()
    # LightType.Get() indexes into the FBX enum order below.
    light_types = ["point", "directional", "spot", "area", "volume"]
    light_type = light_types[light.LightType.Get()]
    transform = node.EvaluateLocalTransform()
    position = transform.GetT()
    output = []
    if light_type == "directional":
        # Three.js directional lights emit light from a point in 3d space to a target node or the origin.
        # When there is no target, we need to take a point, one unit away from the origin, and move it
        # into the right location so that the origin acts like the target
        if node.GetTarget():
            direction = position
        else:
            translation = FbxVector4(0,0,0,0)
            scale = FbxVector4(1,1,1,1)
            rotation = transform.GetR()
            matrix = FbxMatrix(translation, rotation, scale)
            # Rotate the global up vector by the node's orientation to get the
            # emission direction.
            direction = matrix.MultNormalize(global_up_vector)
        output = [
            '\t\t' + LabelString( getObjectName( node ) ) + ' : {',
            ' "type" : "DirectionalLight",',
            ' "color" : ' + str(getHex(light.Color.Get())) + ',',
            # FBX intensity is a percentage; three.js expects 0..1.
            ' "intensity" : ' + str(light.Intensity.Get()/100.0) + ',',
            ' "direction" : ' + Vector3String( direction ) + ',',
            ' "target" : ' + LabelString( getObjectName( node.GetTarget() ) ) + ( ',' if node.GetChildCount() > 0 else '' )
            ]
    elif light_type == "point":
        output = [
            '\t\t' + LabelString( getObjectName( node ) ) + ' : {',
            ' "type" : "PointLight",',
            ' "color" : ' + str(getHex(light.Color.Get())) + ',',
            ' "intensity" : ' + str(light.Intensity.Get()/100.0) + ',',
            ' "position" : ' + Vector3String( position ) + ',',
            ' "distance" : ' + str(light.FarAttenuationEnd.Get()) + ( ',' if node.GetChildCount() > 0 else '' )
            ]
    elif light_type == "spot":
        output = [
            '\t\t' + LabelString( getObjectName( node ) ) + ' : {',
            ' "type" : "SpotLight",',
            ' "color" : ' + str(getHex(light.Color.Get())) + ',',
            ' "intensity" : ' + str(light.Intensity.Get()/100.0) + ',',
            ' "position" : ' + Vector3String( position ) + ',',
            ' "distance" : ' + str(light.FarAttenuationEnd.Get()) + ',',
            # OuterAngle is in degrees; convert to radians for three.js.
            ' "angle" : ' + str((light.OuterAngle.Get()*math.pi)/180) + ',',
            # NOTE(review): "exponent" is taken from DecayType, which is an
            # enum selector, not a falloff exponent — looks suspicious;
            # confirm against the three.js JSON format spec.
            ' "exponent" : ' + str(light.DecayType.Get()) + ',',
            ' "target" : ' + LabelString( getObjectName( node.GetTarget() ) ) + ( ',' if node.GetChildCount() > 0 else '' )
            ]
    return generateMultiLineString( output, '\n\t\t', padding )
def generate_ambient_light_string(scene):
    """Emit an AmbientLight entry from the scene's global ambient color,
    or None when the color is pure black (i.e. no ambient light)."""
    scene_settings = scene.GetGlobalSettings()
    ambient_color = scene_settings.GetAmbientColor()
    ambient_color = (ambient_color.mRed, ambient_color.mGreen, ambient_color.mBlue)
    if ambient_color[0] == 0 and ambient_color[1] == 0 and ambient_color[2] == 0:
        return None
    # Minimal stand-in object so getObjectName() can derive a name for a
    # light that has no corresponding FBX node.
    class AmbientLight:
        def GetName(self):
            return "AmbientLight"
    node = AmbientLight()
    output = [
        '\t\t' + LabelString( getObjectName( node ) ) + ' : {',
        ' "type" : "AmbientLight",',
        ' "color" : ' + str(getHex(ambient_color)),
        '}'
        ]
    return generateMultiLineString( output, '\n\t\t', 0 )
# #####################################################
# Generate - Camera Object
# #####################################################
def generate_default_camera_string(padding):
    """Emit a hard-coded PerspectiveCamera entry, used when --default-camera
    was requested on the command line."""
    position = (100, 100, 100)
    near = 0.1
    far = 1000
    fov = 75
    output = [
        '\t\t' + LabelString( 'default_camera' ) + ' : {',
        ' "type" : "PerspectiveCamera",',
        ' "fov" : ' + str(fov) + ',',
        ' "near" : ' + str(near) + ',',
        ' "far" : ' + str(far) + ',',
        ' "position" : ' + Vector3String( position ),
        ' }'
        ]
    return generateMultiLineString( output, '\n\t\t', padding )
def generate_camera_string(node, padding):
    """Serialize an FBX camera node into a three.js scene-format camera entry.

    Handles perspective and orthographic projections; returns an empty string
    for anything else.
    """
    camera = node.GetNodeAttribute()
    target_node = node.GetTarget()
    target = ""
    if target_node:
        # BUG FIX: the original called target.EvaluateLocalTransform() while
        # `target` was still the empty string, raising AttributeError for any
        # camera with a target node. The transform belongs to target_node.
        transform = target_node.EvaluateLocalTransform()
        target = transform.GetT()
    else:
        target = camera.InterestPosition.Get()
    # NOTE(review): `target` is computed but never written to the output —
    # possibly an unfinished feature; left as-is to preserve behavior.
    position = camera.Position.Get()
    # ProjectionType.Get() indexes into the FBX enum order below.
    projection_types = [ "perspective", "orthogonal" ]
    projection = projection_types[camera.ProjectionType.Get()]
    near = camera.NearPlane.Get()
    far = camera.FarPlane.Get()
    output = []
    if projection == "perspective":
        aspect = camera.PixelAspectRatio.Get()
        fov = camera.FieldOfView.Get()
        output = [
            '\t\t' + LabelString( getObjectName( node ) ) + ' : {',
            ' "type" : "PerspectiveCamera",',
            ' "fov" : ' + str(fov) + ',',
            ' "aspect" : ' + str(aspect) + ',',
            ' "near" : ' + str(near) + ',',
            ' "far" : ' + str(far) + ',',
            ' "position" : ' + Vector3String( position ) + ( ',' if node.GetChildCount() > 0 else '' )
            ]
    elif projection == "orthogonal":
        # TODO(review): the frustum bounds are emitted as empty strings,
        # which produces invalid JSON ("left" : ,). The FBX ortho extent
        # should be derived from camera.OrthoZoom / aperture settings.
        left = ""
        right = ""
        top = ""
        bottom = ""
        output = [
            '\t\t' + LabelString( getObjectName( node ) ) + ' : {',
            ' "type" : "OrthographicCamera",',
            ' "left" : ' + left + ',',
            ' "right" : ' + right + ',',
            ' "top" : ' + top + ',',
            ' "bottom" : ' + bottom + ',',
            ' "near" : ' + str(near) + ',',
            ' "far" : ' + str(far) + ',',
            ' "position" : ' + Vector3String( position ) + ( ',' if node.GetChildCount() > 0 else '' )
            ]
    return generateMultiLineString( output, '\n\t\t', padding )
# #####################################################
# Generate - Mesh Object
# #####################################################
def generate_mesh_object_string(node, padding):
    """Serialize a mesh node into a three.js scene-format object entry,
    resolving its geometry and material references."""
    mesh = node.GetNodeAttribute()
    transform = node.EvaluateLocalTransform()
    position = transform.GetT()
    scale = transform.GetS()
    rotation = getRadians(transform.GetR())
    material_count = node.GetMaterialCount()
    material_name = ""
    if material_count > 0:
        material_names = []
        for l in range(mesh.GetLayerCount()):
            materials = mesh.GetLayer(l).GetMaterials()
            if materials:
                if materials.GetReferenceMode() == FbxLayerElement.eIndex:
                    #Materials are in an undefined external table
                    continue
                for i in range(material_count):
                    material = node.GetMaterial(i)
                    material_names.append( getMaterialName(material) )
        #If this mesh has more than one material, use a proxy material
        if material_count > 1:
            material_name = getMaterialName( node, True)
        elif material_names:
            material_name = material_names[0]
        # BUG FIX: the original unconditionally read material_names[0] here,
        # raising IndexError when every layer used eIndex reference mode and
        # the list stayed empty; material_name now falls back to "".
    output = [
        '\t\t' + LabelString( getObjectName( node ) ) + ' : {',
        ' "geometry" : ' + LabelString( getGeometryName( node, True ) ) + ',',
        ' "material" : ' + LabelString( material_name ) + ',',
        ' "position" : ' + Vector3String( position ) + ',',
        ' "rotation" : ' + Vector3String( rotation ) + ',',
        ' "scale" : ' + Vector3String( scale ) + ',',
        ' "visible" : ' + getObjectVisible( node ) + ( ',' if node.GetChildCount() > 0 else '' )
        ]
    return generateMultiLineString( output, '\n\t\t', padding )
# #####################################################
# Generate - Object
# #####################################################
def generate_object_string(node, padding):
    """Serialize a generic (non-mesh, non-light, non-camera) node into a
    three.js scene-format object entry."""
    # Index order matches the FbxNodeAttribute.EType enum.
    node_types = ["Unknown", "Null", "Marker", "Skeleton", "Mesh", "Nurbs", "Patch", "Camera",
        "CameraStereo", "CameraSwitcher", "Light", "OpticalReference", "OpticalMarker", "NurbsCurve",
        "TrimNurbsSurface", "Boundary", "NurbsSurface", "Shape", "LODGroup", "SubDiv", "CachedEffect", "Line"]
    transform = node.EvaluateLocalTransform()
    position = transform.GetT()
    scale = transform.GetS()
    rotation = getRadians(transform.GetR())
    node_type = ""
    if node.GetNodeAttribute() == None:
        node_type = "Null"
    else:
        node_type = node_types[node.GetNodeAttribute().GetAttributeType()]
    output = [
        '\t\t' + LabelString( getObjectName( node ) ) + ' : {',
        ' "fbx_type" : ' + LabelString( node_type ) + ',',
        ' "position" : ' + Vector3String( position ) + ',',
        ' "rotation" : ' + Vector3String( rotation ) + ',',
        ' "scale" : ' + Vector3String( scale ) + ',',
        ' "visible" : ' + getObjectVisible( node ) + ( ',' if node.GetChildCount() > 0 else '' )
        ]
    return generateMultiLineString( output, '\n\t\t', padding )
# #####################################################
# Parse - Objects
# #####################################################
def generate_object_hierarchy(node, object_list, pad, siblings_left):
    """Recursively append scene-graph object entries for *node* and its
    children to object_list.

    *pad* controls indentation depth; *siblings_left* tells us whether a
    trailing comma is needed after this node's closing brace. Returns the
    number of objects emitted.
    """
    object_count = 0
    # Dispatch on the node's attribute type to pick the right serializer.
    if node.GetNodeAttribute() == None:
        object_string = generate_object_string(node, pad)
        object_list.append(object_string)
        object_count += 1
    else:
        attribute_type = (node.GetNodeAttribute().GetAttributeType())
        if attribute_type == FbxNodeAttribute.eMesh:
            object_string = generate_mesh_object_string(node, pad)
            object_list.append(object_string)
            object_count += 1
        elif attribute_type == FbxNodeAttribute.eLight:
            object_string = generate_light_string(node, pad)
            object_list.append(object_string)
            object_count += 1
        elif attribute_type == FbxNodeAttribute.eCamera:
            object_string = generate_camera_string(node, pad)
            object_list.append(object_string)
            object_count += 1
        else:
            object_string = generate_object_string(node, pad)
            object_list.append(object_string)
            object_count += 1
    # Children are nested inside a "children" object, two levels deeper.
    if node.GetChildCount() > 0:
        object_list.append( PaddingString( pad + 1 ) + '\t\t"children" : {\n' )
        for i in range(node.GetChildCount()):
            object_count += generate_object_hierarchy(node.GetChild(i), object_list, pad + 2, node.GetChildCount() - i - 1)
        object_list.append( PaddingString( pad + 1 ) + '\t\t}' )
    # Close this object; comma only if more siblings follow.
    object_list.append( PaddingString( pad ) + '\t\t}' + (',\n' if siblings_left > 0 else ''))
    return object_count
def generate_scene_objects_string(scene):
    """Build the body of the "objects" section and count the emitted objects.

    Synthetic entries (ambient light, default light/camera) come first; each
    gets a trailing comma only when more entries will follow.
    """
    object_count = 0
    object_list = []
    ambient_light = generate_ambient_light_string(scene)
    if ambient_light:
        if scene.GetNodeCount() > 0 or option_default_light or option_default_camera:
            ambient_light += (',\n')
        object_list.append(ambient_light)
        object_count += 1
    if option_default_light:
        default_light = generate_default_light_string(0)
        if scene.GetNodeCount() > 0 or option_default_camera:
            default_light += (',\n')
        object_list.append(default_light)
        object_count += 1
    if option_default_camera:
        default_camera = generate_default_camera_string(0)
        if scene.GetNodeCount() > 0:
            default_camera += (',\n')
        object_list.append(default_camera)
        object_count += 1
    # Then the real scene graph, recursively.
    node = scene.GetRootNode()
    if node:
        for i in range(node.GetChildCount()):
            object_count += generate_object_hierarchy(node.GetChild(i), object_list, 0, node.GetChildCount() - i - 1)
    return "\n".join(object_list), object_count
# #####################################################
# Parse - Geometry (non-scene output)
# #####################################################
def extract_geometry(scene, filename):
    """Return the stand-alone (non-scene) mesh JSON for *scene*.

    *filename* is accepted for signature parity with extract_scene but is
    not used here.
    """
    return generate_mesh_string_for_non_scene_output(scene)
# #####################################################
# Parse - Scene (scene output)
# #####################################################
def extract_scene(scene, filename):
    """Assemble the complete three.js scene-format (format 3.2) JSON document
    for *scene* and return it as a string."""
    global_settings = scene.GetGlobalSettings()
    objects, nobjects = generate_scene_objects_string(scene)
    # Each section is gathered as a list of entry strings first.
    textures = generate_texture_list(scene)
    materials = generate_material_list(scene)
    geometries = generate_geometry_list(scene)
    embeds = generate_embed_list(scene)
    fogs = []
    ntextures = len(textures)
    nmaterials = len(materials)
    ngeometries = len(geometries)
    #TODO: extract actual root/scene data here
    position = Vector3String( (0,0,0) )
    rotation = Vector3String( (0,0,0) )
    scale = Vector3String( (1,1,1) )
    camera_names = generate_camera_name_list(scene)
    scene_settings = scene.GetGlobalSettings()
    #TODO: this might exist as part of the FBX spec
    bgcolor = Vector3String( (0.667,0.667,0.667) )
    bgalpha = 1
    # This does not seem to be any help here
    # global_settings.GetDefaultCamera()
    # Default camera: first camera found in the scene, unless the user asked
    # for the synthetic default_camera.
    defcamera = LabelString(camera_names[0] if len(camera_names) > 0 else "")
    if option_default_camera:
        defcamera = LabelString('default_camera')
    #TODO: extract fog info from scene
    deffog = LabelString("")
    # Collapse each section's entries into a single comma-joined string.
    geometries = generateMultiLineString( geometries, ",\n\n\t", 0 )
    materials = generateMultiLineString( materials, ",\n\n\t", 0 )
    textures = generateMultiLineString( textures, ",\n\n\t", 0 )
    embeds = generateMultiLineString( embeds, ",\n\n\t", 0 )
    fogs = generateMultiLineString( fogs, ",\n\n\t", 0 )
    output = [
        '{',
        ' "metadata": {',
        ' "formatVersion" : 3.2,',
        ' "type" : "scene",',
        ' "generatedBy" : "convert-to-threejs.py",',
        ' "objects" : ' + str(nobjects) + ',',
        ' "geometries" : ' + str(ngeometries) + ',',
        ' "materials" : ' + str(nmaterials) + ',',
        ' "textures" : ' + str(ntextures),
        ' },',
        '',
        ' "urlBaseType": "relativeToScene",',
        '',
        ' "objects" :',
        ' {',
        objects,
        ' },',
        '',
        ' "geometries" :',
        ' {',
        '\t' + geometries,
        ' },',
        '',
        ' "materials" :',
        ' {',
        '\t' + materials,
        ' },',
        '',
        ' "textures" :',
        ' {',
        '\t' + textures,
        ' },',
        '',
        ' "embeds" :',
        ' {',
        '\t' + embeds,
        ' },',
        '',
        ' "fogs" :',
        ' {',
        '\t' + fogs,
        ' },',
        '',
        ' "transform" :',
        ' {',
        ' "position" : ' + position + ',',
        ' "rotation" : ' + rotation + ',',
        ' "scale" : ' + scale,
        ' },',
        '',
        ' "defaults" :',
        ' {',
        ' "bgcolor" : ' + str(bgcolor) + ',',
        ' "bgalpha" : ' + str(bgalpha) + ',',
        ' "camera" : ' + defcamera + ',',
        ' "fog" : ' + deffog,
        ' }',
        '}'
        ]
    return "\n".join(output)
# #####################################################
# file helpers
# #####################################################
def write_file(fname, content):
    """Write *content* to *fname* in text mode, replacing any existing file.

    Uses a context manager so the handle is closed even if write() raises
    (the original leaked the handle on failure).
    """
    with open(fname, "w") as out:
        out.write(content)
# #####################################################
# main
# #####################################################
if __name__ == "__main__":
    from optparse import OptionParser
    # The FBX python bindings are not pip-installable; they must be copied
    # into site-packages manually, hence the guided error message.
    try:
        from FbxCommon import *
    except ImportError:
        import platform
        msg = 'Could not locate the python FBX SDK!\n'
        msg += 'You need to copy the FBX SDK into your python install folder such as '
        if platform.system() == 'Windows' or platform.system() == 'Microsoft':
            msg += '"Python26/Lib/site-packages"'
        elif platform.system() == 'Linux':
            msg += '"/usr/local/lib/python2.6/site-packages"'
        elif platform.system() == 'Darwin':
            msg += '"/Library/Frameworks/Python.framework/Versions/2.6/lib/python2.6/site-packages"'
        msg += ' folder.'
        print(msg)
        sys.exit(1)
    usage = "Usage: %prog [source_file.fbx] [output_file.js] [options]"
    parser = OptionParser(usage=usage)
    parser.add_option('-t', '--triangulate', action='store_true', dest='triangulate', help="force quad geometry into triangles", default=False)
    parser.add_option('-x', '--no-textures', action='store_true', dest='notextures', help="don't include texture references in output file", default=False)
    parser.add_option('-p', '--prefix', action='store_true', dest='prefix', help="prefix object names in output file", default=False)
    parser.add_option('-g', '--geometry-only', action='store_true', dest='geometry', help="output geometry only", default=False)
    parser.add_option('-c', '--default-camera', action='store_true', dest='defcamera', help="include default camera in output scene", default=False)
    # NOTE(review): '--defualt-light' is a typo for '--default-light', but
    # fixing the string would change the public CLI — leaving as-is.
    parser.add_option('-l', '--defualt-light', action='store_true', dest='deflight', help="include default light in output scene", default=False)
    (options, args) = parser.parse_args()
    # These module-level option flags are read throughout the generators.
    option_triangulate = options.triangulate
    option_textures = True if not options.notextures else False
    option_prefix = options.prefix
    option_geometry = options.geometry
    option_default_camera = options.defcamera
    option_default_light = options.deflight
    # Prepare the FBX SDK.
    sdk_manager, scene = InitializeSdkObjects()
    converter = FbxGeometryConverter(sdk_manager)
    # NOTE(review): the up vector is read before LoadScene populates the
    # scene, so it reflects default global settings — confirm intended.
    global_up_vector = get_up_vector(scene)
    # The converter takes an FBX file as an argument.
    if len(args) > 1:
        print("\nLoading file: %s" % args[0])
        result = LoadScene(sdk_manager, scene, args[0])
    else:
        result = False
        print("\nUsage: convert_fbx_to_threejs [source_file.fbx] [output_file.js]\n")
    if not result:
        print("\nAn error occurred while loading the file...")
    else:
        if option_triangulate:
            print("\nForcing geometry to triangles")
            triangulate_scene(scene)
        # Geometry-only export vs. full scene export.
        if option_geometry:
            output_content = extract_geometry(scene, os.path.basename(args[0]))
        else:
            output_content = extract_scene(scene, os.path.basename(args[0]))
        output_path = os.path.join(os.getcwd(), args[1])
        write_file(output_path, output_content)
        print("\nExported Three.js file to:\n%s\n" % output_path)
    # Destroy all objects created by the FBX SDK.
    sdk_manager.Destroy()
    sys.exit(0)
| mit |
heyavery/lopenr | venv/lib/python2.7/site-packages/django/contrib/messages/storage/base.py | 526 | 6285 | from __future__ import unicode_literals
from django.conf import settings
from django.contrib.messages import constants, utils
from django.utils.encoding import force_text, python_2_unicode_compatible
LEVEL_TAGS = utils.get_level_tags()
@python_2_unicode_compatible
class Message(object):
    """
    Represents an actual message that can be stored in any of the supported
    storage classes (typically session- or cookie-based) and rendered in a view
    or template.
    """

    def __init__(self, level, message, extra_tags=None):
        self.level = int(level)
        self.message = message
        self.extra_tags = extra_tags

    def _prepare(self):
        """
        Coerce ``message`` and ``extra_tags`` to unicode before serialization,
        resolving any lazy translations. Known "safe" types (None, int, etc.)
        pass through unchanged (see Django's ``force_text``).
        """
        self.message = force_text(self.message, strings_only=True)
        self.extra_tags = force_text(self.extra_tags, strings_only=True)

    def __eq__(self, other):
        if not isinstance(other, Message):
            return False
        return self.level == other.level and self.message == other.message

    def __str__(self):
        return force_text(self.message)

    def _get_tags(self):
        # Combine the custom extra tags with the level-derived tag, space
        # separated, falling back to whichever one is non-empty.
        extra_tags = force_text(self.extra_tags, strings_only=True)
        level_tag = self.level_tag
        if extra_tags and level_tag:
            return ' '.join([extra_tags, level_tag])
        return extra_tags or level_tag or ''

    tags = property(_get_tags)

    @property
    def level_tag(self):
        return force_text(LEVEL_TAGS.get(self.level, ''), strings_only=True)
class BaseStorage(object):
    """
    This is the base backend for temporary message storage.
    This is not a complete class; to be a usable storage backend, it must be
    subclassed and the two methods ``_get`` and ``_store`` overridden.
    """

    def __init__(self, request, *args, **kwargs):
        self.request = request
        # Messages added during this request, not yet persisted.
        self._queued_messages = []
        # True once the storage has been iterated (read) this request.
        self.used = False
        # True once add() has queued at least one new message.
        self.added_new = False
        super(BaseStorage, self).__init__(*args, **kwargs)

    def __len__(self):
        return len(self._loaded_messages) + len(self._queued_messages)

    def __iter__(self):
        # Iterating marks the storage as used, which tells update() to store
        # only messages queued after this point.
        self.used = True
        if self._queued_messages:
            self._loaded_messages.extend(self._queued_messages)
            self._queued_messages = []
        return iter(self._loaded_messages)

    def __contains__(self, item):
        return item in self._loaded_messages or item in self._queued_messages

    @property
    def _loaded_messages(self):
        """
        Returns a list of loaded messages, retrieving them first if they have
        not been loaded yet.
        """
        # Lazily fetched from the backend once per request, then memoized.
        if not hasattr(self, '_loaded_data'):
            messages, all_retrieved = self._get()
            self._loaded_data = messages or []
        return self._loaded_data

    def _get(self, *args, **kwargs):
        """
        Retrieves a list of stored messages. Returns a tuple of the messages
        and a flag indicating whether or not all the messages originally
        intended to be stored in this storage were, in fact, stored and
        retrieved; e.g., ``(messages, all_retrieved)``.

        **This method must be implemented by a subclass.**

        If it is possible to tell if the backend was not used (as opposed to
        just containing no messages) then ``None`` should be returned in
        place of ``messages``.
        """
        raise NotImplementedError('subclasses of BaseStorage must provide a _get() method')

    def _store(self, messages, response, *args, **kwargs):
        """
        Stores a list of messages, returning a list of any messages which could
        not be stored.

        One type of object must be able to be stored, ``Message``.

        **This method must be implemented by a subclass.**
        """
        raise NotImplementedError('subclasses of BaseStorage must provide a _store() method')

    def _prepare_messages(self, messages):
        """
        Prepares a list of messages for storage.
        """
        for message in messages:
            message._prepare()

    def update(self, response):
        """
        Stores all unread messages.

        If the backend has yet to be iterated, previously stored messages will
        be stored again. Otherwise, only messages added after the last
        iteration will be stored.
        """
        self._prepare_messages(self._queued_messages)
        if self.used:
            return self._store(self._queued_messages, response)
        elif self.added_new:
            messages = self._loaded_messages + self._queued_messages
            return self._store(messages, response)

    def add(self, level, message, extra_tags=''):
        """
        Queues a message to be stored.

        The message is only queued if it contained something and its level is
        not less than the recording level (``self.level``).
        """
        if not message:
            return
        # Check that the message level is not less than the recording level.
        level = int(level)
        if level < self.level:
            return
        # Add the message.
        self.added_new = True
        message = Message(level, message, extra_tags=extra_tags)
        self._queued_messages.append(message)

    def _get_level(self):
        """
        Returns the minimum recorded level.

        The default level is the ``MESSAGE_LEVEL`` setting. If this is
        not found, the ``INFO`` level is used.
        """
        if not hasattr(self, '_level'):
            self._level = getattr(settings, 'MESSAGE_LEVEL', constants.INFO)
        return self._level

    def _set_level(self, value=None):
        """
        Sets a custom minimum recorded level.

        If set to ``None``, the default level will be used (see the
        ``_get_level`` method).
        """
        if value is None and hasattr(self, '_level'):
            del self._level
        else:
            self._level = int(value)

    # The deleter reuses the setter: ``del storage.level`` calls
    # _set_level(None), which resets back to the default level.
    level = property(_get_level, _set_level, _set_level)
| mit |
msrb/samba | python/samba/subunit/__init__.py | 32 | 2672 | # Subunit handling
# Copyright (C) Jelmer Vernooij <jelmer@samba.org> 2014
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""Subunit test protocol."""
import samba
samba.ensure_third_party_module("iso8601", "pyiso8601")
import iso8601
import unittest
# Progress directive codes used by the subunit protocol's "progress" command:
# SET/CUR adjust the expected test count; PUSH/POP manage nested scopes.
PROGRESS_SET = 0
PROGRESS_CUR = 1
PROGRESS_PUSH = 2
PROGRESS_POP = 3
def RemoteError(description=""):
    """Build an exc_info-style triple describing a failure in a remote test.

    The traceback slot is None because the real traceback existed only in the
    child process.
    """
    return (Exception, Exception(description), None)
class RemotedTestCase(unittest.TestCase):
    """A class to represent test cases run in child processes.

    Instances memoise a test that was actually executed earlier by a separate
    process: they can be printed to the screen and introspected for metadata,
    but cannot perform any interactive actions.
    """

    def __init__(self, description):
        """Create a pseudo test case described by *description*."""
        self.__description = description

    def __eq__(self, other):
        try:
            return self.__description == other.__description
        except AttributeError:
            # `other` is not a RemotedTestCase (or is uninitialized).
            return False

    def error(self, label):
        raise NotImplementedError(
            "%s on RemotedTestCases is not permitted." % label)

    def setUp(self):
        self.error("setUp")

    def tearDown(self):
        self.error("tearDown")

    def shortDescription(self):
        return self.__description

    def id(self):
        return "%s" % (self.__description,)

    def __str__(self):
        return "%s (%s)" % (self.__description, self._strclass())

    def __repr__(self):
        return "<%s description='%s'>" % (self._strclass(), self.__description)

    def run(self, result=None):
        # Remote tests cannot be re-run locally; report that as an error.
        result = result if result is not None else self.defaultTestResult()
        result.startTest(self)
        result.addError(self, RemoteError("Cannot run RemotedTestCases.\n"))
        result.stopTest(self)

    def _strclass(self):
        cls = type(self)
        return "%s.%s" % (cls.__module__, cls.__name__)
| gpl-3.0 |
jhseu/tensorflow | tensorflow/python/keras/layers/simplernn_test.py | 1 | 8143 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for SimpleRNN layer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python import keras
from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util as tf_test_util
from tensorflow.python.keras import keras_parameterized
from tensorflow.python.keras import testing_utils
from tensorflow.python.platform import test
from tensorflow.python.training import gradient_descent
@keras_parameterized.run_all_keras_modes
class SimpleRNNLayerTest(keras_parameterized.TestCase):
  """Behavioral tests for `keras.layers.SimpleRNN` across all keras modes."""
  def test_return_sequences_SimpleRNN(self):
    # The layer should emit one output per timestep when return_sequences=True.
    num_samples = 2
    timesteps = 3
    embedding_dim = 4
    units = 2
    testing_utils.layer_test(
        keras.layers.SimpleRNN,
        kwargs={'units': units,
                'return_sequences': True},
        input_shape=(num_samples, timesteps, embedding_dim))
  @tf_test_util.run_v2_only
  def test_float64_SimpleRNN(self):
    # Same as above but with float64 inputs/weights.
    num_samples = 2
    timesteps = 3
    embedding_dim = 4
    units = 2
    testing_utils.layer_test(
        keras.layers.SimpleRNN,
        kwargs={'units': units,
                'return_sequences': True,
                'dtype': 'float64'},
        input_shape=(num_samples, timesteps, embedding_dim),
        input_dtype='float64')
  def test_dynamic_behavior_SimpleRNN(self):
    # input_shape=(None, ...) exercises variable-length (dynamic) timesteps.
    num_samples = 2
    timesteps = 3
    embedding_dim = 4
    units = 2
    layer = keras.layers.SimpleRNN(units, input_shape=(None, embedding_dim))
    model = keras.models.Sequential()
    model.add(layer)
    model.compile('rmsprop', 'mse')
    x = np.random.random((num_samples, timesteps, embedding_dim))
    y = np.random.random((num_samples, units))
    model.train_on_batch(x, y)
  def test_dropout_SimpleRNN(self):
    # Smoke-test input and recurrent dropout.
    num_samples = 2
    timesteps = 3
    embedding_dim = 4
    units = 2
    testing_utils.layer_test(
        keras.layers.SimpleRNN,
        kwargs={'units': units,
                'dropout': 0.1,
                'recurrent_dropout': 0.1},
        input_shape=(num_samples, timesteps, embedding_dim))
  def test_implementation_mode_SimpleRNN(self):
    # All implementation modes must produce a working layer.
    num_samples = 2
    timesteps = 3
    embedding_dim = 4
    units = 2
    for mode in [0, 1, 2]:
      testing_utils.layer_test(
          keras.layers.SimpleRNN,
          kwargs={'units': units,
                  'implementation': mode},
          input_shape=(num_samples, timesteps, embedding_dim))
  def test_constraints_SimpleRNN(self):
    # Constraints passed to the layer must be forwarded to the cell weights.
    embedding_dim = 4
    layer_class = keras.layers.SimpleRNN
    k_constraint = keras.constraints.max_norm(0.01)
    r_constraint = keras.constraints.max_norm(0.01)
    b_constraint = keras.constraints.max_norm(0.01)
    layer = layer_class(
        5,
        return_sequences=False,
        weights=None,
        input_shape=(None, embedding_dim),
        kernel_constraint=k_constraint,
        recurrent_constraint=r_constraint,
        bias_constraint=b_constraint)
    layer.build((None, None, embedding_dim))
    self.assertEqual(layer.cell.kernel.constraint, k_constraint)
    self.assertEqual(layer.cell.recurrent_kernel.constraint, r_constraint)
    self.assertEqual(layer.cell.bias.constraint, b_constraint)
  def test_with_masking_layer_SimpleRNN(self):
    # The RNN must train when preceded by a Masking layer.
    layer_class = keras.layers.SimpleRNN
    inputs = np.random.random((2, 3, 4))
    targets = np.abs(np.random.random((2, 3, 5)))
    targets /= targets.sum(axis=-1, keepdims=True)
    model = keras.models.Sequential()
    model.add(keras.layers.Masking(input_shape=(3, 4)))
    model.add(layer_class(units=5, return_sequences=True, unroll=False))
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
    model.fit(inputs, targets, epochs=1, batch_size=2, verbose=1)
  def test_from_config_SimpleRNN(self):
    # get_config/from_config must round-trip, stateful or not.
    layer_class = keras.layers.SimpleRNN
    for stateful in (False, True):
      l1 = layer_class(units=1, stateful=stateful)
      l2 = layer_class.from_config(l1.get_config())
      assert l1.get_config() == l2.get_config()
  def test_regularizers_SimpleRNN(self):
    # Three weight regularizers create three losses; the activity
    # regularizer adds a fourth once the layer is called.
    embedding_dim = 4
    layer_class = keras.layers.SimpleRNN
    layer = layer_class(
        5,
        return_sequences=False,
        weights=None,
        input_shape=(None, embedding_dim),
        kernel_regularizer=keras.regularizers.l1(0.01),
        recurrent_regularizer=keras.regularizers.l1(0.01),
        bias_regularizer='l2',
        activity_regularizer='l1')
    layer.build((None, None, 2))
    self.assertEqual(len(layer.losses), 3)
    x = keras.backend.variable(np.ones((2, 3, 2)))
    layer(x)
    if context.executing_eagerly():
      self.assertEqual(len(layer.losses), 4)
    else:
      self.assertEqual(len(layer.get_losses_for(x)), 1)
  def test_statefulness_SimpleRNN(self):
    # Verifies stateful behavior: predictions change as internal state
    # evolves and revert after reset_states(), and masked steps do not
    # perturb the carried state.
    num_samples = 2
    timesteps = 3
    embedding_dim = 4
    units = 2
    layer_class = keras.layers.SimpleRNN
    model = keras.models.Sequential()
    model.add(
        keras.layers.Embedding(
            4,
            embedding_dim,
            mask_zero=True,
            input_length=timesteps,
            batch_input_shape=(num_samples, timesteps)))
    layer = layer_class(
        units, return_sequences=False, stateful=True, weights=None)
    model.add(layer)
    model.compile(
        optimizer=gradient_descent.GradientDescentOptimizer(0.01),
        loss='mse',
        run_eagerly=testing_utils.should_run_eagerly(),
        experimental_run_tf_function=testing_utils.should_run_tf_function())
    out1 = model.predict(np.ones((num_samples, timesteps)))
    self.assertEqual(out1.shape, (num_samples, units))
    # train once so that the states change
    model.train_on_batch(
        np.ones((num_samples, timesteps)), np.ones((num_samples, units)))
    out2 = model.predict(np.ones((num_samples, timesteps)))
    # if the state is not reset, output should be different
    self.assertNotEqual(out1.max(), out2.max())
    # check that output changes after states are reset
    # (even though the model itself didn't change)
    layer.reset_states()
    out3 = model.predict(np.ones((num_samples, timesteps)))
    self.assertNotEqual(out2.max(), out3.max())
    # check that container-level reset_states() works
    model.reset_states()
    out4 = model.predict(np.ones((num_samples, timesteps)))
    np.testing.assert_allclose(out3, out4, atol=1e-5)
    # check that the call to `predict` updated the states
    out5 = model.predict(np.ones((num_samples, timesteps)))
    self.assertNotEqual(out4.max(), out5.max())
    # Check masking
    layer.reset_states()
    left_padded_input = np.ones((num_samples, timesteps))
    left_padded_input[0, :1] = 0
    left_padded_input[1, :2] = 0
    out6 = model.predict(left_padded_input)
    layer.reset_states()
    right_padded_input = np.ones((num_samples, timesteps))
    right_padded_input[0, -1:] = 0
    right_padded_input[1, -2:] = 0
    out7 = model.predict(right_padded_input)
    np.testing.assert_allclose(out7, out6, atol=1e-5)
  def test_get_initial_states(self):
    # The cell's generated initial state must match the state shape the
    # cell actually produces.
    batch_size = 4
    cell = keras.layers.SimpleRNNCell(20)
    initial_state = cell.get_initial_state(
        batch_size=batch_size, dtype=dtypes.float32)
    _, state = cell(np.ones((batch_size, 20), dtype=np.float32), initial_state)
    self.assertLen(state, 1)
    self.assertEqual(state[0].shape, initial_state.shape)
# Standard test entry point: run all tests in this module.
if __name__ == '__main__':
  test.main()
| apache-2.0 |
kevin-coder/tensorflow-fork | tensorflow/python/kernel_tests/reduce_join_op_test.py | 13 | 13578 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for ReduceJoin op from string_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import string_ops
from tensorflow.python.platform import test
def _input_array(num_dims):
"""Creates an ndarray where each element is the binary of its linear index.
Args:
num_dims: The number of dimensions to create.
Returns:
An ndarray of shape [2] * num_dims.
"""
formatter = "{:0%db}" % num_dims
strings = [formatter.format(i) for i in xrange(2**num_dims)]
return np.array(strings, dtype="S%d" % num_dims).reshape([2] * num_dims)
def _joined_array(num_dims, reduce_dim):
"""Creates an ndarray with the result from reduce_join on input_array.
Args:
num_dims: The number of dimensions of the original input array.
reduce_dim: The dimension to reduce.
Returns:
An ndarray of shape [2] * (num_dims - 1).
"""
formatter = "{:0%db}" % (num_dims - 1)
result = np.zeros(shape=[2] * (num_dims - 1), dtype="S%d" % (2 * num_dims))
flat = result.ravel()
for i in xrange(2**(num_dims - 1)):
dims = formatter.format(i)
flat[i] = "".join([(dims[:reduce_dim] + "%d" + dims[reduce_dim:]) % j
for j in xrange(2)])
return result
class UnicodeTestCase(test.TestCase):
  """Test case with Python3-compatible string comparator."""
  def assertAllEqualUnicode(self, truth, actual):
    # Normalize both sides to unicode ("U" dtype) arrays before delegating,
    # so byte strings and unicode strings compare as equal.
    expected = np.array(truth).astype("U")
    observed = np.array(actual).astype("U")
    self.assertAllEqual(expected, observed)
class ReduceJoinTestHelperTest(UnicodeTestCase):
  """Tests for helper functions."""
  def testInputArray(self):
    # Flattened helper output must enumerate all 3-bit binary strings.
    num_dims = 3
    expected = ["{:03b}".format(value) for value in xrange(2**num_dims)]
    flattened = _input_array(num_dims).reshape([-1])
    self.assertAllEqualUnicode(expected, flattened)
  def testJoinedArray(self):
    # Hand-computed joins for each reducible dimension of a 3-D input.
    num_dims = 3
    expected_by_dim = [
        [["000100", "001101"], ["010110", "011111"]],
        [["000010", "001011"], ["100110", "101111"]],
        [["000001", "010011"], ["100101", "110111"]],
    ]
    for reduce_dim in xrange(num_dims):
      self.assertAllEqualUnicode(
          expected_by_dim[reduce_dim],
          _joined_array(num_dims, reduce_dim=reduce_dim))
class ReduceJoinTest(UnicodeTestCase):
  """End-to-end tests for string_ops.reduce_join."""
  def _testReduceJoin(self,
                      input_array,
                      truth,
                      truth_shape,
                      axis,
                      keep_dims=False,
                      separator=""):
    """Compares the output of reduce_join to an expected result.
    Args:
      input_array: The string input to be joined.
      truth: An array or np.array of the expected result.
      truth_shape: An array or np.array of the expected shape.
      axis: The indices to reduce over.
      keep_dims: Whether or not to retain reduced dimensions.
      separator: The separator to use for joining.
    """
    with self.cached_session():
      output = string_ops.reduce_join(
          inputs=input_array,
          axis=axis,
          keep_dims=keep_dims,
          separator=separator)
      output_array = self.evaluate(output)
      self.assertAllEqualUnicode(truth, output_array)
      self.assertAllEqual(truth_shape, output.get_shape())
  def _testMultipleReduceJoin(self, input_array, axis, separator=" "):
    """Tests reduce_join for one input and multiple axes.
    Does so by comparing the output to that from nested reduce_string_joins.
    The correctness of single-dimension reduce_join is verified by other
    tests below using _testReduceJoin.
    Args:
      input_array: The input to test.
      axis: The indices to reduce.
      separator: The separator to use when joining.
    """
    with self.cached_session():
      output = string_ops.reduce_join(
          inputs=input_array, axis=axis, keep_dims=False, separator=separator)
      output_keep_dims = string_ops.reduce_join(
          inputs=input_array, axis=axis, keep_dims=True, separator=separator)
      # Build the reference by reducing one axis at a time with keep_dims,
      # then squeezing; this must match the single multi-axis reduction.
      truth = input_array
      for index in axis:
        truth = string_ops.reduce_join(
            inputs=truth, axis=index, keep_dims=True, separator=separator)
      if not axis:
        truth = constant_op.constant(truth)
      truth_squeezed = array_ops.squeeze(truth, axis=axis)
      output_array = self.evaluate(output)
      output_keep_dims_array = self.evaluate(output_keep_dims)
      truth_array = self.evaluate(truth)
      truth_squeezed_array = self.evaluate(truth_squeezed)
      self.assertAllEqualUnicode(truth_array, output_keep_dims_array)
      self.assertAllEqualUnicode(truth_squeezed_array, output_array)
      self.assertAllEqual(truth.get_shape(), output_keep_dims.get_shape())
      self.assertAllEqual(truth_squeezed.get_shape(), output.get_shape())
  def testRankOne(self):
    input_array = ["this", "is", "a", "test"]
    truth = "thisisatest"
    truth_shape = []
    self._testReduceJoin(input_array, truth, truth_shape, axis=0)
  def testRankTwo(self):
    input_array = [["this", "is", "a", "test"],
                   ["please", "do", "not", "panic"]]
    truth_dim_zero = ["thisplease", "isdo", "anot", "testpanic"]
    truth_shape_dim_zero = [4]
    truth_dim_one = ["thisisatest", "pleasedonotpanic"]
    truth_shape_dim_one = [2]
    self._testReduceJoin(
        input_array, truth_dim_zero, truth_shape_dim_zero, axis=0)
    self._testReduceJoin(
        input_array, truth_dim_one, truth_shape_dim_one, axis=1)
    # axis=None reduces all dimensions to a scalar.
    expected_val = "thisisatestpleasedonotpanic"
    expected_shape = []
    self._testReduceJoin(input_array, expected_val, expected_shape, axis=None)
    # Using axis=[] is a no-op.
    expected_val = input_array
    expected_shape = [2, 4]
    self._testReduceJoin(input_array, expected_val, expected_shape, axis=[])
  def testRankFive(self):
    input_array = _input_array(num_dims=5)
    truths = [_joined_array(num_dims=5, reduce_dim=i) for i in xrange(5)]
    truth_shape = [2] * 4
    for i in xrange(5):
      self._testReduceJoin(input_array, truths[i], truth_shape, axis=i)
  def testNegative(self):
    # Negative axes must behave like their positive counterparts.
    input_array = _input_array(num_dims=5)
    truths = [_joined_array(num_dims=5, reduce_dim=i) for i in xrange(5)]
    truth_shape = [2] * 4
    for i in xrange(5):
      self._testReduceJoin(input_array, truths[i], truth_shape, axis=i - 5)
  def testSingletonDimension(self):
    # Reducing a size-1 dimension must return the original values.
    input_arrays = [
        _input_array(num_dims=5).reshape([2] * i + [1] + [2] * (5 - i))
        for i in xrange(6)
    ]
    truth = _input_array(num_dims=5)
    truth_shape = [2] * 5
    for i in xrange(6):
      self._testReduceJoin(input_arrays[i], truth, truth_shape, axis=i)
  def testSeparator(self):
    input_array = [["this", "is", "a", "test"],
                   ["please", "do", "not", "panic"]]
    truth_dim_zero = ["this please", "is do", "a not", "test panic"]
    truth_shape_dim_zero = [4]
    truth_dim_one = ["this is a test", "please do not panic"]
    truth_shape_dim_one = [2]
    self._testReduceJoin(
        input_array,
        truth_dim_zero,
        truth_shape_dim_zero,
        axis=0,
        separator=" ")
    self._testReduceJoin(
        input_array,
        truth_dim_one,
        truth_shape_dim_one,
        axis=1,
        separator=" ")
  @test_util.run_deprecated_v1
  def testUnknownShape(self):
    # Graph-mode only: feed through a placeholder with unknown static shape.
    input_array = [["a"], ["b"]]
    truth = ["ab"]
    truth_shape = None
    with self.cached_session():
      placeholder = array_ops.placeholder(dtypes.string, name="placeholder")
      reduced = string_ops.reduce_join(placeholder, axis=0)
      output_array = reduced.eval(feed_dict={placeholder.name: input_array})
      self.assertAllEqualUnicode(truth, output_array)
      self.assertAllEqual(truth_shape, reduced.get_shape())
  @test_util.run_deprecated_v1
  def testUnknownIndices(self):
    # Graph-mode only: the reduction axis itself is fed at run time.
    input_array = [["this", "is", "a", "test"],
                   ["please", "do", "not", "panic"]]
    truth_dim_zero = ["thisplease", "isdo", "anot", "testpanic"]
    truth_dim_one = ["thisisatest", "pleasedonotpanic"]
    truth_shape = None
    with self.cached_session():
      placeholder = array_ops.placeholder(dtypes.int32, name="placeholder")
      reduced = string_ops.reduce_join(input_array, axis=placeholder)
      output_array_dim_zero = reduced.eval(feed_dict={placeholder.name: [0]})
      output_array_dim_one = reduced.eval(feed_dict={placeholder.name: [1]})
      self.assertAllEqualUnicode(truth_dim_zero, output_array_dim_zero)
      self.assertAllEqualUnicode(truth_dim_one, output_array_dim_one)
      self.assertAllEqual(truth_shape, reduced.get_shape())
  def testKeepDims(self):
    input_array = [["this", "is", "a", "test"],
                   ["please", "do", "not", "panic"]]
    truth_dim_zero = [["thisplease", "isdo", "anot", "testpanic"]]
    truth_shape_dim_zero = [1, 4]
    truth_dim_one = [["thisisatest"], ["pleasedonotpanic"]]
    truth_shape_dim_one = [2, 1]
    self._testReduceJoin(
        input_array,
        truth_dim_zero,
        truth_shape_dim_zero,
        axis=0,
        keep_dims=True)
    self._testReduceJoin(
        input_array,
        truth_dim_one,
        truth_shape_dim_one,
        axis=1,
        keep_dims=True)
    # axis=None with keep_dims keeps all dimensions at size 1.
    expected_val = [["thisisatestpleasedonotpanic"]]
    expected_shape = [1, 1]
    self._testReduceJoin(
        constant_op.constant(input_array), expected_val, expected_shape,
        keep_dims=True, axis=None)
    # Using axis=[] is a no-op.
    expected_val = input_array
    expected_shape = [2, 4]
    self._testReduceJoin(
        input_array, expected_val, expected_shape, keep_dims=True, axis=[])
  def testMultiIndex(self):
    num_dims = 3
    input_array = _input_array(num_dims=num_dims)
    # Also tests [].
    for i in xrange(num_dims + 1):
      for permutation in itertools.permutations(xrange(num_dims), i):
        self._testMultipleReduceJoin(input_array, axis=permutation)
  @test_util.run_deprecated_v1
  def testInvalidReductionIndices(self):
    # Statically-known out-of-range or scalar axes fail at graph build time.
    with self.cached_session():
      with self.assertRaisesRegexp(ValueError, "Invalid reduction dim"):
        string_ops.reduce_join(inputs="", axis=0)
      with self.assertRaisesRegexp(ValueError,
                                   "Invalid reduction dimension -3"):
        string_ops.reduce_join(inputs=[[""]], axis=-3)
      with self.assertRaisesRegexp(ValueError, "Invalid reduction dimension 2"):
        string_ops.reduce_join(inputs=[[""]], axis=2)
      with self.assertRaisesRegexp(ValueError,
                                   "Invalid reduction dimension -3"):
        string_ops.reduce_join(inputs=[[""]], axis=[0, -3])
      with self.assertRaisesRegexp(ValueError, "Invalid reduction dimension 2"):
        string_ops.reduce_join(inputs=[[""]], axis=[0, 2])
  def testZeroDims(self):
    with self.cached_session():
      inputs = np.zeros([0, 1], dtype=str)
      # Reduction that drops the dim of size 0.
      output = string_ops.reduce_join(inputs=inputs, axis=0)
      self.assertAllEqualUnicode([""], self.evaluate(output))
      # Reduction that keeps the dim of size 0.
      output = string_ops.reduce_join(inputs=inputs, axis=1)
      output_shape = self.evaluate(output).shape
      self.assertAllEqual([0], output_shape)
  @test_util.run_deprecated_v1
  def testInvalidArgsUnknownShape(self):
    # With unknown shape, invalid axes are only detected when the op runs.
    with self.cached_session():
      placeholder = array_ops.placeholder(dtypes.string, name="placeholder")
      index_too_high = string_ops.reduce_join(placeholder, axis=1)
      duplicate_index = string_ops.reduce_join(placeholder, axis=[-1, 1])
      with self.assertRaisesOpError("Invalid reduction dimension 1"):
        index_too_high.eval(feed_dict={placeholder.name: [""]})
      with self.assertRaisesOpError("Duplicate reduction dimension 1"):
        duplicate_index.eval(feed_dict={placeholder.name: [[""]]})
  @test_util.run_deprecated_v1
  def testInvalidArgsUnknownIndices(self):
    # With a fed axis, invalid values are only detected when the op runs.
    with self.cached_session():
      placeholder = array_ops.placeholder(dtypes.int32, name="placeholder")
      reduced = string_ops.reduce_join(["test", "test2"], axis=placeholder)
      with self.assertRaisesOpError("reduction dimension -2"):
        reduced.eval(feed_dict={placeholder.name: -2})
      with self.assertRaisesOpError("reduction dimension 2"):
        reduced.eval(feed_dict={placeholder.name: 2})
# Standard test entry point: run all tests in this module.
if __name__ == "__main__":
  test.main()
| apache-2.0 |
cloudify-cosmo/cloudify-cli | cloudify_cli/execution_events_fetcher.py | 1 | 9188 | ########
# Copyright (c) 2014 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
############
import time
from cloudify_rest_client.executions import Execution
from .exceptions import (ExecutionTimeoutError,
EventProcessingTimeoutError)
# Seconds slept between successive status polls in wait_for_execution*().
WAIT_FOR_EXECUTION_SLEEP_INTERVAL = 1
# Event types whose names indicate a workflow has finished (not referenced
# in this chunk; presumably consumed by event handlers elsewhere).
WORKFLOW_END_TYPES = {u'workflow_succeeded', u'workflow_failed',
                      u'workflow_cancelled'}
class ExecutionEventsFetcher(object):
    """Incrementally fetches events/logs for an execution or execution group.

    Keeps a cursor (``_from_event``) so successive batch fetches page
    through the event stream without re-reading earlier events.
    """
    # Fields copied from each API event into its nested 'context' dict.
    CONTEXT_FIELDS = [
        'deployment_id',
        'execution_id',
        'node_name',
        'operation',
        'workflow_id',
    ]
    def __init__(self,
                 client,
                 batch_size=100,
                 **list_kwargs):
        """Initialize the fetcher.

        :param client: REST client exposing events/executions/execution_groups
        :param batch_size: max number of events requested per API call
        :param list_kwargs: extra filters passed to ``events.list`` (must
            include ``execution_id`` or ``execution_group_id``)
        """
        self._client = client
        self._list_kwargs = list_kwargs
        self._batch_size = batch_size
        # Cursor: offset of the next unfetched event.
        self._from_event = 0
        # make sure execution/group exists before proceeding
        # a 404 will be raised otherwise
        if 'execution_id' in list_kwargs:
            self._client.executions.get(list_kwargs['execution_id'])
        elif 'execution_group_id' in list_kwargs:
            self._client.execution_groups.get(
                list_kwargs['execution_group_id'])
    def fetch_and_process_events_batch(self,
                                       events_handler=None,
                                       offset=None,
                                       size=None):
        """Fetch one batch, map it, and hand it to ``events_handler``.

        :return: (number of events fetched, total events reported by the API)
        """
        events_list_response = self._fetch_events_batch(offset, size)
        total_events = events_list_response.metadata.pagination.total
        events = [
            self._map_api_event_to_internal_event(event)
            for event in events_list_response.items
        ]
        if events and events_handler:
            events_handler(events)
        return len(events), total_events
    def _fetch_events_batch(self, offset=None, size=None):
        """Fetch one page of events and advance the internal cursor."""
        # Explicit offset/size override the cursor/batch size for this call.
        offset = offset if offset is not None else self._from_event
        size = size if size is not None else self._batch_size
        events_list_response = self._client.events.list(
            _offset=offset,
            _size=size,
            sort='reported_timestamp',
            **self._list_kwargs
        )
        # Advance the cursor even when an explicit offset was supplied.
        self._from_event += len(events_list_response)
        return events_list_response
    def _map_api_event_to_internal_event(self, event):
        """Map data structure from API to internal.
        This method adapts the data structure returned by the events API
        endpoint to the structure expected by `cloudify.event.Event`.
        Note: the event is modified in place, so even though the value is
        returned, the original data structure is not preserved.
        :param event: Event in API format
        :type event: dict(str)
        :return: Event in internal format
        :rtype: dict(str)
        """
        # Move the flat context fields into a nested 'context' dict.
        event['context'] = {
            context_field: event[context_field]
            for context_field in self.CONTEXT_FIELDS
        }
        for context_field in self.CONTEXT_FIELDS:
            del event[context_field]
        # 'node_instance_id' is renamed to the internal 'node_id' key.
        event['context']['node_id'] = event['node_instance_id']
        if 'source_id' in event:
            event['context']['source_id'] = event['source_id']
            event['context']['target_id'] = event['target_id']
        del event['node_instance_id']
        event['message'] = {
            'arguments': None,
            'text': event['message'],
        }
        event['context']['task_error_causes'] = event['error_causes']
        del event['error_causes']
        return event
    def fetch_and_process_events(self, events_handler=None, timeout=60):
        """Drain all currently-available events in batches.

        :raises EventProcessingTimeoutError: if draining exceeds ``timeout``
        :return: total number of events processed
        """
        total_events_count = 0
        # timeout can be None (never time out), for example when tail is used
        if timeout is not None:
            deadline = time.time() + timeout
        while True:
            if timeout is not None and time.time() > deadline:
                raise EventProcessingTimeoutError(
                    self._list_kwargs.get('execution_id') or
                    self._list_kwargs.get('execution_group_id'),
                    'events/log fetching timed out')
            events_batch_count, _ = self.fetch_and_process_events_batch(
                events_handler=events_handler)
            total_events_count += events_batch_count
            if events_batch_count < self._batch_size:
                # returned less events than allowed by _batch_size,
                # this means these are the last events found so far
                break
        return total_events_count
def get_deployment_environment_creation_execution(client, deployment_id):
    """Return the ``create_deployment_environment`` execution of a deployment.

    :param client: REST client exposing ``executions.list``
    :param deployment_id: deployment whose executions are searched
    :raises RuntimeError: when no such execution exists
    """
    executions = client.executions.list(deployment_id=deployment_id)
    match = next(
        (execution for execution in executions
         if execution.workflow_id == 'create_deployment_environment'),
        None)
    if match is not None:
        return match
    raise RuntimeError(
        'Failed to get create_deployment_environment workflow '
        'execution. Available executions: {0}'.format(executions))
def wait_for_execution(client,
                       execution,
                       events_handler=None,
                       include_logs=False,
                       timeout=900,
                       logger=None,
                       from_datetime=None):
    """Block until ``execution`` reaches an end state, streaming its events.

    :param client: REST client used for polling and event fetching
    :param execution: the execution to wait for
    :param events_handler: callable invoked with each batch of events
    :param include_logs: whether to include log entries among events
    :param timeout: overall deadline in seconds; None waits forever
    :param logger: unused here; kept for signature parity with
        wait_for_execution_group
    :param from_datetime: only fetch events reported after this time
    :raises ExecutionTimeoutError: if the deadline passes first
    :return: the final execution object
    """
    # if execution already ended - return without waiting
    if execution.status in Execution.END_STATES:
        return execution
    if timeout is not None:
        deadline = time.time() + timeout
    events_fetcher = ExecutionEventsFetcher(client,
                                            execution_id=execution.id,
                                            include_logs=include_logs,
                                            from_datetime=from_datetime)
    # Poll for execution status and execution logs, until execution ends
    execution_ended = False
    while True:
        if timeout is not None:
            if time.time() > deadline:
                raise ExecutionTimeoutError(
                    execution.id,
                    'execution of operation {0} for deployment {1} '
                    'timed out'.format(execution.workflow_id,
                                       execution.deployment_id))
            else:
                # update the remaining timeout
                timeout = deadline - time.time()
        if not execution_ended:
            execution = client.executions.get(execution.id)
            execution_ended = execution.status in Execution.END_STATES
        # One more fetch happens after the end state is seen, so trailing
        # events are not lost before breaking out of the loop.
        events_fetcher.fetch_and_process_events(
            events_handler=events_handler, timeout=timeout)
        if execution_ended:
            break
        time.sleep(WAIT_FOR_EXECUTION_SLEEP_INTERVAL)
    return execution
def wait_for_execution_group(client,
                             execution_group,
                             events_handler=None,
                             include_logs=False,
                             timeout=900,
                             logger=None,
                             from_datetime=None):
    """Like wait_for_execution, but for a group.

    Blocks until the execution group reaches an end state, streaming its
    events to ``events_handler``; raises ExecutionTimeoutError when the
    ``timeout`` deadline passes first.
    """
    if execution_group.status in Execution.END_STATES:
        return execution_group
    if logger is not None and execution_group.status == Execution.QUEUED:
        logger.info("Executions have been queued: you can keep waiting "
                    "for the executions to start or interrupt (eg. ^C).\n")
    if timeout is not None:
        deadline = time.time() + timeout
    events_fetcher = ExecutionEventsFetcher(
        client,
        execution_group_id=execution_group.id,
        include_logs=include_logs,
        from_datetime=from_datetime)
    # Poll for execution status and execution logs, until execution ends
    group_finished = False
    while True:
        if timeout is not None:
            if time.time() > deadline:
                raise ExecutionTimeoutError(
                    execution_group.id,
                    'execution of operation {0} for deployment group {1} '
                    'timed out'.format(execution_group.workflow_id,
                                       execution_group.deployment_group_id))
            else:
                # update the remaining timeout
                timeout = deadline - time.time()
        if not group_finished:
            execution_group = client.execution_groups.get(execution_group.id)
            group_finished = execution_group.status in Execution.END_STATES
        # One final fetch runs after the end state is observed so trailing
        # events are drained before the loop exits.
        events_fetcher.fetch_and_process_events(
            events_handler=events_handler, timeout=timeout)
        if group_finished:
            break
        time.sleep(WAIT_FOR_EXECUTION_SLEEP_INTERVAL)
    return execution_group
| apache-2.0 |
Changaco/oh-mainline | vendor/packages/gdata/samples/analytics/mgmt_feed_demo.py | 39 | 8348 | #!/usr/bin/python
#
# Copyright 2010 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Google Analytics Management API Demo.
This script demonstrates how to retrieve the important data from the Google
Analytics Data Management API using the Python Client library. This example
requires a Google Analytics account with data and a username and password.
Each feed in the Management API is retrieved and printed using the respective
print method in ManagementFeedDemo. To simplify setting filters and query
parameters, each feed has it's own query class. Check the
<code>gdata.analytics.client</code> module for more details on usage.
main: The main method of this example.
GetAnalyticsClient: Returns an authorized AnalyticsClient object.
Class ManagementFeedDemo: Prints all the import Account Feed data.
"""
__author__ = 'api.nickm@google.com (Nick Mihailovski)'
import gdata.analytics.client
import gdata.sample_util
# '~all' selects every item the authorized user can access — presumably the
# Management API wildcard; confirm against the feed documentation.
ACCOUNT_ID = '~all'
WEB_PROPERTY_ID = '~all'
PROFILE_ID = '~all'
def main():
  """Main example script. Un-comment each method to print the feed."""
  client = GetAnalyticsClient()
  demo = ManagementFeedDemo(client)
  demo.PrintAccountFeed()
  # demo.PrintWebPropertyFeed()
  # demo.PrintProfileFeed()
  # demo.PrintGoalFeed()
  # demo.PrintSegmentFeed()
def GetAnalyticsClient():
  """Returns an authorized GoogleAnalayticsClient object.

  Uses the Google Data python samples wrapper to prompt the user for
  credentials then tries to authorize the client object with the
  Google Analytics API.

  Returns:
    An authorized GoogleAnalyticsClient object.
  """
  SOURCE_APP_NAME = 'Analytics-ManagementAPI-Demo-v1'
  client = gdata.analytics.client.AnalyticsClient(source=SOURCE_APP_NAME)
  try:
    # Prompts on stdin for username/password, then authorizes the client.
    gdata.sample_util.authorize_client(
        client,
        service=client.auth_service,
        source=SOURCE_APP_NAME,
        scopes=['https://www.google.com/analytics/feeds/'])
  except gdata.client.BadAuthentication:
    exit('Invalid user credentials given.')
  except gdata.client.Error:
    exit('Login Error')
  return client
class ManagementFeedDemo(object):
"""The main demo for the management feed.
Attributes:
my_client: gdata.analytics.client The AnalyticsClient object for this demo.
"""
  def __init__(self, my_client):
    """Initializes the ManagementFeedDemo class.
    Args:
      my_client: gdata.analytics.client An authorized GoogleAnalyticsClient
          object.
    """
    # Authorized client used for every feed request in the Print* methods.
    self.my_client = my_client
  def PrintAccountFeed(self):
    """Requests and prints the important data in the Account Feed.
    Note:
      AccountQuery is used for the ManagementAPI.
      AccountFeedQuery is used for the Data Export API.
    """
    account_query = gdata.analytics.client.AccountQuery()
    results = self.my_client.GetManagementFeed(account_query)
    print '-------- Account Feed Data --------'
    if not results.entry:
      print 'no entries found'
    else:
      # GetProperty(...) returns an object whose .value holds the string.
      for entry in results.entry:
        print 'Account Name = ' + entry.GetProperty('ga:accountName').value
        print 'Account ID = ' + entry.GetProperty('ga:accountId').value
        print 'Child Feed Link = ' + entry.GetChildLink('analytics#webproperties').href
        print
  def PrintWebPropertyFeed(self):
    """Requests and prints the important data in the Web Property Feed."""
    # ACCOUNT_ID defaults to '~all', so this lists every accessible account.
    web_property_query = gdata.analytics.client.WebPropertyQuery(
        acct_id=ACCOUNT_ID)
    results = self.my_client.GetManagementFeed(web_property_query)
    print '-------- Web Property Feed Data --------'
    if not results.entry:
      print 'no entries found'
    else:
      for entry in results.entry:
        print 'Account ID = ' + entry.GetProperty('ga:accountId').value
        print 'Web Property ID = ' + entry.GetProperty('ga:webPropertyId').value
        print 'Child Feed Link = ' + entry.GetChildLink('analytics#profiles').href
        print
  def PrintProfileFeed(self):
    """Requests and prints the important data in the Profile Feed.
    Note:
      TableId has a different namespace (dxp:) than all the
      other properties (ga:).
    """
    profile_query = gdata.analytics.client.ProfileQuery(
        acct_id=ACCOUNT_ID, web_prop_id=WEB_PROPERTY_ID)
    results = self.my_client.GetManagementFeed(profile_query)
    print '-------- Profile Feed Data --------'
    if not results.entry:
      print 'no entries found'
    else:
      for entry in results.entry:
        print 'Account ID = ' + entry.GetProperty('ga:accountId').value
        print 'Web Property ID = ' + entry.GetProperty('ga:webPropertyId').value
        print 'Profile ID = ' + entry.GetProperty('ga:profileId').value
        print 'Currency = ' + entry.GetProperty('ga:currency').value
        print 'Timezone = ' + entry.GetProperty('ga:timezone').value
        print 'TableId = ' + entry.GetProperty('dxp:tableId').value
        print 'Child Feed Link = ' + entry.GetChildLink('analytics#goals').href
        print
def PrintGoalFeed(self):
    """Requests and prints the important data in the Goal Feed.

    Note:
        There are two types of goals, destination and engagement, which need
        to be handled differently; each type is delegated to its own
        printing helper below.
    """
    goal_query = gdata.analytics.client.GoalQuery(
        acct_id=ACCOUNT_ID, web_prop_id=WEB_PROPERTY_ID, profile_id=PROFILE_ID)
    results = self.my_client.GetManagementFeed(goal_query)
    print '-------- Goal Feed Data --------'
    if not results.entry:
        print 'no entries found'
    else:
        for entry in results.entry:
            print 'Goal Number = ' + entry.goal.number
            print 'Goal Name = ' + entry.goal.name
            print 'Goal Value = ' + entry.goal.value
            print 'Goal Active = ' + entry.goal.active
            # A goal is either destination-based or engagement-based;
            # dispatch on whichever configuration is present.
            if entry.goal.destination:
                self.PrintDestinationGoal(entry.goal.destination)
            elif entry.goal.engagement:
                self.PrintEngagementGoal(entry.goal.engagement)
def PrintDestinationGoal(self, destination):
    """Prints the important information for destination goals including all
    the configured steps if they exist.

    Args:
        destination: gdata.data.Destination The destination goal configuration.
    """
    print '\t----- Destination Goal -----'
    print '\tExpression = ' + destination.expression
    print '\tMatch Type = ' + destination.match_type
    print '\tStep 1 Required = ' + destination.step1_required
    print '\tCase Sensitive = ' + destination.case_sensitive
    # Destination goals may define an ordered funnel of steps.
    if destination.step:
        print '\t\t----- Destination Goal Steps -----'
        for step in destination.step:
            print '\t\tStep Number = ' + step.number
            print '\t\tStep Name = ' + step.name
            print '\t\tStep Path = ' + step.path
    # Trailing blank line after the goal block.
    print
def PrintEngagementGoal(self, engagement):
    """Prints the important information for engagement goals.

    Args:
        engagement: gdata.data.Engagement The engagement goal configuration.
    """
    print '\t----- Engagement Goal -----'
    print '\tGoal Type = ' + engagement.type
    print '\tGoal Engagement = ' + engagement.comparison
    print '\tGoal Threshold = ' + engagement.threshold_value
    # Trailing blank line after the goal block.
    print
def PrintSegmentFeed(self):
    """Requests and prints the important data in the Advanced Segment Feed.

    (The original docstring said "Profile Feed"; this method actually
    queries and prints advanced segments.)
    """
    adv_seg_query = gdata.analytics.client.AdvSegQuery()
    results = self.my_client.GetManagementFeed(adv_seg_query)
    print '-------- Advanced Segment Feed Data --------'
    if not results.entry:
        print 'no entries found'
    else:
        for entry in results.entry:
            print 'Segment ID = ' + entry.segment.id
            print 'Segment Name = ' + entry.segment.name
            print 'Segment Definition = ' + entry.segment.definition.text
            # Blank line between entries.
            print
if __name__ == '__main__':
main()
| agpl-3.0 |
kenwith/cs561 | cs561-as2-kenwith/pox/tools/gui/views/elastictree.py | 7 | 8231 | '''
ElasticTree view for drawn topology
@author Albert Wu (awu12345@stanford.edu)
'''
from nox.ripcordapps.dispatch_server.ripcord_pb2 import Vertex, Edge, \
Path, Tunnels, Tunnel, TunnelsRequest, NewTunnelRequest, Topology,\
DisplayTunnel, DisplaySwitch, TopologyRequest, TopologyReply, \
LinkUtilizationRequest, LinkUtilizationReply, PathUtilizationRequest, \
PathUtilizationReply, EventSubscription, PortUtilization, PortUtilizations, \
SwitchQuery, Generic, UtilBound, TrafficMsg
from PyQt4 import QtGui, QtCore
from view import View
from ripcord.et.elastictree import findPower
class ET_View(View):
    """ElasticTree view for the drawn topology.

    Renders which links are up/utilized, exposes a power-savings slider
    plus buttons to change the per-link utilization bound and to show an
    informational popup.  Link stats arrive via updateStats().
    """
    # Signal fired (from the stats path) to refresh the slider/indicator.
    powerSliderSignal = QtCore.pyqtSignal()

    def __init__(self, topoWidget):
        View.__init__(self, topoWidget, "Elastic Tree")
        self.logDisplay = self.topoWidget.parent.logWidget.logDisplay
        utilBtn = QtGui.QPushButton('Change Util Bound')
        infoBtn = QtGui.QPushButton('What is ElasticTree?')
        self.connect(utilBtn, QtCore.SIGNAL('clicked()'),
            self.changeUtil)
        self.connect(infoBtn, QtCore.SIGNAL('clicked()'), self.showInfo)
        # self.connect(powerBtn, QtCore.SIGNAL('clicked()'),
        #        self.showPowerStats)
        # self.buttons.append(powerBtn)
        self.buttons.append(utilBtn)
        self.buttons.append(infoBtn)
        # Bandwidth reserved per link; adjustable via the ChangeUtilPopup.
        self.utilBound = 0.01
        # Slider showing estimated power savings as a percentage (0-100).
        self.slider = QtGui.QSlider(QtCore.Qt.Horizontal, self)
        self.slider.setMinimum(0)
        self.slider.setMaximum(100)
        self.buttons.append(self.slider)
        self.sliderValue = 0
        self.stats = {}  # maps tuples (dpid, port) to utilization
        self.powerSliderSignal.connect(self.changeSlider)
        # Red percentage label mirroring the slider value.
        self.indicator = QtGui.QLabel()
        self.buttons.append(self.indicator)

    def changeSlider(self):
        """Sync the slider widget and indicator label to self.sliderValue."""
        self.slider.setValue(self.sliderValue)
        msg = str(self.sliderValue) + "%"
        self.indicator.setText("<font color='red'>"+msg+"</font>")

    def node_color(self, node):  # green node when on, gray when off
        # Power-aware coloring is disabled; returning None defers to the
        # caller's default node color.
        #topo = self.topoWidget.topologyView.topology
        #if topo.node_info[node.dpid].power_on:
        #    return QtCore.Qt.green
        #else:
        #    return QtCore.Qt.gray
        return

    def link_color(self, link):
        """Color a link by utilization: gray = down, green = no stats,
        red = any traffic, white = idle."""
        # reflected by shades of colors based on utilizations
        # assumes 1 GB links
        srcID = link.source.dpid
        srcPort = link.sport
        dstID = link.dest.dpid
        dstPort = link.dport
        if not link.isUp:
            return QtCore.Qt.gray
        # No measurements yet for either endpoint -> treat as healthy.
        if not (srcID, srcPort) in self.stats and \
                not (dstID, dstPort) in self.stats:
            return QtCore.Qt.green
        # Use whichever endpoint(s) we have stats for; average if both.
        if not (srcID, srcPort) in self.stats:
            util = self.stats[(dstID, dstPort)]
        elif not (dstID, dstPort) in self.stats:
            util = self.stats[(srcID, srcPort)]
        else:
            util1 = self.stats[(srcID, srcPort)]
            util2 = self.stats[(dstID, dstPort)]
            util = (util1 + util2) / 2
        # NOTE(review): the util >= 0.8 branch is redundant — any util > 0
        # already yields red.  Presumably a gradient of shades was intended
        # here ("reflected by shades of colors" above); confirm and fix.
        if util >= 0.8:
            return QtCore.Qt.red
        if util > 0:
            return QtCore.Qt.red
        return QtCore.Qt.white

    def link_pattern(self, link):
        """All links are drawn with a solid pen."""
        pattern = QtCore.Qt.SolidLine
        return pattern

    def changeUtil(self):
        """Open the modal popup for changing the utilization bound."""
        self.buttons[0].setChecked(True)
        change_util_popup = ChangeUtilPopup(self)
        change_util_popup.exec_()
        #return

    def showInfo(self):
        """Open the modal 'What is ElasticTree?' popup."""
        self.buttons[1].setChecked(True)
        info_popup = InfoPopup(self)
        info_popup.exec_()

    def showPowerStats(self):
        ''' method that shows stats for a specific ElasticTree subset
        currently not implemented'''
        # self.logDisplay.parent.freezeLog = True
        # topo = self.topoWidget.topologyView.topology
        # numSwitches = len(topo.nodes.keys())
        # numLinks = len(topo.links.keys())
        #
        # k = 4
        # # this next portion will have to be updated eventually
        # totSwitches = k * k / 4 + k * k
        # totLinks = 3 * k * k * k / 4
        #
        # stats = "Displaying Switch and Edge Power Stats\n"
        # stats += "Switches on: %d\n" % numSwitches
        # stats += "Links on: %d\n" % numLinks
        # stats += "% Original Network Power: %d\n" % \
        #     findPower(numSwitches, numLinks, totSwitches, totLinks, k)
        #
        # self.logDisplay.setText(stats)
        return

    def updateStats(self, utils):
        ''' updates link stats from dispatch_server message '''
        # Rebuild the whole stats map on every update; utilization is the
        # mean of transmitted and received Gbps per (dpid, port).
        self.stats = {}
        for util in utils:
            self.stats[(util.dpid, util.port)] = \
                (util.gbps_transmitted + util.gbps_received) / 2
class InfoPopup(QtGui.QDialog):
    """Modal dialog with a short explanation of ElasticTree."""

    def __init__(self, parent=None):
        """Create the dialog widgets and assemble the layouts."""
        self.parent = parent
        QtGui.QWidget.__init__(self)
        self.setWindowTitle("ElasticTree Basic Info")
        self.resize(500, 150)
        self.combo = QtGui.QGroupBox(self)
        ok_button = QtGui.QPushButton("Ok")
        self.connect(ok_button, QtCore.SIGNAL('clicked()'), self.ok)
        self.hbox = QtGui.QHBoxLayout()
        self.hbox.addStretch(1)
        self.hbox.addWidget(ok_button)
        self.vbox = QtGui.QVBoxLayout()
        grid = QtGui.QGridLayout()
        # One grid row per line of explanatory text, starting at row 1.
        blurb = [
            "ElasticTree saves energy by turning off unneeded switches / links.",
            "This view visualizes the subset of switches. Also, the user can ",
            "adjust the utilization bound, the amount of bandwidth reserved per link.",
        ]
        for row, text in enumerate(blurb, 1):
            grid.addWidget(QtGui.QLabel(text), row, 1)
        self.combo.setLayout(self.vbox)
        self.vbox.addLayout(grid)
        self.vbox.addLayout(self.hbox)
        self.vbox.addStretch(1)

    def ok(self):
        """Dismiss the dialog with an accepted result."""
        self.accept()
class ChangeUtilPopup(QtGui.QDialog):
    ''' allows user to adjust slider to change utilization bound for ET'''
    # NOTE(review): the window title says "Mbps" but ok() reports the value
    # in "Gbps" (and ET_View.updateStats works in Gbps) — one of the two
    # labels is wrong; confirm the intended unit.

    def __init__(self, parent=None):
        ''' Sets up graphics: a line edit for the new bound plus Ok/Cancel. '''
        self.parent = parent
        QtGui.QWidget.__init__(self)
        self.setWindowTitle("Change Utilization Bound, Mbps")
        self.resize(350, 100)
        self.combo = QtGui.QGroupBox(self)
        # self.slider = QtGui.QSlider(QtCore.Qt.Horizontal, self)
        # self.slider.setFocusPolicy(QtCore.Qt.NoFocus)
        # self.slider.setGeometry(30, 40, 100, 30)
        # self.slider.setMinimum(0)
        # self.slider.setMaximum(1000)
        # self.slider.setValue(self.parent.utilBound * 1000)
        self.utilEdit = QtGui.QLineEdit()
        self.utilEdit.setText('')
        ok = QtGui.QPushButton("Ok")
        cancel = QtGui.QPushButton("Cancel")
        self.connect(ok, QtCore.SIGNAL('clicked()'), self.ok)
        self.connect(cancel, QtCore.SIGNAL('clicked()'), self.cancel)
        self.hbox = QtGui.QHBoxLayout()
        self.hbox.addStretch(1)
        self.hbox.addWidget(cancel)
        self.hbox.addWidget(ok)
        self.vbox = QtGui.QVBoxLayout()
        grid = QtGui.QGridLayout()
        grid.addWidget(self.utilEdit, 1, 1)
        self.combo.setLayout(self.vbox)
        self.vbox.addLayout(grid)
        self.vbox.addLayout(self.hbox)
        self.vbox.addStretch(1)

    def changeValue(self):
        # Placeholder left over from the slider-based UI; intentionally a no-op.
        return

    def ok(self):
        """Apply the new bound, notify the backend, and close the dialog."""
        # send util bound message
        # NOTE(review): float() raises ValueError on non-numeric input,
        # which will propagate out of the Qt event handler — consider
        # validating the line edit before accepting.
        value = float(self.utilEdit.text())
        display = 'Util bound set to: ' + str(value) + " Gbps"
        self.parent.topoWidget.parent.setStatusTip(display)
        self.parent.utilBound = value
        msg = UtilBound()
        msg.util_bound = self.parent.utilBound
        self.parent.topoWidget.topologyView.topologyInterface.send(msg)
        self.accept()

    def cancel(self):
        """Close without changing the bound; un-toggle the launcher button."""
        self.parent.buttons[0].setChecked(False)
        self.parent.topoWidget.parent.setStatusTip('Util bound not changed')
        self.reject()
| gpl-3.0 |
salguarnieri/intellij-community | python/lib/Lib/email/Encoders.py | 152 | 2302 | # Copyright (C) 2001-2006 Python Software Foundation
# Author: Barry Warsaw
# Contact: email-sig@python.org
"""Encodings and related functions."""
__all__ = [
'encode_7or8bit',
'encode_base64',
'encode_noop',
'encode_quopri',
]
import base64
from quopri import encodestring as _encodestring
def _qencode(s):
    """Quoted-printable-encode *s*, additionally encoding spaces as =20."""
    # quopri.encodestring() leaves spaces untouched even with
    # quotetabs=True, so encode them explicitly afterwards.
    encoded = _encodestring(s, quotetabs=True)
    return encoded.replace(' ', '=20')
def _bencode(s):
    """Base64-encode *s* without base64.encodestring()'s courtesy newline,
    unless the input itself ended with a newline."""
    # Empty input encodes to itself (also avoids the s[-1] lookup below).
    if not s:
        return s
    ends_with_newline = s[-1] == '\n'
    encoded = base64.encodestring(s)
    # Strip the trailing newline encodestring() appends, but only when the
    # original data did not end with one.
    if not ends_with_newline and encoded[-1] == '\n':
        return encoded[:-1]
    return encoded
def encode_base64(msg):
    """Encode the message's payload in Base64.

    Also, add an appropriate Content-Transfer-Encoding header.
    """
    payload = msg.get_payload()
    msg.set_payload(_bencode(payload))
    msg['Content-Transfer-Encoding'] = 'base64'
def encode_quopri(msg):
    """Encode the message's payload in quoted-printable.

    Also, add an appropriate Content-Transfer-Encoding header.
    """
    payload = msg.get_payload()
    msg.set_payload(_qencode(payload))
    msg['Content-Transfer-Encoding'] = 'quoted-printable'
def encode_7or8bit(msg):
    """Set the Content-Transfer-Encoding header to 7bit or 8bit.

    The payload itself is left untouched; only the header is added based on
    whether the payload is pure ASCII (7bit) or not (8bit).  Payloads in an
    iso-2022-* output charset are escape-sequence encodings that are pure
    7-bit data despite failing the ASCII round-trip, so they are labelled
    7bit as well.
    """
    orig = msg.get_payload()
    if orig is None:
        # There's no payload.  For backwards compatibility we use 7bit.
        msg['Content-Transfer-Encoding'] = '7bit'
        return
    # We play a trick to make this go fast.  If encoding to ASCII succeeds,
    # we know the data must be 7bit, otherwise treat it as 8bit.
    try:
        orig.encode('ascii')
    except UnicodeError:
        # iso-2022-* is non-ASCII but still 7-bit.
        charset = msg.get_charset()
        output_cset = charset and charset.output_charset
        # Bug fix: the prefix was previously misspelled 'iso-2202-', so
        # iso-2022-* messages were incorrectly labelled 8bit.
        if output_cset and output_cset.lower().startswith('iso-2022-'):
            msg['Content-Transfer-Encoding'] = '7bit'
        else:
            msg['Content-Transfer-Encoding'] = '8bit'
    else:
        msg['Content-Transfer-Encoding'] = '7bit'
def encode_noop(msg):
    """Leave the message payload and headers untouched (identity encoder)."""
    # Intentionally a no-op; exists so callers can always pass an encoder.
| apache-2.0 |
argonemyth/sentry | src/sentry/rules/conditions/event_frequency.py | 6 | 2862 | """
sentry.rules.conditions.event_frequency
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
from datetime import datetime, timedelta
from django import forms
from pytz import utc
from django.utils import timezone
from sentry.rules.conditions.base import EventCondition
class Interval(object):
    """Symbolic names for the supported rate-limiting time windows.

    The string values double as the stored form-option values and as the
    keys used by EventFrequencyCondition.get_rate().
    """
    ONE_MINUTE = '1m'
    ONE_HOUR = '1h'
class EventFrequencyForm(forms.Form):
    """Rule-configuration form: choose a time window and a count threshold."""
    # Aggregation window; choice values match the Interval constants.
    interval = forms.ChoiceField(choices=(
        (Interval.ONE_MINUTE, 'one minute'),
        (Interval.ONE_HOUR, 'one hour'),
    ))
    # Event-count threshold, rendered as an HTML number input.
    value = forms.IntegerField(widget=forms.TextInput(attrs={
        'placeholder': '100',
        'type': 'number'
    }))
class EventFrequencyCondition(EventCondition):
    """Rule condition: fires when an event's group is seen more than
    ``value`` times within ``interval`` (per the tsdb counters)."""
    form_cls = EventFrequencyForm
    label = 'An event is seen more than {value} times in {interval}'

    def __init__(self, *args, **kwargs):
        # Imported lazily to avoid a circular import at module load time;
        # tests may inject a fake tsdb via the 'tsdb' kwarg.
        from sentry.app import tsdb
        self.tsdb = kwargs.pop('tsdb', tsdb)
        super(EventFrequencyCondition, self).__init__(*args, **kwargs)

    def passes(self, event, state):
        """Return True when the configured frequency threshold is exceeded.

        :param event: the event being evaluated.
        :param state: rule state carrying rule_is_active / rule_last_active.
        """
        # when a rule is not active (i.e. it hasnt gone from inactive -> active)
        # it means that we already notified the user about this condition and
        # shouldn't spam them again
        if state.rule_is_active:
            return False
        interval = self.get_option('interval')
        try:
            value = int(self.get_option('value'))
        except (TypeError, ValueError):
            return False
        # Both options must be configured; note this also rejects value == 0.
        if not (interval and value):
            return False
        now = timezone.now()
        # XXX(dcramer): hardcode 30 minute frequency until rules support choices
        if state.rule_last_active and state.rule_last_active > (now - timedelta(minutes=30)):
            return False
        current_value = self.get_rate(event, interval)
        return current_value > value

    def clear_cache(self, event):
        # Drop any per-event memoized rates (see get_rate below).
        event._rate_cache = {}

    def get_rate(self, event, interval):
        """Return the summed event count for event.group_id over *interval*,
        memoized on the event object itself.

        :raises ValueError: if *interval* is not a known Interval constant.
        """
        if not hasattr(event, '_rate_cache'):
            event._rate_cache = {}
        result = event._rate_cache.get(interval)
        if result is None:
            end = datetime.utcnow().replace(tzinfo=utc)
            if interval == Interval.ONE_MINUTE:
                start = end - timedelta(minutes=1)
            elif interval == Interval.ONE_HOUR:
                start = end - timedelta(hours=1)
            else:
                raise ValueError(interval)
            result = self.tsdb.get_sums(
                model=self.tsdb.models.group,
                keys=[event.group_id],
                start=start,
                end=end,
            )[event.group_id]
            event._rate_cache[interval] = result
        return result
| bsd-3-clause |
skion/oauthlib-oidc | oauthlib/oauth1/rfc5849/request_validator.py | 4 | 30462 | # -*- coding: utf-8 -*-
"""
oauthlib.oauth1.rfc5849
~~~~~~~~~~~~~~
This module is an implementation of various logic needed
for signing and checking OAuth 1.0 RFC 5849 requests.
"""
from __future__ import absolute_import, unicode_literals
import sys
from . import SIGNATURE_METHODS, utils
class RequestValidator(object):
"""A validator/datastore interaction base class for OAuth 1 providers.
OAuth providers should inherit from RequestValidator and implement the
methods and properties outlined below. Further details are provided in the
documentation for each method and property.
Methods used to check the format of input parameters. Common tests include
length, character set, membership, range or pattern. These tests are
referred to as `whitelisting or blacklisting`_. Whitelisting is better
but blacklisting can be usefull to spot malicious activity.
The following have methods a default implementation:
- check_client_key
- check_request_token
- check_access_token
- check_nonce
- check_verifier
- check_realms
The methods above default to whitelist input parameters, checking that they
are alphanumerical and between a minimum and maximum length. Rather than
overloading the methods a few properties can be used to configure these
methods.
* @safe_characters -> (character set)
* @client_key_length -> (min, max)
* @request_token_length -> (min, max)
* @access_token_length -> (min, max)
* @nonce_length -> (min, max)
* @verifier_length -> (min, max)
* @realms -> [list, of, realms]
Methods used to validate/invalidate input parameters. These checks usually
hit either persistent or temporary storage such as databases or the
filesystem. See each methods documentation for detailed usage.
The following methods must be implemented:
- validate_client_key
- validate_request_token
- validate_access_token
- validate_timestamp_and_nonce
- validate_redirect_uri
- validate_requested_realms
- validate_realms
- validate_verifier
- invalidate_request_token
Methods used to retrieve sensitive information from storage.
The following methods must be implemented:
- get_client_secret
- get_request_token_secret
- get_access_token_secret
- get_rsa_key
- get_realms
- get_default_realms
- get_redirect_uri
Methods used to save credentials.
The following methods must be implemented:
- save_request_token
- save_verifier
- save_access_token
Methods used to verify input parameters. This methods are used during
authorizing request token by user (AuthorizationEndpoint), to check if
parameters are valid. During token authorization request is not signed,
thus 'validation' methods can not be used. The following methods must be
implemented:
- verify_realms
- verify_request_token
To prevent timing attacks it is necessary to not exit early even if the
client key or resource owner key is invalid. Instead dummy values should
be used during the remaining verification process. It is very important
that the dummy client and token are valid input parameters to the methods
get_client_secret, get_rsa_key and get_(access/request)_token_secret and
that the running time of those methods when given a dummy value remain
equivalent to the running time when given a valid client/resource owner.
The following properties must be implemented:
* @dummy_client
* @dummy_request_token
* @dummy_access_token
Example implementations have been provided, note that the database used is
a simple dictionary and serves only an illustrative purpose. Use whichever
database suits your project and how to access it is entirely up to you.
The methods are introduced in an order which should make understanding
their use more straightforward and as such it could be worth reading what
follows in chronological order.
.. _`whitelisting or blacklisting`: https://www.schneier.com/blog/archives/2011/01/whitelisting_vs.html
"""
def __init__(self):
    # No state by default; subclasses typically wire up their datastore here.
    pass

@property
def allowed_signature_methods(self):
    # Signature methods this provider accepts.
    return SIGNATURE_METHODS

@property
def safe_characters(self):
    # Character whitelist applied by every check_* format validator.
    return set(utils.UNICODE_ASCII_CHARACTER_SET)

@property
def client_key_length(self):
    # (min, max) accepted length for client keys.
    return 20, 30

@property
def request_token_length(self):
    # (min, max) accepted length for request tokens.
    return 20, 30

@property
def access_token_length(self):
    # (min, max) accepted length for access tokens.
    return 20, 30

@property
def timestamp_lifetime(self):
    # Maximum allowed age, in seconds, of a request timestamp.
    return 600

@property
def nonce_length(self):
    # (min, max) accepted length for nonces.
    return 20, 30

@property
def verifier_length(self):
    # (min, max) accepted length for verifiers.
    return 20, 30

@property
def realms(self):
    # Realms known to this provider; empty means realms are unused.
    return []

@property
def enforce_ssl(self):
    # Require HTTPS for all signed requests by default.
    return True
def check_client_key(self, client_key):
    """Whitelist-validate the format of a client key.

    The key must consist solely of ``safe_characters`` and its length must
    fall within the ``client_key_length`` bounds (inclusive).
    """
    min_len, max_len = self.client_key_length
    if not set(client_key) <= self.safe_characters:
        return False
    return min_len <= len(client_key) <= max_len
def check_request_token(self, request_token):
    """Whitelist-validate the format of a request token.

    The token must consist solely of ``safe_characters`` and its length
    must fall within the ``request_token_length`` bounds (inclusive).
    """
    min_len, max_len = self.request_token_length
    charset_ok = set(request_token) <= self.safe_characters
    length_ok = min_len <= len(request_token) <= max_len
    return charset_ok and length_ok
def check_access_token(self, request_token):
    """Whitelist-validate the format of an access token.

    (The parameter is named ``request_token`` for historical reasons; it
    carries the access token string.)  The token must consist solely of
    ``safe_characters`` and its length must fall within the
    ``access_token_length`` bounds (inclusive).
    """
    min_len, max_len = self.access_token_length
    if min_len <= len(request_token) <= max_len:
        return set(request_token) <= self.safe_characters
    return False
def check_nonce(self, nonce):
    """Whitelist-validate the format of a nonce.

    The nonce must consist solely of ``safe_characters`` and its length
    must fall within the ``nonce_length`` bounds (inclusive).
    """
    min_len, max_len = self.nonce_length
    return (min_len <= len(nonce) <= max_len and
            set(nonce) <= self.safe_characters)
def check_verifier(self, verifier):
    """Whitelist-validate the format of a verifier.

    The verifier must consist solely of ``safe_characters`` and its length
    must fall within the ``verifier_length`` bounds (inclusive).
    """
    min_len, max_len = self.verifier_length
    if not min_len <= len(verifier) <= max_len:
        return False
    allowed = self.safe_characters
    return all(char in allowed for char in verifier)
def check_realms(self, realms):
"""Check that the realm is one of a set allowed realms."""
return all((r in self.realms for r in realms))
def _subclass_must_implement(self, fn):
"""
Returns a NotImplementedError for a function that should be implemented.
:param fn: name of the function
"""
m = "Missing function implementation in {}: {}".format(type(self), fn)
return NotImplementedError(m)
@property
def dummy_client(self):
    """Dummy client used when an invalid client key is supplied.

    :returns: The dummy client key string.

    The dummy client should be associated with either a client secret,
    a rsa key or both depending on which signature methods are supported.
    Providers should make sure that

    get_client_secret(dummy_client)
    get_rsa_key(dummy_client)

    return a valid secret or key for the dummy client.

    This method is used by

    * AccessTokenEndpoint
    * RequestTokenEndpoint
    * ResourceEndpoint
    * SignatureOnlyEndpoint
    """
    # Abstract: concrete validators must supply a constant-time dummy value.
    raise self._subclass_must_implement("dummy_client")

@property
def dummy_request_token(self):
    """Dummy request token used when an invalid token was supplied.

    :returns: The dummy request token string.

    The dummy request token should be associated with a request token
    secret such that get_request_token_secret(.., dummy_request_token)
    returns a valid secret.

    This method is used by

    * AccessTokenEndpoint
    """
    # Abstract: concrete validators must supply a constant-time dummy value.
    raise self._subclass_must_implement("dummy_request_token")

@property
def dummy_access_token(self):
    """Dummy access token used when an invalid token was supplied.

    :returns: The dummy access token string.

    The dummy access token should be associated with an access token
    secret such that get_access_token_secret(.., dummy_access_token)
    returns a valid secret.

    This method is used by

    * ResourceEndpoint
    """
    # Abstract: concrete validators must supply a constant-time dummy value.
    raise self._subclass_must_implement("dummy_access_token")
def get_client_secret(self, client_key, request):
    """Retrieves the client secret associated with the client key.

    :param client_key: The client/consumer key.
    :param request: An oauthlib.common.Request object.
    :returns: The client secret as a string.

    This method must allow the use of a dummy client_key value.
    Fetching the secret using the dummy key must take the same amount of
    time as fetching a secret for a valid client::

        # Unlikely to be near constant time as it uses two database
        # lookups for a valid client, and only one for an invalid.
        from your_datastore import ClientSecret
        if ClientSecret.has(client_key):
            return ClientSecret.get(client_key)
        else:
            return 'dummy'

        # Aim to mimic number of latency inducing operations no matter
        # whether the client is valid or not.
        from your_datastore import ClientSecret
        return ClientSecret.get(client_key, 'dummy')

    Note that the returned key must be in plaintext.

    This method is used by

    * AccessTokenEndpoint
    * RequestTokenEndpoint
    * ResourceEndpoint
    * SignatureOnlyEndpoint
    """
    # Abstract: concrete validators must implement this lookup.
    raise self._subclass_must_implement('get_client_secret')

def get_request_token_secret(self, client_key, token, request):
    """Retrieves the shared secret associated with the request token.

    :param client_key: The client/consumer key.
    :param token: The request token string.
    :param request: An oauthlib.common.Request object.
    :returns: The token secret as a string.

    This method must allow the use of a dummy values and the running time
    must be roughly equivalent to that of the running time of valid values::

        # Unlikely to be near constant time as it uses two database
        # lookups for a valid client, and only one for an invalid.
        from your_datastore import RequestTokenSecret
        if RequestTokenSecret.has(client_key):
            return RequestTokenSecret.get((client_key, request_token))
        else:
            return 'dummy'

        # Aim to mimic number of latency inducing operations no matter
        # whether the client is valid or not.
        from your_datastore import RequestTokenSecret
        return ClientSecret.get((client_key, request_token), 'dummy')

    Note that the returned key must be in plaintext.

    This method is used by

    * AccessTokenEndpoint
    """
    # Abstract: concrete validators must implement this lookup.
    raise self._subclass_must_implement('get_request_token_secret')

def get_access_token_secret(self, client_key, token, request):
    """Retrieves the shared secret associated with the access token.

    :param client_key: The client/consumer key.
    :param token: The access token string.
    :param request: An oauthlib.common.Request object.
    :returns: The token secret as a string.

    This method must allow the use of a dummy values and the running time
    must be roughly equivalent to that of the running time of valid values::

        # Unlikely to be near constant time as it uses two database
        # lookups for a valid client, and only one for an invalid.
        from your_datastore import AccessTokenSecret
        if AccessTokenSecret.has(client_key):
            return AccessTokenSecret.get((client_key, request_token))
        else:
            return 'dummy'

        # Aim to mimic number of latency inducing operations no matter
        # whether the client is valid or not.
        from your_datastore import AccessTokenSecret
        return ClientSecret.get((client_key, request_token), 'dummy')

    Note that the returned key must be in plaintext.

    This method is used by

    * ResourceEndpoint
    """
    # Abstract: concrete validators must implement this lookup.
    raise self._subclass_must_implement("get_access_token_secret")
def get_default_realms(self, client_key, request):
    """Get the default realms for a client.

    :param client_key: The client/consumer key.
    :param request: An oauthlib.common.Request object.
    :returns: The list of default realms associated with the client.

    The list of default realms will be set during client registration and
    is outside the scope of OAuthLib.

    This method is used by

    * RequestTokenEndpoint
    """
    # Abstract: concrete validators must implement this lookup.
    raise self._subclass_must_implement("get_default_realms")

def get_realms(self, token, request):
    """Get realms associated with a request token.

    :param token: The request token string.
    :param request: An oauthlib.common.Request object.
    :returns: The list of realms associated with the request token.

    This method is used by

    * AuthorizationEndpoint
    * AccessTokenEndpoint
    """
    # Abstract: concrete validators must implement this lookup.
    raise self._subclass_must_implement("get_realms")

def get_redirect_uri(self, token, request):
    """Get the redirect URI associated with a request token.

    :param token: The request token string.
    :param request: An oauthlib.common.Request object.
    :returns: The redirect URI associated with the request token.

    It may be desirable to return a custom URI if the redirect is set to "oob".
    In this case, the user will be redirected to the returned URI and at that
    endpoint the verifier can be displayed.

    This method is used by

    * AuthorizationEndpoint
    """
    # Abstract: concrete validators must implement this lookup.
    raise self._subclass_must_implement("get_redirect_uri")

def get_rsa_key(self, client_key, request):
    """Retrieves a previously stored client provided RSA key.

    :param client_key: The client/consumer key.
    :param request: An oauthlib.common.Request object.
    :returns: The rsa public key as a string.

    This method must allow the use of a dummy client_key value. Fetching
    the rsa key using the dummy key must take the same amount of time
    as fetching a key for a valid client. The dummy key must also be of
    the same bit length as client keys.

    Note that the key must be returned in plaintext.

    This method is used by

    * AccessTokenEndpoint
    * RequestTokenEndpoint
    * ResourceEndpoint
    * SignatureOnlyEndpoint
    """
    # Abstract: concrete validators must implement this lookup.
    raise self._subclass_must_implement("get_rsa_key")
def invalidate_request_token(self, client_key, request_token, request):
    """Invalidates a used request token.

    :param client_key: The client/consumer key.
    :param request_token: The request token string.
    :param request: An oauthlib.common.Request object.
    :returns: None

    Per `Section 2.3`__ of the spec:

    "The server MUST (...) ensure that the temporary
    credentials have not expired or been used before."

    .. _`Section 2.3`: https://tools.ietf.org/html/rfc5849#section-2.3

    This method should ensure that provided token won't validate anymore.
    It can be simply removing RequestToken from storage or setting
    specific flag that makes it invalid (note that such flag should be
    also validated during request token validation).

    This method is used by

    * AccessTokenEndpoint
    """
    # Abstract: concrete validators must implement this.
    raise self._subclass_must_implement("invalidate_request_token")

def validate_client_key(self, client_key, request):
    """Validates that supplied client key is a registered and valid client.

    :param client_key: The client/consumer key.
    :param request: An oauthlib.common.Request object.
    :returns: True or False

    Note that if the dummy client is supplied it should validate in same
    or nearly the same amount of time as a valid one.

    Ensure latency inducing tasks are mimicked even for dummy clients.
    For example, use::

        from your_datastore import Client
        try:
            return Client.exists(client_key, access_token)
        except DoesNotExist:
            return False

    Rather than::

        from your_datastore import Client
        if access_token == self.dummy_access_token:
            return False
        else:
            return Client.exists(client_key, access_token)

    This method is used by

    * AccessTokenEndpoint
    * RequestTokenEndpoint
    * ResourceEndpoint
    * SignatureOnlyEndpoint
    """
    # Abstract: concrete validators must implement this check.
    raise self._subclass_must_implement("validate_client_key")
def validate_request_token(self, client_key, token, request):
    """Validates that supplied request token is registered and valid.

    :param client_key: The client/consumer key.
    :param token: The request token string.
    :param request: An oauthlib.common.Request object.
    :returns: True or False

    Note that if the dummy request_token is supplied it should validate in
    the same nearly the same amount of time as a valid one.

    Ensure latency inducing tasks are mimicked even for dummy clients.
    For example, use::

        from your_datastore import RequestToken
        try:
            return RequestToken.exists(client_key, access_token)
        except DoesNotExist:
            return False

    Rather than::

        from your_datastore import RequestToken
        if access_token == self.dummy_access_token:
            return False
        else:
            return RequestToken.exists(client_key, access_token)

    This method is used by

    * AccessTokenEndpoint
    """
    # Abstract: concrete validators must implement this check.
    raise self._subclass_must_implement("validate_request_token")

def validate_access_token(self, client_key, token, request):
    """Validates that supplied access token is registered and valid.

    :param client_key: The client/consumer key.
    :param token: The access token string.
    :param request: An oauthlib.common.Request object.
    :returns: True or False

    Note that if the dummy access token is supplied it should validate in
    the same or nearly the same amount of time as a valid one.

    Ensure latency inducing tasks are mimicked even for dummy clients.
    For example, use::

        from your_datastore import AccessToken
        try:
            return AccessToken.exists(client_key, access_token)
        except DoesNotExist:
            return False

    Rather than::

        from your_datastore import AccessToken
        if access_token == self.dummy_access_token:
            return False
        else:
            return AccessToken.exists(client_key, access_token)

    This method is used by

    * ResourceEndpoint
    """
    # Abstract: concrete validators must implement this check.
    raise self._subclass_must_implement("validate_access_token")
def validate_timestamp_and_nonce(self, client_key, timestamp, nonce,
request, request_token=None, access_token=None):
"""Validates that the nonce has not been used before.
:param client_key: The client/consumer key.
:param timestamp: The ``oauth_timestamp`` parameter.
:param nonce: The ``oauth_nonce`` parameter.
:param request_token: Request token string, if any.
:param access_token: Access token string, if any.
:param request: An oauthlib.common.Request object.
:returns: True or False
Per `Section 3.3`_ of the spec.
"A nonce is a random string, uniquely generated by the client to allow
the server to verify that a request has never been made before and
helps prevent replay attacks when requests are made over a non-secure
channel. The nonce value MUST be unique across all requests with the
same timestamp, client credentials, and token combinations."
.. _`Section 3.3`: https://tools.ietf.org/html/rfc5849#section-3.3
One of the first validation checks that will be made is for the validity
of the nonce and timestamp, which are associated with a client key and
possibly a token. If invalid then immediately fail the request
by returning False. If the nonce/timestamp pair has been used before and
you may just have detected a replay attack. Therefore it is an essential
part of OAuth security that you not allow nonce/timestamp reuse.
Note that this validation check is done before checking the validity of
the client and token.::
nonces_and_timestamps_database = [
(u'foo', 1234567890, u'rannoMstrInghere', u'bar')
]
def validate_timestamp_and_nonce(self, client_key, timestamp, nonce,
request_token=None, access_token=None):
return ((client_key, timestamp, nonce, request_token or access_token)
not in self.nonces_and_timestamps_database)
This method is used by
* AccessTokenEndpoint
* RequestTokenEndpoint
* ResourceEndpoint
* SignatureOnlyEndpoint
"""
raise self._subclass_must_implement("validate_timestamp_and_nonce")
def validate_redirect_uri(self, client_key, redirect_uri, request):
    """Validate the redirection URI supplied by the client.

    :param client_key: The client/consumer key.
    :param redirect_uri: The URI the client which to redirect back to after
                         authorization is successful.
    :param request: An oauthlib.common.Request object.
    :returns: True or False

    Providers are strongly encouraged to require clients to pre-register
    all redirection URIs as absolute URIs (see `CWE-601`_ for background on
    open redirection attacks); verification then amounts to a simple
    lookup of the supplied value.

    Alternatively, per `Section 2.1`_ of the spec, a client that cannot
    receive callbacks (or established one out of band) must send the
    literal value "oob" (case sensitive) instead.

    .. _`CWE-601`: http://cwe.mitre.org/top25/index.html#CWE-601
    .. _`Section 2.1`: https://tools.ietf.org/html/rfc5849#section-2.1

    This method is used by

    * RequestTokenEndpoint
    """
    raise self._subclass_must_implement("validate_redirect_uri")
def validate_requested_realms(self, client_key, realms, request):
    """Validate that the client may request access to the given realms.

    :param client_key: The client/consumer key.
    :param realms: The list of realms that client is requesting access to.
    :param request: An oauthlib.common.Request object.
    :returns: True or False

    Invoked when obtaining a request token; the realm should be tied to
    that request token and, once the user has authorized, the restriction
    should carry over to the access token.

    This method is used by

    * RequestTokenEndpoint
    """
    raise self._subclass_must_implement("validate_requested_realms")
def validate_realms(self, client_key, token, request, uri=None,
                    realms=None):
    """Validate access to the request realm.

    :param client_key: The client/consumer key.
    :param token: A request token string.
    :param request: An oauthlib.common.Request object.
    :param uri: The URI the realms is protecting.
    :param realms: A list of realms that must have been granted to
                   the access token.
    :returns: True or False

    How realms are used is outside the OAuth specification, but they are
    commonly employed to restrict access to a subset of protected
    resources such as "photos".  The ``realms`` argument is a convenience
    allowing a per-view, pre-defined list of allowed realms.

    Can be as simple as::

        from your_datastore import RequestToken
        request_token = RequestToken.get(token, None)
        if not request_token:
            return False
        return set(request_token.realms).issuperset(set(realms))

    This method is used by

    * ResourceEndpoint
    """
    raise self._subclass_must_implement("validate_realms")
def validate_verifier(self, client_key, token, verifier, request):
    """Validate a verification code.

    :param client_key: The client/consumer key.
    :param token: A request token string.
    :param verifier: The authorization verifier string.
    :param request: An oauthlib.common.Request object.
    :returns: True or False

    Providers issue a verification code once the resource owner has
    authorized access; the client presents it to obtain token credentials,
    and the provider must confirm it is valid and associated with both the
    client and the resource owner.  To avoid verifier enumeration, perform
    the validation in near constant time using the constant time string
    comparison shipped with OAuthLib::

        from your_datastore import Verifier
        correct_verifier = Verifier.get(client_key, request_token)
        from oauthlib.common import safe_string_equals
        return safe_string_equals(verifier, correct_verifier)

    This method is used by

    * AccessTokenEndpoint
    """
    raise self._subclass_must_implement("validate_verifier")
def verify_request_token(self, token, request):
    """Verify that the given OAuth1 request token is valid.

    :param token: A request token string.
    :param request: An oauthlib.common.Request object.
    :returns: True or False

    Used only by the AuthorizationEndpoint to check whether the
    ``oauth_token`` carried in the authorization URL is valid.  That
    request is not signed, so the otherwise similar
    ``validate_request_token`` method cannot be used here.

    This method is used by

    * AuthorizationEndpoint
    """
    raise self._subclass_must_implement("verify_request_token")
def verify_realms(self, token, realms, request):
    """Verify that authorized realms match those granted to ``token``.

    :param token: An access token string.
    :param realms: A list of realms the client attempts to access.
    :param request: An oauthlib.common.Request object.
    :returns: True or False

    Prevents the realm list sent by the client during the authorization
    step from being altered to include realms beyond what was bound with
    the request token.

    Can be as simple as::

        valid_realms = self.get_realms(token)
        return all((r in valid_realms for r in realms))

    This method is used by

    * AuthorizationEndpoint
    """
    raise self._subclass_must_implement("verify_realms")
def save_access_token(self, token, request):
    """Persist an OAuth1 access token.

    :param token: A dict with token credentials.
    :param request: An oauthlib.common.Request object.

    The token dict will at minimum include

    * ``oauth_token`` the access token string.
    * ``oauth_token_secret`` the token specific secret used in signing.
    * ``oauth_authorized_realms`` a space separated list of realms.

    The client key is available as ``request.client_key`` and the list of
    realms (not a joined string) as ``request.realm``.

    This method is used by

    * AccessTokenEndpoint
    """
    raise self._subclass_must_implement("save_access_token")
def save_request_token(self, token, request):
    """Persist an OAuth1 request token.

    :param token: A dict with token credentials.
    :param request: An oauthlib.common.Request object.

    The token dict will at minimum include

    * ``oauth_token`` the request token string.
    * ``oauth_token_secret`` the token specific secret used in signing.
    * ``oauth_callback_confirmed`` the string ``true``.

    The client key is available as ``request.client_key``.

    This method is used by

    * RequestTokenEndpoint
    """
    raise self._subclass_must_implement("save_request_token")
def save_verifier(self, token, verifier, request):
    """Associate an authorization verifier with a request token.

    :param token: A request token string.
    :param verifier: A dictionary containing the ``oauth_verifier`` and
                     ``oauth_token``.
    :param request: An oauthlib.common.Request object.

    Verifiers must be associated with tokens so they can be validated
    during the access token request.  Note that, unlike the
    ``save_*_token`` methods, ``token`` here is the ``oauth_token`` string
    of the request token saved previously.

    This method is used by

    * AuthorizationEndpoint
    """
    raise self._subclass_must_implement("save_verifier")
| bsd-3-clause |
mxjl620/scikit-learn | sklearn/cluster/tests/test_spectral.py | 262 | 7954 | """Testing for Spectral Clustering methods"""
from sklearn.externals.six.moves import cPickle
dumps, loads = cPickle.dumps, cPickle.loads
import numpy as np
from scipy import sparse
from sklearn.utils import check_random_state
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_warns_message
from sklearn.cluster import SpectralClustering, spectral_clustering
from sklearn.cluster.spectral import spectral_embedding
from sklearn.cluster.spectral import discretize
from sklearn.metrics import pairwise_distances
from sklearn.metrics import adjusted_rand_score
from sklearn.metrics.pairwise import kernel_metrics, rbf_kernel
from sklearn.datasets.samples_generator import make_blobs
def test_spectral_clustering():
    # Block-structured affinity with two obvious clusters: {0,1,2} and
    # {3,4,5,6}.  Every solver/labeling/matrix-format combination must
    # recover them exactly.
    affinity = np.array([[1.0, 1.0, 1.0, 0.2, 0.0, 0.0, 0.0],
                         [1.0, 1.0, 1.0, 0.2, 0.0, 0.0, 0.0],
                         [1.0, 1.0, 1.0, 0.2, 0.0, 0.0, 0.0],
                         [0.2, 0.2, 0.2, 1.0, 1.0, 1.0, 1.0],
                         [0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0],
                         [0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0],
                         [0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0]])
    for solver in ('arpack', 'lobpcg'):
        for labeling in ('kmeans', 'discretize'):
            for mat in (affinity, sparse.csr_matrix(affinity)):
                clusterer = SpectralClustering(random_state=0, n_clusters=2,
                                               affinity='precomputed',
                                               eigen_solver=solver,
                                               assign_labels=labeling
                                               ).fit(mat)
                found = clusterer.labels_
                # Cluster ids are arbitrary; normalize so sample 0 gets 1.
                if found[0] == 0:
                    found = 1 - found
                assert_array_equal(found, [1, 1, 1, 0, 0, 0, 0])
                # The fitted model must survive a pickle round-trip.
                clone = loads(dumps(clusterer))
                assert_equal(clone.n_clusters, clusterer.n_clusters)
                assert_equal(clone.eigen_solver, clusterer.eigen_solver)
                assert_array_equal(clone.labels_, clusterer.labels_)
def test_spectral_amg_mode():
    # Exercise the 'amg' eigen solver of SpectralClustering, which relies
    # on the optional pyamg package.
    centers = np.array([
        [0., 0., 0.],
        [10., 10., 10.],
        [20., 20., 20.],
    ])
    X, true_labels = make_blobs(n_samples=100, centers=centers,
                                cluster_std=1., random_state=42)
    distances = pairwise_distances(X)  # Distance matrix
    similarity = sparse.coo_matrix(np.max(distances) - distances)
    try:
        from pyamg import smoothed_aggregation_solver
    except ImportError:
        # Without pyamg, requesting the amg solver must raise.
        assert_raises(ValueError, spectral_embedding, similarity,
                      n_components=len(centers),
                      random_state=0, eigen_solver="amg")
    else:
        labels = spectral_clustering(similarity, n_clusters=len(centers),
                                     random_state=0, eigen_solver="amg")
        # We don't care too much that it's good, just that it *worked*.
        # There does have to be some lower limit on the performance though.
        assert_greater(np.mean(labels == true_labels), .3)
def test_spectral_unknown_mode():
    # An unrecognized eigen_solver must make spectral_clustering raise.
    centers = np.array([
        [0., 0., 0.],
        [10., 10., 10.],
        [20., 20., 20.],
    ])
    X, _ = make_blobs(n_samples=100, centers=centers,
                      cluster_std=1., random_state=42)
    distances = pairwise_distances(X)  # Distance matrix
    similarity = sparse.coo_matrix(np.max(distances) - distances)
    assert_raises(ValueError, spectral_clustering, similarity, n_clusters=2,
                  random_state=0, eigen_solver="<unknown>")
def test_spectral_unknown_assign_labels():
    # An unrecognized assign_labels must make spectral_clustering raise.
    centers = np.array([
        [0., 0., 0.],
        [10., 10., 10.],
        [20., 20., 20.],
    ])
    X, _ = make_blobs(n_samples=100, centers=centers,
                      cluster_std=1., random_state=42)
    distances = pairwise_distances(X)  # Distance matrix
    similarity = sparse.coo_matrix(np.max(distances) - distances)
    assert_raises(ValueError, spectral_clustering, similarity, n_clusters=2,
                  random_state=0, assign_labels="<unknown>")
def test_spectral_clustering_sparse():
    # A precomputed *sparse* affinity must cluster two tight blobs exactly.
    X, y = make_blobs(n_samples=20, random_state=0,
                      centers=[[1, 1], [-1, -1]], cluster_std=0.01)
    affinity = np.maximum(rbf_kernel(X, gamma=1) - 1e-4, 0)
    affinity = sparse.coo_matrix(affinity)
    preds = SpectralClustering(random_state=0, n_clusters=2,
                               affinity='precomputed').fit(affinity).labels_
    assert_equal(adjusted_rand_score(y, preds), 1)
def test_affinities():
    # Note: in the following, random_state has been selected to have
    # a dataset that yields a stable eigen decomposition both when built
    # on OSX and Linux
    X, y = make_blobs(n_samples=20, random_state=0,
                      centers=[[1, 1], [-1, -1]], cluster_std=0.01
                      )
    # nearest-neighbors affinity: the graph is not fully connected on this
    # data, which must warn but still cluster perfectly.
    clustering = SpectralClustering(n_clusters=2, affinity='nearest_neighbors',
                                    random_state=0)
    assert_warns_message(UserWarning, 'not fully connected', clustering.fit, X)
    assert_equal(adjusted_rand_score(y, clustering.labels_), 1)
    # Default (rbf) affinity with an explicit gamma.
    clustering = SpectralClustering(n_clusters=2, gamma=2, random_state=0)
    preds = clustering.fit(X).labels_
    assert_equal(adjusted_rand_score(y, preds), 1)
    X = check_random_state(10).rand(10, 5) * 10
    kernels_available = kernel_metrics()
    for kernel_name in kernels_available:
        # Additive chi^2 gives a negative similarity matrix which
        # doesn't make sense for spectral clustering
        if kernel_name != 'additive_chi2':
            clustering = SpectralClustering(n_clusters=2, affinity=kernel_name,
                                            random_state=0)
            preds = clustering.fit(X).labels_
            assert_equal((X.shape[0],), preds.shape)
    # A callable affinity is accepted as well.
    clustering = SpectralClustering(n_clusters=2, affinity=lambda x, y: 1,
                                    random_state=0)
    preds = clustering.fit(X).labels_
    assert_equal((X.shape[0],), preds.shape)
    def histogram(x, y, **kwargs):
        # Histogram kernel implemented as a callable.
        assert_equal(kwargs, {})  # no kernel_params that we didn't ask for
        return np.minimum(x, y).sum()
    clustering = SpectralClustering(n_clusters=2, affinity=histogram,
                                    random_state=0)
    preds = clustering.fit(X).labels_
    assert_equal((X.shape[0],), preds.shape)
    # raise error on unknown affinity
    clustering = SpectralClustering(n_clusters=2, affinity='<unknown>')
    assert_raises(ValueError, clustering.fit, X)
def test_discretize(seed=8):
    """Check that ``discretize`` recovers labels from a noisy indicator matrix.

    Builds a one-hot class-assignment matrix from random labels, perturbs it
    with Gaussian noise and verifies that ``discretize`` still recovers a
    labeling close to the ground truth.

    :param seed: RNG seed, kept as a parameter so other seeds can be probed.
    """
    random_state = np.random.RandomState(seed)
    for n_samples in [50, 100, 150, 500]:
        for n_class in range(2, 10):
            # Random class labels drawn from [0, n_class] inclusive.
            # randint's upper bound is exclusive, hence n_class + 1; this is
            # stream-identical to RandomState.random_integers(0, n_class, ...),
            # which was deprecated and later removed from NumPy.
            y_true = random_state.randint(0, n_class + 1, n_samples)
            # np.float was a removed alias for the builtin float.
            y_true = np.array(y_true, float)
            # noise class assignment matrix
            y_indicator = sparse.coo_matrix((np.ones(n_samples),
                                             (np.arange(n_samples),
                                              y_true)),
                                            shape=(n_samples,
                                                   n_class + 1))
            y_true_noisy = (y_indicator.toarray()
                            + 0.1 * random_state.randn(n_samples,
                                                       n_class + 1))
            y_pred = discretize(y_true_noisy, random_state)
            assert_greater(adjusted_rand_score(y_true, y_pred), 0.8)
| bsd-3-clause |
tux-00/ansible | lib/ansible/modules/monitoring/monit.py | 49 | 7071 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2013, Darryl Stoflet <stoflet@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: monit
short_description: Manage the state of a program monitored via Monit
description:
- Manage the state of a program monitored via I(Monit)
version_added: "1.2"
options:
name:
description:
- The name of the I(monit) program/process to manage
required: true
default: null
state:
description:
- The state of service
required: true
default: null
choices: [ "present", "started", "stopped", "restarted", "monitored", "unmonitored", "reloaded" ]
timeout:
description:
- If there are pending actions for the service monitored by monit, then Ansible will check
for up to this many seconds to verify the the requested action has been performed.
Ansible will sleep for five seconds between each check.
required: false
default: 300
version_added: "2.1"
requirements: [ ]
author: "Darryl Stoflet (@dstoflet)"
'''
EXAMPLES = '''
# Manage the state of program "httpd" to be in "started" state.
- monit:
name: httpd
state: started
'''
import time
def main():
    """Drive a monit-managed service to the requested state.

    Reads ``name``, ``state`` and ``timeout`` from the module arguments and
    terminates through ``module.exit_json`` / ``module.fail_json`` (both end
    the process, so code after them on the same path never runs).
    """
    arg_spec = dict(
        name=dict(required=True),
        timeout=dict(default=300, type='int'),
        state=dict(required=True, choices=['present', 'started', 'restarted', 'stopped', 'monitored', 'unmonitored', 'reloaded'])
    )
    module = AnsibleModule(argument_spec=arg_spec, supports_check_mode=True)
    name = module.params['name']
    state = module.params['state']
    timeout = module.params['timeout']
    # Resolve the monit binary; the True flag makes this fail if missing.
    MONIT = module.get_bin_path('monit', True)
    def status():
        """Return the status of the process in monit, or the empty string if not present."""
        rc, out, err = module.run_command('%s summary' % MONIT, check_rc=True)
        for line in out.split('\n'):
            # Sample output lines:
            # Process 'name'            Running
            # Process 'name'            Running - restart pending
            parts = line.split()
            if len(parts) > 2 and parts[0].lower() == 'process' and parts[1] == "'%s'" % name:
                return ' '.join(parts[2:]).lower()
        else:
            # for/else: the loop finished without finding the process line.
            return ''
    def run_command(command):
        """Runs a monit command, and returns the new status."""
        module.run_command('%s %s %s' % (MONIT, command, name), check_rc=True)
        return status()
    def wait_for_monit_to_stop_pending():
        """Fails this run if there is no status or it's pending/initializing for timeout"""
        timeout_time = time.time() + timeout
        sleep_time = 5
        running_status = status()
        # Poll until monit reports a settled (non-pending) state or we
        # exhaust the configured timeout.
        while running_status == '' or 'pending' in running_status or 'initializing' in running_status:
            if time.time() >= timeout_time:
                module.fail_json(
                    msg='waited too long for "pending", or "initiating" status to go away ({0})'.format(
                        running_status
                    ),
                    state=state
                )
            time.sleep(sleep_time)
            running_status = status()
    if state == 'reloaded':
        if module.check_mode:
            module.exit_json(changed=True)
        rc, out, err = module.run_command('%s reload' % MONIT)
        if rc != 0:
            module.fail_json(msg='monit reload failed', stdout=out, stderr=err)
        wait_for_monit_to_stop_pending()
        module.exit_json(changed=True, name=name, state=state)
    present = status() != ''
    # Every state except 'present' requires monit to already know the process.
    if not present and not state == 'present':
        module.fail_json(msg='%s process not presently configured with monit' % name, name=name, state=state)
    if state == 'present':
        if not present:
            if module.check_mode:
                module.exit_json(changed=True)
            # NOTE(review): rebinding 'status' shadows the helper function
            # above; safe only because every path in this branch exits via
            # exit_json before the helper would be called again.
            status = run_command('reload')
            if status == '':
                wait_for_monit_to_stop_pending()
            module.exit_json(changed=True, name=name, state=state)
        module.exit_json(changed=False, name=name, state=state)
    wait_for_monit_to_stop_pending()
    running = 'running' in status()
    if running and state in ['started', 'monitored']:
        module.exit_json(changed=False, name=name, state=state)
    if running and state == 'stopped':
        if module.check_mode:
            module.exit_json(changed=True)
        status = run_command('stop')
        if status in ['not monitored'] or 'stop pending' in status:
            module.exit_json(changed=True, name=name, state=state)
        module.fail_json(msg='%s process not stopped' % name, status=status)
    if running and state == 'unmonitored':
        if module.check_mode:
            module.exit_json(changed=True)
        status = run_command('unmonitor')
        if status in ['not monitored'] or 'unmonitor pending' in status:
            module.exit_json(changed=True, name=name, state=state)
        module.fail_json(msg='%s process not unmonitored' % name, status=status)
    elif state == 'restarted':
        if module.check_mode:
            module.exit_json(changed=True)
        status = run_command('restart')
        if status in ['initializing', 'running'] or 'restart pending' in status:
            module.exit_json(changed=True, name=name, state=state)
        module.fail_json(msg='%s process not restarted' % name, status=status)
    elif not running and state == 'started':
        if module.check_mode:
            module.exit_json(changed=True)
        status = run_command('start')
        if status in ['initializing', 'running'] or 'start pending' in status:
            module.exit_json(changed=True, name=name, state=state)
        module.fail_json(msg='%s process not started' % name, status=status)
    elif not running and state == 'monitored':
        if module.check_mode:
            module.exit_json(changed=True)
        status = run_command('monitor')
        if status not in ['not monitored']:
            module.exit_json(changed=True, name=name, state=state)
        module.fail_json(msg='%s process not monitored' % name, status=status)
    # Nothing to do: already in the requested state.
    module.exit_json(changed=False, name=name, state=state)
# import module snippets
# Legacy Ansible convention: star-import provides AnsibleModule and friends.
from ansible.module_utils.basic import *
if __name__ == '__main__':
    main()
| gpl-3.0 |
Valeureux/donation | donation_recurring/__openerp__.py | 2 | 1628 | # -*- encoding: utf-8 -*-
##############################################################################
#
# Donation Recurring module for Odoo
# Copyright (C) 2014-2015 Barroux Abbey (www.barroux.org)
# Copyright (C) 2014-2015 Akretion France (www.akretion.com)
# @author: Alexis de Lattre <alexis.delattre@akretion.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
    # Odoo/OpenERP addon manifest for the recurring-donation module.
    'name': 'Donation Recurring',
    'version': '0.1',
    'category': 'Accounting & Finance',
    'license': 'AGPL-3',
    'summary': 'Manage recurring donations',
    'author': 'Barroux Abbey, Akretion, Odoo Community Association (OCA)',
    'website': 'http://www.barroux.org',
    # Requires the base 'donation' module.
    'depends': ['donation'],
    # Views and wizards loaded on installation.
    'data': [
        'donation_view.xml',
        'wizard/donation_recurring_generate_view.xml',
    ],
    'demo': ['donation_recurring_demo.xml'],
    'test': ['test/generate_recurring_donations.yml'],
}
| agpl-3.0 |
BRAINSia/ITK | Wrapping/Generators/Python/itk/support/itkLazy.py | 1 | 4992 | # ==========================================================================
#
# Copyright NumFOCUS
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ==========================================================================*/
import types
from itk.support import itkBase
# Needed to avoid problem with aliasing of itk.set (itkTemplate)
# inside the itk namespace. We need to explictly specify the
# use of the builtin set
from builtins import set as _builtin_set
# Need to use a recursive lock for thread ownership
# within the given thread you can acquire a RLock as often as you like.
# Other threads need to wait until this thread releases the resource again.
from multiprocessing import RLock as _mp_RLock
# A single lock is needed for all lazy loading. This lock blocks
# across all threads until this thread has completed all its imports
# and dependancies. The complex inter-relationship, and the recursive
# nature of imports, makes a more fine-grained locking very difficult
# to implement robustly.
_gbl_lazy_load_lock: _mp_RLock = _mp_RLock()
# Sentinel stored for every lazily-exposed attribute until its backing ITK
# sub-module has actually been imported (compared with ``is``).
not_loaded: str = "not loaded"
def _lazy_itk_module_reconstructor(module_name, state):
    """Rebuild a LazyITKModule during unpickling (cf. copyreg._reconstructor)."""
    rebuilt = types.ModuleType.__new__(LazyITKModule, state)
    types.ModuleType.__init__(rebuilt, module_name)
    return rebuilt
class LazyITKModule(types.ModuleType):
    """Subclass of ModuleType that implements a custom __getattribute__ method
    to allow lazy-loading of attributes from ITK sub-modules."""

    def __init__(self, name, lazy_attributes):
        """Create the lazy module.

        :param name: module name exposed to Python.
        :param lazy_attributes: dict mapping attribute name -> sequence of
            sub-module names able to provide that attribute.
        """
        types.ModuleType.__init__(self, name)
        # Merge this module's attribute map into the global registry.
        for k, v in lazy_attributes.items():
            itkBase.itk_base_global_lazy_attributes.setdefault(
                k, _builtin_set()
            ).update(v)
        # Attribute name -> first sub-module that provides it.
        self.__belong_lazy_attributes = dict(
            (k, v[0]) for k, v in lazy_attributes.items() if len(v) > 0
        )
        # Every lazy attribute starts out as the 'not_loaded' sentinel.
        for k in lazy_attributes:
            setattr(self, k, not_loaded)
        # For PEP 366
        setattr(self, "__package__", "itk")
        setattr(self, "itk_base_global_lazy_attributes", lazy_attributes)
        setattr(self, "loaded_lazy_modules", _builtin_set())

    def __getattribute__(self, attr):
        """Load the backing swig module on first access of a lazy attribute."""
        value = types.ModuleType.__getattribute__(self, attr)
        if value is not_loaded:
            with _gbl_lazy_load_lock:  # All but one thread will block here.
                # BUGFIX: re-read the attribute now that we hold the lock.
                # Another thread may have completed the load while this one
                # was blocked; the previous code re-tested the stale local
                # captured *before* acquiring the lock, so every waiting
                # thread needlessly redid the whole swig-module load and the
                # "already loaded" branch was dead code.
                value = types.ModuleType.__getattribute__(self, attr)
                if value is not_loaded:
                    module = self.__belong_lazy_attributes[attr]
                    namespace = {}
                    itkBase.itk_load_swig_module(module, namespace)
                    self.loaded_lazy_modules.add(module)
                    for k, v in namespace.items():
                        setattr(self, k, v)
                    value = namespace[attr]
            assert value is not not_loaded
        return value

    # For pickle support
    def __reduce_ex__(self, proto):
        state = self.__getstate__()
        return _lazy_itk_module_reconstructor, (self.__name__, state), state

    # For pickle support
    def __getstate__(self):
        state = self.__dict__.copy()
        lazy_modules = list()
        # Nested lazy modules cannot be pickled directly: record how to
        # rebuild them and reset every lazy attribute to the sentinel.
        for key in self.itk_base_global_lazy_attributes:
            if isinstance(state[key], LazyITKModule):
                lazy_modules.append((key, state[key].itk_base_global_lazy_attributes))
            state[key] = not_loaded
        state["lazy_modules"] = lazy_modules
        return state

    # For pickle support
    def __setstate__(self, state):
        """Restore state: rebuild nested lazy modules and re-import every
        swig module that had already been loaded when pickled."""
        self.__dict__.update(state)
        for module_name, lazy_attributes in state["lazy_modules"]:
            self.__dict__.update(
                {module_name: LazyITKModule(module_name, lazy_attributes)}
            )
        for module in state["loaded_lazy_modules"]:
            namespace = {}
            itkBase.itk_load_swig_module(module, namespace)
            for k, v in namespace.items():
                setattr(self, k, v)
| apache-2.0 |
cin/spark | examples/src/main/python/ml/onehot_encoder_example.py | 72 | 1605 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
# $example on$
from pyspark.ml.feature import OneHotEncoder, StringIndexer
# $example off$
from pyspark.sql import SparkSession
if __name__ == "__main__":
    spark = SparkSession\
        .builder\
        .appName("OneHotEncoderExample")\
        .getOrCreate()
    # $example on$
    # Toy dataset: six rows with a string 'category' column.
    df = spark.createDataFrame([
        (0, "a"),
        (1, "b"),
        (2, "c"),
        (3, "a"),
        (4, "a"),
        (5, "c")
    ], ["id", "category"])
    # First map each category string to a numeric index column...
    stringIndexer = StringIndexer(inputCol="category", outputCol="categoryIndex")
    model = stringIndexer.fit(df)
    indexed = model.transform(df)
    # ...then one-hot encode that index into a vector column.
    encoder = OneHotEncoder(inputCol="categoryIndex", outputCol="categoryVec")
    encoded = encoder.transform(indexed)
    encoded.show()
    # $example off$
    spark.stop()
| apache-2.0 |
pombredanne/invenio | modules/websubmit/lib/websubmit_templates.py | 1 | 135737 | ## This file is part of Invenio.
## Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
__revision__ = "$Id$"
import urllib
import cgi
import re
import operator
from invenio.config import CFG_SITE_URL, \
CFG_SITE_LANG
from invenio.messages import gettext_set_language
from invenio.dateutils import convert_datetext_to_dategui, convert_datestruct_to_dategui
from invenio.urlutils import create_html_link
from invenio.webmessage_mailutils import email_quoted_txt2html
from invenio.htmlutils import escape_html
from websubmit_config import \
CFG_WEBSUBMIT_CHECK_USER_LEAVES_SUBMISSION
class Template:
# Parameters allowed in the web interface for fetching files
files_default_urlargd = {
'version': (str, ""), # version "" means "latest"
'docname': (str, ""), # the docname (optional)
'format' : (str, ""), # the format
'verbose' : (int, 0), # the verbosity
'subformat' : (str, ""), # the subformat
}
def tmpl_submit_home_page(self, ln, catalogues):
"""
The content of the home page of the submit engine
Parameters:
- 'ln' *string* - The language to display the interface in
- 'catalogues' *string* - The HTML code for the catalogues list
"""
# load the right message language
_ = gettext_set_language(ln)
return """
<script type="text/javascript" language="Javascript1.2">
var allLoaded = 1;
</script>
<table class="searchbox" width="100%%" summary="">
<tr>
<th class="portalboxheader">%(document_types)s:</th>
</tr>
<tr>
<td class="portalboxbody">
<br />
%(please_select)s:
<br /><br />
<table width="100%%">
<tr>
<td width="50%%" class="narrowsearchboxbody">
%(catalogues)s
</td>
</tr>
</table>
</td>
</tr>
</table>""" % {
'document_types' : _("Document types available for submission"),
'please_select' : _("Please select the type of document you want to submit"),
'catalogues' : catalogues,
'ln' : ln,
}
def tmpl_submit_home_catalog_no_content(self, ln):
"""
The content of the home page of submit in case no doctypes are available
Parameters:
- 'ln' *string* - The language to display the interface in
"""
# load the right message language
_ = gettext_set_language(ln)
out = "<h3>" + _("No document types available.") + "</h3>\n"
return out
def tmpl_submit_home_catalogs(self, ln, catalogs):
"""
Produces the catalogs' list HTML code
Parameters:
- 'ln' *string* - The language to display the interface in
- 'catalogs' *array* - The catalogs of documents, each one a hash with the properties:
- 'id' - the internal id
- 'name' - the name
- 'sons' - sub-catalogs
- 'docs' - the contained document types, in the form:
- 'id' - the internal id
- 'name' - the name
There is at least one catalog
"""
# load the right message language
_ = gettext_set_language(ln)
# import pprint
# out = "<pre>" + pprint.pformat(catalogs)
out = ""
for catalog in catalogs:
out += "\n<ul>"
out += self.tmpl_submit_home_catalogs_sub(ln, catalog)
out += "\n</ul>\n"
return out
def tmpl_print_warning(self, msg, type, prologue, epilogue):
"""Prints warning message and flushes output.
Parameters:
- 'msg' *string* - The message string
- 'type' *string* - the warning type
- 'prologue' *string* - HTML code to display before the warning
- 'epilogue' *string* - HTML code to display after the warning
"""
out = '\n%s<span class="quicknote">' % (prologue)
if type:
out += '%s: ' % type
out += '%s</span>%s' % (msg, epilogue)
return out
def tmpl_submit_home_catalogs_sub(self, ln, catalog):
"""
Recursive function that produces a catalog's HTML display
Parameters:
- 'ln' *string* - The language to display the interface in
- 'catalog' *array* - A catalog of documents, with the properties:
- 'id' - the internal id
- 'name' - the name
- 'sons' - sub-catalogs
- 'docs' - the contained document types, in the form:
- 'id' - the internal id
- 'name' - the name
"""
# load the right message language
_ = gettext_set_language(ln)
if catalog['level'] == 1:
out = "<li><font size=\"+1\"><strong>%s</strong></font>\n" % catalog['name']
else:
if catalog['level'] == 2:
out = "<li>%s\n" % cgi.escape(catalog['name'])
else:
if catalog['level'] > 2:
out = "<li>%s\n" % cgi.escape(catalog['name'])
if len(catalog['docs']) or len(catalog['sons']):
out += "<ul>\n"
if len(catalog['docs']) != 0:
for row in catalog['docs']:
out += self.tmpl_submit_home_catalogs_doctype(ln, row)
if len(catalog['sons']) != 0:
for row in catalog['sons']:
out += self.tmpl_submit_home_catalogs_sub(ln, row)
if len(catalog['docs']) or len(catalog['sons']):
out += "</ul></li>"
else:
out += "</li>"
return out
def tmpl_submit_home_catalogs_doctype(self, ln, doc):
    """Render one document type as a list item linking to its submission page.

    Parameters:
    - 'ln' *string* - The language to display the interface in
    - 'doc' *array* - the document type, with the properties:
        - 'id' - the internal id
        - 'name' - the name
    """
    # load the right message language
    _ = gettext_set_language(ln)
    link = create_html_link('%s/submit' % CFG_SITE_URL,
                            {'doctype' : doc['id'], 'ln' : ln},
                            doc['name'])
    return """<li>%s</li>""" % link
def tmpl_action_page(self, ln, uid, guest, pid, now, doctype,
                     description, docfulldesc, snameCateg,
                     lnameCateg, actionShortDesc, indir,
                     statustext):
    """
    Produces the action-selection page of a submission: category radio
    buttons, one submit button per available action, and a small form to
    resume a previously interrupted submission by access number.
    Parameters:
    - 'ln' *string* - The language to display the interface in
    - 'uid' *string* - The current user id (accepted but not used here)
    - 'guest' *boolean* - If the user is logged in or not
    - 'pid' *string* - The current process id
    - 'now' *string* - The current time (security control features)
    - 'doctype' *string* - The selected doctype
    - 'description' *string* - The description of the doctype
    - 'docfulldesc' *string* - The title text of the page
    - 'snameCateg' *array* - The short names of all the categories of documents
    - 'lnameCateg' *array* - The long names of all the categories of documents
    - 'actionShortDesc' *array* - The short names (codes) for the different actions
    - 'indir' *array* - The directories for each of the actions
    - 'statustext' *array* - The names of the different action buttons
    """
    # load the right message language
    _ = gettext_set_language(ln)
    out = ""
    # Open the client-side validation script.  tester() refuses form
    # submission until a category radio button has set 'checked' via
    # clicked() below.
    out += """
<script language="JavaScript" type="text/javascript">
var checked = 0;
function tester() {
"""
    if (guest):
        # Guests may not submit: tester() bails out early with a login prompt.
        out += "alert(\"%(please_login_js)s\");return false;\n" % {
            'please_login_js' : _("Please log in first.") + '\\n' + _("Use the top-right menu to log in.")
        }
    out += """
if (checked == 0) {
alert ("%(select_cat)s");
return false;
} else {
return true;
}
}
function clicked() {
checked=1;
}
function selectdoctype(nb) {
document.forms[0].act.value = docname[nb];
}
</script>
<form method="get" action="/submit">
<input type="hidden" name="doctype" value="%(doctype)s" />
<input type="hidden" name="indir" />
<input type="hidden" name="access" value="%(now)i_%(pid)s" />
<input type="hidden" name="act" />
<input type="hidden" name="startPg" value="1" />
<input type="hidden" name="mainmenu" value="/submit?doctype=%(doctype)s&ln=%(ln)s" />
<input type="hidden" name="ln" value="%(ln)s" />
<table class="searchbox" width="100%%" summary="">
<tr>
<th class="portalboxheader">%(docfulldesc)s</th>
</tr>
<tr>
<td class="portalboxbody">%(description)s
<br />
<script language="JavaScript" type="text/javascript">
var nbimg = document.images.length + 1;
</script>
<br />
<table align="center" cellpadding="0" cellspacing="0" border="0">
<tr valign="top">
""" % {
        'select_cat' : _("Please select a category"),
        'doctype' : doctype,
        'now' : now,
        'pid' : pid,
        'docfulldesc' : docfulldesc,
        'description' : description,
        'ln' : ln,
    }
    if len(snameCateg) :
        out += """<td align="right">"""
        # One radio button per category; clicking any of them sets the JS
        # 'checked' flag so tester() lets the submission through.
        for i in range(0, len(snameCateg)):
            out += """<label for="combo%(shortname)s">%(longname)s</label><input type="radio" name="combo%(doctype)s" id="combo%(shortname)s" value="%(shortname)s" onclick="clicked();" /> <br />""" % {
                'longname' : lnameCateg[i],
                'doctype' : doctype,
                'shortname' : snameCateg[i],
            }
        out += "</td><td>"
    else:
        # No categories at all: pre-set the flag so tester() always passes.
        out += '<td><script type="text/javascript">checked=1;</script>'
    out += """ </td>
<td>
<table><tr><td>
"""
    #display list of actions
    for i in range(0, len(actionShortDesc)):
        out += """<input type="submit" class="adminbutton" value="%(status)s" onclick="if (tester()) { document.forms[0].indir.value='%(indir)s';document.forms[0].act.value='%(act)s';document.forms[0].submit();}; return false;" /><br />""" % {
            'status' : statustext[i],
            'indir' : indir[i],
            'act' : actionShortDesc[i]
        }
    out += """ </td></tr></table>
</td>
</tr>
</table>
<br />"""
    if len(snameCateg) :
        out += """<strong class="headline">%(notice)s:</strong><br />
%(select_cat)s""" % {
            'notice' : _("Notice"),
            'select_cat' : _("Select a category and then click on an action button."),
        }
    # Separate form to resume a previously interrupted submission via its
    # access number ("AN").
    out += """
<br /><br />
</td>
</tr>
</table>
</form>
<form action="/submit"><hr />
<font color="black"><small>%(continue_explain)s</small></font>
<table border="0" bgcolor="#CCCCCC" width="100%%"><tr>
<td width="100%%">
<small>Access Number: <input size="15" name="AN" />
<input type="hidden" name="doctype" value="%(doctype)s" />
<input type="hidden" name="ln" value="%(ln)s" />
<input class="adminbutton" type="submit" value=" %(go)s " />
</small>
</td></tr>
</table>
<hr />
</form>
""" % {
        'continue_explain' : _("To continue with a previously interrupted submission, enter an access number into the box below:"),
        'doctype' : doctype,
        'go' : _("GO"),
        'ln' : ln,
    }
    return out
def tmpl_warning_message(self, ln, msg):
    """Wrap *msg* in a centered red <font> element.

    Parameters:
    - 'ln' *string* - The language to display the interface in
    - 'msg' *string* - The message to display (inserted verbatim, not escaped)
    """
    # load the right message language
    _ = gettext_set_language(ln)
    warning_html = '<center><font color="red">%s</font></center>' % msg
    return warning_html
def tmpl_page_interface(self, ln, docname, actname, curpage, nbpages, nextPg, access, nbPg, doctype, act, fields, javascript, mainmenu):
    """
    Produces a page with the specified fields (in the submit chain)
    Parameters:
    - 'ln' *string* - The language to display the interface in
    - 'doctype' *string* - The document type
    - 'docname' *string* - The document type name
    - 'actname' *string* - The action name
    - 'act' *string* - The action
    - 'curpage' *int* - The current page of submitting engine
    - 'nbpages' *int* - The total number of pages
    - 'nextPg' *int* - The next page
    - 'access' *string* - The submission number
    - 'nbPg' *string* - ??
    - 'fields' *array* - the fields to display in the page, with each record having the structure:
        - 'fullDesc' *string* - the description of the field
        - 'text' *string* - the HTML code of the field
        - 'javascript' *string* - if the field has some associated javascript code
        - 'type' *string* - the type of field (T, F, I, H, D, S, R)
        - 'name' *string* - the name of the field
        - 'rows' *string* - the number of rows for textareas
        - 'cols' *string* - the number of columns for textareas
        - 'val' *string* - the default value of the field
        - 'size' *string* - the size for text fields
        - 'maxlength' *string* - the maximum length for text fields
        - 'htmlcode' *string* - the complete HTML code for user-defined fields
        - 'typename' *string* - the long name of the type
    - 'javascript' *string* - the javascript code to insert in the page
    - 'mainmenu' *string* - the url of the main menu
    """
    # load the right message language
    _ = gettext_set_language(ln)
    # top menu
    out = """
<form method="post" action="/submit" enctype="multipart/form-data" onsubmit="return tester();" accept-charset="UTF-8">
<center><table cellspacing="0" cellpadding="0" border="0">
<tr>
<td class="submitHeader"><b>%(docname)s </b></td>
<td class="submitHeader"><small> %(actname)s </small></td>
<td valign="bottom">
<table cellspacing="0" cellpadding="0" border="0" width="100%%">
<tr><td class="submitEmptyPage"> </td>
""" % {
        'docname' : docname,
        'actname' : actname,
    }
    # One tab per page; the current one is highlighted, the others become
    # links that re-submit the form with curpage updated (tester2() is the
    # validation function emitted by tmpl_page_interface_js).
    for i in range(1, nbpages+1):
        if i == int(curpage):
            out += """<td class="submitCurrentPage"><small> page: %s </small></td>""" % curpage
        else:
            out += """<td class="submitPage"><small> <a href='' onclick="if (tester2() == 1){document.forms[0].curpage.value=%s;user_must_confirm_before_leaving_page = false;document.forms[0].submit();return false;} else { return false; }">%s</a> </small></td>""" % (i, i)
    out += """ <td class="submitEmptyPage">
</td></tr></table>
</td>
<td class="submitHeader" align="right"> <a href="" onclick="window.open('/submit/summary?doctype=%(doctype)s&act=%(act)s&access=%(access)s&ln=%(ln)s','summary','scrollbars=yes,menubar=no,width=500,height=250');return false;"><font color="white"><small>%(summary)s(2)</small></font></a> </td>
</tr>
<tr><td colspan="5" class="submitHeader">
<table border="0" cellspacing="0" cellpadding="15" width="100%%" class="submitBody"><tr><td>
<br />
<input type="hidden" name="nextPg" value="%(nextPg)s" />
<input type="hidden" name="access" value="%(access)s" />
<input type="hidden" name="curpage" value="%(curpage)s" />
<input type="hidden" name="nbPg" value="%(nbPg)s" />
<input type="hidden" name="doctype" value="%(doctype)s" />
<input type="hidden" name="act" value="%(act)s" />
<input type="hidden" name="mode" value="U" />
<input type="hidden" name="step" value="0" />
<input type="hidden" name="ln" value="%(ln)s" />
""" % {
        'summary' : _("SUMMARY"),
        'doctype' : cgi.escape(doctype),
        'act' : cgi.escape(act),
        'access' : cgi.escape(access),
        'nextPg' : cgi.escape(nextPg),
        'curpage' : cgi.escape(curpage),
        'nbPg' : cgi.escape(nbPg),
        'ln' : cgi.escape(ln),
    }
    # Emit each field: optional per-field javascript first, then the field's
    # description and its pre-rendered HTML control.
    for field in fields:
        if field['javascript']:
            out += """<script language="JavaScript1.1" type="text/javascript">
%s
</script>
""" % field['javascript']
        # now displays the html form field(s)
        out += "%s\n%s\n" % (field['fullDesc'], field['text'])
    out += javascript
    out += "<br /> <br /> </td></tr></table></td></tr>\n"
    # Display the navigation cell
    # Display "previous page" navigation arrows
    out += """<tr><td colspan="5"><table border="0" cellpadding="0" cellspacing="0" width="100%%"><tr>"""
    if int(curpage) != 1:
        out += """ <td class="submitHeader" align="left">
<a href='' onclick="if (tester2() == 1) {document.forms[0].curpage.value=%(prpage)s;user_must_confirm_before_leaving_page = false;document.forms[0].submit();return false;} else { return false; }">
<img src="%(images)s/left-trans.gif" alt="%(prevpage)s" border="0" />
<strong><font color="white">%(prevpage)s</font></strong>
</a>
</td>
""" % {
            'prpage' : int(curpage) - 1,
            'images' : CFG_SITE_URL + '/img',
            'prevpage' : _("Previous page"),
        }
    else:
        out += """ <td class="submitHeader"> </td>"""
    # Display the submission number
    out += """ <td class="submitHeader" align="center"><small>%(submission)s: %(access)s</small></td>\n""" % {
        'submission' : _("Submission number") + '(1)',
        'access' : cgi.escape(access),
    }
    # Display the "next page" navigation arrow
    if int(curpage) != int(nbpages):
        out += """ <td class="submitHeader" align="right">
<a href='' onclick="if (tester2()){document.forms[0].curpage.value=%(nxpage)s;user_must_confirm_before_leaving_page = false;document.forms[0].submit();return false;} else {return false;}; return false;">
<strong><font color="white">%(nextpage)s</font></strong>
<img src="%(images)s/right-trans.gif" alt="%(nextpage)s" border="0" />
</a>
</td>
""" % {
            'nxpage' : int(curpage) + 1,
            'images' : CFG_SITE_URL + '/img',
            'nextpage' : _("Next page"),
        }
    else:
        out += """ <td class="submitHeader"> </td>"""
    # Footer: back-to-main-menu link (with optional "are you sure" guard)
    # and the legend for the (1)/(2) markers used above.
    out += """</tr></table></td></tr></table></center></form>
<br />
<br />
<a href="%(mainmenu)s" onclick="if (%(check_not_already_enabled)s){return confirm('%(surequit)s')}">
<img src="%(images)s/mainmenu.gif" border="0" alt="%(back)s" align="right" /></a>
<br /><br />
<hr />
<small>%(take_note)s</small><br />
<small>%(explain_summary)s</small><br />
""" % {
        'surequit' : _("Are you sure you want to quit this submission?"),
        'check_not_already_enabled': CFG_WEBSUBMIT_CHECK_USER_LEAVES_SUBMISSION and 'false' or 'true',
        'back' : _("Back to main menu"),
        'mainmenu' : cgi.escape(mainmenu),
        'images' : CFG_SITE_URL + '/img',
        'take_note' : '(1) ' + _("This is your submission access number. It can be used to continue with an interrupted submission in case of problems."),
        'explain_summary' : '(2) ' + _("Mandatory fields appear in red in the SUMMARY window."),
    }
    return out
def tmpl_submit_field(self, ln, field):
    """Produce the HTML control for a single submission field.

    Parameters:
    - 'ln' *string* - The language to display the interface in
    - 'field' *array* - the field to display in the page, with the following structure:
        - 'javascript' *string* - if the field has some associated javascript code
        - 'type' *string* - the type of field (T, F, I, H, D, S, R)
        - 'name' *string* - the name of the field
        - 'rows' *string* - the number of rows for textareas
        - 'cols' *string* - the number of columns for textareas
        - 'val' *string* - the default value of the field
        - 'size' *string* - the size for text fields
        - 'maxlength' *string* - the maximum length for text fields
        - 'htmlcode' *string* - the complete HTML code for user-defined fields
        - 'typename' *string* - the long name of the type
    """
    # load the right message language
    _ = gettext_set_language(ln)
    ftype = field['type']
    if ftype == 'T':
        # Textarea: the value is HTML-escaped (quote=1 also escapes '"').
        return "<textarea name=\"%s\" rows=\"%s\" cols=\"%s\">%s</textarea>" \
               % (field['name'], field['rows'], field['cols'],
                  cgi.escape(str(field['val']), 1))
    if ftype in ('F', 'I'):
        # Optional maxlength attribute shared by file and text inputs: a
        # single space when unset (0/None), otherwise ' maxlength="N"'.
        if field['maxlength'] in (0, None):
            maxlength_attr = " "
        else:
            maxlength_attr = """ maxlength="%s\"""" % field['maxlength']
        if ftype == 'F':
            return """<input type="file" name="%s" size="%s"%s />""" \
                   % (field['name'], field['size'], maxlength_attr)
        # NOTE(review): 'val' is inserted unescaped here (unlike textareas);
        # a value containing '"' would break the attribute -- confirm that
        # callers pre-sanitise it.
        return """<input type="text" name="%s" size="%s" value="%s"%s />""" \
               % (field['name'], field['size'], field['val'], maxlength_attr)
    if ftype == 'H':
        # Hidden input; 'val' is likewise inserted unescaped.
        return "<input type=\"hidden\" name=\"%s\" value=\"%s\" />" % (field['name'], field['val'])
    if ftype in ('D', 'S'):
        # User-defined and select-box fields carry ready-made HTML.
        return field['htmlcode']
    # Unrecognized field type.
    return "%s: unknown field type" % field['typename']
def tmpl_page_interface_js(self, ln, upload, field, fieldhtml, txt, check, level, curdir, values, select, radio, curpage, nbpages, returnto):
    """
    Produces the javascript for validation and value filling for a submit interface page
    Parameters:
    - 'ln' *string* - The language to display the interface in
    - 'upload' *array* - booleans if the field is a <input type="file"> field
    - 'field' *array* - the fields' names
    - 'fieldhtml' *array* - the fields' HTML representation
    - 'txt' *array* - the fields' long name
    - 'check' *array* - if the fields should be checked (in javascript)
    - 'level' *array* - strings, if the fields should be filled (M) or not (O)
    - 'curdir' *array* - the current directory of the submission
      (accepted but not referenced anywhere in this body)
    - 'values' *array* - the current values of the fields
    - 'select' *array* - booleans, if the controls are "select" controls
    - 'radio' *array* - booleans, if the controls are "radio" controls
    - 'curpage' *int* - the current page
    - 'nbpages' *int* - the total number of pages
    - 'returnto' *array* - a structure with 'field' and 'page', if a mandatory field on another page was not completed
    """
    # load the right message language
    _ = gettext_set_language(ln)
    nbFields = len(upload)
    # if there is a file upload field, we change the encoding type
    out = """<script language="JavaScript1.1" type="text/javascript">
"""
    for i in range(0,nbFields):
        if upload[i] == 1:
            out += "document.forms[0].encoding = \"multipart/form-data\";\n"
            break
    # we don't want the form to be submitted if the user enters 'Return'
    # tests if mandatory fields are well filled
    out += """function tester(){
return false;
}
function tester2() {
"""
    # tester2(): per-field validation, emitted once per field in order.
    for i in range(0,nbFields):
        # Array-style fields are named 'name[]' in the generated HTML.
        if re.search("%s\[\]" % field[i],fieldhtml[i]):
            fieldname = "%s[]" % field[i]
        else:
            fieldname = field[i]
        out += " el = document.forms[0].elements['%s'];\n" % fieldname
        # If the field must be checked we call the checking function
        if check[i] != "":
            out += """if (%(check)s(el.value) == 0) {
el.focus();
return 0;
} """ % {
                'check' : check[i]
            }
        # If the field is mandatory, we check a value has been selected
        if level[i] == 'M':
            if select[i] != 0:
                # If the field is a select box
                out += """if ((el.selectedIndex == -1)||(el.selectedIndex == 0)){
alert("%(field_mandatory)s");
return 0;
} """ % {
                    'field_mandatory' : _("The field %s is mandatory.") % txt[i] + '\\n' + _("Please make a choice in the select box")
                }
            elif radio[i] != 0:
                # If the field is a radio buttonset
                out += """var check=0;
for (var j = 0; j < el.length; j++) {
if (el.options[j].checked){
check++;
}
}
if (check == 0) {
alert("%(press_button)s");
return 0;
}""" % {
                    'press_button':_("Please press a button.")
                }
            else:
                # If the field is a text input
                out += """if (el.value == '') {
alert("%(field_mandatory)s");
return 0;
}""" % {
                    'field_mandatory' : _("The field %s is mandatory. Please fill it in.") % txt[i]
                }
    out += """ return 1;
}
<!-- Fill the fields in with the previous saved values-->
"""
    # # # # # # # # # # # # # # # # # # # # # # # # #
    # Fill the fields with the previously saved values
    # # # # # # # # # # # # # # # # # # # # # # # # #
    for i in range(0,nbFields):
        if re.search("%s\[\]"%field[i],fieldhtml[i]):
            fieldname = "%s[]" % field[i]
        else:
            fieldname = field[i]
        text = values[i]
        if text != '':
            if select[i] != 0:
                # If the field is a SELECT element: build one big boolean
                # expression matching each saved value against option value
                # or option text.
                vals = text.split("\n")
                tmp=""
                for val in vals:
                    if tmp != "":
                        tmp = tmp + " || "
                    tmp = tmp + "el.options[j].value == \"%s\" || el.options[j].text == \"%s\"" % (val,val)
                if tmp != "":
                    out += """
<!--SELECT field found-->
el = document.forms[0].elements['%(fieldname)s'];
for (var j = 0; j < el.length; j++){
if (%(tmp)s){
el.options[j].selected = true;
}
}""" % {
                        'fieldname' : fieldname,
                        'tmp' : tmp,
                    }
            elif radio[i] != 0:
                # If the field is a RADIO element
                out += """<!--RADIO field found-->
el = document.forms[0].elements['%(fieldname)s'];
if (el.value == "%(text)s"){
el.checked=true;
}""" % {
                    'fieldname' : fieldname,
                    'text' : cgi.escape(str(text)).replace('"', '\\"'),
                }
            elif upload[i] == 0:
                # NOTE(review): '\"' is the same string as '"', so this
                # first replace is a no-op; actual quote escaping happens
                # below via cgi.escape(...).replace('"', '\\"').
                text = text.replace('"','\"')
                text = text.replace("\n","\\n")
                # If the field is not an upload element
                out += """<!--input field found-->
el = document.forms[0].elements['%(fieldname)s'];
el.value="%(text)s";
""" % {
                    'fieldname' : fieldname,
                    'text' : cgi.escape(str(text)).replace('"', '\\"'),
                }
    out += """<!--End Fill in section-->
"""
    # JS function finish
    # This function tests each mandatory field in the whole submission and checks whether
    # the field has been correctly filled in or not
    # This function is called when the user presses the "End
    # Submission" button
    if int(curpage) == int(nbpages):
        out += """function finish() {
"""
        if returnto:
            # NOTE(review): '%(field)s' in the first message is never
            # interpolated -- the literal placeholder is shown to the user
            # instead of returnto['field'].  Confirm intended behaviour.
            out += """alert ("%(msg)s");
document.forms[0].curpage.value="%(page)s";
user_must_confirm_before_leaving_page = false;
document.forms[0].submit();
}
""" % {
                'msg' : _("The field %(field)s is mandatory.") + '\n' \
                + _("Going back to page") \
                + str(returnto['page']),
                'page' : returnto['page']
            }
        else:
            out += """ if (tester2()) {
document.forms[0].action="/submit";
document.forms[0].step.value=1;
user_must_confirm_before_leaving_page = false;
document.forms[0].submit();
} else {
return false;
}
}"""
    out += """</script>"""
    return out
def tmpl_page_do_not_leave_submission_js(self, ln, enabled=CFG_WEBSUBMIT_CHECK_USER_LEAVES_SUBMISSION):
    """
    Code to ask user confirmation when leaving the page, so that the
    submission is not interrupted by mistake.
    All submission functions should set the Javascript variable
    'user_must_confirm_before_leaving_page' to 'false' before
    programmatically submitting the submission form.
    Parameters:
    - 'ln' *string* - The language to display the interface in
    - 'enabled' *bool* - If the check applies or not
    """
    # load the right message language
    _ = gettext_set_language(ln)
    # The warning text is embedded in a JS string literal, so escape quotes.
    warning = _('Your modifications will not be saved.').replace('"', '\\"')
    flag = 'true' if enabled else 'false'
    return '''
<script language="JavaScript">
var user_must_confirm_before_leaving_page = %s;
window.onbeforeunload = confirmExit;
function confirmExit() {
if (user_must_confirm_before_leaving_page)
return "%s";
}
</script>
''' % (flag, warning)
def tmpl_page_endaction(self, ln, nextPg, startPg, access, curpage, nbPg, nbpages, doctype, act, docname, actname, mainmenu, finished, function_content, next_action):
    """
    Produces the pages after all the fields have been submitted.
    Parameters:
    - 'ln' *string* - The language to display the interface in
    - 'doctype' *string* - The document type
    - 'act' *string* - The action
    - 'docname' *string* - The document type name
    - 'actname' *string* - The action name
    - 'curpage' *int* - The current page of submitting engine
    - 'startPg' *int* - The start page
    - 'nextPg' *int* - The next page
    - 'access' *string* - The submission number
    - 'nbPg' *string* - total number of pages
    - 'nbpages' *string* - number of pages (?)
    - 'mainmenu' *string* - the url of the main menu
    - 'finished' *bool* - if the submission is finished
    - 'function_content' *string* - HTML code produced by some function executed
    - 'next_action' *string* - if there is another action to be completed, the HTML code for linking to it
    """
    # load the right message language
    _ = gettext_set_language(ln)
    # Header: the submission form with its state carried in hidden inputs,
    # plus the docname/actname title cells.
    out = """
<form ENCTYPE="multipart/form-data" action="/submit" onsubmit="user_must_confirm_before_leaving_page=false;" method="post" accept-charset="UTF-8">
<input type="hidden" name="nextPg" value="%(nextPg)s" />
<input type="hidden" name="startPg" value="%(startPg)s" />
<input type="hidden" name="access" value="%(access)s" />
<input type="hidden" name="curpage" value="%(curpage)s" />
<input type="hidden" name="nbPg" value="%(nbPg)s" />
<input type="hidden" name="doctype" value="%(doctype)s" />
<input type="hidden" name="act" value="%(act)s" />
<input type="hidden" name="fromdir" value="" />
<input type="hidden" name="mainmenu" value="%(mainmenu)s" />
<input type="hidden" name="mode" value="U" />
<input type="hidden" name="step" value="1" />
<input type="hidden" name="deleted" value="no" />
<input type="hidden" name="file_path" value="" />
<input type="hidden" name="userfile_name" value="" />
<input type="hidden" name="ln" value="%(ln)s" />
<center><table cellspacing="0" cellpadding="0" border="0"><tr>
<td class="submitHeader"><b>%(docname)s </b></td>
<td class="submitHeader"><small> %(actname)s </small></td>
<td valign="bottom">
<table cellspacing="0" cellpadding="0" border="0" width="100%%">
<tr><td class="submitEmptyPage"> </td>
""" % {
        'nextPg' : cgi.escape(nextPg),
        'startPg' : cgi.escape(startPg),
        'access' : cgi.escape(access),
        'curpage' : cgi.escape(curpage),
        'nbPg' : cgi.escape(nbPg),
        'doctype' : cgi.escape(doctype),
        'act' : cgi.escape(act),
        'docname' : docname,
        'actname' : actname,
        'mainmenu' : cgi.escape(mainmenu),
        'ln' : cgi.escape(ln),
    }
    if finished == 1:
        # Submission complete: just show the "finished!" tab.
        out += """<td class="submitCurrentPage">%(finished)s</td>
<td class="submitEmptyPage"> </td>
</tr></table>
</td>
<td class="submitEmptyPage" align="right"> </td>
""" % {
            'finished' : _("finished!"),
        }
    else:
        # Still in progress: show one clickable tab per page (step reset to
        # 0 re-enters the interface), then the "end of action" tab and the
        # SUMMARY popup link.
        for i in range(1, nbpages + 1):
            out += """<td class="submitPage"><small>
<a href='' onclick="document.forms[0].curpage.value=%s;document.forms[0].action='/submit';document.forms[0].step.value=0;user_must_confirm_before_leaving_page = false;document.forms[0].submit();return false;">%s</a> </small></td>""" % (i,i)
        out += """<td class="submitCurrentPage">%(end_action)s</td><td class="submitEmptyPage"> </td></tr></table></td>
<td class="submitHeader" align="right"> <a href='' onclick="window.open('/submit/summary?doctype=%(doctype)s&act=%(act)s&access=%(access)s&ln=%(ln)s','summary','scrollbars=yes,menubar=no,width=500,height=250');return false;"><font color="white"><small>%(summary)s(2)</small></font></a> </td>""" % {
            'end_action' : _("end of action"),
            'summary' : _("SUMMARY"),
            'doctype' : cgi.escape(doctype),
            'act' : cgi.escape(act),
            'access' : cgi.escape(access),
            'ln' : cgi.escape(ln),
        }
    # Body: the executed function's output plus the optional next-action links.
    out += """</tr>
<tr>
<td colspan="5" class="submitBody">
<small><br /><br />
%(function_content)s
%(next_action)s
<br /><br />
</td>
</tr>
<tr class="submitHeader">
<td class="submitHeader" colspan="5" align="center">""" % {
        'function_content' : function_content,
        'next_action' : next_action,
    }
    if finished == 0:
        # Footer cell: remind the user of the submission access number.
        out += """<small>%(submission)s</small>²:
<small>%(access)s</small>""" % {
            'submission' : _("Submission no"),
            'access' : cgi.escape(access),
        }
    else:
        out += " \n"
    out += """
</td>
</tr>
</table>
</center>
</form>
<br />
<br />"""
    # Add the "back to main menu" button
    if finished == 0:
        # In-progress submissions get an optional "are you sure" confirm.
        out += """ <a href="%(mainmenu)s" onclick="if (%(check_not_already_enabled)s){return confirm('%(surequit)s')}">
<img src="%(images)s/mainmenu.gif" border="0" alt="%(back)s" align="right" /></a>
<br /><br />""" % {
            'surequit' : _("Are you sure you want to quit this submission?"),
            'back' : _("Back to main menu"),
            'images' : CFG_SITE_URL + '/img',
            'mainmenu' : cgi.escape(mainmenu),
            'check_not_already_enabled': CFG_WEBSUBMIT_CHECK_USER_LEAVES_SUBMISSION and 'false' or 'true',
        }
    else:
        out += """ <a href="%(mainmenu)s">
<img src="%(images)s/mainmenu.gif" border="0" alt="%(back)s" align="right" /></a>
<br /><br />""" % {
            'back' : _("Back to main menu"),
            'images' : CFG_SITE_URL + '/img',
            'mainmenu' : cgi.escape(mainmenu),
        }
    return out
def tmpl_function_output(self, ln, display_on, action, doctype, step, functions):
    """Render the output of the executed submission functions.

    Parameters:
    - 'ln' *string* - The language to display the interface in
    - 'display_on' *bool* - If debug information should be displayed
    - 'doctype' *string* - The document type
    - 'action' *string* - The action
    - 'step' *int* - The current step in submission
    - 'functions' *array* - HTML code produced by functions executed and informations about the functions
         - 'name' *string* - the name of the function
         - 'score' *string* - the score of the function
         - 'error' *bool* - if the function execution produced errors
         - 'text' *string* - the HTML code produced by the function
    """
    # load the right message language
    _ = gettext_set_language(ln)
    if not display_on:
        # Production mode: concatenate only the successful functions' HTML.
        return "".join([f['text'] for f in functions if not f['error']])
    # Debug mode: a table with one row per function (errors are reported
    # inline in the result column).
    out = """<br /><br />%(function_list)s<P>
<table border="1" cellpadding="15">
<tr><th>%(function)s</th><th>%(score)s</th><th>%(running)s</th></tr>
""" % {
        'function_list' : _("Here is the %(x_action)s function list for %(x_doctype)s documents at level %(x_step)s") % {
            'x_action' : action,
            'x_doctype' : doctype,
            'x_step' : step,
        },
        'function' : _("Function"),
        'score' : _("Score"),
        'running' : _("Running function"),
    }
    for function in functions:
        if function['error']:
            result = _("Function %s does not exist.") % function['name'] + "<br />"
        else:
            result = function['text']
        out += """<tr><td>%(name)s</td><td>%(score)s</td><td>%(result)s</td></tr>""" % {
            'name' : function['name'],
            'score' : function['score'],
            'result' : result,
        }
    out += "</table>"
    return out
def tmpl_next_action(self, ln, actions):
    """Render the list of follow-up actions the user must now choose from.

    Parameters:
    - 'ln' *string* - The language to display the interface in
    - 'actions' *array* - The actions to display, in the structure
         - 'page' *string* - the starting page
         - 'action' *string* - the action (in terms of submission)
         - 'doctype' *string* - the doctype
         - 'nextdir' *string* - the path to the submission data
         - 'access' *string* - the submission number
         - 'indir' *string* - ??
         - 'name' *string* - the name of the action
    """
    # load the right message language
    _ = gettext_set_language(ln)
    out = "<br /><br />%(haveto)s<ul>" % {
        'haveto' : _("You must now"),
    }
    i = 0
    for action in actions:
        if i > 0:
            out += " <b>" + _("or") + "</b> "
        i += 1
        # BUGFIX: the generated onclick handler was missing a ';' between
        # 'user_must_confirm_before_leaving_page = false' and
        # 'document.forms[0].submit()', producing the invalid JS token
        # 'falsedocument' and thus a syntax error in the browser.
        out += """<li><a href="" onclick="document.forms[0].action='/submit';document.forms[0].curpage.value='%(page)s';document.forms[0].startPg.value='%(page)s';document.forms[0].act.value='%(action)s';document.forms[0].doctype.value='%(doctype)s';document.forms[0].indir.value='%(nextdir)s';document.forms[0].access.value='%(access)s';document.forms[0].fromdir.value='%(indir)s';user_must_confirm_before_leaving_page = false;document.forms[0].submit();return false;"> %(name)s </a></li>""" % action
    out += "</ul>"
    return out
def tmpl_filelist(self, ln, filelist='', recid='', docname='', version=''):
    """Display the file list for a record.

    Parameters:
    - 'ln' *string* - The language to display the interface in
    - 'recid' *int* - The record id (unused; kept for interface compatibility)
    - 'docname' *string* - The document name (unused; kept for interface compatibility)
    - 'version' *int* - The version of the document (unused; kept for interface compatibility)
    - 'filelist' *string* - The HTML string of the filelist (produced by the BibDoc classes)
    """
    # load the right message language
    _ = gettext_set_language(ln)
    # CLEANUP: the original also assembled a "record #<recid> document
    # #<docname> version #<version>" title string here but never inserted
    # it into the returned markup; that dead computation has been removed
    # (the parameters stay so existing callers keep working).
    out = """<div style="width:90%%;margin:auto;min-height:100px;margin-top:10px">
<!--start file list-->
%s
<!--end file list--></div>
""" % (filelist)
    return out
def tmpl_bibrecdoc_filelist(self, ln, types, verbose_files=''):
    """Display the file lists of a record, grouped by type.

    Parameters:
    - 'ln' *string* - The language to display the interface in
    - 'types' *array* - The different types to display, each record in the format:
         - 'name' *string* - The name of the format
         - 'content' *array of string* - The HTML code produced by tmpl_bibdoc_filelist, for the right files
    - 'verbose_files' - A string representing in a verbose way the
      file information.
    """
    # load the right message language
    _ = gettext_set_language(ln)
    chunks = []
    for mytype in types:
        chunks.append("<small><b>%s</b> %s:</small>" % (mytype['name'], _("file(s)")))
        chunks.append("<ul>")
        chunks.extend(mytype['content'])
        chunks.append("</ul>")
    if verbose_files:
        chunks.append("<pre>%s</pre>" % verbose_files)
    return "".join(chunks)
def tmpl_bibdoc_filelist(self, ln, versions=[], imageurl='', recid='', docname=''):
    """Display every version of one document of a record.

    Parameters:
    - 'ln' *string* - The language to display the interface in
    - 'versions' *array* - The different versions to display, each record in the format:
         - 'version' *string* - The version
         - 'content' *string* - The HTML code produced by tmpl_bibdocfile_filelist, for the right file
         - 'previous' *bool* - If the file has previous versions
    - 'imageurl' *string* - The URL to the file image
    - 'recid' *int* - The record id
    - 'docname' *string* - The name of the document

    (The mutable default for 'versions' is harmless here: the list is only
    iterated, never mutated.)
    """
    # load the right message language
    _ = gettext_set_language(ln)
    out = """<table border="0" cellspacing="1" class="searchbox">
<tr>
<td align="left" colspan="2" class="portalboxheader">
<img src='%(imageurl)s' border="0" /> %(docname)s
</td>
</tr>""" % {
        'imageurl' : imageurl,
        'docname' : docname
    }
    for version in versions:
        versiontext = ""
        if version['previous']:
            # Older versions exist: link to the page listing all of them.
            versiontext = """<br />(%(see)s <a href="%(siteurl)s/record/%(recID)s/files/?docname=%(docname)s&version=all%(ln_link)s">%(previous)s</a>)""" % {
                'see' : _("see"),
                'siteurl' : CFG_SITE_URL,
                'docname' : urllib.quote(docname),
                'recID': recid,
                'previous': _("previous"),
                'ln_link': (ln != CFG_SITE_LANG and '&ln=' + ln) or '',
            }
        out += """<tr>
<td class="portalboxheader">
<font size="-2">%(version)s %(ver)s%(text)s</font>
</td>
<td>
<table>
""" % {
            'version' : _("version"),
            'ver' : version['version'],
            'text' : versiontext,
        }
        out += "".join(version['content'])
        out += "</table></td></tr>"
    out += "</table>"
    return out
def tmpl_bibdocfile_filelist(self, ln, recid, name, version, md, superformat, subformat, nice_size, description):
    """Render one file as a table row of the file list.

    Parameters:
    - 'ln' *string* - The language to display the interface in
    - 'recid' *int* - The id of the record
    - 'name' *string* - The name of the file
    - 'version' *string* - The version
    - 'md' *datetime* - the modification date
    - 'superformat' *string* - The display superformat
    - 'subformat' *string* - The display subformat
    - 'nice_size' *string* - The nice_size of the file
    - 'description' *string* - The description that might have been associated
      to the particular file
    """
    # load the right message language
    _ = gettext_set_language(ln)
    fullname = '%s%s' % (name, superformat)
    urlbase = '%s/record/%s/files/%s' % (CFG_SITE_URL, recid, fullname)
    urlargd = {'version' : version}
    link_label = fullname
    if subformat:
        # The subformat is carried both as a URL argument and in the label.
        urlargd['subformat'] = subformat
        link_label += ' (%s)' % subformat
    link = create_html_link(urlbase, urlargd, cgi.escape(link_label))
    return """<tr>
<td valign="top">
<small>%(link)s</small>
</td>
<td valign="top">
<font size="-2" color="green">[%(nice_size)s]</font>
<font size="-2"><em>%(md)s</em>
</td>
<td valign="top"><em>%(description)s</em></td>
</tr>""" % {
        'link' : link,
        'nice_size' : nice_size,
        'md' : convert_datestruct_to_dategui(md.timetuple(), ln),
        'description' : cgi.escape(description),
    }
def tmpl_submit_summary (self, ln, values):
    """
    Render the summary table shown for the submit procedure.

    Parameters:
    - 'ln' *string* - interface language
    - 'values' *array* - one dict per submitted field, with keys:
      'name' (field name), 'mandatory' (bool), 'value' (inserted value)
      and 'page' (submit page on which the field is entered)
    """
    # load the right message language
    _ = gettext_set_language(ln)
    chunks = ["""<body style="background-image: url(%(images)s/header_background.gif);"><table border="0">""" %
              {'images' : CFG_SITE_URL + '/img'}]
    for field in values:
        # mandatory fields are highlighted in red
        if field['mandatory']:
            label_color = "red"
        else:
            label_color = ""
        # the link jumps the opener window back to the page holding the field
        chunks.append("""<tr>
<td align="right">
<small>
<a href='' onclick="window.opener.document.forms[0].curpage.value='%(page)s';window.opener.document.forms[0].action='/submit?ln=%(ln)s';window.opener.document.forms[0].submit();return false;">
<font color="%(color)s">%(name)s</font>
</a>
</small>
</td>
<td>
<i><small><font color="black">%(value)s</font></small></i>
</td>
</tr>""" % {
            'color' : label_color,
            'name' : field['name'],
            'value' : field['value'],
            'page' : field['page'],
            'ln' : ln
        })
    chunks.append("</table>")
    return "".join(chunks)
def tmpl_yoursubmissions(self, ln, order, doctypes, submissions):
    """
    Displays the list of the user's submissions.
    Parameters:
    - 'ln' *string* - The language to display the interface in
    - 'order' *string* - The ordering parameter
    - 'doctypes' *array* - All the available doctypes, in structures:
    - 'id' *string* - The doctype id
    - 'name' *string* - The display name of the doctype
    - 'selected' *bool* - If the doctype should be selected
    - 'submissions' *array* - The available submissions, in structures:
    - 'docname' *string* - The document name
    - 'actname' *string* - The action name
    - 'status' *string* - The status of the document
    - 'cdate' *string* - Creation date
    - 'mdate' *string* - Modification date
    - 'id' *string* - The id of the submission
    - 'reference' *string* - The display name of the doctype
    - 'pending' *bool* - If the submission is pending
    - 'act' *string* - The action code
    - 'doctype' *string* - The doctype code

    NOTE(review): submissions are assumed to arrive already grouped by
    'docname' — a new per-docname table is opened whenever the docname
    changes between consecutive entries.
    """
    # load the right message language
    _ = gettext_set_language(ln)
    out = ""
    # Form header.  The hidden fields carry the sort order back to this
    # page and, on deletion, the id/doctype/action of the deleted entry
    # (filled in by the JS attached to the bin icon below).
    out += """
<br />
<form action="">
<input type="hidden" value="%(order)s" name="order" />
<input type="hidden" name="deletedId" />
<input type="hidden" name="deletedDoctype" />
<input type="hidden" name="deletedAction" />
<input type="hidden" name="ln" value="%(ln)s"/>
<table class="searchbox" width="100%%" summary="" >
<tr>
<th class="portalboxheader">%(for)s
<select name="doctype" onchange="document.forms[0].submit();">
<option value="">%(alltype)s</option>
""" % {
        'order' : order,
        'for' : _("For"),
        'alltype' : _("all types of document"),
        'ln' : ln,
    }
    # doctype filter drop-down; changing it resubmits the form
    for doctype in doctypes:
        out += """<option value="%(id)s" %(sel)s>%(name)s</option>""" % {
            'id' : doctype['id'],
            'name' : doctype['name'],
            'sel' : doctype['selected'] and "selected=\"selected\"" or ""
        }
    out += """ </select>
</th>
</tr>
<tr>
<td class="portalboxbody">
<table>
<tr>
<td></td>
</tr>
"""
    num = 0          # row counter, used for alternating row colours
    docname = ""     # docname of the current group
    for submission in submissions:
        # open a new table (with sortable column headers) for each docname
        if submission['docname'] != docname:
            docname = submission['docname']
            # each column header carries up/down arrows that set the
            # 'order' hidden field and resubmit the form
            out += """</table>
<br/> <br/><h3>%(docname)s</h3>
<table border="0" class="searchbox" align="left" width="100%%">
<tr>
<th class="headerselected">%(action)s
<a href='' onclick='document.forms[0].order.value="actiondown";document.forms[0].submit();return false;'><img src="%(images)s/smalldown.gif" alt="down" border="0" /></a>
<a href='' onclick='document.forms[0].order.value="actionup";document.forms[0].submit();return false;'><img src="%(images)s/smallup.gif" alt="up" border="0" /></a>
</th>
<th class="headerselected">%(status)s
<a href='' onclick='document.forms[0].order.value="statusdown";document.forms[0].submit();return false;'><img src="%(images)s/smalldown.gif" alt="down" border="0" /></a>
<a href='' onclick='document.forms[0].order.value="statusup";document.forms[0].submit();return false;'><img src="%(images)s/smallup.gif" alt="up" border="0" /></a>
</th>
<th class="headerselected">%(id)s</th>
<th class="headerselected">%(reference)s
<a href='' onclick='document.forms[0].order.value="refdown";document.forms[0].submit();return false;'><img src="%(images)s/smalldown.gif" alt="down" border="0" /></a>
<a href='' onclick='document.forms[0].order.value="refup";document.forms[0].submit();return false;'><img src="%(images)s/smallup.gif" alt="up" border="0" /></a>
</th>
<th class="headerselected">%(first)s
<a href='' onclick='document.forms[0].order.value="cddown";document.forms[0].submit();return false;'><img src="%(images)s/smalldown.gif" alt="down" border="0" /></a>
<a href='' onclick='document.forms[0].order.value="cdup";document.forms[0].submit();return false;'><img src="%(images)s/smallup.gif" alt="up" border="0" /></a>
</th>
<th class="headerselected">%(last)s
<a href='' onclick='document.forms[0].order.value="mddown";document.forms[0].submit();return false;'><img src="%(images)s/smalldown.gif" alt="down" border="0" /></a>
<a href='' onclick='document.forms[0].order.value="mdup";document.forms[0].submit();return false;'><img src="%(images)s/smallup.gif" alt="up" border="0" /></a>
</th>
</tr>
""" % {
                'docname' : submission['docname'],
                'action' : _("Action"),
                'status' : _("Status"),
                'id' : _("Subm.No."),
                'reference' : _("Reference"),
                'images' : CFG_SITE_URL + '/img',
                'first' : _("First access"),
                'last' : _("Last access"),
            }
        if submission['pending']:
            # pending submissions: link back into the submission via
            # submit/direct, plus a delete icon with a JS confirmation
            # that fills the hidden deleted* fields and resubmits
            idtext = """<a href="submit/direct?access=%(id)s&sub=%(action)s%(doctype)s%(ln_link)s">%(id)s</a>
<a onclick='if (confirm("%(sure)s")){document.forms[0].deletedId.value="%(id)s";document.forms[0].deletedDoctype.value="%(doctype)s";document.forms[0].deletedAction.value="%(action)s";document.forms[0].submit();return true;}else{return false;}' href=''><img src="%(images)s/smallbin.gif" border="0" alt='%(delete)s' /></a>
""" % {
                'images' : CFG_SITE_URL + '/img',
                'id' : submission['id'],
                'action' : submission['act'],
                'doctype' : submission['doctype'],
                'sure' : _("Are you sure you want to delete this submission?"),
                'delete' : _("Delete submission %(x_id)s in %(x_docname)s") % {
                    'x_id' : str(submission['id']),
                    'x_docname' : str(submission['docname'])
                },
                'ln_link': (ln != CFG_SITE_LANG and '&ln=' + ln) or ''
            }
        else:
            # completed submissions show the bare id
            idtext = submission['id']
        # alternate row background colours
        if operator.mod(num,2) == 0:
            color = "#e2e2e2"
        else:
            color = "#f0f0f0"
        if submission['reference']:
            reference = submission['reference']
            if not submission['pending']:
                # record was integrated, so propose link:
                reference = create_html_link('%s/search' % CFG_SITE_URL, {
                    'ln' : ln,
                    'p' : submission['reference'],
                    'f' : 'reportnumber'
                }, submission['reference'])
        else:
            reference = """<font color="red">%s</font>""" % _("Reference not yet given")
        cdate = str(submission['cdate']).replace(" "," ")
        mdate= str(submission['mdate']).replace(" "," ")
        out += """
<tr bgcolor="%(color)s">
<td align="center" class="mycdscell">
%(actname)s
</td>
<td align="center" class="mycdscell">
%(status)s
</td>
<td class="mycdscell">
%(idtext)s
</td>
<td class="mycdscell">
%(reference)s
</td>
<td class="mycdscell">
%(cdate)s
</td>
<td class="mycdscell">
%(mdate)s
</td>
</tr>
""" % {
            'color' : color,
            'actname' : submission['actname'],
            'status' : submission['status'],
            'idtext' : idtext,
            'reference' : reference,
            'cdate' : cdate,
            'mdate' : mdate,
        }
        num += 1
    out += "</table></td></tr></table></form>"
    return out
def tmpl_yourapprovals(self, ln, referees):
    """
    Render the doctypes and categories for which the user is a referee.

    Parameters:
    - 'ln' *string* - interface language
    - 'referees' *array* - one dict per refereed doctype with keys
      'doctype' (code), 'docname' (display name) and 'categories'
      (list of {'id', 'name'} dicts, or None when the user is a
      general referee for the whole doctype)
    """
    # load the right message language
    _ = gettext_set_language(ln)
    parts = [""" <table class="searchbox" width="100%%" summary="">
<tr>
<th class="portalboxheader">%(refdocs)s</th>
</tr>
<tr>
<td class="portalboxbody">""" % {
        'refdocs' : _("Refereed Documents"),
    }]
    for doctype in referees:
        parts.append("""<ul><li><b>%(docname)s</b><ul>""" % doctype)
        if doctype['categories'] is None:
            # general referee: a single link for the whole doctype
            parts.append('''<li><a href="publiline.py?doctype=%(doctype)s%(ln_link)s">%(generalref)s</a></li>''' % {
                'docname' : doctype['docname'],
                'doctype' : doctype['doctype'],
                'generalref' : _("You are a general referee"),
                'ln_link': '&ln=' + ln})
        else:
            # one link per category the user referees
            for category in doctype['categories']:
                parts.append("""<li><a href="publiline.py?doctype=%(doctype)s&categ=%(categ)s%(ln_link)s">%(referee)s</a></li>""" % {
                    'referee' : _("You are a referee for category:") + ' ' + str(category['name']) + ' (' + str(category['id']) + ')',
                    'doctype' : doctype['doctype'],
                    'categ' : category['id'],
                    'ln_link': '&ln=' + ln})
        parts.append("</ul><br /></li></ul>")
    parts.append("</td></tr></table>")
    parts.append('''<p>To see the status of documents for which approval has been requested, click <a href=\"%(url)s/publiline.py?flow=cplx\">here</a></p>''' % {'url' : CFG_SITE_URL})
    return "".join(parts)
def tmpl_publiline_selectdoctype(self, ln, docs):
    """
    Displays the doctypes that the user can select
    Parameters:
    - 'ln' *string* - The language to display the interface in
    - 'docs' *array* - All the doctypes that the user can select:
    - 'doctype' *string* - The doctype
    - 'docname' *string* - The display name of the doctype
    """
    # load the right message language
    _ = gettext_set_language(ln)
    # BUGFIX: the markup used to emit a stray closing </small> with no
    # opening tag; the instruction line is now properly wrapped.
    out = """
<table class="searchbox" width="100%%" summary="">
<tr>
<th class="portalboxheader">%(list)s</th>
</tr>
<tr>
<td class="portalboxbody">
<small>
%(select)s:
</small>
<blockquote>""" % {
        'list' : _("List of refereed types of documents"),
        'select' : _("Select one of the following types of documents to check the documents status"),
    }
    # one link per selectable doctype; 'params' exposes both the passed
    # language and every key of the doc dict to the format string
    for doc in docs:
        params = {'ln' : ln}
        params.update(doc)
        out += '<li><a href="publiline.py?doctype=%(doctype)s&ln=%(ln)s">%(docname)s</a></li><br />' % params
    out += """</blockquote>
</td>
</tr>
</table>
<a href="publiline.py?flow=cplx&ln=%s">%s</a>""" % (ln, _("Go to specific approval workflow"))
    return out
def tmpl_publiline_selectcplxdoctype(self, ln, docs):
    """
    Render the doctype selection list for the complex approval workflow.

    Parameters:
    - 'ln' *string* - interface language
    - 'docs' *array* - selectable doctypes, each with keys 'doctype'
      (the code) and 'docname' (the display name)
    """
    # load the right message language
    _ = gettext_set_language(ln)
    header = """
<table class="searchbox" width="100%%" summary="">
<tr>
<th class="portalboxheader">%(list)s</th>
</tr>
<tr>
<td class="portalboxbody">
%(select)s:
</small>
<blockquote>""" % {
        'list' : _("List of refereed types of documents"),
        'select' : _("Select one of the following types of documents to check the documents status"),
    }
    # one link per doctype; the format mapping exposes 'ln' plus every
    # key of the doc dict (doc keys win on collision, as before)
    links = []
    for doc in docs:
        link_args = {'ln' : ln}
        link_args.update(doc)
        links.append('<li><a href="publiline.py?flow=cplx&doctype=%(doctype)s&ln=%(ln)s">%(docname)s</a></li><br />' % link_args)
    footer = """</blockquote> </td> </tr> </table> </li><br/>"""
    return header + "".join(links) + footer
def tmpl_publiline_selectcateg(self, ln, doctype, title, categories):
    """
    Displays the categories from a doctype that the user can select
    Parameters:
    - 'ln' *string* - The language to display the interface in
    - 'doctype' *string* - The doctype
    - 'title' *string* - The doctype name
    - 'categories' *array* - All the categories that the user can select:
    - 'id' *string* - The id of the category
    - 'waiting' *int* - The number of documents waiting
    - 'approved' *int* - The number of approved documents
    - 'rejected' *int* - The number of rejected documents
    """
    # load the right message language
    _ = gettext_set_language(ln)
    # Header plus a hidden GET form: clicking a category below fills in
    # the 'categ' field via JS and resubmits the form.
    out = """
<table class="searchbox" width="100%%" summary="">
<tr>
<th class="portalboxheader">%(title)s: %(list_categ)s</th>
</tr>
<tr>
<td class="portalboxbody">
%(choose_categ)s
<blockquote>
<form action="publiline.py" method="get">
<input type="hidden" name="doctype" value="%(doctype)s" />
<input type="hidden" name="categ" value="" />
<input type="hidden" name="ln" value="%(ln)s" />
</form>
<table>
<tr>
<td align="left">""" % {
        'title' : title,
        'doctype' : doctype,
        'list_categ' : _("List of refereed categories"),
        'choose_categ' : _("Please choose a category"),
        'ln' : ln,
    }
    for categ in categories:
        # total number of documents in this category
        num = categ['waiting'] + categ['approved'] + categ['rejected']
        # categories with pending documents are visually highlighted
        if categ['waiting'] != 0:
            classtext = "class=\"blocknote\""
        else:
            classtext = ""
        out += """<a href="" onclick="document.forms[0].categ.value='%(id)s';document.forms[0].submit();return false;"><small %(classtext)s>%(id)s</small></a><small> (%(num)s document(s)""" % {
            'id' : categ['id'],
            'classtext' : classtext,
            'num' : num,
        }
        # per-status counters with icons, each shown only when non-zero
        if categ['waiting'] != 0:
            out += """| %(waiting)s <img alt="%(pending)s" src="%(images)s/waiting_or.gif" border="0" />""" % {
                'waiting' : categ['waiting'],
                'pending' : _("Pending"),
                'images' : CFG_SITE_URL + '/img',
            }
        if categ['approved'] != 0:
            out += """| %(approved)s<img alt="%(approved_text)s" src="%(images)s/smchk_gr.gif" border="0" />""" % {
                'approved' : categ['approved'],
                'approved_text' : _("Approved"),
                'images' : CFG_SITE_URL + '/img',
            }
        if categ['rejected'] != 0:
            out += """| %(rejected)s<img alt="%(rejected_text)s" src="%(images)s/cross_red.gif" border="0" />""" % {
                'rejected' : categ['rejected'],
                'rejected_text' : _("Rejected"),
                'images' : CFG_SITE_URL + '/img',
            }
        out += ")</small><br />"
    # Legend ("Key") box explaining the three status icons.
    out += """
</td>
<td>
<table class="searchbox" width="100%%" summary="">
<tr>
<th class="portalboxheader">%(key)s:</th>
</tr>
<tr>
<td>
<img alt="%(pending)s" src="%(images)s/waiting_or.gif" border="0" /> %(waiting)s<br />
<img alt="%(approved)s" src="%(images)s/smchk_gr.gif" border="0" /> %(already_approved)s<br />
<img alt="%(rejected)s" src="%(images)s/cross_red.gif" border="0" /> %(rejected_text)s<br /><br />
<small class="blocknote"> </small> %(somepending)s<br />
</td>
</tr>
</table>
</td>
</tr>
</table>
</blockquote>
</td>
</tr>
</table>""" % {
        'key' : _("Key"),
        'pending' : _("Pending"),
        'images' : CFG_SITE_URL + '/img',
        'waiting' : _("Waiting for approval"),
        'approved' : _("Approved"),
        'already_approved' : _("Already approved"),
        'rejected' : _("Rejected"),
        'rejected_text' : _("Rejected"),
        'somepending' : _("Some documents are pending."),
    }
    return out
def tmpl_publiline_selectcplxcateg(self, ln, doctype, title, types):
    """
    Displays the categories from a doctype that the user can select
    (complex approval workflow variant).
    Parameters:
    - 'ln' *string* - The language to display the interface in
    - 'doctype' *string* - The doctype
    - 'title' *string* - The doctype name
    - 'types' *dict* - categories grouped by approval type; only the
      'RRP' key is rendered below.  Each category carries:
    - 'id' *string* - The id of the category
    - 'desc' *string* - display description of the category
    - 'waiting' *int* - The number of documents waiting
    - 'approved' *int* - The number of approved documents
    - 'rejected' *int* - The number of rejected documents
    - 'cancelled' *int* - The number of cancelled documents
    """
    # load the right message language
    _ = gettext_set_language(ln)
    out = ""
    #out = """
# <table class="searchbox" width="100%%" summary="">
# <tr>
# <th class="portalboxheader">%(title)s: %(list_type)s</th>
# </tr>
# </table><br />
# <table class="searchbox" width="100%%" summary="">
# <tr>""" % {
# 'title' : title,
# 'list_type' : _("List of specific approvals"),
# }
    # One column per approval type.  Only the refereeing (RRP) column is
    # currently active; RPB and RDA are kept commented out below.
    columns = []
    columns.append ({'apptype' : 'RRP',
                     'list_categ' : _("List of refereing categories"),
                     'id_form' : 0,
                     })
    #columns.append ({'apptype' : 'RPB',
    # 'list_categ' : _("List of publication categories"),
    # 'id_form' : 1,
    # })
    #columns.append ({'apptype' : 'RDA',
    # 'list_categ' : _("List of direct approval categories"),
    # 'id_form' : 2,
    # })
    for column in columns:
        # Column header plus one hidden GET form per column; clicking a
        # category fills in the form's 'categ' field via JS and submits it.
        out += """
<td>
<table class="searchbox" width="100%%" summary="">
<tr>
<th class="portalboxheader">%(list_categ)s</th>
</tr>
<tr>
<td class="portalboxbody">
%(choose_categ)s
<blockquote>
<form action="publiline.py" method="get">
<input type="hidden" name="flow" value="cplx" />
<input type="hidden" name="doctype" value="%(doctype)s" />
<input type="hidden" name="categ" value="" />
<input type="hidden" name="apptype" value="%(apptype)s" />
<input type="hidden" name="ln" value="%(ln)s" />
</form>
<table>
<tr>
<td align="left">""" % {
            'doctype' : doctype,
            'apptype' : column['apptype'],
            'list_categ' : column['list_categ'],
            'choose_categ' : _("Please choose a category"),
            'ln' : ln,
        }
        for categ in types[column['apptype']]:
            # total documents across all four statuses
            num = categ['waiting'] + categ['approved'] + categ['rejected'] + categ['cancelled']
            # categories with pending documents are visually highlighted
            if categ['waiting'] != 0:
                classtext = "class=\"blocknote\""
            else:
                classtext = ""
            out += """<table><tr><td width="200px">* <a href="" onclick="document.forms[%(id_form)s].categ.value='%(id)s';document.forms[%(id_form)s].submit();return false;"><small %(classtext)s>%(desc)s</small></td><td width="150px"></a><small> Total document(s) : %(num)s """ % {
                'id' : categ['id'],
                'id_form' : column['id_form'],
                'classtext' : classtext,
                'num' : num,
                'desc' : categ['desc'],
            }
            # Per-status counters with icons.  Note: unlike the simple
            # variant, these are rendered unconditionally (the zero-check
            # conditions are intentionally commented out).
            out += """<td width="100px">"""
            #if categ['waiting'] != 0:
            out += """ %(waiting)s   <img alt="%(pending)s" src="%(images)s/waiting_or.gif" border="0" /></td>""" % {
                'waiting' : categ['waiting'],
                'pending' : _("Pending"),
                'images' : CFG_SITE_URL + '/img',
            }
            out += """<td width="100px">"""
            #if categ['approved'] != 0:
            out += """ %(approved)s   <img alt="%(approved_text)s" src="%(images)s/smchk_gr.gif" border="0" /></td>""" % {
                'approved' : categ['approved'],
                'approved_text' : _("Approved"),
                'images' : CFG_SITE_URL + '/img',
            }
            out += """<td width="100px">"""
            #if categ['rejected'] != 0:
            out += """ %(rejected)s  <img alt="%(rejected_text)s" src="%(images)s/cross_red.gif" border="0" /></td>""" % {
                'rejected' : categ['rejected'],
                'rejected_text' : _("Rejected"),
                'images' : CFG_SITE_URL + '/img',
            }
            out += """<td width="100px">"""
            #if categ['cancelled'] != 0:
            out += """ %(cancelled)s  <img alt="%(cancelled_text)s" src="%(images)s/smchk_rd.gif" border="0" /></td>""" % {
                'cancelled' : categ['cancelled'],
                'cancelled_text' : _("Cancelled"),
                'images' : CFG_SITE_URL + '/img',
            }
            out += "</small></td></tr>"
        out += """
</table>
</td>
</tr>
</table>
</blockquote>
</td>
</tr>
</table>
</td>"""
    # Key
    # Legend box explaining the four status icons.
    out += """
<table class="searchbox" width="100%%" summary="">
<tr>
<th class="portalboxheader">%(key)s:</th>
</tr>
<tr>
<td>
<img alt="%(pending)s" src="%(images)s/waiting_or.gif" border="0" /> %(waiting)s<br />
<img alt="%(approved)s" src="%(images)s/smchk_gr.gif" border="0" /> %(already_approved)s<br />
<img alt="%(rejected)s" src="%(images)s/cross_red.gif" border="0" /> %(rejected_text)s<br />
<img alt="%(cancelled)s" src="%(images)s/smchk_rd.gif" border="0" /> %(cancelled_text)s<br /><br />
<small class="blocknote"> </small> %(somepending)s<br />
</td>
</tr>
</table>
</blockquote>
</td>
</tr>
</table>""" % {
        'key' : _("Key"),
        'pending' : _("Pending"),
        'images' : CFG_SITE_URL + '/img',
        'waiting' : _("Waiting for approval"),
        'approved' : _("Approved"),
        'already_approved' : _("Already approved"),
        'rejected' : _("Rejected"),
        'rejected_text' : _("Rejected"),
        'cancelled' : _("Cancelled"),
        'cancelled_text' : _("Cancelled"),
        'somepending' : _("Some documents are pending."),
    }
    return out
def tmpl_publiline_selectdocument(self, ln, doctype, title, categ, docs):
    """
    Render the list of documents in one refereed category.

    Parameters:
    - 'ln' *string* - interface language
    - 'doctype' *string* - the doctype code
    - 'title' *string* - the doctype display name
    - 'categ' *string* - the category
    - 'docs' *array* - documents, each with keys 'RN' (report number)
      and 'status' ("waiting", "rejected" or "approved"; any other
      status is silently skipped, as before)
    """
    # load the right message language
    _ = gettext_set_language(ln)
    out = """
<table class="searchbox" width="100%%" summary="">
<tr>
<th class="portalboxheader">%(title)s - %(categ)s: %(list)s</th>
</tr>
<tr>
<td class="portalboxbody">
%(choose_report)s
<blockquote>
<form action="publiline.py" method="get">
<input type="hidden" name="doctype" value="%(doctype)s" />
<input type="hidden" name="categ" value="%(categ)s" />
<input type="hidden" name="RN" value="" />
<input type="hidden" name="ln" value="%(ln)s">
</form>
<table class="searchbox">
<tr>
<th class="portalboxheader">%(report_no)s</th>
<th class="portalboxheader">%(pending)s</th>
<th class="portalboxheader">%(approved)s</th>
<th class="portalboxheader">%(rejected)s</th>
</tr>
""" % {
        'doctype' : doctype,
        'title' : title,
        'categ' : categ,
        'list' : _("List of refereed documents"),
        'choose_report' : _("Click on a report number for more information."),
        'report_no' : _("Report Number"),
        'pending' : _("Pending"),
        'approved' : _("Approved"),
        'rejected' : _("Rejected"),
        'ln': ln,
    }
    # One row template per known status: the status icon sits in the
    # matching column while the other columns stay empty.
    row_templates = {
        'waiting': """<tr>
<td align="center">
<a href="" onclick="document.forms[0].RN.value='%(rn)s';document.forms[0].submit();return false;">%(rn)s</a>
</td>
<td align="center">
<img alt="check" src="%(images)s/waiting_or.gif" />
</td>
<td align="center"> </td>
<td align="center"> </td>
</tr>
""",
        'rejected': """<tr>
<td align="center">
<a href="" onclick="document.forms[0].RN.value='%(rn)s';document.forms[0].submit();return false;">%(rn)s</a>
</td>
<td align="center"> </td>
<td align="center"> </td>
<td align="center"><img alt="check" src="%(images)s/cross_red.gif" /></td>
</tr>
""",
        'approved': """<tr>
<td align="center">
<a href="" onclick="document.forms[0].RN.value='%(rn)s';document.forms[0].submit();return false;">%(rn)s</a>
</td>
<td align="center"> </td>
<td align="center"><img alt="check" src="%(images)s/smchk_gr.gif" /></td>
<td align="center"> </td>
</tr>
""",
    }
    for doc in docs:
        row = row_templates.get(doc['status'])
        if row is not None:
            out += row % {
                'rn' : doc['RN'],
                'images' : CFG_SITE_URL + '/img',
            }
    out += """ </table>
</blockquote>
</td>
</tr>
</table>"""
    return out
def tmpl_publiline_selectcplxdocument(self, ln, doctype, title, categ, categname, docs, apptype):
    """
    Displays the documents that the user can select in the specified category
    (complex approval workflow variant).
    Parameters:
    - 'ln' *string* - The language to display the interface in
    - 'doctype' *string* - The doctype
    - 'title' *string* - The doctype name
    - 'categ' *string* - the category
    - 'categname' *string* - display name of the category
    - 'docs' *array* - All the categories that the user can select:
    - 'RN' *string* - The id of the document
    - 'status' *string* - The status of the document
    - 'apptype' *string* - the approval type ('RRP', 'RPB' or 'RDA')
    """
    # load the right message language
    _ = gettext_set_language(ln)
    # pick the list title matching the approval type
    listtype = ""
    if apptype == "RRP":
        listtype = _("List of refereed documents")
    elif apptype == "RPB":
        listtype = _("List of publication documents")
    elif apptype == "RDA":
        listtype = _("List of direct approval documents")
    # Header plus a hidden GET form: clicking a report number below fills
    # in the 'RN' field via JS and resubmits the form.
    out = """
<table class="searchbox" width="100%%" summary="">
<tr>
<th class="portalboxheader">%(title)s - %(categname)s: %(list)s</th>
</tr>
<tr>
<td class="portalboxbody">
%(choose_report)s
<blockquote>
<form action="publiline.py" method="get">
<input type="hidden" name="flow" value="cplx" />
<input type="hidden" name="doctype" value="%(doctype)s" />
<input type="hidden" name="categ" value="%(categ)s" />
<input type="hidden" name="RN" value="" />
<input type="hidden" name="apptype" value="%(apptype)s" />
<input type="hidden" name="ln" value="%(ln)s" />
</form>
<table class="searchbox">
<tr>
<th class="portalboxheader">%(report_no)s</th>
<th class="portalboxheader">%(pending)s</th>
<th class="portalboxheader">%(approved)s</th>
<th class="portalboxheader">%(rejected)s</th>
<th class="portalboxheader">%(cancelled)s</th>
</tr>
""" % {
        'doctype' : doctype,
        'title' : title,
        'categname' : categname,
        'categ' : categ,
        'list' : listtype,
        'choose_report' : _("Click on a report number for more information."),
        'apptype' : apptype,
        'report_no' : _("Report Number"),
        'pending' : _("Pending"),
        'approved' : _("Approved"),
        'rejected' : _("Rejected"),
        'cancelled' : _("Cancelled"),
        'ln': ln,
    }
    # One row per document; the status icon lands in the matching column
    # (pending/approved/rejected/cancelled), other columns stay empty.
    # Documents with any other status are silently skipped.
    for doc in docs:
        status = doc ['status']
        if status == "waiting":
            out += """<tr>
<td align="center">
<a href="" onclick="document.forms[0].RN.value='%(rn)s';document.forms[0].submit();return false;">%(rn)s</a>
</td>
<td align="center"><img alt="check" src="%(images)s/waiting_or.gif" /></td>
<td align="center"> </td>
<td align="center"> </td>
<td align="center"> </td>
</tr>
""" % {
                'rn' : doc['RN'],
                'images' : CFG_SITE_URL + '/img',
            }
        elif status == "rejected":
            out += """<tr>
<td align="center">
<a href="" onclick="document.forms[0].RN.value='%(rn)s';document.forms[0].submit();return false;">%(rn)s</a>
</td>
<td align="center"> </td>
<td align="center"> </td>
<td align="center"><img alt="check" src="%(images)s/cross_red.gif" /></td>
<td align="center"> </td>
</tr>
""" % {
                'rn' : doc['RN'],
                'images' : CFG_SITE_URL + '/img',
            }
        elif status == "approved":
            out += """<tr>
<td align="center">
<a href="" onclick="document.forms[0].RN.value='%(rn)s';document.forms[0].submit();return false;">%(rn)s</a>
</td>
<td align="center"> </td>
<td align="center"><img alt="check" src="%(images)s/smchk_gr.gif" /></td>
<td align="center"> </td>
<td align="center"> </td>
</tr>
""" % {
                'rn' : doc['RN'],
                'images' : CFG_SITE_URL + '/img',
            }
        elif status == "cancelled":
            out += """<tr>
<td align="center">
<a href="" onclick="document.forms[0].RN.value='%(rn)s';document.forms[0].submit();return false;">%(rn)s</a>
</td>
<td align="center"> </td>
<td align="center"> </td>
<td align="center"> </td>
<td align="center"><img alt="check" src="%(images)s/smchk_rd.gif" /></td>
</tr>
""" % {
                'rn' : doc['RN'],
                'images' : CFG_SITE_URL + '/img',
            }
    out += """ </table>
</blockquote>
</td>
</tr>
</table>"""
    return out
def tmpl_publiline_displaydoc(self, ln, doctype, docname, categ, rn, status, dFirstReq, dLastReq, dAction, access, confirm_send, auth_code, auth_message, authors, title, sysno, newrn, note):
    """
    Displays the categories from a doctype that the user can select
    Parameters:
    - 'ln' *string* - The language to display the interface in
    - 'doctype' *string* - The doctype
    - 'docname' *string* - The doctype name
    - 'categ' *string* - the category
    - 'rn' *string* - The document RN (id number)
    - 'status' *string* - The status of the document
    - 'dFirstReq' *string* - The date of the first approval request
    - 'dLastReq' *string* - The date of the last approval request
    - 'dAction' *string* - The date of the last action (approval or rejection)
    - 'access' *string* - query-string fragment appended to the approve.py
      link shown to referees (see the Approve/Reject button below)
    - 'confirm_send' *bool* - must display a confirmation message about sending approval email
    - 'auth_code' *bool* - authorised to referee this document
      (0 means authorised, judging by the comparisons below)
    - 'auth_message' *string* - ??? (unused in this method)
    - 'authors' *string* - the authors of the submission
    - 'title' *string* - the title of the submission
    - 'sysno' *string* - the unique database id for the record
    - 'newrn' *string* - the record number assigned to the submission
    - 'note' *string* - Note about the approval request.
    """
    # load the right message language
    _ = gettext_set_language(ln)
    # status icon shown next to the report number
    if status == "waiting":
        image = """<img src="%s/waiting_or.gif" alt="" align="right" />""" % (CFG_SITE_URL + '/img')
    elif status == "approved":
        image = """<img src="%s/smchk_gr.gif" alt="" align="right" />""" % (CFG_SITE_URL + '/img')
    elif status == "rejected":
        image = """<img src="%s/iconcross.gif" alt="" align="right" />""" % (CFG_SITE_URL + '/img')
    else:
        image = ""
    out = """
<table class="searchbox" summary="">
<tr>
<th class="portalboxheader">%(image)s %(rn)s</th>
</tr>
<tr>
<td class="portalboxbody">""" % {
        'image' : image,
        'rn' : rn,
    }
    if confirm_send:
        out += """<i><strong class="headline">%(requestsent)s</strong></i><br /><br />""" % {
            'requestsent' : _("Your request has been sent to the referee."),
        }
    # hidden form used by the "Send Again" button further down
    out += """<form action="publiline.py">
<input type="hidden" name="RN" value="%(rn)s" />
<input type="hidden" name="categ" value="%(categ)s" />
<input type="hidden" name="doctype" value="%(doctype)s" />
<input type="hidden" name="ln" value="%(ln)s" />
<small>""" % {
        'rn' : rn,
        'categ' : categ,
        'doctype' : doctype,
        'ln' : ln,
    }
    # optional metadata: title ("unknown" is the sentinel for no title),
    # authors, link to the integrated record, approval note
    if title != "unknown":
        out += """<strong class="headline">%(title_text)s</strong>%(title)s<br /><br />""" % {
            'title_text' : _("Title:"),
            'title' : title,
        }
    if authors != "":
        out += """<strong class="headline">%(author_text)s</strong>%(authors)s<br /><br />""" % {
            'author_text' : _("Author:"),
            'authors' : authors,
        }
    if sysno != "":
        out += """<strong class="headline">%(more)s</strong>
<a href="%(siteurl)s/record/%(sysno)s?ln=%(ln)s">%(click)s</a>
<br /><br />
""" % {
            'more' : _("More information:"),
            'click' : _("Click here"),
            'siteurl' : CFG_SITE_URL,
            'sysno' : sysno,
            'ln' : ln,
        }
    # the approval note is only shown to authorised referees (auth_code 0)
    if note and auth_code == 0:
        out += """<table><tr><td valign="top"><strong class="headline">%(note_text)s</strong></td><td><em>%(note)s</em></td></tr></table>""" % {
            'note_text' : _("Approval note:"),
            'note' : cgi.escape(note).replace('\n', '<br />'),
        }
    # status-dependent section; "0000-00-00 00:00:00" is the sentinel for
    # "no approval email sent yet", in which case dFirstReq is shown instead
    if status == "waiting":
        out += _("This document is still %(x_fmt_open)swaiting for approval%(x_fmt_close)s.") % {'x_fmt_open': '<strong class="headline">',
                                                                                                 'x_fmt_close': '</strong>'}
        out += "<br /><br />"
        out += _("It was first sent for approval on:") + ' <strong class="headline">' + str(dFirstReq) + '</strong><br />'
        if dLastReq == "0000-00-00 00:00:00":
            out += _("Last approval email was sent on:") + ' <strong class="headline">' + str(dFirstReq) + '</strong><br />'
        else:
            out += _("Last approval email was sent on:") + ' <strong class="headline">' + str(dLastReq) + '</strong><br />'
        # "Send Again" resubmits the hidden form above, after a JS confirm
        out += "<br />" + _("You can send an approval request email again by clicking the following button:") + " <br />" +\
               """<input class="adminbutton" type="submit" name="send" value="%(send)s" onclick="return confirm('%(warning)s')" />""" % {
                   'send' : _("Send Again"),
                   'warning' : _("WARNING! Upon confirmation, an email will be sent to the referee.")
               }
        if auth_code == 0:
            # referees get a button that redirects to approve.py with the
            # document's access key in the query string
            out += "<br />" + _("As a referee for this document, you may click this button to approve or reject it") + ":<br />" +\
                   """<input class="adminbutton" type="submit" name="approval" value="%(approve)s" onclick="window.location='approve.py?%(access)s&ln=%(ln)s';return false;" />""" % {
                       'approve' : _("Approve/Reject"),
                       'access' : access,
                       'ln' : ln
                   }
    if status == "approved":
        out += _("This document has been %(x_fmt_open)sapproved%(x_fmt_close)s.") % {'x_fmt_open': '<strong class="headline">', 'x_fmt_close': '</strong>'}
        out += '<br />' + _("Its approved reference is:") + ' <strong class="headline">' + str(newrn) + '</strong><br /><br />'
        out += _("It was first sent for approval on:") + ' <strong class="headline">' + str(dFirstReq) + '</strong><br />'
        if dLastReq == "0000-00-00 00:00:00":
            out += _("Last approval email was sent on:") + ' <strong class="headline">' + str(dFirstReq) + '</strong><br />'
        else:
            out += _("Last approval email was sent on:") + ' <strong class="headline">' + str(dLastReq) + '</strong><br />' +\
                   _("It was approved on:") + ' <strong class="headline">' + str(dAction) + '</strong><br />'
    if status == "rejected":
        out += _("This document has been %(x_fmt_open)srejected%(x_fmt_close)s.") % {'x_fmt_open': '<strong class="headline">', 'x_fmt_close': '</strong>'}
        out += "<br /><br />"
        out += _("It was first sent for approval on:") + ' <strong class="headline">' + str(dFirstReq) +'</strong><br />'
        if dLastReq == "0000-00-00 00:00:00":
            out += _("Last approval email was sent on:") + ' <strong class="headline">' + str(dFirstReq) + '</strong><br />'
        else:
            out += _("Last approval email was sent on:") + ' <strong class="headline">' + str(dLastReq) +'</strong><br />'
        out += _("It was rejected on:") + ' <strong class="headline">' + str(dAction) + '</strong><br />'
    out += """ </small></form>
<br />
</td>
</tr>
</table>"""
    return out
def tmpl_publiline_displaycplxdoc(self, ln, doctype, docname, categ, rn, apptype, status, dates, isPubCom, isEdBoard, isReferee, isProjectLeader, isAuthor, authors, title, sysno, newrn):
# load the right message language
_ = gettext_set_language(ln)
if status == "waiting":
image = """<img src="%s/waiting_or.gif" alt="" align="right" />""" % (CFG_SITE_URL + '/img')
elif status == "approved":
image = """<img src="%s/smchk_gr.gif" alt="" align="right" />""" % (CFG_SITE_URL + '/img')
elif status == "rejected":
image = """<img src="%s/iconcross.gif" alt="" align="right" />""" % (CFG_SITE_URL + '/img')
elif status == "cancelled":
image = """<img src="%s/smchk_rd.gif" alt="" align="right" />""" % (CFG_SITE_URL + '/img')
else:
image = ""
out = """
<table class="searchbox" summary="">
<tr>
<th class="portalboxheader">%(image)s %(rn)s</th>
</tr>
<tr>
<td class="portalboxbody">""" % {
'image' : image,
'rn' : rn,
}
out += """<form action="publiline.py">
<input type="hidden" name="flow" value="cplx" />
<input type="hidden" name="doctype" value="%(doctype)s" />
<input type="hidden" name="categ" value="%(categ)s" />
<input type="hidden" name="RN" value="%(rn)s" />
<input type="hidden" name="apptype" value="%(apptype)s" />
<input type="hidden" name="action" value="" />
<input type="hidden" name="ln" value="%(ln)s" />
<small>""" % {
'rn' : rn,
'categ' : categ,
'doctype' : doctype,
'apptype' : apptype,
'ln': ln,
}
out += "<table><tr height='30px'><td width='120px'>"
if title != "unknown":
out += """<strong class="headline">%(title_text)s</strong></td><td>%(title)s</td></tr>""" % {
'title_text' : _("Title:"),
'title' : title,
}
out += "<tr height='30px'><td width='120px'>"
if authors != "":
out += """<strong class="headline">%(author_text)s</strong></td><td>%(authors)s</td></tr>""" % {
'author_text' : _("Author:"),
'authors' : authors,
}
out += "<tr height='30px'><td width='120px'>"
if sysno != "":
out += """<strong class="headline">%(more)s</strong>
</td><td><a href="%(siteurl)s/record/%(sysno)s?ln=%(ln)s">%(click)s</a>
</td></tr>
""" % {
'more' : _("More information:"),
'click' : _("Click here"),
'siteurl' : CFG_SITE_URL,
'sysno' : sysno,
'ln' : ln,
}
out += "</table>"
out += "<br /><br />"
if apptype == "RRP":
out += "<table><tr><td width='400px'>"
out += _("It has first been asked for refereing process on the ") + "</td><td>" + ' <strong class="headline">' + str(dates['dFirstReq']) + '</strong><br /></td></tr>'
out += "<tr><td width='400px'>"
out += _("Last request e-mail was sent to the publication committee chair on the ") + "</td><td>" + ' <strong class="headline">' + str(dates['dLastReq']) + '</strong><br /></td></tr>'
if dates['dRefereeSel'] != None:
out += "<tr><td width='400px'>"
out += _("A referee has been selected by the publication committee on the ") + "</td><td>" + ' <strong class="headline">' + str(dates['dRefereeSel']) + '</strong><br /></td></tr>'
else:
out += "<tr><td width='400px'>"
out += _("No referee has been selected yet.") + "</td><td>"
if (status != "cancelled") and (isPubCom == 0):
out += displaycplxdoc_displayauthaction (action="RefereeSel", linkText=_("Select a referee"))
out += '<br /></td></tr>'
if dates['dRefereeRecom'] != None:
out += "<tr><td width='400px'>"
out += _("The referee has sent his final recommendations to the publication committee on the ") + "</td><td>" + ' <strong class="headline">' + str(dates['dRefereeRecom']) + '</strong><br /></td></tr>'
else:
out += "<tr><td width='400px'>"
out += _("No recommendation from the referee yet.") + "</td><td>"
if (status != "cancelled") and (dates['dRefereeSel'] != None) and (isReferee == 0):
out += displaycplxdoc_displayauthaction (action="RefereeRecom", linkText=_("Send a recommendation"))
out += '<br /></td></tr>'
if dates['dPubComRecom'] != None:
out += "<tr><td width='400px'>"
out += _("The publication committee has sent his final recommendations to the project leader on the ") + "</td><td>" + ' <strong class="headline">' + str(dates['dPubComRecom']) + '</strong><br /></td></tr>'
else:
out += "<tr><td width='400px'>"
out += _("No recommendation from the publication committee yet.") + "</td><td>"
if (status != "cancelled") and (dates['dRefereeRecom'] != None) and (isPubCom == 0):
out += displaycplxdoc_displayauthaction (action="PubComRecom", linkText=_("Send a recommendation"))
out += '<br /></td></tr>'
if status == "cancelled":
out += "<tr><td width='400px'>"
out += _("It has been cancelled by the author on the ") + "</td><td>" + ' <strong class="headline">' + str(dates['dProjectLeaderAction']) + '</strong><br /></td></tr>'
elif dates['dProjectLeaderAction'] != None:
if status == "approved":
out += "<tr><td width='400px'>"
out += _("It has been approved by the project leader on the ") + "</td><td>" + ' <strong class="headline">' + str(dates['dProjectLeaderAction']) + '</strong><br /></td></tr>'
elif status == "rejected":
out += "<tr><td width='400px'>"
out += _("It has been rejected by the project leader on the ") + "</td><td>" + ' <strong class="headline">' + str(dates['dProjectLeaderAction']) + '</strong><br /></td></tr>'
else:
out += "<tr><td width='400px'>"
out += _("No final decision taken yet.") + "</td><td>"
if (dates['dPubComRecom'] != None) and (isProjectLeader == 0):
out += displaycplxdoc_displayauthaction (action="ProjectLeaderDecision", linkText=_("Take a decision"))
if isAuthor == 0:
out += displaycplxdoc_displayauthaction (action="AuthorCancel", linkText=_("Cancel"))
out += '<br /></table>'
elif apptype == "RPB":
out += _("It has first been asked for refereing process on the ") + ' <strong class="headline">' + str(dates['dFirstReq']) + '</strong><br />'
out += _("Last request e-mail was sent to the publication committee chair on the ") + ' <strong class="headline">' + str(dates['dLastReq']) + '</strong><br />'
if dates['dEdBoardSel'] != None:
out += _("An editorial board has been selected by the publication committee on the ") + ' <strong class="headline">' + str(dates['dEdBoardSel']) + '</strong>'
if (status != "cancelled") and (isEdBoard == 0):
out += displaycplxdoc_displayauthaction (action="AddAuthorList", linkText=_("Add an author list"))
out += '<br />'
else:
out += _("No editorial board has been selected yet.")
if (status != "cancelled") and (isPubCom == 0):
out += displaycplxdoc_displayauthaction (action="EdBoardSel", linkText=_("Select an editorial board"))
out += '<br />'
if dates['dRefereeSel'] != None:
out += _("A referee has been selected by the editorial board on the ") + ' <strong class="headline">' + str(dates['dRefereeSel']) + '</strong><br />'
else:
out += _("No referee has been selected yet.")
if (status != "cancelled") and (dates['dEdBoardSel'] != None) and (isEdBoard == 0):
out += displaycplxdoc_displayauthaction (action="RefereeSel", linkText=_("Select a referee"))
out += '<br />'
if dates['dRefereeRecom'] != None:
out += _("The referee has sent his final recommendations to the editorial board on the ") + ' <strong class="headline">' + str(dates['dRefereeRecom']) + '</strong><br />'
else:
out += _("No recommendation from the referee yet.")
if (status != "cancelled") and (dates['dRefereeSel'] != None) and (isReferee == 0):
out += displaycplxdoc_displayauthaction (action="RefereeRecom", linkText=_("Send a recommendation"))
out += '<br />'
if dates['dEdBoardRecom'] != None:
out += _("The editorial board has sent his final recommendations to the publication committee on the ") + ' <strong class="headline">' + str(dates['dRefereeRecom']) + '</strong><br />'
else:
out += _("No recommendation from the editorial board yet.")
if (status != "cancelled") and (dates['dRefereeRecom'] != None) and (isEdBoard == 0):
out += displaycplxdoc_displayauthaction (action="EdBoardRecom", linkText=_("Send a recommendation"))
out += '<br />'
if dates['dPubComRecom'] != None:
out += _("The publication committee has sent his final recommendations to the project leader on the ") + ' <strong class="headline">' + str(dates['dPubComRecom']) + '</strong><br />'
else:
out += _("No recommendation from the publication committee yet.")
if (status != "cancelled") and (dates['dEdBoardRecom'] != None) and (isPubCom == 0):
out += displaycplxdoc_displayauthaction (action="PubComRecom", linkText=_("Send a recommendation"))
out += '<br />'
if status == "cancelled":
out += _("It has been cancelled by the author on the ") + ' <strong class="headline">' + str(dates['dProjectLeaderAction']) + '</strong><br />'
elif dates['dProjectLeaderAction'] != None:
if status == "approved":
out += _("It has been approved by the project leader on the ") + ' <strong class="headline">' + str(dates['dProjectLeaderAction']) + '</strong><br />'
elif status == "rejected":
out += _("It has been rejected by the project leader on the ") + ' <strong class="headline">' + str(dates['dProjectLeaderAction']) + '</strong><br />'
else:
out += _("No final decision taken yet.")
if (dates['dPubComRecom'] != None) and (isProjectLeader == 0):
out += displaycplxdoc_displayauthaction (action="ProjectLeaderDecision", linkText=_("Take a decision"))
if isAuthor == 0:
out += displaycplxdoc_displayauthaction (action="AuthorCancel", linkText=_("Cancel"))
out += '<br />'
elif apptype == "RDA":
out += _("It has first been asked for refereing process on the ") + ' <strong class="headline">' + str(dates['dFirstReq']) + '</strong><br />'
out += _("Last request e-mail was sent to the project leader on the ") + ' <strong class="headline">' + str(dates['dLastReq']) + '</strong><br />'
if status == "cancelled":
out += _("It has been cancelled by the author on the ") + ' <strong class="headline">' + str(dates['dProjectLeaderAction']) + '</strong><br />'
elif dates['dProjectLeaderAction'] != None:
if status == "approved":
out += _("It has been approved by the project leader on the ") + ' <strong class="headline">' + str(dates['dProjectLeaderAction']) + '</strong><br />'
elif status == "rejected":
out += _("It has been rejected by the project leader on the ") + ' <strong class="headline">' + str(dates['dProjectLeaderAction']) + '</strong><br />'
else:
out += _("No final decision taken yet.")
if isProjectLeader == 0:
out += displaycplxdoc_displayauthaction (action="ProjectLeaderDecision", linkText=_("Take a decision"))
if isAuthor == 0:
out += displaycplxdoc_displayauthaction (action="AuthorCancel", linkText=_("Cancel"))
out += '<br />'
out += """ </small></form>
<br />
</td>
</tr>
</table>"""
return out
def tmpl_publiline_displaycplxdocitem(self,
doctype, categ, rn, apptype, action,
comments,
(user_can_view_comments, user_can_add_comment, user_can_delete_comment),
selected_category,
selected_topic, selected_group_id, comment_subject, comment_body, ln):
_ = gettext_set_language(ln)
if comments and user_can_view_comments:
comments_text = ''
comments_overview = '<ul>'
for comment in comments:
(cmt_uid, cmt_nickname, cmt_title, cmt_body, cmt_date, cmt_priority, cmtid) = comment
comments_overview += '<li><a href="#%s">%s - %s</a> (%s)</li>' % (cmtid, cmt_nickname, cmt_title, convert_datetext_to_dategui (cmt_date))
comments_text += """
<table class="bskbasket">
<thead class="bskbasketheader">
<tr><td class="bsktitle"><a name="%s"></a>%s - %s (%s)</td><td><a href=%s/publiline.py?flow=cplx&doctype=%s&apptype=%s&categ=%s&RN=%s&reply=true&commentId=%s&ln=%s#add_comment>Reply</a></td><td><a href="#top">Top</a></td></tr>
</thead>
<tbody>
<tr><td colspan="2">%s</td></tr>
</tbody>
</table>""" % (cmtid, cmt_nickname, cmt_title, convert_datetext_to_dategui (cmt_date), CFG_SITE_URL, doctype, apptype, categ, rn, cmt_uid, ln, email_quoted_txt2html(cmt_body))
comments_overview += '</ul>'
else:
comments_text = ''
comments_overview = 'None.'
body = ''
if user_can_view_comments:
body += """<h4>%(comments_label)s</h4>"""
if user_can_view_comments:
body += """%(comments)s"""
if user_can_add_comment:
validation = """
<input type="hidden" name="validate" value="go" />
<input type="submit" class="formbutton" value="%(button_label)s" />""" % {'button_label': _("Add Comment")}
body += self.tmpl_publiline_displaywritecomment (doctype, categ, rn, apptype, action, _("Add Comment"), comment_subject, validation, comment_body, ln)
body %= {
'comments_label': _("Comments"),
'action': action,
'button_label': _("Write a comment"),
'comments': comments_text}
content = '<br />'
out = """
<table class="bskbasket">
<thead class="bskbasketheader">
<tr>
<td class="bsktitle">
<a name="top"></a>
<h4>%(comments_overview_label)s</h4>
%(comments_overview)s
</td>
<td class="bskcmtcol"></td>
</tr>
</thead>
<tbody>
<tr>
<td colspan="2" style="padding: 5px;">
%(body)s
</td>
</tr>
</tbody>
</table>""" % {
'comments_overview_label' : _('Comments overview'),
'comments_overview' : comments_overview,
'body' : body,}
return out
def tmpl_publiline_displaywritecomment(self, doctype, categ, rn, apptype, action, write_label, title, validation, reply_message, ln):
_ = gettext_set_language(ln)
return """
<div style="width:100%%%%">
<hr />
<h2>%(write_label)s</h2>
<form action="publiline.py">
<input type="hidden" name="flow" value="cplx" />
<input type="hidden" name="doctype" value="%(doctype)s" />
<input type="hidden" name="categ" value="%(categ)s" />
<input type="hidden" name="RN" value="%(rn)s" />
<input type="hidden" name="apptype" value="%(apptype)s" />
<input type="hidden" name="action" value="%(action)s" />
<input type="hidden" name="ln" value="%(ln)s" />
<p class="bsklabel">%(title_label)s:</p>
<a name="add_comment"></a>
<input type="text" name="msg_subject" size="80" value="%(title)s"/>
<p class="bsklabel">%(comment_label)s:</p>
<textarea name="msg_body" rows="20" cols="80">%(reply_message)s</textarea><br />
%(validation)s
</form>
</div>""" % {'write_label': write_label,
'title_label': _("Title"),
'title': title,
'comment_label': _("Comment"),
'rn' : rn,
'categ' : categ,
'doctype' : doctype,
'apptype' : apptype,
'action' : action,
'validation' : validation,
'reply_message' : reply_message,
'ln' : ln,
}
def tmpl_publiline_displaydocplxaction(self, ln, doctype, categ, rn, apptype, action, status, authors, title, sysno, subtitle1, email_user_pattern, stopon1, users, extrausers, stopon2, subtitle2, usersremove, stopon3, validate_btn):
# load the right message language
_ = gettext_set_language(ln)
if status == "waiting":
image = """<img src="%s/waiting_or.gif" alt="" align="right" />""" % (CFG_SITE_URL + '/img')
elif status == "approved":
image = """<img src="%s/smchk_gr.gif" alt="" align="right" />""" % (CFG_SITE_URL + '/img')
elif status == "rejected":
image = """<img src="%s/iconcross.gif" alt="" align="right" />""" % (CFG_SITE_URL + '/img')
else:
image = ""
out = """
<table class="searchbox" summary="">
<tr>
<th class="portalboxheader">%(image)s %(rn)s</th>
</tr>
<tr>
<td class="portalboxbody">
<small>""" % {
'image' : image,
'rn' : rn,
}
if title != "unknown":
out += """<strong class="headline">%(title_text)s</strong>%(title)s<br /><br />""" % {
'title_text' : _("Title:"),
'title' : title,
}
if authors != "":
out += """<strong class="headline">%(author_text)s</strong>%(authors)s<br /><br />""" % {
'author_text' : _("Author:"),
'authors' : authors,
}
if sysno != "":
out += """<strong class="headline">%(more)s</strong>
<a href="%(siteurl)s/record/%(sysno)s?ln=%(ln)s">%(click)s</a>
<br /><br />
""" % {
'more' : _("More information:"),
'click' : _("Click here"),
'siteurl' : CFG_SITE_URL,
'sysno' : sysno,
'ln' : ln,
}
out += """ </small>
<br />
</td>
</tr>
</table>"""
if ((apptype == "RRP") or (apptype == "RPB")) and ((action == "EdBoardSel") or (action == "RefereeSel")):
out += """
<table class="searchbox" summary="">
<tr>
<th class="portalboxheader">%(subtitle)s</th>
</tr>
<tr>
<td class="portalboxbody">""" % {
'subtitle' : subtitle1,
}
out += """<form action="publiline.py">
<input type="hidden" name="flow" value="cplx" />
<input type="hidden" name="doctype" value="%(doctype)s" />
<input type="hidden" name="categ" value="%(categ)s" />
<input type="hidden" name="RN" value="%(rn)s" />
<input type="hidden" name="apptype" value="%(apptype)s" />
<input type="hidden" name="action" value="%(action)s" />
<input type="hidden" name="ln" value="%(ln)s" />""" % {
'rn' : rn,
'categ' : categ,
'doctype' : doctype,
'apptype' : apptype,
'action' : action,
'ln': ln,
}
out += ' <span class="adminlabel">1. %s </span>\n' % _("search for user")
out += ' <input class="admin_wvar" type="text" name="email_user_pattern" value="%s" />\n' % (email_user_pattern, )
out += ' <input class="adminbutton" type="submit" value="%s"/>\n' % (_("search for users"), )
if (stopon1 == "") and (email_user_pattern != ""):
out += ' <br /><span class="adminlabel">2. %s </span>\n' % _("select user")
out += ' <select name="id_user" class="admin_w200">\n'
out += ' <option value="0">*** %s ***</option>\n' % _("select user")
for elem in users:
elem_id = elem[0]
email = elem[1]
out += ' <option value="%s">%s</option>\n' % (elem_id, email)
for elem in extrausers:
elem_id = elem[0]
email = elem[1]
out += ' <option value="%s">%s %s</option>\n' % (elem_id, email, _("connected"))
out += ' </select>\n'
out += ' <input class="adminbutton" type="submit" value="%s" />\n' % (_("add this user"), )
out += stopon2
elif stopon1 != "":
out += stopon1
out += """
</form>
<br />
</td>
</tr>
</table>"""
if action == "EdBoardSel":
out += """
<table class="searchbox" summary="">
<tr>
<th class="portalboxheader">%(subtitle)s</th>
</tr>
<tr>
<td class="portalboxbody">""" % {
'subtitle' : subtitle2,
}
out += """<form action="publiline.py">
<input type="hidden" name="flow" value="cplx" />
<input type="hidden" name="doctype" value="%(doctype)s" />
<input type="hidden" name="categ" value="%(categ)s" />
<input type="hidden" name="RN" value="%(rn)s" />
<input type="hidden" name="apptype" value="%(apptype)s" />
<input type="hidden" name="action" value="%(action)s" />
<input type="hidden" name="ln" value="%(ln)s" />""" % {
'rn' : rn,
'categ' : categ,
'doctype' : doctype,
'apptype' : apptype,
'action' : action,
'ln': ln,
}
out += ' <span class="adminlabel">1. %s </span>\n' % _("select user")
out += ' <select name="id_user_remove" class="admin_w200">\n'
out += ' <option value="0">*** %s ***</option>\n' % _("select user")
for elem in usersremove:
elem_id = elem[0]
email = elem[1]
out += ' <option value="%s">%s</option>\n' % (elem_id, email)
out += ' </select>\n'
out += ' <input class="adminbutton" type="submit" value="%s" />\n' % (_("remove this user"), )
out += stopon3
out += """
</form>
<br />
</td>
</tr>
</table>"""
if validate_btn != "":
out += """<form action="publiline.py">
<input type="hidden" name="flow" value="cplx" />
<input type="hidden" name="doctype" value="%(doctype)s" />
<input type="hidden" name="categ" value="%(categ)s" />
<input type="hidden" name="RN" value="%(rn)s" />
<input type="hidden" name="apptype" value="%(apptype)s" />
<input type="hidden" name="action" value="%(action)s" />
<input type="hidden" name="validate" value="go" />
<input type="hidden" name="ln" value="%(ln)s" />
<input class="adminbutton" type="submit" value="%(validate_btn)s" />
</form>""" % {
'rn' : rn,
'categ' : categ,
'doctype' : doctype,
'apptype' : apptype,
'action' : action,
'validate_btn' : validate_btn,
'ln': ln,
}
return out
def tmpl_publiline_displaycplxrecom(self, ln, doctype, categ, rn, apptype, action, status, authors, title, sysno, msg_to, msg_to_group, msg_subject):
# load the right message language
_ = gettext_set_language(ln)
if status == "waiting":
image = """<img src="%s/waiting_or.gif" alt="" align="right" />""" % (CFG_SITE_URL + '/img')
elif status == "approved":
image = """<img src="%s/smchk_gr.gif" alt="" align="right" />""" % (CFG_SITE_URL + '/img')
elif status == "rejected":
image = """<img src="%s/iconcross.gif" alt="" align="right" />""" % (CFG_SITE_URL + '/img')
else:
image = ""
out = """
<table class="searchbox" summary="">
<tr>
<th class="portalboxheader">%(image)s %(rn)s</th>
</tr>
<tr>
<td class="portalboxbody">
<small>""" % {
'image' : image,
'rn' : rn,
}
if title != "unknown":
out += """<strong class="headline">%(title_text)s</strong>%(title)s<br /><br />""" % {
'title_text' : _("Title:"),
'title' : title,
}
if authors != "":
out += """<strong class="headline">%(author_text)s</strong>%(authors)s<br /><br />""" % {
'author_text' : _("Author:"),
'authors' : authors,
}
if sysno != "":
out += """<strong class="headline">%(more)s</strong>
<a href="%(siteurl)s/record/%(sysno)s?ln=%(ln)s">%(click)s</a>
<br /><br />
""" % {
'more' : _("More information:"),
'click' : _("Click here"),
'siteurl' : CFG_SITE_URL,
'sysno' : sysno,
'ln' : ln,
}
out += """ </small>
<br />
</td>
</tr>
</table>"""
# escape forbidden character
msg_to = escape_html(msg_to)
msg_to_group = escape_html(msg_to_group)
msg_subject = escape_html(msg_subject)
write_box = """
<form action="publiline.py" method="post">
<input type="hidden" name="flow" value="cplx" />
<input type="hidden" name="doctype" value="%(doctype)s" />
<input type="hidden" name="categ" value="%(categ)s" />
<input type="hidden" name="RN" value="%(rn)s" />
<input type="hidden" name="apptype" value="%(apptype)s" />
<input type="hidden" name="action" value="%(action)s" />
<input type="hidden" name="ln" value="%(ln)s" />
<div style="float: left; vertical-align:text-top; margin-right: 10px;">
<table class="mailbox">
<thead class="mailboxheader">
<tr>
<td class="inboxheader" colspan="2">
<table class="messageheader">
<tr>
<td class="mailboxlabel">%(to_label)s</td>"""
if msg_to != "":
addr_box = """
<td class="mailboxlabel">%(users_label)s</td>
<td style="width:100%%%%;" class="mailboxlabel">%(to_users)s</td>""" % {'users_label': _("User"),
'to_users' : msg_to,
}
if msg_to_group != "":
addr_box += """
</tr>
<tr>
<td class="mailboxlabel"> </td>
<td class="mailboxlabel">%(groups_label)s</td>
<td style="width:100%%%%;" class="mailboxlabel">%(to_groups)s</td>""" % {'groups_label': _("Group"),
'to_groups': msg_to_group,
}
elif msg_to_group != "":
addr_box = """
<td class="mailboxlabel">%(groups_label)s</td>
<td style="width:100%%%%;" class="mailboxlabel">%(to_groups)s</td>""" % {'groups_label': _("Group"),
'to_groups': msg_to_group,
}
else:
addr_box = """
<td class="mailboxlabel"> </td>
<td class="mailboxlabel"> </td>"""
write_box += addr_box
write_box += """
</tr>
<tr>
<td class="mailboxlabel"> </td>
<td> </td>
<td> </td>
</tr>
<tr>
<td class="mailboxlabel">%(subject_label)s</td>
<td colspan="2">
<input class="mailboxinput" type="text" name="msg_subject" value="%(subject)s" />
</td>
</tr>
</table>
</td>
</tr>
</thead>
<tfoot>
<tr>
<td style="height:0px" colspan="2"></td>
</tr>
</tfoot>
<tbody class="mailboxbody">
<tr>
<td class="mailboxlabel">%(message_label)s</td>
<td>
<textarea name="msg_body" rows="10" cols="50"></textarea>
</td>
</tr>
<tr class="mailboxfooter">
<td>
<select name="validate">
<option> %(select)s</option>
<option value="approve">%(approve)s</option>
<option value="reject">%(reject)s</option>
</select>
</td>
<td colspan="2" class="mailboxfoot">
<input type="submit" name="send_button" value="%(send_label)s" class="formbutton"/>
</td>
</tr>
</tbody>
</table>
</div>
</form>
"""
write_box = write_box % {'rn' : rn,
'categ' : categ,
'doctype' : doctype,
'apptype' : apptype,
'action' : action,
'subject' : msg_subject,
'to_label': _("To:"),
'subject_label': _("Subject:"),
'message_label': _("Message:"),
'send_label': _("SEND"),
'select' : _("Select:"),
'approve' : _("approve"),
'reject' : _("reject"),
'ln': ln,
}
out += write_box
return out
def displaycplxdoc_displayauthaction(action, linkText):
return """ <strong class="headline">(<a href="" onclick="document.forms[0].action.value='%(action)s';document.forms[0].submit();return false;">%(linkText)s</a>)</strong>""" % {
"action" : action,
"linkText" : linkText
}
| gpl-2.0 |
xlsdg/phantomjs-linux-armv7l | src/breakpad/src/tools/gyp/test/subdirectory/gyptest-SYMROOT-all.py | 399 | 1269 | #!/usr/bin/env python
# Copyright (c) 2009 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies building a target and a subsidiary dependent target from a
.gyp file in a subdirectory, without specifying an explicit output build
directory, and using the generated solution or project file at the top
of the tree as the entry point.
The configuration sets the Xcode SYMROOT variable and uses --depth=
to make Xcode behave like the other build tools--that is, put all
built targets in a single output build directory at the top of the tree.
"""
import TestGyp
test = TestGyp.TestGyp()
test.run_gyp('prog1.gyp', '-Dset_symroot=1', '--depth=.', chdir='src')
test.relocate('src', 'relocate/src')
# Suppress the test infrastructure's setting SYMROOT on the command line.
test.build('prog1.gyp', test.ALL, SYMROOT=None, chdir='relocate/src')
test.run_built_executable('prog1',
stdout="Hello from prog1.c\n",
chdir='relocate/src')
test.run_built_executable('prog2',
stdout="Hello from prog2.c\n",
chdir='relocate/src')
test.pass_test()
| bsd-3-clause |
cwayne18/ActivityTracker | py/pl.py | 3 | 20187 | #!/usr/bin/env python
# vim: set fileencoding=utf8 ts=4 sw=4 noexpandtab:
#
# (c) Sergey Astanin <s.astanin@gmail.com> 2008
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""usage: gpxplot.py [action] [options] track.gpx
Analyze GPS track and plot elevation and velocity profiles.
Features:
* using haversine formula to calculate distances (spherical Earth)
* support of multi-segment (discontinuous) tracks
* gnuplot support:
- generate plots if gnuplot.py is available
- generate gnuplot script if gnuplot.py is not available
- plot interactively and plot-to-file modes
* Google Chart API support:
- print URL or the plot
* tabular track profile data can be generated
* metric and English units
* timezone support
Actions:
-g plot using gnuplot.py
--gprint print gnuplot script to standard output
--google print Google Chart URL
--table print data table (default)
--polyline print Google maps polyline data
--output-file file to store the resultant GPX data in
Options:
-h, --help print this message
-E use English units (metric units used by default)
-x var plot var = { time | distance } against x-axis
-y var plot var = { elevation | velocity } against y-axis
-o imagefile save plot to image file (supported: PNG, JPG, EPS, SVG)
-t tzname use local timezone tzname (e.g. 'Europe/Moscow')
-n N_points reduce number of points in the plot to approximately N_points
-f window apply window filter to the data.
-s filename store the final GPX data in the specified file.
-e epsilon 'very small' value used for line smoothing
-z zoomf zoom factor - the change in magnification between different levels of magnification
-L levels indicate how many different levels of magnification the polyline has
"""
import sys
import datetime
import getopt
import string
import copy
from math import sqrt,sin,cos,asin,pi,ceil,pow
from os.path import basename
from re import sub
import logging
#logging.basicConfig(level=logging.DEBUG,format='%(levelname)s: %(message)s')
debug=logging.debug
try:
import pytz
except:
pass
GPX10='{http://www.topografix.com/GPX/1/0}'
GPX11='{http://www.topografix.com/GPX/1/1}'
URI='{http://www.w3.org/2001/XMLSchema-instance}'
dateformat='%Y-%m-%dT%H:%M:%SZ'
R=6371.0008 # Earth volumetric radius
milesperkm=0.621371192
feetperm=3.2808399
strptime=datetime.datetime.strptime
var_lat = 0
var_lon = 1
var_time= 2
var_alt = 3
var_dist= 4
var_vel = 5
var_names={ 't': var_time,
'time': var_time,
'd': var_dist,
'dist': var_dist,
'distance': var_dist,
'ele': var_alt,
'elevation': var_alt,
'a': var_alt,
'alt': var_alt,
'altitude': var_alt,
'v': var_vel,
'vel': var_vel,
'velocity': var_vel,
'lat': var_lat,
'latitude': var_lat,
'lon': var_lon,
'longitude': var_lon,
}
EXIT_EOPTION=1
EXIT_EDEPENDENCY=2
EXIT_EFORMAT=3
def haversin(theta):
return sin(0.5*theta)**2
def distance(p1,p2):
lat1,lon1=[a*pi/180.0 for a in p1]
lat2,lon2=[a*pi/180.0 for a in p2]
deltalat=lat2-lat1
deltalon=lon2-lon1
h=haversin(deltalat)+cos(lat1)*cos(lat2)*haversin(deltalon)
dist=2*R*asin(sqrt(h))
return dist
def read_all_segments(trksegs,tzname=None,ns=GPX10,pttag='trkpt'):
trk=[]
for seg in trksegs:
s=[]
prev_ele,prev_time=0.0,None
trkpts=seg.findall(ns+pttag)
for pt in trkpts:
lat=float(pt.attrib['lat'])
lon=float(pt.attrib['lon'])
time=pt.findtext(ns+'time')
def prettify_time(time):
time=sub(r'\.\d+Z$','Z',time)
time=strptime(time,dateformat)
if tzname:
time=time.replace(tzinfo=pytz.utc)
time=time.astimezone(pytz.timezone(tzname))
return time
if time:
prev_time=time
time=prettify_time(time)
elif prev_time: # timestamp is missing, use the prev point
time=prev_time
time=prettify_time(time)
ele=pt.findtext(ns+'ele')
if ele:
ele=float(ele)
prev_ele=ele
else:
ele=prev_ele # elevation data is missing, use the prev point
s.append([lat, lon, time, ele])
trk.append(s)
return trk
"""
Calculate the average point for the array
"""
def calc_avg_point(seg):
p_avg = copy.deepcopy(seg[0])
p_prev = seg[0]
time_delta = p_prev[var_time]-p_prev[var_time]
for p in seg[1:]:
p_avg[var_alt] += p[var_alt]
p_avg[var_lat] += p[var_lat]
p_avg[var_lon] += p[var_lon]
time_delta = time_delta + (p[var_time]-p_prev[var_time]) # datetile supports only addition with a delta
p_prev = p
p_avg[var_alt] /= len(seg)
p_avg[var_lat] /= len(seg)
p_avg[var_lon] /= len(seg)
time_delta = time_delta // len(seg)
p_avg[var_time] = p_avg[var_time] + time_delta
return p_avg
"""
Run the average filter on the tracks
"""
def filter_points(trk,filter_window=None):
if (filter_window <= 1):
return trk;
newtrk=trk
half_window=int(filter_window/2)
for s in range(len(newtrk)):
oldseg = newtrk[s]
newseg = oldseg[half_window:-half_window]
if (len(oldseg) >= filter_window):
for p in range(len(newseg)):
p_avg = calc_avg_point(oldseg[p:p+filter_window-1]);
newseg[p] = p_avg;
newtrk[s] = newseg
return newtrk
"""
Reduce the number of points on the tracks
"""
def reduce_points(trk,npoints=None):
count=sum([len(s) for s in trk])
if npoints:
ptperpt=1.0*count/npoints
else:
return trk
skip=int(ceil(ptperpt))
debug('ptperpt=%f skip=%d'%(ptperpt,skip))
newtrk=[]
for seg in trk:
if len(seg) > 0:
newseg=seg[:-1:skip]+[seg[-1]]
newtrk.append(newseg)
debug('original: %d pts, filtered: %d pts'%\
(count,sum([len(s) for s in newtrk])))
return newtrk
def eval_dist_velocity(trk):
dist=0.0
newtrk=[]
for seg in trk:
if len(seg)>0:
newseg=[]
prev_lat,prev_lon,prev_time,prev_ele=None,None,None,None
for pt in seg:
lat,lon,time,ele=pt
if prev_lat and prev_lon:
delta=distance([lat,lon],[prev_lat,prev_lon])
if time and prev_time:
try:
vel=3600*delta/((time-prev_time).seconds)
except ZeroDivisionError:
vel=0.0 # probably the point lacked the timestamp
else:
vel=0.0
else: # new segment
delta=0.0
vel=0.0
dist=dist+delta
newseg.append([lat,lon,time,ele,dist,vel])
prev_lat,prev_lon,prev_time=lat,lon,time
newtrk.append(newseg)
return newtrk
def load_xml_library():
try:
import xml.etree.ElementTree as ET
except:
try:
import elementtree.ElementTree as ET
except:
try:
import cElementTree as ET
except:
try:
import lxml.etree as ET
except:
print ('this script needs ElementTree (Python>=2.5)')
sys.exit(EXIT_EDEPENDENCY)
return ET;
def parse_gpx_data(gpxdata,tzname=None,npoints=None,filter_window=None,output_file_name=None):
    """Parse raw GPX XML and return the processed track.

    The track is read (tracks first, routes as a fallback), smoothed,
    reduced to npoints, annotated with distance/velocity, and optionally
    written back via store_gpx_trk().
    """
    ET = load_xml_library()

    def find_trksegs_or_route(etree, ns):
        # Prefer <trkseg> elements; fall back to routes when no track exists.
        trksegs=etree.findall('.//'+ns+'trkseg')
        if trksegs:
            return trksegs, "trkpt"
        else: # try to display route if track is missing
            rte=etree.findall('.//'+ns+'rte')
            return rte, "rtept"

    # try GPX10 namespace first
    try:
        ET.register_namespace('', GPX11.strip('{}'))
        ET.register_namespace('', GPX10.strip('{}'))
        etree = ET.XML(gpxdata)
    except ET.ParseError as v:
        row, column = v.position
        # The original applied `%` only to `row` against a three-slot
        # format string (a TypeError at runtime) and then fell through
        # to use the undefined `etree`; format properly and bail out.
        print ("error on row %d, column %d: %s" % (row, column, v))
        sys.exit(EXIT_EFORMAT)
    trksegs,pttag=find_trksegs_or_route(etree, GPX10)
    NS=GPX10
    if not trksegs: # try GPX11 namespace otherwise
        trksegs,pttag=find_trksegs_or_route(etree, GPX11)
        NS=GPX11
    if not trksegs: # try without any namespace
        trksegs,pttag=find_trksegs_or_route(etree, "")
        NS=""
    trk=read_all_segments(trksegs,tzname=tzname,ns=NS,pttag=pttag)
    trk=filter_points(trk,filter_window)
    trk=reduce_points(trk,npoints=npoints)
    trk=eval_dist_velocity(trk)
    # Store the results if requested
    if output_file_name:
        store_gpx_trk(etree,trk,NS,pttag,output_file_name)
    return trk
"""
Read the data from the specified GPX file
"""
def read_gpx_trk(input_file_name,tzname,npoints,filter_window,output_file_name):
    # Obtain GPX data and hand it to parse_gpx_data().
    # "-" means: read the XML from standard input.
    # NOTE(review): despite its name, the non-stdin branch does NOT open
    # a file -- the open() call is commented out and input_file_name is
    # used as the raw GPX content itself; confirm callers pass XML here.
    if input_file_name == "-":
        gpx=sys.stdin.read()
        debug("length(gpx) from stdin = %d" % len(gpx))
    else:
        #gpx=open(input_file_name).read()
        #print(gpx)
        gpx=input_file_name
        debug("length(gpx) from file = %d" % len(gpx))
    return parse_gpx_data(gpx,tzname,npoints,filter_window,output_file_name)
"""
Store the updated track in the specified file
"""
def store_gpx_trk(etree,trk,ns=GPX10,pttag='trkpt',output_file_name="-"):
    # Write the processed track back out as GPX.
    # NOTE(review): explicitly unfinished ("isn't working yet"): it
    # writes a debug copy to a hard-coded Windows path, and
    # ET.tostring() is called on the plain point list `trk` rather than
    # an Element, which will raise.
    ET = load_xml_library();
    if output_file_name == "-":
        gpx=sys.stdout;  # "-" selects standard output
    else:
        gpx=open(output_file_name, 'w');
    print("\n== This feature isn't working yet ==\n")
    for node in etree.iterfind('.//'+ns+'trkseg'):
        print (node.tag, node.attrib, node.text)
    print("\n===========================\n")
    ET.ElementTree(etree).write("D:/temp/output.gpx")  # debugging leftover
#    ET.ElementTree(element).write(output_file_name, xml_declaration=True)
    gpx.write(ET.tostring(trk));
    return gpx.close();
def google_ext_encode(i):
    """
    Google Charts' extended encoding,
    see http://code.google.com/apis/chart/mappings.html#extended_values
    """
    alphabet = ('ABCDEFGHIJKLMNOPQRSTUVWXYZ'
                'abcdefghijklmnopqrstuvwxyz'
                '0123456789-.')
    base = len(alphabet)  # 64 symbols: two characters cover 0..4095
    value = int(i) % 4096
    return alphabet[value // base] + alphabet[value % base]
def google_text_encode_data(trk,x,y,min_x,max_x,min_y,max_y,metric=True):
    # Build the &chd= (data) and &chds= (axis scaling) query parameters
    # for the Google Chart API using the plain text encoding.
    # NOTE(review): `join` here is the old string.join(words, sep)
    # helper (Python 2) -- presumably imported elsewhere in this file.
    if metric:
        mlpkm,fpm=1.0,1.0  # keep km / m
    else:
        mlpkm,fpm=milesperkm,feetperm  # convert to miles / feet
    xenc=lambda x: "%.1f"%x
    yenc=lambda y: "%.1f"%y
    data='&chd=t:'+join([ join([xenc(p[x]*mlpkm) for p in seg],',')+\
            '|'+join([yenc(p[y]*fpm) for p in seg],',') \
            for seg in trk if len(seg) > 0],'|')
    data=data+'&chds='+join([join([xenc(min_x),xenc(max_x),yenc(min_y),yenc(max_y)],',') \
            for seg in trk if len(seg) > 0],',')
    return data
def google_ext_encode_data(trk,x,y,min_x,max_x,min_y,max_y,metric=True):
    # Like google_text_encode_data() but with the compact "extended"
    # encoding: each value is scaled into 0..4095 and mapped to two
    # characters by google_ext_encode().
    if metric:
        mlpkm,fpm=1.0,1.0
    else:
        mlpkm,fpm=milesperkm,feetperm
    # A degenerate (constant) axis would divide by zero, so encode it
    # as 0 instead.
    if max_x != min_x:
        xenc=lambda x: google_ext_encode((x-min_x)*4095/(max_x-min_x))
    else:
        xenc=lambda x: google_ext_encode(0)
    if max_y != min_y:
        yenc=lambda y: google_ext_encode((y-min_y)*4095/(max_y-min_y))
    else:
        yenc=lambda y: google_ext_encode(0)
    data='&chd=e:'+join([ join([xenc(p[x]*mlpkm) for p in seg],'')+\
            ','+join([yenc(p[y]*fpm) for p in seg],'') \
            for seg in trk if len(seg) > 0],',')
    return data
def google_chart_url(trk,x,y,metric=True):
    """Build a Google Chart API URL plotting elevation against distance.

    Only distance-elevation profiles are supported; any other axis
    combination prints a notice and returns None.  Raises ValueError on
    an empty track and OverflowError when the URL exceeds the API's
    2048-character limit.
    """
    if x != var_dist or y != var_alt:
        print ('only distance-elevation profiles are supported in --google mode')
        return
    if not trk:
        raise ValueError("Parsed track is empty")
    if metric:
        ele_units,dist_units='m','km'
        mlpkm,fpm=1.0,1.0
    else:
        ele_units,dist_units='ft','miles'
        mlpkm,fpm=milesperkm,feetperm
    urlprefix='http://chart.apis.google.com/chart?chtt=gpxplot.appspot.com&chts=cccccc,9&'
    url='chs=600x400&chco=9090FF&cht=lxy&chxt=x,y,x,y&chxp=2,100|3,100&'\
        'chxl=2:|distance, %s|3:|elevation, %s|'%(dist_units,ele_units)
    min_x=0
    max_x=mlpkm*(max([max([p[x] for p in seg]) for seg in trk if len(seg) > 0]))
    max_y=fpm*(max([max([p[y] for p in seg]) for seg in trk if len(seg) > 0]))
    min_y=fpm*(min([min([p[y] for p in seg]) for seg in trk if len(seg) > 0]))
    # Renamed from `range`, which shadowed the builtin of the same name.
    axis_range='&chxr=0,0,%s|1,%s,%s'%(int(max_x),int(min_y),int(max_y))
    data=google_ext_encode_data(trk,x,y,min_x,max_x,min_y,max_y,metric)
    url=urlprefix+url+axis_range+data
    if len(url) > 2048:
        raise OverflowError("URL too long, reduce number of points: "+(url))
    return url
def print_gpx_trk(trk,file=sys.stdout,metric=True):
    """Dump the processed track as a whitespace-separated table,
    one line per point, one blank line between segments."""
    out = file
    if metric:
        out.write('# time(ISO) elevation(m) distance(km) velocity(km/h)\n')
        km, m = 1.0, 1.0
    else:
        out.write('# time(ISO) elevation(ft) distance(miles) velocity(miles/h)\n')
        km, m = milesperkm, feetperm
    if not trk:
        return
    for seg in trk:
        if not seg:
            continue
        for p in seg:
            out.write('%s %f %f %f\n' % (p[var_time].isoformat(),
                                         m * p[var_alt],
                                         km * p[var_dist],
                                         km * p[var_vel]))
        out.write('\n')
def gen_gnuplot_script(trk,x,y,file=sys.stdout,metric=True,savefig=None):
    """Write a gnuplot script (including inline data) that plots the track.

    x/y select the plotted variables; savefig picks an output terminal
    from the file extension (png/jpg/eps/svg), otherwise gnuplot's
    default interactive terminal is used.  Exits with EXIT_EFORMAT on
    an unsupported extension.
    """
    if metric:
        ele_units,dist_units='m','km'
    else:
        ele_units,dist_units='ft','miles'
    file.write("unset key\n")
    if x == var_time:
        file.write("""set xdata time
set timefmt '%Y-%m-%dT%H:%M:%S'
set xlabel 'time'\n""")
    else:
        file.write("set xlabel 'distance, %s'\n"%dist_units)
    if y == var_alt:
        file.write("set ylabel 'elevation, %s'\n"%ele_units)
    else:
        # The original label string was missing its closing quote,
        # producing a malformed gnuplot command.
        file.write("set ylabel 'velocity, %s/h'\n"%dist_units)
    if savefig:
        import re
        ext=re.sub(r'.*\.','',savefig.lower())
        if ext == 'png':
            file.write("set terminal png; set output '%s';\n"%(savefig))
        elif ext in ['jpg','jpeg']:
            file.write("set terminal jpeg; set output '%s';\n"%(savefig))
        elif ext == 'eps':
            file.write("set terminal post eps; set output '%s';\n"%(savefig))
        elif ext == 'svg':
            file.write("set terminal svg; set output '%s';\n"%(savefig))
        else:
            print ('unsupported file type: %s'%ext)
            sys.exit(EXIT_EFORMAT)
    # Columns are 1-based in gnuplot's `using`; our indices are 0-based.
    file.write("plot '-' u %d:%d w l\n"%(x-1,y-1,))
    print_gpx_trk(trk,file=file,metric=metric)
    file.write('e')
def get_gnuplot_script(trk,x,y,metric,savefig):
    """Return the gnuplot script for the track as a single string."""
    import StringIO
    buf = StringIO.StringIO()
    gen_gnuplot_script(trk, x, y, file=buf, metric=metric, savefig=savefig)
    return buf.getvalue()
def plot_in_gnuplot(trk,x,y,metric=True,savefig=None):
    """Render the track through the python-gnuplot bindings, best effort."""
    script=get_gnuplot_script(trk,x,y,metric,savefig)
    # `except Exception` replaces the original bare `except:`, which
    # also swallowed KeyboardInterrupt and SystemExit.
    try:
        import Gnuplot
        if not savefig:
            g=Gnuplot.Gnuplot(persist=True)
        else:
            g=Gnuplot.Gnuplot()
        g(script)
    except Exception: # python-gnuplot is not available or is broken
        print ('gnuplot.py is not found')
def print_gnuplot_script(trk,x,y,metric=True,savefig=None):
    """Print the generated gnuplot script to standard output."""
    print("%s" % get_gnuplot_script(trk, x, y, metric, savefig))
"""
This computes the appropriate zoom level of a point in terms of it's
distance from the relevant segment in the DP algorithm.
"""
def find_zoom_level(value,zoomLevelBreaks):
    """Return the index of the first break that *value* reaches.

    zoomLevelBreaks is ordered from the largest threshold downwards;
    a value smaller than every break maps to len(zoomLevelBreaks).
    """
    for level, threshold in enumerate(zoomLevelBreaks):
        if value >= threshold:
            return level
    return len(zoomLevelBreaks)
"""
Convert a value into a polyline encoded level string
http://code.google.com/apis/maps/documentation/utilities/polylinealgorithm.html
"""
def polyline_encode_level(value):
    """Encode an integer zoom level as a list of polyline characters.

    Implements the chunk encoding from Google's encoded polyline
    algorithm: 5-bit chunks, least significant first, continuation bit
    0x20 on every chunk but the last, each offset by 63 into ASCII.
    """
    chars = []
    while value >= 0x20:
        # Emit the low five bits with the continuation flag set.
        chars.append(chr(((value & 0x1f) | 0x20) + 63))
        value >>= 5
    chars.append(chr(value + 63))
    return chars
"""
Convert a value into a polyline encoded point string
http://code.google.com/apis/maps/documentation/utilities/polylinealgorithm.html
"""
def polyline_encode_point(value):
    """Encode one signed e5 coordinate (or delta) as polyline characters.

    Follows Google's encoded polyline algorithm: left-shift the value,
    bitwise-invert it when negative (storing the sign in the low bit),
    split into 5-bit chunks from the least significant end, set the
    continuation bit 0x20 on all but the last chunk, then offset each
    chunk by 63 and map to ASCII.
    """
    # Left-shift one bit; invert negatives.  (The original also applied
    # a manual two's complement first, which is a no-op in Python.)
    shifted = value << 1
    if shifted < 0:
        shifted = ~shifted
    chunks = []
    # Peel off 5-bit chunks, least significant first (0x20 == 32).
    while shifted >= 0x20:
        chunks.append((shifted & 0x1f) | 0x20)  # continuation bit set
        shifted >>= 5
    chunks.append(shifted)
    # Offset by 63 and convert each chunk to its ASCII character.
    return [chr(c + 63) for c in chunks]
"""
Create static encoded polyline
http://code.google.com/apis/maps/documentation/utilities/polylinealgorithm.html
"""
def print_gpx_google_polyline(trk,numLevels,zoomFactor,epsilon,forceEndpoints):
    """Print each segment as a Google encoded polyline plus zoom levels.

    Points are optionally simplified with the Ramer-Douglas-Peucker
    algorithm (epsilon > 0).  Returns the last segment's polyline string.
    """
    import rdp
    # Pre-compute the distance threshold for each zoom level.
    zoomLevelBreaks = []
    for i in range(0, numLevels):
        zoomLevelBreaks.append(epsilon*pow(zoomFactor, numLevels-i-1))
    for seg in trk:
        if len(seg) == 0:
            continue
        # Perform the RDP magic on the data
        if epsilon > 0:
            seg = rdp.rdp(seg, epsilon)
        segment_polyline = ""
        segment_point = 0
        # Encode the points: the first point in full, every following
        # point as a delta from its predecessor.
        for p in seg:
            if segment_point == 0:
                lat = int(1e5 * p[var_lat])
                lon = int(1e5 * p[var_lon])
            else:
                lat = int(1e5 * (p[var_lat]) - int(1e5 * seg[segment_point-1][var_lat]))
                lon = int(1e5 * (p[var_lon]) - int(1e5 * seg[segment_point-1][var_lon]))
            segment_point += 1
            segment_polyline += ''.join(polyline_encode_point(lat))
            segment_polyline += ''.join(polyline_encode_point(lon))
        print ("polyline: %s\n" % segment_polyline)
        # Encode the levels
        segment_levels = ""
        segment_point = 0
        for p in seg:
            distance = rdp.point_line_distance(p, seg[0], seg[-1])
            # The last point's index is len(seg) - 1; the original
            # compared against len(seg), so the final endpoint was
            # never forced to the top level.
            if forceEndpoints == 1 and (segment_point == 0 or segment_point == len(seg) - 1):
                level = numLevels - 1
            else:
                level = numLevels - find_zoom_level(distance, zoomLevelBreaks) - 1
            segment_levels += ''.join(polyline_encode_level(level))
            segment_point += 1
        print ("levels: %s\n" % segment_levels)
        print ("\n")
    return segment_polyline
def main():
    """Command-line entry point: parse the options, read the GPX file
    and dispatch to the selected output action."""
    metric=True
    xvar=var_dist
    action='printtable'
    yvar=var_alt
    imagefile=None
    tzname=None
    npoints=None
    # polyline encoder default values
    numLevels = 18
    zoomFactor = 2
    epsilon = 0.0
    forceEndpoints = True
    # filter parameters
    filter_window=None
    output_file_name=None
    def print_see_usage():
        print ('see usage: ' + basename(sys.argv[0]) + ' --help')
    try: opts,args=getopt.getopt(sys.argv[1:],'hgEx:y:o:t:n:e:z:L:f:s:',
                    ['help','gprint','google','table','polyline','output-file='])
    except Exception as e:
        print ("Exception: %s" % e)
        print_see_usage()
        sys.exit(EXIT_EOPTION)
    for o, a in opts:
        if o in ['-h','--help']:
            print ("%s" % __doc__)
            sys.exit(0)
        if o == '-E':
            metric=False
        if o == '-g':
            action='gnuplot'
        if o == '--gprint':
            action='printgnuplot'
        if o == '--google':
            action='googlechart'
        if o == '--table':
            action='printtable'
        if o == '--polyline':
            action='polyline'
        if o == '-x':
            # `a in var_names` replaces the Python-2-only dict.has_key()
            # with the equivalent, forward-compatible membership test.
            if a in var_names:
                xvar=var_names[a]
            else:
                print ("unknown x variable")
                print_see_usage()
                sys.exit(EXIT_EOPTION)
        if o == '-y':
            if a in var_names:
                yvar=var_names[a]
            else:
                print ("unknown y variable")
                print_see_usage()
                sys.exit(EXIT_EOPTION)
        if o == '-o':
            imagefile=a
        if o == '-t':
            # pytz is optional; only required when a timezone is given.
            if 'pytz' not in globals():
                print ("pytz module is required to change timezone")
                sys.exit(EXIT_EDEPENDENCY)
            tzname=a
        if o == '-n':
            npoints=int(a)
        if o == '-f':
            filter_window=int(a)
        if o in ('-s','--output-file'):
            output_file_name=a
        if o == '-e':
            epsilon=float(a)
        if o == '-z':
            zoomFactor=float(a)
        if o == '-L':
            numLevels=int(a)
    if len(args) > 1:
        print ("only one GPX file should be specified")
        print_see_usage()
        sys.exit(EXIT_EOPTION)
    elif len(args) == 0:
        print ("please provide a GPX file to process.")
        print_see_usage()
        sys.exit(EXIT_EOPTION)
    input_file_name=args[0]
    trk=read_gpx_trk(input_file_name,tzname,npoints,filter_window,output_file_name)
    if action == 'gnuplot':
        plot_in_gnuplot(trk,x=xvar,y=yvar,metric=metric,savefig=imagefile)
    elif action == 'printgnuplot':
        print_gnuplot_script(trk,x=xvar,y=yvar,metric=metric,savefig=imagefile)
    elif action == 'printtable':
        print_gpx_trk(trk,metric=metric)
    elif action == 'googlechart':
        print ("%s" % google_chart_url(trk,x=xvar,y=yvar,metric=metric))
    elif action == 'polyline':
        print_gpx_google_polyline(trk,numLevels,zoomFactor,epsilon,forceEndpoints)
if __name__ == '__main__':
main()
| gpl-3.0 |
johnjohndoe/spendb | spendb/model/run.py | 5 | 1968 | from datetime import datetime
from sqlalchemy.orm import relationship, backref
from sqlalchemy.schema import Column, ForeignKey
from sqlalchemy.types import Integer, Unicode, DateTime
from spendb.core import db, url_for
from spendb.model.dataset import Dataset
class Run(db.Model):
    """ A run is a generic grouping object for background operations
    that perform logging to the frontend. """
    __tablename__ = 'run'

    # Status values
    STATUS_RUNNING = 'running'
    STATUS_COMPLETE = 'complete'
    STATUS_FAILED = 'failed'

    id = Column(Integer, primary_key=True)
    operation = Column(Unicode())
    status = Column(Unicode())
    source = Column(Unicode())
    time_start = Column(DateTime, default=datetime.utcnow)
    time_end = Column(DateTime)
    dataset_id = Column(Integer, ForeignKey('dataset.id'), nullable=True)
    # Runs are exposed on the dataset via the `runs` backref, newest first.
    dataset = relationship(Dataset,
                           backref=backref('runs',
                                           order_by='Run.time_start.desc()',
                                           lazy='dynamic'))

    def __init__(self, operation, status, dataset):
        self.operation = operation
        self.status = status
        self.dataset = dataset

    def to_dict(self):
        """Serialize the run for the JSON API."""
        return {
            'id': self.id,
            'api_url': url_for('runs_api.view', dataset=self.dataset.name,
                               id=self.id),
            'operation': self.operation,
            'status': self.status,
            'source': self.source,
            'time_start': self.time_start,
            'time_end': self.time_end
        }

    @classmethod
    def all(cls, dataset):
        """Return a query of all runs of *dataset*, oldest first."""
        q = db.session.query(cls).filter_by(dataset=dataset)
        return q.order_by(cls.time_start.asc())

    @classmethod
    def by_id(cls, dataset, id):
        """Return the run of *dataset* with the given id, or None."""
        return cls.all(dataset).filter_by(id=id).first()

    def __repr__(self):
        return "<Run(%r, %r, %r)>" % (self.source, self.id, self.status)
| agpl-3.0 |
SergiuMir/python-phonenumbers | python/phonenumbers/data/region_SH.py | 10 | 1452 | """Auto-generated file, do not edit by hand. SH metadata"""
from ..phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata
# Auto-generated numbering-plan metadata for Saint Helena (region "SH",
# country calling code 290).  Categories whose pattern is the literal
# string 'NA' do not exist for this region.
PHONE_METADATA_SH = PhoneMetadata(id='SH', country_code=290, international_prefix='00',
    general_desc=PhoneNumberDesc(national_number_pattern='[256]\\d{4}', possible_number_pattern='\\d{4,5}'),
    fixed_line=PhoneNumberDesc(national_number_pattern='2(?:[0-57-9]\\d|6[4-9])\\d{2}', possible_number_pattern='\\d{5}', example_number='22158'),
    mobile=PhoneNumberDesc(national_number_pattern='[56]\\d{4}', possible_number_pattern='\\d{5}'),
    toll_free=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
    premium_rate=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
    shared_cost=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
    personal_number=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
    voip=PhoneNumberDesc(national_number_pattern='262\\d{2}', possible_number_pattern='\\d{5}'),
    pager=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
    uan=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
    voicemail=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
    no_international_dialling=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
    main_country_for_code=True)
| apache-2.0 |
sumifit/hunters | node_modules/alertify.js/node_modules/snyk/node_modules/snyk-tree/travis_after_all.py | 93 | 3418 | import os
import json
import time
import logging
try:
import urllib.request as urllib2
except ImportError:
import urllib2
log = logging.getLogger("travis.leader")
log.addHandler(logging.StreamHandler())
log.setLevel(logging.INFO)
TRAVIS_JOB_NUMBER = 'TRAVIS_JOB_NUMBER'
TRAVIS_BUILD_ID = 'TRAVIS_BUILD_ID'
POLLING_INTERVAL = 'LEADER_POLLING_INTERVAL'
# Determine this job's role from its Travis job number ("<build>.<job>"):
# job *.1 is treated as the leader, every other job as a minion.  Minions
# exit immediately after exporting their role via .to_export_back.
build_id = os.getenv(TRAVIS_BUILD_ID)
polling_interval = int(os.getenv(POLLING_INTERVAL, '5'))

#assume, first job is the leader
is_leader = lambda job_number: job_number.endswith('.1')

if not os.getenv(TRAVIS_JOB_NUMBER):
    # seems even for builds with only one job, this won't get here
    log.fatal("Don't use defining leader for build without matrix")
    exit(1)
elif is_leader(os.getenv(TRAVIS_JOB_NUMBER)):
    log.info("This is a leader")
else:
    #since python is subprocess, env variables are exported back via file
    with open(".to_export_back", "w") as export_var:
        export_var.write("BUILD_MINION=YES")
    log.info("This is a minion")
    exit(0)
class MatrixElement(object):
    """One job of the Travis build matrix, as reported by the API."""

    def __init__(self, json_raw):
        self.number = json_raw['number']
        self.is_leader = is_leader(self.number)
        # Finished once the API reports a finish timestamp; succeeded
        # when the result code is zero.
        self.is_finished = json_raw['finished_at'] is not None
        self.is_succeeded = json_raw['result'] == 0
def matrix_snapshot():
    """
    Query the Travis API for the current state of every job in this
    build and wrap each entry in a MatrixElement.

    :return: Matrix List
    """
    # NOTE(review): despite the local variable's name, the leader job is
    # NOT filtered out here -- callers filter on el.is_leader themselves.
    response = urllib2.build_opener().open("https://api.travis-ci.org/builds/{0}".format(build_id)).read()
    raw_json = json.loads(response)
    matrix_without_leader = [MatrixElement(element) for element in raw_json["matrix"]]
    return matrix_without_leader
def wait_others_to_finish():
    """Poll until every non-leader job of the build matrix has finished."""

    def others_finished():
        """
        Leader cannot finish, it is working now.
        :return: tuple(True or False, List of not finished jobs)
        """
        snapshot = matrix_snapshot()
        finished = [el.is_finished for el in snapshot if not el.is_leader]
        # all() replaces reduce(lambda a, b: a and b, ...): identical for
        # non-empty lists, and it no longer raises TypeError when the
        # matrix contains only the leader (empty minion list).
        return all(finished), [el.number for el in snapshot if
                               not el.is_leader and not el.is_finished]

    while True:
        finished, waiting_list = others_finished()
        if finished:
            break
        log.info("Leader waits for minions {0}...".format(waiting_list)) # just in case do not get "silence timeout"
        time.sleep(polling_interval)
# Leader path: wait for all minions, then derive one aggregate status
# (others_succeeded / others_failed / unknown) and export it through the
# .to_export_back file for the calling shell to source.
try:
    wait_others_to_finish()

    final_snapshot = matrix_snapshot()
    log.info("Final Results: {0}".format([(e.number, e.is_succeeded) for e in final_snapshot]))

    BUILD_AGGREGATE_STATUS = 'BUILD_AGGREGATE_STATUS'
    others_snapshot = [el for el in final_snapshot if not el.is_leader]
    # All minions succeeded / all failed / mixed results.
    if reduce(lambda a, b: a and b, [e.is_succeeded for e in others_snapshot]):
        os.environ[BUILD_AGGREGATE_STATUS] = "others_succeeded"
    elif reduce(lambda a, b: a and b, [not e.is_succeeded for e in others_snapshot]):
        log.error("Others Failed")
        os.environ[BUILD_AGGREGATE_STATUS] = "others_failed"
    else:
        log.warn("Others Unknown")
        os.environ[BUILD_AGGREGATE_STATUS] = "unknown"
    #since python is subprocess, env variables are exported back via file
    with open(".to_export_back", "w") as export_var:
        export_var.write("BUILD_LEADER=YES {0}={1}".format(BUILD_AGGREGATE_STATUS, os.environ[BUILD_AGGREGATE_STATUS]))

except Exception as e:
    log.fatal(e)
mhaessig/servo | tests/wpt/web-platform-tests/tools/pywebsocket/src/test/test_memorizingfile.py | 496 | 4252 | #!/usr/bin/env python
#
# Copyright 2011, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests for memorizingfile module."""
import StringIO
import unittest
import set_sys_path # Update sys.path to locate mod_pywebsocket module.
from mod_pywebsocket import memorizingfile
class UtilTest(unittest.TestCase):
    """A unittest for memorizingfile module."""

    def check(self, memorizing_file, num_read, expected_list):
        """Read num_read lines, then verify the memorized lines equal
        expected_list element by element."""
        for unused in range(num_read):
            memorizing_file.readline()
        actual_list = memorizing_file.get_memorized_lines()
        self.assertEqual(len(expected_list), len(actual_list))
        for expected, actual in zip(expected_list, actual_list):
            self.assertEqual(expected, actual)

    def check_with_size(self, memorizing_file, read_size, expected_list):
        """Like check(), but read with a byte limit per readline() call
        and reassemble the partial reads into whole lines."""
        read_list = []
        read_line = ''
        while True:
            line = memorizing_file.readline(read_size)
            line_length = len(line)
            self.assertTrue(line_length <= read_size)
            if line_length == 0:
                # EOF: flush any trailing partial line.
                if read_line != '':
                    read_list.append(read_line)
                break
            read_line += line
            if line[line_length - 1] == '\n':
                read_list.append(read_line)
                read_line = ''
        actual_list = memorizing_file.get_memorized_lines()
        self.assertEqual(len(expected_list), len(actual_list))
        self.assertEqual(len(expected_list), len(read_list))
        for expected, actual, read in zip(expected_list, actual_list,
                                          read_list):
            self.assertEqual(expected, actual)
            self.assertEqual(expected, read)

    def test_get_memorized_lines(self):
        memorizing_file = memorizingfile.MemorizingFile(StringIO.StringIO(
            'Hello\nWorld\nWelcome'))
        self.check(memorizing_file, 3, ['Hello\n', 'World\n', 'Welcome'])

    def test_get_memorized_lines_limit_memorized_lines(self):
        # With a cap of 2, only the first two lines are memorized.
        memorizing_file = memorizingfile.MemorizingFile(StringIO.StringIO(
            'Hello\nWorld\nWelcome'), 2)
        self.check(memorizing_file, 3, ['Hello\n', 'World\n'])

    def test_get_memorized_lines_empty_file(self):
        memorizing_file = memorizingfile.MemorizingFile(StringIO.StringIO(
            ''))
        self.check(memorizing_file, 10, [])

    def test_get_memorized_lines_with_size(self):
        # Exercise every read size from 1 to 9 bytes per readline() call.
        for size in range(1, 10):
            memorizing_file = memorizingfile.MemorizingFile(StringIO.StringIO(
                'Hello\nWorld\nWelcome'))
            self.check_with_size(memorizing_file, size,
                                 ['Hello\n', 'World\n', 'Welcome'])
if __name__ == '__main__':
unittest.main()
# vi:sts=4 sw=4 et
| mpl-2.0 |
ericlink/adms-server | playframework-dist/1.1-src/python/Lib/cgi.py | 2 | 36075 | #! /usr/local/bin/python
# NOTE: the above "/usr/local/bin/python" is NOT a mistake. It is
# intentionally NOT "/usr/bin/env python". On many systems
# (e.g. Solaris), /usr/local/bin is not in $PATH as passed to CGI
# scripts, and /usr/local/bin is the default directory where Python is
# installed, so /usr/bin/env would be unable to find python. Granted,
# binary installations by Linux vendors often install Python in
# /usr/bin. So let those vendors patch cgi.py to match their choice
# of installation.
"""Support module for CGI (Common Gateway Interface) scripts.
This module defines a number of utilities for use by CGI scripts
written in Python.
"""
# XXX Perhaps there should be a slimmed version that doesn't contain
# all those backwards compatible and debugging classes and functions?
# History
# -------
#
# Michael McLay started this module. Steve Majewski changed the
# interface to SvFormContentDict and FormContentDict. The multipart
# parsing was inspired by code submitted by Andreas Paepcke. Guido van
# Rossum rewrote, reformatted and documented the module and is currently
# responsible for its maintenance.
#
__version__ = "2.6"
# Imports
# =======
from operator import attrgetter
import sys
import os
import urllib
import mimetools
import rfc822
import UserDict
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
__all__ = ["MiniFieldStorage", "FieldStorage", "FormContentDict",
"SvFormContentDict", "InterpFormContentDict", "FormContent",
"parse", "parse_qs", "parse_qsl", "parse_multipart",
"parse_header", "print_exception", "print_environ",
"print_form", "print_directory", "print_arguments",
"print_environ_usage", "escape"]
# Logging support
# ===============
logfile = "" # Filename to log to, if not empty
logfp = None # File object to log to, if not None
def initlog(*allargs):
    """Write a log message, if there is a log file.

    Even though this function is called initlog(), you should always
    use log(); log is a variable that is set either to initlog
    (initially), to dolog (once the log file has been opened), or to
    nolog (when logging is disabled).

    The first argument is a format string; the remaining arguments (if
    any) are arguments to the % operator, so e.g.

        log("%s: %s", "a", "b")

    will write "a: b" to the log file, followed by a newline.

    If the global logfp is not None, it should be a file object to
    which log data is written.

    If the global logfp is None, the global logfile may be a string
    giving a filename to open, in append mode.  This file should be
    world writable!!!  If the file can't be opened, logging is
    silently disabled (since there is no safe place where we could
    send an error message).
    """
    global logfp, log
    # Open the log file lazily on first use; failure disables logging.
    if logfile and not logfp:
        try:
            logfp = open(logfile, "a")
        except IOError:
            pass
    # Rebind the module-level `log` so subsequent calls skip this setup.
    if not logfp:
        log = nolog
    else:
        log = dolog
    log(*allargs)
def dolog(fmt, *args):
    """Write a log message to the log file. See initlog() for docs."""
    message = fmt % args
    logfp.write(message + "\n")
def nolog(*allargs):
    """Dummy function, assigned to log when logging is disabled."""
    return None
log = initlog # The current logging function
# Parsing functions
# =================
# Maximum input we will accept when REQUEST_METHOD is POST
# 0 ==> unlimited input
maxlen = 0
def parse(fp=None, environ=os.environ, keep_blank_values=0, strict_parsing=0):
    """Parse a query in the environment or from a file (default stdin).

    Arguments, all optional:

    fp: file pointer; default: sys.stdin

    environ: environment dictionary; default: os.environ

    keep_blank_values: flag indicating whether blank values in
        URL encoded forms should be treated as blank strings.  A true
        value indicates that blanks should be retained as blank
        strings.  The default false value indicates that blank values
        are to be ignored and treated as if they were not included.

    strict_parsing: flag indicating what to do with parsing errors.
        If false (the default), errors are silently ignored.  If true,
        errors raise a ValueError exception.
    """
    if fp is None:
        fp = sys.stdin
    if not 'REQUEST_METHOD' in environ:
        environ['REQUEST_METHOD'] = 'GET'       # For testing stand-alone
    if environ['REQUEST_METHOD'] == 'POST':
        ctype, pdict = parse_header(environ['CONTENT_TYPE'])
        if ctype == 'multipart/form-data':
            return parse_multipart(fp, pdict)
        elif ctype == 'application/x-www-form-urlencoded':
            clength = int(environ['CONTENT_LENGTH'])
            if maxlen and clength > maxlen:
                raise ValueError, 'Maximum content length exceeded'
            qs = fp.read(clength)
        else:
            qs = ''                     # Unknown content-type
        # A POST may still carry a query string (or a command-line
        # argument, for stand-alone testing); append it with '&'.
        if 'QUERY_STRING' in environ:
            if qs: qs = qs + '&'
            qs = qs + environ['QUERY_STRING']
        elif sys.argv[1:]:
            if qs: qs = qs + '&'
            qs = qs + sys.argv[1]
        environ['QUERY_STRING'] = qs    # XXX Shouldn't, really
    elif 'QUERY_STRING' in environ:
        qs = environ['QUERY_STRING']
    else:
        if sys.argv[1:]:
            qs = sys.argv[1]
        else:
            qs = ""
        environ['QUERY_STRING'] = qs    # XXX Shouldn't, really
    return parse_qs(qs, keep_blank_values, strict_parsing)
def parse_qs(qs, keep_blank_values=0, strict_parsing=0):
    """Parse a query string and return a dict of name -> list of values.

    Arguments:

    qs: URL-encoded query string to be parsed

    keep_blank_values: when true, blank values are kept as empty
        strings; when false (the default) they are dropped entirely.

    strict_parsing: when true, malformed fields raise ValueError;
        when false (the default) they are silently ignored.
    """
    result = {}
    for name, value in parse_qsl(qs, keep_blank_values, strict_parsing):
        # Repeated names accumulate into one list per name.
        result.setdefault(name, []).append(value)
    return result
def parse_qsl(qs, keep_blank_values=0, strict_parsing=0):
    """Parse a query given as a string argument.

    Arguments:

    qs: URL-encoded query string to be parsed

    keep_blank_values: flag indicating whether blank values in
        URL encoded queries should be treated as blank strings.  A
        true value indicates that blanks should be retained as blank
        strings.  The default false value indicates that blank values
        are to be ignored and treated as if they were not included.

    strict_parsing: flag indicating what to do with parsing errors.  If
        false (the default), errors are silently ignored.  If true,
        errors raise a ValueError exception.

    Returns a list of (name, value) pairs.
    """
    # Both '&' and ';' are accepted as pair separators.
    pairs = [s2 for s1 in qs.split('&') for s2 in s1.split(';')]
    r = []
    for name_value in pairs:
        if not name_value and not strict_parsing:
            continue
        nv = name_value.split('=', 1)
        if len(nv) != 2:
            if strict_parsing:
                raise ValueError, "bad query field: %r" % (name_value,)
            # Handle case of a control-name with no equal sign
            if keep_blank_values:
                nv.append('')
            else:
                continue
        if len(nv[1]) or keep_blank_values:
            # '+' encodes a space; %XX escapes are decoded by unquote().
            name = urllib.unquote(nv[0].replace('+', ' '))
            value = urllib.unquote(nv[1].replace('+', ' '))
            r.append((name, value))
    return r
def parse_multipart(fp, pdict):
    """Parse multipart input.

    Arguments:
    fp   : input file
    pdict: dictionary containing other parameters of content-type header

    Returns a dictionary just like parse_qs(): keys are the field names, each
    value is a list of values for that field.  This is easy to use but not
    much good if you are expecting megabytes to be uploaded -- in that case,
    use the FieldStorage class instead which is much more flexible.  Note
    that content-type is the raw, unparsed contents of the content-type
    header.

    XXX This does not parse nested multipart parts -- use FieldStorage for
    that.

    XXX This should really be subsumed by FieldStorage altogether -- no
    point in having two implementations of the same parsing algorithm.
    Also, FieldStorage protects itself better against certain DoS attacks
    by limiting the size of the data read in one chunk.  The API here
    does not support that kind of protection.  This also affects parse()
    since it can call parse_multipart().
    """
    boundary = ""
    if 'boundary' in pdict:
        boundary = pdict['boundary']
    if not valid_boundary(boundary):
        raise ValueError, ('Invalid boundary in multipart form: %r'
                            % (boundary,))

    # Per RFC 2046: parts are delimited by "--boundary" lines and the
    # body ends at "--boundary--".
    nextpart = "--" + boundary
    lastpart = "--" + boundary + "--"
    partdict = {}
    terminator = ""

    while terminator != lastpart:
        bytes = -1
        data = None
        if terminator:
            # At start of next part.  Read headers first.
            headers = mimetools.Message(fp)
            clength = headers.getheader('content-length')
            if clength:
                try:
                    bytes = int(clength)
                except ValueError:
                    pass
            if bytes > 0:
                if maxlen and bytes > maxlen:
                    raise ValueError, 'Maximum content length exceeded'
                data = fp.read(bytes)
            else:
                data = ""
        # Read lines until end of part.
        lines = []
        while 1:
            line = fp.readline()
            if not line:
                terminator = lastpart # End outer loop
                break
            if line[:2] == "--":
                terminator = line.strip()
                if terminator in (nextpart, lastpart):
                    break
            lines.append(line)
        # Done with part.
        if data is None:
            continue
        if bytes < 0:
            # No Content-Length header: the part body is the collected
            # lines, minus the final line terminator.
            if lines:
                # Strip final line terminator
                line = lines[-1]
                if line[-2:] == "\r\n":
                    line = line[:-2]
                elif line[-1:] == "\n":
                    line = line[:-1]
                lines[-1] = line
                data = "".join(lines)
        line = headers['content-disposition']
        if not line:
            continue
        key, params = parse_header(line)
        if key != 'form-data':
            continue
        if 'name' in params:
            name = params['name']
        else:
            continue
        if name in partdict:
            partdict[name].append(data)
        else:
            partdict[name] = [data]

    return partdict
def parse_header(line):
    """Parse a Content-type like header.

    Return the main value and a dictionary of parameters, e.g.
    'text/html; charset=utf-8' -> ('text/html', {'charset': 'utf-8'}).
    """
    parts = [piece.strip() for piece in line.split(';')]
    key = parts[0].lower()
    pdict = {}
    for param in parts[1:]:
        eq = param.find('=')
        if eq < 0:
            continue  # bare token without '=': ignored, as before
        name = param[:eq].strip().lower()
        value = param[eq + 1:].strip()
        # Strip surrounding double quotes and undo backslash escaping.
        if len(value) >= 2 and value.startswith('"') and value.endswith('"'):
            value = value[1:-1].replace('\\\\', '\\').replace('\\"', '"')
        pdict[name] = value
    return key, pdict
# Classes for field storage
# =========================
class MiniFieldStorage:
    """A lightweight stand-in for FieldStorage when no file uploads occur.
    Only ``name`` and ``value`` carry data; the remaining attributes exist
    solely so instances expose the same surface as FieldStorage.
    """
    # Dummy attributes kept for FieldStorage API compatibility.
    filename = None
    list = None
    type = None
    file = None
    type_options = {}
    disposition = None
    disposition_options = {}
    headers = {}

    def __init__(self, name, value):
        """Store the field name and its value."""
        self.value = value
        self.name = name
        # self.file = StringIO(value)

    def __repr__(self):
        """Render as MiniFieldStorage(name, value)."""
        return "MiniFieldStorage(%r, %r)" % (self.name, self.value)
class FieldStorage:
    """Store a sequence of fields, reading multipart/form-data.
    This class provides naming, typing, files stored on disk, and
    more. At the top level, it is accessible like a dictionary, whose
    keys are the field names. (Note: None can occur as a field name.)
    The items are either a Python list (if there's multiple values) or
    another FieldStorage or MiniFieldStorage object. If it's a single
    object, it has the following attributes:
    name: the field name, if specified; otherwise None
    filename: the filename, if specified; otherwise None; this is the
        client side filename, *not* the file name on which it is
        stored (that's a temporary file you don't deal with)
    value: the value as a *string*; for file uploads, this
        transparently reads the file every time you request the value
    file: the file(-like) object from which you can read the data;
        None if the data is stored a simple string
    type: the content-type, or None if not specified
    type_options: dictionary of options specified on the content-type
        line
    disposition: content-disposition, or None if not specified
    disposition_options: dictionary of corresponding options
    headers: a dictionary(-like) object (sometimes rfc822.Message or a
        subclass thereof) containing *all* headers
    The class is subclassable, mostly for the purpose of overriding
    the make_file() method, which is called internally to come up with
    a file open for reading and writing. This makes it possible to
    override the default choice of storing all files in a temporary
    directory and unlinking them as soon as they have been opened.
    """
    def __init__(self, fp=None, headers=None, outerboundary="",
                 environ=os.environ, keep_blank_values=0, strict_parsing=0):
        """Constructor. Read multipart/* until last part.
        Arguments, all optional:
        fp : file pointer; default: sys.stdin
            (not used when the request method is GET)
        headers : header dictionary-like object; default:
            taken from environ as per CGI spec
        outerboundary : terminating multipart boundary
            (for internal use only)
        environ : environment dictionary; default: os.environ
        keep_blank_values: flag indicating whether blank values in
            URL encoded forms should be treated as blank strings.
            A true value indicates that blanks should be retained as
            blank strings. The default false value indicates that
            blank values are to be ignored and treated as if they were
            not included.
        strict_parsing: flag indicating what to do with parsing errors.
            If false (the default), errors are silently ignored.
            If true, errors raise a ValueError exception.
        """
        method = 'GET'          # default when REQUEST_METHOD is absent
        self.keep_blank_values = keep_blank_values
        self.strict_parsing = strict_parsing
        if 'REQUEST_METHOD' in environ:
            method = environ['REQUEST_METHOD'].upper()
        if method == 'GET' or method == 'HEAD':
            # For GET/HEAD the form data comes from the query string
            # (or from sys.argv[1] when run from the command line).
            if 'QUERY_STRING' in environ:
                qs = environ['QUERY_STRING']
            elif sys.argv[1:]:
                qs = sys.argv[1]
            else:
                qs = ""
            fp = StringIO(qs)
            if headers is None:
                headers = {'content-type':
                           "application/x-www-form-urlencoded"}
        if headers is None:
            # Non-GET request with no explicit headers: synthesize them
            # from the CGI environment.
            headers = {}
            if method == 'POST':
                # Set default content-type for POST to what's traditional
                headers['content-type'] = "application/x-www-form-urlencoded"
            if 'CONTENT_TYPE' in environ:
                headers['content-type'] = environ['CONTENT_TYPE']
            if 'CONTENT_LENGTH' in environ:
                headers['content-length'] = environ['CONTENT_LENGTH']
        self.fp = fp or sys.stdin       # fall back to stdin if no stream given
        self.headers = headers
        self.outerboundary = outerboundary
        # Process content-disposition header
        cdisp, pdict = "", {}
        if 'content-disposition' in self.headers:
            cdisp, pdict = parse_header(self.headers['content-disposition'])
        self.disposition = cdisp
        self.disposition_options = pdict
        self.name = None
        if 'name' in pdict:
            self.name = pdict['name']
        self.filename = None
        if 'filename' in pdict:
            self.filename = pdict['filename']
        # Process content-type header
        #
        # Honor any existing content-type header. But if there is no
        # content-type header, use some sensible defaults. Assume
        # outerboundary is "" at the outer level, but something non-false
        # inside a multi-part. The default for an inner part is text/plain,
        # but for an outer part it should be urlencoded. This should catch
        # bogus clients which erroneously forget to include a content-type
        # header.
        #
        # See below for what we do if there does exist a content-type header,
        # but it happens to be something we don't understand.
        if 'content-type' in self.headers:
            ctype, pdict = parse_header(self.headers['content-type'])
        elif self.outerboundary or method != 'POST':
            ctype, pdict = "text/plain", {}
        else:
            ctype, pdict = 'application/x-www-form-urlencoded', {}
        self.type = ctype
        self.type_options = pdict
        self.innerboundary = ""
        if 'boundary' in pdict:
            self.innerboundary = pdict['boundary']
        clen = -1
        if 'content-length' in self.headers:
            try:
                clen = int(self.headers['content-length'])
            except ValueError:
                pass
            # 'maxlen' is a module-level limit defined elsewhere in this
            # file; a falsy value disables the check.
            if maxlen and clen > maxlen:
                raise ValueError, 'Maximum content length exceeded'
        self.length = clen
        self.list = self.file = None
        self.done = 0
        # Dispatch on the (possibly defaulted) content type.
        if ctype == 'application/x-www-form-urlencoded':
            self.read_urlencoded()
        elif ctype[:10] == 'multipart/':
            self.read_multi(environ, keep_blank_values, strict_parsing)
        else:
            self.read_single()
    def __repr__(self):
        """Return a printable representation."""
        return "FieldStorage(%r, %r, %r)" % (
            self.name, self.filename, self.value)
    def __iter__(self):
        # Iterating a FieldStorage yields its field names.
        return iter(self.keys())
    def __getattr__(self, name):
        # Only the computed 'value' pseudo-attribute is supported here;
        # any other missing attribute is a genuine AttributeError.
        if name != 'value':
            raise AttributeError, name
        if self.file:
            # File-backed value: (re)read the whole file, leaving the
            # position back at the start for subsequent reads.
            self.file.seek(0)
            value = self.file.read()
            self.file.seek(0)
        elif self.list is not None:
            value = self.list
        else:
            value = None
        return value
    def __getitem__(self, key):
        """Dictionary style indexing."""
        if self.list is None:
            raise TypeError, "not indexable"
        found = []
        for item in self.list:
            if item.name == key: found.append(item)
        if not found:
            raise KeyError, key
        if len(found) == 1:
            # Single match: return the field object itself, not a list.
            return found[0]
        else:
            return found
    def getvalue(self, key, default=None):
        """Dictionary style get() method, including 'value' lookup."""
        if key in self:
            value = self[key]
            if type(value) is type([]):
                # Multiple fields: Python 2 map() returns a list of values.
                return map(attrgetter('value'), value)
            else:
                return value.value
        else:
            return default
    def getfirst(self, key, default=None):
        """ Return the first value received."""
        if key in self:
            value = self[key]
            if type(value) is type([]):
                return value[0].value
            else:
                return value.value
        else:
            return default
    def getlist(self, key):
        """ Return list of received values."""
        if key in self:
            value = self[key]
            if type(value) is type([]):
                return map(attrgetter('value'), value)
            else:
                # Single field: still return a one-element list.
                return [value.value]
        else:
            return []
    def keys(self):
        """Dictionary style keys() method."""
        if self.list is None:
            raise TypeError, "not indexable"
        keys = []
        for item in self.list:
            # Preserve first-seen order while de-duplicating names.
            if item.name not in keys: keys.append(item.name)
        return keys
    def has_key(self, key):
        """Dictionary style has_key() method."""
        if self.list is None:
            raise TypeError, "not indexable"
        for item in self.list:
            if item.name == key: return True
        return False
    def __contains__(self, key):
        """Dictionary style __contains__ method."""
        if self.list is None:
            raise TypeError, "not indexable"
        for item in self.list:
            if item.name == key: return True
        return False
    def __len__(self):
        """Dictionary style len(x) support."""
        return len(self.keys())
    def read_urlencoded(self):
        """Internal: read data in query string format."""
        qs = self.fp.read(self.length)
        self.list = list = []
        for key, value in parse_qsl(qs, self.keep_blank_values,
                                    self.strict_parsing):
            list.append(MiniFieldStorage(key, value))
        self.skip_lines()
    FieldStorageClass = None
    def read_multi(self, environ, keep_blank_values, strict_parsing):
        """Internal: read a part that is itself multipart."""
        ib = self.innerboundary
        if not valid_boundary(ib):
            raise ValueError, 'Invalid boundary in multipart form: %r' % (ib,)
        self.list = []
        # Subclasses may substitute their own part class here.
        klass = self.FieldStorageClass or self.__class__
        part = klass(self.fp, {}, ib,
                     environ, keep_blank_values, strict_parsing)
        # Throw first part away
        while not part.done:
            headers = rfc822.Message(self.fp)
            part = klass(self.fp, headers, ib,
                         environ, keep_blank_values, strict_parsing)
            self.list.append(part)
        self.skip_lines()
    def read_single(self):
        """Internal: read an atomic part."""
        if self.length >= 0:
            # Known content-length: read exactly that many bytes.
            self.read_binary()
            self.skip_lines()
        else:
            # Unknown length: read line by line until EOF or boundary.
            self.read_lines()
        self.file.seek(0)
    bufsize = 8*1024            # I/O buffering size for copy to file
    def read_binary(self):
        """Internal: read binary data."""
        self.file = self.make_file('b')
        todo = self.length
        if todo >= 0:
            while todo > 0:
                data = self.fp.read(min(todo, self.bufsize))
                if not data:
                    # Premature EOF: flag with done = -1 and stop.
                    self.done = -1
                    break
                self.file.write(data)
                todo = todo - len(data)
    def read_lines(self):
        """Internal: read lines until EOF or outerboundary."""
        # Buffer small parts in a StringIO; __write() spills to a real
        # temporary file once the part grows past 1000 bytes.
        self.file = self.__file = StringIO()
        if self.outerboundary:
            self.read_lines_to_outerboundary()
        else:
            self.read_lines_to_eof()
    def __write(self, line):
        if self.__file is not None:
            if self.__file.tell() + len(line) > 1000:
                # Part is getting large: switch to a disk-backed file and
                # copy over what was buffered in memory so far.
                self.file = self.make_file('')
                self.file.write(self.__file.getvalue())
                self.__file = None
        self.file.write(line)
    def read_lines_to_eof(self):
        """Internal: read lines until EOF."""
        while 1:
            line = self.fp.readline(1<<16)
            if not line:
                self.done = -1
                break
            self.__write(line)
    def read_lines_to_outerboundary(self):
        """Internal: read lines until outerboundary."""
        next = "--" + self.outerboundary
        last = next + "--"
        delim = ""
        last_line_lfend = True
        while 1:
            line = self.fp.readline(1<<16)
            if not line:
                self.done = -1
                break
            # A boundary only counts if it starts at the beginning of a
            # line (i.e. the previous line ended with a newline).
            if line[:2] == "--" and last_line_lfend:
                strippedline = line.strip()
                if strippedline == next:
                    break
                if strippedline == last:
                    self.done = 1
                    break
            # The CRLF preceding a boundary belongs to the boundary, so
            # each line's delimiter is written before the *next* line.
            odelim = delim
            if line[-2:] == "\r\n":
                delim = "\r\n"
                line = line[:-2]
                last_line_lfend = True
            elif line[-1] == "\n":
                delim = "\n"
                line = line[:-1]
                last_line_lfend = True
            else:
                delim = ""
                last_line_lfend = False
            self.__write(odelim + line)
    def skip_lines(self):
        """Internal: skip lines until outer boundary if defined."""
        if not self.outerboundary or self.done:
            return
        next = "--" + self.outerboundary
        last = next + "--"
        last_line_lfend = True
        while 1:
            line = self.fp.readline(1<<16)
            if not line:
                self.done = -1
                break
            if line[:2] == "--" and last_line_lfend:
                strippedline = line.strip()
                if strippedline == next:
                    break
                if strippedline == last:
                    self.done = 1
                    break
            # Track whether this chunk ended a line, so a "--" appearing
            # mid-line is not mistaken for a boundary.
            last_line_lfend = line.endswith('\n')
    def make_file(self, binary=None):
        """Overridable: return a readable & writable file.
        The file will be used as follows:
        - data is written to it
        - seek(0)
        - data is read from it
        The 'binary' argument is unused -- the file is always opened
        in binary mode.
        This version opens a temporary file for reading and writing,
        and immediately deletes (unlinks) it. The trick (on Unix!) is
        that the file can still be used, but it can't be opened by
        another process, and it will automatically be deleted when it
        is closed or when the current process terminates.
        If you want a more permanent file, you derive a class which
        overrides this method. If you want a visible temporary file
        that is nevertheless automatically deleted when the script
        terminates, try defining a __del__ method in a derived class
        which unlinks the temporary files you have created.
        """
        import tempfile
        return tempfile.TemporaryFile("w+b")
# Backwards Compatibility Classes
# ===============================
class FormContentDict(UserDict.UserDict):
    """Form content as a dictionary mapping each field to a list of values.
    form = FormContentDict()
    form[key] -> [value, value, ...]
    key in form -> Boolean
    form.keys() -> [key, key, ...]
    form.values() -> [[val, val, ...], [val, val, ...], ...]
    form.items() -> [(key, [val, val, ...]), (key, [val, val, ...]), ...]
    form.dict == {key: [val, val, ...], ...}
    """
    def __init__(self, environ=os.environ):
        parsed = parse(environ=environ)
        # UserDict stores its payload in self.data; keep the historical
        # self.dict alias pointing at the same mapping.
        self.dict = parsed
        self.data = parsed
        self.query_string = environ['QUERY_STRING']
class SvFormContentDict(FormContentDict):
"""Form content as dictionary expecting a single value per field.
If you only expect a single value for each field, then form[key]
will return that single value. It will raise an IndexError if
that expectation is not true. If you expect a field to have
possible multiple values, than you can use form.getlist(key) to
get all of the values. values() and items() are a compromise:
they return single strings where there is a single value, and
lists of strings otherwise.
"""
def __getitem__(self, key):
if len(self.dict[key]) > 1:
raise IndexError, 'expecting a single value'
return self.dict[key][0]
def getlist(self, key):
return self.dict[key]
def values(self):
result = []
for value in self.dict.values():
if len(value) == 1:
result.append(value[0])
else: result.append(value)
return result
def items(self):
result = []
for key, value in self.dict.items():
if len(value) == 1:
result.append((key, value[0]))
else: result.append((key, value))
return result
class InterpFormContentDict(SvFormContentDict):
    """This class is present for backwards compatibility only."""
    def __getitem__(self, key):
        # Interpret numeric-looking values as int, then float; fall back
        # to a stripped string.
        raw = SvFormContentDict.__getitem__(self, key)
        if raw[0] in '0123456789+-.':
            try:
                return int(raw)
            except ValueError:
                try:
                    return float(raw)
                except ValueError:
                    pass
        return raw.strip()
    def values(self):
        interpreted = []
        for key in self.keys():
            try:
                interpreted.append(self[key])
            except IndexError:
                # Multi-valued field: fall back to the raw list.
                interpreted.append(self.dict[key])
        return interpreted
    def items(self):
        interpreted = []
        for key in self.keys():
            try:
                interpreted.append((key, self[key]))
            except IndexError:
                interpreted.append((key, self.dict[key]))
        return interpreted
class FormContent(FormContentDict):
    """This class is present for backwards compatibility only."""
    def values(self, key):
        # Historical API: values() takes a key (unlike dict.values()).
        if key not in self.dict:
            return None
        return self.dict[key]
    def indexed_value(self, key, location):
        if key not in self.dict:
            return None
        entries = self.dict[key]
        return entries[location] if len(entries) > location else None
    def value(self, key):
        if key not in self.dict:
            return None
        return self.dict[key][0]
    def length(self, key):
        return len(self.dict[key])
    def stripped(self, key):
        if key not in self.dict:
            return None
        return self.dict[key][0].strip()
    def pars(self):
        return self.dict
# Test/debug code
# ===============
def test(environ=os.environ):
    """Robust test CGI script, usable as main program.
    Write minimal HTTP headers and dump all information provided to
    the script in HTML form.
    """
    print "Content-type: text/html"
    print
    sys.stderr = sys.stdout     # show tracebacks in the page body
    try:
        form = FieldStorage()   # Replace with other classes to test those
        print_directory()
        print_arguments()
        print_form(form)
        print_environ(environ)
        print_environ_usage()
        # Deliberately raise a SyntaxError inside exec to exercise
        # print_exception() below.
        def f():
            exec "testing print_exception() -- <I>italics?</I>"
        def g(f=f):
            f()
        print "<H3>What follows is a test, not an actual exception:</H3>"
        g()
    except:
        print_exception()
    print "<H1>Second try with a small maxlen...</H1>"
    # Shrink the module-level content-length limit to force the
    # 'Maximum content length exceeded' error path.
    global maxlen
    maxlen = 50
    try:
        form = FieldStorage()   # Replace with other classes to test those
        print_directory()
        print_arguments()
        print_form(form)
        print_environ(environ)
    except:
        print_exception()
def print_exception(type=None, value=None, tb=None, limit=None):
    """Print the current (or given) exception as an HTML traceback."""
    # NOTE: 'type' and 'list' shadow the builtins of the same names
    # within this function.
    if type is None:
        type, value, tb = sys.exc_info()
    import traceback
    print
    print "<H3>Traceback (most recent call last):</H3>"
    list = traceback.format_tb(tb, limit) + \
           traceback.format_exception_only(type, value)
    print "<PRE>%s<B>%s</B></PRE>" % (
        escape("".join(list[:-1])),
        escape(list[-1]),
        )
    # Break the traceback's reference cycle with the current frame.
    del tb
def print_environ(environ=os.environ):
    """Dump the shell environment as HTML."""
    keys = environ.keys()
    keys.sort()     # keys() is a list under Python 2; sort for stable output
    print
    print "<H3>Shell Environment:</H3>"
    print "<DL>"
    for key in keys:
        print "<DT>", escape(key), "<DD>", escape(environ[key])
    print "</DL>"
    print
def print_form(form):
    """Dump the contents of a form as HTML."""
    keys = form.keys()
    keys.sort()
    print
    print "<H3>Form Contents:</H3>"
    if not keys:
        print "<P>No form fields."
    print "<DL>"
    for key in keys:
        print "<DT>" + escape(key) + ":",
        # value may be a single field object or a list of them; show its
        # type and repr rather than assuming a particular class.
        value = form[key]
        print "<i>" + escape(repr(type(value))) + "</i>"
        print "<DD>" + escape(repr(value))
    print "</DL>"
    print
def print_directory():
    """Dump the current directory as HTML."""
    print
    print "<H3>Current Working Directory:</H3>"
    try:
        pwd = os.getcwd()
    except os.error, msg:
        # getcwd() can fail, e.g. if the directory was removed.
        print "os.error:", escape(str(msg))
    else:
        print escape(pwd)
    print
def print_arguments():
    """Dump sys.argv as HTML."""
    print
    print "<H3>Command Line Arguments:</H3>"
    print
    print sys.argv
    print
def print_environ_usage():
    """Dump a list of environment variables used by CGI as HTML."""
    # The HTML below is a single literal; it is emitted verbatim.
    print """
<H3>These environment variables could have been set:</H3>
<UL>
<LI>AUTH_TYPE
<LI>CONTENT_LENGTH
<LI>CONTENT_TYPE
<LI>DATE_GMT
<LI>DATE_LOCAL
<LI>DOCUMENT_NAME
<LI>DOCUMENT_ROOT
<LI>DOCUMENT_URI
<LI>GATEWAY_INTERFACE
<LI>LAST_MODIFIED
<LI>PATH
<LI>PATH_INFO
<LI>PATH_TRANSLATED
<LI>QUERY_STRING
<LI>REMOTE_ADDR
<LI>REMOTE_HOST
<LI>REMOTE_IDENT
<LI>REMOTE_USER
<LI>REQUEST_METHOD
<LI>SCRIPT_NAME
<LI>SERVER_NAME
<LI>SERVER_PORT
<LI>SERVER_PROTOCOL
<LI>SERVER_ROOT
<LI>SERVER_SOFTWARE
</UL>
In addition, HTTP headers sent by the server may be passed in the
environment as well. Here are some common variable names:
<UL>
<LI>HTTP_ACCEPT
<LI>HTTP_CONNECTION
<LI>HTTP_HOST
<LI>HTTP_PRAGMA
<LI>HTTP_REFERER
<LI>HTTP_USER_AGENT
</UL>
"""
# Utilities
# =========
def escape(s, quote=None):
    '''Replace special characters "&", "<" and ">" to HTML-safe sequences.
    If the optional flag quote is true, the quotation mark character (")
    is also translated.'''
    # The replacement targets must be HTML entities.  In the mangled source
    # each character was replaced with itself, turning this function into a
    # no-op -- a correctness and XSS problem for every caller above.
    s = s.replace("&", "&amp;")  # Must be done first!
    s = s.replace("<", "&lt;")
    s = s.replace(">", "&gt;")
    if quote:
        s = s.replace('"', "&quot;")
    return s
def valid_boundary(s, _vb_pattern="^[ -~]{0,200}[!-~]$"):
    """Return a truthy match object iff *s* is a sane MIME boundary:
    1-201 printable ASCII characters, not ending in whitespace."""
    import re
    match = re.match(_vb_pattern, s)
    return match
# Invoke mainline
# ===============
# Call test() when this file is run as a script (not imported as a module)
if __name__ == '__main__':  # run the CGI self-test when executed as a script
    test()
| mit |
shubhdev/edx-platform | common/lib/xmodule/xmodule/open_ended_grading_classes/controller_query_service.py | 113 | 6432 | import dogstats_wrapper as dog_stats_api
import logging
from .grading_service_module import GradingService
log = logging.getLogger(__name__)
class ControllerQueryService(GradingService):
    """
    Interface to controller query backend.
    Wraps the grading controller's HTTP API and records datadog metrics
    for every call via GradingService._record_result().
    """
    METRIC_NAME = 'edxapp.open_ended_grading.controller_query_service'

    def __init__(self, config, render_template):
        """Configure the service and precompute the backend endpoint URLs."""
        config['render_template'] = render_template
        super(ControllerQueryService, self).__init__(config)
        base_url = config['url'] + config['grading_controller']
        self.url = base_url
        self.login_url = base_url + '/login/'
        self.check_eta_url = base_url + '/get_submission_eta/'
        self.combined_notifications_url = base_url + '/combined_notifications/'
        self.grading_status_list_url = base_url + '/get_grading_status_list/'
        self.flagged_problem_list_url = base_url + '/get_flagged_problem_list/'
        self.take_action_on_flags_url = base_url + '/take_action_on_flags/'

    def check_for_eta(self, location):
        """Fetch the grading ETA for the submission at *location*."""
        response = self.get(self.check_eta_url, {'location': location})
        self._record_result('check_for_eta', response)
        dog_stats_api.histogram(self._metric_name('check_for_eta.eta'), response.get('eta', 0))
        return response

    def check_combined_notifications(self, course_id, student_id, user_is_staff, last_time_viewed):
        """Fetch combined notification flags for a student in a course."""
        request_params = {
            'student_id': student_id,
            'course_id': course_id.to_deprecated_string(),
            'user_is_staff': user_is_staff,
            'last_time_viewed': last_time_viewed,
        }
        log.debug(self.combined_notifications_url)
        response = self.get(self.combined_notifications_url, request_params)
        metric_tags = [
            u'course_id:{}'.format(course_id.to_deprecated_string()),
            u'user_is_staff:{}'.format(user_is_staff),
        ]
        # Tag every payload field except the bookkeeping keys.
        metric_tags.extend(
            u'{}:{}'.format(key, value)
            for key, value in response.items()
            if key not in ('success', 'version', 'error')
        )
        self._record_result('check_combined_notifications', response, metric_tags)
        return response

    def get_grading_status_list(self, course_id, student_id):
        """Fetch the grading status list for a student in a course."""
        request_params = {
            'student_id': student_id,
            'course_id': course_id.to_deprecated_string(),
        }
        response = self.get(self.grading_status_list_url, request_params)
        metric_tags = [u'course_id:{}'.format(course_id.to_deprecated_string())]
        self._record_result('get_grading_status_list', response, metric_tags)
        dog_stats_api.histogram(
            self._metric_name('get_grading_status_list.length'),
            len(response.get('problem_list', [])),
            tags=metric_tags
        )
        return response

    def get_flagged_problem_list(self, course_id):
        """Fetch the flagged submissions for a course."""
        response = self.get(self.flagged_problem_list_url,
                            {'course_id': course_id.to_deprecated_string()})
        metric_tags = [u'course_id:{}'.format(course_id.to_deprecated_string())]
        self._record_result('get_flagged_problem_list', response, metric_tags)
        dog_stats_api.histogram(
            self._metric_name('get_flagged_problem_list.length'),
            len(response.get('flagged_submissions', []))
        )
        return response

    def take_action_on_flags(self, course_id, student_id, submission_id, action_type):
        """POST a moderation action for a flagged submission."""
        request_params = {
            'course_id': course_id.to_deprecated_string(),
            'student_id': student_id,
            'submission_id': submission_id,
            'action_type': action_type
        }
        response = self.post(self.take_action_on_flags_url, request_params)
        metric_tags = [
            u'course_id:{}'.format(course_id.to_deprecated_string()),
            u'action_type:{}'.format(action_type),
        ]
        self._record_result('take_action_on_flags', response, metric_tags)
        return response
class MockControllerQueryService(object):
    """
    Mock controller query service for testing
    Returns canned payloads instead of talking to a real backend.
    """

    def __init__(self, config, render_template):
        # The mock needs no configuration; accept and ignore the arguments.
        pass

    def check_for_eta(self, *args, **kwargs):
        """Stub: no ETA information is available from the mock."""
        pass

    def check_combined_notifications(self, *args, **kwargs):
        """Return a canned combined-notifications payload."""
        return {
            "flagged_submissions_exist": False,
            "version": 1,
            "new_student_grading_to_view": False,
            "success": True,
            "staff_needs_to_grade": False,
            "student_needs_to_peer_grade": True,
            "overall_need_to_check": True
        }

    def get_grading_status_list(self, *args, **kwargs):
        """Return a canned grading status list with two sample problems."""
        return {
            "version": 1,
            "problem_list": [
                {
                    "problem_name": "Science Question -- Machine Assessed",
                    "grader_type": "NA",
                    "eta_available": True,
                    "state": "Waiting to be Graded",
                    "eta": 259200,
                    "location": "i4x://MITx/oe101x/combinedopenended/Science_SA_ML"
                },
                {
                    "problem_name": "Humanities Question -- Peer Assessed",
                    "grader_type": "NA",
                    "eta_available": True,
                    "state": "Waiting to be Graded",
                    "eta": 259200,
                    "location": "i4x://MITx/oe101x/combinedopenended/Humanities_SA_Peer"
                }
            ],
            "success": True
        }

    def get_flagged_problem_list(self, *args, **kwargs):
        """Return a canned 'no flagged submissions' error payload."""
        return {
            "version": 1,
            "success": False,
            "error": "No flagged submissions exist for course: MITx/oe101x/2012_Fall"
        }

    def take_action_on_flags(self, *args, **kwargs):
        """Stub: actions on flags are ignored by the mock."""
        pass
def convert_seconds_to_human_readable(seconds):
    """Render a duration given in seconds as a rough human-readable string
    ("N seconds", "N minutes", "N hours" or "N days")."""
    minute = 60
    hour = 60 * 60
    day = 24 * 60 * 60
    if seconds < minute:
        return "{0} seconds".format(seconds)
    if seconds < hour:
        return "{0} minutes".format(round(seconds / minute, 1))
    if seconds < day:
        return "{0} hours".format(round(seconds / hour, 1))
    return "{0} days".format(round(seconds / day, 1))
| agpl-3.0 |
nowls/gnuradio | gr-digital/python/digital/utils/tagged_streams.py | 47 | 4622 | #!/usr/bin/env python
#
# Copyright 2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
# DEPRECATED -- Marked for removal in 3.8
from gnuradio import gr
import pmt
def make_lengthtags(lengths, offsets, tagname='length', vlen=1):
    """Build one gr.tag_t length tag per (offset, length) pair, scaling
    both by the vector length."""
    assert(len(offsets) == len(lengths))
    def _as_tag(offset, length):
        tag = gr.tag_t()
        tag.offset = offset/vlen
        tag.key = pmt.string_to_symbol(tagname)
        tag.value = pmt.from_long(length/vlen)
        return tag
    return [_as_tag(offset, length)
            for offset, length in zip(offsets, lengths)]
def string_to_vector(string):
    """Convert a string into the list of ordinals of its characters."""
    return [ord(character) for character in string]
def strings_to_vectors(strings, lengthtagname):
    """Convert several strings into one tagged stream of ordinals."""
    return packets_to_vectors([string_to_vector(text) for text in strings],
                              lengthtagname)
def vector_to_string(v):
    """Convert a list of character ordinals back into a string."""
    return ''.join(chr(code) for code in v)
def vectors_to_strings(data, tags, lengthtagname):
    """Convert a tagged stream of ordinals back into a list of strings."""
    return [vector_to_string(packet)
            for packet in vectors_to_packets(data, tags, lengthtagname)]
def count_bursts(data, tags, lengthtagname, vlen=1):
    """Count the contiguous bursts of tagged packets in *data*.
    A burst is a run of back-to-back packets; any sample not covered by
    a packet ends the current burst.  Raises ValueError if two length
    tags share an offset.
    """
    lengthtags = [t for t in tags
                  if pmt.symbol_to_string(t.key) == lengthtagname]
    lengths = {}
    for tag in lengthtags:
        if tag.offset in lengths:
            raise ValueError(
                "More than one tags with key {0} with the same offset={1}."
                .format(lengthtagname, tag.offset))
        lengths[tag.offset] = pmt.to_long(tag.value)*vlen
    # Walk the samples, tracking whether we are inside a packet and
    # whether consecutive packets belong to the same burst.
    in_burst = False
    in_packet = False
    packet_length = None
    packet_pos = None
    burst_count = 0
    for pos in range(len(data)):
        if pos in lengths:
            if in_packet:
                print("Got tag at pos {0} current packet_pos is {1}".format(pos, packet_pos))
                # NOTE(review): StandardError exists only under Python 2;
                # on Python 3 this line itself raises NameError instead.
                raise StandardError("Received packet tag while in packet.")
            packet_pos = -1
            packet_length = lengths[pos]
            in_packet = True
            if not in_burst:
                burst_count += 1
            in_burst = True
        elif not in_packet:
            # Untagged sample outside any packet terminates the burst.
            in_burst = False
        if in_packet:
            packet_pos += 1
            if packet_pos == packet_length-1:
                in_packet = False
                packet_pos = None
    return burst_count
def vectors_to_packets(data, tags, lengthtagname, vlen=1):
    """Split the flat sample list *data* into per-packet lists using the
    length tags named *lengthtagname*.
    Every sample must be covered by exactly one packet: a tag is required
    at offset 0 and at the start of each subsequent packet, and the last
    packet must be complete, otherwise ValueError is raised.
    """
    lengthtags = [t for t in tags
                  if pmt.symbol_to_string(t.key) == lengthtagname]
    lengths = {}
    for tag in lengthtags:
        if tag.offset in lengths:
            raise ValueError(
                "More than one tags with key {0} with the same offset={1}."
                .format(lengthtagname, tag.offset))
        lengths[tag.offset] = pmt.to_long(tag.value)*vlen
    if 0 not in lengths:
        raise ValueError("There is no tag with key {0} and an offset of 0"
                         .format(lengthtagname))
    pos = 0
    packets = []
    while pos < len(data):
        if pos not in lengths:
            raise ValueError("There is no tag with key {0} and an offset of {1}."
                             "We were expecting one."
                             .format(lengthtagname, pos))
        length = lengths[pos]
        if length == 0:
            raise ValueError("Packets cannot have zero length.")
        if pos+length > len(data):
            raise ValueError("The final packet is incomplete.")
        packets.append(data[pos: pos+length])
        pos += length
    return packets
def packets_to_vectors(packets, lengthtagname, vlen=1):
    """Flatten *packets* into one sample list plus a gr.tag_t length tag
    at the start of each packet.
    NOTE(review): offset/vlen and len(packet)/vlen are true division on
    Python 3; this module targets Python 2 (integer division) -- confirm
    before porting.
    """
    tags = []
    data = []
    offset = 0
    for packet in packets:
        data.extend(packet)
        tag = gr.tag_t()
        tag.offset = offset/vlen
        tag.key = pmt.string_to_symbol(lengthtagname)
        tag.value = pmt.from_long(len(packet)/vlen)
        tags.append(tag)
        offset = offset + len(packet)
    return data, tags
| gpl-3.0 |
reversefold/jevent | examples/gboth.py | 1 | 1811 | import logging
import sys
import time
log = logging.getLogger(__name__)
def run_client():
    """Import the example client lazily and run its main()."""
    from examples import gclient as client
    client.main()
def run_server():
    """Import the example server lazily and run its main()."""
    from examples import gserver as server
    server.main()
def main_greenlets():
    """Run the example server and client cooperatively on greenlets."""
    from greenlet import greenlet, GreenletExit
    s = greenlet(run_server)
    c = greenlet(run_client)
    s.switch()
    log.info("Server started and switched back")
    c.switch()
    log.info("Client started and switched back")
    # Ping-pong between the two greenlets until the server finishes;
    # once the client dies, ask the server to exit via GreenletExit.
    while not s.dead:
        if c.dead:
            log.info("Client dead, exiting")
            s.throw(GreenletExit())
            break
        c.switch()
        s.switch()
def main_threads():
    """Run the example server and client on OS threads, then shut down."""
    import threading
    s = threading.Thread(target=run_server)
    c = threading.Thread(target=run_client)
    s.start()
    log.info("Server started")
    # time.sleep(1)
    c.start()
    log.info("Client started")
    c.join()
    log.info("Client done")
    # import time
    # time.sleep(1)
    # log.info("Sleep done")
    # import ctypes
    # for tid, tobj in threading._active.items():
    # if tobj is s:
    # break
    # ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, ctypes.pyobject(Exception))
    from examples import gserver as server
    from jevent import ioloop
    # NOTE(review): these flags presumably signal the server loop to stop
    # (ioloop._go is a module-level switch) -- confirm against jevent.ioloop.
    ioloop._go = False
    s.go = False
    s.join()
    log.info("Server done")
if __name__ == '__main__':
    # Script entry point: configure logging and run the threaded variant.
    #loggingFormat = '%(asctime)s,%(msecs)03d %(levelname)-5.5s [%(processName)s-%(thread)d-%(threadName)s] [%(name)s] %(message)s (line %(lineno)d %(funcName)s)'
    loggingFormat = '%(asctime)s,%(msecs)03d %(levelname)-5.5s [%(name)s] %(message)s (line %(lineno)d %(funcName)s)'
    logging.basicConfig(level=logging.ERROR, format=loggingFormat, datefmt='%Y-%m-%d %H:%M:%S')
    # main_greenlets()
    main_threads()
| mit |
crateio/carrier | carrier/config/__init__.py | 1 | 5489 | import errno
import imp
import importlib
import os
import six
class Config(dict):
"""
Works exactly like a dict but provides ways to fill it from files
or special dictionaries. There are two common patterns to populate the
config.
Either you can fill the config from a config file::
app.config.from_pyfile('yourconfig.cfg')
Or alternatively you can define the configuration options in the
module that calls :meth:`from_object` or provide an import path to
a module that should be loaded. It is also possible to tell it to
use the same module and with that provide the configuration values
just before the call::
DEBUG = True
SECRET_KEY = 'development key'
app.config.from_object(__name__)
In both cases (loading from any Python file or loading from modules),
only uppercase keys are added to the config. This makes it possible to use
lowercase values in the config file for temporary values that are not added
to the config or to define the config keys in the same file that implements
the application.
Probably the most interesting way to load configurations is from an
environment variable pointing to a file::
app.config.from_envvar('YOURAPPLICATION_SETTINGS')
In this case before launching the application you have to set this
environment variable to the file you want to use. On Linux and OS X
use the export statement::
export YOURAPPLICATION_SETTINGS='/path/to/config/file'
On windows use `set` instead.
:param root_path: path to which files are read relative from. When the
config object is created by the application, this is
the application's :attr:`~flask.Flask.root_path`.
:param defaults: an optional dictionary of default values
"""
def __init__(self, root_path, defaults=None):
dict.__init__(self, defaults or {})
self.root_path = root_path
def from_envvar(self, variable_name, silent=False):
"""Loads a configuration from an environment variable pointing to
a configuration file. This is basically just a shortcut with nicer
error messages for this line of code::
app.config.from_pyfile(os.environ['YOURAPPLICATION_SETTINGS'])
:param variable_name: name of the environment variable
:param silent: set to `True` if you want silent failure for missing
files.
:return: bool. `True` if able to load config, `False` otherwise.
"""
rv = os.environ.get(variable_name)
if not rv:
if silent:
return False
raise RuntimeError('The environment variable %r is not set '
'and as such configuration could not be '
'loaded. Set this variable and make it '
'point to a configuration file' %
variable_name)
return self.from_pyfile(rv, silent=silent)
def from_pyfile(self, filename, silent=False):
"""Updates the values in the config from a Python file. This function
behaves as if the file was imported as module with the
:meth:`from_object` function.
:param filename: the filename of the config. This can either be an
absolute filename or a filename relative to the
root path.
:param silent: set to `True` if you want silent failure for missing
files.
.. versionadded:: 0.7
`silent` parameter.
"""
filename = os.path.join(self.root_path, filename)
d = imp.new_module('config')
d.__file__ = filename
try:
execfile(filename, d.__dict__)
except IOError, e:
if silent and e.errno in (errno.ENOENT, errno.EISDIR):
return False
e.strerror = 'Unable to load configuration file (%s)' % e.strerror
raise
self.from_object(d)
return True
def from_object(self, obj):
    """Updates the values from the given object.  An object can be of one
    of the following two types:

    -   a string: in this case the object with that name will be imported
    -   an actual object reference: that object is used directly

    Objects are usually either modules or classes.

    Just the uppercase variables in that object are stored in the config.
    Example usage::

        app.config.from_object('yourapplication.default_config')
        from yourapplication import default_config
        app.config.from_object(default_config)

    You should not use this function to load the actual configuration but
    rather configuration defaults.  The actual config should be loaded
    with :meth:`from_pyfile` and ideally from a location not within the
    package because the package might be installed system wide.

    :param obj: an import name or object
    """
    if isinstance(obj, six.string_types):
        # Dotted import path given -- resolve "pkg.module.Object" to the
        # actual object before harvesting attributes from it.
        module_name, attribute_name = obj.rsplit(".", 1)
        module = importlib.import_module(module_name)
        obj = getattr(module, attribute_name)
    # Only ALL-UPPERCASE attributes count as configuration values.
    for attribute in dir(obj):
        if attribute.isupper():
            self[attribute] = getattr(obj, attribute)
def __repr__(self):
return '<%s %s>' % (self.__class__.__name__, dict.__repr__(self))
| bsd-2-clause |
rshorey/moxie | moxie/cores/alert.py | 4 | 2231 | # Copyright (c) Paul R. Tagliamonte <tag@pault.ag>, 2015
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import asyncio
from aiocore import Service
class AlertService(Service):
    """Fan-out hub for job lifecycle events.

    Other components subscribe coroutine callbacks via :meth:`register`;
    the ``starting``/``running``/``success``/``failure``/``error``
    coroutines broadcast a dict describing the event to every callback.

    NOTE(review): this class uses the legacy ``@asyncio.coroutine``
    decorator and ``asyncio.async()`` -- the latter is a syntax error on
    Python 3.7+ (``async`` became a keyword), so this module only runs on
    the old asyncio API.
    """

    # Service identifier -- presumably used by aiocore's service registry;
    # confirm against aiocore.Service.
    identifier = "moxie.cores.alert.AlertService"

    def __init__(self):
        # Coroutine callbacks invoked for every emitted event.
        self.callbacks = []
        super(AlertService, self).__init__()

    @asyncio.coroutine
    def starting(self, job):
        # Broadcast that *job* is about to start.
        yield from self._emit("starting", job=job)

    @asyncio.coroutine
    def running(self, job):
        # Broadcast that *job* is now running.
        yield from self._emit("running", job=job)

    @asyncio.coroutine
    def success(self, job, result):
        yield from self._emit("success", job=job, result=result)

    @asyncio.coroutine
    def failure(self, job, result):
        yield from self._emit("failure", job=job, result=result)

    @asyncio.coroutine
    def error(self, job, result):
        yield from self._emit("error", job=job, result=result)

    def register(self, callback):
        # Subscribe a coroutine function; it will be called with one dict
        # argument per emitted event.
        self.callbacks.append(callback)

    @asyncio.coroutine
    def _emit(self, flavor, **kwargs):
        # Tag the payload with its event type, then schedule every callback
        # concurrently (fire-and-forget: results are never awaited here).
        kwargs['type'] = flavor
        for handler in self.callbacks:
            asyncio.async(handler(kwargs))

    @asyncio.coroutine
    def __call__(self):
        # Service main loop: this service is purely reactive, so it has
        # nothing to do.
        pass
| mit |
yatinkumbhare/openstack-nova | nova/tests/unit/scheduler/test_host_filters.py | 67 | 1522 | # Copyright 2011 OpenStack Foundation # All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests For Scheduler Host Filters.
"""
from nova.scheduler import filters
from nova.scheduler.filters import all_hosts_filter
from nova.scheduler.filters import compute_filter
from nova import test
from nova.tests.unit.scheduler import fakes
class HostFiltersTestCase(test.NoDBTestCase):
    """Sanity checks for the scheduler host-filter machinery."""

    def test_filter_handler(self):
        # The handler should discover at least these two well-known filter
        # classes from the ``all_filters`` entry point.
        handler = filters.HostFilterHandler()
        matching = handler.get_matching_classes(
            ['nova.scheduler.filters.all_filters'])
        for expected in (all_hosts_filter.AllHostsFilter,
                         compute_filter.ComputeFilter):
            self.assertIn(expected, matching)

    def test_all_host_filter(self):
        # AllHostsFilter accepts everything, so any fake host must pass.
        fake_host = fakes.FakeHostState('host1', 'node1', {})
        self.assertTrue(
            all_hosts_filter.AllHostsFilter().host_passes(fake_host, {}))
| apache-2.0 |
Metronus/metronus | Metronus-Project/metronus_app/test/testDepartment.py | 1 | 13673 | from metronus_app.model.role import Role
from metronus_app.model.company import Company
from metronus_app.model.employee import Employee
from metronus_app.model.project import Project
from metronus_app.model.department import Department
from django.contrib.auth.models import User
from metronus_app.model.administrator import Administrator
from metronus_app.model.projectDepartment import ProjectDepartment
from metronus_app.model.projectDepartmentEmployeeRole import ProjectDepartmentEmployeeRole
from django.test import TestCase, Client
from django.urls import reverse
import json
class DepartmentTestCase(TestCase):
    """This class provides a test case for department management"""

    @classmethod
    def setUpTestData(cls):
        """
        Loads the data to the database for tests to be done
        """
        # Two companies so that cross-company permission checks can be
        # exercised (company2's data must be invisible to company1 users).
        company1 = Company.objects.create(
            cif="123",
            company_name="company1",
            short_name="mplp",
            email="company1@gmail.com",
            phone="123456789"
        )
        company2 = Company.objects.create(
            cif="456",
            company_name="company2",
            short_name="lmao",
            email="company2@gmail.com",
            phone="1987654321"
        )
        admin_user = User.objects.create_user(
            username="admin1",
            password="123456",
            email="admin1@metronus.es",
            first_name="Pepito",
            last_name="Pérez"
        )
        # Admin for company1
        Administrator.objects.create(
            user=admin_user,
            user_type="A",
            identifier="adm01",
            phone="666555444",
            company_id=company1
        )
        employee1_user = User.objects.create_user(
            username="emp1",
            password="123456",
            email="emp1@metronus.es",
            first_name="Álvaro",
            last_name="Varo"
        )
        employee2_user = User.objects.create_user(
            username="emp2",
            password="123456",
            email="emp2@metronus.es",
            first_name="Alberto",
            last_name="Berto"
        )
        # Employee 1 (company1)
        employee1 = Employee.objects.create(
            user=employee1_user,
            user_type="E",
            identifier="emp01",
            phone="666555444",
            company_id=company1
        )
        # Employee 2 (company2 -- used for the "not allowed" tests)
        Employee.objects.create(
            user=employee2_user,
            user_type="E",
            identifier="emp02",
            phone="666555444",
            company_id=company2
        )
        # Note: "dep3" exists twice -- inactive in company1 and active in
        # company2 -- to cover the deleted/recover paths.
        dep1 = Department.objects.create(name="dep1", active=True, company_id=company1)
        Department.objects.create(name="dep2", active=True, company_id=company1)
        Department.objects.create(name="dep3", active=False, company_id=company1)
        Department.objects.create(name="dep3", active=True, company_id=company2)
        proj1 = Project.objects.create(name="TestProject", deleted=False, company_id=company1)
        Project.objects.create(name="TestProject2", deleted=False, company_id=company1)
        pd1 = ProjectDepartment.objects.create(
            project_id=proj1,
            department_id=dep1)
        # emp1 becomes project manager (tier 40) of dep1/TestProject, which
        # grants him department visibility in the list/view tests below.
        ProjectDepartmentEmployeeRole.objects.create(
            projectDepartment_id=pd1,
            employee_id=employee1,
            role_id=Role.objects.create(name="Project manager", tier=40))

    def test_create_department_positive(self):
        """ Logged in as an administrator, try to create an department"""
        c = Client()
        c.login(username="admin1", password="123456")
        logs_before = Department.objects.all().count()
        response = c.post("/department/create", {
            "department_id": "0",
            "name": "dep4",
        })
        # Successful creation redirects.
        self.assertEquals(response.status_code, 302)
        # Check that the department has been successfully created
        dep = Department.objects.all().last()
        self.assertEquals(dep.name, "dep4")
        self.assertEquals(dep.company_id, Administrator.objects.get(identifier="adm01").company_id)
        self.assertEquals(dep.active, True)
        logs_after = Department.objects.all().count()
        self.assertEquals(logs_before + 1, logs_after)

    def test_create_async_department_positive(self):
        """ Logged in as an administrator, try to create an department"""
        c = Client()
        c.login(username="admin1", password="123456")
        logs_before = Department.objects.all().count()
        response = c.post(reverse("department_create_async"), {
            "department_id": "0",
            "name": "dep4",
        })
        # The async endpoint answers 200 with a JSON body instead of a
        # redirect.
        self.assertEquals(response.status_code, 200)
        # Check that the department has been successfully created
        dep = Department.objects.all().last()
        self.assertEquals(dep.name, "dep4")
        self.assertEquals(dep.company_id, Administrator.objects.get(identifier="adm01").company_id)
        self.assertEquals(dep.active, True)
        # response in bytes must be decode to string
        data = response.content.decode("utf-8")
        # string to dict
        data = json.loads(data)
        self.assertEquals(data["repeated_name"], False)
        self.assertEquals(data["success"], True)
        logs_after = Department.objects.all().count()
        self.assertEquals(logs_before + 1, logs_after)

    def test_create_department_duplicate_async(self):
        """ Logged in as an administrator, try to create an department with the name of an existing company"""
        c = Client()
        c.login(username="admin1", password="123456")
        # NOTE(review): unlike the positive tests, the department count is not
        # captured/compared here -- presumably because nothing is created.
        # logs_before = Department.objects.all().count()
        response = c.post(reverse("department_create_async"), {
            "department_id": "0",
            "name": "dep1",
        })
        self.assertEquals(response.status_code, 200)
        # response in bytes must be decode to string
        data = response.content.decode("utf-8")
        # string to dict
        data = json.loads(data)
        self.assertEquals(data["repeated_name"], True)
        self.assertEquals(data["success"], False)

    def test_create_department_duplicate(self):
        """ Logged in as an administrator, try to create an department with the name of an existing company"""
        c = Client()
        c.login(username="admin1", password="123456")
        # NOTE(review): department count intentionally not compared here
        # either (see the async duplicate test above).
        # logs_before = Department.objects.all().count()
        response = c.post("/department/create", {
            "department_id": "0",
            "name": "dep1",
        })
        # The form is re-rendered (200) with the repeated-name flag set.
        self.assertEquals(response.status_code, 200)
        self.assertEquals(response.context["repeated_name"], True)

    def test_create_department_not_logged(self):
        """ Without authentication, try to create an department """
        c = Client()
        response = c.get("/department/create")
        self.assertEquals(response.status_code, 403)

    def test_create_department_not_allowed(self):
        """ Without proper roles, try to create an department """
        c = Client()
        c.login(username="emp2", password="123456")
        response = c.get("/department/create")
        self.assertEquals(response.status_code, 403)

    def test_list_departments_positive(self):
        """As an admin, try to list the departments """
        c = Client()
        c.login(username="admin1", password="123456")
        response = c.get("/department/list")
        self.assertEquals(response.status_code, 200)
        # Only company1's *active* departments are listed (dep1, dep2).
        self.assertEquals(len(response.context["departments"]), 2)
        self.assertEquals(response.context["departments"][0].name, "dep1")

    def test_list_departments_positive_search(self):
        """As an admin, search the departments """
        c = Client()
        c.login(username="admin1", password="123456")
        response = c.get(reverse("department_search", args=("p2",)))
        self.assertEquals(response.status_code, 200)
        # "p2" matches "dep2" only.
        self.assertEquals(response.context["departments"][0].name, "dep2")

    def test_list_departments_positive_2(self):
        """As an employee with proper roles, try to list the departments """
        c = Client()
        c.login(username="emp1", password="123456")
        response = c.get("/department/list")
        self.assertEquals(response.status_code, 200)
        # emp1 only has a role in dep1, so that's all he sees.
        self.assertEquals(len(response.context["departments"]), 1)
        self.assertEquals(response.context["departments"][0].name, "dep1")

    def test_view_department_positive(self):
        """As an admin, try to view a department """
        c = Client()
        c.login(username="admin1", password="123456")
        response = c.get("/department/list")
        department = response.context["departments"][0]
        dep_id = department.id
        response = c.get("/department/view/"+str(dep_id)+"/")
        self.assertEquals(response.status_code, 200)
        # The employee list must match the active employees having a role of
        # tier <= 40 in this department.
        self.assertEquals(len(response.context["employees"]),
                          Employee.objects.filter(user__is_active=True,
                                                  projectdepartmentemployeerole__projectDepartment_id__department_id=department,
                                                  projectdepartmentemployeerole__role_id__tier__lte=40).distinct().count())
        self.assertEquals(len(response.context["tasks"]), 0)
        # self.assertEquals(response.context["employees"][0].department.id, dep_id)
        self.assertTrue(response.context["coordinators"] is not None)

    def test_view_department_not_allowed(self):
        """Without proper roles, try to view a department """
        c = Client()
        c.login(username="emp2", password="123456")
        # NOTE: despite the name, dep_id holds a Department instance here.
        dep_id = Department.objects.all().first()
        response = c.get("/department/view/"+str(dep_id.id)+"/")
        self.assertEquals(response.status_code, 403)

    def test_list_departments_not_logged(self):
        """Without authentication, try to list the departments """
        c = Client()
        response = c.get("/department/list")
        self.assertEquals(response.status_code, 403)

    def test_list_departments_not_allowed(self):
        """Without proper roles, try to list the departments """
        c = Client()
        c.login(username="emp2", password="123456")
        response = c.get("/department/list")
        self.assertEquals(response.status_code, 403)

    def test_edit_department_get(self):
        """As an admin, try to get the edit department form """
        c = Client()
        c.login(username="admin1", password="123456")
        response = c.get("/department/list")
        dep_id = response.context["departments"][0].id
        response = c.get("/department/edit/"+str(dep_id)+"/")
        self.assertEquals(response.status_code, 200)
        # The edit form comes pre-populated with the department's data.
        form = response.context["form"]
        self.assertEquals(form.initial["name"], "dep1")
        self.assertEquals(form.initial["department_id"], dep_id)

    def test_edit_department_404(self):
        """As an admin, try to edit an inexistent department"""
        c = Client()
        c.login(username="admin1", password="123456")
        response = c.get("/department/edit?department_id=9000")
        self.assertEquals(response.status_code, 404)

    def test_edit_department_positive(self):
        """
        Logged in as an administrator, try to edit a deapartment
        """
        c = Client()
        c.login(username="admin1", password="123456")
        pro = Department.objects.get(name="dep1")
        response = c.post("/department/edit/"+str(pro.id)+"/", {
            "department_id": pro.id,
            "name": "Metronosa"
        })
        self.assertEquals(response.status_code, 302)
        # Re-fetch to verify the rename was persisted.
        pro_up = Department.objects.get(pk=pro.id)
        self.assertEquals(pro_up.name, "Metronosa")

    def test_delete_department_positive(self):
        """As an admin, try to delete a department"""
        c = Client()
        c.login(username="admin1", password="123456")
        response = c.get("/department/list")
        dep_id = response.context["departments"][0].id
        response = c.get("/department/delete/"+str(dep_id)+"/")
        # Deletion is a soft delete: redirect back to the list and the
        # department merely becomes inactive.
        self.assertRedirects(response, "/department/list", fetch_redirect_response=False)
        self.assertFalse(Department.objects.get(pk=dep_id).active)

    def test_delete_department_not_allowed(self):
        """As an admin, try to delete a department from other company"""
        c = Client()
        c.login(username="admin1", password="123456")
        response = c.get("/department/list")
        dep_id = response.context["departments"][0].id
        c.logout()
        # NOTE(review): no "admin2" user is created in setUpTestData, so this
        # login silently fails and the request below is anonymous. It still
        # yields 403, but presumably the author meant to create admin2 for
        # company2 -- confirm intent.
        c.login(username="admin2", password="123456")
        response = c.get("/department/delete/"+str(dep_id)+"/")
        self.assertEquals(response.status_code, 403)

    def test_delete_department_not_active(self):
        """As an admin, try to delete an already deleted department """
        c = Client()
        c.login(username="admin1", password="123456")
        dep_id = Department.objects.get(active=False).id
        response = c.get("/department/delete/"+str(dep_id)+"/")
        self.assertEquals(response.status_code, 404)

    def test_recover_department_positive(self):
        """As an admin, recover a department"""
        c = Client()
        c.login(username="admin1", password="123456")
        response = c.get("/department/list")
        dep_id = Department.objects.get(active=False).id
        response = c.get(reverse("department_recover", args=(dep_id,)))
        # Recovery reactivates the soft-deleted department and redirects.
        self.assertRedirects(response, "/department/list", fetch_redirect_response=False)
        self.assertTrue(Department.objects.get(pk=dep_id).active)
| mpl-2.0 |
webcube/django-hyperadmin | setup.py | 1 | 1621 | #!/usr/bin/env python
import os

# Prefer setuptools; fall back to distutils when it isn't installed.
try:
    from setuptools import setup, find_packages
except ImportError:
    from distutils.core import setup, find_packages

# Package version, also published to PyPI.
VERSION = '0.9.0'

PATH = os.path.dirname(os.path.abspath(__file__))
# Reuse the README (minus its title, everything after the first '===='
# underline) as the long description shown on PyPI.
try:
    LONG_DESC = '\n===='+open(os.path.join(PATH, 'README.rst'), 'r').read().split('====', 1)[-1]
except IOError: #happens when using tox
    LONG_DESC = ''

setup(name='django-hyperadmin',
      version=VERSION,
      description="A hypermedia API framework for Django.",
      long_description=LONG_DESC,
      classifiers=[
          'Programming Language :: Python',
          'Environment :: Web Environment',
          'Framework :: Django',
          'Operating System :: OS Independent',
          'Natural Language :: English',
          'Development Status :: 4 - Beta',
          'Intended Audience :: Developers',
          'License :: OSI Approved :: BSD License',
          'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
      ],
      keywords='django hypermedia HATEOAS REST',
      author = 'Jason Kraus',
      author_email = 'zbyte64@gmail.com',
      maintainer = 'Jason Kraus',
      maintainer_email = 'zbyte64@gmail.com',
      url='http://github.com/zbyte64/django-hyperadmin',
      license='New BSD License',
      packages=find_packages(exclude=['tests']),
      test_suite='tests.runtests.runtests',
      tests_require=(
          'pep8',
          'coverage',
          'django',
          'Mock',
          'nose',
          'django-nose',
      ),
      install_requires=[
          'mimeparse',
      ],
      include_package_data = True,
      zip_safe = False,
      )
| bsd-3-clause |
gogobebe2/Replicating-DeepMind | src/ai/NeuralNet.py | 6 | 4717 | """
NeuralNet class creates a neural network.
"""
from convnet import *
import numpy as np
import time
from collections import OrderedDict
class SimpleDataProvider:
    """Minimal stand-in for a convnet data provider.

    Batches are fed to the network manually (see NeuralNet.train/predict),
    so this provider only needs to report the data dimensions.
    """

    # (nr_inputs, nr_outputs); assigned by NeuralNet before any use.
    dims = None

    def __init__(self, data_dir, batch_range=None, init_epoch=1, init_batchnum=None, dp_params=None, test=False):
        # All constructor arguments are accepted purely for API
        # compatibility and ignored.
        pass

    def get_data_dims(self, idx=0):
        """Return the dimensionality of data stream *idx*."""
        assert self.dims is not None
        assert 0 <= idx < len(self.dims)
        return self.dims[idx]

    def advance_batch(self):
        # Nothing to do -- batches are supplied externally.
        pass
class NeuralNet(ConvNet):
    """Thin wrapper around cuda-convnet's ConvNet for manual batch feeding.

    Data is pushed directly into ``self.libmodel`` (the GPU model) instead
    of going through a file-based data provider.
    """

    def __init__(self, nr_inputs, nr_outputs, layers_file, params_file, output_layer_name):
        """
        Initialize a NeuralNet
        @param nr_inputs: number of inputs in data layer
        @param nr_outputs: number of target values in another data layer
        @param layers_file: path to layers file
        @param params_file: path to params file
        @param output_layer_name: name of the output layer
        """
        # Save data parameters
        self.nr_inputs = nr_inputs
        self.nr_outputs = nr_outputs
        # Class-level assignment: every SimpleDataProvider instance will
        # report these dimensions.
        SimpleDataProvider.dims = (nr_inputs, nr_outputs)
        # Save layer parameters
        self.layers_file = layers_file
        self.params_file = params_file
        self.output_layer_name = output_layer_name
        # Initialise ConvNet, including self.libmodel
        op = NeuralNet.get_options_parser()
        op, load_dic = IGPUModel.parse_options(op)
        ConvNet.__init__(self, op, load_dic)

    def train(self, inputs, outputs):
        """
        Train neural net with inputs and outputs.
        @param inputs: NxM numpy.ndarray, where N is number of inputs and M is batch size
        @param outputs: KxM numpy.ndarray, where K is number of outputs and M is batch size
        @return cost?
        """
        assert inputs.shape[0] == self.nr_inputs
        assert outputs.shape[0] == self.nr_outputs
        assert inputs.shape[1] == outputs.shape[1]
        # start training in GPU
        self.libmodel.startBatch([inputs, outputs], 1, False)  # second parameter is 'progress', third parameter means 'only test, don't train'
        # wait until processing has finished
        cost = self.libmodel.finishBatch()
        # return cost (error)
        return cost

    def predict(self, inputs):
        """
        Predict neural network output layer activations for input.
        @param inputs: NxM numpy.ndarray, where N is number of inputs and M is batch size
        """
        assert inputs.shape[0] == self.nr_inputs
        batch_size = inputs.shape[1]
        # Buffer libmodel writes the output-layer activations into.
        outputs = np.zeros((batch_size, self.nr_outputs), dtype=np.float32)
        # start feed-forward pass in GPU
        self.libmodel.startFeatureWriter([inputs, outputs.transpose().copy()], [outputs], [self.output_layer_name])
        # wait until processing has finished
        self.libmodel.finishBatch()
        # now activations of output layer should be in 'outputs'
        return outputs

    def get_weight_stats(self):
        """Return mean absolute weights/biases (and their increments) per layer.

        NOTE(review): uses the name ``n`` for numpy -- presumably pulled in
        via ``from convnet import *``; confirm.
        """
        # copy weights from GPU to CPU memory
        self.sync_with_host()
        wscales = OrderedDict()
        for name,val in sorted(self.layers.items(), key=lambda x: x[1]['id']): # This is kind of hacky but will do for now.
            l = self.layers[name]
            if 'weights' in l:
                wscales[l['name'], 'biases'] = (n.mean(n.abs(l['biases'])), n.mean(n.abs(l['biasesInc'])))
                for i,(w,wi) in enumerate(zip(l['weights'],l['weightsInc'])):
                    wscales[l['name'], 'weights' + str(i)] = (n.mean(n.abs(w)), n.mean(n.abs(wi)))
        return wscales

    def save_network(self, epoch):
        """Persist the current model state (copied back from GPU) to disk."""
        self.epoch = epoch
        self.batchnum = 1
        self.sync_with_host()
        self.save_state().join()

    @classmethod
    def get_options_parser(cls):
        """Build the convnet options parser with defaults suited for
        manual-batch training (no batch files, simple data provider)."""
        op = ConvNet.get_options_parser()
        #op.delete_option("train_batch_range")
        #op.delete_option("test_batch_range")
        #op.delete_option("dp_type")
        #op.delete_option("data_path")
        op.options["train_batch_range"].default="0"
        op.options["test_batch_range"].default="0"
        op.options["dp_type"].default="image"
        op.options["data_path"].default="/storage/hpc_kristjan/cuda-convnet4" # TODO: remove this
        op.options["layer_def"].default="ai/deepmind-layers.cfg"
        op.options["layer_params"].default="ai/deepmind-params.cfg"
        #op.options["save_path"].default="."
        #op.options["gpu"].default="0"
        # Overrides the "image" default above -- the simple provider is the
        # one actually registered and used.
        op.options["dp_type"].default="simple"
        op.options["minibatch_size"].default = 32
        DataProvider.register_data_provider('simple', 'Simple data provider', SimpleDataProvider)
        return op
| gpl-3.0 |
qutebrowser/qutebrowser | qutebrowser/browser/webkit/webkittab.py | 1 | 37238 | # vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2016-2021 Florian Bruhin (The Compiler) <mail@qutebrowser.org>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <https://www.gnu.org/licenses/>.
"""Wrapper over our (QtWebKit) WebView."""
import re
import functools
import xml.etree.ElementTree
from typing import cast, Iterable, Optional
from PyQt5.QtCore import pyqtSlot, Qt, QUrl, QPoint, QTimer, QSizeF, QSize
from PyQt5.QtGui import QIcon
from PyQt5.QtWidgets import QWidget
from PyQt5.QtWebKitWidgets import QWebPage, QWebFrame
from PyQt5.QtWebKit import QWebSettings, QWebHistory, QWebElement
from PyQt5.QtPrintSupport import QPrinter
from qutebrowser.browser import browsertab, shared
from qutebrowser.browser.webkit import (webview, tabhistory, webkitelem,
webkitsettings, webkitinspector)
from qutebrowser.utils import qtutils, usertypes, utils, log, debug, resources
from qutebrowser.keyinput import modeman
from qutebrowser.qt import sip
class WebKitAction(browsertab.AbstractAction):

    """QtWebKit implementations related to web actions."""

    action_class = QWebPage
    action_base = QWebPage.WebAction

    def exit_fullscreen(self):
        # No fullscreen support with QtWebKit.
        raise browsertab.UnsupportedOperationError

    def save_page(self):
        """Save the current page."""
        raise browsertab.UnsupportedOperationError

    def show_source(self, pygments=False):
        # QtWebKit always uses the pygments-based source viewer, regardless
        # of the *pygments* argument.
        self._show_source_pygments()

    def run_string(self, name: str) -> None:
        """Add special cases for new API.

        Those were added to QtWebKit 5.212 (which we enforce), but we don't get
        the new API from PyQt. Thus, we'll need to use the raw numbers.
        """
        new_actions = {
            # Numeric values derived by offsetting from the last enum value
            # PyQt knows about (ToggleVideoFullscreen):
            # https://github.com/qtwebkit/qtwebkit/commit/a96d9ef5d24b02d996ad14ff050d0e485c9ddc97
            'RequestClose': QWebPage.ToggleVideoFullscreen + 1,
            # https://github.com/qtwebkit/qtwebkit/commit/96b9ba6269a5be44343635a7aaca4a153ea0366b
            'Unselect': QWebPage.ToggleVideoFullscreen + 2,
        }
        if name in new_actions:
            self._widget.triggerPageAction(new_actions[name])
            return
        super().run_string(name)
class WebKitPrinting(browsertab.AbstractPrinting):

    """QtWebKit implementations related to printing."""

    def check_pdf_support(self):
        # PDF printing is always available with QtWebKit.
        pass

    def check_preview_support(self):
        # Print preview is always available with QtWebKit.
        pass

    def to_pdf(self, filename):
        """Print the current page into a PDF file at *filename*."""
        pdf_printer = QPrinter()
        pdf_printer.setOutputFileName(filename)
        self.to_printer(pdf_printer)

    def to_printer(self, printer, callback=None):
        """Print the current page on *printer* and invoke *callback*.

        QtWebKit gives no feedback about print errors, so the callback is
        always invoked with True.
        """
        self._widget.print(printer)
        if callback is not None:
            callback(True)
class WebKitSearch(browsertab.AbstractSearch):

    """QtWebKit implementations related to searching on the page."""

    def __init__(self, tab, parent=None):
        super().__init__(tab, parent)
        # Flags of the currently active search; reused by next/prev_result.
        self._flags = self._empty_flags()

    def _empty_flags(self):
        # Explicit construction so we get a mutable QWebPage.FindFlags
        # value rather than a plain int.
        return QWebPage.FindFlags(0)  # type: ignore[call-overload]

    def _call_cb(self, callback, found, text, flags, caller):
        """Call the given callback if it's non-None.

        Delays the call via a QTimer so the website is re-rendered in between.

        Args:
            callback: What to call
            found: If the text was found
            text: The text searched for
            flags: The flags searched with
            caller: Name of the caller.
        """
        found_text = 'found' if found else "didn't find"
        # Removing FindWrapsAroundDocument to get the same logging as with
        # QtWebEngine
        debug_flags = debug.qflags_key(
            QWebPage, flags & ~QWebPage.FindWrapsAroundDocument,
            klass=QWebPage.FindFlag)
        if debug_flags != '0x0000':
            flag_text = 'with flags {}'.format(debug_flags)
        else:
            flag_text = ''
        log.webview.debug(' '.join([caller, found_text, text, flag_text])
                          .strip())
        if callback is not None:
            # Defer so the page can repaint before the callback runs.
            QTimer.singleShot(0, functools.partial(callback, found))
        self.finished.emit(found)

    def clear(self):
        if self.search_displayed:
            self.cleared.emit()
        self.search_displayed = False
        # We first clear the marked text, then the highlights
        self._widget.findText('')
        self._widget.findText('', QWebPage.HighlightAllOccurrences)

    def search(self, text, *, ignore_case=usertypes.IgnoreCase.never,
               reverse=False, wrap=True, result_cb=None):
        """Search *text* on the page, highlighting all occurrences.

        See AbstractSearch for the parameter contract.
        """
        # Don't go to next entry on duplicate search
        if self.text == text and self.search_displayed:
            log.webview.debug("Ignoring duplicate search request"
                              " for {}".format(text))
            return

        # Clear old search results, this is done automatically on QtWebEngine.
        self.clear()

        self.text = text
        self.search_displayed = True
        self._flags = self._empty_flags()
        if self._is_case_sensitive(ignore_case):
            self._flags |= QWebPage.FindCaseSensitively
        if reverse:
            self._flags |= QWebPage.FindBackward
        if wrap:
            self._flags |= QWebPage.FindWrapsAroundDocument
        # We actually search *twice* - once to highlight everything, then again
        # to get a mark so we can navigate.
        found = self._widget.findText(text, self._flags)
        self._widget.findText(text,
                              self._flags | QWebPage.HighlightAllOccurrences)
        self._call_cb(result_cb, found, text, self._flags, 'search')

    def next_result(self, *, result_cb=None):
        self.search_displayed = True
        found = self._widget.findText(self.text, self._flags)
        self._call_cb(result_cb, found, self.text, self._flags, 'next_result')

    def prev_result(self, *, result_cb=None):
        self.search_displayed = True
        # The int() here makes sure we get a copy of the flags.
        flags = QWebPage.FindFlags(
            int(self._flags))  # type: ignore[call-overload]
        # Temporarily invert the search direction for this one step.
        if flags & QWebPage.FindBackward:
            flags &= ~QWebPage.FindBackward
        else:
            flags |= QWebPage.FindBackward
        found = self._widget.findText(self.text, flags)
        self._call_cb(result_cb, found, self.text, flags, 'prev_result')
class WebKitCaret(browsertab.AbstractCaret):
"""QtWebKit implementations related to moving the cursor/selection."""
def __init__(self,
tab: 'WebKitTab',
mode_manager: modeman.ModeManager,
parent: QWidget = None) -> None:
super().__init__(tab, mode_manager, parent)
self._selection_state = browsertab.SelectionState.none
@pyqtSlot(usertypes.KeyMode)
def _on_mode_entered(self, mode):
if mode != usertypes.KeyMode.caret:
return
if self._widget.hasSelection():
self._selection_state = browsertab.SelectionState.normal
else:
self._selection_state = browsertab.SelectionState.none
self.selection_toggled.emit(self._selection_state)
settings = self._widget.settings()
settings.setAttribute(QWebSettings.CaretBrowsingEnabled, True)
if self._widget.isVisible():
# Sometimes the caret isn't immediately visible, but unfocusing
# and refocusing it fixes that.
self._widget.clearFocus()
self._widget.setFocus(Qt.OtherFocusReason)
# Move the caret to the first element in the viewport if there
# isn't any text which is already selected.
#
# Note: We can't use hasSelection() here, as that's always
# true in caret mode.
if self._selection_state is browsertab.SelectionState.none:
self._widget.page().currentFrame().evaluateJavaScript(
resources.read_file('javascript/position_caret.js'))
@pyqtSlot(usertypes.KeyMode)
def _on_mode_left(self, _mode):
settings = self._widget.settings()
if settings.testAttribute(QWebSettings.CaretBrowsingEnabled):
if (self._selection_state is not browsertab.SelectionState.none and
self._widget.hasSelection()):
# Remove selection if it exists
self._widget.triggerPageAction(QWebPage.MoveToNextChar)
settings.setAttribute(QWebSettings.CaretBrowsingEnabled, False)
self._selection_state = browsertab.SelectionState.none
def move_to_next_line(self, count=1):
if self._selection_state is not browsertab.SelectionState.none:
act = QWebPage.SelectNextLine
else:
act = QWebPage.MoveToNextLine
for _ in range(count):
self._widget.triggerPageAction(act)
if self._selection_state is browsertab.SelectionState.line:
self._select_line_to_end()
def move_to_prev_line(self, count=1):
if self._selection_state is not browsertab.SelectionState.none:
act = QWebPage.SelectPreviousLine
else:
act = QWebPage.MoveToPreviousLine
for _ in range(count):
self._widget.triggerPageAction(act)
if self._selection_state is browsertab.SelectionState.line:
self._select_line_to_start()
def move_to_next_char(self, count=1):
if self._selection_state is browsertab.SelectionState.normal:
act = QWebPage.SelectNextChar
elif self._selection_state is browsertab.SelectionState.line:
return
else:
act = QWebPage.MoveToNextChar
for _ in range(count):
self._widget.triggerPageAction(act)
def move_to_prev_char(self, count=1):
if self._selection_state is browsertab.SelectionState.normal:
act = QWebPage.SelectPreviousChar
elif self._selection_state is browsertab.SelectionState.line:
return
else:
act = QWebPage.MoveToPreviousChar
for _ in range(count):
self._widget.triggerPageAction(act)
def move_to_end_of_word(self, count=1):
if self._selection_state is browsertab.SelectionState.normal:
act = [QWebPage.SelectNextWord]
if utils.is_windows: # pragma: no cover
act.append(QWebPage.SelectPreviousChar)
elif self._selection_state is browsertab.SelectionState.line:
return
else:
act = [QWebPage.MoveToNextWord]
if utils.is_windows: # pragma: no cover
act.append(QWebPage.MoveToPreviousChar)
for _ in range(count):
for a in act:
self._widget.triggerPageAction(a)
def move_to_next_word(self, count=1):
if self._selection_state is browsertab.SelectionState.normal:
act = [QWebPage.SelectNextWord]
if not utils.is_windows: # pragma: no branch
act.append(QWebPage.SelectNextChar)
elif self._selection_state is browsertab.SelectionState.line:
return
else:
act = [QWebPage.MoveToNextWord]
if not utils.is_windows: # pragma: no branch
act.append(QWebPage.MoveToNextChar)
for _ in range(count):
for a in act:
self._widget.triggerPageAction(a)
def move_to_prev_word(self, count=1):
if self._selection_state is browsertab.SelectionState.normal:
act = QWebPage.SelectPreviousWord
elif self._selection_state is browsertab.SelectionState.line:
return
else:
act = QWebPage.MoveToPreviousWord
for _ in range(count):
self._widget.triggerPageAction(act)
def move_to_start_of_line(self):
if self._selection_state is browsertab.SelectionState.normal:
act = QWebPage.SelectStartOfLine
elif self._selection_state is browsertab.SelectionState.line:
return
else:
act = QWebPage.MoveToStartOfLine
self._widget.triggerPageAction(act)
def move_to_end_of_line(self):
if self._selection_state is browsertab.SelectionState.normal:
act = QWebPage.SelectEndOfLine
elif self._selection_state is browsertab.SelectionState.line:
return
else:
act = QWebPage.MoveToEndOfLine
self._widget.triggerPageAction(act)
def move_to_start_of_next_block(self, count=1):
if self._selection_state is not browsertab.SelectionState.none:
act = [QWebPage.SelectNextLine,
QWebPage.SelectStartOfBlock]
else:
act = [QWebPage.MoveToNextLine,
QWebPage.MoveToStartOfBlock]
for _ in range(count):
for a in act:
self._widget.triggerPageAction(a)
if self._selection_state is browsertab.SelectionState.line:
self._select_line_to_end()
def move_to_start_of_prev_block(self, count=1):
if self._selection_state is not browsertab.SelectionState.none:
act = [QWebPage.SelectPreviousLine,
QWebPage.SelectStartOfBlock]
else:
act = [QWebPage.MoveToPreviousLine,
QWebPage.MoveToStartOfBlock]
for _ in range(count):
for a in act:
self._widget.triggerPageAction(a)
if self._selection_state is browsertab.SelectionState.line:
self._select_line_to_start()
def move_to_end_of_next_block(self, count=1):
if self._selection_state is not browsertab.SelectionState.none:
act = [QWebPage.SelectNextLine,
QWebPage.SelectEndOfBlock]
else:
act = [QWebPage.MoveToNextLine,
QWebPage.MoveToEndOfBlock]
for _ in range(count):
for a in act:
self._widget.triggerPageAction(a)
if self._selection_state is browsertab.SelectionState.line:
self._select_line_to_end()
def move_to_end_of_prev_block(self, count=1):
if self._selection_state is not browsertab.SelectionState.none:
act = [QWebPage.SelectPreviousLine, QWebPage.SelectEndOfBlock]
else:
act = [QWebPage.MoveToPreviousLine, QWebPage.MoveToEndOfBlock]
for _ in range(count):
for a in act:
self._widget.triggerPageAction(a)
if self._selection_state is browsertab.SelectionState.line:
self._select_line_to_start()
def move_to_start_of_document(self):
if self._selection_state is not browsertab.SelectionState.none:
act = QWebPage.SelectStartOfDocument
else:
act = QWebPage.MoveToStartOfDocument
self._widget.triggerPageAction(act)
if self._selection_state is browsertab.SelectionState.line:
self._select_line()
def move_to_end_of_document(self):
if self._selection_state is not browsertab.SelectionState.none:
act = QWebPage.SelectEndOfDocument
else:
act = QWebPage.MoveToEndOfDocument
self._widget.triggerPageAction(act)
def toggle_selection(self, line=False):
if line:
self._selection_state = browsertab.SelectionState.line
self._select_line()
self.reverse_selection()
self._select_line()
self.reverse_selection()
elif self._selection_state is not browsertab.SelectionState.normal:
self._selection_state = browsertab.SelectionState.normal
else:
self._selection_state = browsertab.SelectionState.none
self.selection_toggled.emit(self._selection_state)
def drop_selection(self):
self._widget.triggerPageAction(QWebPage.MoveToNextChar)
def selection(self, callback):
callback(self._widget.selectedText())
def reverse_selection(self):
self._tab.run_js_async("""{
const sel = window.getSelection();
sel.setBaseAndExtent(
sel.extentNode, sel.extentOffset, sel.baseNode,
sel.baseOffset
);
}""")
def _select_line(self):
self._widget.triggerPageAction(QWebPage.SelectStartOfLine)
self.reverse_selection()
self._widget.triggerPageAction(QWebPage.SelectEndOfLine)
self.reverse_selection()
def _select_line_to_end(self):
# direction of selection (if anchor is to the left or right
# of focus) has to be checked before moving selection
# to the end of line
if self._js_selection_left_to_right():
self._widget.triggerPageAction(QWebPage.SelectEndOfLine)
def _select_line_to_start(self):
if not self._js_selection_left_to_right():
self._widget.triggerPageAction(QWebPage.SelectStartOfLine)
def _js_selection_left_to_right(self):
"""Return True iff the selection's direction is left to right."""
return self._tab.private_api.run_js_sync("""
var sel = window.getSelection();
var position = sel.anchorNode.compareDocumentPosition(sel.focusNode);
(!position && sel.anchorOffset < sel.focusOffset ||
position === Node.DOCUMENT_POSITION_FOLLOWING);
""")
def _follow_selected(self, *, tab=False):
if QWebSettings.globalSettings().testAttribute(
QWebSettings.JavascriptEnabled):
if tab:
self._tab.data.override_target = usertypes.ClickTarget.tab
self._tab.run_js_async("""
const aElm = document.activeElement;
if (window.getSelection().anchorNode) {
window.getSelection().anchorNode.parentNode.click();
} else if (aElm && aElm !== document.body) {
aElm.click();
}
""")
else:
selection = self._widget.selectedHtml()
if not selection:
# Getting here may mean we crashed, but we can't do anything
# about that until this commit is released:
# https://github.com/annulen/webkit/commit/0e75f3272d149bc64899c161f150eb341a2417af
# TODO find a way to check if something is focused
self._follow_enter(tab)
return
try:
selected_element = xml.etree.ElementTree.fromstring(
'<html>{}</html>'.format(selection)).find('a')
except xml.etree.ElementTree.ParseError:
raise browsertab.WebTabError('Could not parse selected '
'element!')
if selected_element is not None:
try:
href = selected_element.attrib['href']
except KeyError:
raise browsertab.WebTabError('Anchor element without '
'href!')
url = self._tab.url().resolved(QUrl(href))
if tab:
self._tab.new_tab_requested.emit(url)
else:
self._tab.load_url(url)
def follow_selected(self, *, tab=False):
try:
self._follow_selected(tab=tab)
finally:
self.follow_selected_done.emit()
class WebKitZoom(browsertab.AbstractZoom):
"""QtWebKit implementations related to zooming."""
def _set_factor_internal(self, factor):
self._widget.setZoomFactor(factor)
class WebKitScroller(browsertab.AbstractScroller):
"""QtWebKit implementations related to scrolling."""
# FIXME:qtwebengine When to use the main frame, when the current one?
def pos_px(self):
return self._widget.page().mainFrame().scrollPosition()
def pos_perc(self):
return self._widget.scroll_pos
def to_point(self, point):
self._widget.page().mainFrame().setScrollPosition(point)
def to_anchor(self, name):
self._widget.page().mainFrame().scrollToAnchor(name)
def delta(self, x: int = 0, y: int = 0) -> None:
qtutils.check_overflow(x, 'int')
qtutils.check_overflow(y, 'int')
self._widget.page().mainFrame().scroll(x, y)
def delta_page(self, x: float = 0.0, y: float = 0.0) -> None:
if y.is_integer():
y = int(y)
if y == 0:
pass
elif y < 0:
self.page_up(count=-y)
elif y > 0:
self.page_down(count=y)
y = 0
if x == 0 and y == 0:
return
size = self._widget.page().mainFrame().geometry()
self.delta(int(x * size.width()), int(y * size.height()))
def to_perc(self, x=None, y=None):
if x is None and y == 0:
self.top()
elif x is None and y == 100:
self.bottom()
else:
for val, orientation in [(x, Qt.Horizontal), (y, Qt.Vertical)]:
if val is not None:
frame = self._widget.page().mainFrame()
maximum = frame.scrollBarMaximum(orientation)
if maximum == 0:
continue
pos = int(maximum * val / 100)
pos = qtutils.check_overflow(pos, 'int', fatal=False)
frame.setScrollBarValue(orientation, pos)
def _key_press(self, key, count=1, getter_name=None, direction=None):
frame = self._widget.page().mainFrame()
getter = None if getter_name is None else getattr(frame, getter_name)
# FIXME:qtwebengine needed?
# self._widget.setFocus()
for _ in range(min(count, 5000)):
# Abort scrolling if the minimum/maximum was reached.
if (getter is not None and
frame.scrollBarValue(direction) == getter(direction)):
return
self._tab.fake_key_press(key)
def up(self, count=1):
self._key_press(Qt.Key_Up, count, 'scrollBarMinimum', Qt.Vertical)
def down(self, count=1):
self._key_press(Qt.Key_Down, count, 'scrollBarMaximum', Qt.Vertical)
def left(self, count=1):
self._key_press(Qt.Key_Left, count, 'scrollBarMinimum', Qt.Horizontal)
def right(self, count=1):
self._key_press(Qt.Key_Right, count, 'scrollBarMaximum', Qt.Horizontal)
def top(self):
self._key_press(Qt.Key_Home)
def bottom(self):
self._key_press(Qt.Key_End)
def page_up(self, count=1):
self._key_press(Qt.Key_PageUp, count, 'scrollBarMinimum', Qt.Vertical)
def page_down(self, count=1):
self._key_press(Qt.Key_PageDown, count, 'scrollBarMaximum',
Qt.Vertical)
def at_top(self):
return self.pos_px().y() == 0
def at_bottom(self):
frame = self._widget.page().currentFrame()
return self.pos_px().y() >= frame.scrollBarMaximum(Qt.Vertical)
class WebKitHistoryPrivate(browsertab.AbstractHistoryPrivate):
"""History-related methods which are not part of the extension API."""
def __init__(self, tab: 'WebKitTab') -> None:
self._tab = tab
self._history = cast(QWebHistory, None)
def serialize(self):
return qtutils.serialize(self._history)
def deserialize(self, data):
qtutils.deserialize(data, self._history)
def load_items(self, items):
if items:
self._tab.before_load_started.emit(items[-1].url)
stream, _data, user_data = tabhistory.serialize(items)
qtutils.deserialize_stream(stream, self._history)
for i, data in enumerate(user_data):
self._history.itemAt(i).setUserData(data)
cur_data = self._history.currentItem().userData()
if cur_data is not None:
if 'zoom' in cur_data:
self._tab.zoom.set_factor(cur_data['zoom'])
if ('scroll-pos' in cur_data and
self._tab.scroller.pos_px() == QPoint(0, 0)):
QTimer.singleShot(0, functools.partial(
self._tab.scroller.to_point, cur_data['scroll-pos']))
class WebKitHistory(browsertab.AbstractHistory):
"""QtWebKit implementations related to page history."""
def __init__(self, tab):
super().__init__(tab)
self.private_api = WebKitHistoryPrivate(tab)
def __len__(self):
return len(self._history)
def __iter__(self):
return iter(self._history.items())
def current_idx(self):
return self._history.currentItemIndex()
def can_go_back(self):
return self._history.canGoBack()
def can_go_forward(self):
return self._history.canGoForward()
def _item_at(self, i):
return self._history.itemAt(i)
def _go_to_item(self, item):
self._tab.before_load_started.emit(item.url())
self._history.goToItem(item)
def back_items(self):
return self._history.backItems(self._history.count())
def forward_items(self):
return self._history.forwardItems(self._history.count())
class WebKitElements(browsertab.AbstractElements):
"""QtWebKit implementations related to elements on the page."""
_tab: 'WebKitTab'
def find_css(self, selector, callback, error_cb, *, only_visible=False):
utils.unused(error_cb)
mainframe = self._widget.page().mainFrame()
if mainframe is None:
raise browsertab.WebTabError("No frame focused!")
elems = []
frames = webkitelem.get_child_frames(mainframe)
for f in frames:
frame_elems = cast(Iterable[QWebElement], f.findAllElements(selector))
for elem in frame_elems:
elems.append(webkitelem.WebKitElement(elem, tab=self._tab))
if only_visible:
# pylint: disable=protected-access
elems = [e for e in elems if e._is_visible(mainframe)]
# pylint: enable=protected-access
callback(elems)
def find_id(self, elem_id, callback):
def find_id_cb(elems):
"""Call the real callback with the found elements."""
if not elems:
callback(None)
else:
callback(elems[0])
# Escape non-alphanumeric characters in the selector
# https://www.w3.org/TR/CSS2/syndata.html#value-def-identifier
elem_id = re.sub(r'[^a-zA-Z0-9_-]', r'\\\g<0>', elem_id)
self.find_css('#' + elem_id, find_id_cb, error_cb=lambda exc: None)
def find_focused(self, callback):
frame = self._widget.page().currentFrame()
if frame is None:
callback(None)
return
elem = frame.findFirstElement('*:focus')
if elem.isNull():
callback(None)
else:
callback(webkitelem.WebKitElement(elem, tab=self._tab))
def find_at_pos(self, pos, callback):
assert pos.x() >= 0
assert pos.y() >= 0
frame = self._widget.page().frameAt(pos)
if frame is None:
# This happens when we click inside the webview, but not actually
# on the QWebPage - for example when clicking the scrollbar
# sometimes.
log.webview.debug("Hit test at {} but frame is None!".format(pos))
callback(None)
return
# You'd think we have to subtract frame.geometry().topLeft() from the
# position, but it seems QWebFrame::hitTestContent wants a position
# relative to the QWebView, not to the frame. This makes no sense to
# me, but it works this way.
hitresult = frame.hitTestContent(pos)
if hitresult.isNull():
# For some reason, the whole hit result can be null sometimes (e.g.
# on doodle menu links).
log.webview.debug("Hit test result is null!")
callback(None)
return
try:
elem = webkitelem.WebKitElement(hitresult.element(), tab=self._tab)
except webkitelem.IsNullError:
# For some reason, the hit result element can be a null element
# sometimes (e.g. when clicking the timetable fields on
# https://www.sbb.ch/ ).
log.webview.debug("Hit test result element is null!")
callback(None)
return
callback(elem)
class WebKitAudio(browsertab.AbstractAudio):
"""Dummy handling of audio status for QtWebKit."""
def set_muted(self, muted: bool, override: bool = False) -> None:
raise browsertab.WebTabError('Muting is not supported on QtWebKit!')
def is_muted(self):
return False
def is_recently_audible(self):
return False
class WebKitTabPrivate(browsertab.AbstractTabPrivate):
"""QtWebKit-related methods which aren't part of the public API."""
def networkaccessmanager(self):
return self._widget.page().networkAccessManager()
def clear_ssl_errors(self):
self.networkaccessmanager().clear_all_ssl_errors()
def event_target(self):
return self._widget
def shutdown(self):
self._widget.shutdown()
def run_js_sync(self, code):
document_element = self._widget.page().mainFrame().documentElement()
result = document_element.evaluateJavaScript(code)
return result
def _init_inspector(self, splitter, win_id, parent=None):
return webkitinspector.WebKitInspector(splitter, win_id, parent)
class WebKitTab(browsertab.AbstractTab):
"""A QtWebKit tab in the browser."""
def __init__(self, *, win_id, mode_manager, private, parent=None):
super().__init__(win_id=win_id,
mode_manager=mode_manager,
private=private,
parent=parent)
widget = webview.WebView(win_id=win_id, tab_id=self.tab_id,
private=private, tab=self)
if private:
self._make_private(widget)
self.history = WebKitHistory(tab=self)
self.scroller = WebKitScroller(tab=self, parent=self)
self.caret = WebKitCaret(mode_manager=mode_manager,
tab=self, parent=self)
self.zoom = WebKitZoom(tab=self, parent=self)
self.search = WebKitSearch(tab=self, parent=self)
self.printing = WebKitPrinting(tab=self)
self.elements = WebKitElements(tab=self)
self.action = WebKitAction(tab=self)
self.audio = WebKitAudio(tab=self, parent=self)
self.private_api = WebKitTabPrivate(mode_manager=mode_manager,
tab=self)
# We're assigning settings in _set_widget
self.settings = webkitsettings.WebKitSettings(settings=None)
self._set_widget(widget)
self._connect_signals()
self.backend = usertypes.Backend.QtWebKit
def _install_event_filter(self):
self._widget.installEventFilter(self._tab_event_filter)
def _make_private(self, widget):
settings = widget.settings()
settings.setAttribute(QWebSettings.PrivateBrowsingEnabled, True)
def load_url(self, url):
self._load_url_prepare(url)
self._widget.load(url)
def url(self, *, requested=False):
frame = self._widget.page().mainFrame()
if requested:
return frame.requestedUrl()
else:
return frame.url()
def dump_async(self, callback, *, plain=False):
frame = self._widget.page().mainFrame()
if plain:
callback(frame.toPlainText())
else:
callback(frame.toHtml())
def run_js_async(self, code, callback=None, *, world=None):
if world is not None and world != usertypes.JsWorld.jseval:
log.webview.warning("Ignoring world ID {}".format(world))
result = self.private_api.run_js_sync(code)
if callback is not None:
callback(result)
def icon(self):
return self._widget.icon()
def reload(self, *, force=False):
if force:
action = QWebPage.ReloadAndBypassCache
else:
action = QWebPage.Reload
self._widget.triggerPageAction(action)
def stop(self):
self._widget.stop()
def title(self):
return self._widget.title()
def renderer_process_pid(self) -> Optional[int]:
return None
@pyqtSlot()
def _on_history_trigger(self):
url = self.url()
requested_url = self.url(requested=True)
self.history_item_triggered.emit(url, requested_url, self.title())
def set_html(self, html, base_url=QUrl()):
self._widget.setHtml(html, base_url)
@pyqtSlot()
def _on_load_started(self):
super()._on_load_started()
nam = self._widget.page().networkAccessManager()
nam.netrc_used = False
# Make sure the icon is cleared when navigating to a page without one.
self.icon_changed.emit(QIcon())
@pyqtSlot(bool)
def _on_load_finished(self, ok: bool) -> None:
super()._on_load_finished(ok)
self._update_load_status(ok)
@pyqtSlot()
def _on_frame_load_finished(self):
"""Make sure we emit an appropriate status when loading finished.
While Qt has a bool "ok" attribute for loadFinished, it always is True
when using error pages... See
https://github.com/qutebrowser/qutebrowser/issues/84
"""
self._on_load_finished(not self._widget.page().error_occurred)
@pyqtSlot()
def _on_webkit_icon_changed(self):
"""Emit iconChanged with a QIcon like QWebEngineView does."""
if sip.isdeleted(self._widget):
log.webview.debug("Got _on_webkit_icon_changed for deleted view!")
return
self.icon_changed.emit(self._widget.icon())
@pyqtSlot(QWebFrame)
def _on_frame_created(self, frame):
"""Connect the contentsSizeChanged signal of each frame."""
# FIXME:qtwebengine those could theoretically regress:
# https://github.com/qutebrowser/qutebrowser/issues/152
# https://github.com/qutebrowser/qutebrowser/issues/263
frame.contentsSizeChanged.connect(self._on_contents_size_changed)
@pyqtSlot(QSize)
def _on_contents_size_changed(self, size):
self.contents_size_changed.emit(QSizeF(size))
@pyqtSlot(usertypes.NavigationRequest)
def _on_navigation_request(self, navigation):
super()._on_navigation_request(navigation)
if not navigation.accepted:
return
log.webview.debug("target {} override {}".format(
self.data.open_target, self.data.override_target))
if self.data.override_target is not None:
target = self.data.override_target
self.data.override_target = None
else:
target = self.data.open_target
if (navigation.navigation_type == navigation.Type.link_clicked and
target != usertypes.ClickTarget.normal):
tab = shared.get_tab(self.win_id, target)
tab.load_url(navigation.url)
self.data.open_target = usertypes.ClickTarget.normal
navigation.accepted = False
if navigation.is_main_frame:
self.settings.update_for_url(navigation.url)
@pyqtSlot('QNetworkReply*')
def _on_ssl_errors(self, reply):
self._insecure_hosts.add(reply.url().host())
def _connect_signals(self):
view = self._widget
page = view.page()
frame = page.mainFrame()
page.windowCloseRequested.connect(self.window_close_requested)
page.linkHovered.connect(self.link_hovered)
page.loadProgress.connect(self._on_load_progress)
frame.loadStarted.connect(self._on_load_started)
view.scroll_pos_changed.connect(self.scroller.perc_changed)
view.titleChanged.connect(self.title_changed)
view.urlChanged.connect(self._on_url_changed)
view.shutting_down.connect(self.shutting_down)
page.networkAccessManager().sslErrors.connect(self._on_ssl_errors)
frame.loadFinished.connect(self._on_frame_load_finished)
view.iconChanged.connect(self._on_webkit_icon_changed)
page.frameCreated.connect(self._on_frame_created)
frame.contentsSizeChanged.connect(self._on_contents_size_changed)
frame.initialLayoutCompleted.connect(self._on_history_trigger)
page.navigation_request.connect(self._on_navigation_request)
| gpl-3.0 |
NikNitro/Python-iBeacon-Scan | sympy/polys/domains/old_polynomialring.py | 24 | 13861 | """Implementation of :class:`PolynomialRing` class. """
from __future__ import print_function, division
from sympy.polys.domains.ring import Ring
from sympy.polys.domains.compositedomain import CompositeDomain
from sympy.polys.domains.characteristiczero import CharacteristicZero
from sympy.polys.domains.old_fractionfield import FractionField
from sympy.polys.polyclasses import DMP, DMF
from sympy.polys.polyerrors import (GeneratorsNeeded, PolynomialError,
CoercionFailed, ExactQuotientFailed, NotReversible)
from sympy.polys.polyutils import dict_from_basic, basic_from_dict, _dict_reorder
from sympy.polys.orderings import monomial_key, build_product_order
from sympy.polys.agca.modules import FreeModulePolyRing
from sympy.core.compatibility import iterable, range
from sympy.utilities import public
# XXX why does this derive from CharacteristicZero???
@public
class PolynomialRingBase(Ring, CharacteristicZero, CompositeDomain):
"""
Base class for generalized polynomial rings.
This base class should be used for uniform access to generalized polynomial
rings. Subclasses only supply information about the element storage etc.
Do not instantiate.
"""
has_assoc_Ring = True
has_assoc_Field = True
default_order = "grevlex"
def __init__(self, dom, *gens, **opts):
if not gens:
raise GeneratorsNeeded("generators not specified")
lev = len(gens) - 1
self.ngens = len(gens)
self.zero = self.dtype.zero(lev, dom, ring=self)
self.one = self.dtype.one(lev, dom, ring=self)
self.domain = self.dom = dom
self.symbols = self.gens = gens
# NOTE 'order' may not be set if inject was called through CompositeDomain
self.order = opts.get('order', monomial_key(self.default_order))
def new(self, element):
return self.dtype(element, self.dom, len(self.gens) - 1, ring=self)
def __str__(self):
s_order = str(self.order)
orderstr = (
" order=" + s_order) if s_order != self.default_order else ""
return str(self.dom) + '[' + ','.join(map(str, self.gens)) + orderstr + ']'
def __hash__(self):
return hash((self.__class__.__name__, self.dtype, self.dom,
self.gens, self.order))
def __eq__(self, other):
"""Returns `True` if two domains are equivalent. """
return isinstance(other, PolynomialRingBase) and \
self.dtype == other.dtype and self.dom == other.dom and \
self.gens == other.gens and self.order == other.order
def from_ZZ_python(K1, a, K0):
"""Convert a Python `int` object to `dtype`. """
return K1(K1.dom.convert(a, K0))
def from_QQ_python(K1, a, K0):
"""Convert a Python `Fraction` object to `dtype`. """
return K1(K1.dom.convert(a, K0))
def from_ZZ_gmpy(K1, a, K0):
"""Convert a GMPY `mpz` object to `dtype`. """
return K1(K1.dom.convert(a, K0))
def from_QQ_gmpy(K1, a, K0):
"""Convert a GMPY `mpq` object to `dtype`. """
return K1(K1.dom.convert(a, K0))
def from_RealField(K1, a, K0):
"""Convert a mpmath `mpf` object to `dtype`. """
return K1(K1.dom.convert(a, K0))
def from_AlgebraicField(K1, a, K0):
"""Convert a `ANP` object to `dtype`. """
if K1.dom == K0:
return K1(a)
def from_GlobalPolynomialRing(K1, a, K0):
"""Convert a `DMP` object to `dtype`. """
if K1.gens == K0.gens:
if K1.dom == K0.dom:
return K1(a.rep) # set the correct ring
else:
return K1(a.convert(K1.dom).rep)
else:
monoms, coeffs = _dict_reorder(a.to_dict(), K0.gens, K1.gens)
if K1.dom != K0.dom:
coeffs = [ K1.dom.convert(c, K0.dom) for c in coeffs ]
return K1(dict(zip(monoms, coeffs)))
def get_field(self):
"""Returns a field associated with `self`. """
return FractionField(self.dom, *self.gens)
def poly_ring(self, *gens):
"""Returns a polynomial ring, i.e. `K[X]`. """
raise NotImplementedError('nested domains not allowed')
def frac_field(self, *gens):
"""Returns a fraction field, i.e. `K(X)`. """
raise NotImplementedError('nested domains not allowed')
def revert(self, a):
try:
return 1/a
except (ExactQuotientFailed, ZeroDivisionError):
raise NotReversible('%s is not a unit' % a)
def gcdex(self, a, b):
"""Extended GCD of `a` and `b`. """
return a.gcdex(b)
def gcd(self, a, b):
"""Returns GCD of `a` and `b`. """
return a.gcd(b)
def lcm(self, a, b):
"""Returns LCM of `a` and `b`. """
return a.lcm(b)
def factorial(self, a):
"""Returns factorial of `a`. """
return self.dtype(self.dom.factorial(a))
def _vector_to_sdm(self, v, order):
"""
For internal use by the modules class.
Convert an iterable of elements of this ring into a sparse distributed
module element.
"""
raise NotImplementedError
def _sdm_to_dics(self, s, n):
"""Helper for _sdm_to_vector."""
from sympy.polys.distributedmodules import sdm_to_dict
dic = sdm_to_dict(s)
res = [{} for _ in range(n)]
for k, v in dic.items():
res[k[0]][k[1:]] = v
return res
def _sdm_to_vector(self, s, n):
"""
For internal use by the modules class.
Convert a sparse distributed module into a list of length ``n``.
>>> from sympy import QQ, ilex
>>> from sympy.abc import x, y
>>> R = QQ.old_poly_ring(x, y, order=ilex)
>>> L = [((1, 1, 1), QQ(1)), ((0, 1, 0), QQ(1)), ((0, 0, 1), QQ(2))]
>>> R._sdm_to_vector(L, 2)
[x + 2*y, x*y]
"""
dics = self._sdm_to_dics(s, n)
# NOTE this works for global and local rings!
return [self(x) for x in dics]
def free_module(self, rank):
"""
Generate a free module of rank ``rank`` over ``self``.
>>> from sympy.abc import x
>>> from sympy import QQ
>>> QQ.old_poly_ring(x).free_module(2)
QQ[x]**2
"""
return FreeModulePolyRing(self, rank)
def _vector_to_sdm_helper(v, order):
"""Helper method for common code in Global and Local poly rings."""
from sympy.polys.distributedmodules import sdm_from_dict
d = {}
for i, e in enumerate(v):
for key, value in e.to_dict().items():
d[(i,) + key] = value
return sdm_from_dict(d, order)
@public
class GlobalPolynomialRing(PolynomialRingBase):
"""A true polynomial ring, with objects DMP. """
is_PolynomialRing = is_Poly = True
dtype = DMP
def from_FractionField(K1, a, K0):
"""
Convert a ``DMF`` object to ``DMP``.
Examples
========
>>> from sympy.polys.polyclasses import DMP, DMF
>>> from sympy.polys.domains import ZZ
>>> from sympy.abc import x
>>> f = DMF(([ZZ(1), ZZ(1)], [ZZ(1)]), ZZ)
>>> K = ZZ.old_frac_field(x)
>>> F = ZZ.old_poly_ring(x).from_FractionField(f, K)
>>> F == DMP([ZZ(1), ZZ(1)], ZZ)
True
>>> type(F)
<class 'sympy.polys.polyclasses.DMP'>
"""
if a.denom().is_one:
return K1.from_GlobalPolynomialRing(a.numer(), K0)
def to_sympy(self, a):
"""Convert `a` to a SymPy object. """
return basic_from_dict(a.to_sympy_dict(), *self.gens)
def from_sympy(self, a):
"""Convert SymPy's expression to `dtype`. """
try:
rep, _ = dict_from_basic(a, gens=self.gens)
except PolynomialError:
raise CoercionFailed("can't convert %s to type %s" % (a, self))
for k, v in rep.items():
rep[k] = self.dom.from_sympy(v)
return self(rep)
def is_positive(self, a):
"""Returns True if `LC(a)` is positive. """
return self.dom.is_positive(a.LC())
def is_negative(self, a):
"""Returns True if `LC(a)` is negative. """
return self.dom.is_negative(a.LC())
def is_nonpositive(self, a):
"""Returns True if `LC(a)` is non-positive. """
return self.dom.is_nonpositive(a.LC())
def is_nonnegative(self, a):
"""Returns True if `LC(a)` is non-negative. """
return self.dom.is_nonnegative(a.LC())
def _vector_to_sdm(self, v, order):
"""
>>> from sympy import lex, QQ
>>> from sympy.abc import x, y
>>> R = QQ.old_poly_ring(x, y)
>>> f = R.convert(x + 2*y)
>>> g = R.convert(x * y)
>>> R._vector_to_sdm([f, g], lex)
[((1, 1, 1), 1), ((0, 1, 0), 1), ((0, 0, 1), 2)]
"""
return _vector_to_sdm_helper(v, order)
class GeneralizedPolynomialRing(PolynomialRingBase):
"""A generalized polynomial ring, with objects DMF. """
dtype = DMF
def new(self, a):
"""Construct an element of `self` domain from `a`. """
res = self.dtype(a, self.dom, len(self.gens) - 1, ring=self)
# make sure res is actually in our ring
if res.denom().terms(order=self.order)[0][0] != (0,)*len(self.gens):
from sympy.printing.str import sstr
raise CoercionFailed("denominator %s not allowed in %s"
% (sstr(res), self))
return res
def __contains__(self, a):
try:
a = self.convert(a)
except CoercionFailed:
return False
return a.denom().terms(order=self.order)[0][0] == (0,)*len(self.gens)
def from_FractionField(K1, a, K0):
dmf = K1.get_field().from_FractionField(a, K0)
return K1((dmf.num, dmf.den))
def to_sympy(self, a):
"""Convert `a` to a SymPy object. """
return (basic_from_dict(a.numer().to_sympy_dict(), *self.gens) /
basic_from_dict(a.denom().to_sympy_dict(), *self.gens))
def from_sympy(self, a):
"""Convert SymPy's expression to `dtype`. """
p, q = a.as_numer_denom()
num, _ = dict_from_basic(p, gens=self.gens)
den, _ = dict_from_basic(q, gens=self.gens)
for k, v in num.items():
num[k] = self.dom.from_sympy(v)
for k, v in den.items():
den[k] = self.dom.from_sympy(v)
return self((num, den)).cancel()
def _vector_to_sdm(self, v, order):
"""
Turn an iterable into a sparse distributed module.
Note that the vector is multiplied by a unit first to make all entries
polynomials.
>>> from sympy import ilex, QQ
>>> from sympy.abc import x, y
>>> R = QQ.old_poly_ring(x, y, order=ilex)
>>> f = R.convert((x + 2*y) / (1 + x))
>>> g = R.convert(x * y)
>>> R._vector_to_sdm([f, g], ilex)
[((0, 0, 1), 2), ((0, 1, 0), 1), ((1, 1, 1), 1), ((1,
2, 1), 1)]
"""
# NOTE this is quite inefficient...
u = self.one.numer()
for x in v:
u *= x.denom()
return _vector_to_sdm_helper([x.numer()*u/x.denom() for x in v], order)
@public
def PolynomialRing(dom, *gens, **opts):
r"""
Create a generalized multivariate polynomial ring.
A generalized polynomial ring is defined by a ground field `K`, a set
of generators (typically `x_1, \ldots, x_n`) and a monomial order `<`.
The monomial order can be global, local or mixed. In any case it induces
a total ordering on the monomials, and there exists for every (non-zero)
polynomial `f \in K[x_1, \ldots, x_n]` a well-defined "leading monomial"
`LM(f) = LM(f, >)`. One can then define a multiplicative subset
`S = S_> = \{f \in K[x_1, \ldots, x_n] | LM(f) = 1\}`. The generalized
polynomial ring corresponding to the monomial order is
`R = S^{-1}K[x_1, \ldots, x_n]`.
If `>` is a so-called global order, that is `1` is the smallest monomial,
then we just have `S = K` and `R = K[x_1, \ldots, x_n]`.
Examples
========
A few examples may make this clearer.
>>> from sympy.abc import x, y
>>> from sympy import QQ
Our first ring uses global lexicographic order.
>>> R1 = QQ.old_poly_ring(x, y, order=(("lex", x, y),))
The second ring uses local lexicographic order. Note that when using a
single (non-product) order, you can just specify the name and omit the
variables:
>>> R2 = QQ.old_poly_ring(x, y, order="ilex")
The third and fourth rings use a mixed orders:
>>> o1 = (("ilex", x), ("lex", y))
>>> o2 = (("lex", x), ("ilex", y))
>>> R3 = QQ.old_poly_ring(x, y, order=o1)
>>> R4 = QQ.old_poly_ring(x, y, order=o2)
We will investigate what elements of `K(x, y)` are contained in the various
rings.
>>> L = [x, 1/x, y/(1 + x), 1/(1 + y), 1/(1 + x*y)]
>>> test = lambda R: [f in R for f in L]
The first ring is just `K[x, y]`:
>>> test(R1)
[True, False, False, False, False]
The second ring is R1 localised at the maximal ideal (x, y):
>>> test(R2)
[True, False, True, True, True]
The third ring is R1 localised at the prime ideal (x):
>>> test(R3)
[True, False, True, False, True]
Finally the fourth ring is R1 localised at `S = K[x, y] \setminus yK[y]`:
>>> test(R4)
[True, False, False, True, False]
"""
order = opts.get("order", GeneralizedPolynomialRing.default_order)
if iterable(order):
order = build_product_order(order, gens)
order = monomial_key(order)
opts['order'] = order
if order.is_global:
return GlobalPolynomialRing(dom, *gens, **opts)
else:
return GeneralizedPolynomialRing(dom, *gens, **opts)
| gpl-3.0 |
jorgb/airs | gui/images/anim/progress_1_12.py | 1 | 3292 | #----------------------------------------------------------------------
# This file was generated by D:\personal\src\airs\gui\images\anim\make_images.py
#
from wx import ImageFromStream, BitmapFromImage, EmptyIcon
import cStringIO, zlib
def getData():
    """Return the raw PNG bytes for this animation frame.

    The literal below is a zlib-compressed PNG emitted by make_images.py;
    it is decompressed anew on every call.
    """
    return zlib.decompress(
'x\xda\x01\x9a\x03e\xfc\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00$\x00\
\x00\x00$\x08\x02\x00\x00\x00nb\x0f\xcf\x00\x00\x00\x03sBIT\x08\x08\x08\xdb\
\xe1O\xe0\x00\x00\x03RIDATH\x89\xad\x97[\x8f\xdb6\x10\x85\xcf\x0c\xc7\x96b{\
\x17\x8bf\x9bn\xfa\x10\x14A\x0b\xa4\xff\xff\xbf\xf4\xa5/\xbd\xa4i\x91,\x16n\
\xe1\xca\xb6Lr\xfa@\x8a\xa2dI\x96\x9d\x16%\x0c[\x17^\xbe\xf9\xe6pH\x8aDU\xb1\
@V\xc9\xaa\x9e\x1c\xac\xea\x93B\xb4\xe6\xac\x99\x84\xf4\xe6\xd9\xe1\xfe\xab\
w\xec/\xd8_\xfc\xd1\xc1+\x14\n\x80@\xdc\xb7n\x08B4\x91\xba\x15\xda\x98\xab\
MQ\xcdd\x8f\xad\xfe:\xeb\xc5+\x00C\x04\x80\t\xbd\xa3\xa3\x02Ph8xaho\xc5\xbb\
\xe9\xfe\x97a\x8d\xc3\x1f\x87\x7f\xb0\xbae\xfe>P$E^`\x84\xf8\xa2\xb6B\xf7k.\
\x1a[\x80\xed/\xf8\xad\xf1N\xe1\x08)%\xa5\x91E\xb8\xb8\xea\x15k\xa6\xfb\x8a\
j\xc6\xa6|\xc4c\xab?\x1f\\\xd6\xd9y\xa5\xa4\xf0`U\xff:\xf9\xc6\xcd\xc2>\x1e\
\xf5\xf7\xc6\x0b\xc1\xf5\x1e\xf8\xc4\x8c\x90\x1d\x0b%D\x8fm\x16\xbc\x01\xd6\
8\xfcZ\x87\x96\x16y\x93b\x89\x1b\x1e[\xb5\xba[\x1b\x80\xa5\xa4\x8cgT\nv\x06\
l\xecm!\x12\x1a\xceo\xce\x85\xa6\xdb\xc1\xd6\x1f\xf5\xef\x8b\x97\xd1\x93\x81\
\x179\xc1I\xafp:\xb8:\x15V\xd0\xc9\xe1`\xfbv\x00\xb0J\x7f\x9e\x9c!\xb2\x1a\
\xbaV\xf3\x0cu<&\x08a\xe5\x07P@f1\x8da\x06\xfe\xe0x#\xd4\xc1\x9eZ\xefTM\x7f\
\xeb\x18\x19\x1a\xfd\xbaR\xaf\xd6\x14\x8bS(`\xfb\x8b6V\x8b\xc1\x19x`\x06X\
\xbdo\x9c\xd9\x18\x08\x80Ou\xa1\x0eX\xedxVQ1\xdel\xcc\xdd*\\\xd1>\x0e\x15\
\xc2CEG\x87\x9e\\\x9fb\x9e\x98\xf0\x97\x98\xcf\xce\xdd\x8c\x91\xc6\xa1qCX\
\x19/\xe8\xedV\xcf\x14\xa1\x9d\x80\x89\x9f[\x1dc\xa2\xce\xcaV\x89\xcf\xce9\
\x9d\xcc\x0e\xa7\xfa\xbaf\x19R\xd0\xc6`k|O\x9a\x94\xfc\xeb\xd8\xab\x8b\xe6d\
!VL\xf7\xd5\xa2Zr\xb3\x92\xb3\xe6\x93\'\x92=\xd0z\x95\xf6\xf9\xa4\xcd\xa2\
\xdc\x18\x1e\x97\xb8r\xafI\x85`u\xe4`\x98\x1b!\xe9\xec\xb4\x87\xd6c\xbd\x8c\
\xd4\xf3\xc8k93\xc3\xc9\xb9\xc6\xe6g\xeb\x17<2\x07\xf3\x8a\xf3\x92\xca\x98\
\xc8\xc0\x0b\x91\x81\x0f\xc7Yk\xbcf\xf2}M\xca>\x00\x0eVO\x8b\xf6(\x08\x05\
\x97\xc8\x84o!"2)U\x18]!\xc9\x02\x880\xeb\xf1\xe1\xb8\x88\xd6\x96<\x88\xd4\
\xf0\x97k\x1e\xd6\xdf4\xa6\xa8\x0fG\xbf\xbf\\\x0fk\xde\xef\xb0\x14pm\xa8\
\xc54f\xa4\xfa\xe9\x1f:^vS\xd2\xc9)\x00\xa1\xbc\x88\'0\x08)\x0b\xe9\xdd\xeaJ\
\xda\xdd\xaehm\xca3\xabd\x95Z\xff,\xdd\x8bHa d\xe3\xeb\xd9y\xbb\x15zw#\xe9nG)\
0B\xd5\xb7\xfd\xc4\xca:\x93\xf1\xea\x08\xdb\t^\xd5\xe5\xe0\x84\xf1\xe3\xad\
\xc9HI\x8b\xdd\xf1\x14/ b\xe5\xe8~\xdfl\x8c\x94\xc2{w#\x0b\xcbU\x91\x17\x90q\
\xa7\xdcu\xaa\x19\xdf\xdd\xe6\xbb\xe7\xef\xb7p\r\xebT\x86\xb5\x90W\'\xbb\x94\
\xa1\xdb\x0f\x15}\x93D\xf1\xed\x0b~XV\xef\xa7\xa8\x00\x84\x91\x0e\
\xc13\x8f~\xd8\xf1\xcb\x8a\x01\xbc\xac\xe8\xedv\x91}\xd9\xbe#\xdd\xaa\n#\x1b\
\x82\xc2\xf6\xfb\x97\xc6?\xd4\xe5\x95\xa5h\xe3x\xdd\xf0\n\xa6\x9cT\x86Mif\
\xc02\x9eP\xf9\x8d\xed\xfa\xfb\xd9g\xa9\xc7\x94\xab\xd1\xff\x03\x0b#7\x83\t\
\xfa\x0c\x1b1\xe1\xe4\xc2w\\\x00\xff\x01\x9f\xcd\x92:\xf3\xa7*\xa9\x00\x00\
\x00\x00IEND\xaeB`\x82\x1eD\xc4\x99' )
def getBitmap():
    """Build a wx bitmap for this animation frame."""
    image = getImage()
    return BitmapFromImage(image)
def getImage():
    """Decode the embedded PNG payload into a wx image."""
    png_stream = cStringIO.StringIO(getData())
    return ImageFromStream(png_stream)
| gpl-2.0 |
yg257/Pangea | templates/root/ec2/lib/boto-2.34.0/boto/rds/regioninfo.py | 167 | 1513 | # Copyright (c) 2006-2010 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2010, Eucalyptus Systems, Inc.
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
from boto.regioninfo import RegionInfo
class RDSRegionInfo(RegionInfo):
    """RegionInfo variant whose endpoints always connect via RDSConnection."""

    def __init__(self, connection=None, name=None, endpoint=None,
                 connection_cls=None):
        # NOTE: connection_cls is accepted only for signature compatibility
        # with RegionInfo; it is ignored here and RDSConnection is always
        # passed to the superclass.
        # Imported locally to avoid a circular import with boto.rds.
        from boto.rds import RDSConnection
        super(RDSRegionInfo, self).__init__(connection, name, endpoint,
                                            RDSConnection)
| apache-2.0 |
Trust-Code/trust-addons | purchase_requisitor/__init__.py | 4 | 1407 | # -*- encoding: utf-8 -*-
###############################################################################
# #
# Copyright (C) 2015 Trustcode - www.trustcode.com.br #
# #
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU Affero General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
###############################################################################
from . import models
| agpl-3.0 |
icereval/osf.io | scripts/analytics/addon_snapshot.py | 4 | 5540 | from __future__ import absolute_import
import logging
# App must be initialized before models or ADDONS_AVAILABLE are available
from website.app import init_app
init_app()
from osf.models import OSFUser, AbstractNode
from framework.database import paginated
from scripts.analytics.base import SnapshotAnalytics
from website.settings import ADDONS_AVAILABLE
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
# Modified from scripts/analytics/benchmarks.py
def get_enabled_authorized_linked(user_settings_list, has_external_account, short_name):
    """ Gather the number of users who have at least one node in each of the stages for an addon

    :param user_settings_list: list of user_settings for a particular addon
    :param has_external_account: whether the addon uses external accounts; determines how node settings are loaded
    :param short_name: short name of addon to get correct node_settings
    :return: dict with number of users that have at least one project at each stage
    """
    from addons.forward.models import NodeSettings as ForwardNodeSettings

    num_enabled = 0  # of users w/ 1+ addon account connected
    num_authorized = 0  # of users w/ 1+ addon account connected to 1+ node
    num_linked = 0  # of users w/ 1+ addon account connected to 1+ node and configured

    # osfstorage and wiki don't have user_settings, so always assume they're enabled, authorized, linked
    if short_name == 'osfstorage' or short_name == 'wiki':
        # Count every active, confirmed, unmerged registered user.
        num_enabled = num_authorized = num_linked = OSFUser.objects.filter(
            is_registered=True,
            password__isnull=False,
            merged_by__isnull=True,
            date_disabled__isnull=True,
            date_confirmed__isnull=False
        ).count()
    elif short_name == 'forward':
        # forward has no auth flow: a node settings row means enabled/authorized,
        # and it is "linked" once a destination URL has been set.
        num_enabled = num_authorized = ForwardNodeSettings.objects.count()
        num_linked = ForwardNodeSettings.objects.filter(url__isnull=False).count()
    else:
        for user_settings in paginated(user_settings_list):
            node_settings_list = []
            if has_external_account:
                # OAuth-style addon: only users with credentials count as enabled.
                if user_settings.has_auth:
                    num_enabled += 1
                    node_settings_list = [AbstractNode.load(guid).get_addon(short_name) for guid in user_settings.oauth_grants.keys()]
            else:
                num_enabled += 1
                node_settings_list = [AbstractNode.load(guid).get_addon(short_name) for guid in user_settings.nodes_authorized]
            if any([ns.has_auth for ns in node_settings_list if ns]):
                num_authorized += 1
            if any([(ns.complete and ns.configured) for ns in node_settings_list if ns]):
                num_linked += 1
    return {
        'enabled': num_enabled,
        'authorized': num_authorized,
        'linked': num_linked
    }
class AddonSnapshot(SnapshotAnalytics):
    """Snapshot analytics of per-addon usage: node counts and user stages."""

    @property
    def collection_name(self):
        # Target collection for the emitted events.
        return 'addon_snapshot'

    def get_events(self, date=None):
        """Return one count record per available addon.

        Each record holds the provider name, user stage counts
        (enabled/authorized/linked) and node counts
        (total/connected/deleted/disconnected).
        """
        super(AddonSnapshot, self).get_events(date)

        counts = []
        addons_available = {k: v for k, v in [(addon.short_name, addon) for addon in ADDONS_AVAILABLE]}

        for short_name, addon in addons_available.iteritems():
            # Addons whose node settings carry an external_account use the
            # OAuth-style connected/disconnected definitions below.
            has_external_account = hasattr(addon.models.get('nodesettings'), 'external_account')

            connected_count = 0
            deleted_count = 0
            disconnected_count = 0
            node_settings_model = addon.models.get('nodesettings')
            if node_settings_model:
                # "Connected": node settings whose owner is not an old-style
                # collection node.
                for node_settings in paginated(node_settings_model):
                    if node_settings.owner and not node_settings.owner.all_tags.filter(name='old_node_collection', system=True).exists():
                        connected_count += 1
            deleted_count = addon.models['nodesettings'].objects.filter(deleted=True).count() if addon.models.get('nodesettings') else 0
            if has_external_account:
                # "Disconnected": settings rows that lost their external account.
                disconnected_count = addon.models['nodesettings'].objects.filter(external_account__isnull=True, deleted=False).count() if addon.models.get('nodesettings') else 0
            else:
                if addon.models.get('nodesettings'):
                    # Without external accounts, "disconnected" means configured
                    # but no longer complete.
                    for nsm in addon.models['nodesettings'].objects.filter(deleted=False):
                        if nsm.configured and not nsm.complete:
                            disconnected_count += 1
            total = connected_count + deleted_count + disconnected_count
            usage_counts = get_enabled_authorized_linked(addon.models.get('usersettings'), has_external_account, addon.short_name)

            counts.append({
                'provider': {
                    'name': short_name
                },
                'users': usage_counts,
                'nodes': {
                    'total': total,
                    'connected': connected_count,
                    'deleted': deleted_count,
                    'disconnected': disconnected_count
                }
            })

            logger.info(
                '{} counted. Users with a linked node: {}, Total connected nodes: {}.'.format(
                    addon.short_name,
                    usage_counts['linked'],
                    total
                )
            )
        return counts
def get_class():
    """Entry point used by the analytics runner to obtain the snapshot class."""
    return AddonSnapshot
if __name__ == '__main__':
    # Run the snapshot immediately and ship its events when invoked as a script.
    addon_snapshot = AddonSnapshot()
    events = addon_snapshot.get_events()
    addon_snapshot.send_events(events)
| apache-2.0 |
stackforge/poppy | tests/unit/transport/pecan/controllers/base.py | 2 | 2763 | # Copyright (c) 2015 Rackspace, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
from oslo_context import context as context_utils
import six.moves as sm
import testtools
from poppy.transport.pecan import controllers
from poppy.transport.pecan.controllers import v1
from poppy.transport.validators import helpers
class BasePecanControllerUnitTest(testtools.TestCase):
    """Shared setUp for pecan controller unit tests.

    Patches pecan's request/response proxies, the oslo request context,
    and the validation helpers, then reloads the controller modules so
    the patched objects are the ones bound at import time.
    """

    def setUp(self, controller):
        """Engages all patches for unit testing controllers.

        Patches the request, response, request context, and deserialization
        decorator to satisfy all controller dependencies for unit testing.

        :returns: None
        """
        super(BasePecanControllerUnitTest, self).setUp()

        # Restore the reloaded modules to a pristine state after the test.
        self.addCleanup(
            sm.reload_module,
            controllers
        )
        self.addCleanup(
            sm.reload_module,
            v1
        )
        self.addCleanup(
            sm.reload_module,
            controller
        )
        self.addCleanup(
            sm.reload_module,
            context_utils
        )
        self.addCleanup(
            sm.reload_module,
            helpers
        )

        self.driver = mock.MagicMock()

        # Fake request context: oslo's get_current is replaced by a mock
        # that also returns itself when called.
        context = mock.Mock()
        context.tenant = '000000001'
        context.user = 'user_id'
        context_utils.get_current = context
        context_utils.get_current.return_value = context

        pecan_request_patcher = mock.patch('pecan.request')
        self.request = pecan_request_patcher.start()
        self.request.host_url = 'test_url'
        self.request.base_url = 'test_url'

        # NOTE: self.response is assigned only here; the previous redundant
        # "self.response = mock.Mock()" (immediately overwritten) was removed.
        pecan_response_patcher = mock.patch('pecan.response')
        self.response = pecan_response_patcher.start()
        self.response.headers = {}

        deco_patcher = mock.patch('poppy.transport.validators.helpers')
        deco_patcher.start()

        # Reload to engage patches
        sm.reload_module(controller)
        sm.reload_module(v1)
        sm.reload_module(controllers)
        sm.reload_module(helpers)

        self.addCleanup(deco_patcher.stop)
        self.addCleanup(pecan_response_patcher.stop)
        self.addCleanup(pecan_request_patcher.stop)
| apache-2.0 |
Belxjander/Kirito | Python-3.5.0-Amiga/Lib/lib2to3/pgen2/driver.py | 89 | 5153 | # Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
# Modifications:
# Copyright 2006 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Parser driver.
This provides a high-level interface to parse a file into a syntax tree.
"""
__author__ = "Guido van Rossum <guido@python.org>"
__all__ = ["Driver", "load_grammar"]
# Python imports
import codecs
import io
import os
import logging
import sys
# Pgen imports
from . import grammar, parse, token, tokenize, pgen
class Driver(object):
    """High-level parser driver: feeds token streams into a pgen2 Parser."""

    def __init__(self, grammar, convert=None, logger=None):
        """Store the grammar, optional node-conversion callback, and logger."""
        self.grammar = grammar
        if logger is None:
            logger = logging.getLogger()
        self.logger = logger
        self.convert = convert

    def parse_tokens(self, tokens, debug=False):
        """Parse a series of tokens and return the syntax tree."""
        # XXX Move the prefix computation into a wrapper around tokenize.
        p = parse.Parser(self.grammar, self.convert)
        p.setup()
        lineno = 1
        column = 0
        type = value = start = end = line_text = None
        # prefix accumulates whitespace/comments preceding the next
        # significant token; it is attached to that token's node.
        prefix = ""
        for quintuple in tokens:
            type, value, start, end, line_text = quintuple
            if start != (lineno, column):
                # Gap between the last token's end and this token's start:
                # reconstruct the skipped newlines/whitespace into prefix.
                assert (lineno, column) <= start, ((lineno, column), start)
                s_lineno, s_column = start
                if lineno < s_lineno:
                    prefix += "\n" * (s_lineno - lineno)
                    lineno = s_lineno
                    column = 0
                if column < s_column:
                    prefix += line_text[column:s_column]
                    column = s_column
            if type in (tokenize.COMMENT, tokenize.NL):
                # Comments and non-logical newlines are not fed to the
                # parser; they ride along in the prefix.
                prefix += value
                lineno, column = end
                if value.endswith("\n"):
                    lineno += 1
                    column = 0
                continue
            if type == token.OP:
                # Map operator text to its specific grammar token type.
                type = grammar.opmap[value]
            if debug:
                self.logger.debug("%s %r (prefix=%r)",
                                  token.tok_name[type], value, prefix)
            if p.addtoken(type, value, (prefix, start)):
                # Parser signalled acceptance of the start symbol.
                if debug:
                    self.logger.debug("Stop.")
                break
            prefix = ""
            lineno, column = end
            if value.endswith("\n"):
                lineno += 1
                column = 0
        else:
            # We never broke out -- EOF is too soon (how can this happen???)
            raise parse.ParseError("incomplete input",
                                   type, value, (prefix, start))
        return p.rootnode

    def parse_stream_raw(self, stream, debug=False):
        """Parse a stream and return the syntax tree."""
        tokens = tokenize.generate_tokens(stream.readline)
        return self.parse_tokens(tokens, debug)

    def parse_stream(self, stream, debug=False):
        """Parse a stream and return the syntax tree."""
        return self.parse_stream_raw(stream, debug)

    def parse_file(self, filename, encoding=None, debug=False):
        """Parse a file and return the syntax tree."""
        stream = codecs.open(filename, "r", encoding)
        try:
            return self.parse_stream(stream, debug)
        finally:
            stream.close()

    def parse_string(self, text, debug=False):
        """Parse a string and return the syntax tree."""
        tokens = tokenize.generate_tokens(io.StringIO(text).readline)
        return self.parse_tokens(tokens, debug)
def load_grammar(gt="Grammar.txt", gp=None,
                 save=True, force=False, logger=None):
    """Load the grammar tables, regenerating the pickle cache when stale.

    gt is the grammar text file; gp the pickle path (derived from gt and
    the interpreter version when omitted). force=True always regenerates;
    save=True writes the regenerated tables back to the pickle.
    """
    log = logger if logger is not None else logging.getLogger()
    if gp is None:
        base, ext = os.path.splitext(gt)
        if ext == ".txt":
            ext = ""
        version_tag = ".".join(map(str, sys.version_info))
        gp = base + ext + version_tag + ".pickle"
    if force or not _newer(gp, gt):
        # Cache missing or older than the grammar text: rebuild from source.
        log.info("Generating grammar tables from %s", gt)
        g = pgen.generate_grammar(gt)
        if save:
            log.info("Writing grammar tables to %s", gp)
            try:
                g.dump(gp)
            except OSError as e:
                # Best effort: an unwritable cache is not fatal.
                log.info("Writing failed:" + str(e))
        return g
    # Fresh cache available: load the pickled tables directly.
    g = grammar.Grammar()
    g.load(gp)
    return g
def _newer(a, b):
    """Inquire whether file a was written since file b.

    Returns False when a does not exist, True when a exists but b does
    not; otherwise compares modification times (a tie counts as newer).
    """
    if not os.path.exists(a):
        return False
    if not os.path.exists(b):
        return True
    return os.path.getmtime(a) >= os.path.getmtime(b)
def main(*args):
    """Main program, when run as a script: produce grammar pickle files.

    Calls load_grammar for each argument, a path to a grammar text file.
    """
    if not args:
        args = sys.argv[1:]
    logging.basicConfig(level=logging.INFO, stream=sys.stdout,
                        format='%(message)s')
    for gt in args:
        # force=True regenerates unconditionally; save=True writes the pickle.
        load_grammar(gt, save=True, force=True)
    return True

if __name__ == "__main__":
    # Exit status 0 on success (main returns True).
    sys.exit(int(not main()))
| gpl-3.0 |
amenonsen/ansible | lib/ansible/modules/network/cloudengine/ce_vxlan_arp.py | 6 | 24334 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: ce_vxlan_arp
version_added: "2.4"
short_description: Manages ARP attributes of VXLAN on HUAWEI CloudEngine devices.
description:
- Manages ARP attributes of VXLAN on HUAWEI CloudEngine devices.
author: QijunPan (@QijunPan)
options:
evn_bgp:
description:
- Enables EVN BGP.
choices: ['enable', 'disable']
evn_source_ip:
description:
- Specifies the source address of an EVN BGP peer.
The value is in dotted decimal notation.
evn_peer_ip:
description:
- Specifies the IP address of an EVN BGP peer.
The value is in dotted decimal notation.
evn_server:
description:
- Configures the local device as the router reflector (RR) on the EVN network.
choices: ['enable', 'disable']
evn_reflect_client:
description:
- Configures the local device as the route reflector (RR) and its peer as the client.
choices: ['enable', 'disable']
vbdif_name:
description:
- Full name of VBDIF interface, i.e. Vbdif100.
arp_collect_host:
description:
- Enables EVN BGP or BGP EVPN to collect host information.
choices: ['enable', 'disable']
host_collect_protocol:
description:
- Enables EVN BGP or BGP EVPN to advertise host information.
choices: ['bgp','none']
bridge_domain_id:
description:
- Specifies a BD(bridge domain) ID.
The value is an integer ranging from 1 to 16777215.
arp_suppress:
description:
- Enables ARP broadcast suppression in a BD.
choices: ['enable', 'disable']
state:
description:
- Determines whether the config should be present or not
on the device.
default: present
choices: ['present', 'absent']
"""
EXAMPLES = '''
- name: vxlan arp module test
hosts: ce128
connection: local
gather_facts: no
vars:
cli:
host: "{{ inventory_hostname }}"
port: "{{ ansible_ssh_port }}"
username: "{{ username }}"
password: "{{ password }}"
transport: cli
tasks:
- name: Configure EVN BGP on Layer 2 and Layer 3 VXLAN gateways to establish EVN BGP peer relationships.
ce_vxlan_arp:
evn_bgp: enable
evn_source_ip: 6.6.6.6
evn_peer_ip: 7.7.7.7
provider: "{{ cli }}"
- name: Configure a Layer 3 VXLAN gateway as a BGP RR.
ce_vxlan_arp:
evn_bgp: enable
evn_server: enable
provider: "{{ cli }}"
- name: Enable EVN BGP on a Layer 3 VXLAN gateway to collect host information.
ce_vxlan_arp:
vbdif_name: Vbdif100
arp_collect_host: enable
provider: "{{ cli }}"
- name: Enable Layer 2 and Layer 3 VXLAN gateways to use EVN BGP to advertise host information.
ce_vxlan_arp:
host_collect_protocol: bgp
provider: "{{ cli }}"
- name: Enable ARP broadcast suppression on a Layer 2 VXLAN gateway.
ce_vxlan_arp:
bridge_domain_id: 100
arp_suppress: enable
provider: "{{ cli }}"
'''
RETURN = '''
proposed:
description: k/v pairs of parameters passed into module
returned: verbose mode
type: dict
sample: {"evn_bgp": "enable", "evn_source_ip": "6.6.6.6", "evn_peer_ip":"7.7.7.7", state: "present"}
existing:
description: k/v pairs of existing configuration
returned: verbose mode
type: dict
sample: {"evn_bgp": "disable", "evn_source_ip": null, "evn_peer_ip": []}
end_state:
description: k/v pairs of configuration after module execution
returned: verbose mode
type: dict
sample: {"evn_bgp": "enable", "evn_source_ip": "6.6.6.6", "evn_peer_ip": ["7.7.7.7"]}
updates:
description: commands sent to the device
returned: always
type: list
sample: ["evn bgp",
"source-address 6.6.6.6",
"peer 7.7.7.7"]
changed:
description: check to see if a change was made on the device
returned: always
type: bool
sample: true
'''
import re
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.cloudengine.ce import load_config
from ansible.module_utils.network.cloudengine.ce import ce_argument_spec
from ansible.module_utils.connection import exec_command
def is_config_exist(cmp_cfg, test_cfg):
    """Return True if test_cfg occurs as a substring of cmp_cfg.

    Empty or missing arguments are treated as "not present".
    """
    if cmp_cfg and test_cfg:
        return test_cfg in cmp_cfg
    return False
def is_valid_v4addr(addr):
    """Check whether addr looks like a valid dotted-decimal IPv4 address."""
    if addr.count('.') != 3:
        return False
    octets = addr.split('.')
    if len(octets) != 4:
        return False
    for octet in octets:
        # Non-numeric pieces (including empty ones) and values over 255
        # invalidate the address.
        if not octet.isdigit():
            return False
        if int(octet) > 255:
            return False
    return True
def get_evn_peers(config):
    """Extract the de-duplicated list of EVN BGP peer IPv4 addresses.

    Returns None when the configuration contains no "peer <ip>" lines.
    Note: the returned list order is unspecified (set-based de-dup).
    """
    # BUGFIX: dots are now escaped; the previous pattern's bare '.'
    # matched any character, so non-address text could be captured.
    get = re.findall(r"peer ([0-9]+\.[0-9]+\.[0-9]+\.[0-9]+)", config)
    if not get:
        return None
    else:
        return list(set(get))
def get_evn_srouce(config):
    """Return the EVN BGP source address from config, or None.

    (Name keeps the historical "srouce" typo for caller compatibility.)
    """
    # BUGFIX: dots are now escaped so only dotted-decimal addresses match.
    get = re.findall(
        r"source-address ([0-9]+\.[0-9]+\.[0-9]+\.[0-9]+)", config)
    if not get:
        return None
    else:
        return get[0]
def get_evn_reflect_client(config):
    """Return the list of peers configured as reflect-client, or None."""
    # BUGFIX: escaped dots in the IPv4 portion (previous docstring was a
    # copy-paste of get_evn_peers' and the bare '.' matched any char).
    get = re.findall(
        r"peer ([0-9]+\.[0-9]+\.[0-9]+\.[0-9]+)\s*reflect-client", config)
    if not get:
        return None
    else:
        return list(get)
class VxlanArp(object):
"""
Manages arp attributes of VXLAN.
"""
    def __init__(self, argument_spec):
        """Build the AnsibleModule, copy its params, and init working state."""
        self.spec = argument_spec
        self.module = None
        self.init_module()

        # module input info
        self.evn_bgp = self.module.params['evn_bgp']
        self.evn_source_ip = self.module.params['evn_source_ip']
        self.evn_peer_ip = self.module.params['evn_peer_ip']
        self.evn_server = self.module.params['evn_server']
        self.evn_reflect_client = self.module.params['evn_reflect_client']
        self.vbdif_name = self.module.params['vbdif_name']
        self.arp_collect_host = self.module.params['arp_collect_host']
        self.host_collect_protocol = self.module.params[
            'host_collect_protocol']
        self.bridge_domain_id = self.module.params['bridge_domain_id']
        self.arp_suppress = self.module.params['arp_suppress']
        self.state = self.module.params['state']

        # host info
        self.host = self.module.params['host']
        self.username = self.module.params['username']
        self.port = self.module.params['port']

        # state
        self.config = ""  # current config
        self.changed = False
        self.updates_cmd = list()  # human-readable change report
        self.commands = list()  # CLI commands queued for the device
        self.results = dict()
        self.proposed = dict()
        self.existing = dict()
        self.end_state = dict()
    def init_module(self):
        """Create the AnsibleModule with co-dependent parameter constraints."""
        # vbdif_name/arp_collect_host and bridge_domain_id/arp_suppress
        # only make sense as pairs.
        required_together = [("vbdif_name", "arp_collect_host"), ("bridge_domain_id", "arp_suppress")]
        self.module = AnsibleModule(argument_spec=self.spec,
                                    required_together=required_together,
                                    supports_check_mode=True)
    def cli_load_config(self, commands):
        """Push the queued CLI commands to the device (no-op in check mode)."""
        if not self.module.check_mode:
            load_config(self.module, commands)
def get_config(self, flags=None):
"""Retrieves the current config from the device or cache
"""
flags = [] if flags is None else flags
cmd = 'display current-configuration '
cmd += ' '.join(flags)
cmd = cmd.strip()
rc, out, err = exec_command(self.module, cmd)
if rc != 0:
self.module.fail_json(msg=err)
cfg = str(out).strip()
return cfg
def get_current_config(self):
"""get current configuration"""
flags = list()
exp = "| ignore-case section include evn bgp|host collect protocol bgp"
if self.vbdif_name:
exp += "|^interface %s$" % self.vbdif_name
if self.bridge_domain_id:
exp += "|^bridge-domain %s$" % self.bridge_domain_id
flags.append(exp)
cfg_str = self.get_config(flags)
config = cfg_str.split("\n")
exist_config = ""
for cfg in config:
if not cfg.startswith("display"):
exist_config += cfg
return exist_config
def cli_add_command(self, command, undo=False):
"""add command to self.update_cmd and self.commands"""
if undo and command.lower() not in ["quit", "return"]:
cmd = "undo " + command
else:
cmd = command
self.commands.append(cmd) # set to device
if command.lower() not in ["quit", "return"]:
self.updates_cmd.append(cmd) # show updates result
def config_bridge_domain(self):
"""manage bridge domain configuration"""
if not self.bridge_domain_id:
return
# bridge-domain bd-id
# [undo] arp broadcast-suppress enable
cmd = "bridge-domain %s" % self.bridge_domain_id
if not is_config_exist(self.config, cmd):
self.module.fail_json(msg="Error: Bridge domain %s is not exist." % self.bridge_domain_id)
cmd = "arp broadcast-suppress enable"
exist = is_config_exist(self.config, cmd)
if self.arp_suppress == "enable" and not exist:
self.cli_add_command("bridge-domain %s" % self.bridge_domain_id)
self.cli_add_command(cmd)
self.cli_add_command("quit")
elif self.arp_suppress == "disable" and exist:
self.cli_add_command("bridge-domain %s" % self.bridge_domain_id)
self.cli_add_command(cmd, undo=True)
self.cli_add_command("quit")
    def config_evn_bgp(self):
        """Emit CLI to reconcile EVN BGP settings with the requested state.

        Handles enabling/disabling EVN BGP itself, then (inside the
        "evn bgp" view) the source address, peer, reflect-client, and RR
        server flags. The view is entered lazily on the first change and
        closed once at the end.
        """
        evn_bgp_view = False   # True once "evn bgp" has been queued
        evn_bgp_enable = False  # EVN BGP on (requested or already present)

        cmd = "evn bgp"
        exist = is_config_exist(self.config, cmd)
        if self.evn_bgp == "enable" or exist:
            evn_bgp_enable = True

        # [undo] evn bgp
        if self.evn_bgp:
            if self.evn_bgp == "enable" and not exist:
                self.cli_add_command(cmd)
                evn_bgp_view = True
            elif self.evn_bgp == "disable" and exist:
                # Disabling EVN BGP removes all sub-config; nothing else to do.
                self.cli_add_command(cmd, undo=True)
                return

        # [undo] source-address ip-address
        if evn_bgp_enable and self.evn_source_ip:
            cmd = "source-address %s" % self.evn_source_ip
            exist = is_config_exist(self.config, cmd)
            if self.state == "present" and not exist:
                if not evn_bgp_view:
                    self.cli_add_command("evn bgp")
                    evn_bgp_view = True
                self.cli_add_command(cmd)
            elif self.state == "absent" and exist:
                if not evn_bgp_view:
                    self.cli_add_command("evn bgp")
                    evn_bgp_view = True
                self.cli_add_command(cmd, undo=True)

        # [undo] peer ip-address
        # [undo] peer ipv4-address reflect-client
        if evn_bgp_enable and self.evn_peer_ip:
            cmd = "peer %s" % self.evn_peer_ip
            exist = is_config_exist(self.config, cmd)
            if self.state == "present":
                if not exist:
                    # New peer: add it, and mark reflect-client in one go
                    # when requested.
                    if not evn_bgp_view:
                        self.cli_add_command("evn bgp")
                        evn_bgp_view = True
                    self.cli_add_command(cmd)
                    if self.evn_reflect_client == "enable":
                        self.cli_add_command(
                            "peer %s reflect-client" % self.evn_peer_ip)
                else:
                    # Peer exists: only the reflect-client flag may change.
                    if self.evn_reflect_client:
                        cmd = "peer %s reflect-client" % self.evn_peer_ip
                        exist = is_config_exist(self.config, cmd)
                        if self.evn_reflect_client == "enable" and not exist:
                            if not evn_bgp_view:
                                self.cli_add_command("evn bgp")
                                evn_bgp_view = True
                            self.cli_add_command(cmd)
                        elif self.evn_reflect_client == "disable" and exist:
                            if not evn_bgp_view:
                                self.cli_add_command("evn bgp")
                                evn_bgp_view = True
                            self.cli_add_command(cmd, undo=True)
            else:
                if exist:
                    if not evn_bgp_view:
                        self.cli_add_command("evn bgp")
                        evn_bgp_view = True
                    self.cli_add_command(cmd, undo=True)

        # [undo] server enable
        if evn_bgp_enable and self.evn_server:
            cmd = "server enable"
            exist = is_config_exist(self.config, cmd)
            if self.evn_server == "enable" and not exist:
                if not evn_bgp_view:
                    self.cli_add_command("evn bgp")
                    evn_bgp_view = True
                self.cli_add_command(cmd)
            elif self.evn_server == "disable" and exist:
                if not evn_bgp_view:
                    self.cli_add_command("evn bgp")
                    evn_bgp_view = True
                self.cli_add_command(cmd, undo=True)

        if evn_bgp_view:
            self.cli_add_command("quit")
def config_vbdif(self):
"""configure command at the VBDIF interface view"""
# interface vbdif bd-id
# [undo] arp collect host enable
cmd = "interface %s" % self.vbdif_name.lower().capitalize()
exist = is_config_exist(self.config, cmd)
if not exist:
self.module.fail_json(
msg="Error: Interface %s does not exist." % self.vbdif_name)
cmd = "arp collect host enable"
exist = is_config_exist(self.config, cmd)
if self.arp_collect_host == "enable" and not exist:
self.cli_add_command("interface %s" %
self.vbdif_name.lower().capitalize())
self.cli_add_command(cmd)
self.cli_add_command("quit")
elif self.arp_collect_host == "disable" and exist:
self.cli_add_command("interface %s" %
self.vbdif_name.lower().capitalize())
self.cli_add_command(cmd, undo=True)
self.cli_add_command("quit")
def config_host_collect_protocal(self):
"""Enable EVN BGP or BGP EVPN to advertise host information"""
# [undo] host collect protocol bgp
cmd = "host collect protocol bgp"
exist = is_config_exist(self.config, cmd)
if self.state == "present":
if self.host_collect_protocol == "bgp" and not exist:
self.cli_add_command(cmd)
elif self.host_collect_protocol == "none" and exist:
self.cli_add_command(cmd, undo=True)
else:
if self.host_collect_protocol == "bgp" and exist:
self.cli_add_command(cmd, undo=True)
def is_valid_vbdif(self, ifname):
"""check is interface vbdif is valid"""
if not ifname.upper().startswith('VBDIF'):
return False
bdid = self.vbdif_name.replace(" ", "").upper().replace("VBDIF", "")
if not bdid.isdigit():
return False
if int(bdid) < 1 or int(bdid) > 16777215:
return False
return True
    def check_params(self):
        """Validate all input params, failing the module on the first error.

        Side effect: normalizes self.vbdif_name (strips spaces,
        capitalizes) before validating it.
        """
        # bridge domain id check
        if self.bridge_domain_id:
            if not self.bridge_domain_id.isdigit():
                self.module.fail_json(
                    msg="Error: Bridge domain id is not digit.")
            if int(self.bridge_domain_id) < 1 or int(self.bridge_domain_id) > 16777215:
                self.module.fail_json(
                    msg="Error: Bridge domain id is not in the range from 1 to 16777215.")

        # evn_source_ip check
        if self.evn_source_ip:
            if not is_valid_v4addr(self.evn_source_ip):
                self.module.fail_json(msg="Error: evn_source_ip is invalid.")

        # evn_peer_ip check
        if self.evn_peer_ip:
            if not is_valid_v4addr(self.evn_peer_ip):
                self.module.fail_json(msg="Error: evn_peer_ip is invalid.")

        # vbdif_name check (normalize, then validate)
        if self.vbdif_name:
            self.vbdif_name = self.vbdif_name.replace(
                " ", "").lower().capitalize()
            if not self.is_valid_vbdif(self.vbdif_name):
                self.module.fail_json(msg="Error: vbdif_name is invalid.")

        # evn_reflect_client and evn_peer_ip must set at the same time
        if self.evn_reflect_client and not self.evn_peer_ip:
            self.module.fail_json(
                msg="Error: evn_reflect_client and evn_peer_ip must set at the same time.")

        # evn_server and evn_reflect_client can not set at the same time
        if self.evn_server == "enable" and self.evn_reflect_client == "enable":
            self.module.fail_json(
                msg="Error: evn_server and evn_reflect_client can not set at the same time.")
def get_proposed(self):
"""get proposed info"""
if self.evn_bgp:
self.proposed["evn_bgp"] = self.evn_bgp
if self.evn_source_ip:
self.proposed["evn_source_ip"] = self.evn_source_ip
if self.evn_peer_ip:
self.proposed["evn_peer_ip"] = self.evn_peer_ip
if self.evn_server:
self.proposed["evn_server"] = self.evn_server
if self.evn_reflect_client:
self.proposed["evn_reflect_client"] = self.evn_reflect_client
if self.arp_collect_host:
self.proposed["arp_collect_host"] = self.arp_collect_host
if self.host_collect_protocol:
self.proposed["host_collect_protocol"] = self.host_collect_protocol
if self.arp_suppress:
self.proposed["arp_suppress"] = self.arp_suppress
if self.vbdif_name:
self.proposed["vbdif_name"] = self.evn_peer_ip
if self.bridge_domain_id:
self.proposed["bridge_domain_id"] = self.bridge_domain_id
self.proposed["state"] = self.state
def get_existing(self):
"""get existing info"""
evn_bgp_exist = is_config_exist(self.config, "evn bgp")
if evn_bgp_exist:
self.existing["evn_bgp"] = "enable"
else:
self.existing["evn_bgp"] = "disable"
if evn_bgp_exist:
if is_config_exist(self.config, "server enable"):
self.existing["evn_server"] = "enable"
else:
self.existing["evn_server"] = "disable"
self.existing["evn_source_ip"] = get_evn_srouce(self.config)
self.existing["evn_peer_ip"] = get_evn_peers(self.config)
self.existing["evn_reflect_client"] = get_evn_reflect_client(
self.config)
if is_config_exist(self.config, "arp collect host enable"):
self.existing["host_collect_protocol"] = "enable"
else:
self.existing["host_collect_protocol"] = "disable"
if is_config_exist(self.config, "host collect protocol bgp"):
self.existing["host_collect_protocol"] = "bgp"
else:
self.existing["host_collect_protocol"] = None
if is_config_exist(self.config, "arp broadcast-suppress enable"):
self.existing["arp_suppress"] = "enable"
else:
self.existing["arp_suppress"] = "disable"
def get_end_state(self):
"""get end state info"""
config = self.get_current_config()
evn_bgp_exist = is_config_exist(config, "evn bgp")
if evn_bgp_exist:
self.end_state["evn_bgp"] = "enable"
else:
self.end_state["evn_bgp"] = "disable"
if evn_bgp_exist:
if is_config_exist(config, "server enable"):
self.end_state["evn_server"] = "enable"
else:
self.end_state["evn_server"] = "disable"
self.end_state["evn_source_ip"] = get_evn_srouce(config)
self.end_state["evn_peer_ip"] = get_evn_peers(config)
self.end_state[
"evn_reflect_client"] = get_evn_reflect_client(config)
if is_config_exist(config, "arp collect host enable"):
self.end_state["host_collect_protocol"] = "enable"
else:
self.end_state["host_collect_protocol"] = "disable"
if is_config_exist(config, "host collect protocol bgp"):
self.end_state["host_collect_protocol"] = "bgp"
else:
self.end_state["host_collect_protocol"] = None
if is_config_exist(config, "arp broadcast-suppress enable"):
self.end_state["arp_suppress"] = "enable"
else:
self.end_state["arp_suppress"] = "disable"
    def work(self):
        """Main driver: validate params, apply configuration, report results."""
        self.check_params()
        # snapshot the running configuration once; get_existing() parses it
        self.config = self.get_current_config()
        self.get_existing()
        self.get_proposed()
        # deal present or absent
        if self.evn_bgp or self.evn_server or self.evn_peer_ip or self.evn_source_ip:
            self.config_evn_bgp()
        if self.vbdif_name and self.arp_collect_host:
            self.config_vbdif()
        if self.host_collect_protocol:
            # NOTE(review): 'protocal' typo is in the method name defined
            # elsewhere in this class; kept as-is to match.
            self.config_host_collect_protocal()
        if self.bridge_domain_id and self.arp_suppress:
            self.config_bridge_domain()
        # push any queued CLI commands to the device in one batch
        if self.commands:
            self.cli_load_config(self.commands)
            self.changed = True
        self.get_end_state()
        self.results['changed'] = self.changed
        self.results['proposed'] = self.proposed
        self.results['existing'] = self.existing
        self.results['end_state'] = self.end_state
        if self.changed:
            self.results['updates'] = self.updates_cmd
        else:
            self.results['updates'] = list()
        self.module.exit_json(**self.results)
def main():
    """Module entry point: build the argument spec and run the module."""
    # most options share the same enable/disable choice list
    on_off = ['enable', 'disable']
    argument_spec = dict(
        evn_bgp=dict(required=False, type='str', choices=list(on_off)),
        evn_source_ip=dict(required=False, type='str'),
        evn_peer_ip=dict(required=False, type='str'),
        evn_server=dict(required=False, type='str', choices=list(on_off)),
        evn_reflect_client=dict(required=False, type='str',
                                choices=list(on_off)),
        vbdif_name=dict(required=False, type='str'),
        arp_collect_host=dict(required=False, type='str',
                              choices=list(on_off)),
        host_collect_protocol=dict(required=False, type='str',
                                   choices=['bgp', 'none']),
        bridge_domain_id=dict(required=False, type='str'),
        arp_suppress=dict(required=False, type='str', choices=list(on_off)),
        state=dict(required=False, default='present',
                   choices=['present', 'absent'])
    )
    # merge the shared CloudEngine connection arguments
    argument_spec.update(ce_argument_spec)
    module = VxlanArp(argument_spec)
    module.work()


if __name__ == '__main__':
    main()
| gpl-3.0 |
rcharp/toyota-flask | venv/lib/python2.7/site-packages/numpy/f2py/crackfortran.py | 53 | 119354 | #!/usr/bin/env python
"""
crackfortran --- read fortran (77,90) code and extract declaration information.
Copyright 1999-2004 Pearu Peterson all rights reserved,
Pearu Peterson <pearu@ioc.ee>
Permission to use, modify, and distribute this software is given under the
terms of the NumPy License.
NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
$Date: 2005/09/27 07:13:49 $
Pearu Peterson
Usage of crackfortran:
======================
Command line keys: -quiet,-verbose,-fix,-f77,-f90,-show,-h <pyffilename>
-m <module name for f77 routines>,--ignore-contains
Functions: crackfortran, crack2fortran
The following Fortran statements/constructions are supported
(or will be if needed):
block data,byte,call,character,common,complex,contains,data,
dimension,double complex,double precision,end,external,function,
implicit,integer,intent,interface,intrinsic,
logical,module,optional,parameter,private,public,
program,real,(sequence?),subroutine,type,use,virtual,
include,pythonmodule
Note: 'virtual' is mapped to 'dimension'.
Note: 'implicit integer (z) static (z)' is 'implicit static (z)' (this is minor bug).
Note: code after 'contains' will be ignored until its scope ends.
Note: 'common' statement is extended: dimensions are moved to variable definitions
Note: f2py directive: <commentchar>f2py<line> is read as <line>
Note: pythonmodule is introduced to represent Python module
Usage:
`postlist=crackfortran(files,funcs)`
`postlist` contains declaration information read from the list of files `files`.
`crack2fortran(postlist)` returns a fortran code to be saved to pyf-file
`postlist` has the following structure:
*** it is a list of dictionaries containing `blocks':
B = {'block','body','vars','parent_block'[,'name','prefix','args','result',
'implicit','externals','interfaced','common','sortvars',
'commonvars','note']}
B['block'] = 'interface' | 'function' | 'subroutine' | 'module' |
'program' | 'block data' | 'type' | 'pythonmodule'
B['body'] --- list containing `subblocks' with the same structure as `blocks'
B['parent_block'] --- dictionary of a parent block:
C['body'][<index>]['parent_block'] is C
B['vars'] --- dictionary of variable definitions
B['sortvars'] --- dictionary of variable definitions sorted by dependence (independent first)
B['name'] --- name of the block (not if B['block']=='interface')
B['prefix'] --- prefix string (only if B['block']=='function')
B['args'] --- list of argument names if B['block']== 'function' | 'subroutine'
B['result'] --- name of the return value (only if B['block']=='function')
B['implicit'] --- dictionary {'a':<variable definition>,'b':...} | None
B['externals'] --- list of variables being external
B['interfaced'] --- list of variables being external and defined
B['common'] --- dictionary of common blocks (list of objects)
B['commonvars'] --- list of variables used in common blocks (dimensions are moved to variable definitions)
B['from'] --- string showing the 'parents' of the current block
B['use'] --- dictionary of modules used in current block:
{<modulename>:{['only':<0|1>],['map':{<local_name1>:<use_name1>,...}]}}
B['note'] --- list of LaTeX comments on the block
B['f2pyenhancements'] --- optional dictionary
{'threadsafe':'','fortranname':<name>,
'callstatement':<C-expr>|<multi-line block>,
'callprotoargument':<C-expr-list>,
'usercode':<multi-line block>|<list of multi-line blocks>,
'pymethoddef:<multi-line block>'
}
B['entry'] --- dictionary {entryname:argslist,..}
B['varnames'] --- list of variable names given in the order of reading the
Fortran code, useful for derived types.
B['saved_interface'] --- a string of scanned routine signature, defines explicit interface
*** Variable definition is a dictionary
D = B['vars'][<variable name>] =
{'typespec'[,'attrspec','kindselector','charselector','=','typename']}
D['typespec'] = 'byte' | 'character' | 'complex' | 'double complex' |
'double precision' | 'integer' | 'logical' | 'real' | 'type'
D['attrspec'] --- list of attributes (e.g. 'dimension(<arrayspec>)',
'external','intent(in|out|inout|hide|c|callback|cache|aligned4|aligned8|aligned16)',
'optional','required', etc)
K = D['kindselector'] = {['*','kind']} (only if D['typespec'] =
'complex' | 'integer' | 'logical' | 'real' )
C = D['charselector'] = {['*','len','kind']}
(only if D['typespec']=='character')
D['='] --- initialization expression string
D['typename'] --- name of the type if D['typespec']=='type'
D['dimension'] --- list of dimension bounds
D['intent'] --- list of intent specifications
D['depend'] --- list of variable names on which current variable depends on
D['check'] --- list of C-expressions; if C-expr returns zero, exception is raised
D['note'] --- list of LaTeX comments on the variable
*** Meaning of kind/char selectors (few examples):
D['typespec>']*K['*']
D['typespec'](kind=K['kind'])
character*C['*']
character(len=C['len'],kind=C['kind'])
(see also fortran type declaration statement formats below)
Fortran 90 type declaration statement format (F77 is subset of F90)
====================================================================
(Main source: IBM XL Fortran 5.1 Language Reference Manual)
type declaration = <typespec> [[<attrspec>]::] <entitydecl>
<typespec> = byte |
character[<charselector>] |
complex[<kindselector>] |
double complex |
double precision |
integer[<kindselector>] |
logical[<kindselector>] |
real[<kindselector>] |
type(<typename>)
<charselector> = * <charlen> |
([len=]<len>[,[kind=]<kind>]) |
(kind=<kind>[,len=<len>])
<kindselector> = * <intlen> |
([kind=]<kind>)
<attrspec> = comma separated list of attributes.
Only the following attributes are used in
building up the interface:
external
(parameter --- affects '=' key)
optional
intent
Other attributes are ignored.
<intentspec> = in | out | inout
<arrayspec> = comma separated list of dimension bounds.
<entitydecl> = <name> [[*<charlen>][(<arrayspec>)] | [(<arrayspec>)]*<charlen>]
[/<init_expr>/ | =<init_expr>] [,<entitydecl>]
In addition, the following attributes are used: check,depend,note
TODO:
* Apply 'parameter' attribute (e.g. 'integer parameter :: i=2' 'real x(i)'
-> 'real x(2)')
The above may be solved by creating appropriate preprocessor program, for example.
"""
from __future__ import division, absolute_import, print_function
import sys
import string
import fileinput
import re
import pprint
import os
import copy
import platform
from . import __version__
from .auxfuncs import *
f2py_version = __version__.version

# Global flags:
strictf77=1              # Ignore `!' comments unless line[0]=='!'
sourcecodeform='fix'     # 'fix','free'
quiet=0                  # Be verbose if 0 (Obsolete: not used any more)
verbose=1                # Be quiet if 0, extra verbose if > 1.
tabchar=4*' '            # what to expand tabs to
pyffilename=''           # output .pyf file name, if any
f77modulename=''         # wrap plain F77 routines into this module name
skipemptyends=0          # for old F77 programs without 'program' statement
ignorecontains=1         # skip bodies after 'contains' (see module docstring)
dolowercase=1            # lower-case the source before cracking
debug=[]

# Global parser state (reset by reset_global_f2py_vars / crackline(reset=-1)):
groupcounter=0           # current block nesting depth
grouplist={groupcounter:[]}
neededmodule=-1
expectbegin=1
skipblocksuntil=-1
usermodules=[]
f90modulevars={}
gotnextfile=1
filepositiontext=''      # "Line #N in file:..." prefix for messages
currentfilename=''
skipfunctions=[]
skipfuncs=[]
onlyfuncs=[]
include_paths=[]
previous_context = None
def reset_global_f2py_vars():
    """Restore every module-level crackfortran flag and state variable to
    its default, so results from a previous run do not leak into the next.
    """
    global groupcounter, grouplist, neededmodule, expectbegin, \
        skipblocksuntil, usermodules, f90modulevars, gotnextfile, \
        filepositiontext, currentfilename, skipfunctions, skipfuncs, \
        onlyfuncs, include_paths, previous_context, \
        strictf77, sourcecodeform, quiet, verbose, tabchar, pyffilename, \
        f77modulename, skipemptyends, ignorecontains, dolowercase, debug
    # flags
    strictf77 = 1
    sourcecodeform = 'fix'
    quiet = 0
    verbose = 1
    tabchar = 4*' '
    pyffilename = ''
    f77modulename = ''
    skipemptyends = 0
    ignorecontains = 1
    dolowercase = 1
    debug = []
    # variables
    groupcounter = 0
    grouplist = {groupcounter:[]}
    neededmodule =-1
    expectbegin = 1
    skipblocksuntil = -1
    usermodules = []
    f90modulevars = {}
    gotnextfile = 1
    filepositiontext = ''
    currentfilename = ''
    skipfunctions = []
    skipfuncs = []
    onlyfuncs = []
    include_paths = []
    previous_context = None
###### Some helper functions
def show(o,f=0):pprint.pprint(o)
# Shortcut for writing error/diagnostic messages to stderr.
errmess=sys.stderr.write

def outmess(line,flag=1):
    """Write *line* to stdout, prefixed with the current file-position
    text when *flag* is true. Honours the global verbose/quiet flags."""
    global filepositiontext
    if not verbose: return
    if not quiet:
        if flag:sys.stdout.write(filepositiontext)
        sys.stdout.write(line)
# Keep the regex module cache small; crackfortran compiles many patterns.
re._MAXCACHE=50

# Default Fortran implicit typing rules: i-n are integer, everything
# else (plus '$' and '_') is real.
defaultimplicitrules={}
for c in "abcdefghopqrstuvwxyz$_": defaultimplicitrules[c]={'typespec':'real'}
for c in "ijklmn": defaultimplicitrules[c]={'typespec':'integer'}
del c
# Identifiers that clash with C keywords or f2py-internal names; such
# Fortran names get a '_bn' suffix in the generated wrapper code.
_c_clash_names = [
    'int', 'double', 'float', 'char', 'short', 'long', 'void', 'case', 'while',
    'return', 'signed', 'unsigned', 'if', 'for', 'typedef', 'sizeof', 'union',
    'struct', 'static', 'register', 'new', 'break', 'do', 'goto', 'switch',
    'continue', 'else', 'inline', 'extern', 'delete', 'const', 'auto',
    'len', 'rank', 'shape', 'index', 'slen', 'size', '_i',
    'max', 'min',
    'flen', 'fshape',
    'string', 'complex_double', 'float_double', 'stdin', 'stderr', 'stdout',
    'type', 'default']
badnames = dict((nm, nm + '_bn') for nm in _c_clash_names)
invbadnames = dict((nm + '_bn', nm) for nm in _c_clash_names)

def rmbadname1(name):
    """Return a safe '_bn' alias for *name* if it clashes with C, else *name*."""
    try:
        replacement = badnames[name]
    except KeyError:
        return name
    errmess('rmbadname1: Replacing "%s" with "%s".\n'%(name, replacement))
    return replacement

def rmbadname(names):
    """Apply rmbadname1 to every entry of *names*."""
    return [rmbadname1(nm) for nm in names]

def undo_rmbadname1(name):
    """Invert rmbadname1: map a '_bn' alias back to its original name."""
    try:
        original = invbadnames[name]
    except KeyError:
        return name
    errmess('undo_rmbadname1: Replacing "%s" with "%s".\n'\
            %(name, original))
    return original

def undo_rmbadname(names):
    """Apply undo_rmbadname1 to every entry of *names*."""
    return [undo_rmbadname1(nm) for nm in names]
def getextension(name):
    """Return the filename extension of *name* without the dot.

    Returns '' when there is no dot, or when the last dot occurs before
    a path separator ('/' or '\\'), i.e. belongs to a directory name.
    """
    dot = name.rfind('.')
    if dot < 0:
        return ''
    tail = name[dot:]
    if '\\' in tail or '/' in tail:
        return ''
    return name[dot+1:]
# Heuristics for deciding the source form of a Fortran file: filename
# extension plus emacs-style '-*- fortran/f90/fix -*-' header markers.
is_f_file = re.compile(r'.*[.](for|ftn|f77|f)\Z', re.I).match
_has_f_header = re.compile(r'-[*]-\s*fortran\s*-[*]-', re.I).search
_has_f90_header = re.compile(r'-[*]-\s*f90\s*-[*]-', re.I).search
_has_fix_header = re.compile(r'-[*]-\s*fix\s*-[*]-', re.I).search
_free_f90_start = re.compile(r'[^c*]\s*[^\s\d\t]', re.I).match

def is_free_format(file):
    """Check if file is in free format Fortran.

    Returns 1 when the file looks like free-form Fortran, else 0.
    f90 allows both fixed and free format; assume fixed unless signs of
    free format are detected in the first few non-comment lines.
    """
    result = 0
    # use a context manager so the handle is closed even if reading raises
    # (the original leaked the handle on an exception)
    with open(file, 'r') as f:
        line = f.readline()
        n = 15  # the number of non-comment lines to scan for hints
        if _has_f_header(line):
            n = 0
        elif _has_f90_header(line):
            n = 0
            result = 1
        while n > 0 and line:
            if line[0] != '!' and line.strip():
                n -= 1
                # free form: code starting before column 6, or a trailing
                # '&' continuation marker
                if (line[0] != '\t' and _free_f90_start(line[:5])) or line[-2:-1] == '&':
                    result = 1
                    break
            line = f.readline()
    return result
####### Read fortran (77,90) code
def readfortrancode(ffile,dowithline=show,istop=1):
    """
    Read fortran codes from files and
     1) Get rid of comments, line continuations, and empty lines; lower cases.
     2) Call dowithline(line) on every line.
     3) Recursively call itself when statement \"include '<filename>'\" is met.
    """
    global gotnextfile, filepositiontext, currentfilename, sourcecodeform, strictf77,\
        beginpattern, quiet, verbose, dolowercase, include_paths
    if not istop:
        # save the reader state so a recursive include call can restore it
        saveglobals=gotnextfile, filepositiontext, currentfilename, sourcecodeform, strictf77,\
            beginpattern, quiet, verbose, dolowercase
    if ffile==[]: return
    localdolowercase = dolowercase
    cont=0
    finalline=''
    ll=''
    # helpers: trailing-'!' comments, include statements, and the two
    # halves of a free-form '&' continuation
    commentline=re.compile(r'(?P<line>([^"]*["][^"]*["][^"!]*|[^\']*\'[^\']*\'[^\'!]*|[^!\'"]*))!{1}(?P<rest>.*)')
    includeline=re.compile(r'\s*include\s*(\'|")(?P<name>[^\'"]*)(\'|")', re.I)
    cont1=re.compile(r'(?P<line>.*)&\s*\Z')
    cont2=re.compile(r'(\s*&|)(?P<line>.*)')
    mline_mark = re.compile(r".*?'''")
    if istop: dowithline('', -1)
    ll, l1='', ''
    spacedigits=[' '] + [str(_m) for _m in range(10)]
    filepositiontext=''
    fin=fileinput.FileInput(ffile)
    while True:
        l=fin.readline()
        if not l: break
        if fin.isfirstline():
            # starting a new file: reset per-file state, detect source form
            filepositiontext=''
            currentfilename=fin.filename()
            gotnextfile=1
            l1=l
            strictf77=0
            sourcecodeform='fix'
            ext = os.path.splitext(currentfilename)[1]
            if is_f_file(currentfilename) and \
                    not (_has_f90_header(l) or _has_fix_header(l)):
                strictf77=1
            elif is_free_format(currentfilename) and not _has_fix_header(l):
                sourcecodeform='free'
            if strictf77: beginpattern=beginpattern77
            else: beginpattern=beginpattern90
            outmess('\tReading file %s (format:%s%s)\n'\
                    %(repr(currentfilename), sourcecodeform,
                      strictf77 and ',strict' or ''))
        l=l.expandtabs().replace('\xa0', ' ')
        while not l=='': # Get rid of newline characters
            if l[-1] not in "\n\r\f": break
            l=l[:-1]
        if not strictf77:
            r=commentline.match(l)
            if r:
                l=r.group('line')+' ' # Strip comments starting with `!'
                rl=r.group('rest')
                if rl[:4].lower()=='f2py': # f2py directive
                    l = l + 4*' '
                    r=commentline.match(rl[4:])
                    if r: l=l+r.group('line')
                    else: l = l + rl[4:]
        if l.strip()=='': # Skip empty line
            cont=0
            continue
        if sourcecodeform=='fix':
            if l[0] in ['*', 'c', '!', 'C', '#']:
                if l[1:5].lower()=='f2py': # f2py directive
                    # NOTE(review): upstream f2py pads with five spaces in
                    # these ' '+l[5:] literals; confirm they were not
                    # whitespace-mangled in this copy.
                    l=' '+l[5:]
                else: # Skip comment line
                    cont=0
                    continue
            elif strictf77:
                if len(l)>72: l=l[:72]
            if not (l[0] in spacedigits):
                raise Exception('readfortrancode: Found non-(space,digit) char '
                                'in the first column.\n\tAre you sure that '
                                'this code is in fix form?\n\tline=%s' % repr(l))
            if (not cont or strictf77) and (len(l)>5 and not l[5]==' '):
                # Continuation of a previous line
                ll=ll+l[6:]
                finalline=''
                origfinalline=''
            else:
                if not strictf77:
                    # F90 continuation
                    r=cont1.match(l)
                    if r: l=r.group('line') # Continuation follows ..
                    if cont:
                        ll=ll+cont2.match(l).group('line')
                        finalline=''
                        origfinalline=''
                    else:
                        l=' '+l[5:] # clean up line beginning from possible digits.
                        if localdolowercase: finalline=ll.lower()
                        else: finalline=ll
                        origfinalline=ll
                        ll=l
                    cont=(r is not None)
                else:
                    l=' '+l[5:] # clean up line beginning from possible digits.
                    if localdolowercase: finalline=ll.lower()
                    else: finalline=ll
                    origfinalline =ll
                    ll=l
        elif sourcecodeform=='free':
            if not cont and ext=='.pyf' and mline_mark.match(l):
                # a triple-quoted multiline block in a .pyf signature file:
                # swallow lines verbatim until the closing quotes
                l = l + '\n'
                while True:
                    lc = fin.readline()
                    if not lc:
                        errmess('Unexpected end of file when reading multiline\n')
                        break
                    l = l + lc
                    if mline_mark.match(lc):
                        break
                l = l.rstrip()
            r=cont1.match(l)
            if r: l=r.group('line') # Continuation follows ..
            if cont:
                ll=ll+cont2.match(l).group('line')
                finalline=''
                origfinalline=''
            else:
                if localdolowercase: finalline=ll.lower()
                else: finalline=ll
                origfinalline =ll
                ll=l
            cont=(r is not None)
        else:
            raise ValueError("Flag sourcecodeform must be either 'fix' or 'free': %s"%repr(sourcecodeform))
        filepositiontext='Line #%d in %s:"%s"\n\t' % (fin.filelineno()-1, currentfilename, l1)
        m=includeline.match(origfinalline)
        if m:
            # resolve include statements against the including file's
            # directory plus the user-supplied include paths; recurse on hit
            fn=m.group('name')
            if os.path.isfile(fn):
                readfortrancode(fn, dowithline=dowithline, istop=0)
            else:
                include_dirs = [os.path.dirname(currentfilename)] + include_paths
                foundfile = 0
                for inc_dir in include_dirs:
                    fn1 = os.path.join(inc_dir, fn)
                    if os.path.isfile(fn1):
                        foundfile = 1
                        readfortrancode(fn1, dowithline=dowithline, istop=0)
                        break
                if not foundfile:
                    outmess('readfortrancode: could not find include file %s in %s. Ignoring.\n'%(repr(fn), os.pathsep.join(include_dirs)))
        else:
            dowithline(finalline)
        l1=ll
    # flush the last buffered logical line (mirrors the include handling
    # inside the loop above)
    if localdolowercase:
        finalline=ll.lower()
    else: finalline=ll
    origfinalline = ll
    filepositiontext='Line #%d in %s:"%s"\n\t' % (fin.filelineno()-1, currentfilename, l1)
    m=includeline.match(origfinalline)
    if m:
        fn=m.group('name')
        if os.path.isfile(fn):
            readfortrancode(fn, dowithline=dowithline, istop=0)
        else:
            include_dirs = [os.path.dirname(currentfilename)] + include_paths
            foundfile = 0
            for inc_dir in include_dirs:
                fn1 = os.path.join(inc_dir, fn)
                if os.path.isfile(fn1):
                    foundfile = 1
                    readfortrancode(fn1, dowithline=dowithline, istop=0)
                    break
            if not foundfile:
                outmess('readfortrancode: could not find include file %s in %s. Ignoring.\n'%(repr(fn), os.pathsep.join(include_dirs)))
    else:
        dowithline(finalline)
    filepositiontext=''
    fin.close()
    if istop: dowithline('', 1)
    else:
        # restore the caller's reader state after a recursive include
        gotnextfile, filepositiontext, currentfilename, sourcecodeform, strictf77,\
            beginpattern, quiet, verbose, dolowercase=saveglobals
########### Crack line
# Template for statement-matching regexes: <before><this><after>, where
# 'this' is the statement keyword being recognized and 'before'/'after'
# capture the surrounding text.
beforethisafter=r'\s*(?P<before>%s(?=\s*(\b(%s)\b)))'+ \
    r'\s*(?P<this>(\b(%s)\b))'+ \
    r'\s*(?P<after>%s)\s*\Z'
##
fortrantypes='character|logical|integer|real|complex|double\s*(precision\s*(complex|)|complex)|type(?=\s*\([\w\s,=(*)]*\))|byte'
typespattern=re.compile(beforethisafter%('', fortrantypes, fortrantypes, '.*'), re.I), 'type'
typespattern4implicit=re.compile(beforethisafter%('', fortrantypes+'|static|automatic|undefined', fortrantypes+'|static|automatic|undefined', '.*'), re.I)
#
functionpattern=re.compile(beforethisafter%('([a-z]+[\w\s(=*+-/)]*?|)', 'function', 'function', '.*'), re.I), 'begin'
subroutinepattern=re.compile(beforethisafter%('[a-z\s]*?', 'subroutine', 'subroutine', '.*'), re.I), 'begin'
#modulepattern=re.compile(beforethisafter%('[a-z\s]*?','module','module','.*'),re.I),'begin'
#
groupbegins77=r'program|block\s*data'
beginpattern77=re.compile(beforethisafter%('', groupbegins77, groupbegins77, '.*'), re.I), 'begin'
groupbegins90=groupbegins77+r'|module(?!\s*procedure)|python\s*module|interface|type(?!\s*\()'
beginpattern90=re.compile(beforethisafter%('', groupbegins90, groupbegins90, '.*'), re.I), 'begin'
groupends=r'end|endprogram|endblockdata|endmodule|endpythonmodule|endinterface'
endpattern=re.compile(beforethisafter%('', groupends, groupends, '[\w\s]*'), re.I), 'end'
#endifs='end\s*(if|do|where|select|while|forall)'
endifs='(end\s*(if|do|where|select|while|forall))|(module\s*procedure)'
endifpattern=re.compile(beforethisafter%('[\w]*?', endifs, endifs, '[\w\s]*'), re.I), 'endif'
#
implicitpattern=re.compile(beforethisafter%('', 'implicit', 'implicit', '.*'), re.I), 'implicit'
dimensionpattern=re.compile(beforethisafter%('', 'dimension|virtual', 'dimension|virtual', '.*'), re.I), 'dimension'
externalpattern=re.compile(beforethisafter%('', 'external', 'external', '.*'), re.I), 'external'
optionalpattern=re.compile(beforethisafter%('', 'optional', 'optional', '.*'), re.I), 'optional'
requiredpattern=re.compile(beforethisafter%('', 'required', 'required', '.*'), re.I), 'required'
publicpattern=re.compile(beforethisafter%('', 'public', 'public', '.*'), re.I), 'public'
privatepattern=re.compile(beforethisafter%('', 'private', 'private', '.*'), re.I), 'private'
# NOTE(review): 'intrisic' looks like a typo for 'intrinsic' -- as written
# this pattern can never match a Fortran INTRINSIC statement. Fixing it
# also requires updating the matching case label used downstream
# (outside this view), so it is only flagged here.
intrisicpattern=re.compile(beforethisafter%('', 'intrisic', 'intrisic', '.*'), re.I), 'intrisic'
intentpattern=re.compile(beforethisafter%('', 'intent|depend|note|check', 'intent|depend|note|check', '\s*\(.*?\).*'), re.I), 'intent'
parameterpattern=re.compile(beforethisafter%('', 'parameter', 'parameter', '\s*\(.*'), re.I), 'parameter'
datapattern=re.compile(beforethisafter%('', 'data', 'data', '.*'), re.I), 'data'
callpattern=re.compile(beforethisafter%('', 'call', 'call', '.*'), re.I), 'call'
entrypattern=re.compile(beforethisafter%('', 'entry', 'entry', '.*'), re.I), 'entry'
callfunpattern=re.compile(beforethisafter%('', 'callfun', 'callfun', '.*'), re.I), 'callfun'
commonpattern=re.compile(beforethisafter%('', 'common', 'common', '.*'), re.I), 'common'
usepattern=re.compile(beforethisafter%('', 'use', 'use', '.*'), re.I), 'use'
containspattern=re.compile(beforethisafter%('', 'contains', 'contains', ''), re.I), 'contains'
formatpattern=re.compile(beforethisafter%('', 'format', 'format', '.*'), re.I), 'format'
## Non-fortran and f2py-specific statements
f2pyenhancementspattern=re.compile(beforethisafter%('', 'threadsafe|fortranname|callstatement|callprotoargument|usercode|pymethoddef', 'threadsafe|fortranname|callstatement|callprotoargument|usercode|pymethoddef', '.*'), re.I|re.S), 'f2pyenhancements'
multilinepattern = re.compile(r"\s*(?P<before>''')(?P<this>.*?)(?P<after>''')\s*\Z", re.S), 'multiline'
##
def _simplifyargs(argsline):
    """Normalize an argument list: split on outermost commas and replace
    '(', ')' and ',' inside each argument with '_'."""
    cleaned = []
    for arg in markoutercomma(argsline).split('@,@'):
        for special in '(),':
            arg = arg.replace(special, '_')
        cleaned.append(arg)
    return ','.join(cleaned)
crackline_re_1 = re.compile(r'\s*(?P<result>\b[a-z]+[\w]*\b)\s*[=].*', re.I)

def crackline(line,reset=0):
    """
    reset=-1 --- initialize
    reset=0 --- crack the line
    reset=1 --- final check if mismatch of blocks occurred
    Cracked data is saved in grouplist[0].
    """
    global beginpattern, groupcounter, groupname, groupcache, grouplist, gotnextfile,\
        filepositiontext, currentfilename, neededmodule, expectbegin, skipblocksuntil,\
        skipemptyends, previous_context
    # ';'-separated statements are cracked one at a time (except inside
    # f2py-enhancement / multiline blocks, where ';' is literal text)
    if ';' in line and not (f2pyenhancementspattern[0].match(line) or
                            multilinepattern[0].match(line)):
        for l in line.split(';'):
            assert reset==0, repr(reset) # XXX: non-zero reset values need testing
            crackline(l, reset)
        return
    if reset<0:
        # initialize the global group (block-nesting) bookkeeping
        groupcounter=0
        groupname={groupcounter:''}
        groupcache={groupcounter:{}}
        grouplist={groupcounter:[]}
        groupcache[groupcounter]['body']=[]
        groupcache[groupcounter]['vars']={}
        groupcache[groupcounter]['block']=''
        groupcache[groupcounter]['name']=''
        neededmodule=-1
        skipblocksuntil=-1
        return
    if reset>0:
        # final pass: close any blocks left open by missing 'end' statements
        fl=0
        if f77modulename and neededmodule==groupcounter: fl=2
        while groupcounter>fl:
            outmess('crackline: groupcounter=%s groupname=%s\n'%(repr(groupcounter), repr(groupname)))
            outmess('crackline: Mismatch of blocks encountered. Trying to fix it by assuming "end" statement.\n')
            grouplist[groupcounter-1].append(groupcache[groupcounter])
            grouplist[groupcounter-1][-1]['body']=grouplist[groupcounter]
            del grouplist[groupcounter]
            groupcounter=groupcounter-1
        if f77modulename and neededmodule==groupcounter:
            # close the synthetic interface and module wrapping F77 routines
            grouplist[groupcounter-1].append(groupcache[groupcounter])
            grouplist[groupcounter-1][-1]['body']=grouplist[groupcounter]
            del grouplist[groupcounter]
            groupcounter=groupcounter-1 # end interface
            grouplist[groupcounter-1].append(groupcache[groupcounter])
            grouplist[groupcounter-1][-1]['body']=grouplist[groupcounter]
            del grouplist[groupcounter]
            groupcounter=groupcounter-1 # end module
            neededmodule=-1
        return
    if line=='': return
    flag=0
    # try each statement pattern in priority order; on a hit, pat[1] names
    # the statement kind
    for pat in [dimensionpattern, externalpattern, intentpattern, optionalpattern,
                requiredpattern,
                parameterpattern, datapattern, publicpattern, privatepattern,
                intrisicpattern,
                endifpattern, endpattern,
                formatpattern,
                beginpattern, functionpattern, subroutinepattern,
                implicitpattern, typespattern, commonpattern,
                callpattern, usepattern, containspattern,
                entrypattern,
                f2pyenhancementspattern,
                multilinepattern
                ]:
        m = pat[0].match(line)
        if m:
            break
        flag=flag+1
    if not m:
        # no statement pattern matched: maybe it is a call of a declared
        # external function -- rewrite it as a 'callfun' statement
        re_1 = crackline_re_1
        if 0<=skipblocksuntil<=groupcounter:return
        if 'externals' in groupcache[groupcounter]:
            for name in groupcache[groupcounter]['externals']:
                if name in invbadnames:
                    name=invbadnames[name]
                if 'interfaced' in groupcache[groupcounter] and name in groupcache[groupcounter]['interfaced']:
                    continue
                m1=re.match(r'(?P<before>[^"]*)\b%s\b\s*@\(@(?P<args>[^@]*)@\)@.*\Z'%name, markouterparen(line), re.I)
                if m1:
                    m2 = re_1.match(m1.group('before'))
                    a = _simplifyargs(m1.group('args'))
                    if m2:
                        line='callfun %s(%s) result (%s)'%(name, a, m2.group('result'))
                    else: line='callfun %s(%s)'%(name, a)
                    m = callfunpattern[0].match(line)
                    if not m:
                        outmess('crackline: could not resolve function call for line=%s.\n'%repr(line))
                        return
                    analyzeline(m, 'callfun', line)
                    return
        if verbose>1 or (verbose==1 and currentfilename.lower().endswith('.pyf')):
            previous_context = None
            outmess('crackline:%d: No pattern for line\n'%(groupcounter))
        return
    elif pat[1]=='end':
        if 0<=skipblocksuntil<groupcounter:
            groupcounter=groupcounter-1
            if skipblocksuntil<=groupcounter: return
        if groupcounter<=0:
            raise Exception('crackline: groupcounter(=%s) is nonpositive. '
                            'Check the blocks.' \
                            % (groupcounter))
        m1 = beginpattern[0].match((line))
        if (m1) and (not m1.group('this')==groupname[groupcounter]):
            raise Exception('crackline: End group %s does not match with '
                            'previous Begin group %s\n\t%s' % \
                            (repr(m1.group('this')), repr(groupname[groupcounter]),
                             filepositiontext)
                            )
        if skipblocksuntil==groupcounter:
            skipblocksuntil=-1
        # pop the finished block into its parent's body
        grouplist[groupcounter-1].append(groupcache[groupcounter])
        grouplist[groupcounter-1][-1]['body']=grouplist[groupcounter]
        del grouplist[groupcounter]
        groupcounter=groupcounter-1
        if not skipemptyends:
            expectbegin=1
    elif pat[1] == 'begin':
        if 0<=skipblocksuntil<=groupcounter:
            groupcounter=groupcounter+1
            return
        gotnextfile=0
        analyzeline(m, pat[1], line)
        expectbegin=0
    elif pat[1]=='endif':
        pass
    elif pat[1]=='contains':
        if ignorecontains: return
        if 0<=skipblocksuntil<=groupcounter: return
        # skip everything until the scope containing 'contains' ends
        skipblocksuntil=groupcounter
    else:
        if 0<=skipblocksuntil<=groupcounter:return
        analyzeline(m, pat[1], line)
def markouterparen(line):
    """Replace the outermost '(' and ')' of *line* with '@(@' and '@)@'.

    Nested parentheses are left untouched.
    """
    out = []
    depth = 0
    for ch in line:
        if ch == '(':
            depth += 1
            if depth == 1:
                out.append('@(@')
                continue
        elif ch == ')':
            depth -= 1
            if depth == 0:
                out.append('@)@')
                continue
        out.append(ch)
    return ''.join(out)
def markoutercomma(line,comma=','):
    """Mark each occurrence of *comma* at nesting depth zero with '@<comma>@'.

    Depth is tracked across parenthesised groups and single-quoted strings
    (a backslash escapes an opening quote).
    """
    out = ''
    depth = 0
    closer = ''
    for ch in line:
        if (not closer or closer == ')') and ch == '(':
            depth += 1
            closer = ')'
        elif not closer and ch == '\'' and (not out or out[-1] != '\\'):
            depth += 1
            closer = '\''
        elif ch == closer:
            depth -= 1
            if depth == 0:
                closer = ''
        elif ch == comma and depth == 0:
            out = out + '@' + comma + '@'
            continue
        out = out + ch
    assert not depth, repr((depth, line, out, closer))
    return out
def unmarkouterparen(line):
    """Undo markouterparen: turn '@(@' / '@)@' markers back into '(' / ')'."""
    return line.replace('@(@', '(').replace('@)@', ')')
def appenddecl(decl,decl2,force=1):
    """Merge variable-declaration dict *decl2* into *decl* and return it.

    *force* controls whether entries already present in *decl* get
    overwritten. Selector/attribute keys are merged via the set* helpers;
    unknown keys raise.
    """
    if not decl:
        decl = {}
    if not decl2 or decl is decl2:
        return decl
    for key in list(decl2.keys()):
        if key in ('typespec', '=', 'typename'):
            if force or key not in decl:
                decl[key] = decl2[key]
        elif key == 'attrspec':
            for attr in decl2[key]:
                decl = setattrspec(decl, attr, force)
        elif key == 'kindselector':
            decl = setkindselector(decl, decl2[key], force)
        elif key == 'charselector':
            decl = setcharselector(decl, decl2[key], force)
        elif key == 'note':
            pass
        elif key in ('intent', 'check', 'dimension', 'optional', 'required'):
            errmess('appenddecl: "%s" not implemented.\n'%key)
        else:
            raise Exception('appenddecl: Unknown variable definition key:' + \
                            str(key))
    return decl
# Regexes for pulling apart routine headers and numeric literals
# (applied to lines pre-processed by markouterparen, hence the @(@ / @)@).
selectpattern=re.compile(r'\s*(?P<this>(@\(@.*?@\)@|[*][\d*]+|[*]\s*@\(@.*?@\)@|))(?P<after>.*)\Z', re.I)
# name [(args)] [result(name)] [bind(...)] -- used for begin statements
nameargspattern=re.compile(r'\s*(?P<name>\b[\w$]+\b)\s*(@\(@\s*(?P<args>[\w\s,]*)\s*@\)@|)\s*((result(\s*@\(@\s*(?P<result>\b[\w$]+\b)\s*@\)@|))|(bind\s*@\(@\s*(?P<bind>.*)\s*@\)@))*\s*\Z', re.I)
# bare name(args) form -- used for call statements
callnameargspattern=re.compile(r'\s*(?P<name>\b[\w$]+\b)\s*@\(@\s*(?P<args>.*)\s*@\)@\s*\Z', re.I)
# real literals with D (double precision) and E exponents
real16pattern = re.compile(r'([-+]?(?:\d+(?:\.\d*)?|\d*\.\d+))[dD]((?:[-+]?\d+)?)')
real8pattern = re.compile(r'([-+]?((?:\d+(?:\.\d*)?|\d*\.\d+))[eE]((?:[-+]?\d+)?)|(\d+\.\d*))')
_intentcallbackpattern = re.compile(r'intent\s*\(.*?\bcallback\b', re.I)
def _is_intent_callback(vdecl):
for a in vdecl.get('attrspec', []):
if _intentcallbackpattern.match(a):
return 1
return 0
def _resolvenameargspattern(line):
    """Split a routine header into (name, args, result, bind).

    Tries the full name/args/result/bind header form first, then the bare
    name(args) call form; returns (None, [], None, None) when neither
    matches.
    """
    marked = markouterparen(line)
    match = nameargspattern.match(marked)
    if match:
        return match.group('name'), match.group('args'), match.group('result'), match.group('bind')
    match = callnameargspattern.match(marked)
    if match:
        return match.group('name'), match.group('args'), None, None
    return None, [], None, None
def analyzeline(m, case, line):
    """Process one classified source line and update the global crack state.

    Parameters
    ----------
    m : re.Match
        Match object produced by the pattern that classified the line;
        its named groups ('this', 'after', 'before', ...) are consumed here.
    case : str
        The statement kind: 'begin', 'call', 'callfun', 'entry', 'type',
        an attribute statement ('dimension', 'intent', ...), 'parameter',
        'implicit', 'data', 'common', 'use', 'f2pyenhancements' or
        'multiline'.
    line : str
        The raw source line (used only for context).

    Side effects: mutates the module-level cracking state (groupcounter,
    groupname, groupcache, grouplist, previous_context, ...).
    """
    global groupcounter, groupname, groupcache, grouplist, filepositiontext,\
        currentfilename, f77modulename, neededinterface, neededmodule, expectbegin,\
        gotnextfile, previous_context
    block=m.group('this')
    if case != 'multiline':
        previous_context = None
    # If executable code appears before any program unit, fabricate an
    # implicit "program" group named after the current file.
    if expectbegin and case not in ['begin', 'call', 'callfun', 'type'] \
       and not skipemptyends and groupcounter<1:
        newname=os.path.basename(currentfilename).split('.')[0]
        outmess('analyzeline: no group yet. Creating program group with name "%s".\n'%newname)
        gotnextfile=0
        groupcounter=groupcounter+1
        groupname[groupcounter]='program'
        groupcache[groupcounter]={}
        grouplist[groupcounter]=[]
        groupcache[groupcounter]['body']=[]
        groupcache[groupcounter]['vars']={}
        groupcache[groupcounter]['block']='program'
        groupcache[groupcounter]['name']=newname
        groupcache[groupcounter]['from']='fromsky'
        expectbegin=0
    if case in ['begin', 'call', 'callfun']:
        # Crack line => block,name,args,result
        block = block.lower()
        if re.match(r'block\s*data', block, re.I): block='block data'
        if re.match(r'python\s*module', block, re.I): block='python module'
        name, args, result, bind = _resolvenameargspattern(m.group('after'))
        if name is None:
            if block=='block data':
                name = '_BLOCK_DATA_'
            else:
                name = ''
            if block not in ['interface', 'block data']:
                outmess('analyzeline: No name/args pattern found for line.\n')
        previous_context = (block, name, groupcounter)
        if args: args=rmbadname([x.strip() for x in markoutercomma(args).split('@,@')])
        else: args=[]
        if '' in args:
            while '' in args:
                args.remove('')
            outmess('analyzeline: argument list is malformed (missing argument).\n')
        # end of crack line => block,name,args,result
        needmodule=0
        needinterface=0
        if case in ['call', 'callfun']:
            needinterface=1
            if 'args' not in groupcache[groupcounter]:
                return
            if name not in groupcache[groupcounter]['args']:
                return
            for it in grouplist[groupcounter]:
                if it['name']==name:
                    return
            if name in groupcache[groupcounter]['interfaced']:
                return
            block={'call':'subroutine','callfun':'function'}[case]
        if f77modulename and neededmodule==-1 and groupcounter<=1:
            neededmodule=groupcounter+2
            needmodule=1
            if block != 'interface':
                needinterface=1
        # Create new block(s)
        groupcounter=groupcounter+1
        groupcache[groupcounter]={}
        grouplist[groupcounter]=[]
        if needmodule:
            if verbose>1:
                outmess('analyzeline: Creating module block %s\n'%repr(f77modulename), 0)
            groupname[groupcounter]='module'
            groupcache[groupcounter]['block']='python module'
            groupcache[groupcounter]['name']=f77modulename
            groupcache[groupcounter]['from']=''
            groupcache[groupcounter]['body']=[]
            groupcache[groupcounter]['externals']=[]
            groupcache[groupcounter]['interfaced']=[]
            groupcache[groupcounter]['vars']={}
            groupcounter=groupcounter+1
            groupcache[groupcounter]={}
            grouplist[groupcounter]=[]
        if needinterface:
            if verbose>1:
                outmess('analyzeline: Creating additional interface block (groupcounter=%s).\n' % (groupcounter), 0)
            groupname[groupcounter]='interface'
            groupcache[groupcounter]['block']='interface'
            groupcache[groupcounter]['name']='unknown_interface'
            groupcache[groupcounter]['from']='%s:%s'%(groupcache[groupcounter-1]['from'], groupcache[groupcounter-1]['name'])
            groupcache[groupcounter]['body']=[]
            groupcache[groupcounter]['externals']=[]
            groupcache[groupcounter]['interfaced']=[]
            groupcache[groupcounter]['vars']={}
            groupcounter=groupcounter+1
            groupcache[groupcounter]={}
            grouplist[groupcounter]=[]
        groupname[groupcounter]=block
        groupcache[groupcounter]['block']=block
        if not name: name='unknown_'+block
        groupcache[groupcounter]['prefix']=m.group('before')
        groupcache[groupcounter]['name']=rmbadname1(name)
        groupcache[groupcounter]['result']=result
        if groupcounter==1:
            groupcache[groupcounter]['from']=currentfilename
        else:
            if f77modulename and groupcounter==3:
                groupcache[groupcounter]['from']='%s:%s'%(groupcache[groupcounter-1]['from'], currentfilename)
            else:
                groupcache[groupcounter]['from']='%s:%s'%(groupcache[groupcounter-1]['from'], groupcache[groupcounter-1]['name'])
        # Drop empty entries before installing the standard containers.
        for k in list(groupcache[groupcounter].keys()):
            if not groupcache[groupcounter][k]:
                del groupcache[groupcounter][k]
        groupcache[groupcounter]['args']=args
        groupcache[groupcounter]['body']=[]
        groupcache[groupcounter]['externals']=[]
        groupcache[groupcounter]['interfaced']=[]
        groupcache[groupcounter]['vars']={}
        groupcache[groupcounter]['entry']={}
        # end of creation
        if block=='type':
            groupcache[groupcounter]['varnames'] = []
        if case in ['call', 'callfun']: # set parents variables
            if name not in groupcache[groupcounter-2]['externals']:
                groupcache[groupcounter-2]['externals'].append(name)
            groupcache[groupcounter]['vars']=copy.deepcopy(groupcache[groupcounter-2]['vars'])
            #try: del groupcache[groupcounter]['vars'][groupcache[groupcounter-2]['name']]
            #except: pass
            try: del groupcache[groupcounter]['vars'][name][groupcache[groupcounter]['vars'][name]['attrspec'].index('external')]
            except: pass
        if block in ['function', 'subroutine']: # set global attributes
            try: groupcache[groupcounter]['vars'][name]=appenddecl(groupcache[groupcounter]['vars'][name], groupcache[groupcounter-2]['vars'][''])
            except: pass
            if case=='callfun': # return type
                if result and result in groupcache[groupcounter]['vars']:
                    if not name==result:
                        groupcache[groupcounter]['vars'][name]=appenddecl(groupcache[groupcounter]['vars'][name], groupcache[groupcounter]['vars'][result])
            #if groupcounter>1: # name is interfaced
            try: groupcache[groupcounter-2]['interfaced'].append(name)
            except: pass
        if block=='function':
            # Pick up a type prefix such as "real function foo".
            t=typespattern[0].match(m.group('before')+' '+name)
            if t:
                typespec, selector, attr, edecl=cracktypespec0(t.group('this'), t.group('after'))
                updatevars(typespec, selector, attr, edecl)
        if case in ['call', 'callfun']:
            grouplist[groupcounter-1].append(groupcache[groupcounter])
            grouplist[groupcounter-1][-1]['body']=grouplist[groupcounter]
            del grouplist[groupcounter]
            groupcounter=groupcounter-1 # end routine
            grouplist[groupcounter-1].append(groupcache[groupcounter])
            grouplist[groupcounter-1][-1]['body']=grouplist[groupcounter]
            del grouplist[groupcounter]
            groupcounter=groupcounter-1 # end interface
    elif case=='entry':
        # ENTRY statement: record alternate entry point name and arguments.
        name, args, result, bind=_resolvenameargspattern(m.group('after'))
        if name is not None:
            if args:
                args=rmbadname([x.strip() for x in markoutercomma(args).split('@,@')])
            else: args=[]
            assert result is None, repr(result)
            groupcache[groupcounter]['entry'][name] = args
            previous_context = ('entry', name, groupcounter)
    elif case=='type':
        # Type declaration statement: delegate to the variable updater.
        typespec, selector, attr, edecl=cracktypespec0(block, m.group('after'))
        last_name = updatevars(typespec, selector, attr, edecl)
        if last_name is not None:
            previous_context = ('variable', last_name, groupcounter)
    elif case in ['dimension', 'intent', 'optional', 'required', 'external', 'public', 'private', 'intrisic']:
        # Attribute statement: attach the attribute to each listed entity.
        edecl=groupcache[groupcounter]['vars']
        ll=m.group('after').strip()
        i=ll.find('::')
        if i<0 and case=='intent':
            i=markouterparen(ll).find('@)@')-2
            ll=ll[:i+1]+'::'+ll[i+1:]
            i=ll.find('::')
            if ll[i:]=='::' and 'args' in groupcache[groupcounter]:
                outmess('All arguments will have attribute %s%s\n'%(m.group('this'), ll[:i]))
                ll = ll + ','.join(groupcache[groupcounter]['args'])
        if i<0:i=0;pl=''
        else: pl=ll[:i].strip();ll=ll[i+2:]
        ch = markoutercomma(pl).split('@,@')
        if len(ch)>1:
            pl = ch[0]
            outmess('analyzeline: cannot handle multiple attributes without type specification. Ignoring %r.\n' % (','.join(ch[1:])))
        last_name = None
        for e in [x.strip() for x in markoutercomma(ll).split('@,@')]:
            m1=namepattern.match(e)
            if not m1:
                if case in ['public', 'private']: k=''
                else:
                    print(m.groupdict())
                    outmess('analyzeline: no name pattern found in %s statement for %s. Skipping.\n'%(case, repr(e)))
                    continue
            else:
                k=rmbadname1(m1.group('name'))
            if k not in edecl:
                edecl[k]={}
            if case=='dimension':
                ap=case+m1.group('after')
            if case=='intent':
                ap=m.group('this')+pl
                if _intentcallbackpattern.match(ap):
                    if k not in groupcache[groupcounter]['args']:
                        if groupcounter>1:
                            if '__user__' not in groupcache[groupcounter-2]['name']:
                                outmess('analyzeline: missing __user__ module (could be nothing)\n')
                            if k!=groupcache[groupcounter]['name']: # fixes ticket 1693
                                outmess('analyzeline: appending intent(callback) %s'\
                                        ' to %s arguments\n' % (k, groupcache[groupcounter]['name']))
                                groupcache[groupcounter]['args'].append(k)
                        else:
                            errmess('analyzeline: intent(callback) %s is ignored' % (k))
                    else:
                        errmess('analyzeline: intent(callback) %s is already'\
                                ' in argument list' % (k))
            if case in ['optional', 'required', 'public', 'external', 'private', 'intrisic']:
                ap=case
            if 'attrspec' in edecl[k]:
                edecl[k]['attrspec'].append(ap)
            else:
                edecl[k]['attrspec']=[ap]
            if case=='external':
                if groupcache[groupcounter]['block']=='program':
                    outmess('analyzeline: ignoring program arguments\n')
                    continue
                if k not in groupcache[groupcounter]['args']:
                    #outmess('analyzeline: ignoring external %s (not in arguments list)\n'%(`k`))
                    continue
                if 'externals' not in groupcache[groupcounter]:
                    groupcache[groupcounter]['externals']=[]
                groupcache[groupcounter]['externals'].append(k)
            last_name = k
        groupcache[groupcounter]['vars']=edecl
        if last_name is not None:
            previous_context = ('variable', last_name, groupcounter)
    elif case=='parameter':
        # PARAMETER statement: evaluate each name=expr pair and store the
        # repr of the value as the variable's init expression.
        edecl=groupcache[groupcounter]['vars']
        ll=m.group('after').strip()[1:-1]
        last_name = None
        for e in markoutercomma(ll).split('@,@'):
            try:
                k, initexpr=[x.strip() for x in e.split('=')]
            except:
                outmess('analyzeline: could not extract name,expr in parameter statement "%s" of "%s"\n'%(e, ll));continue
            params = get_parameters(edecl)
            k=rmbadname1(k)
            if k not in edecl:
                edecl[k]={}
            if '=' in edecl[k] and (not edecl[k]['=']==initexpr):
                outmess('analyzeline: Overwriting the value of parameter "%s" ("%s") with "%s".\n'%(k, edecl[k]['='], initexpr))
            t = determineexprtype(initexpr, params)
            if t:
                if t.get('typespec')=='real':
                    # Turn Fortran D exponents into Python-evaluable E form.
                    tt = list(initexpr)
                    for m in real16pattern.finditer(initexpr):
                        tt[m.start():m.end()] = list(\
                                initexpr[m.start():m.end()].lower().replace('d', 'e'))
                    initexpr = ''.join(tt)
                elif t.get('typespec')=='complex':
                    initexpr = initexpr[1:].lower().replace('d', 'e').\
                               replace(',', '+1j*(')
            try:
                v = eval(initexpr, {}, params)
            except (SyntaxError, NameError, TypeError) as msg:
                errmess('analyzeline: Failed to evaluate %r. Ignoring: %s\n'\
                        % (initexpr, msg))
                continue
            edecl[k]['='] = repr(v)
            if 'attrspec' in edecl[k]:
                edecl[k]['attrspec'].append('parameter')
            else: edecl[k]['attrspec']=['parameter']
            last_name = k
        groupcache[groupcounter]['vars']=edecl
        if last_name is not None:
            previous_context = ('variable', last_name, groupcounter)
    elif case=='implicit':
        # IMPLICIT statement: build a letter -> type-declaration map, or
        # None for 'implicit none'.
        if m.group('after').strip().lower()=='none':
            groupcache[groupcounter]['implicit']=None
        elif m.group('after'):
            if 'implicit' in groupcache[groupcounter]:
                impl=groupcache[groupcounter]['implicit']
            else: impl={}
            if impl is None:
                outmess('analyzeline: Overwriting earlier "implicit none" statement.\n')
                impl={}
            for e in markoutercomma(m.group('after')).split('@,@'):
                decl={}
                m1=re.match(r'\s*(?P<this>.*?)\s*(\(\s*(?P<after>[a-z-, ]+)\s*\)\s*|)\Z', e, re.I)
                if not m1:
                    outmess('analyzeline: could not extract info of implicit statement part "%s"\n'%(e));continue
                m2=typespattern4implicit.match(m1.group('this'))
                if not m2:
                    outmess('analyzeline: could not extract types pattern of implicit statement part "%s"\n'%(e));continue
                typespec, selector, attr, edecl=cracktypespec0(m2.group('this'), m2.group('after'))
                kindselect, charselect, typename=cracktypespec(typespec, selector)
                decl['typespec']=typespec
                decl['kindselector']=kindselect
                decl['charselector']=charselect
                decl['typename']=typename
                for k in list(decl.keys()):
                    if not decl[k]: del decl[k]
                for r in markoutercomma(m1.group('after')).split('@,@'):
                    if '-' in r:
                        try: begc, endc=[x.strip() for x in r.split('-')]
                        except:
                            outmess('analyzeline: expected "<char>-<char>" instead of "%s" in range list of implicit statement\n'%r);continue
                    else: begc=endc=r.strip()
                    if not len(begc)==len(endc)==1:
                        outmess('analyzeline: expected "<char>-<char>" instead of "%s" in range list of implicit statement (2)\n'%r);continue
                    for o in range(ord(begc), ord(endc)+1):
                        impl[chr(o)]=decl
            groupcache[groupcounter]['implicit']=impl
    elif case=='data':
        # DATA statement: split into (variable-list, value-list) pairs on
        # the '/' delimiters, honoring quoted strings and parentheses.
        ll=[]
        dl='';il='';f=0;fc=1;inp=0
        for c in m.group('after'):
            if not inp:
                if c=="'": fc=not fc
                if c=='/' and fc: f=f+1;continue
            if c=='(': inp = inp + 1
            elif c==')': inp = inp - 1
            if f==0: dl=dl+c
            elif f==1: il=il+c
            elif f==2:
                dl = dl.strip()
                if dl.startswith(','):
                    dl = dl[1:].strip()
                ll.append([dl, il])
                dl=c;il='';f=0
        if f==2:
            dl = dl.strip()
            if dl.startswith(','):
                dl = dl[1:].strip()
            ll.append([dl, il])
        vars={}
        if 'vars' in groupcache[groupcounter]:
            vars=groupcache[groupcounter]['vars']
        last_name = None
        for l in ll:
            l=[x.strip() for x in l]
            if l[0][0]==',':l[0]=l[0][1:]
            if l[0][0]=='(':
                outmess('analyzeline: implied-DO list "%s" is not supported. Skipping.\n'%l[0])
                continue
            #if '(' in l[0]:
            #    #outmess('analyzeline: ignoring this data statement.\n')
            #    continue
            i=0;j=0;llen=len(l[1])
            for v in rmbadname([x.strip() for x in markoutercomma(l[0]).split('@,@')]):
                if v[0]=='(':
                    outmess('analyzeline: implied-DO list "%s" is not supported. Skipping.\n'%v)
                    # XXX: subsequent init expressions may get wrong values.
                    # Ignoring since data statements are irrelevant for wrapping.
                    continue
                fc=0
                while (i<llen) and (fc or not l[1][i]==','):
                    if l[1][i]=="'": fc=not fc
                    i=i+1
                i=i+1
                #v,l[1][j:i-1]=name,initvalue
                if v not in vars:
                    vars[v]={}
                if '=' in vars[v] and not vars[v]['=']==l[1][j:i-1]:
                    outmess('analyzeline: changing init expression of "%s" ("%s") to "%s"\n'%(v, vars[v]['='], l[1][j:i-1]))
                vars[v]['=']=l[1][j:i-1]
                j=i
                last_name = v
        groupcache[groupcounter]['vars']=vars
        if last_name is not None:
            previous_context = ('variable', last_name, groupcounter)
    elif case=='common':
        # COMMON statement: split '/name/ var-list' groups; an unnamed
        # leading group gets the placeholder name '_BLNK_'.
        line=m.group('after').strip()
        if not line[0]=='/':line='//'+line
        cl=[]
        f=0;bn='';ol=''
        for c in line:
            if c=='/':f=f+1;continue
            if f>=3:
                bn = bn.strip()
                if not bn: bn='_BLNK_'
                cl.append([bn, ol])
                f=f-2;bn='';ol=''
            if f%2: bn=bn+c
            else: ol=ol+c
        bn = bn.strip()
        if not bn: bn='_BLNK_'
        cl.append([bn, ol])
        commonkey={}
        if 'common' in groupcache[groupcounter]:
            commonkey=groupcache[groupcounter]['common']
        for c in cl:
            if c[0] in commonkey:
                outmess('analyzeline: previously defined common block encountered. Skipping.\n')
                continue
            commonkey[c[0]]=[]
            for i in [x.strip() for x in markoutercomma(c[1]).split('@,@')]:
                if i: commonkey[c[0]].append(i)
        groupcache[groupcounter]['common']=commonkey
        previous_context = ('common', bn, groupcounter)
    elif case=='use':
        # USE statement: record module name, only-flag and rename map.
        m1=re.match(r'\A\s*(?P<name>\b[\w]+\b)\s*((,(\s*\bonly\b\s*:|(?P<notonly>))\s*(?P<list>.*))|)\s*\Z', m.group('after'), re.I)
        if m1:
            mm=m1.groupdict()
            if 'use' not in groupcache[groupcounter]:
                groupcache[groupcounter]['use']={}
            name=m1.group('name')
            groupcache[groupcounter]['use'][name]={}
            isonly=0
            if 'list' in mm and mm['list'] is not None:
                if 'notonly' in mm and mm['notonly'] is None:
                    isonly=1
                groupcache[groupcounter]['use'][name]['only']=isonly
                ll=[x.strip() for x in mm['list'].split(',')]
                rl={}
                for l in ll:
                    if '=' in l:
                        m2=re.match(r'\A\s*(?P<local>\b[\w]+\b)\s*=\s*>\s*(?P<use>\b[\w]+\b)\s*\Z', l, re.I)
                        if m2: rl[m2.group('local').strip()]=m2.group('use').strip()
                        else:
                            outmess('analyzeline: Not local=>use pattern found in %s\n'%repr(l))
                    else:
                        rl[l]=l
                    groupcache[groupcounter]['use'][name]['map']=rl
            else:
                pass
        else:
            print(m.groupdict())
            outmess('analyzeline: Could not crack the use statement.\n')
    elif case in ['f2pyenhancements']:
        # f2py directive (e.g. usercode): collected per-group; multiple
        # usercode sections accumulate in a list.
        if 'f2pyenhancements' not in groupcache[groupcounter]:
            groupcache[groupcounter]['f2pyenhancements'] = {}
        d = groupcache[groupcounter]['f2pyenhancements']
        if m.group('this')=='usercode' and 'usercode' in d:
            if isinstance(d['usercode'], str):
                d['usercode'] = [d['usercode']]
            d['usercode'].append(m.group('after'))
        else:
            d[m.group('this')] = m.group('after')
    elif case=='multiline':
        # Continuation of a multiline block; attach to the context that the
        # previous statement established.
        if previous_context is None:
            if verbose:
                outmess('analyzeline: No context for multiline block.\n')
            return
        gc = groupcounter
        #gc = previous_context[2]
        appendmultiline(groupcache[gc],
                        previous_context[:2],
                        m.group('this'))
    else:
        if verbose>1:
            print(m.groupdict())
            outmess('analyzeline: No code implemented for line.\n')
def appendmultiline(group, context_name, ml):
    """Append multiline text ``ml`` under ``context_name`` in group['f2pymultilines']."""
    store = group.setdefault('f2pymultilines', {})
    store.setdefault(context_name, []).append(ml)
def cracktypespec0(typespec, ll):
    """Normalize a type spec and split its tail into selector/attr/remainder.

    Returns ``(typespec, selector, attr, remainder)`` or None when the
    kind/char selector pattern cannot be matched.
    """
    selector = None
    attr = None
    # Canonicalize two-word type names; everything else is just lowercased.
    if re.match(r'double\s*complex', typespec, re.I):
        typespec = 'double complex'
    elif re.match(r'double\s*precision', typespec, re.I):
        typespec = 'double precision'
    else:
        typespec = typespec.strip().lower()
    m1 = selectpattern.match(markouterparen(ll))
    if not m1:
        outmess('cracktypespec0: no kind/char_selector pattern found for line.\n')
        return
    groups = m1.groupdict()
    for key in list(groups.keys()):
        groups[key] = unmarkouterparen(groups[key])
    if typespec in ('complex', 'integer', 'logical', 'real', 'character', 'type'):
        selector = groups['this']
        ll = groups['after']
    i = ll.find('::')
    if i >= 0:
        attr = ll[:i].strip()
        ll = ll[i + 2:]
    return typespec, selector, attr, ll
#####
# Entity name followed by the rest of its declaration.
namepattern=re.compile(r'\s*(?P<name>\b[\w]+\b)\s*(?P<after>.*)\s*\Z', re.I)
# Kind selector: ``([kind=]<kind>)`` or the F77 ``*<kind>`` form.
kindselector=re.compile(r'\s*(\(\s*(kind\s*=)?\s*(?P<kind>.*)\s*\)|[*]\s*(?P<kind2>.*?))\s*\Z', re.I)
# Character selector: ``(<len/kind spec>)`` or ``*<charlen>``.
charselector=re.compile(r'\s*(\((?P<lenkind>.*)\)|[*]\s*(?P<charlen>.*))\s*\Z', re.I)
# Inside a character selector: ``kind=..[,len=..]`` / ``[len=]..[,kind=..]``;
# commas are expected to be pre-marked as '@,@' by markoutercomma.
lenkindpattern=re.compile(r'\s*(kind\s*=\s*(?P<kind>.*?)\s*(@,@\s*len\s*=\s*(?P<len>.*)|)|(len\s*=\s*|)(?P<len2>.*?)\s*(@,@\s*(kind\s*=\s*|)(?P<kind2>.*)|))\s*\Z', re.I)
# Entity tail: array spec, ``*len`` and/or initializer; parentheses are
# expected to be pre-marked as '@(@'/'@)@' by markouterparen.
lenarraypattern=re.compile(r'\s*(@\(@\s*(?!/)\s*(?P<array>.*?)\s*@\)@\s*[*]\s*(?P<len>.*?)|([*]\s*(?P<len2>.*?)|)\s*(@\(@\s*(?!/)\s*(?P<array2>.*?)\s*@\)@|))\s*(=\s*(?P<init>.*?)|(@\(@|)/\s*(?P<init2>.*?)\s*/(@\)@|)|)\s*\Z', re.I)
def removespaces(expr):
    """Remove interior spaces adjacent to an operator, bracket or space.

    The first and last characters of the stripped expression are always
    kept; a space between two ordinary identifier characters survives.
    """
    expr = expr.strip()
    if len(expr) <= 1:
        return expr
    specials = "()[]{}=+-/* "
    kept = [expr[0]]
    for pos in range(1, len(expr) - 1):
        ch = expr[pos]
        if ch == ' ' and (expr[pos + 1] in specials or expr[pos - 1] in specials):
            continue
        kept.append(ch)
    kept.append(expr[-1])
    return ''.join(kept)
def markinnerspaces(line):
    """Replace blanks inside quoted strings with the '@_@' marker.

    Parameters
    ----------
    line : str
        Source text possibly containing single- or double-quoted substrings.

    Returns
    -------
    str
        ``line`` with every space inside a quoted region replaced by
        ``@_@``; backslash-escaped quote characters do not close a string.

    Bug fix: the original used ``if c==cc: f=f+1`` followed by
    ``elif c==cc: f=f-1`` — an identical condition, so the decrement was
    unreachable and the counter only ever grew.  As a result spaces inside
    any quoted string after the first one on a line were never marked.
    A proper inside/outside toggle fixes that.
    """
    fragment = ''
    inside = False        # are we currently between an opening and closing quote?
    current_quote = None  # which quote character opened the current string
    previous = ''         # previous character, used to honor backslash escapes
    for c in line:
        if previous == '\\' and c in ['\\', '\'', '"']:
            # Escaped backslash or quote: copy through without state change.
            fragment += c
            previous = c
            continue
        if not inside and c in ['\'', '"']:
            current_quote = c
        if c == current_quote:
            # The same quote character both opens and closes a string.
            inside = not inside
        elif c == ' ' and inside:
            fragment += '@_@'
            continue
        fragment += c
        previous = c
    return fragment
def updatevars(typespec, selector, attrspec, entitydecl):
    """Merge a declaration statement into the current group's variable dict.

    Parameters
    ----------
    typespec : str
        The (normalized) type name, e.g. 'real' or 'character'.
    selector : str or None
        Kind/char selector text from cracktypespec0.
    attrspec : str or None
        Comma-separated attribute list (the part before '::').
    entitydecl : str
        The entity declaration list (names with optional dims/len/init).

    Returns the last successfully processed entity name (or None).
    Mutates ``groupcache[groupcounter]['vars']`` in place.
    """
    global groupcache, groupcounter
    last_name = None
    kindselect, charselect, typename=cracktypespec(typespec, selector)
    if attrspec:
        attrspec=[x.strip() for x in markoutercomma(attrspec).split('@,@')]
        l = []
        c = re.compile(r'(?P<start>[a-zA-Z]+)')
        for a in attrspec:
            if not a:
                continue
            m = c.match(a)
            if m:
                # Lowercase only the leading keyword of each attribute.
                s = m.group('start').lower()
                a = s + a[len(s):]
            l.append(a)
        attrspec = l
    # Split the entity list on outer commas, then on inner blanks (spaces
    # inside quoted strings were protected as '@_@' by markinnerspaces).
    el=[x.strip() for x in markoutercomma(entitydecl).split('@,@')]
    el1=[]
    for e in el:
        for e1 in [x.strip() for x in markoutercomma(removespaces(markinnerspaces(e)), comma=' ').split('@ @')]:
            if e1: el1.append(e1.replace('@_@', ' '))
    for e in el1:
        m=namepattern.match(e)
        if not m:
            outmess('updatevars: no name pattern found for entity=%s. Skipping.\n'%(repr(e)))
            continue
        ename=rmbadname1(m.group('name'))
        edecl={}
        if ename in groupcache[groupcounter]['vars']:
            # Merge into the existing declaration, warning on conflicts.
            edecl=groupcache[groupcounter]['vars'][ename].copy()
            not_has_typespec = 'typespec' not in edecl
            if not_has_typespec:
                edecl['typespec']=typespec
            elif typespec and (not typespec==edecl['typespec']):
                outmess('updatevars: attempt to change the type of "%s" ("%s") to "%s". Ignoring.\n' % (ename, edecl['typespec'], typespec))
            if 'kindselector' not in edecl:
                edecl['kindselector']=copy.copy(kindselect)
            elif kindselect:
                for k in list(kindselect.keys()):
                    if k in edecl['kindselector'] and (not kindselect[k]==edecl['kindselector'][k]):
                        outmess('updatevars: attempt to change the kindselector "%s" of "%s" ("%s") to "%s". Ignoring.\n' % (k, ename, edecl['kindselector'][k], kindselect[k]))
                    else: edecl['kindselector'][k]=copy.copy(kindselect[k])
            if 'charselector' not in edecl and charselect:
                if not_has_typespec:
                    edecl['charselector']=charselect
                else:
                    errmess('updatevars:%s: attempt to change empty charselector to %r. Ignoring.\n' \
                            %(ename, charselect))
            elif charselect:
                for k in list(charselect.keys()):
                    if k in edecl['charselector'] and (not charselect[k]==edecl['charselector'][k]):
                        outmess('updatevars: attempt to change the charselector "%s" of "%s" ("%s") to "%s". Ignoring.\n' % (k, ename, edecl['charselector'][k], charselect[k]))
                    else: edecl['charselector'][k]=copy.copy(charselect[k])
            if 'typename' not in edecl:
                edecl['typename']=typename
            elif typename and (not edecl['typename']==typename):
                outmess('updatevars: attempt to change the typename of "%s" ("%s") to "%s". Ignoring.\n' % (ename, edecl['typename'], typename))
            if 'attrspec' not in edecl:
                edecl['attrspec']=copy.copy(attrspec)
            elif attrspec:
                for a in attrspec:
                    if a not in edecl['attrspec']:
                        edecl['attrspec'].append(a)
        else:
            # Fresh declaration.
            edecl['typespec']=copy.copy(typespec)
            edecl['kindselector']=copy.copy(kindselect)
            edecl['charselector']=copy.copy(charselect)
            edecl['typename']=typename
            edecl['attrspec']=copy.copy(attrspec)
        if m.group('after'):
            # Crack the entity tail: array dims, *len and/or initializer.
            m1=lenarraypattern.match(markouterparen(m.group('after')))
            if m1:
                d1=m1.groupdict()
                for lk in ['len', 'array', 'init']:
                    if d1[lk+'2'] is not None: d1[lk]=d1[lk+'2']; del d1[lk+'2']
                for k in list(d1.keys()):
                    if d1[k] is not None: d1[k]=unmarkouterparen(d1[k])
                    else: del d1[k]
                if 'len' in d1 and 'array' in d1:
                    if d1['len']=='':
                        d1['len']=d1['array']
                        del d1['array']
                    else:
                        d1['array']=d1['array']+','+d1['len']
                        del d1['len']
                        errmess('updatevars: "%s %s" is mapped to "%s %s(%s)"\n'%(typespec, e, typespec, ename, d1['array']))
                if 'array' in d1:
                    dm = 'dimension(%s)'%d1['array']
                    if 'attrspec' not in edecl or (not edecl['attrspec']):
                        edecl['attrspec']=[dm]
                    else:
                        edecl['attrspec'].append(dm)
                        for dm1 in edecl['attrspec']:
                            if dm1[:9]=='dimension' and dm1!=dm:
                                del edecl['attrspec'][-1]
                                errmess('updatevars:%s: attempt to change %r to %r. Ignoring.\n' \
                                        % (ename, dm1, dm))
                                break
                if 'len' in d1:
                    # '*len' means kind for numeric types, length for character.
                    if typespec in ['complex', 'integer', 'logical', 'real']:
                        if ('kindselector' not in edecl) or (not edecl['kindselector']):
                            edecl['kindselector']={}
                        edecl['kindselector']['*']=d1['len']
                    elif typespec == 'character':
                        if ('charselector' not in edecl) or (not edecl['charselector']):
                            edecl['charselector']={}
                        if 'len' in edecl['charselector']:
                            del edecl['charselector']['len']
                        edecl['charselector']['*']=d1['len']
                if 'init' in d1:
                    if '=' in edecl and (not edecl['=']==d1['init']):
                        outmess('updatevars: attempt to change the init expression of "%s" ("%s") to "%s". Ignoring.\n' % (ename, edecl['='], d1['init']))
                    else:
                        edecl['=']=d1['init']
            else:
                outmess('updatevars: could not crack entity declaration "%s". Ignoring.\n'%(ename+m.group('after')))
        # Drop empty entries before installing into the group cache.
        for k in list(edecl.keys()):
            if not edecl[k]:
                del edecl[k]
        groupcache[groupcounter]['vars'][ename]=edecl
        if 'varnames' in groupcache[groupcounter]:
            groupcache[groupcounter]['varnames'].append(ename)
        last_name = ename
    return last_name
def cracktypespec(typespec, selector):
    """Crack a selector string into (kindselect, charselect, typename).

    For numeric types the selector is parsed with ``kindselector``; for
    'character' with ``charselector`` (plus ``lenkindpattern`` for the
    parenthesized form); for 'type' the derived-type name is extracted.
    Any of the three results may be None.
    """
    kindselect=None
    charselect=None
    typename=None
    if selector:
        if typespec in ['complex', 'integer', 'logical', 'real']:
            kindselect=kindselector.match(selector)
            if not kindselect:
                outmess('cracktypespec: no kindselector pattern found for %s\n'%(repr(selector)))
                return
            kindselect=kindselect.groupdict()
            # '*<kind>' form is stored under the '*' key.
            kindselect['*']=kindselect['kind2']
            del kindselect['kind2']
            for k in list(kindselect.keys()):
                if not kindselect[k]: del kindselect[k]
            for k, i in list(kindselect.items()):
                kindselect[k] = rmbadname1(i)
        elif typespec=='character':
            charselect=charselector.match(selector)
            if not charselect:
                outmess('cracktypespec: no charselector pattern found for %s\n'%(repr(selector)))
                return
            charselect=charselect.groupdict()
            # '*<charlen>' form is stored under the '*' key.
            charselect['*']=charselect['charlen']
            del charselect['charlen']
            if charselect['lenkind']:
                # Parenthesized '(len=..,kind=..)' form.
                lenkind=lenkindpattern.match(markoutercomma(charselect['lenkind']))
                lenkind=lenkind.groupdict()
                for lk in ['len', 'kind']:
                    if lenkind[lk+'2']:
                        lenkind[lk]=lenkind[lk+'2']
                    charselect[lk]=lenkind[lk]
                    del lenkind[lk+'2']
            del charselect['lenkind']
            for k in list(charselect.keys()):
                if not charselect[k]: del charselect[k]
            for k, i in list(charselect.items()):
                charselect[k] = rmbadname1(i)
        elif typespec=='type':
            typename=re.match(r'\s*\(\s*(?P<name>\w+)\s*\)', selector, re.I)
            if typename: typename=typename.group('name')
            else: outmess('cracktypespec: no typename found in %s\n'%(repr(typespec+selector)))
        else:
            outmess('cracktypespec: no selector used for %s\n'%(repr(selector)))
    return kindselect, charselect, typename
######
def setattrspec(decl, attr, force=0):
    """Add attribute ``attr`` to ``decl['attrspec']`` and return ``decl``.

    Without ``force`` the attribute is appended only when not already
    present; with ``force`` it is appended unconditionally (possibly
    producing a duplicate, which appenddecl relies on).

    Note: the previous implementation ran a static/automatic/public/private
    ``elif`` chain whose every branch — including the final ``else`` —
    appended the attribute, so the checks were dead code.  The simplified
    form below preserves that exact behavior.
    """
    if not decl:
        decl = {}
    if not attr:
        return decl
    if 'attrspec' not in decl:
        decl['attrspec'] = [attr]
        return decl
    if force or attr not in decl['attrspec']:
        decl['attrspec'].append(attr)
    return decl
def setkindselector(decl, sel, force=0):
    """Merge kind-selector dict ``sel`` into ``decl['kindselector']``.

    Existing keys are kept unless ``force`` is set.  Returns ``decl``.
    """
    if not decl:
        decl = {}
    if not sel:
        return decl
    if 'kindselector' not in decl:
        decl['kindselector'] = sel
        return decl
    existing = decl['kindselector']
    for key in list(sel.keys()):
        if force or key not in existing:
            existing[key] = sel[key]
    return decl
def setcharselector(decl, sel, force=0):
    """Merge char-selector dict ``sel`` into ``decl['charselector']``.

    Existing keys are kept unless ``force`` is set.  Returns ``decl``.
    """
    if not decl:
        decl = {}
    if not sel:
        return decl
    if 'charselector' not in decl:
        decl['charselector'] = sel
        return decl
    existing = decl['charselector']
    for key in list(sel.keys()):
        if force or key not in existing:
            existing[key] = sel[key]
    return decl
def getblockname(block, unknown='unknown'):
    """Return the block's name, or ``unknown`` for anonymous blocks."""
    return block.get('name', unknown)
###### post processing
def setmesstext(block):
    """Update the global ``filepositiontext`` used to prefix messages.

    Best-effort: blocks missing 'from'/'name' (or non-mapping input)
    leave the text unchanged.
    """
    global filepositiontext
    try:
        filepositiontext='In: %s:%s\n'%(block['from'], block['name'])
    except (KeyError, TypeError):
        # Narrowed from a bare except: only a missing key or a non-mapping
        # block is expected here; anything else should surface.
        pass
def get_usedict(block):
    """Collect the block's 'use' mappings merged over its ancestors.

    Entries of the block itself override identically-named entries
    inherited from parent blocks.
    """
    merged = {}
    if 'parent_block' in block:
        merged.update(get_usedict(block['parent_block']))
    if 'use' in block:
        merged.update(block['use'])
    return merged
def get_useparameters(block, param_map=None):
    """Gather parameter values reachable via the block's use-statements.

    Looks up each used module in the global ``f90modulevars`` registry,
    extracts its parameters, and folds them into ``param_map`` (later
    modules override earlier entries, with a message).  Rename maps are
    not implemented and only reported.
    """
    global f90modulevars
    param_map = {} if param_map is None else param_map
    use_map = get_usedict(block)
    if not use_map:
        return param_map
    for modname, rename_map in list(use_map.items()):
        modname = modname.lower()
        if modname not in f90modulevars:
            outmess('get_useparameters: no module %s info used by %s\n' % (modname, block.get('name')))
            continue
        module_params = get_parameters(f90modulevars[modname])
        if not module_params:
            continue
        # XXX: apply mapping
        if rename_map:
            errmess('get_useparameters: mapping for %s not impl.' % (rename_map))
        for pname, pvalue in list(module_params.items()):
            if pname in param_map:
                outmess('get_useparameters: overriding parameter %s with'
                        ' value from module %s' % (repr(pname), repr(modname)))
            param_map[pname] = pvalue
    return param_map
def postcrack2(block, tab='', param_map=None):
    """Substitute use-parameter values into kind selectors, recursively.

    Dict blocks are updated in place (and their bodies rebuilt); list
    input is processed element-wise.  A no-op when no Fortran 90 module
    variables have been collected.
    """
    global f90modulevars
    if not f90modulevars:
        return block
    if isinstance(block, list):
        return [postcrack2(item, tab=tab + '\t', param_map=param_map)
                for item in block]
    setmesstext(block)
    outmess('%sBlock: %s\n' % (tab, block['name']), 0)
    if param_map is None:
        param_map = get_useparameters(block)
    if param_map is not None and 'vars' in block:
        block_vars = block['vars']
        for varname in list(block_vars.keys()):
            if 'kindselector' not in block_vars[varname]:
                continue
            kind = block_vars[varname]['kindselector']
            if 'kind' in kind:
                kindval = kind['kind']
                if kindval in param_map:
                    kind['kind'] = param_map[kindval]
    block['body'] = [postcrack2(child, tab=tab + '\t', param_map=param_map)
                     for child in block['body']]
    return block
def postcrack(block,args=None,tab=''):
    """
    Post-process a cracked block (or list of blocks): analyze arguments,
    common blocks, variables and body, and synthesize a ``__user__``
    callback module for any external arguments.

    Lists are processed element-wise, with ``__user__`` routines sorted
    to the front of the result.

    TODO:
          function return values
          determine expression types if in argument list
    """
    global usermodules, onlyfunctions
    if isinstance(block, list):
        gret=[]
        uret=[]
        for g in block:
            setmesstext(g)
            g=postcrack(g, tab=tab+'\t')
            if 'name' in g and '__user__' in g['name']: # sort user routines to appear first
                uret.append(g)
            else:
                gret.append(g)
        return uret+gret
    setmesstext(block)
    if not isinstance(block, dict) and 'block' not in block:
        raise Exception('postcrack: Expected block dictionary instead of ' + \
                        str(block))
    if 'name' in block and not block['name']=='unknown_interface':
        outmess('%sBlock: %s\n'%(tab, block['name']), 0)
    blocktype=block['block']
    # Run the per-block analyses; each returns/installs normalized data.
    block=analyzeargs(block)
    block=analyzecommon(block)
    block['vars']=analyzevars(block)
    block['sortvars']=sortvarnames(block['vars'])
    if 'args' in block and block['args']:
        args=block['args']
    block['body']=analyzebody(block, args, tab=tab)
    userisdefined=[]
## fromuser = []
    if 'use' in block:
        useblock=block['use']
        for k in list(useblock.keys()):
            if '__user__' in k:
                userisdefined.append(k)
## if 'map' in useblock[k]:
## for n in useblock[k]['map'].itervalues():
## if n not in fromuser: fromuser.append(n)
    else: useblock={}
    name=''
    if 'name' in block:
        name=block['name']
    # If the block has external arguments, build a '<name>__user__routines'
    # module holding interfaces/vars for the callbacks.
    if 'externals' in block and block['externals']:# and not userisdefined: # Build a __user__ module
        interfaced=[]
        if 'interfaced' in block:
            interfaced=block['interfaced']
        mvars=copy.copy(block['vars'])
        if name:
            mname=name+'__user__routines'
        else:
            mname='unknown__user__routines'
        if mname in userisdefined:
            # Make the generated module name unique.
            i=1
            while '%s_%i'%(mname, i) in userisdefined: i=i+1
            mname='%s_%i'%(mname, i)
        interface={'block':'interface','body':[],'vars':{},'name':name+'_user_interface'}
        for e in block['externals']:
##            if e in fromuser:
##                outmess('  Skipping %s that is defined explicitly in another use statement\n'%(`e`))
##                continue
            if e in interfaced:
                # Move the existing interface body of e into the new module.
                edef=[]
                j=-1
                for b in block['body']:
                    j=j+1
                    if b['block']=='interface':
                        i=-1
                        for bb in b['body']:
                            i=i+1
                            if 'name' in bb and bb['name']==e:
                                edef=copy.copy(bb)
                                del b['body'][i]
                                break
                        if edef:
                            if not b['body']: del block['body'][j]
                            del interfaced[interfaced.index(e)]
                            break
                interface['body'].append(edef)
            else:
                if e in mvars and not isexternal(mvars[e]):
                    interface['vars'][e]=mvars[e]
        if interface['vars'] or interface['body']:
            block['interfaced']=interfaced
            mblock={'block':'python module','body':[interface],'vars':{},'name':mname,'interfaced':block['externals']}
            useblock[mname]={}
            usermodules.append(mblock)
    if useblock:
        block['use']=useblock
    return block
def sortvarnames(vars):
    """Order variable names so that each follows its 'depend' entries.

    Independent names come first (in dict order); dependent ones are
    rotated until their dependencies among the remaining names are
    resolved.  On a dependency cycle an error message is emitted and the
    remaining names are appended unsorted.
    """
    independent = []
    dependent = []
    for name in list(vars.keys()):
        if 'depend' in vars[name] and vars[name]['depend']:
            dependent.append(name)
        else:
            independent.append(name)
    pending = len(dependent)
    rotations = 0
    while dependent:  # XXX: How to catch dependence cycles correctly?
        candidate = dependent[0]
        blocked = any(other in vars[candidate]['depend']
                      for other in dependent[1:])
        if blocked:
            # Rotate the candidate to the back and try the next name.
            dependent = dependent[1:] + [candidate]
            rotations = rotations + 1
            if rotations > pending:
                errmess('sortvarnames: failed to compute dependencies because'
                        ' of cyclic dependencies between '
                        + ', '.join(dependent) + '\n')
                independent = independent + dependent
                break
        else:
            independent.append(candidate)
            dependent = dependent[1:]
            pending = len(dependent)
            rotations = 0
    return independent
def analyzecommon(block):
    """Normalize common-block entries and register their dimensions.

    Each '<name>(<dims>)' entry of block['common'] is reduced to its bare
    name; dimension info is pushed into block['vars'] as a
    'dimension(...)' attribute.  All collected names are accumulated in
    block['commonvars'].  Returns the (mutated) block.
    """
    if not hascommon(block):
        return block
    commonvars = []
    entry_re = re.compile(r'\A\s*\b(?P<name>.*?)\b\s*(\((?P<dims>.*?)\)|)\s*\Z', re.I)
    for cname in list(block['common'].keys()):
        names = []
        for entry in block['common'][cname]:
            m = entry_re.match(entry)
            if not m:
                # Keep the raw entry so the common block stays intact.
                errmess('analyzecommon: failed to extract "<name>[(<dims>)]" from "%s" in common /%s/.\n'%(entry, cname))
                names.append(entry)
                continue
            dims = []
            if m.group('dims'):
                dims = [x.strip()
                        for x in markoutercomma(m.group('dims')).split('@,@')]
            varname = m.group('name').strip()
            dimattr = 'dimension(%s)' % (','.join(dims))
            if varname in block['vars']:
                if 'attrspec' in block['vars'][varname]:
                    block['vars'][varname]['attrspec'].append(dimattr)
                else:
                    block['vars'][varname]['attrspec'] = [dimattr]
            else:
                if dims:
                    block['vars'][varname] = {'attrspec': [dimattr]}
                else:
                    block['vars'][varname] = {}
            if varname not in commonvars:
                commonvars.append(varname)
            names.append(varname)
        block['common'][cname] = names
    if 'commonvars' not in block:
        block['commonvars'] = commonvars
    else:
        block['commonvars'] = block['commonvars'] + commonvars
    return block
def analyzebody(block,args,tab=''):
    """Post-process the sub-blocks in ``block['body']``.

    Functions/subroutines not listed in *args* (when *args* is not None),
    listed in the global ``skipfuncs``, or excluded by ``onlyfuncs`` are
    dropped.  Python-module blocks are diverted into the global
    ``usermodules`` list; Fortran-90 module variables are recorded in the
    global ``f90modulevars``.  Returns the filtered, post-cracked body list.
    """
    global usermodules, skipfuncs, onlyfuncs, f90modulevars
    setmesstext(block)
    body=[]
    for b in block['body']:
        b['parent_block'] = block
        if b['block'] in ['function', 'subroutine']:
            if args is not None and b['name'] not in args:
                continue
            else:
                as_=b['args']
            if b['name'] in skipfuncs:
                continue
            if onlyfuncs and b['name'] not in onlyfuncs:
                continue
            # Keep a pretty-printed interface snapshot before postcrack
            # mutates the block.
            b['saved_interface'] = crack2fortrangen(b, '\n'+' '*6, as_interface=True)
        else: as_=args
        b=postcrack(b, as_, tab=tab+'\t')
        # Drop empty interface blocks unless they carry f2py directives.
        if b['block']=='interface' and not b['body']:
            if 'f2pyenhancements' not in b:
                continue
        if b['block'].replace(' ', '')=='pythonmodule':
            usermodules.append(b)
        else:
            if b['block']=='module':
                f90modulevars[b['name']] = b['vars']
            body.append(b)
    return body
def buildimplicitrules(block):
    """Return (implicitrules, attrrules) for *block*.

    implicitrules maps a variable's first letter to a typespec dict;
    attrrules maps letters to 'static'/'automatic' attributes.  An explicit
    ``implicit none`` (block['implicit'] is None) yields implicitrules=None.

    NOTE(review): ``implicitrules`` aliases the module-global
    ``defaultimplicitrules`` and the loop below mutates it in place, so
    per-block implicit statements appear to leak into later blocks —
    confirm whether this sharing is intended.
    """
    setmesstext(block)
    implicitrules=defaultimplicitrules
    attrrules={}
    if 'implicit' in block:
        if block['implicit'] is None:
            # 'implicit none': no implicit typing at all.
            implicitrules=None
            if verbose>1:
                outmess('buildimplicitrules: no implicit rules for routine %s.\n'%repr(block['name']))
        else:
            for k in list(block['implicit'].keys()):
                if block['implicit'][k].get('typespec') not in ['static', 'automatic']:
                    implicitrules[k]=block['implicit'][k]
                else:
                    attrrules[k]=block['implicit'][k]['typespec']
    return implicitrules, attrrules
def myeval(e,g=None,l=None):
    """Evaluate expression *e* and return the result only if it is a plain
    int or float; any other result type raises ValueError.

    Note: exact type is checked (not isinstance), so bool results are
    rejected as well.
    """
    result = eval(e, g, l)
    if type(result) in (int, float):
        return result
    raise ValueError('r=%r' % (result))
getlincoef_re_1 = re.compile(r'\A\b\w+\b\Z', re.I)
def getlincoef(e, xset): # e = a*x+b ; x in xset
    """Try to interpret expression *e* as linear, ``a*x + b``, in one of the
    names in *xset*.

    Returns ``(a, b, x)`` on success; ``(0, c, None)`` if *e* is a constant;
    ``(1, 0, e)`` if *e* is itself a bare name; ``(None, None, None)`` if no
    linear form could be established.  Linearity is probed numerically by
    substituting 0, 1, 0.5 and 1.5 for x and checking the results agree
    with ``a*x + b``.
    """
    try:
        # Constant expression: no dependence on any x.
        c = int(myeval(e, {}, {}))
        return 0, c, None
    except: pass
    if getlincoef_re_1.match(e):
        # e is a single identifier: identity function of itself.
        return 1, 0, e
    len_e = len(e)
    for x in xset:
        if len(x)>len_e: continue
        if re.search(r'\w\s*\([^)]*\b'+x+r'\b', e):
            # skip function calls having x as an argument, e.g max(1, x)
            continue
        re_1 = re.compile(r'(?P<before>.*?)\b'+x+r'\b(?P<after>.*)', re.I)
        m = re_1.match(e)
        if m:
            try:
                # Substitute every occurrence of x (the while loops replace
                # one occurrence per iteration until none match).
                m1 = re_1.match(e)
                while m1:
                    ee = '%s(%s)%s'%(m1.group('before'), 0, m1.group('after'))
                    m1 = re_1.match(ee)
                b = myeval(ee, {}, {})          # value at x=0 -> intercept
                m1 = re_1.match(e)
                while m1:
                    ee = '%s(%s)%s'%(m1.group('before'), 1, m1.group('after'))
                    m1 = re_1.match(ee)
                a = myeval(ee, {}, {}) - b      # value at x=1 minus b -> slope
                m1 = re_1.match(e)
                while m1:
                    ee = '%s(%s)%s'%(m1.group('before'), 0.5, m1.group('after'))
                    m1 = re_1.match(ee)
                c = myeval(ee, {}, {})
                # computing another point to be sure that expression is linear
                m1 = re_1.match(e)
                while m1:
                    ee = '%s(%s)%s'%(m1.group('before'), 1.5, m1.group('after'))
                    m1 = re_1.match(ee)
                c2 = myeval(ee, {}, {})
                if (a*0.5+b==c and a*1.5+b==c2):
                    return a, b, x
            except: pass
            break
    return None, None, None
_varname_match = re.compile(r'\A[a-z]\w*\Z').match
def getarrlen(dl,args,star='*'):
    """Compute the length expression of a dimension range ``dl = [lower, upper]``.

    Returns a 3-tuple ``(length_expr, var, inverse_suffix)`` where
    ``length_expr`` is a string (or repr of an int when fully evaluable),
    ``var`` is the single argument name the length depends linearly on (or
    None), and ``inverse_suffix`` is the string tail used to reconstruct
    that variable from the length (or None).  *args* is the list of names
    that may legitimately appear in the result.
    """
    edl = []
    # Evaluate the bounds where possible; otherwise keep them symbolic.
    try: edl.append(myeval(dl[0], {}, {}))
    except: edl.append(dl[0])
    try: edl.append(myeval(dl[1], {}, {}))
    except: edl.append(dl[1])
    # length = upper - lower + 1; fold constants into the other bound.
    if isinstance(edl[0], int):
        p1 = 1-edl[0]
        if p1==0: d = str(dl[1])
        elif p1<0: d = '%s-%s'%(dl[1], -p1)
        else: d = '%s+%s'%(dl[1], p1)
    elif isinstance(edl[1], int):
        p1 = 1+edl[1]
        if p1==0: d='-(%s)' % (dl[0])
        else: d='%s-(%s)' % (p1, dl[0])
    else: d = '%s-(%s)+1'%(dl[1], dl[0])
    try: return repr(myeval(d, {}, {})), None, None
    except: pass
    # Both bounds symbolic: try to express each as a*x+b in some argument.
    d1, d2=getlincoef(dl[0], args), getlincoef(dl[1], args)
    if None not in [d1[0], d2[0]]:
        if (d1[0], d2[0])==(0, 0):
            return repr(d2[1]-d1[1]+1), None, None
        b = d2[1] - d1[1] + 1
        d1 = (d1[0], 0, d1[2])
        d2 = (d2[0], b, d2[2])
        # Single-variable cases: emit length and the inverse mapping tail.
        if d1[0]==0 and d2[2] in args:
            if b<0: return '%s * %s - %s'%(d2[0], d2[2], -b), d2[2], '+%s)/(%s)'%(-b, d2[0])
            elif b: return '%s * %s + %s'%(d2[0], d2[2], b), d2[2], '-%s)/(%s)'%(b, d2[0])
            else: return '%s * %s'%(d2[0], d2[2]), d2[2], ')/(%s)'%(d2[0])
        if d2[0]==0 and d1[2] in args:
            if b<0: return '%s * %s - %s'%(-d1[0], d1[2], -b), d1[2], '+%s)/(%s)'%(-b, -d1[0])
            elif b: return '%s * %s + %s'%(-d1[0], d1[2], b), d1[2], '-%s)/(%s)'%(b, -d1[0])
            else: return '%s * %s'%(-d1[0], d1[2]), d1[2], ')/(%s)'%(-d1[0])
        if d1[2]==d2[2] and d1[2] in args:
            a = d2[0] - d1[0]
            if not a: return repr(b), None, None
            if b<0: return '%s * %s - %s'%(a, d1[2], -b), d2[2], '+%s)/(%s)'%(-b, a)
            elif b: return '%s * %s + %s'%(a, d1[2], b), d2[2], '-%s)/(%s)'%(b, a)
            else: return '%s * %s'%(a, d1[2]), d2[2], ')/(%s)'%(a)
        # Two-variable / undefined-name cases: build a textual difference.
        if d1[0]==d2[0]==1:
            c = str(d1[2])
            if c not in args:
                if _varname_match(c):
                    outmess('\tgetarrlen:variable "%s" undefined\n' % (c))
                c = '(%s)'%c
            if b==0: d='%s-%s' % (d2[2], c)
            elif b<0: d='%s-%s-%s' % (d2[2], c, -b)
            else: d='%s-%s+%s' % (d2[2], c, b)
        elif d1[0]==0:
            c2 = str(d2[2])
            if c2 not in args:
                if _varname_match(c2):
                    outmess('\tgetarrlen:variable "%s" undefined\n' % (c2))
                c2 = '(%s)'%c2
            if d2[0]==1: pass
            elif d2[0]==-1: c2='-%s' %c2
            else: c2='%s*%s'%(d2[0], c2)
            if b==0: d=c2
            elif b<0: d='%s-%s' % (c2, -b)
            else: d='%s+%s' % (c2, b)
        elif d2[0]==0:
            c1 = str(d1[2])
            if c1 not in args:
                if _varname_match(c1):
                    outmess('\tgetarrlen:variable "%s" undefined\n' % (c1))
                c1 = '(%s)'%c1
            if d1[0]==1: c1='-%s'%c1
            elif d1[0]==-1: c1='+%s'%c1
            elif d1[0]<0: c1='+%s*%s'%(-d1[0], c1)
            else: c1 = '-%s*%s' % (d1[0], c1)
            if b==0: d=c1
            elif b<0: d='%s-%s' % (c1, -b)
            else: d='%s+%s' % (c1, b)
        else:
            c1 = str(d1[2])
            if c1 not in args:
                if _varname_match(c1):
                    outmess('\tgetarrlen:variable "%s" undefined\n' % (c1))
                c1 = '(%s)'%c1
            if d1[0]==1: c1='-%s'%c1
            elif d1[0]==-1: c1='+%s'%c1
            elif d1[0]<0: c1='+%s*%s'%(-d1[0], c1)
            else: c1 = '-%s*%s' % (d1[0], c1)
            c2 = str(d2[2])
            if c2 not in args:
                if _varname_match(c2):
                    outmess('\tgetarrlen:variable "%s" undefined\n' % (c2))
                c2 = '(%s)'%c2
            if d2[0]==1: pass
            elif d2[0]==-1: c2='-%s' %c2
            else: c2='%s*%s'%(d2[0], c2)
            if b==0: d='%s%s' % (c2, c1)
            elif b<0: d='%s%s-%s' % (c2, c1, -b)
            else: d='%s%s+%s' % (c2, c1, b)
    # Falls through with the last symbolic `d` when no linear form was found.
    return d, None, None
word_pattern = re.compile(r'\b[a-z][\w$]*\b', re.I)
def _get_depend_dict(name, vars, deps):
    """Compute the transitive dependency list of *name* within *vars*.

    Direct dependencies come from the variable's 'depend' list plus any
    variable names appearing in its initialization expression ('=');
    dependencies of dependencies are folded in recursively.  The result is
    memoized into *deps* (mutated in place) and also returned.  Unknown
    names get an empty list and a warning.
    """
    if name in vars:
        words = vars[name].get('depend', [])
        if '=' in vars[name] and not isstring(vars[name]):
            # Names referenced by the init expression are dependencies too.
            for word in word_pattern.findall(vars[name]['=']):
                if word not in words and word in vars:
                    words.append(word)
        # Iterate over a snapshot; `words` grows during the loop.
        for word in words[:]:
            for w in deps.get(word, []) \
                    or _get_depend_dict(word, vars, deps):
                if w not in words:
                    words.append(w)
    else:
        outmess('_get_depend_dict: no dependence info for %s\n' % (repr(name)))
        words = []
    deps[name] = words
    return words
def _calc_depend_dict(vars):
names = list(vars.keys())
depend_dict = {}
for n in names:
_get_depend_dict(n, vars, depend_dict)
return depend_dict
def get_sorted_names(vars):
    """Return the names of *vars* topologically sorted by their transitive
    dependencies (dependencies before dependents)."""
    remaining = _calc_depend_dict(vars)
    ordered = []
    # First pass: emit every name that has no dependencies at all.
    for name in list(remaining.keys()):
        if not remaining[name]:
            ordered.append(name)
            del remaining[name]
    # Then repeatedly release names whose dependencies are all satisfied,
    # pruning satisfied dependencies from the rest.
    while remaining:
        for name, lst in list(remaining.items()):
            still_needed = [n for n in lst if n in remaining]
            if still_needed:
                remaining[name] = still_needed
            else:
                ordered.append(name)
                del remaining[name]
    return [name for name in ordered if name in vars]
def _kind_func(string):
    """Rough emulation of the Fortran kind() intrinsic for a literal argument.

    Real literals matching the 16-byte pattern map to 8, the 8-byte pattern
    to 4; anything else is returned as an unevaluated 'kind(...)' string.
    """
    #XXX: return something sensible.
    s = string
    if s[0] in "'\"":
        s = s[1:-1]  # strip surrounding quotes
    if real16pattern.match(s):
        return 8
    if real8pattern.match(s):
        return 4
    return 'kind(' + s + ')'
def _selected_int_kind_func(r):
#XXX: This should be processor dependent
m = 10**r
if m<=2**8: return 1
if m<=2**16: return 2
if m<=2**32: return 4
if m<=2**63: return 8
if m<=2**128: return 16
return -1
def _selected_real_kind_func(p, r=0, radix=0):
#XXX: This should be processor dependent
# This is only good for 0 <= p <= 20
if p < 7: return 4
if p < 16: return 8
if platform.machine().lower().startswith('power'):
if p <= 20:
return 16
else:
if p < 19:
return 10
elif p <= 20:
return 16
return -1
def get_parameters(vars, global_params={}):
    """Evaluate the Fortran PARAMETER variables in *vars*.

    Returns a dict mapping parameter names (and their lowercase aliases)
    to evaluated Python values, seeded with *global_params*.  Fortran
    syntax (logical literals, kind suffixes, d-exponents, kind()/
    selected_*_kind() calls) is rewritten into evaluable Python first;
    values that still fail to evaluate are kept as their source strings.
    """
    params = copy.copy(global_params)
    # Evaluation namespace: parameters plus emulated kind intrinsics.
    g_params = copy.copy(global_params)
    for name, func in [('kind', _kind_func),
                       ('selected_int_kind', _selected_int_kind_func),
                       ('selected_real_kind', _selected_real_kind_func),
                       ]:
        if name not in g_params:
            g_params[name] = func
    param_names = []
    # Dependency order matters: a parameter may reference earlier ones.
    for n in get_sorted_names(vars):
        if 'attrspec' in vars[n] and 'parameter' in vars[n]['attrspec']:
            param_names.append(n)
    kind_re = re.compile(r'\bkind\s*\(\s*(?P<value>.*)\s*\)', re.I)
    selected_int_kind_re = re.compile(r'\bselected_int_kind\s*\(\s*(?P<value>.*)\s*\)', re.I)
    selected_kind_re = re.compile(r'\bselected_(int|real)_kind\s*\(\s*(?P<value>.*)\s*\)', re.I)
    for n in param_names:
        if '=' in vars[n]:
            v = vars[n]['=']
            if islogical(vars[n]):
                v = v.lower()
                for repl in [
                        ('.false.', 'False'),
                        ('.true.', 'True'),
                        #TODO: test .eq., .neq., etc replacements.
                        ]:
                    v = v.replace(*repl)
            v = kind_re.sub(r'kind("\1")', v)
            v = selected_int_kind_re.sub(r'selected_int_kind(\1)', v)
            if isinteger(vars[n]) and not selected_kind_re.match(v):
                # Strip a Fortran kind suffix such as 123_8.
                v = v.split('_')[0]
            if isdouble(vars[n]):
                # Rewrite d-exponents (1.0d0) into Python e-exponents.
                tt = list(v)
                for m in real16pattern.finditer(v):
                    tt[m.start():m.end()] = list(\
                            v[m.start():m.end()].lower().replace('d', 'e'))
                v = ''.join(tt)
            if iscomplex(vars[n]):
                # NOTE(review): `l` is computed but never used — complex
                # parameter handling looks incomplete; confirm intent.
                if v[0]=='(' and v[-1]==')':
                    l = markoutercomma(v[1:-1]).split('@,@')
            try:
                params[n] = eval(v, g_params, params)
            except Exception as msg:
                # Keep the raw string when evaluation fails.
                params[n] = v
                #print params
                outmess('get_parameters: got "%s" on %s\n' % (msg, repr(v)))
            if isstring(vars[n]) and isinstance(params[n], int):
                params[n] = chr(params[n])
            nl = n.lower()
            if nl!=n:
                params[nl] = params[n]
        else:
            print(vars[n])
            outmess('get_parameters:parameter %s does not have value?!\n'%(repr(n)))
    return params
def _eval_length(length, params):
if length in ['(:)', '(*)', '*']:
return '(*)'
return _eval_scalar(length, params)
_is_kind_number = re.compile(r'\d+_').match
def _eval_scalar(value, params):
if _is_kind_number(value):
value = value.split('_')[0]
try:
value = str(eval(value, {}, params))
except (NameError, SyntaxError):
return value
except Exception as msg:
errmess('"%s" in evaluating %r '\
'(available names: %s)\n' \
% (msg, value, list(params.keys())))
return value
def analyzevars(block):
    """Analyze and normalize the variables of *block*.

    Works on a shallow copy of ``block['vars']``: applies implicit typing
    rules, evaluates kind/char selectors and parameters, splits combined
    attrspec entries (dimension/intent/depend/check/note) into dedicated
    keys, derives dimension/length dependencies and check expressions for
    array and string arguments, folds function prefixes and result
    declarations into the block-name entry, and finally prunes variables
    not needed by the wrapper.  Returns the resulting vars dict.
    """
    global f90modulevars
    setmesstext(block)
    implicitrules, attrrules=buildimplicitrules(block)
    vars=copy.copy(block['vars'])
    if block['block']=='function' and block['name'] not in vars:
        vars[block['name']]={}
    # The '' pseudo-variable carries bare PUBLIC/PRIVATE statements that
    # apply to every variable of the block.
    if '' in block['vars']:
        del vars['']
        if 'attrspec' in block['vars']['']:
            gen=block['vars']['']['attrspec']
            for n in list(vars.keys()):
                for k in ['public', 'private']:
                    if k in gen:
                        vars[n]=setattrspec(vars[n], k)
    # Process declared arguments first, then the remaining variables.
    svars=[]
    args = block['args']
    for a in args:
        try:
            vars[a]
            svars.append(a)
        except KeyError:
            pass
    for n in list(vars.keys()):
        if n not in args: svars.append(n)
    params = get_parameters(vars, get_useparameters(block))
    # Precompile one whole-word matcher per variable name, used later to
    # detect which variables an init expression references.
    dep_matches = {}
    name_match = re.compile(r'\w[\w\d_$]*').match
    for v in list(vars.keys()):
        m = name_match(v)
        if m:
            n = v[m.start():m.end()]
            try:
                dep_matches[n]
            except KeyError:
                dep_matches[n] = re.compile(r'.*\b%s\b'%(v), re.I).match
    for n in svars:
        # Implicit typing by first letter when no explicit typespec.
        if n[0] in list(attrrules.keys()):
            vars[n]=setattrspec(vars[n], attrrules[n[0]])
        if 'typespec' not in vars[n]:
            if not('attrspec' in vars[n] and 'external' in vars[n]['attrspec']):
                if implicitrules:
                    ln0 = n[0].lower()
                    for k in list(implicitrules[ln0].keys()):
                        if k=='typespec' and implicitrules[ln0][k]=='undefined':
                            continue
                        if k not in vars[n]:
                            vars[n][k]=implicitrules[ln0][k]
                        elif k=='attrspec':
                            for l in implicitrules[ln0][k]:
                                vars[n]=setattrspec(vars[n], l)
                elif n in block['args']:
                    outmess('analyzevars: typespec of variable %s is not defined in routine %s.\n'%(repr(n), block['name']))
        # Evaluate character length / kind selectors using the parameters.
        if 'charselector' in vars[n]:
            if 'len' in vars[n]['charselector']:
                l = vars[n]['charselector']['len']
                try:
                    l = str(eval(l, {}, params))
                except:
                    pass
                vars[n]['charselector']['len'] = l
        if 'kindselector' in vars[n]:
            if 'kind' in vars[n]['kindselector']:
                l = vars[n]['kindselector']['kind']
                try:
                    l = str(eval(l, {}, params))
                except:
                    pass
                vars[n]['kindselector']['kind'] = l
        savelindims = {}
        # Split combined attrspec entries into dedicated vars[n] keys.
        if 'attrspec' in vars[n]:
            attr=vars[n]['attrspec']
            attr.reverse()
            vars[n]['attrspec']=[]
            dim, intent, depend, check, note=None, None, None, None, None
            for a in attr:
                if a[:9]=='dimension': dim=(a[9:].strip())[1:-1]
                elif a[:6]=='intent': intent=(a[6:].strip())[1:-1]
                elif a[:6]=='depend': depend=(a[6:].strip())[1:-1]
                elif a[:5]=='check': check=(a[5:].strip())[1:-1]
                elif a[:4]=='note': note=(a[4:].strip())[1:-1]
                else: vars[n]=setattrspec(vars[n], a)
            if intent:
                if 'intent' not in vars[n]:
                    vars[n]['intent']=[]
                for c in [x.strip() for x in markoutercomma(intent).split('@,@')]:
                    # Remove spaces so that 'in out' becomes 'inout'
                    tmp = c.replace(' ', '')
                    if tmp not in vars[n]['intent']:
                        vars[n]['intent'].append(tmp)
                intent=None
            if note:
                note=note.replace('\\n\\n', '\n\n')
                note=note.replace('\\n ', '\n')
                if 'note' not in vars[n]:
                    vars[n]['note']=[note]
                else:
                    vars[n]['note'].append(note)
                note=None
            if depend is not None:
                if 'depend' not in vars[n]:
                    vars[n]['depend']=[]
                for c in rmbadname([x.strip() for x in markoutercomma(depend).split('@,@')]):
                    if c not in vars[n]['depend']:
                        vars[n]['depend'].append(c)
                depend=None
            if check is not None:
                if 'check' not in vars[n]:
                    vars[n]['check']=[]
                for c in [x.strip() for x in markoutercomma(check).split('@,@')]:
                    if c not in vars[n]['check']:
                        vars[n]['check'].append(c)
                check=None
            # Normalize each dimension expression, substituting parameter
            # values and reducing ranges to length expressions.
            if dim and 'dimension' not in vars[n]:
                vars[n]['dimension']=[]
                for d in rmbadname([x.strip() for x in markoutercomma(dim).split('@,@')]):
                    star = '*'
                    if d==':':
                        star=':'
                    if d in params:
                        d = str(params[d])
                    for p in list(params.keys()):
                        m = re.match(r'(?P<before>.*?)\b'+p+r'\b(?P<after>.*)', d, re.I)
                        if m:
                            #outmess('analyzevars:replacing parameter %s in %s (dimension of %s) with %s\n'%(`p`,`d`,`n`,`params[p]`))
                            d = m.group('before')+str(params[p])+m.group('after')
                    if d==star:
                        dl = [star]
                    else:
                        dl=markoutercomma(d, ':').split('@:@')
                    if len(dl)==2 and '*' in dl: # e.g. dimension(5:*)
                        dl = ['*']
                        d = '*'
                    if len(dl)==1 and not dl[0]==star: dl = ['1', dl[0]]
                    if len(dl)==2:
                        d, v, di = getarrlen(dl, list(block['vars'].keys()))
                        if d[:4] == '1 * ': d = d[4:]
                        if di and di[-4:] == '/(1)': di = di[:-4]
                        if v: savelindims[d] = v, di
                    vars[n]['dimension'].append(d)
        if 'dimension' in vars[n]:
            if isintent_c(vars[n]):
                shape_macro = 'shape'
            else:
                shape_macro = 'shape'#'fshape'
        # character*N arrays are rewritten as plain character arrays with an
        # extra trailing dimension N, forced to intent(c).
        if isstringarray(vars[n]):
            if 'charselector' in vars[n]:
                d = vars[n]['charselector']
                if '*' in d:
                    d = d['*']
                    errmess('analyzevars: character array "character*%s %s(%s)" is considered as "character %s(%s)"; "intent(c)" is forced.\n'\
                            %(d, n,
                              ','.join(vars[n]['dimension']),
                              n, ','.join(vars[n]['dimension']+[d])))
                    vars[n]['dimension'].append(d)
                    del vars[n]['charselector']
                    if 'intent' not in vars[n]:
                        vars[n]['intent'] = []
                    if 'c' not in vars[n]['intent']:
                        vars[n]['intent'].append('c')
                else:
                    errmess("analyzevars: charselector=%r unhandled." % (d))
        # Derive depend/check information for arguments that lack explicit
        # check() attributes.
        if 'check' not in vars[n] and 'args' in block and n in block['args']:
            flag = 'depend' not in vars[n]
            if flag:
                vars[n]['depend']=[]
            vars[n]['check']=[]
            if 'dimension' in vars[n]:
                #/----< no check
                #vars[n]['check'].append('rank(%s)==%s'%(n,len(vars[n]['dimension'])))
                i=-1; ni=len(vars[n]['dimension'])
                for d in vars[n]['dimension']:
                    ddeps=[] # dependecies of 'd'
                    ad=''
                    pd=''
                    #origd = d
                    if d not in vars:
                        if d in savelindims:
                            pd, ad='(', savelindims[d][1]
                            d = savelindims[d][0]
                        else:
                            for r in block['args']:
                                #for r in block['vars'].iterkeys():
                                if r not in vars:
                                    continue
                                if re.match(r'.*?\b'+r+r'\b', d, re.I):
                                    ddeps.append(r)
                    if d in vars:
                        if 'attrspec' in vars[d]:
                            for aa in vars[d]['attrspec']:
                                if aa[:6]=='depend':
                                    ddeps += aa[6:].strip()[1:-1].split(',')
                        if 'depend' in vars[d]:
                            ddeps=ddeps+vars[d]['depend']
                    i=i+1
                    # A dimension variable with no value of its own becomes
                    # optional and defaults to the shape/len of this array.
                    if d in vars and ('depend' not in vars[d]) \
                       and ('=' not in vars[d]) and (d not in vars[n]['depend']) \
                       and l_or(isintent_in, isintent_inout, isintent_inplace)(vars[n]):
                        vars[d]['depend']=[n]
                        if ni>1:
                            vars[d]['=']='%s%s(%s,%s)%s'% (pd, shape_macro, n, i, ad)
                        else:
                            vars[d]['=']='%slen(%s)%s'% (pd, n, ad)
                        # /---< no check
                        if 1 and 'check' not in vars[d]:
                            if ni>1:
                                vars[d]['check']=['%s%s(%s,%i)%s==%s'\
                                                  %(pd, shape_macro, n, i, ad, d)]
                            else:
                                vars[d]['check']=['%slen(%s)%s>=%s'%(pd, n, ad, d)]
                        if 'attrspec' not in vars[d]:
                            vars[d]['attrspec']=['optional']
                        if ('optional' not in vars[d]['attrspec']) and\
                           ('required' not in vars[d]['attrspec']):
                            vars[d]['attrspec'].append('optional')
                    elif d not in ['*', ':']:
                        #/----< no check
                        #if ni>1: vars[n]['check'].append('shape(%s,%i)==%s'%(n,i,d))
                        #else: vars[n]['check'].append('len(%s)>=%s'%(n,d))
                        if flag:
                            if d in vars:
                                if n not in ddeps:
                                    vars[n]['depend'].append(d)
                            else:
                                vars[n]['depend'] = vars[n]['depend'] + ddeps
            elif isstring(vars[n]):
                # Normalize character length specs to the '*' selector key.
                length='1'
                if 'charselector' in vars[n]:
                    if '*' in vars[n]['charselector']:
                        length = _eval_length(vars[n]['charselector']['*'],
                                              params)
                        vars[n]['charselector']['*']=length
                    elif 'len' in vars[n]['charselector']:
                        length = _eval_length(vars[n]['charselector']['len'],
                                              params)
                        del vars[n]['charselector']['len']
                        vars[n]['charselector']['*']=length
            if not vars[n]['check']:
                del vars[n]['check']
            if flag and not vars[n]['depend']:
                del vars[n]['depend']
        # Variables with init expressions become optional and depend on
        # every variable referenced by the expression.
        if '=' in vars[n]:
            if 'attrspec' not in vars[n]:
                vars[n]['attrspec']=[]
            if ('optional' not in vars[n]['attrspec']) and \
               ('required' not in vars[n]['attrspec']):
                vars[n]['attrspec'].append('optional')
            if 'depend' not in vars[n]:
                vars[n]['depend']=[]
                for v, m in list(dep_matches.items()):
                    if m(vars[n]['=']): vars[n]['depend'].append(v)
                if not vars[n]['depend']: del vars[n]['depend']
            if isscalar(vars[n]):
                vars[n]['='] = _eval_scalar(vars[n]['='], params)
    # Fold function-result declarations and prefixes (pure/recursive/type)
    # into the entry named after the block itself.
    for n in list(vars.keys()):
        if n==block['name']: # n is block name
            if 'note' in vars[n]:
                block['note']=vars[n]['note']
            if block['block']=='function':
                if 'result' in block and block['result'] in vars:
                    vars[n]=appenddecl(vars[n], vars[block['result']])
                if 'prefix' in block:
                    pr=block['prefix']; ispure=0; isrec=1
                    pr1=pr.replace('pure', '')
                    ispure=(not pr==pr1)
                    pr=pr1.replace('recursive', '')
                    isrec=(not pr==pr1)
                    m=typespattern[0].match(pr)
                    if m:
                        typespec, selector, attr, edecl=cracktypespec0(m.group('this'), m.group('after'))
                        kindselect, charselect, typename=cracktypespec(typespec, selector)
                        vars[n]['typespec']=typespec
                        if kindselect:
                            if 'kind' in kindselect:
                                try:
                                    kindselect['kind'] = eval(kindselect['kind'], {}, params)
                                except:
                                    pass
                            vars[n]['kindselector']=kindselect
                        if charselect: vars[n]['charselector']=charselect
                        if typename: vars[n]['typename']=typename
                        if ispure: vars[n]=setattrspec(vars[n], 'pure')
                        if isrec: vars[n]=setattrspec(vars[n], 'recursive')
                    else:
                        outmess('analyzevars: prefix (%s) were not used\n'%repr(block['prefix']))
    # Prune variables that the wrapper does not need (except for modules,
    # block data and derived types, which keep everything relevant).
    if not block['block'] in ['module', 'pythonmodule', 'python module', 'block data']:
        if 'commonvars' in block:
            neededvars=copy.copy(block['args']+block['commonvars'])
        else:
            neededvars=copy.copy(block['args'])
        for n in list(vars.keys()):
            if l_or(isintent_callback, isintent_aux)(vars[n]):
                neededvars.append(n)
        if 'entry' in block:
            neededvars.extend(list(block['entry'].keys()))
            for k in list(block['entry'].keys()):
                for n in block['entry'][k]:
                    if n not in neededvars:
                        neededvars.append(n)
        if block['block']=='function':
            if 'result' in block:
                neededvars.append(block['result'])
            else:
                neededvars.append(block['name'])
        if block['block'] in ['subroutine', 'function']:
            name = block['name']
            if name in vars and 'intent' in vars[name]:
                block['intent'] = vars[name]['intent']
        if block['block'] == 'type':
            neededvars.extend(list(vars.keys()))
        for n in list(vars.keys()):
            if n not in neededvars:
                del vars[n]
    return vars
analyzeargs_re_1 = re.compile(r'\A[a-z]+[\w$]*\Z', re.I)
def expr2name(a, block, args=None):
    """Map the argument *a* (a name or an expression) to a valid variable
    name registered in ``block['vars']``.

    Plain identifiers are kept (uniquified against *args* with a numeric
    suffix when they collide).  Expressions get a generated ``e_..._e``
    name whose type is determined from the expression, uniquified with
    trailing ``r`` characters against the block's vars/args.

    Fix: the *args* default was a mutable list (``args=[]``); although it
    is only read here, the shared-default idiom is a latent hazard, so it
    is now None with an explicit empty-list fallback.  Behavior for all
    existing callers is unchanged.
    """
    if args is None:
        args = []
    orig_a = a
    a_is_expr = not analyzeargs_re_1.match(a)
    if a_is_expr: # `a` is an expression
        implicitrules, attrrules=buildimplicitrules(block)
        at=determineexprtype(a, block['vars'], implicitrules)
        # Build a safe identifier from the expression's characters.
        na='e_'
        for c in a:
            c = c.lower()
            if c not in string.ascii_lowercase+string.digits: c='_'
            na=na+c
        if na[-1]=='_': na=na+'e'
        else: na=na+'_e'
        a=na
        while a in block['vars'] or a in block['args']:
            a=a+'r'
    # Uniquify against names already taken by other arguments.
    if a in args:
        k = 1
        while a + str(k) in args:
            k = k + 1
        a = a + str(k)
    if a_is_expr:
        block['vars'][a]=at
    else:
        if a not in block['vars']:
            if orig_a in block['vars']:
                block['vars'][a] = block['vars'][orig_a]
            else:
                block['vars'][a]={}
        # NOTE(review): assumes 'interfaced' is present whenever
        # 'externals' is — confirm against the block builders.
        if 'externals' in block and orig_a in block['externals']+block['interfaced']:
            block['vars'][a]=setattrspec(block['vars'][a], 'external')
    return a
def analyzeargs(block):
    """Normalize the argument list of *block*.

    Each argument expression is mapped to a proper variable name via
    expr2name, entry-point arguments are given (empty) var entries, body
    routines whose names appear in the argument list are recorded as
    externals, and the result variable gets a var entry.  Returns the
    mutated block.
    """
    setmesstext(block)
    # NOTE(review): the return values are unused here, but the call is kept
    # for its side effects on the shared implicit-rules state.
    implicitrules, attrrules=buildimplicitrules(block)
    if 'args' not in block:
        block['args']=[]
    args=[]
    for a in block['args']:
        a = expr2name(a, block, args)
        args.append(a)
    block['args']=args
    if 'entry' in block:
        for k, args1 in list(block['entry'].items()):
            for a in args1:
                if a not in block['vars']:
                    block['vars'][a]={}
    # Arguments that are themselves routines in the body are externals.
    for b in block['body']:
        if b['name'] in args:
            if 'externals' not in block:
                block['externals']=[]
            if b['name'] not in block['externals']:
                block['externals'].append(b['name'])
    if 'result' in block and block['result'] not in block['vars']:
        block['vars'][block['result']]={}
    return block
determineexprtype_re_1 = re.compile(r'\A\(.+?[,].+?\)\Z', re.I)
determineexprtype_re_2 = re.compile(r'\A[+-]?\d+(_(P<name>[\w]+)|)\Z', re.I)
determineexprtype_re_3 = re.compile(r'\A[+-]?[\d.]+[\d+-de.]*(_(P<name>[\w]+)|)\Z', re.I)
determineexprtype_re_4 = re.compile(r'\A\(.*\)\Z', re.I)
determineexprtype_re_5 = re.compile(r'\A(?P<name>\w+)\s*\(.*?\)\s*\Z', re.I)
def _ensure_exprdict(r):
if isinstance(r, int):
return {'typespec':'integer'}
if isinstance(r, float):
return {'typespec':'real'}
if isinstance(r, complex):
return {'typespec':'complex'}
if isinstance(r, dict):
return r
raise AssertionError(repr(r))
def determineexprtype(expr,vars,rules={}):
    """Determine the typespec dict of a Fortran expression *expr*.

    Resolution order: known variable -> complex literal '(re,im)' ->
    integer literal -> real literal -> any known variable appearing as an
    operand -> recursion into parentheses or a call's function name ->
    character literal -> implicit rules keyed by the first letter.  Returns
    an (possibly empty) typespec dict; an empty dict means undetermined.
    (*rules* defaults to a shared empty dict but is only read.)
    """
    if expr in vars:
        return _ensure_exprdict(vars[expr])
    expr=expr.strip()
    if determineexprtype_re_1.match(expr):
        return {'typespec':'complex'}
    m=determineexprtype_re_2.match(expr)
    if m:
        if 'name' in m.groupdict() and m.group('name'):
            outmess('determineexprtype: selected kind types not supported (%s)\n'%repr(expr))
        return {'typespec':'integer'}
    m = determineexprtype_re_3.match(expr)
    if m:
        if 'name' in m.groupdict() and m.group('name'):
            outmess('determineexprtype: selected kind types not supported (%s)\n'%repr(expr))
        return {'typespec':'real'}
    # Fall back to the type of any known variable used as an operand.
    for op in ['+', '-', '*', '/']:
        for e in [x.strip() for x in markoutercomma(expr, comma=op).split('@'+op+'@')]:
            if e in vars:
                return _ensure_exprdict(vars[e])
    t={}
    if determineexprtype_re_4.match(expr): # in parenthesis
        t=determineexprtype(expr[1:-1], vars, rules)
    else:
        m = determineexprtype_re_5.match(expr)
        if m:
            rn=m.group('name')
            # A call's type is the type of the called name (array element
            # or function reference).
            t=determineexprtype(m.group('name'), vars, rules)
            if t and 'attrspec' in t:
                del t['attrspec']
            if not t:
                if rn[0] in rules:
                    return _ensure_exprdict(rules[rn[0]])
    if expr[0] in '\'"':
        return {'typespec':'character','charselector':{'*':'*'}}
    if not t:
        outmess('determineexprtype: could not determine expressions (%s) type.\n'%(repr(expr)))
    return t
######
def crack2fortrangen(block,tab='\n', as_interface=False):
    """Render a cracked block (or list of blocks) back into Fortran source.

    *tab* is the prefix inserted before each generated line (it carries the
    newline); *as_interface* switches to interface-style output.  Program
    blocks and skipped/excluded functions produce no output.
    """
    global skipfuncs, onlyfuncs
    setmesstext(block)
    ret=''
    if isinstance(block, list):
        for g in block:
            if g and g['block'] in ['function', 'subroutine']:
                if g['name'] in skipfuncs:
                    continue
                if onlyfuncs and g['name'] not in onlyfuncs:
                    continue
            ret=ret+crack2fortrangen(g, tab, as_interface=as_interface)
        return ret
    prefix=''
    name=''
    args=''
    blocktype=block['block']
    if blocktype=='program': return ''
    argsl = []
    if 'name' in block:
        name=block['name']
    if 'args' in block:
        vars = block['vars']
        # Callback arguments are declared separately, not in the arg list.
        for a in block['args']:
            a = expr2name(a, block, argsl)
            if not isintent_callback(vars[a]):
                argsl.append(a)
        if block['block']=='function' or argsl:
            args='(%s)'%','.join(argsl)
    f2pyenhancements = ''
    if 'f2pyenhancements' in block:
        for k in list(block['f2pyenhancements'].keys()):
            f2pyenhancements = '%s%s%s %s'%(f2pyenhancements, tab+tabchar, k, block['f2pyenhancements'][k])
    intent_lst = block.get('intent', [])[:]
    if blocktype=='function' and 'callback' in intent_lst:
        intent_lst.remove('callback')
    if intent_lst:
        f2pyenhancements = '%s%sintent(%s) %s'%\
                           (f2pyenhancements, tab+tabchar,
                            ','.join(intent_lst), name)
    use=''
    if 'use' in block:
        use=use2fortran(block['use'], tab+tabchar)
    common=''
    if 'common' in block:
        common=common2fortran(block['common'], tab+tabchar)
    if name=='unknown_interface': name=''
    result=''
    if 'result' in block:
        result=' result (%s)'%block['result']
        if block['result'] not in argsl:
            argsl.append(block['result'])
    #if 'prefix' in block:
    #    prefix=block['prefix']+' '
    body=crack2fortrangen(block['body'], tab+tabchar)
    vars=vars2fortran(block, block['vars'], argsl, tab+tabchar, as_interface=as_interface)
    mess=''
    if 'from' in block and not as_interface:
        mess='! in %s'%block['from']
    if 'entry' in block:
        entry_stmts = ''
        for k, i in list(block['entry'].items()):
            entry_stmts = '%s%sentry %s(%s)' \
                          % (entry_stmts, tab+tabchar, k, ','.join(i))
        body = body + entry_stmts
    if blocktype=='block data' and name=='_BLOCK_DATA_':
        name = ''
    # Assemble header, declarations, body and the matching end statement.
    ret='%s%s%s %s%s%s %s%s%s%s%s%s%send %s %s'%(tab, prefix, blocktype, name, args, result, mess, f2pyenhancements, use, vars, common, body, tab, blocktype, name)
    return ret
def common2fortran(common,tab=''):
    """Render a 'common' mapping back into Fortran COMMON statements.

    Keys are common-block names (the blank common is stored under
    '_BLNK_' and rendered without a /name/ part); values are the member
    name lists.  *tab* is prepended to every statement.
    """
    out = ''
    for blockname in list(common.keys()):
        members = ','.join(common[blockname])
        if blockname == '_BLNK_':
            out = '%s%scommon %s' % (out, tab, members)
        else:
            out = '%s%scommon /%s/ %s' % (out, tab, blockname, members)
    return out
def use2fortran(use,tab=''):
    """Render a 'use' mapping back into Fortran USE statements.

    Each key is a module name mapped to a spec dict that may contain
    'only' (truthy flag) and 'map' (local-name -> remote-name renames).
    *tab* is prepended to every statement.
    """
    out = ''
    for mod in list(use.keys()):
        out = '%s%suse %s,' % (out, tab, mod)
        spec = use[mod]
        if spec == {}:
            # Bare `use module`: drop the trailing comma and move on.
            if out.endswith(','):
                out = out[:-1]
            continue
        if spec.get('only'):
            out = '%s only:' % (out)
        if spec.get('map'):
            sep = ' '
            for local in list(spec['map'].keys()):
                remote = spec['map'][local]
                if local == remote:
                    out = '%s%s%s' % (out, sep, local)
                else:
                    out = '%s%s%s=>%s' % (out, sep, local, remote)
                sep = ','
        if out.endswith(','):
            out = out[:-1]
    return out
def true_intent_list(var):
    """Filter var['intent'] down to the intent keywords whose corresponding
    isintent_<keyword>(var) predicate exists and returns true.

    Keywords with no matching predicate (NameError on lookup) are silently
    dropped.
    """
    confirmed = []
    for intent in var['intent']:
        try:
            keep = eval('isintent_%s(var)' % intent)
        except NameError:
            keep = 0
        if keep:
            confirmed.append(intent)
    return confirmed
def vars2fortran(block,vars,args,tab='', as_interface=False):
    """Render the variable declarations of *block* back into Fortran.

    Declarations are emitted for the arguments (in order), common-block
    variables, any recorded 'varnames', and — unless *as_interface* — every
    remaining variable.  External/optional statements, kind/char selectors,
    and f2py attributes (dimension, intent, check, depend, defaults) are
    all reconstructed.  *tab* is prepended to each statement.

    TODO:
    public sub
    ...
    """
    setmesstext(block)
    ret=''
    # Build the ordered list of names to declare.
    nout=[]
    for a in args:
        if a in block['vars']:
            nout.append(a)
    if 'commonvars' in block:
        for a in block['commonvars']:
            if a in vars:
                if a not in nout:
                    nout.append(a)
            else:
                errmess('vars2fortran: Confused?!: "%s" is not defined in vars.\n'%a)
    if 'varnames' in block:
        nout.extend(block['varnames'])
    if not as_interface:
        for a in list(vars.keys()):
            if a not in nout:
                nout.append(a)
    for a in nout:
        if 'depend' in vars[a]:
            for d in vars[a]['depend']:
                if d in vars and 'depend' in vars[d] and a in vars[d]['depend']:
                    errmess('vars2fortran: Warning: cross-dependence between variables "%s" and "%s"\n'%(a, d))
        if 'externals' in block and a in block['externals']:
            if isintent_callback(vars[a]):
                ret='%s%sintent(callback) %s'%(ret, tab, a)
            ret='%s%sexternal %s'%(ret, tab, a)
            if isoptional(vars[a]):
                ret='%s%soptional %s'%(ret, tab, a)
            if a in vars and 'typespec' not in vars[a]:
                continue
            # Externals that are not functions defined in the body need no
            # further declaration.
            cont=1
            for b in block['body']:
                if a==b['name'] and b['block']=='function':
                    cont=0;break
            if cont:
                continue
        if a not in vars:
            show(vars)
            outmess('vars2fortran: No definition for argument "%s".\n'%a)
            continue
        if a==block['name'] and not block['block']=='function':
            continue
        if 'typespec' not in vars[a]:
            if 'attrspec' in vars[a] and 'external' in vars[a]['attrspec']:
                if a in args:
                    ret='%s%sexternal %s'%(ret, tab, a)
                continue
            show(vars[a])
            outmess('vars2fortran: No typespec for argument "%s".\n'%a)
            continue
        vardef=vars[a]['typespec']
        if vardef=='type' and 'typename' in vars[a]:
            vardef='%s(%s)'%(vardef, vars[a]['typename'])
        # Render kind/character selectors: '*'-style or (len=,kind=) style.
        selector={}
        if 'kindselector' in vars[a]:
            selector=vars[a]['kindselector']
        elif 'charselector' in vars[a]:
            selector=vars[a]['charselector']
        if '*' in selector:
            if selector['*'] in ['*', ':']:
                vardef='%s*(%s)'%(vardef, selector['*'])
            else:
                vardef='%s*%s'%(vardef, selector['*'])
        else:
            if 'len' in selector:
                vardef='%s(len=%s'%(vardef, selector['len'])
                if 'kind' in selector:
                    vardef='%s,kind=%s)'%(vardef, selector['kind'])
                else:
                    vardef='%s)'%(vardef)
            elif 'kind' in selector:
                vardef='%s(kind=%s)'%(vardef, selector['kind'])
        # `c` is the separator before the next attribute clause.
        c=' '
        if 'attrspec' in vars[a]:
            attr=[]
            for l in vars[a]['attrspec']:
                if l not in ['external']:
                    attr.append(l)
            if attr:
                vardef='%s, %s'%(vardef, ','.join(attr))
                c=','
        if 'dimension' in vars[a]:
            # if not isintent_c(vars[a]):
            #     vars[a]['dimension'].reverse()
            vardef='%s%sdimension(%s)'%(vardef, c, ','.join(vars[a]['dimension']))
            c=','
        if 'intent' in vars[a]:
            lst = true_intent_list(vars[a])
            if lst:
                vardef='%s%sintent(%s)'%(vardef, c, ','.join(lst))
                c=','
        if 'check' in vars[a]:
            vardef='%s%scheck(%s)'%(vardef, c, ','.join(vars[a]['check']))
            c=','
        if 'depend' in vars[a]:
            vardef='%s%sdepend(%s)'%(vardef, c, ','.join(vars[a]['depend']))
            c=','
        if '=' in vars[a]:
            v = vars[a]['=']
            # Complex defaults are rendered in Fortran '(re,im)' form when
            # the expression evaluates.
            if vars[a]['typespec'] in ['complex', 'double complex']:
                try:
                    v = eval(v)
                    v = '(%s,%s)' % (v.real, v.imag)
                except:
                    pass
            vardef='%s :: %s=%s'%(vardef, a, v)
        else:
            vardef='%s :: %s'%(vardef, a)
        ret='%s%s%s'%(ret, tab, vardef)
    return ret
######
def crackfortran(files):
    """Crack the Fortran source *files* into block dictionaries.

    Reads the sources line-by-line through crackline, then runs the two
    post-processing stages.  Returns the collected user (python) modules
    followed by the post-processed block list.
    """
    global usermodules
    outmess('Reading fortran codes...\n', 0)
    readfortrancode(files, crackline)
    outmess('Post-processing...\n', 0)
    usermodules=[]
    # grouplist[0] holds the top-level group built up by crackline.
    postlist=postcrack(grouplist[0])
    outmess('Post-processing (stage 2)...\n', 0)
    postlist=postcrack2(postlist)
    return usermodules+postlist
def crack2fortran(block):
    """Render a cracked block list back to Fortran 90 source text, wrapped
    in an f2py signature-file header and version footer."""
    global f2py_version
    pyf=crack2fortrangen(block)+'\n'
    header="""! -*- f90 -*-
! Note: the context of this file is case sensitive.
"""
    footer="""
! This file was auto-generated with f2py (version:%s).
! See http://cens.ioc.ee/projects/f2py2e/
"""%(f2py_version)
    return header+pyf+footer
if __name__ == "__main__":
    # Command-line driver: crack the given Fortran files and optionally
    # write the reconstructed signature to a .pyf file (-h) or show the
    # cracked block list (-show).
    files=[]
    funcs=[]
    # f: collecting file names; f2: next arg is the -h output file;
    # f3: next arg is the -m module name.
    f=1;f2=0;f3=0
    showblocklist=0
    for l in sys.argv[1:]:
        if l=='': pass
        elif l[0]==':':
            f=0
        elif l=='-quiet':
            quiet=1
            verbose=0
        elif l=='-verbose':
            verbose=2
            quiet=0
        elif l=='-fix':
            if strictf77:
                outmess('Use option -f90 before -fix if Fortran 90 code is in fix form.\n', 0)
            skipemptyends=1
            sourcecodeform='fix'
        elif l=='-skipemptyends':
            skipemptyends=1
        elif l=='--ignore-contains':
            ignorecontains=1
        elif l=='-f77':
            strictf77=1
            sourcecodeform='fix'
        elif l=='-f90':
            strictf77=0
            sourcecodeform='free'
            skipemptyends=1
        elif l=='-h':
            f2=1
        elif l=='-show':
            showblocklist=1
        elif l=='-m':
            f3=1
        elif l[0]=='-':
            errmess('Unknown option %s\n'%repr(l))
        elif f2:
            f2=0
            pyffilename=l
        elif f3:
            f3=0
            f77modulename=l
        elif f:
            try:
                open(l).close()
                files.append(l)
            except IOError as detail:
                errmess('IOError: %s\n'%str(detail))
        else:
            funcs.append(l)
    if not strictf77 and f77modulename and not skipemptyends:
        outmess("""\
Warning: You have specifyied module name for non Fortran 77 code
that should not need one (expect if you are scanning F90 code
for non module blocks but then you should use flag -skipemptyends
and also be sure that the files do not contain programs without program statement).
""", 0)
    # Fix: crackfortran() takes only the file list (see its definition
    # above); passing `funcs` as a second argument raised TypeError.
    # NOTE(review): `funcs` is still collected for CLI compatibility but is
    # currently unused — confirm whether function filtering should go
    # through the global `onlyfuncs` instead.
    postlist=crackfortran(files)
    if pyffilename:
        outmess('Writing fortran code to file %s\n'%repr(pyffilename), 0)
        pyf=crack2fortran(postlist)
        f=open(pyffilename, 'w')
        f.write(pyf)
        f.close()
    if showblocklist:
        show(postlist)
| apache-2.0 |
maxwward/SCOPEBak | askbot/migrations/0080_transplant_favquestions_2.py | 2 | 27311 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        """Apply the migration: drop FavoriteExercise.exercise and make
        FavoriteExercise.thread a non-nullable FK to Thread (default 0)."""
        # Deleting field 'FavoriteExercise.exercise'
        db.delete_column(u'favorite_exercise', 'exercise_id')
        # Changing field 'FavoriteExercise.thread'
        db.alter_column(u'favorite_exercise', 'thread_id', self.gf('django.db.models.fields.related.ForeignKey')(default=0, to=orm['askbot.Thread']))
    def backwards(self, orm):
        """Reversal is unsupported: the dropped ``exercise_id`` values cannot
        be restored, so this raises rather than silently losing data.
        """
        # User chose to not deal with backwards NULL issues for 'FavoriteExercise.exercise'
        raise RuntimeError("Cannot reverse this migration. 'FavoriteExercise.exercise' and its values cannot be restored.")
        # Changing field 'FavoriteExercise.thread'
        # NOTE(review): unreachable after the raise above; residue emitted by
        # the South generator, kept for reference.
        db.alter_column(u'favorite_exercise', 'thread_id', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['askbot.Thread'], null=True))
models = {
'askbot.activity': {
'Meta': {'object_name': 'Activity', 'db_table': "u'activity'"},
'active_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'activity_type': ('django.db.models.fields.SmallIntegerField', [], {}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_auditted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'exercise': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['askbot.Exercise']", 'null': 'True'}),
'receiving_users': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'received_activity'", 'symmetrical': 'False', 'to': "orm['auth.User']"}),
'recipients': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'incoming_activity'", 'symmetrical': 'False', 'through': "orm['askbot.ActivityAuditStatus']", 'to': "orm['auth.User']"}),
'summary': ('django.db.models.fields.TextField', [], {'default': "''"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'askbot.activityauditstatus': {
'Meta': {'unique_together': "(('user', 'activity'),)", 'object_name': 'ActivityAuditStatus'},
'activity': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['askbot.Activity']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'status': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'askbot.anonymousproblem': {
'Meta': {'object_name': 'AnonymousProblem'},
'added_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip_addr': ('django.db.models.fields.IPAddressField', [], {'max_length': '15'}),
'exercise': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'anonymous_problems'", 'to': "orm['askbot.Exercise']"}),
'session_key': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'summary': ('django.db.models.fields.CharField', [], {'max_length': '180'}),
'text': ('django.db.models.fields.TextField', [], {}),
'wiki': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'askbot.anonymousexercise': {
'Meta': {'object_name': 'AnonymousExercise'},
'added_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip_addr': ('django.db.models.fields.IPAddressField', [], {'max_length': '15'}),
'is_anonymous': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'session_key': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'summary': ('django.db.models.fields.CharField', [], {'max_length': '180'}),
'tagnames': ('django.db.models.fields.CharField', [], {'max_length': '125'}),
'text': ('django.db.models.fields.TextField', [], {}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
'wiki': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'askbot.problem': {
'Meta': {'object_name': 'Problem', 'db_table': "u'problem'"},
'added_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'problems'", 'to': "orm['auth.User']"}),
'comment_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'deleted_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'deleted_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'deleted_problems'", 'null': 'True', 'to': "orm['auth.User']"}),
'html': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_edited_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'last_edited_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'last_edited_problems'", 'null': 'True', 'to': "orm['auth.User']"}),
'locked': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'locked_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'locked_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'locked_problems'", 'null': 'True', 'to': "orm['auth.User']"}),
'offensive_flag_count': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'exercise': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'problems'", 'to': "orm['askbot.Exercise']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'summary': ('django.db.models.fields.CharField', [], {'max_length': '180'}),
'text': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'vote_down_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'vote_up_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'wiki': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'wikified_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
},
'askbot.award': {
'Meta': {'object_name': 'Award', 'db_table': "u'award'"},
'awarded_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'badge': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'award_badge'", 'to': "orm['askbot.BadgeData']"}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'notified': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'award_user'", 'to': "orm['auth.User']"})
},
'askbot.badgedata': {
'Meta': {'ordering': "('slug',)", 'object_name': 'BadgeData'},
'awarded_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'awarded_to': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'badges'", 'symmetrical': 'False', 'through': "orm['askbot.Award']", 'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50', 'db_index': 'True'})
},
'askbot.comment': {
'Meta': {'ordering': "('-added_at',)", 'object_name': 'Comment', 'db_table': "u'comment'"},
'added_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'comment': ('django.db.models.fields.CharField', [], {'max_length': '2048'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'html': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '2048'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'offensive_flag_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'comments'", 'to': "orm['auth.User']"})
},
'askbot.emailfeedsetting': {
'Meta': {'object_name': 'EmailFeedSetting'},
'added_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'feed_type': ('django.db.models.fields.CharField', [], {'max_length': '16'}),
'frequency': ('django.db.models.fields.CharField', [], {'default': "'n'", 'max_length': '8'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'reported_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'subscriber': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'notification_subscriptions'", 'to': "orm['auth.User']"})
},
'askbot.favoriteexercise': {
'Meta': {'object_name': 'FavoriteExercise', 'db_table': "u'favorite_exercise'"},
'added_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'thread': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['askbot.Thread']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'user_favorite_exercises'", 'to': "orm['auth.User']"})
},
'askbot.markedtag': {
'Meta': {'object_name': 'MarkedTag'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'reason': ('django.db.models.fields.CharField', [], {'max_length': '16'}),
'tag': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'user_selections'", 'to': "orm['askbot.Tag']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'tag_selections'", 'to': "orm['auth.User']"})
},
'askbot.postrevision': {
'Meta': {'ordering': "('-revision',)", 'unique_together': "(('problem', 'revision'), ('exercise', 'revision'))", 'object_name': 'PostRevision'},
'problem': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'revisions'", 'null': 'True', 'to': "orm['askbot.Problem']"}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'postrevisions'", 'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_anonymous': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'exercise': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'revisions'", 'null': 'True', 'to': "orm['askbot.Exercise']"}),
'revised_at': ('django.db.models.fields.DateTimeField', [], {}),
'revision': ('django.db.models.fields.PositiveIntegerField', [], {}),
'revision_type': ('django.db.models.fields.SmallIntegerField', [], {}),
'summary': ('django.db.models.fields.CharField', [], {'max_length': '300', 'blank': 'True'}),
'tagnames': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '125', 'blank': 'True'}),
'text': ('django.db.models.fields.TextField', [], {}),
'title': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '300', 'blank': 'True'})
},
'askbot.exercise': {
'Meta': {'object_name': 'Exercise', 'db_table': "u'exercise'"},
'added_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'exercises'", 'to': "orm['auth.User']"}),
'comment_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'deleted_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'deleted_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'deleted_exercises'", 'null': 'True', 'to': "orm['auth.User']"}),
'html': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_anonymous': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_edited_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'last_edited_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'last_edited_exercises'", 'null': 'True', 'to': "orm['auth.User']"}),
'locked': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'locked_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'locked_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'locked_exercises'", 'null': 'True', 'to': "orm['auth.User']"}),
'offensive_flag_count': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'summary': ('django.db.models.fields.CharField', [], {'max_length': '180'}),
'tags': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'exercises'", 'symmetrical': 'False', 'to': "orm['askbot.Tag']"}),
'text': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'thread': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'exercises'", 'unique': 'True', 'to': "orm['askbot.Thread']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
'vote_down_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'vote_up_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'wiki': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'wikified_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
},
'askbot.exerciseview': {
'Meta': {'object_name': 'ExerciseView'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'exercise': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'viewed'", 'to': "orm['askbot.Exercise']"}),
'when': ('django.db.models.fields.DateTimeField', [], {}),
'who': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'exercise_views'", 'to': "orm['auth.User']"})
},
'askbot.repute': {
'Meta': {'object_name': 'Repute', 'db_table': "u'repute'"},
'comment': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'negative': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'positive': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'exercise': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['askbot.Exercise']", 'null': 'True', 'blank': 'True'}),
'reputation': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'reputation_type': ('django.db.models.fields.SmallIntegerField', [], {}),
'reputed_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'askbot.tag': {
'Meta': {'ordering': "('-used_count', 'name')", 'object_name': 'Tag', 'db_table': "u'tag'"},
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'created_tags'", 'to': "orm['auth.User']"}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'deleted_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'deleted_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'deleted_tags'", 'null': 'True', 'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'used_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'askbot.thread': {
'Meta': {'object_name': 'Thread'},
'accepted_problem': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['askbot.Problem']", 'null': 'True', 'blank': 'True'}),
'problem_accepted_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'problem_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'close_reason': ('django.db.models.fields.SmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'closed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'closed_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'closed_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'favorited_by': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'favorite_threads'", 'symmetrical': 'False', 'through': "orm['askbot.FavoriteExercise']", 'to': "orm['auth.User']"}),
'favourite_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'followed_by': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'followed_threads'", 'symmetrical': 'False', 'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_activity_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_activity_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'unused_last_active_in_threads'", 'to': "orm['auth.User']"}),
'tagnames': ('django.db.models.fields.CharField', [], {'max_length': '125'}),
'view_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'askbot.vote': {
'Meta': {'unique_together': "(('content_type', 'object_id', 'user'),)", 'object_name': 'Vote', 'db_table': "u'vote'"},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'votes'", 'to': "orm['auth.User']"}),
'vote': ('django.db.models.fields.SmallIntegerField', [], {}),
'voted_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'})
},
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'about': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'avatar_type': ('django.db.models.fields.CharField', [], {'default': "'n'", 'max_length': '1'}),
'bronze': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'consecutive_days_visit_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'country': ('django_countries.fields.CountryField', [], {'max_length': '2', 'blank': 'True'}),
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_of_birth': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'display_tag_filter_strategy': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'email_isvalid': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'email_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'email_tag_filter_strategy': ('django.db.models.fields.SmallIntegerField', [], {'default': '1'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'gold': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'gravatar': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ignored_tags': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'interesting_tags': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'location': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'new_response_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'exercises_per_page': ('django.db.models.fields.SmallIntegerField', [], {'default': '10'}),
'real_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'reputation': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'seen_response_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'show_country': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'silver': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'w'", 'max_length': '2'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}),
'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['askbot']
| gpl-3.0 |
caioserra/apiAdwords | examples/adspygoogle/dfa/v1_19/get_available_permissions.py | 3 | 2027 | #!/usr/bin/python
#
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example displays all of the available permissions that a user role or
subnetwork may be endowed with. To get a subnetwork ID, run
get_subnetworks.py.
A user role may not be set with more permissions than the subnetwork it
belongs to. You may enter a subnetwork ID to see the maximum permissions a
user role belonging to it can have, or enter '0' as the subnetwork ID to see
all possible permissions.
Tags: userrole.getAvailablePermissions
"""
__author__ = 'api.jdilallo@gmail.com (Joseph DiLallo)'
import os
import sys
sys.path.insert(0, os.path.join('..', '..', '..', '..'))
# Import appropriate classes from the client library.
from adspygoogle import DfaClient
SUBNETWORK_ID = 'INSERT_SUBNETWORK_ID_HERE'
def main(client, subnetwork_id):
  """Print every permission available to the given subnetwork.

  Args:
    client: DfaClient instance used to obtain the user role service.
    subnetwork_id: ID of the subnetwork to inspect, or '0' to list all
        possible permissions.
  """
  # Initialize appropriate service.
  user_role_service = client.GetUserRoleService(
      'https://advertisersapitest.doubleclick.net', 'v1.19')

  # Get available permissions.
  results = user_role_service.GetAvailablePermissions(subnetwork_id)

  # Display permission name and its ID.
  if results:
    for permission in results:
      print ('Permission with name \'%s\' and ID \'%s\' was found.'
             % (permission['name'], permission['id']))
  else:
    # Parenthesized so this single-argument print is valid under both
    # Python 2 and Python 3 (the bare statement form broke Python 3).
    print('No permissions found.')
if __name__ == '__main__':
  # Initialize client object.
  # The path presumably points DfaClient at the client-library root so it can
  # locate its auth/config files -- NOTE(review): confirm against DfaClient.
  client = DfaClient(path=os.path.join('..', '..', '..', '..'))
  main(client, SUBNETWORK_ID)
| apache-2.0 |
khemissianouar/Delvino | vendor/doctrine/orm/docs/en/conf.py | 2448 | 6497 | # -*- coding: utf-8 -*-
#
# Doctrine 2 ORM documentation build configuration file, created by
# sphinx-quickstart on Fri Dec 3 18:10:24 2010.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.append(os.path.abspath('_exts'))
# -- General configuration -----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['configurationblock']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Doctrine 2 ORM'
copyright = u'2010-12, Doctrine Project Team'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '2'
# The full version, including alpha/beta/rc tags.
release = '2'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
language = 'en'
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
show_authors = True
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'doctrine'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['_theme']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'Doctrine2ORMdoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'Doctrine2ORM.tex', u'Doctrine 2 ORM Documentation',
u'Doctrine Project Team', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
primary_domain = "dcorm"
def linkcode_resolve(domain, info):
    """Sphinx ``linkcode`` hook: return an external URL for an object.

    Only objects in the custom ``dcorm`` domain get a (placeholder) link;
    everything else gets no source link.
    """
    return 'http://' if domain == 'dcorm' else None
| mit |
aam-at/tensorflow | tensorflow/python/debug/lib/session_debug_file_test.py | 21 | 5221 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for debugger functionalities in tf.Session with file:// URLs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tempfile
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session
from tensorflow.python.debug.lib import debug_data
from tensorflow.python.debug.lib import debug_utils
from tensorflow.python.debug.lib import session_debug_testlib
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.lib.io import file_io
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
@test_util.run_v1_only("b/120545219")
class SessionDebugFileTest(session_debug_testlib.SessionDebugTestBase):
  """Runs the shared session-debug test suite using file:// debug URLs."""

  def _debug_urls(self, run_number=None):
    # Debug dumps are written to local directories addressed via file://.
    return ["file://%s" % self._debug_dump_dir(run_number=run_number)]

  def _debug_dump_dir(self, run_number=None):
    # None selects the shared dump root; a number selects a per-run subdir.
    if run_number is None:
      return self._dump_root
    else:
      return os.path.join(self._dump_root, "run_%d" % run_number)

  def testAllowsDifferentWatchesOnDifferentRuns(self):
    """Test watching different tensors on different runs of the same graph."""
    with session.Session(
        config=session_debug_testlib.no_rewrite_session_config()) as sess:
      u_init_val = [[5.0, 3.0], [-1.0, 0.0]]
      v_init_val = [[2.0], [-1.0]]

      # Use node names with overlapping namespace (i.e., parent directory) to
      # test concurrent, non-racing directory creation.
      u_name = "diff_Watch/u"
      v_name = "diff_Watch/v"

      u_init = constant_op.constant(u_init_val, shape=[2, 2])
      u = variables.VariableV1(u_init, name=u_name)
      v_init = constant_op.constant(v_init_val, shape=[2, 1])
      v = variables.VariableV1(v_init, name=v_name)

      w = math_ops.matmul(u, v, name="diff_Watch/matmul")

      u.initializer.run()
      v.initializer.run()

      # Two runs of the same graph, each watching a different tensor and
      # dumping into its own per-run directory.
      for i in range(2):
        run_options = config_pb2.RunOptions(output_partition_graphs=True)

        run_dump_root = self._debug_dump_dir(run_number=i)
        debug_urls = self._debug_urls(run_number=i)

        if i == 0:
          # First debug run: Add debug tensor watch for u.
          debug_utils.add_debug_tensor_watch(
              run_options, "%s/read" % u_name, 0, debug_urls=debug_urls)
        else:
          # Second debug run: Add debug tensor watch for v.
          debug_utils.add_debug_tensor_watch(
              run_options, "%s/read" % v_name, 0, debug_urls=debug_urls)

        run_metadata = config_pb2.RunMetadata()

        # Invoke Session.run().
        sess.run(w, options=run_options, run_metadata=run_metadata)

        self.assertEqual(self._expected_partition_graph_count,
                         len(run_metadata.partition_graphs))

        dump = debug_data.DebugDumpDir(
            run_dump_root, partition_graphs=run_metadata.partition_graphs)
        self.assertTrue(dump.loaded_partition_graphs())

        # Each run should have generated only one dumped tensor, not two.
        self.assertEqual(1, dump.size)

        if i == 0:
          self.assertAllClose([u_init_val],
                              dump.get_tensors("%s/read" % u_name, 0,
                                               "DebugIdentity"))
          self.assertGreaterEqual(
              dump.get_rel_timestamps("%s/read" % u_name, 0,
                                      "DebugIdentity")[0], 0)
        else:
          self.assertAllClose([v_init_val],
                              dump.get_tensors("%s/read" % v_name, 0,
                                               "DebugIdentity"))
          self.assertGreaterEqual(
              dump.get_rel_timestamps("%s/read" % v_name, 0,
                                      "DebugIdentity")[0], 0)
class SessionDebugConcurrentTest(
    session_debug_testlib.DebugConcurrentRunCallsTest):
  """Concurrent-run debug tests, dumping to one temp directory per run."""

  def setUp(self):
    # One fresh dump directory for each of the concurrent runs.
    self._num_concurrent_runs = 3
    self._dump_roots = [
        tempfile.mkdtemp() for _ in range(self._num_concurrent_runs)
    ]

  def tearDown(self):
    ops.reset_default_graph()
    # Remove every dump directory that still exists on disk.
    for root in self._dump_roots:
      if os.path.isdir(root):
        file_io.delete_recursively(root)

  def _get_concurrent_debug_urls(self):
    return ["file://%s" % root for root in self._dump_roots]
# Standard TensorFlow test entry point.
if __name__ == "__main__":
  googletest.main()
| apache-2.0 |
autotest/arc | arc/shared/frontend.py | 1 | 1316 | # This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#
# See LICENSE for more details.
#
# Copyright (c) 2013-2014 Red Hat
# Author: Cleber Rosa <cleber@redhat.com>
'''
Basic definitions for the frontend.
Note that the frontend is broader in scope and functionality than the rpc
server. Another way to put that is the rpc server is a subset of the frontend.
'''

# Explicit public API of this module.
__all__ = ['AFE_SERVICE_NAME',
           'TKO_SERVICE_NAME',
           'AFE_URL_PREFIX',
           'TKO_URL_PREFIX']

#: The name of the "AFE" service, used when accessing that service
AFE_SERVICE_NAME = 'afe'

#: The name of the "TKO" service, used when accessing that service
TKO_SERVICE_NAME = 'tko'

#: Prefix applied to all AFE URLs. This information is useful if requests are
#: coming through apache, and you need this app to coexist with others
AFE_URL_PREFIX = 'afe/server/'

#: Prefix applied to the TKO server frontend
TKO_URL_PREFIX = 'new_tko/server/'
| gpl-2.0 |
opensourcechipspark/platform_external_chromium_org | third_party/pexpect/fdpexpect.py | 171 | 3366 | """This is like pexpect, but will work on any file descriptor that you pass it.
You are reponsible for opening and close the file descriptor.
PEXPECT LICENSE
This license is approved by the OSI and FSF as GPL-compatible.
http://opensource.org/licenses/isc-license.txt
Copyright (c) 2012, Noah Spurrier <noah@noah.org>
PERMISSION TO USE, COPY, MODIFY, AND/OR DISTRIBUTE THIS SOFTWARE FOR ANY
PURPOSE WITH OR WITHOUT FEE IS HEREBY GRANTED, PROVIDED THAT THE ABOVE
COPYRIGHT NOTICE AND THIS PERMISSION NOTICE APPEAR IN ALL COPIES.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""
from pexpect import *
import os
__all__ = ['fdspawn']
class fdspawn (spawn):
    """This is like pexpect.spawn but allows you to supply your own open file
    descriptor. For example, you could use it to read through a file looking
    for patterns, or to control a modem or serial device. """

    def __init__ (self, fd, args=[], timeout=30, maxread=2000, searchwindowsize=None, logfile=None):
        """This takes a file descriptor (an int) or an object that support the
        fileno() method (returning an int). All Python file-like objects
        support fileno(). """

        ### TODO: Add better handling of trying to use fdspawn in place of spawn
        ### TODO: (overload to allow fdspawn to also handle commands as spawn does.

        # Accept any object exposing fileno() (files, sockets, ...).
        if type(fd) != type(0) and hasattr(fd, 'fileno'):
            fd = fd.fileno()

        if type(fd) != type(0):
            raise ExceptionPexpect ('The fd argument is not an int. If this is a command string then maybe you want to use pexpect.spawn.')

        try: # make sure fd is a valid file descriptor
            os.fstat(fd)
        except OSError:
            # Fixed: the original used the Python-2-only statement form
            # "raise ExceptionPexpect, msg"; the call form below is
            # equivalent on Python 2 and also valid on Python 3.
            raise ExceptionPexpect('The fd argument is not a valid file descriptor.')

        self.args = None
        self.command = None
        spawn.__init__(self, None, args, timeout, maxread, searchwindowsize, logfile)
        self.child_fd = fd
        # We never own the descriptor: the caller opened it and is
        # responsible for its lifetime (see __del__).
        self.own_fd = False
        self.closed = False
        self.name = '<file descriptor %d>' % fd

    def __del__ (self):
        # Nothing to release on garbage collection; the caller owns the fd.
        return

    def close (self):
        """Flush and close the wrapped file descriptor; no-op if already
        closed.

        Bug fix: the original called ``self.close(self)`` when ``own_fd``
        was true, which would raise TypeError (close takes no extra
        argument) and recurse. Since ``own_fd`` is always False for this
        class, flushing and closing unconditionally preserves behavior
        while removing the broken branch (this matches the fix later
        applied upstream in pexpect).
        """
        if self.child_fd == -1:
            return
        self.flush()
        os.close(self.child_fd)
        self.child_fd = -1
        self.closed = True

    def isalive (self):
        """This checks if the file descriptor is still valid. If os.fstat()
        does not raise an exception then we assume it is alive. """
        if self.child_fd == -1:
            return False
        try:
            os.fstat(self.child_fd)
            return True
        except:
            # Any fstat failure means the descriptor is no longer usable.
            return False

    def terminate (self, force=False):
        # There is no process behind a bare file descriptor.
        raise ExceptionPexpect('This method is not valid for file descriptors.')

    def kill (self, sig):
        # Signals do not apply to plain file descriptors; deliberately a no-op.
        return
| bsd-3-clause |
Lekanich/intellij-community | python/helpers/profiler/thriftpy3/server/TNonblockingServer.py | 44 | 11883 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
"""Implementation of non-blocking server.
The main idea of the server is to receive and send requests
only from the main thread.
The thread poool should be sized for concurrent tasks, not
maximum connections
"""
import threading
import socket
import select
import struct
import logging
logger = logging.getLogger(__name__)
from six.moves import queue
from thriftpy3.transport import TTransport
from thriftpy3.protocol.TBinaryProtocol import TBinaryProtocolFactory
__all__ = ['TNonblockingServer']
class Worker(threading.Thread):
    """Thread that executes processing tasks pulled from a shared queue."""

    def __init__(self, queue):
        threading.Thread.__init__(self)
        self.queue = queue

    def run(self):
        """Consume tasks forever; a task whose processor is None stops us."""
        while True:
            try:
                task = self.queue.get()
                processor, iprot, oprot, otrans, callback = task
                if processor is None:
                    # Shutdown sentinel pushed by the server's close().
                    break
                processor.process(iprot, oprot)
                callback(True, otrans.getvalue())
            except Exception:
                logger.exception("Exception while processing request")
                callback(False, b'')
# Connection state-machine values (see the Connection class docstring).
WAIT_LEN = 0      # reading the 4-byte frame length
WAIT_MESSAGE = 1  # reading the frame payload
WAIT_PROCESS = 2  # frame fully read; awaiting worker-thread processing
SEND_ANSWER = 3   # writing the (length-prefixed) response back
CLOSED = 4        # socket closed; connection should be discarded
def locked(func):
    """Decorator that runs *func* while holding ``self.lock``.

    The decorated callable must be a method of an object exposing a
    ``lock`` attribute (a context-manager lock such as threading.Lock).
    """
    def nested(self, *args, **kwargs):
        # ``with`` acquires the lock and guarantees release even if
        # func raises -- equivalent to the original acquire/try/finally
        # but idiomatic and harder to get wrong.
        with self.lock:
            return func(self, *args, **kwargs)
    return nested
def socket_exception(func):
    """Decorator turning a socket.error from *func* into ``self.close()``.

    On success the wrapped method's return value is passed through; on
    socket.error the connection is closed and None is returned.
    """
    def guarded(self, *args, **kwargs):
        try:
            result = func(self, *args, **kwargs)
        except socket.error:
            self.close()
            return None
        return result
    return guarded
class Connection:
    """Basic class is represented connection.
    It can be in state:
    WAIT_LEN --- connection is reading request len.
    WAIT_MESSAGE --- connection is reading request.
    WAIT_PROCESS --- connection has just read whole request and
    waits for call ready routine.
    SEND_ANSWER --- connection is sending answer string (including length
    of answer).
    CLOSED --- socket was closed and connection should be deleted.
    """

    def __init__(self, new_socket, wake_up):
        # The accepted socket is switched to non-blocking mode: all reads
        # and writes happen from the server's select() loop.
        self.socket = new_socket
        self.socket.setblocking(False)
        self.status = WAIT_LEN
        self.len = 0            # expected payload length of the current frame
        self.message = b''      # accumulates the partially-read frame
        self.lock = threading.Lock()
        self.wake_up = wake_up  # callback that interrupts the server's select()

    def _read_len(self):
        """Reads length of request.
        It's a safer alternative to self.socket.recv(4)
        """
        # recv() may return fewer than 4 bytes, so accumulate across calls.
        read = self.socket.recv(4 - len(self.message))
        if len(read) == 0:
            # if we read 0 bytes and self.message is empty, then
            # the client closed the connection
            if len(self.message) != 0:
                logger.error("can't read frame size from socket")
            self.close()
            return
        self.message += read
        if len(self.message) == 4:
            self.len, = struct.unpack('!i', self.message)
            if self.len < 0:
                logger.error("negative frame size, it seems client "
                             "doesn't use FramedTransport")
                self.close()
            elif self.len == 0:
                logger.error("empty frame, it's really strange")
                self.close()
            else:
                # Valid length: reset the buffer and start reading payload.
                self.message = b''
                self.status = WAIT_MESSAGE

    @socket_exception
    def read(self):
        """Reads data from stream and switch state."""
        assert self.status in (WAIT_LEN, WAIT_MESSAGE)
        if self.status == WAIT_LEN:
            self._read_len()
            # go back to the main loop here for simplicity instead of
            # falling through, even though there is a good chance that
            # the message is already available
        elif self.status == WAIT_MESSAGE:
            read = self.socket.recv(self.len - len(self.message))
            if len(read) == 0:
                logger.error("can't read frame from socket (get %d of "
                             "%d bytes)" % (len(self.message), self.len))
                self.close()
                return
            self.message += read
            if len(self.message) == self.len:
                self.status = WAIT_PROCESS

    @socket_exception
    def write(self):
        """Writes data from socket and switch state."""
        assert self.status == SEND_ANSWER
        # send() may accept only part of the buffer; keep the remainder.
        sent = self.socket.send(self.message)
        if sent == len(self.message):
            self.status = WAIT_LEN
            self.message = b''
            self.len = 0
        else:
            self.message = self.message[sent:]

    @locked
    def ready(self, all_ok, message):
        """Callback function for switching state and waking up main thread.
        This function is the only function witch can be called asynchronous.
        The ready can switch Connection to three states:
        WAIT_LEN if request was oneway.
        SEND_ANSWER if request was processed in normal way.
        CLOSED if request throws unexpected exception.
        The one wakes up main thread.
        """
        assert self.status == WAIT_PROCESS
        if not all_ok:
            self.close()
            self.wake_up()
            return
        self.len = 0
        if len(message) == 0:
            # it was a oneway request, do not write answer
            self.message = b''
            self.status = WAIT_LEN
        else:
            # Prepend the 4-byte big-endian length (framed transport).
            self.message = struct.pack('!i', len(message)) + message
            self.status = SEND_ANSWER
        self.wake_up()

    @locked
    def is_writeable(self):
        """Return True if connection should be added to write list of select"""
        return self.status == SEND_ANSWER

    # it's not necessary, but...
    @locked
    def is_readable(self):
        """Return True if connection should be added to read list of select"""
        return self.status in (WAIT_LEN, WAIT_MESSAGE)

    @locked
    def is_closed(self):
        """Returns True if connection is closed."""
        return self.status == CLOSED

    def fileno(self):
        """Returns the file descriptor of the associated socket."""
        return self.socket.fileno()

    def close(self):
        """Closes connection"""
        self.status = CLOSED
        self.socket.close()
class TNonblockingServer:
    """Non-blocking server."""

    def __init__(self,
                 processor,
                 lsocket,
                 inputProtocolFactory=None,
                 outputProtocolFactory=None,
                 threads=10):
        self.processor = processor
        self.socket = lsocket
        self.in_protocol = inputProtocolFactory or TBinaryProtocolFactory()
        self.out_protocol = outputProtocolFactory or self.in_protocol
        self.threads = int(threads)
        self.clients = {}           # fileno -> Connection
        self.tasks = queue.Queue()  # work items consumed by Worker threads
        # socketpair used solely to interrupt select() from other threads.
        self._read, self._write = socket.socketpair()
        self.prepared = False
        self._stop = False

    def setNumThreads(self, num):
        """Set the number of worker threads that should be created."""
        # implement ThreadPool interface
        assert not self.prepared, "Can't change number of threads after start"
        self.threads = num

    def prepare(self):
        """Prepares server for serve requests."""
        if self.prepared:
            return
        self.socket.listen()
        # Daemon worker threads drain self.tasks; they exit on the None
        # sentinel pushed by close().
        for _ in range(self.threads):
            thread = Worker(self.tasks)
            thread.setDaemon(True)
            thread.start()
        self.prepared = True

    def wake_up(self):
        """Wake up main thread.
        The server usually waits in select call in we should terminate one.
        The simplest way is using socketpair.
        Select always wait to read from the first socket of socketpair.
        In this case, we can just write anything to the second socket from
        socketpair.
        """
        self._write.send(b'1')

    def stop(self):
        """Stop the server.
        This method causes the serve() method to return. stop() may be invoked
        from within your handler, or from another thread.
        After stop() is called, serve() will return but the server will still
        be listening on the socket. serve() may then be called again to resume
        processing requests. Alternatively, close() may be called after
        serve() returns to close the server socket and shutdown all worker
        threads.
        """
        self._stop = True
        self.wake_up()

    def _select(self):
        """Does select on open connections."""
        # Always watch the listening socket and the wake-up pipe.
        readable = [self.socket.handle.fileno(), self._read.fileno()]
        writable = []
        # Iterate over a snapshot since closed connections are deleted.
        for i, connection in list(self.clients.items()):
            if connection.is_readable():
                readable.append(connection.fileno())
            if connection.is_writeable():
                writable.append(connection.fileno())
            if connection.is_closed():
                del self.clients[i]
        return select.select(readable, writable, readable)

    def handle(self):
        """Handle requests.
        WARNING! You must call prepare() BEFORE calling handle()
        """
        assert self.prepared, "You have to call prepare before handle"
        rset, wset, xset = self._select()
        for readable in rset:
            if readable == self._read.fileno():
                # don't care i just need to clean readable flag
                self._read.recv(1024)
            elif readable == self.socket.handle.fileno():
                # New incoming connection on the listening socket.
                client = self.socket.accept().handle
                self.clients[client.fileno()] = Connection(client,
                                                           self.wake_up)
            else:
                connection = self.clients[readable]
                connection.read()
                if connection.status == WAIT_PROCESS:
                    # Full frame received: hand it to a worker thread.
                    itransport = TTransport.TMemoryBuffer(connection.message)
                    otransport = TTransport.TMemoryBuffer()
                    iprot = self.in_protocol.getProtocol(itransport)
                    oprot = self.out_protocol.getProtocol(otransport)
                    self.tasks.put([self.processor, iprot, oprot,
                                    otransport, connection.ready])
        for writeable in wset:
            self.clients[writeable].write()
        for oob in xset:
            # Exceptional condition on the socket: drop the connection.
            self.clients[oob].close()
            del self.clients[oob]

    def close(self):
        """Closes the server."""
        # One None sentinel per worker unblocks and terminates each thread.
        for _ in range(self.threads):
            self.tasks.put([None, None, None, None, None])
        self.socket.close()
        self.prepared = False

    def serve(self):
        """Serve requests.
        Serve requests forever, or until stop() is called.
        """
        self._stop = False
        self.prepare()
        while not self._stop:
            self.handle()
| apache-2.0 |
nyrocron/eve-wspace | evewspace/API/tasks.py | 8 | 1097 | # Eve W-Space
# Copyright 2014 Andrew Austin and contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from celery import task
from API.models import APIKey, MemberAPIKey
from django.core.cache import cache
from django.contrib.auth import get_user_model
import sys
# NOTE(review): reload(sys) + setdefaultencoding is a Python-2-only hack that
# forces UTF-8 as the process-wide default codec; it can mask encoding bugs
# and does not exist on Python 3 -- confirm before porting.
reload(sys)
sys.setdefaultencoding("utf-8")

# Concrete user model configured for this Django project.
User = get_user_model()
@task()
def update_char_data():
    """Celery task: re-validate every API key of every user."""
    for user in User.objects.all():
        # A user may have registered several keys; refresh each of them.
        for api_key in user.api_keys.all():
            api_key.validate()
| apache-2.0 |
JanzTam/zulip | zerver/lib/narrow.py | 123 | 1633 | from zerver.decorator import JsonableError
def check_supported_events_narrow_filter(narrow):
    """Reject narrow operators that the events system cannot evaluate."""
    supported = ("stream", "topic", "sender", "is")
    for term in narrow:
        if term[0] not in supported:
            raise JsonableError("Operator %s not supported." % (term[0],))


def build_narrow_filter(narrow):
    """Return a predicate deciding whether an event matches *narrow*.

    Every term in *narrow* must hold for the predicate to return True.
    String comparisons (stream, topic, sender) are case-insensitive.
    """
    check_supported_events_narrow_filter(narrow)

    def narrow_filter(event):
        message = event["message"]
        flags = event["flags"]
        for element in narrow:
            operator, operand = element[0], element[1]
            if operator == "stream":
                if message["type"] != "stream" or \
                        operand.lower() != message["display_recipient"].lower():
                    return False
            elif operator == "topic":
                if message["type"] != "stream" or \
                        operand.lower() != message["subject"].lower():
                    return False
            elif operator == "sender":
                if operand.lower() != message["sender_email"].lower():
                    return False
            elif operator == "is":
                if operand == "private":
                    if message["type"] != "private":
                        return False
                elif operand == "starred":
                    if "starred" not in flags:
                        return False
                elif operand in ("alerted", "mentioned"):
                    # Both operands are backed by the "mentioned" flag.
                    if "mentioned" not in flags:
                        return False
        return True

    return narrow_filter
Paul-Ezell/cinder-1 | cinder/api/contrib/used_limits.py | 18 | 2128 | # Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from cinder.api import extensions
from cinder.api.openstack import wsgi
from cinder import quota
# Shared quota engine instance.
QUOTAS = quota.QUOTAS

# Policy-based authorization helper for the "used_limits" extension.
authorize = extensions.extension_authorizer('limits', 'used_limits')
class UsedLimitsController(wsgi.Controller):
    """Augments the limits response with per-project usage figures."""

    @wsgi.extends
    def index(self, req, resp_obj):
        context = req.environ['cinder.context']
        authorize(context)
        quotas = QUOTAS.get_project_quotas(context, context.project_id,
                                           usages=True)
        # Response field name -> quota resource name.
        quota_map = {
            'totalVolumesUsed': 'volumes',
            'totalGigabytesUsed': 'gigabytes',
            'totalSnapshotsUsed': 'snapshots',
            'totalBackupsUsed': 'backups',
            'totalBackupGigabytesUsed': 'backup_gigabytes',
        }
        used_limits = {
            display_name: quotas[resource]['in_use']
            for display_name, resource in quota_map.items()
            if resource in quotas
        }
        resp_obj.obj['limits']['absolute'].update(used_limits)
class Used_limits(extensions.ExtensionDescriptor):
    """Provide data on limited resources that are being used."""

    # Extension metadata consumed by the Cinder extension framework.
    name = "UsedLimits"
    alias = 'os-used-limits'
    namespace = "http://docs.openstack.org/volume/ext/used-limits/api/v1.1"
    updated = "2013-10-03T00:00:00+00:00"

    def get_controller_extensions(self):
        # Attach UsedLimitsController as an extension of the 'limits' resource.
        controller = UsedLimitsController()
        extension = extensions.ControllerExtension(self, 'limits', controller)
        return [extension]
| apache-2.0 |
suiyuan2009/tensorflow | tensorflow/contrib/framework/python/framework/tensor_util_test.py | 45 | 15289 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""tensor_util tests."""
# pylint: disable=unused-import
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
import numpy as np
from tensorflow.contrib.framework.python.framework import tensor_util
from tensorflow.contrib.framework.python.ops import variables as variables_lib2
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import variables as variables_lib
from tensorflow.python.platform import test
class LocalVariabletest(test.TestCase):
  """Tests for variables_lib2.local_variable."""

  def test_local_variable(self):
    with self.test_session() as sess:
      self.assertEquals([], variables_lib.local_variables())
      value0 = 42
      variables_lib2.local_variable(value0)
      value1 = 43
      variables_lib2.local_variable(value1)
      variables = variables_lib.local_variables()
      self.assertEquals(2, len(variables))
      # Fetching uninitialized local variables must fail.
      self.assertRaises(errors_impl.OpError, sess.run, variables)
      variables_lib.initialize_variables(variables).run()
      self.assertAllEqual(set([value0, value1]), set(sess.run(variables)))
class ReduceSumNTest(test.TestCase):
  """Tests for tensor_util.reduce_sum_n."""

  def test_reduce_sum_n(self):
    with self.test_session():
      a = constant_op.constant(1)
      b = constant_op.constant([2])
      c = constant_op.constant([[3, 4], [5, 6]])
      # 1 + 2 + (3 + 4 + 5 + 6) == 21, across mixed ranks.
      self.assertEqual(21, tensor_util.reduce_sum_n([a, b, c]).eval())
class AssertScalarIntTest(test.TestCase):
  """Tests for tensor_util.assert_scalar_int."""

  def test_assert_scalar_int(self):
    # Accepts scalar int32/int64 tensors and plain Python ints.
    tensor_util.assert_scalar_int(constant_op.constant(3, dtype=dtypes.int32))
    tensor_util.assert_scalar_int(constant_op.constant(3, dtype=dtypes.int64))
    tensor_util.assert_scalar_int(3)
    # Rejects non-integer dtypes and non-scalar shapes.
    with self.assertRaisesRegexp(ValueError, "Expected integer"):
      tensor_util.assert_scalar_int(
          constant_op.constant(
              3, dtype=dtypes.float32))
    with self.assertRaisesRegexp(ValueError, "Expected scalar"):
      tensor_util.assert_scalar_int(
          constant_op.constant(
              [3, 4], dtype=dtypes.int32))
class WithShapeTest(test.TestCase):
  """Tests for tensor_util.with_shape and tensor_util.with_same_shape."""

  def _assert_with_shape(self, tensor, expected_value, expected_shape,
                         unexpected_shapes):
    # Each unexpected shape must be rejected either statically (ValueError)
    # or at run time (OpError with the formatted "Wrong shape" message).
    for unexpected_shape in unexpected_shapes:
      self.assertRaises(ValueError, tensor_util.with_shape, unexpected_shape,
                        tensor)
      pattern = (
          r"\[Wrong shape for %s \[expected\] \[actual\].\] \[%s\] \[%s\]" %
          (tensor.name, " ".join([str(dim) for dim in unexpected_shape]),
           " ".join([str(dim) for dim in expected_shape])))
      self.assertRaisesRegexp(errors_impl.OpError,
                              re.compile(pattern),
                              tensor_util.with_shape(
                                  constant_op.constant(unexpected_shape),
                                  tensor).eval)
      expected_placeholder = array_ops.placeholder(dtypes.float32)
      self.assertRaisesRegexp(errors_impl.OpError,
                              re.compile(pattern),
                              tensor_util.with_same_shape(expected_placeholder,
                                                          tensor).eval,
                              {expected_placeholder: np.ones(unexpected_shape)})
    # A statically-known matching shape is a no-op (same tensor object).
    self.assertIs(tensor, tensor_util.with_shape(expected_shape, tensor))
    self.assertIs(
        tensor,
        tensor_util.with_same_shape(
            constant_op.constant(
                1, shape=expected_shape), tensor))
    tensor_with_shape = tensor_util.with_shape(
        constant_op.constant(expected_shape), tensor)
    np.testing.assert_array_equal(expected_value, tensor_with_shape.eval())
    tensor_with_same_shape = tensor_util.with_same_shape(expected_placeholder,
                                                         tensor)
    np.testing.assert_array_equal(expected_value,
                                  tensor_with_same_shape.eval({
                                      expected_placeholder:
                                          np.ones(expected_shape)
                                  }))

  def test_with_shape_invalid_expected_shape(self):
    with self.test_session():
      self.assertRaisesRegexp(ValueError, "Invalid rank",
                              tensor_util.with_shape, [[1], [2]],
                              constant_op.constant(1.0))

  def test_with_shape_invalid_type(self):
    with self.test_session():
      self.assertRaisesRegexp(ValueError, "Invalid dtype",
                              tensor_util.with_shape, [1.1],
                              constant_op.constant([1.0]))
      self.assertRaisesRegexp(ValueError, "Invalid dtype",
                              tensor_util.with_shape,
                              np.array([1.1]), constant_op.constant(1.0))
      self.assertRaisesRegexp(ValueError, "Invalid dtype",
                              tensor_util.with_shape,
                              constant_op.constant(np.array([1.1])),
                              constant_op.constant(1.0))

  def test_with_shape_0(self):
    with self.test_session():
      value = 42
      shape = [0]
      unexpected_shapes = [[1], [2], [1, 1]]
      self._assert_with_shape(
          constant_op.constant(
              value, shape=shape),
          value,
          shape,
          unexpected_shapes)

  def test_with_shape_1(self):
    with self.test_session():
      value = [42]
      shape = [1]
      unexpected_shapes = [[0], [2], [1, 1]]
      self._assert_with_shape(
          constant_op.constant(
              value, shape=shape),
          value,
          shape,
          unexpected_shapes)

  def test_with_shape_2(self):
    with self.test_session():
      value = [42, 43]
      shape = [2]
      unexpected_shapes = [[0], [1], [2, 1]]
      self._assert_with_shape(
          constant_op.constant(
              value, shape=shape),
          value,
          shape,
          unexpected_shapes)

  def test_with_shape_2x2(self):
    with self.test_session():
      value = [[42, 43], [44, 45]]
      shape = [2, 2]
      unexpected_shapes = [[0], [1], [2, 1]]
      self._assert_with_shape(
          constant_op.constant(
              value, shape=shape),
          value,
          shape,
          unexpected_shapes)

  def test_with_shape_none(self):
    # Fully-unknown static shape: checks must happen at run time.
    with self.test_session():
      tensor_no_shape = array_ops.placeholder(dtypes.float32)
      compatible_shape = [2, 2]
      with_present_2x2 = tensor_util.with_shape(compatible_shape,
                                                tensor_no_shape)
      self.assertEquals(compatible_shape, with_present_2x2.get_shape().dims)
      with_future_2x2 = tensor_util.with_shape(
          constant_op.constant(compatible_shape), tensor_no_shape)
      array_2x2 = [[42.0, 43.0], [44.0, 45.0]]
      for tensor_2x2 in [with_present_2x2, with_future_2x2]:
        np.testing.assert_array_equal(array_2x2,
                                      tensor_2x2.eval({
                                          tensor_no_shape: array_2x2
                                      }))
        self.assertRaisesRegexp(errors_impl.OpError, "Wrong shape",
                                tensor_2x2.eval,
                                {tensor_no_shape: [42.0, 43.0]})
        self.assertRaisesRegexp(errors_impl.OpError, "Wrong shape",
                                tensor_2x2.eval, {tensor_no_shape: [42.0]})

  def test_with_shape_partial(self):
    # Partially-known static shape (?, 2): incompatible shapes are rejected
    # statically, compatible ones validated at run time.
    with self.test_session():
      tensor_partial_shape = array_ops.placeholder(dtypes.float32)
      tensor_partial_shape.set_shape([None, 2])
      for incompatible_shape in [[0], [1]]:
        self.assertRaisesRegexp(
            ValueError, r"Shapes \(\?, 2\) and \([01],\) are not compatible",
            tensor_util.with_shape, incompatible_shape, tensor_partial_shape)
      for incompatible_shape in [[1, 2, 1]]:
        self.assertRaisesRegexp(ValueError, "Dimensions must be equal",
                                tensor_util.with_shape, incompatible_shape,
                                tensor_partial_shape)
      for incompatible_shape in [[2, 1]]:
        self.assertRaisesRegexp(
            ValueError, r"Shapes \(\?, 2\) and \(2, 1\) are not compatible",
            tensor_util.with_shape, incompatible_shape, tensor_partial_shape)
      compatible_shape = [2, 2]
      with_present_2x2 = tensor_util.with_shape(compatible_shape,
                                                tensor_partial_shape)
      self.assertEquals(compatible_shape, with_present_2x2.get_shape().dims)
      with_future_2x2 = tensor_util.with_shape(
          constant_op.constant(compatible_shape), tensor_partial_shape)
      array_2x2 = [[42.0, 43.0], [44.0, 45.0]]
      for tensor_2x2 in [with_present_2x2, with_future_2x2]:
        np.testing.assert_array_equal(array_2x2,
                                      tensor_2x2.eval({
                                          tensor_partial_shape: array_2x2
                                      }))
        self.assertRaises(ValueError, tensor_2x2.eval,
                          {tensor_partial_shape: [42.0, 43.0]})
        self.assertRaises(ValueError, tensor_2x2.eval,
                          {tensor_partial_shape: [42.0]})
class RemoveSqueezableDimensionsTest(test.TestCase):
  """Tests for tensor_util.remove_squeezable_dimensions.

  Each test method exercises one combination of:
    * static (constant) vs. dynamic (placeholder) predictions/labels, and
    * an optional extra trailing singleton dimension on predictions OR labels.
  The op is expected to squeeze the extra dimension so that the squeezed
  predictions/labels match the original flat values.
  """
  def testRemoveSqueezableDimensions(self):
    self._testRemoveSqueezableDimensions(
        predictions_have_static_shape=False,
        predictions_have_extra_dim=False,
        labels_have_static_shape=False,
        labels_have_extra_dim=False)
  def testRemoveSqueezableDimensions_extraLabelDim(self):
    self._testRemoveSqueezableDimensions(
        predictions_have_static_shape=False,
        predictions_have_extra_dim=False,
        labels_have_static_shape=False,
        labels_have_extra_dim=True)
  def testRemoveSqueezableDimensions_staticLabel(self):
    self._testRemoveSqueezableDimensions(
        predictions_have_static_shape=False,
        predictions_have_extra_dim=False,
        labels_have_static_shape=True,
        labels_have_extra_dim=False)
  def testRemoveSqueezableDimensions_staticLabel_extraLabelDim(self):
    self._testRemoveSqueezableDimensions(
        predictions_have_static_shape=False,
        predictions_have_extra_dim=False,
        labels_have_static_shape=True,
        labels_have_extra_dim=True)
  def testRemoveSqueezableDimensions_extraPredictionDim(self):
    self._testRemoveSqueezableDimensions(
        predictions_have_static_shape=False,
        predictions_have_extra_dim=True,
        labels_have_static_shape=False,
        labels_have_extra_dim=False)
  def testRemoveSqueezableDimensions_extraPredictionDim_staticLabel(self):
    self._testRemoveSqueezableDimensions(
        predictions_have_static_shape=False,
        predictions_have_extra_dim=True,
        labels_have_static_shape=True,
        labels_have_extra_dim=False)
  def testRemoveSqueezableDimensions_staticPrediction(self):
    self._testRemoveSqueezableDimensions(
        predictions_have_static_shape=True,
        predictions_have_extra_dim=False,
        labels_have_static_shape=False,
        labels_have_extra_dim=False)
  def testRemoveSqueezableDimensions_staticPrediction_extraLabelDim(self):
    self._testRemoveSqueezableDimensions(
        predictions_have_static_shape=True,
        predictions_have_extra_dim=False,
        labels_have_static_shape=False,
        labels_have_extra_dim=True)
  def testRemoveSqueezableDimensions_static(self):
    self._testRemoveSqueezableDimensions(
        predictions_have_static_shape=True,
        predictions_have_extra_dim=False,
        labels_have_static_shape=True,
        labels_have_extra_dim=False)
  def testRemoveSqueezableDimensions_static_extraLabelDim(self):
    self._testRemoveSqueezableDimensions(
        predictions_have_static_shape=True,
        predictions_have_extra_dim=False,
        labels_have_static_shape=True,
        labels_have_extra_dim=True)
  def testRemoveSqueezableDimensions_staticPrediction_extraPredictionDim(self):
    self._testRemoveSqueezableDimensions(
        predictions_have_static_shape=True,
        predictions_have_extra_dim=True,
        labels_have_static_shape=False,
        labels_have_extra_dim=False)
  def testRemoveSqueezableDimensions_static_extraPredictionDim(self):
    self._testRemoveSqueezableDimensions(
        predictions_have_static_shape=True,
        predictions_have_extra_dim=True,
        labels_have_static_shape=True,
        labels_have_extra_dim=False)
  # TODO(ptucker): Replace this with parameterized test.
  def _testRemoveSqueezableDimensions(self, predictions_have_static_shape,
                                      predictions_have_extra_dim,
                                      labels_have_static_shape,
                                      labels_have_extra_dim):
    """Builds the predictions/labels tensors per the flags and checks results.

    Args:
      predictions_have_static_shape: If True, predictions are a constant;
        otherwise a placeholder fed at run time.
      predictions_have_extra_dim: If True, predictions carry a trailing
        singleton dimension that should be squeezed away.
      labels_have_static_shape: Same as above, for labels.
      labels_have_extra_dim: Same as above, for labels.
    """
    # Only one side may carry the extra dimension at a time.
    assert not (predictions_have_extra_dim and labels_have_extra_dim)
    predictions_value = (0, 1, 1, 0, 0, 1, 0)
    labels_value = (0, 0, 1, 1, 0, 0, 0)
    # Wrap each element in a singleton list when an extra dim is requested,
    # e.g. (0, 1) -> [[0], [1]].
    input_predictions_value = ([[p] for p in predictions_value] if
                               predictions_have_extra_dim else
                               predictions_value)
    input_labels_value = ([[l] for l in labels_value] if labels_have_extra_dim
                          else labels_value)
    with ops.Graph().as_default() as g:
      feed_dict = {}
      if predictions_have_static_shape:
        predictions = constant_op.constant(
            input_predictions_value, dtype=dtypes.int32)
      else:
        predictions = array_ops.placeholder(
            dtype=dtypes.int32, name="predictions")
        feed_dict[predictions] = input_predictions_value
      if labels_have_static_shape:
        labels = constant_op.constant(input_labels_value, dtype=dtypes.int32)
      else:
        labels = array_ops.placeholder(dtype=dtypes.int32, name="labels")
        feed_dict[labels] = input_labels_value
      squeezed_predictions, squeezed_labels = (
          tensor_util.remove_squeezable_dimensions(predictions, labels))
      with self.test_session(g):
        variables_lib.local_variables_initializer().run()
        # Squeezed outputs must equal the original flat values regardless of
        # which side carried the extra dimension.
        self.assertAllClose(
            predictions_value, squeezed_predictions.eval(feed_dict=feed_dict))
        self.assertAllClose(
            labels_value, squeezed_labels.eval(feed_dict=feed_dict))
# Run the test suite when this file is executed directly.
if __name__ == "__main__":
  test.main()
| apache-2.0 |
googleinterns/smart-content-summary | classifier/run_classifier.py | 1 | 12969 | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""BERT-based Classifier runner."""
from __future__ import absolute_import, division, print_function
from absl import flags
import run_classifier_utils
import tensorflow as tf
# Global flag container; values are populated by absl when the program starts.
FLAGS = flags.FLAGS
## Required parameters
flags.DEFINE_string("training_file", None,
                    "Path to the TFRecord training file.")
flags.DEFINE_string("eval_file", None, "Path to the the TFRecord dev file.")
flags.DEFINE_string("model_config_file", None,
                    "The config json file specifying the model architecture.")
flags.DEFINE_string(
    "output_dir", None,
    "The output directory where the model checkpoints will be written. If "
    "`init_checkpoint' is not provided when exporting, the latest checkpoint "
    "from this directory will be exported.")
flags.DEFINE_integer("num_categories", None, "Number of categories in the "
                     "classification")
flags.DEFINE_string('classifier_type', None, 'The type of classification. '
                    '["Grammar", "Meaning"]')
## Other parameters
flags.DEFINE_string(
    "init_checkpoint", None,
    "Initial checkpoint, usually from a pre-trained BERT model. In the case of "
    "exporting, one can optionally provide path to a particular checkpoint to "
    "be exported here.")
flags.DEFINE_integer(
    "max_seq_length", 128,
    "The maximum total input sequence length after WordPiece tokenization. "
    "Sequences longer than this will be truncated, and sequences shorter than "
    "this will be padded.")
# Mode selection: at least one of do_train/do_eval/do_export must be set
# (enforced in main()).
flags.DEFINE_bool("do_train", False, "Whether to run training.")
flags.DEFINE_bool("do_eval", False, "Whether to run eval on the dev set.")
flags.DEFINE_bool("do_export", False, "Whether to export a trained model.")
flags.DEFINE_bool("eval_all_checkpoints", False,
                  "Run through all checkpoints.")
flags.DEFINE_integer(
    "eval_timeout", 600,
    "The maximum amount of time (in seconds) for eval worker to wait between "
    "checkpoints.")
flags.DEFINE_integer("train_batch_size", 32, "Total batch size for training.")
flags.DEFINE_integer("eval_batch_size", 8, "Total batch size for eval.")
flags.DEFINE_integer("predict_batch_size", 8, "Total batch size for predict.")
flags.DEFINE_float("learning_rate", 3e-5,
                   "The initial learning rate for Adam.")
flags.DEFINE_float("num_train_epochs", 3.0,
                   "Total number of training epochs to perform.")
flags.DEFINE_float(
    "warmup_proportion", 0.1,
    "Proportion of training to perform linear learning rate warmup for. "
    "E.g., 0.1 = 10% of training.")
flags.DEFINE_integer("save_checkpoints_steps", 1000,
                     "How often to save the model checkpoint.")
flags.DEFINE_integer("iterations_per_loop", 1000,
                     "How many steps to make in each estimator call.")
flags.DEFINE_integer(
    "num_train_examples", None,
    "Number of training examples. This is used to determine the number of "
    "training steps to respect the `num_train_epochs` flag.")
flags.DEFINE_integer(
    "num_eval_examples", None,
    "Number of eval examples. This is used to determine the number of "
    "eval steps to go through the eval file once.")
## TPU/cluster parameters (only used when use_tpu is True).
flags.DEFINE_bool("use_tpu", False, "Whether to use TPU or GPU/CPU.")
flags.DEFINE_string(
    "tpu_name", None,
    "The Cloud TPU to use for training. This should be either the name "
    "used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 "
    "url.")
flags.DEFINE_string(
    "tpu_zone", None,
    "[Optional] GCE zone where the Cloud TPU is located in. If not "
    "specified, we will attempt to automatically detect the GCE project from "
    "metadata.")
flags.DEFINE_string(
    "gcp_project", None,
    "[Optional] Project name for the Cloud TPU-enabled project. If not "
    "specified, we will attempt to automatically detect the GCE project from "
    "metadata.")
flags.DEFINE_string("master", None,
                    "Optional address of the master for the workers.")
flags.DEFINE_string("export_path", None, "Path to save the exported model.")
def file_based_input_fn_builder(input_file, max_seq_length, is_training,
                                drop_remainder, classifier_type):
    """Creates an `input_fn` closure to be passed to TPUEstimator.

    Args:
      input_file: Path to a TFRecord file of serialized tf.Examples.
      max_seq_length: Fixed length of every id/mask/segment feature.
      is_training: If True, the dataset repeats forever and is shuffled.
      drop_remainder: Whether to drop a final batch smaller than batch_size.
      classifier_type: "Meaning" (paired source/summary features) or
        "Grammar" (single-sequence features).

    Returns:
      A callable taking the estimator `params` dict and returning a
      `tf.data.Dataset` of decoded, batched feature dicts.

    Raises:
      ValueError: If `classifier_type` is neither "Grammar" nor "Meaning".
    """
    sequence_spec = tf.FixedLenFeature([max_seq_length], tf.int64)
    if classifier_type == "Meaning":
        feature_keys = ("input_ids_source", "input_mask_source",
                        "segment_ids_source", "input_ids_summary",
                        "input_mask_summary", "segment_ids_summary")
    elif classifier_type == "Grammar":
        feature_keys = ("input_ids", "input_mask", "segment_ids")
    else:
        raise ValueError("Classifier type must be either Grammar or Meaning")
    name_to_features = {key: sequence_spec for key in feature_keys}
    name_to_features["labels"] = tf.FixedLenFeature([1], tf.int64)

    def _to_example(serialized_record):
        """Decodes one serialized record to a TensorFlow example."""
        example = tf.parse_single_example(serialized_record, name_to_features)
        # tf.Example only supports tf.int64, but the TPU only supports
        # tf.int32, so every int64 tensor is downcast here.
        for key in list(example.keys()):
            tensor = example[key]
            if tensor.dtype == tf.int64:
                example[key] = tf.to_int32(tensor)
        return example

    def input_fn(params):
        """The actual input function."""
        dataset = tf.data.TFRecordDataset(input_file)
        if is_training:
            # Training wants endless, shuffled data; eval reads the file
            # once, in order.
            dataset = dataset.repeat().shuffle(buffer_size=100)
        return dataset.apply(
            tf.contrib.data.map_and_batch(
                _to_example,
                batch_size=params["batch_size"],
                drop_remainder=drop_remainder))

    return input_fn
def _calculate_steps(num_examples,
batch_size,
num_epochs,
warmup_proportion=0):
"""Calculates the number of steps.
Args:
num_examples: Number of examples in the dataset.
batch_size: Batch size.
num_epochs: How many times we should go through the dataset.
warmup_proportion: Proportion of warmup steps.
Returns:
Tuple (number of steps, number of warmup steps).
"""
steps = int(num_examples / batch_size * num_epochs)
warmup_steps = int(warmup_proportion * steps)
return steps, warmup_steps
def main(_):
    """Trains, evaluates and/or exports the BERT-based classifier per FLAGS."""
    tf.logging.set_verbosity(tf.logging.INFO)
    if not (FLAGS.do_train or FLAGS.do_eval or FLAGS.do_export):
        raise ValueError(
            "At least one of `do_train`, `do_eval` or `do_export` must"
            " be True.")
    model_config = run_classifier_utils.LaserTaggerConfig.from_json_file(
        FLAGS.model_config_file)
    # Positional embeddings cap the usable sequence length.
    if FLAGS.max_seq_length > model_config.max_position_embeddings:
        raise ValueError(
            "Cannot use sequence length %d because the BERT model "
            "was only trained up to sequence length %d" %
            (FLAGS.max_seq_length, model_config.max_position_embeddings))
    if not FLAGS.do_export:
        tf.io.gfile.makedirs(FLAGS.output_dir)
    tpu_cluster_resolver = None
    if FLAGS.use_tpu and FLAGS.tpu_name:
        tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(
            FLAGS.tpu_name, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project)
    is_per_host = tf.contrib.tpu.InputPipelineConfig.PER_HOST_V2
    run_config = tf.contrib.tpu.RunConfig(
        cluster=tpu_cluster_resolver,
        master=FLAGS.master,
        model_dir=FLAGS.output_dir,
        save_checkpoints_steps=FLAGS.save_checkpoints_steps,
        keep_checkpoint_max=20,
        tpu_config=tf.contrib.tpu.TPUConfig(
            iterations_per_loop=FLAGS.iterations_per_loop,
            per_host_input_for_training=is_per_host,
            eval_training_input_configuration=tf.contrib.tpu.InputPipelineConfig.
            SLICED))
    # Train step counts are only meaningful (and computable) when training.
    if FLAGS.do_train:
        num_train_steps, num_warmup_steps = _calculate_steps(
            FLAGS.num_train_examples, FLAGS.train_batch_size,
            FLAGS.num_train_epochs, FLAGS.warmup_proportion)
    else:
        num_train_steps, num_warmup_steps = None, None
    model_fn = run_classifier_utils.ModelFnBuilder(
        config=model_config,
        num_categories=FLAGS.num_categories,
        init_checkpoint=FLAGS.init_checkpoint,
        learning_rate=FLAGS.learning_rate,
        num_train_steps=num_train_steps,
        num_warmup_steps=num_warmup_steps,
        use_tpu=FLAGS.use_tpu,
        use_one_hot_embeddings=FLAGS.use_tpu,
        max_seq_length=FLAGS.max_seq_length,
        classifier_type=FLAGS.classifier_type).build()
    # If TPU is not available, this will fall back to normal Estimator on CPU
    # or GPU.
    estimator = tf.contrib.tpu.TPUEstimator(
        use_tpu=FLAGS.use_tpu,
        model_fn=model_fn,
        config=run_config,
        train_batch_size=FLAGS.train_batch_size,
        eval_batch_size=FLAGS.eval_batch_size,
        predict_batch_size=FLAGS.predict_batch_size)
    if FLAGS.do_train:
        train_input_fn = file_based_input_fn_builder(
            input_file=FLAGS.training_file,
            max_seq_length=FLAGS.max_seq_length,
            is_training=True,
            drop_remainder=True,
            classifier_type=FLAGS.classifier_type)
        estimator.train(input_fn=train_input_fn, max_steps=num_train_steps)
    if FLAGS.do_eval:
        # This tells the estimator to run through the entire set.
        eval_steps = None
        # However, if running eval on the TPU, you will need to specify the
        # number of steps.
        if FLAGS.use_tpu:
            # Eval will be slightly WRONG on the TPU because it will truncate
            # the last batch.
            eval_steps, _ = _calculate_steps(FLAGS.num_eval_examples,
                                             FLAGS.eval_batch_size, 1)
        eval_drop_remainder = True if FLAGS.use_tpu else False
        eval_input_fn = file_based_input_fn_builder(
            input_file=FLAGS.eval_file,
            max_seq_length=FLAGS.max_seq_length,
            is_training=False,
            drop_remainder=eval_drop_remainder,
            classifier_type=FLAGS.classifier_type)
        # Evaluate each new checkpoint as it appears; checkpoints_iterator
        # blocks up to eval_timeout seconds waiting for the next one.
        for ckpt in tf.contrib.training.checkpoints_iterator(
                FLAGS.output_dir, timeout=FLAGS.eval_timeout):
            result = estimator.evaluate(input_fn=eval_input_fn,
                                        checkpoint_path=ckpt,
                                        steps=eval_steps)
            for key in sorted(result):
                tf.logging.info(" %s = %s", key, str(result[key]))
    if FLAGS.do_export:
        tf.logging.info("Exporting the model...")
        def serving_input_fn():
            # Serving receives raw int64 id/mask/segment tensors of dynamic
            # batch size and sequence length; the same dict doubles as
            # features and receiver_tensors.
            def _input_fn():
                if FLAGS.classifier_type == "Meaning":
                    features = {
                        "input_ids_source": tf.placeholder(tf.int64, [None, None]),
                        "input_mask_source": tf.placeholder(tf.int64, [None, None]),
                        "segment_ids_source": tf.placeholder(tf.int64, [None, None]),
                        "input_ids_summary": tf.placeholder(tf.int64, [None, None]),
                        "input_mask_summary": tf.placeholder(tf.int64, [None, None]),
                        "segment_ids_summary": tf.placeholder(tf.int64, [None, None])
                    }
                elif FLAGS.classifier_type == "Grammar":
                    features = {
                        "input_ids": tf.placeholder(tf.int64, [None, None]),
                        "input_mask": tf.placeholder(tf.int64, [None, None]),
                        "segment_ids": tf.placeholder(tf.int64, [None, None]),
                    }
                else:
                    raise ValueError(
                        "Classifier type must be either Grammar or Meaning.")
                return tf.estimator.export.ServingInputReceiver(
                    features=features, receiver_tensors=features)
            return _input_fn
        estimator.export_saved_model(FLAGS.export_path,
                                     serving_input_fn(),
                                     checkpoint_path=FLAGS.init_checkpoint)
if __name__ == "__main__":
    # These flags have no usable defaults, so fail fast when they are missing.
    flags.mark_flag_as_required("model_config_file")
    flags.mark_flag_as_required("num_categories")
    flags.mark_flag_as_required("classifier_type")
    tf.app.run()
| apache-2.0 |
mfherbst/spack | var/spack/repos/builtin/packages/templight-tools/package.py | 2 | 1653 | ##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class TemplightTools(CMakePackage):
    """Supporting tools for the Templight Profiler"""
    homepage = "https://github.com/mikael-s-persson/templight-tools"
    git = "https://github.com/mikael-s-persson/templight-tools.git"
    # Only a development version is packaged; it tracks upstream master.
    version('develop', branch='master')
    # CMake drives the build but is not needed at run time.
    depends_on('cmake @2.8.7:', type='build')
    # Boost components used by the tools, per upstream build requirements.
    depends_on('boost @1.48.1: +filesystem +graph +program_options +test')
| lgpl-2.1 |
bfirsh/django-old | tests/regressiontests/views/urls.py | 10 | 4935 | # coding: utf-8
from os import path
from django.conf.urls.defaults import *
from models import *
import views
base_dir = path.dirname(path.abspath(__file__))
media_dir = path.join(base_dir, 'media')
locale_dir = path.join(base_dir, 'locale')
# Keyword arguments for django.views.i18n.javascript_catalog: the gettext
# domain and the package(s) whose JS translations should be served.
js_info_dict = {
    'domain': 'djangojs',
    'packages': ('regressiontests.views',),
}
js_info_dict_multi_packages1 = {
    'domain': 'djangojs',
    'packages': ('regressiontests.views.app1', 'regressiontests.views.app2'),
}
js_info_dict_multi_packages2 = {
    'domain': 'djangojs',
    'packages': ('regressiontests.views.app3', 'regressiontests.views.app4'),
}
# Shared kwargs for the date-based generic views wired up below.
date_based_info_dict = {
    'queryset': Article.objects.all(),
    'date_field': 'date_created',
    'month_format': '%m',
}
numeric_days_info_dict = dict(date_based_info_dict, day_format='%d')
date_based_datefield_info_dict = dict(date_based_info_dict, queryset=DateArticle.objects.all())
# Base URL map; dotted-string view references are the legacy Django style.
urlpatterns = patterns('',
    (r'^$', views.index_page),
    # Default views
    (r'^shortcut/(\d+)/(.*)/$', 'django.views.defaults.shortcut'),
    (r'^non_existing_url/', 'django.views.defaults.page_not_found'),
    (r'^server_error/', 'django.views.defaults.server_error'),
    # i18n views
    (r'^i18n/', include('django.conf.urls.i18n')),
    (r'^jsi18n/$', 'django.views.i18n.javascript_catalog', js_info_dict),
    (r'^jsi18n_multi_packages1/$', 'django.views.i18n.javascript_catalog', js_info_dict_multi_packages1),
    (r'^jsi18n_multi_packages2/$', 'django.views.i18n.javascript_catalog', js_info_dict_multi_packages2),
    # Static views
    (r'^site_media/(?P<path>.*)$', 'django.views.static.serve', {'document_root': media_dir}),
    # Special URLs for particular regression cases.
    url(u'^中文/$', 'regressiontests.views.views.redirect'),
    url(u'^中文/target/$', 'regressiontests.views.views.index_page'),
)
# Date-based generic views.
urlpatterns += patterns('django.views.generic.date_based',
    (r'^date_based/object_detail/(?P<year>\d{4})/(?P<month>\d{1,2})/(?P<day>\d{1,2})/(?P<slug>[-\w]+)/$',
     'object_detail',
     dict(slug_field='slug', **date_based_info_dict)),
    # allow_future=True lets articles dated in the future be resolved.
    (r'^date_based/object_detail/(?P<year>\d{4})/(?P<month>\d{1,2})/(?P<day>\d{1,2})/(?P<slug>[-\w]+)/allow_future/$',
     'object_detail',
     dict(allow_future=True, slug_field='slug', **date_based_info_dict)),
    (r'^date_based/archive_day/(?P<year>\d{4})/(?P<month>\d{1,2})/(?P<day>\d{1,2})/$',
     'archive_day',
     numeric_days_info_dict),
    (r'^date_based/archive_month/(?P<year>\d{4})/(?P<month>\d{1,2})/$',
     'archive_month',
     date_based_info_dict),
    # Same view, but backed by a DateField instead of a DateTimeField.
    (r'^date_based/datefield/archive_month/(?P<year>\d{4})/(?P<month>\d{1,2})/$',
     'archive_month',
     date_based_datefield_info_dict),
)
# crud generic views.
urlpatterns += patterns('django.views.generic.create_update',
    # login_required=True exercises the authentication wrapper.
    (r'^create_update/member/create/article/$', 'create_object',
     dict(login_required=True, model=Article)),
    (r'^create_update/create/article/$', 'create_object',
     dict(post_save_redirect='/views/create_update/view/article/%(slug)s/',
          model=Article)),
    (r'^create_update/update/article/(?P<slug>[-\w]+)/$', 'update_object',
     dict(post_save_redirect='/views/create_update/view/article/%(slug)s/',
          slug_field='slug', model=Article)),
    (r'^create_update/create_custom/article/$', views.custom_create),
    (r'^create_update/delete/article/(?P<slug>[-\w]+)/$', 'delete_object',
     dict(post_delete_redirect='/views/create_update/', slug_field='slug',
          model=Article)),
    # No post_save_redirect and no get_absolute_url on model.
    (r'^create_update/no_redirect/create/article/$', 'create_object',
     dict(model=Article)),
    (r'^create_update/no_redirect/update/article/(?P<slug>[-\w]+)/$',
     'update_object', dict(slug_field='slug', model=Article)),
    # get_absolute_url on model, but no passed post_save_redirect.
    (r'^create_update/no_url/create/article/$', 'create_object',
     dict(model=UrlArticle)),
    (r'^create_update/no_url/update/article/(?P<slug>[-\w]+)/$',
     'update_object', dict(slug_field='slug', model=UrlArticle)),
)
# a view that raises an exception for the debug view
urlpatterns += patterns('',
    (r'^raises/$', views.raises),
    (r'^raises404/$', views.raises404),
)
# redirects, both temporary and permanent, with non-ASCII targets
urlpatterns += patterns('django.views.generic.simple',
    ('^nonascii_redirect/$', 'redirect_to',
     {'url': u'/views/中文/target/', 'permanent': False}),
    ('^permanent_nonascii_redirect/$', 'redirect_to',
     {'url': u'/views/中文/target/', 'permanent': True}),
)
# Named URLs used by reverse() in the regression tests.
urlpatterns += patterns('regressiontests.views.views',
    url(r'view_exception/(?P<n>\d+)/$', 'view_exception', name='view_exception'),
    url(r'template_exception/(?P<n>\d+)/$', 'template_exception', name='template_exception'),
)
| bsd-3-clause |
nharraud/b2share | invenio/legacy/websubmit/functions/Video_Processing.py | 13 | 4374 | # This file is part of Invenio.
# Copyright (C) 2011 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""WebSubmit function - Video processing.
"""
__revision__ = "$Id$"
import os
from invenio.utils.json import json_decode_file
from invenio.ext.logging import register_exception
from invenio.modules.encoder.config import CFG_BIBENCODE_TEMPLATE_BATCH_SUBMISSION
from invenio.modules.encoder.utils import generate_timestamp
from invenio.modules.encoder.batch_engine import create_job_from_dictionary
from invenio.config import CFG_SITE_ADMIN_EMAIL
def Video_Processing(parameters, curdir, form, user_info=None):
    """
    Perform all the required processing of the video.

    Reads the uploaded file's path and name (and, when configured, its
    aspect ratio and title) from the submission directory, fills in a
    BibEncode batch template and submits it as an encoding job.

    Parameters are:
    * "batch_template": to specify the absolute path to a
      configuration describe which manipulation should the uploaded file
      receive. If empty, will use by default
      etc/bibencode/batch_template_submission.json
    * "aspect": to specify in which form element the aspect will be available
    * "title": to specify in which form element the title will be available
    """
    ## Read the batch template for submissions
    if parameters.get('batch_template'):
        try:
            batch_template = json_decode_file(parameters.get('batch_template'))
        except:
            register_exception(prefix="The given batch template was not readable")
            raise
    else:
        batch_template = json_decode_file(CFG_BIBENCODE_TEMPLATE_BATCH_SUBMISSION)
    ## Handle the filepath
    file_storing_path = os.path.join(curdir, "files", str(user_info['uid']), "NewFile", 'filepath')
    try:
        fp = open(file_storing_path)
        fullpath = fp.read()
        fp.close()
        batch_template['input'] = fullpath
    except:
        register_exception(prefix="The file containing the path to the video was not readable")
        raise
    ## Handle the filename
    file_storing_name = os.path.join(curdir, "files", str(user_info['uid']), "NewFile", 'filename')
    try:
        fp = open(file_storing_name)
        filename = fp.read()
        fp.close()
        batch_template['bibdoc_master_docname'] = os.path.splitext(os.path.split(filename)[1])[0]
        batch_template['bibdoc_master_extension'] = os.path.splitext(filename)[1]
        batch_template['submission_filename'] = filename
    except:
        register_exception(prefix="The file containing the original filename of the video was not readable")
        raise
    ## Handle the aspect ratio
    if parameters.get('aspect'):
        try:
            file_storing_aspect = os.path.join(curdir, parameters.get('aspect'))
            fp = open(file_storing_aspect)
            aspect = fp.read()
            fp.close()
            batch_template['aspect'] = aspect
        except:
            register_exception(prefix="The file containing the ascpect ratio of the video was not readable")
            raise
    else:
        batch_template['aspect'] = None
    ## Handle the title
    if parameters.get('title'):
        try:
            file_storing_title = os.path.join(curdir, parameters['title'])
            fp = open(file_storing_title)
            title = fp.read()
            fp.close()
            # BUGFIX: the title was previously read but never stored, so
            # 'submission_title' was missing from the template whenever a
            # title form element was configured.
            batch_template['submission_title'] = title
        except:
            register_exception(prefix="The file containing the title of the video was not readable")
            raise
    else:
        batch_template['submission_title'] = None
    ## Set the rest
    batch_template['notify_admin'] = CFG_SITE_ADMIN_EMAIL
    batch_template['notify_user'] = user_info['email']
    # NOTE(review): `sysno` is not defined in this function; WebSubmit is
    # expected to inject it into the execution context at run time -- confirm
    # before refactoring.
    batch_template['recid'] = sysno
    timestamp = generate_timestamp()
    job_filename = "submission_%d_%s.job" % (sysno, timestamp)
    create_job_from_dictionary(batch_template, job_filename)
| gpl-2.0 |
seppius-xbmc-repo/ru | script.module.antizapret/lib/antizapret.py | 2 | 3852 | # -*- coding: utf-8 -*-
import os
import re
import fnmatch
import threading
import urllib2
import xbmc
import xbmcaddon
from contextlib import contextmanager, closing
__addon__ = xbmcaddon.Addon()
# Per-addon profile directory where shelve caches are stored.
CACHE_DIR = xbmc.translatePath(__addon__.getAddonInfo("profile"))
PAC_URL = "http://antizapret.prostovpn.org/proxy.pac"
USER_AGENT = "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/34.0.1847.116 Safari/537.36"
CACHE = 24 * 3600 # 24 hour caching
# Per-cache-file locks used by shelf(); keyed by cache file path.
LOCKS = {}
# Process-wide memoized PAC config (see config()) and user-added hosts.
_config = {}
_custom_hosts = []
if not os.path.exists(CACHE_DIR):
    os.makedirs(CACHE_DIR)
@contextmanager
def shelf(filename, ttl=0):
    """Context manager yielding the "data" dict of an on-disk shelve cache.

    Args:
        filename: Cache file name, relative to CACHE_DIR.
        ttl: Maximum cache age in seconds; 0 disables expiry.

    The cache is (re)initialized when empty or older than `ttl`. Mutations of
    the yielded dict are persisted on exit (shelve writeback=True).
    """
    import shelve
    filename = os.path.join(CACHE_DIR, filename)
    # BUGFIX: the old code did `LOCKS.get(filename, threading.RLock())`, which
    # built a brand-new lock whenever the key was missing and never stored it,
    # so concurrent callers never actually shared a lock. setdefault()
    # registers one lock per cache file and reuses it on later calls.
    lock = LOCKS.setdefault(filename, threading.RLock())
    with lock:
        with closing(shelve.open(filename, writeback=True)) as d:
            import time
            now = time.time()
            # Reset when the cache is empty or has outlived its ttl.
            expired = ttl > 0 and d and (now - d["created_at"]) > ttl
            if not d or expired:
                d.update({
                    "created_at": now,
                    "data": {},
                })
            yield d["data"]
def config():
    """Return the antizapret PAC configuration, fetching it if necessary.

    The PAC file is downloaded at most once per CACHE interval (persisted via
    shelf()); the parsed result is additionally memoized in the module-level
    `_config` dict for the lifetime of the process.
    """
    global _config
    if _config:
        return _config
    with shelf("antizapret.pac_config", ttl=CACHE) as pac:
        if not pac.get("value"):
            xbmc.log("[script.module.antizapret]: Fetching Antizapret PAC file", level=xbmc.LOGNOTICE)
            try:
                data = urllib2.urlopen(PAC_URL).read()
            except:
                data = ""
            match = re.search(r"\"PROXY (.*); DIRECT", data)
            value = {
                "server": None,
                "domains": []
            }
            if match:
                value["server"] = match.group(1)
                # Translate the PAC file's shell-style wildcards to plain
                # strings: fnmatch adds a \Z(?ms) anchor and backslashes,
                # both of which get stripped off.
                translated = [fnmatch.translate(pattern)
                              for pattern in re.findall(r"\"(.*?)\",", data)]
                value["domains"] = [rx.replace(r"\Z(?ms)", "").replace("\\", "")
                                    for rx in translated]
            pac["value"] = value
    _config = pac["value"]
    return _config
def config_add(host):
    """Register an extra host (port stripped) to always be proxied."""
    hostname = host.partition(':')[0]
    if hostname not in _custom_hosts:
        _custom_hosts.append(hostname)
class AntizapretProxyHandler(urllib2.ProxyHandler, object):
    """urllib2 handler that proxies only blocked (antizapret-listed) hosts.

    Registers itself for http/https/ftp with a placeholder proxy string;
    the real routing decision is made per request in proxy_open().
    """
    def __init__(self):
        self.config = config()
        # "<empty>" is never used as an address: proxy_open() below either
        # substitutes the PAC-configured server or declines to proxy.
        urllib2.ProxyHandler.__init__(self, {
            "http": "<empty>",
            "https": "<empty>",
            "ftp": "<empty>",
        })
    def proxy_open(self, req, proxy, type):
        import socket
        global _custom_hosts
        # Strip any ":port" suffix from the request host.
        host = req.get_host().split(":")[0]
        # Proxy when the host name or its resolved IP appears in the PAC
        # domain list, or when it was registered explicitly via config_add().
        if self.config["server"] and (host in self.config["domains"] or socket.gethostbyname(host) in self.config["domains"] or host in _custom_hosts):
            xbmc.log("[script.module.antizapret]: Pass request through proxy " + self.config["server"], level=xbmc.LOGDEBUG)
            return urllib2.ProxyHandler.proxy_open(self, req, self.config["server"], type)
        # Returning None lets urllib2 fall through to a direct connection.
        return None
def url_get(url, params={}, headers={}, post=None):
    """Fetch `url` and return the response body (gzip-decompressed if needed).

    Args:
        url: Target URL.
        params: Optional query parameters appended to the URL.
        headers: Extra request headers.
        post: Optional dict of form fields; when given the request is a POST.

    Returns:
        The response body as a string, or None on HTTP error.
    """
    # NOTE: params/headers default to shared dicts but are never mutated here,
    # so the mutable-default pitfall does not bite in practice.
    if params:
        import urllib
        url = "%s?%s" % (url, urllib.urlencode(params))
    if post:
        import urllib
        post = urllib.urlencode(post)
    req = urllib2.Request(url, post)
    req.add_header("User-Agent", USER_AGENT)
    for k, v in headers.items():
        req.add_header(k, v)
    try:
        with closing(urllib2.urlopen(req)) as response:
            data = response.read()
            if response.headers.get("Content-Encoding", "") == "gzip":
                import zlib
                # 16 + MAX_WBITS tells zlib to expect a gzip header.
                return zlib.decompressobj(16 + zlib.MAX_WBITS).decompress(data)
            return data
    except urllib2.HTTPError as e:
        # BUGFIX: HTTPError leaves errno/strerror unset (both logged as None
        # before); the meaningful fields are the status code and its message.
        xbmc.log("[script.module.antizapret]: HTTP Error(%s): %s" % (e.code, e.msg), level=xbmc.LOGERROR)
        return None
| gpl-2.0 |
suranap/qiime | tests/test_parallel/test_multiple_rarefactions.py | 15 | 69839 | #!/usr/bin/env python
# File created on 14 Jul 2012
from __future__ import division
__author__ = "Greg Caporaso"
__copyright__ = "Copyright 2011, The QIIME project"
__credits__ = ["Greg Caporaso", "Adam Robbins-Pianka"]
__license__ = "GPL"
__version__ = "1.9.1-dev"
__maintainer__ = "Greg Caporaso"
__email__ = "gregcaporaso@gmail.com"
from glob import glob
from shutil import rmtree
from os import close
from os.path import exists, join
from tempfile import mkdtemp, mkstemp
from unittest import TestCase, main
from skbio.util import remove_files
from numpy.testing import assert_array_equal
from biom import load_table
from qiime.util import get_qiime_temp_dir
from qiime.parse import parse_distmat
from qiime.test import initiate_timeout, disable_timeout
from qiime.parallel.multiple_rarefactions import ParallelMultipleRarefactions
class ParallelMultipleRarefactionsTests(TestCase):
    """End-to-end test of the ParallelMultipleRarefactions workflow."""
    def setUp(self):
        # Files/dirs registered here are cleaned up in tearDown.
        self.files_to_remove = []
        self.dirs_to_remove = []
        # Create example output directory
        tmp_dir = get_qiime_temp_dir()
        self.test_out = mkdtemp(dir=tmp_dir,
                                prefix='qiime_parallel_tests_',
                                suffix='')
        self.dirs_to_remove.append(self.test_out)
        # Write the example BIOM table (module-level `input1`) to a temp file.
        fd, self.input1_fp = mkstemp(dir=self.test_out,
                                     prefix='qiime_inseqs',
                                     suffix='.fasta')
        close(fd)
        input1_f = open(self.input1_fp, 'w')
        input1_f.write(input1)
        input1_f.close()
        self.files_to_remove.append(self.input1_fp)
        # Define number of seconds a test can run for before timing out
        # and failing
        initiate_timeout(60)
    def tearDown(self):
        disable_timeout()
        remove_files(self.files_to_remove)
        # remove directories last, so we don't get errors
        # trying to remove files which may be in the directories
        for d in self.dirs_to_remove:
            if exists(d):
                rmtree(d)
    def test_parallel_mutliple_rarefactions(self):
        """ParallelMultipleRarefactions functions as expected."""
        r = ParallelMultipleRarefactions()
        params = {'min': 1,
                  'max': 100,
                  'step': 10,
                  'num_reps': 2,
                  'jobs_to_start': 2,
                  'suppress_lineages_included': False,
                  'subsample_multinomial': False}
        r(self.input1_fp,
          self.test_out,
          params,
          job_prefix='RARIFTEST',
          poll_directly=True,
          suppress_submit_jobs=False)
        biom_tables = glob('%s/*biom' % self.test_out)
        # 10 rarefaction depths (1, 11, ..., 91) x 2 replicates = 20 tables.
        self.assertEqual(len(biom_tables), 20)
        biom_tables.sort()
        input_table = load_table(self.input1_fp)
        # sanity checks on first table (sampled at 11 seqs/sample)
        output_table = load_table(biom_tables[0])
        assert_array_equal(output_table.ids(), input_table.ids())
        self.assertEqual(output_table.sum(), 99)
        # sanity checks on first table (sampled at 91 seqs/sample)
        output_table = load_table(biom_tables[-1])
        assert_array_equal(output_table.ids(), input_table.ids())
        self.assertEqual(output_table.sum(), 819)
input1 = """{"rows": [{"id": "0", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Bacteroidetes", "c__Bacteroidia", "o__Bacteroidales", "f__"]}}, {"id": "1", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Bacilli", "o__Bacillales", "f__Staphylococcaceae"]}}, {"id": "2", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "3", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "4", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "5", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Bacilli", "o__Lactobacillales", "f__Lactobacillaceae"]}}, {"id": "6", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "7", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "8", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "9", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Tenericutes", "c__Erysipelotrichi", "o__Erysipelotrichales", "f__Erysipelotrichaceae"]}}, {"id": "10", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "11", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Ruminococcaceae"]}}, {"id": "12", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "13", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Bacilli", "o__Lactobacillales", "f__Streptococcaceae"]}}, {"id": "14", 
"metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Actinobacteria", "c__Actinobacteria", "o__Coriobacteriales", "f__Coriobacteriaceae"]}}, {"id": "15", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "16", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Actinobacteria", "c__Actinobacteria", "o__Coriobacteriales", "f__Coriobacteriaceae"]}}, {"id": "17", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Bacteroidetes", "c__Bacteroidia", "o__Bacteroidales", "f__Porphyromonadaceae"]}}, {"id": "18", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Ruminococcaceae"]}}, {"id": "19", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Ruminococcaceae"]}}, {"id": "20", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "21", "metadata": {"taxonomy": ["Root", "k__Bacteria"]}}, {"id": "22", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Proteobacteria", "c__Deltaproteobacteria", "o__Desulfovibrionales", "f__Desulfovibrionaceae"]}}, {"id": "23", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "24", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "25", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Bacteroidetes", "c__Bacteroidia", "o__Bacteroidales", "f__"]}}, {"id": "26", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "27", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__TM7", "c__TM7-3", "o__CW040", "f__F16"]}}, {"id": "28", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", 
"o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "29", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Ruminococcaceae"]}}, {"id": "30", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Ruminococcaceae"]}}, {"id": "31", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "32", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "33", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Actinobacteria", "c__Actinobacteria", "o__Coriobacteriales", "f__Coriobacteriaceae"]}}, {"id": "34", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Actinobacteria", "c__Actinobacteria", "o__Coriobacteriales", "f__Coriobacteriaceae"]}}, {"id": "35", "metadata": {"taxonomy": ["Root", "k__Bacteria"]}}, {"id": "36", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Tenericutes", "c__Erysipelotrichi", "o__Erysipelotrichales", "f__Erysipelotrichaceae"]}}, {"id": "37", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "38", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Bacteroidetes", "c__Bacteroidia", "o__Bacteroidales", "f__"]}}, {"id": "39", "metadata": {"taxonomy": ["Root", "k__Bacteria"]}}, {"id": "40", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "41", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Ruminococcaceae"]}}, {"id": "42", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "43", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", 
"o__Clostridiales", "f__Ruminococcaceae"]}}, {"id": "44", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "45", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Deferribacteres", "c__Deferribacteres", "o__Deferribacterales", "f__Deferribacteraceae"]}}, {"id": "46", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "47", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "48", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "49", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "50", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "51", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "52", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "53", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Bacteroidetes", "c__Bacteroidia", "o__Bacteroidales", "f__"]}}, {"id": "54", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "55", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Tenericutes", "c__Erysipelotrichi", "o__Erysipelotrichales", "f__Erysipelotrichaceae"]}}, {"id": "56", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Bacteroidetes", "c__Bacteroidia", "o__Bacteroidales", "f__"]}}, {"id": "57", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", 
"o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "58", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "59", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales"]}}, {"id": "60", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Bacteroidetes", "c__Bacteroidia", "o__Bacteroidales", "f__"]}}, {"id": "61", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Bacteroidetes", "c__Bacteroidia", "o__Bacteroidales", "f__"]}}, {"id": "62", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "63", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "64", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Bacteroidetes", "c__Bacteroidia", "o__Bacteroidales", "f__Bacteroidaceae"]}}, {"id": "65", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "66", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Ruminococcaceae"]}}, {"id": "67", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "68", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Bacteroidetes", "c__Bacteroidia", "o__Bacteroidales", "f__"]}}, {"id": "69", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Bacteroidetes", "c__Bacteroidia", "o__Bacteroidales", "f__"]}}, {"id": "70", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "71", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "72", "metadata": {"taxonomy": 
["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "73", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Tenericutes", "c__Erysipelotrichi", "o__Erysipelotrichales", "f__Erysipelotrichaceae"]}}, {"id": "74", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Bacteroidetes", "c__Bacteroidia", "o__Bacteroidales", "f__"]}}, {"id": "75", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "76", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "77", "metadata": {"taxonomy": ["Root", "k__Bacteria"]}}, {"id": "78", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Bacteroidetes", "c__Bacteroidia", "o__Bacteroidales", "f__Bacteroidaceae"]}}, {"id": "79", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "80", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "81", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Actinobacteria", "c__Actinobacteria", "o__Coriobacteriales", "f__Coriobacteriaceae"]}}, {"id": "82", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Bacteroidetes", "c__Bacteroidia", "o__Bacteroidales", "f__Porphyromonadaceae"]}}, {"id": "83", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Bacteroidetes", "c__Bacteroidia", "o__Bacteroidales", "f__"]}}, {"id": "84", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "85", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Bacteroidetes", "c__Bacteroidia", "o__Bacteroidales", "f__"]}}, {"id": "86", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", 
"f__Lachnospiraceae"]}}, {"id": "87", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Tenericutes", "c__Mollicutes", "o__RF39", "f__"]}}, {"id": "88", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "89", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "90", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Bacteroidetes", "c__Bacteroidia", "o__Bacteroidales", "f__Porphyromonadaceae"]}}, {"id": "91", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "92", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "93", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "94", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Bacteroidetes", "c__Bacteroidia", "o__Bacteroidales", "f__Porphyromonadaceae"]}}, {"id": "95", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Bacteroidetes", "c__Bacteroidia", "o__Bacteroidales", "f__"]}}, {"id": "96", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "97", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Bacteroidetes", "c__Bacteroidia", "o__Bacteroidales", "f__"]}}, {"id": "98", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "99", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Bacteroidetes", "c__Bacteroidia", "o__Bacteroidales", "f__"]}}, {"id": "100", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "101", "metadata": {"taxonomy": 
["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "102", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Bacilli", "o__Lactobacillales", "f__Lactobacillaceae"]}}, {"id": "103", "metadata": {"taxonomy": ["Root", "k__Bacteria"]}}, {"id": "104", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "105", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Bacteroidetes", "c__Bacteroidia", "o__Bacteroidales", "f__Rikenellaceae"]}}, {"id": "106", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "107", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Bacteroidetes", "c__Bacteroidia", "o__Bacteroidales", "f__"]}}, {"id": "108", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Deferribacteres", "c__Deferribacteres", "o__Deferribacterales", "f__Deferribacteraceae"]}}, {"id": "109", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "110", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "111", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "112", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "113", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Ruminococcaceae"]}}, {"id": "114", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Bacteroidetes", "c__Bacteroidia", "o__Bacteroidales", "f__Bacteroidaceae"]}}, {"id": "115", "metadata": {"taxonomy": ["Root", "k__Bacteria"]}}, {"id": "116", "metadata": {"taxonomy": ["Root", 
"k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "117", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__"]}}, {"id": "118", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "119", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales"]}}, {"id": "120", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales"]}}, {"id": "121", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Ruminococcaceae"]}}, {"id": "122", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "123", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Tenericutes", "c__Mollicutes", "o__RF39", "f__"]}}, {"id": "124", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "125", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Ruminococcaceae"]}}, {"id": "126", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "127", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "128", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "129", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Ruminococcaceae"]}}, {"id": "130", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Peptococcaceae"]}}, 
{"id": "131", "metadata": {"taxonomy": ["Root", "k__Bacteria"]}}, {"id": "132", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Bacteroidetes", "c__Bacteroidia", "o__Bacteroidales", "f__Bacteroidaceae"]}}, {"id": "133", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Bacteroidetes", "c__Bacteroidia", "o__Bacteroidales", "f__"]}}, {"id": "134", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "135", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "136", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "137", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "138", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "139", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Bacteroidetes", "c__Bacteroidia", "o__Bacteroidales", "f__"]}}, {"id": "140", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Ruminococcaceae"]}}, {"id": "141", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "142", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "143", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Ruminococcaceae"]}}, {"id": "144", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Bacteroidetes", "c__Bacteroidia", "o__Bacteroidales", "f__"]}}, {"id": "145", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", 
"f__Lachnospiraceae"]}}, {"id": "146", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "147", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "148", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Bacteroidetes", "c__Bacteroidia", "o__Bacteroidales", "f__Rikenellaceae"]}}, {"id": "149", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Bacteroidetes", "c__Bacteroidia", "o__Bacteroidales", "f__Rikenellaceae"]}}, {"id": "150", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Bacteroidetes", "c__Bacteroidia", "o__Bacteroidales"]}}, {"id": "151", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Bacteroidetes", "c__Bacteroidia", "o__Bacteroidales", "f__Bacteroidaceae"]}}, {"id": "152", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Bacteroidetes", "c__Bacteroidia", "o__Bacteroidales", "f__Bacteroidaceae"]}}, {"id": "153", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Bacteroidetes", "c__Bacteroidia", "o__Bacteroidales", "f__Rikenellaceae"]}}, {"id": "154", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "155", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "156", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Ruminococcaceae"]}}, {"id": "157", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "158", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Bacteroidetes", "c__Bacteroidia", "o__Bacteroidales", "f__"]}}, {"id": "159", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Bacteroidetes", "c__Bacteroidia", "o__Bacteroidales", "f__"]}}, {"id": "160", 
"metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "161", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "162", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Bacteroidetes", "c__Bacteroidia", "o__Bacteroidales", "f__"]}}, {"id": "163", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales"]}}, {"id": "164", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "165", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "166", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "167", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "168", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "169", "metadata": {"taxonomy": ["Root", "k__Bacteria"]}}, {"id": "170", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Ruminococcaceae"]}}, {"id": "171", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales"]}}, {"id": "172", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Ruminococcaceae"]}}, {"id": "173", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "174", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Ruminococcaceae"]}}, {"id": "175", 
"metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Bacteroidetes", "c__Bacteroidia", "o__Bacteroidales", "f__"]}}, {"id": "176", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Bacilli", "o__Lactobacillales", "f__Lactobacillaceae"]}}, {"id": "177", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Ruminococcaceae"]}}, {"id": "178", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Tenericutes", "c__Erysipelotrichi", "o__Erysipelotrichales", "f__Erysipelotrichaceae"]}}, {"id": "179", "metadata": {"taxonomy": ["Root", "k__Bacteria"]}}, {"id": "180", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Ruminococcaceae"]}}, {"id": "181", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "182", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "183", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "184", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__"]}}, {"id": "185", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Ruminococcaceae"]}}, {"id": "186", "metadata": {"taxonomy": ["Root", "k__Bacteria"]}}, {"id": "187", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "188", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "189", "metadata": {"taxonomy": ["Root", "k__Bacteria"]}}, {"id": "190", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales"]}}, 
{"id": "191", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "192", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "193", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "194", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "195", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Bacteroidetes", "c__Bacteroidia", "o__Bacteroidales", "f__Bacteroidaceae"]}}, {"id": "196", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "197", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Ruminococcaceae"]}}, {"id": "198", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "199", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Peptococcaceae"]}}, {"id": "200", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Catabacteriaceae"]}}, {"id": "201", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "202", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "203", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Bacteroidetes", "c__Bacteroidia", "o__Bacteroidales", "f__"]}}, {"id": "204", "metadata": {"taxonomy": ["Root", "k__Bacteria"]}}, {"id": "205", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", 
"c__Clostridia", "o__Clostridiales", "f__Ruminococcaceae"]}}, {"id": "206", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Catabacteriaceae"]}}, {"id": "207", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Ruminococcaceae"]}}, {"id": "208", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "209", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "210", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Bacteroidetes", "c__Bacteroidia", "o__Bacteroidales", "f__Bacteroidaceae"]}}, {"id": "211", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Bacteroidetes", "c__Bacteroidia", "o__Bacteroidales", "f__Rikenellaceae"]}}, {"id": "212", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Catabacteriaceae"]}}, {"id": "213", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "214", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes"]}}, {"id": "215", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "216", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "217", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "218", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales"]}}, {"id": "219", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "220", 
"metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "221", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "222", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "223", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Bacteroidetes", "c__Bacteroidia", "o__Bacteroidales", "f__"]}}, {"id": "224", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Ruminococcaceae"]}}, {"id": "225", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Actinobacteria", "c__Actinobacteria", "o__Coriobacteriales", "f__Coriobacteriaceae"]}}, {"id": "226", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Bacteroidetes", "c__Bacteroidia", "o__Bacteroidales", "f__"]}}, {"id": "227", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "228", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Peptococcaceae"]}}, {"id": "229", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "230", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "231", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Tenericutes", "c__Erysipelotrichi", "o__Erysipelotrichales", "f__Erysipelotrichaceae"]}}, {"id": "232", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "233", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "234", 
"metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Bacteroidetes", "c__Bacteroidia", "o__Bacteroidales", "f__Rikenellaceae"]}}, {"id": "235", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Bacteroidetes", "c__Bacteroidia", "o__Bacteroidales", "f__Bacteroidaceae"]}}, {"id": "236", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "237", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "238", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "239", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "240", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Bacilli", "o__Lactobacillales", "f__Lactobacillaceae"]}}, {"id": "241", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "242", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Bacteroidetes", "c__Bacteroidia", "o__Bacteroidales", "f__Bacteroidaceae"]}}, {"id": "243", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "244", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__"]}}, {"id": "245", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "246", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Bacteroidetes", "c__Bacteroidia", "o__Bacteroidales", "f__"]}}, {"id": "247", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "248", "metadata": {"taxonomy": 
["Root", "k__Bacteria", "p__Bacteroidetes", "c__Bacteroidia", "o__Bacteroidales", "f__Rikenellaceae"]}}, {"id": "249", "metadata": {"taxonomy": ["Root", "k__Bacteria"]}}, {"id": "250", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Bacteroidetes", "c__Bacteroidia", "o__Bacteroidales", "f__"]}}, {"id": "251", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "252", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "253", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "254", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Ruminococcaceae"]}}, {"id": "255", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "256", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Ruminococcaceae"]}}, {"id": "257", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "258", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Bacteroidetes", "c__Bacteroidia", "o__Bacteroidales", "f__Prevotellaceae"]}}, {"id": "259", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "260", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Bacteroidetes", "c__Bacteroidia", "o__Bacteroidales", "f__"]}}, {"id": "261", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "262", "metadata": {"taxonomy": ["Root", "k__Bacteria"]}}, {"id": "263", "metadata": {"taxonomy": ["Root", "k__Bacteria", 
"p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "264", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "265", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "266", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Bacteroidetes", "c__Bacteroidia", "o__Bacteroidales", "f__Rikenellaceae"]}}, {"id": "267", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Bacteroidetes", "c__Bacteroidia", "o__Bacteroidales", "f__"]}}, {"id": "268", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Ruminococcaceae"]}}, {"id": "269", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "270", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Bacteroidetes", "c__Bacteroidia", "o__Bacteroidales", "f__"]}}, {"id": "271", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Actinobacteria", "c__Actinobacteria", "o__Coriobacteriales", "f__Coriobacteriaceae"]}}, {"id": "272", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "273", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "274", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Ruminococcaceae"]}}, {"id": "275", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Bacteroidetes", "c__Bacteroidia", "o__Bacteroidales", "f__"]}}, {"id": "276", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "277", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", 
"c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "278", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Ruminococcaceae"]}}, {"id": "279", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Ruminococcaceae"]}}, {"id": "280", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "281", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales"]}}, {"id": "282", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Proteobacteria", "c__Deltaproteobacteria", "o__Desulfovibrionales", "f__Desulfovibrionaceae"]}}, {"id": "283", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Bacteroidetes", "c__Bacteroidia", "o__Bacteroidales", "f__Porphyromonadaceae"]}}, {"id": "284", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales"]}}, {"id": "285", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "286", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Bacteroidetes", "c__Bacteroidia", "o__Bacteroidales", "f__Rikenellaceae"]}}, {"id": "287", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "288", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "289", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Bacteroidetes", "c__Bacteroidia", "o__Bacteroidales", "f__Porphyromonadaceae"]}}, {"id": "290", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "291", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Bacilli", 
"o__Bacillales", "f__Staphylococcaceae"]}}, {"id": "292", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "293", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "294", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "295", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "296", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "297", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Ruminococcaceae"]}}, {"id": "298", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "299", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "300", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "301", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "302", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "303", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "304", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "305", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Bacteroidetes", "c__Bacteroidia", 
"o__Bacteroidales", "f__"]}}, {"id": "306", "metadata": {"taxonomy": ["Root", "k__Bacteria"]}}, {"id": "307", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Ruminococcaceae"]}}, {"id": "308", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Proteobacteria", "c__Epsilonproteobacteria", "o__Campylobacterales", "f__Helicobacteraceae"]}}, {"id": "309", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "310", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Bacteroidetes", "c__Bacteroidia", "o__Bacteroidales", "f__"]}}, {"id": "311", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Ruminococcaceae"]}}, {"id": "312", "metadata": {"taxonomy": ["Root", "k__Bacteria"]}}, {"id": "313", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Ruminococcaceae"]}}, {"id": "314", "metadata": {"taxonomy": ["Root", "k__Bacteria"]}}, {"id": "315", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Bacteroidetes", "c__Bacteroidia", "o__Bacteroidales", "f__"]}}, {"id": "316", "metadata": {"taxonomy": ["Root", "k__Bacteria"]}}, {"id": "317", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "318", "metadata": {"taxonomy": ["Root", "k__Bacteria"]}}, {"id": "319", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Bacteroidetes", "c__Bacteroidia", "o__Bacteroidales", "f__"]}}, {"id": "320", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "321", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Ruminococcaceae"]}}, {"id": "322", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Bacteroidetes", 
"c__Bacteroidia", "o__Bacteroidales", "f__"]}}, {"id": "323", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Catabacteriaceae"]}}, {"id": "324", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Bacteroidetes", "c__Bacteroidia", "o__Bacteroidales", "f__"]}}, {"id": "325", "metadata": {"taxonomy": ["Root", "k__Bacteria"]}}, {"id": "326", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "327", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "328", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Tenericutes", "c__Erysipelotrichi", "o__Erysipelotrichales", "f__Erysipelotrichaceae"]}}, {"id": "329", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "330", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Actinobacteria", "c__Actinobacteria", "o__Coriobacteriales", "f__Coriobacteriaceae"]}}, {"id": "331", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Bacteroidetes", "c__Bacteroidia", "o__Bacteroidales", "f__"]}}, {"id": "332", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Catabacteriaceae"]}}, {"id": "333", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Ruminococcaceae"]}}, {"id": "334", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Bacteroidetes", "c__Bacteroidia", "o__Bacteroidales", "f__"]}}, {"id": "335", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Bacteroidetes", "c__Bacteroidia", "o__Bacteroidales", "f__Bacteroidaceae"]}}, {"id": "336", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Ruminococcaceae"]}}, {"id": "337", "metadata": {"taxonomy": 
["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Ruminococcaceae"]}}, {"id": "338", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Bacteroidetes", "c__Bacteroidia", "o__Bacteroidales", "f__Rikenellaceae"]}}, {"id": "339", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Ruminococcaceae"]}}, {"id": "340", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "341", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes"]}}, {"id": "342", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Bacteroidetes", "c__Bacteroidia", "o__Bacteroidales", "f__"]}}, {"id": "343", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "344", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "345", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Bacteroidetes", "c__Bacteroidia", "o__Bacteroidales", "f__Bacteroidaceae"]}}, {"id": "346", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "347", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes"]}}, {"id": "348", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "349", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "350", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "351", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "352", "metadata": 
{"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales"]}}, {"id": "353", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Bacteroidetes", "c__Bacteroidia", "o__Bacteroidales", "f__"]}}, {"id": "354", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Clostridiales Family XIII. Incertae Sedis"]}}, {"id": "355", "metadata": {"taxonomy": ["Root", "k__Bacteria"]}}, {"id": "356", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "357", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "358", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__"]}}, {"id": "359", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "360", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Clostridiaceae"]}}, {"id": "361", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "362", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "363", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Ruminococcaceae"]}}, {"id": "364", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "365", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Tenericutes", "c__Erysipelotrichi", "o__Erysipelotrichales", "f__Erysipelotrichaceae"]}}, {"id": "366", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", 
"f__Lachnospiraceae"]}}, {"id": "367", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Bacteroidetes", "c__Bacteroidia", "o__Bacteroidales", "f__"]}}, {"id": "368", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__"]}}, {"id": "369", "metadata": {"taxonomy": ["Root", "k__Bacteria"]}}, {"id": "370", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "371", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "372", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Bacteroidetes", "c__Bacteroidia", "o__Bacteroidales", "f__Rikenellaceae"]}}, {"id": "373", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes"]}}, {"id": "374", "metadata": {"taxonomy": ["Root", "k__Bacteria"]}}, {"id": "375", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "376", "metadata": {"taxonomy": ["Root", "k__Bacteria"]}}, {"id": "377", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "378", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Bacteroidetes", "c__Bacteroidia", "o__Bacteroidales", "f__Rikenellaceae"]}}, {"id": "379", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Bacteroidetes", "c__Bacteroidia", "o__Bacteroidales", "f__Bacteroidaceae"]}}, {"id": "380", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "381", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Bacteroidetes", "c__Bacteroidia", "o__Bacteroidales", "f__"]}}, {"id": "382", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Ruminococcaceae"]}}, {"id": "383", 
"metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "384", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Ruminococcaceae"]}}, {"id": "385", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Ruminococcaceae"]}}, {"id": "386", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Ruminococcaceae"]}}, {"id": "387", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "388", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "389", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Ruminococcaceae"]}}, {"id": "390", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Bacteroidetes", "c__Bacteroidia", "o__Bacteroidales", "f__"]}}, {"id": "391", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "392", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Ruminococcaceae"]}}, {"id": "393", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "394", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "395", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Proteobacteria", "c__Deltaproteobacteria", "o__Desulfovibrionales", "f__Desulfovibrionaceae"]}}, {"id": "396", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__"]}}, {"id": "397", "metadata": 
{"taxonomy": ["Root", "k__Bacteria", "p__Bacteroidetes", "c__Bacteroidia", "o__Bacteroidales", "f__"]}}, {"id": "398", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Bacteroidetes", "c__Bacteroidia", "o__Bacteroidales", "f__Porphyromonadaceae"]}}, {"id": "399", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Bacteroidetes", "c__Bacteroidia", "o__Bacteroidales", "f__"]}}, {"id": "400", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "401", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "402", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "403", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "404", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "405", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "406", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "407", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Tenericutes", "c__Erysipelotrichi", "o__Erysipelotrichales", "f__Erysipelotrichaceae"]}}, {"id": "408", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "409", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Bacilli", "o__Turicibacterales", "f__Turicibacteraceae"]}}, {"id": "410", "metadata": {"taxonomy": ["Root", "k__Bacteria"]}}, {"id": "411", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", 
"o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "412", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "413", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Bacteroidetes", "c__Bacteroidia", "o__Bacteroidales", "f__"]}}, {"id": "414", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "415", "metadata": {"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "416", "metadata": {"taxonomy": ["Root", "k__Bacteria"]}}, {"id": "417", "metadata": {"taxonomy": ["Root", "k__Bacteria"]}}], "format": "Biological Observation Matrix 0.9.1-dev", "data": [[0, 0, 1.0], [1, 0, 1.0], [2, 1, 1.0], [3, 2, 1.0], [4, 1, 1.0], [4, 3, 1.0], [5, 4, 1.0], [6, 0, 1.0], [6, 2, 1.0], [6, 3, 1.0], [6, 5, 1.0], [7, 1, 1.0], [8, 4, 1.0], [9, 4, 1.0], [10, 2, 2.0], [10, 6, 1.0], [11, 3, 1.0], [11, 5, 1.0], [12, 3, 1.0], [13, 7, 1.0], [14, 8, 1.0], [15, 5, 1.0], [16, 8, 1.0], [17, 8, 1.0], [18, 1, 1.0], [18, 2, 1.0], [19, 7, 1.0], [20, 7, 1.0], [21, 3, 2.0], [22, 3, 1.0], [23, 1, 1.0], [24, 1, 1.0], [24, 3, 1.0], [25, 7, 2.0], [26, 8, 1.0], [27, 8, 2.0], [28, 1, 1.0], [28, 3, 1.0], [29, 5, 2.0], [30, 3, 1.0], [30, 5, 1.0], [31, 3, 3.0], [31, 7, 6.0], [32, 1, 3.0], [33, 3, 1.0], [34, 7, 2.0], [35, 7, 1.0], [36, 7, 1.0], [37, 2, 1.0], [37, 5, 1.0], [38, 7, 5.0], [39, 7, 1.0], [40, 1, 1.0], [40, 7, 2.0], [41, 7, 2.0], [42, 2, 16.0], [42, 4, 12.0], [43, 4, 1.0], [44, 1, 1.0], [45, 0, 6.0], [45, 3, 2.0], [45, 7, 3.0], [45, 8, 5.0], [46, 4, 1.0], [47, 1, 1.0], [48, 4, 1.0], [49, 2, 1.0], [50, 8, 1.0], [51, 7, 1.0], [52, 7, 1.0], [53, 0, 37.0], [53, 1, 1.0], [53, 3, 10.0], [53, 8, 4.0], [54, 6, 1.0], [55, 2, 1.0], [56, 0, 5.0], [56, 1, 2.0], [56, 2, 1.0], [56, 3, 4.0], [56, 4, 1.0], [56, 5, 1.0], [56, 6, 3.0], [56, 7, 9.0], [56, 8, 2.0], [57, 7, 1.0], [58, 0, 
1.0], [59, 7, 1.0], [60, 0, 1.0], [60, 1, 2.0], [60, 2, 2.0], [60, 3, 1.0], [60, 4, 24.0], [60, 5, 10.0], [60, 6, 1.0], [61, 6, 1.0], [62, 1, 1.0], [63, 1, 1.0], [64, 6, 1.0], [65, 7, 1.0], [66, 7, 1.0], [67, 4, 1.0], [68, 0, 2.0], [68, 3, 3.0], [68, 8, 2.0], [69, 0, 1.0], [69, 3, 4.0], [69, 4, 2.0], [69, 7, 2.0], [69, 8, 1.0], [70, 6, 1.0], [71, 0, 2.0], [71, 3, 2.0], [72, 2, 1.0], [73, 3, 1.0], [74, 5, 1.0], [75, 5, 1.0], [76, 5, 1.0], [77, 6, 1.0], [78, 0, 19.0], [78, 1, 1.0], [78, 3, 2.0], [78, 8, 1.0], [79, 1, 1.0], [80, 1, 1.0], [81, 1, 1.0], [82, 3, 3.0], [82, 8, 1.0], [83, 3, 1.0], [84, 3, 1.0], [85, 4, 1.0], [86, 0, 1.0], [86, 3, 2.0], [86, 4, 1.0], [86, 5, 4.0], [86, 6, 4.0], [86, 7, 2.0], [87, 5, 1.0], [88, 1, 6.0], [88, 2, 1.0], [88, 5, 2.0], [88, 6, 4.0], [89, 2, 2.0], [90, 0, 1.0], [91, 5, 1.0], [92, 2, 1.0], [92, 5, 2.0], [93, 8, 1.0], [94, 1, 1.0], [94, 3, 1.0], [94, 8, 3.0], [95, 3, 1.0], [96, 1, 1.0], [97, 7, 1.0], [98, 7, 1.0], [99, 0, 1.0], [100, 6, 1.0], [101, 6, 1.0], [102, 1, 2.0], [102, 2, 1.0], [102, 4, 4.0], [102, 6, 1.0], [103, 8, 1.0], [104, 7, 1.0], [105, 8, 5.0], [106, 7, 1.0], [107, 4, 1.0], [108, 0, 1.0], [109, 2, 1.0], [110, 1, 1.0], [110, 2, 1.0], [110, 7, 1.0], [111, 1, 1.0], [111, 5, 1.0], [112, 6, 1.0], [113, 5, 1.0], [114, 3, 1.0], [114, 8, 2.0], [115, 2, 1.0], [116, 0, 1.0], [116, 6, 1.0], [117, 6, 1.0], [118, 2, 1.0], [118, 5, 1.0], [118, 6, 3.0], [119, 3, 1.0], [120, 3, 1.0], [120, 5, 1.0], [121, 2, 1.0], [122, 2, 1.0], [123, 7, 1.0], [124, 2, 2.0], [124, 5, 2.0], [124, 6, 2.0], [125, 7, 1.0], [126, 1, 1.0], [127, 4, 1.0], [128, 4, 1.0], [128, 5, 1.0], [128, 6, 1.0], [129, 2, 1.0], [130, 2, 1.0], [130, 3, 1.0], [131, 2, 1.0], [132, 8, 1.0], [133, 8, 1.0], [134, 1, 1.0], [134, 2, 1.0], [134, 6, 4.0], [135, 2, 1.0], [136, 2, 1.0], [137, 7, 1.0], [138, 7, 1.0], [139, 3, 1.0], [139, 5, 2.0], [140, 3, 1.0], [141, 5, 1.0], [142, 1, 6.0], [142, 3, 1.0], [143, 7, 1.0], [144, 0, 1.0], [145, 1, 2.0], [145, 2, 4.0], [145, 3, 2.0], 
[145, 6, 9.0], [146, 1, 1.0], [147, 3, 1.0], [148, 0, 1.0], [149, 0, 2.0], [149, 1, 1.0], [149, 3, 5.0], [149, 8, 1.0], [150, 0, 1.0], [151, 8, 1.0], [152, 8, 1.0], [153, 3, 1.0], [154, 7, 1.0], [155, 1, 1.0], [155, 2, 1.0], [156, 6, 1.0], [157, 6, 2.0], [158, 0, 1.0], [158, 3, 1.0], [159, 0, 1.0], [159, 3, 1.0], [160, 5, 1.0], [161, 2, 6.0], [161, 4, 2.0], [161, 5, 4.0], [162, 2, 1.0], [162, 5, 1.0], [163, 1, 1.0], [163, 2, 1.0], [163, 5, 1.0], [164, 2, 2.0], [164, 4, 1.0], [164, 5, 8.0], [164, 6, 3.0], [165, 3, 1.0], [166, 1, 1.0], [167, 3, 1.0], [168, 4, 1.0], [169, 0, 1.0], [170, 0, 1.0], [170, 8, 1.0], [171, 2, 1.0], [171, 4, 10.0], [172, 8, 2.0], [173, 3, 1.0], [173, 5, 1.0], [173, 7, 2.0], [174, 7, 1.0], [175, 4, 9.0], [175, 7, 1.0], [176, 1, 1.0], [176, 2, 14.0], [176, 5, 14.0], [176, 6, 1.0], [177, 1, 1.0], [178, 0, 2.0], [178, 4, 4.0], [179, 1, 1.0], [180, 1, 1.0], [181, 7, 2.0], [182, 3, 1.0], [183, 5, 2.0], [184, 6, 1.0], [185, 0, 1.0], [185, 1, 1.0], [185, 3, 1.0], [185, 7, 1.0], [186, 3, 1.0], [187, 2, 1.0], [187, 4, 1.0], [188, 5, 1.0], [189, 5, 1.0], [190, 5, 1.0], [191, 5, 1.0], [192, 5, 1.0], [193, 8, 1.0], [194, 5, 1.0], [195, 4, 1.0], [196, 1, 1.0], [197, 3, 1.0], [198, 5, 1.0], [198, 6, 2.0], [199, 7, 1.0], [200, 7, 1.0], [201, 1, 5.0], [201, 2, 2.0], [201, 6, 1.0], [201, 7, 1.0], [202, 2, 29.0], [202, 5, 10.0], [202, 6, 1.0], [203, 3, 1.0], [204, 5, 1.0], [205, 7, 1.0], [206, 0, 1.0], [207, 0, 1.0], [208, 5, 2.0], [209, 1, 1.0], [210, 8, 13.0], [211, 1, 2.0], [212, 7, 1.0], [213, 5, 1.0], [214, 3, 1.0], [214, 7, 2.0], [215, 5, 1.0], [216, 3, 5.0], [216, 4, 2.0], [217, 1, 1.0], [217, 4, 4.0], [218, 2, 1.0], [218, 7, 4.0], [219, 1, 1.0], [219, 2, 2.0], [219, 5, 2.0], [220, 1, 4.0], [221, 5, 1.0], [222, 1, 1.0], [223, 0, 1.0], [224, 2, 1.0], [225, 8, 1.0], [226, 1, 1.0], [226, 2, 2.0], [226, 6, 2.0], [227, 2, 1.0], [228, 4, 1.0], [229, 2, 1.0], [229, 5, 1.0], [230, 7, 2.0], [231, 1, 5.0], [231, 2, 1.0], [231, 4, 17.0], [231, 7, 20.0], [232, 7, 
1.0], [233, 3, 1.0], [234, 3, 1.0], [235, 0, 1.0], [236, 3, 1.0], [237, 8, 1.0], [238, 3, 1.0], [239, 6, 1.0], [240, 1, 1.0], [240, 2, 1.0], [241, 2, 1.0], [242, 8, 1.0], [243, 3, 1.0], [244, 4, 1.0], [245, 7, 1.0], [246, 0, 2.0], [246, 3, 2.0], [246, 7, 7.0], [247, 1, 1.0], [248, 3, 1.0], [248, 7, 1.0], [249, 7, 1.0], [250, 7, 1.0], [250, 8, 1.0], [251, 4, 1.0], [252, 2, 1.0], [252, 5, 1.0], [253, 1, 1.0], [254, 1, 1.0], [254, 7, 1.0], [255, 1, 2.0], [256, 3, 1.0], [257, 5, 1.0], [258, 3, 1.0], [259, 1, 1.0], [260, 2, 1.0], [261, 5, 1.0], [262, 7, 1.0], [263, 0, 1.0], [263, 8, 1.0], [264, 5, 1.0], [265, 4, 1.0], [265, 6, 1.0], [266, 5, 1.0], [267, 0, 1.0], [268, 2, 1.0], [268, 3, 2.0], [269, 1, 1.0], [270, 0, 1.0], [270, 1, 2.0], [270, 2, 1.0], [270, 5, 3.0], [270, 6, 5.0], [271, 0, 1.0], [272, 5, 2.0], [273, 6, 1.0], [274, 7, 1.0], [275, 4, 1.0], [276, 1, 1.0], [277, 1, 2.0], [277, 3, 1.0], [277, 6, 2.0], [278, 7, 1.0], [279, 3, 1.0], [280, 6, 1.0], [281, 4, 1.0], [282, 0, 1.0], [282, 8, 2.0], [283, 8, 5.0], [284, 4, 1.0], [285, 6, 1.0], [285, 8, 1.0], [286, 8, 1.0], [287, 1, 1.0], [288, 0, 1.0], [289, 0, 1.0], [289, 6, 1.0], [290, 3, 1.0], [291, 0, 2.0], [292, 3, 1.0], [293, 7, 1.0], [294, 1, 1.0], [295, 6, 1.0], [296, 3, 1.0], [297, 3, 1.0], [297, 7, 1.0], [298, 6, 1.0], [299, 5, 1.0], [300, 5, 2.0], [301, 7, 1.0], [302, 3, 1.0], [303, 7, 1.0], [304, 2, 1.0], [304, 5, 2.0], [304, 7, 6.0], [305, 3, 1.0], [306, 0, 1.0], [307, 3, 1.0], [308, 4, 5.0], [308, 7, 2.0], [309, 1, 13.0], [309, 2, 11.0], [309, 4, 2.0], [309, 5, 5.0], [309, 6, 12.0], [310, 0, 1.0], [310, 3, 1.0], [310, 8, 1.0], [311, 8, 1.0], [312, 8, 1.0], [313, 1, 1.0], [314, 8, 1.0], [315, 1, 2.0], [316, 5, 1.0], [317, 0, 1.0], [317, 6, 1.0], [318, 4, 1.0], [319, 0, 1.0], [320, 2, 1.0], [321, 0, 1.0], [322, 6, 1.0], [323, 7, 1.0], [324, 5, 1.0], [324, 6, 2.0], [325, 6, 1.0], [326, 2, 1.0], [326, 5, 1.0], [327, 5, 1.0], [328, 7, 1.0], [329, 2, 3.0], [330, 7, 1.0], [331, 3, 1.0], [332, 7, 1.0], [333, 3, 
1.0], [334, 4, 2.0], [335, 0, 1.0], [336, 8, 1.0], [337, 0, 2.0], [337, 3, 1.0], [337, 5, 2.0], [338, 3, 1.0], [339, 0, 3.0], [339, 2, 1.0], [339, 4, 2.0], [339, 8, 2.0], [340, 0, 1.0], [341, 7, 1.0], [342, 3, 1.0], [342, 8, 2.0], [343, 0, 3.0], [343, 1, 1.0], [343, 4, 1.0], [343, 6, 1.0], [344, 5, 1.0], [345, 4, 3.0], [346, 6, 1.0], [347, 1, 1.0], [348, 1, 7.0], [348, 3, 2.0], [348, 6, 2.0], [349, 1, 1.0], [350, 5, 1.0], [351, 5, 1.0], [352, 8, 1.0], [353, 5, 1.0], [354, 7, 1.0], [354, 8, 1.0], [355, 3, 1.0], [356, 2, 1.0], [356, 6, 1.0], [357, 4, 2.0], [357, 8, 1.0], [358, 3, 1.0], [359, 3, 1.0], [360, 6, 1.0], [360, 8, 3.0], [361, 7, 1.0], [362, 8, 1.0], [363, 8, 1.0], [364, 6, 1.0], [365, 7, 1.0], [366, 6, 1.0], [367, 0, 1.0], [367, 3, 1.0], [368, 7, 1.0], [369, 5, 1.0], [370, 3, 1.0], [371, 1, 2.0], [372, 1, 4.0], [372, 3, 5.0], [372, 5, 2.0], [372, 6, 2.0], [372, 7, 5.0], [372, 8, 1.0], [373, 3, 1.0], [374, 3, 1.0], [375, 8, 1.0], [376, 0, 1.0], [377, 4, 1.0], [378, 0, 3.0], [378, 3, 5.0], [378, 7, 5.0], [378, 8, 9.0], [379, 0, 4.0], [379, 1, 1.0], [379, 2, 2.0], [379, 3, 4.0], [379, 6, 18.0], [379, 8, 21.0], [380, 1, 1.0], [381, 6, 2.0], [381, 7, 1.0], [382, 5, 1.0], [383, 2, 1.0], [383, 5, 1.0], [384, 5, 1.0], [385, 3, 1.0], [385, 5, 1.0], [386, 0, 1.0], [386, 5, 1.0], [387, 8, 1.0], [388, 4, 2.0], [388, 5, 2.0], [388, 6, 19.0], [388, 8, 3.0], [389, 3, 1.0], [390, 5, 1.0], [391, 5, 1.0], [392, 7, 1.0], [393, 5, 1.0], [394, 1, 1.0], [395, 7, 1.0], [396, 2, 1.0], [396, 5, 1.0], [396, 6, 1.0], [397, 0, 3.0], [397, 3, 1.0], [397, 4, 9.0], [397, 5, 2.0], [397, 6, 1.0], [397, 7, 1.0], [397, 8, 1.0], [398, 8, 2.0], [399, 0, 4.0], [399, 1, 1.0], [399, 4, 4.0], [399, 5, 3.0], [399, 6, 2.0], [399, 8, 5.0], [400, 8, 1.0], [401, 0, 1.0], [402, 6, 1.0], [403, 6, 1.0], [404, 0, 1.0], [404, 1, 2.0], [404, 3, 1.0], [404, 5, 1.0], [404, 6, 2.0], [404, 8, 1.0], [405, 1, 1.0], [406, 8, 1.0], [407, 8, 4.0], [408, 0, 1.0], [409, 1, 9.0], [409, 8, 3.0], [410, 4, 1.0], [411, 0, 
1.0], [412, 2, 1.0], [413, 0, 2.0], [413, 3, 10.0], [413, 8, 8.0], [414, 1, 3.0], [414, 5, 1.0], [415, 6, 1.0], [416, 1, 1.0], [417, 1, 1.0]], "columns": [{"id": "PC.636", "metadata": null}, {"id": "PC.481", "metadata": null}, {"id": "PC.354", "metadata": null}, {"id": "PC.635", "metadata": null}, {"id": "PC.593", "metadata": null}, {"id": "PC.356", "metadata": null}, {"id": "PC.355", "metadata": null}, {"id": "PC.607", "metadata": null}, {"id": "PC.634", "metadata": null}], "generated_by": "QIIME 1.4.0-dev, svn revision 2835", "matrix_type": "sparse", "shape": [418, 9], "format_url": "http://biom-format.org", "date": "2012-03-14T20:48:29.112457", "type": "OTU table", "id": null, "matrix_element_type": "int"}"""
if __name__ == "__main__":
main()
| gpl-2.0 |
19kestier/taiga-back | taiga/projects/services/stats.py | 3 | 12983 | # Copyright (C) 2014 Andrey Antukh <niwi@niwi.be>
# Copyright (C) 2014 Jesús Espino <jespinog@gmail.com>
# Copyright (C) 2014 David Barragán <bameda@dbarragan.com>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.db.models import Q, Count
from django.apps import apps
import datetime
import copy
from taiga.projects.history.models import HistoryEntry
def _get_milestones_stats_for_backlog(project):
"""
Get collection of stats for each millestone of project.
Data returned by this function are used on backlog.
"""
current_evolution = 0
current_team_increment = 0
current_client_increment = 0
optimal_points_per_sprint = 0
if project.total_story_points and project.total_milestones:
optimal_points_per_sprint = project.total_story_points / project.total_milestones
future_team_increment = sum(project.future_team_increment.values())
future_client_increment = sum(project.future_client_increment.values())
milestones = project.milestones.order_by('estimated_start').\
prefetch_related("user_stories",
"user_stories__role_points",
"user_stories__role_points__points")
milestones = list(milestones)
milestones_count = len(milestones)
optimal_points = 0
team_increment = 0
client_increment = 0
for current_milestone in range(0, max(milestones_count, project.total_milestones)):
optimal_points = (project.total_story_points -
(optimal_points_per_sprint * current_milestone))
evolution = (project.total_story_points - current_evolution
if current_evolution is not None else None)
if current_milestone < milestones_count:
ml = milestones[current_milestone]
milestone_name = ml.name
team_increment = current_team_increment
client_increment = current_client_increment
current_evolution += sum(ml.closed_points.values())
current_team_increment += sum(ml.team_increment_points.values())
current_client_increment += sum(ml.client_increment_points.values())
else:
milestone_name = "Future sprint"
team_increment = current_team_increment + future_team_increment,
client_increment = current_client_increment + future_client_increment,
current_evolution = None
yield {
'name': milestone_name,
'optimal': optimal_points,
'evolution': evolution,
'team-increment': team_increment,
'client-increment': client_increment,
}
optimal_points -= optimal_points_per_sprint
evolution = (project.total_story_points - current_evolution
if current_evolution is not None and project.total_story_points else None)
yield {
'name': 'Project End',
'optimal': optimal_points,
'evolution': evolution,
'team-increment': team_increment,
'client-increment': client_increment,
}
def _count_status_object(status_obj, counting_storage):
if status_obj.id in counting_storage:
counting_storage[status_obj.id]['count'] += 1
else:
counting_storage[status_obj.id] = {}
counting_storage[status_obj.id]['count'] = 1
counting_storage[status_obj.id]['name'] = status_obj.name
counting_storage[status_obj.id]['id'] = status_obj.id
counting_storage[status_obj.id]['color'] = status_obj.color
def _count_owned_object(user_obj, counting_storage):
if user_obj:
if user_obj.id in counting_storage:
counting_storage[user_obj.id]['count'] += 1
else:
counting_storage[user_obj.id] = {}
counting_storage[user_obj.id]['count'] = 1
counting_storage[user_obj.id]['username'] = user_obj.username
counting_storage[user_obj.id]['name'] = user_obj.get_full_name()
counting_storage[user_obj.id]['id'] = user_obj.id
counting_storage[user_obj.id]['color'] = user_obj.color
else:
if 0 in counting_storage:
counting_storage[0]['count'] += 1
else:
counting_storage[0] = {}
counting_storage[0]['count'] = 1
counting_storage[0]['username'] = 'Unassigned'
counting_storage[0]['name'] = 'Unassigned'
counting_storage[0]['id'] = 0
counting_storage[0]['color'] = 'black'
def get_stats_for_project_issues(project):
    """
    Compute aggregate issue statistics for ``project``.

    Returns a dict with global counters (total/opened/closed), per-attribute
    breakdowns (type, status, priority, severity, owner, assignee) and, under
    ``last_four_weeks_days``, one data point per day for the last 28 days.
    """
    project_issues_stats = {
        'total_issues': 0,
        'opened_issues': 0,
        'closed_issues': 0,
        'issues_per_type': {},
        'issues_per_status': {},
        'issues_per_priority': {},
        'issues_per_severity': {},
        'issues_per_owner': {},
        'issues_per_assigned_to': {},
        'last_four_weeks_days': {
            'by_open_closed': {'open': [], 'closed': []},
            'by_severity': {},
            'by_priority': {},
            # NOTE(review): 'by_status' is initialised here but never filled
            # below -- confirm whether consumers expect it populated.
            'by_status': {},
        }
    }
    # select_related pulls the FKs dereferenced below in one query per issue set.
    issues = project.issues.all().select_related(
        'status', 'priority', 'type', 'severity', 'owner', 'assigned_to'
    )
    for issue in issues:
        project_issues_stats['total_issues'] += 1
        if issue.status.is_closed:
            project_issues_stats['closed_issues'] += 1
        else:
            project_issues_stats['opened_issues'] += 1
        _count_status_object(issue.type, project_issues_stats['issues_per_type'])
        _count_status_object(issue.status, project_issues_stats['issues_per_status'])
        _count_status_object(issue.priority, project_issues_stats['issues_per_priority'])
        _count_status_object(issue.severity, project_issues_stats['issues_per_severity'])
        _count_owned_object(issue.owner, project_issues_stats['issues_per_owner'])
        _count_owned_object(issue.assigned_to, project_issues_stats['issues_per_assigned_to'])
    # Seed the per-severity / per-priority daily series with the metadata
    # gathered above, minus the global 'count', plus an empty 'data' series.
    for severity in project_issues_stats['issues_per_severity'].values():
        project_issues_stats['last_four_weeks_days']['by_severity'][severity['id']] = copy.copy(severity)
        del(project_issues_stats['last_four_weeks_days']['by_severity'][severity['id']]['count'])
        project_issues_stats['last_four_weeks_days']['by_severity'][severity['id']]['data'] = []
    for priority in project_issues_stats['issues_per_priority'].values():
        project_issues_stats['last_four_weeks_days']['by_priority'][priority['id']] = copy.copy(priority)
        del(project_issues_stats['last_four_weeks_days']['by_priority'][priority['id']]['count'])
        project_issues_stats['last_four_weeks_days']['by_priority'][priority['id']]['data'] = []
    # Walk the last 28 days, oldest first (x = days back from today).
    # NOTE: the lambda parameter 'x' deliberately shadows the loop variable.
    for x in range(27, -1, -1):
        day = datetime.datetime.combine(datetime.date.today(), datetime.time(0, 0)) - datetime.timedelta(days=x)
        next_day = day + datetime.timedelta(days=1)
        # Issues created during [day, next_day).
        open_this_day = filter(lambda x: x.created_date.replace(tzinfo=None) >= day, issues)
        open_this_day = filter(lambda x: x.created_date.replace(tzinfo=None) < next_day, open_this_day)
        open_this_day = len(list(open_this_day))
        project_issues_stats['last_four_weeks_days']['by_open_closed']['open'].append(open_this_day)
        # Issues finished during [day, next_day).
        closed_this_day = filter(lambda x: x.finished_date, issues)
        closed_this_day = filter(lambda x: x.finished_date.replace(tzinfo=None) >= day, closed_this_day)
        closed_this_day = filter(lambda x: x.finished_date.replace(tzinfo=None) < next_day, closed_this_day)
        closed_this_day = len(list(closed_this_day))
        project_issues_stats['last_four_weeks_days']['by_open_closed']['closed'].append(closed_this_day)
        # Issues still open on this day: created before next_day and either
        # never finished or finished after the start of the day.
        opened_this_day = filter(lambda x: x.created_date.replace(tzinfo=None) < next_day, issues)
        opened_this_day = list(filter(lambda x: x.finished_date is None or x.finished_date.replace(tzinfo=None) > day, opened_this_day))
        for severity in project_issues_stats['last_four_weeks_days']['by_severity']:
            by_severity = filter(lambda x: x.severity_id == severity, opened_this_day)
            by_severity = len(list(by_severity))
            project_issues_stats['last_four_weeks_days']['by_severity'][severity]['data'].append(by_severity)
        for priority in project_issues_stats['last_four_weeks_days']['by_priority']:
            by_priority = filter(lambda x: x.priority_id == priority, opened_this_day)
            by_priority = len(list(by_priority))
            project_issues_stats['last_four_weeks_days']['by_priority'][priority]['data'].append(by_priority)
    return project_issues_stats
def get_stats_for_project(project):
    """
    Return the headline numbers for ``project``: totals, closed / defined /
    assigned points (overall and per role), the per-milestone series from
    :func:`_get_milestones_stats_for_backlog`, and the team speed (closed
    points per closed milestone).
    """
    # Re-fetch the project with the relations this computation touches.
    Project = apps.get_model("projects", "Project")
    project = Project.objects.prefetch_related(
        "milestones", "user_stories").get(id=project.id)
    points = project.calculated_points
    closed_points = sum(points["closed"].values())
    closed_milestones = project.milestones.filter(closed=True).count()
    speed = closed_points / closed_milestones if closed_milestones else 0
    return {
        'name': project.name,
        'total_milestones': project.total_milestones,
        'total_points': project.total_story_points,
        'closed_points': closed_points,
        'closed_points_per_role': points["closed"],
        'defined_points': sum(points["defined"].values()),
        'defined_points_per_role': points["defined"],
        'assigned_points': sum(points["assigned"].values()),
        'assigned_points_per_role': points["assigned"],
        'milestones': _get_milestones_stats_for_backlog(project),
        'speed': speed,
    }
def _get_closed_bugs_per_member_stats(project):
    """Return ``{assigned_to_id: closed issue count}`` for ``project``."""
    rows = (project.issues.filter(status__is_closed=True)
                          .values('assigned_to')
                          .annotate(count=Count('assigned_to'))
                          .order_by())
    return {row["assigned_to"]: row["count"] for row in rows}
def _get_iocaine_tasks_per_member_stats(project):
    """Return ``{assigned_to_id: iocaine task count}`` for ``project``."""
    rows = (project.tasks.filter(is_iocaine=True)
                         .values('assigned_to')
                         .annotate(count=Count('assigned_to'))
                         .order_by())
    return {row["assigned_to"]: row["count"] for row in rows}
def _get_wiki_changes_per_member_stats(project):
    """Return ``{user_pk: wiki edit count}`` over the project's wiki pages."""
    page_ids = project.wiki_pages.values_list("id", flat=True)
    wiki_page_keys = ["wiki.wikipage:%s" % page_id for page_id in page_ids]
    wiki_changes = {}
    for entry in HistoryEntry.objects.filter(key__in=wiki_page_keys).values('user'):
        # NOTE(review): assumes the 'user' value is a dict holding 'pk' --
        # verify against the HistoryEntry.user field definition.
        user_pk = entry["user"]["pk"]
        wiki_changes[user_pk] = wiki_changes.get(user_pk, 0) + 1
    return wiki_changes
def _get_created_bugs_per_member_stats(project):
    """Return ``{owner_id: created issue count}`` for ``project``."""
    rows = (project.issues.values('owner')
                          .annotate(count=Count('owner'))
                          .order_by())
    return {row["owner"]: row["count"] for row in rows}
def _get_closed_tasks_per_member_stats(project):
    """Return ``{assigned_to_id: closed task count}`` for ``project``."""
    rows = (project.tasks.filter(status__is_closed=True)
                         .values('assigned_to')
                         .annotate(count=Count('assigned_to'))
                         .order_by())
    return {row["assigned_to"]: row["count"] for row in rows}
def get_member_stats_for_project(project):
    """
    Return per-member statistics for ``project``.

    Every counter dict is seeded with 0 for each project member, so members
    with no activity still appear in the result.
    """
    member_ids = project.members.values_list("id", flat=True)
    zero_counters = {member_id: 0 for member_id in member_ids}
    collectors = {
        "closed_bugs": _get_closed_bugs_per_member_stats,
        "iocaine_tasks": _get_iocaine_tasks_per_member_stats,
        "wiki_changes": _get_wiki_changes_per_member_stats,
        "created_bugs": _get_created_bugs_per_member_stats,
        "closed_tasks": _get_closed_tasks_per_member_stats,
    }
    member_stats = {}
    for stat_name, collect in collectors.items():
        counters = zero_counters.copy()
        counters.update(collect(project))
        member_stats[stat_name] = counters
    return member_stats
| agpl-3.0 |
dago/ansible-modules-extras | cloud/centurylink/clc_server_snapshot.py | 43 | 14562 | #!/usr/bin/python
#
# Copyright (c) 2015 CenturyLink
#
# This file is part of Ansible.
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>
#
DOCUMENTATION = '''
module: clc_server_snapshot
short_description: Create, Delete and Restore server snapshots in CenturyLink Cloud.
description:
- An Ansible module to Create, Delete and Restore server snapshots in CenturyLink Cloud.
version_added: "2.0"
options:
server_ids:
description:
- The list of CLC server Ids.
required: True
expiration_days:
description:
- The number of days to keep the server snapshot before it expires.
default: 7
required: False
state:
description:
- The state to insure that the provided resources are in.
default: 'present'
required: False
choices: ['present', 'absent', 'restore']
wait:
description:
- Whether to wait for the provisioning tasks to finish before returning.
default: True
required: False
choices: [True, False]
requirements:
- python = 2.7
- requests >= 2.5.0
- clc-sdk
author: "CLC Runner (@clc-runner)"
notes:
- To use this module, it is required to set the below environment variables which enables access to the
Centurylink Cloud
- CLC_V2_API_USERNAME, the account login id for the centurylink cloud
- CLC_V2_API_PASSWORD, the account password for the centurylink cloud
- Alternatively, the module accepts the API token and account alias. The API token can be generated using the
CLC account login and password via the HTTP api call @ https://api.ctl.io/v2/authentication/login
- CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login
- CLC_ACCT_ALIAS, the account alias associated with the centurylink cloud
- Users can set CLC_V2_API_URL to specify an endpoint for pointing to a different CLC environment.
'''
EXAMPLES = '''
# Note - You must set the CLC_V2_API_USERNAME And CLC_V2_API_PASSWD Environment variables before running these examples
- name: Create server snapshot
clc_server_snapshot:
server_ids:
- UC1TEST-SVR01
- UC1TEST-SVR02
expiration_days: 10
wait: True
state: present
- name: Restore server snapshot
clc_server_snapshot:
server_ids:
- UC1TEST-SVR01
- UC1TEST-SVR02
wait: True
state: restore
- name: Delete server snapshot
clc_server_snapshot:
server_ids:
- UC1TEST-SVR01
- UC1TEST-SVR02
wait: True
state: absent
'''
RETURN = '''
changed:
description: A flag indicating if any change was made or not
returned: success
type: boolean
sample: True
server_ids:
description: The list of server ids that are changed
returned: success
type: list
sample:
[
"UC1TEST-SVR01",
"UC1TEST-SVR02"
]
'''
__version__ = '${version}'
from distutils.version import LooseVersion
# Optional third-party dependencies: record availability in flags instead of
# failing at import time, so ClcSnapshot.__init__ can emit a clean fail_json.
try:
    import requests
except ImportError:
    REQUESTS_FOUND = False
else:
    REQUESTS_FOUND = True
#
# Requires the clc-python-sdk.
# sudo pip install clc-sdk
#
try:
    import clc as clc_sdk
    from clc import CLCException
except ImportError:
    CLC_FOUND = False
    clc_sdk = None
else:
    CLC_FOUND = True
class ClcSnapshot:
    """
    Implements the clc_server_snapshot Ansible module: creates, deletes or
    restores CLC server snapshots through the clc-sdk, driven by the module
    parameters declared in :meth:`define_argument_spec`.
    """

    # Class-level SDK handle; ``clc_sdk`` is None when the import failed,
    # which __init__ reports via fail_json before the SDK is ever used.
    clc = clc_sdk
    module = None

    def __init__(self, module):
        """
        Construct module
        :param module: the AnsibleModule driving this run
        """
        self.module = module
        # Fail early with clear messages if optional dependencies recorded
        # at import time are missing or too old.
        if not CLC_FOUND:
            self.module.fail_json(
                msg='clc-python-sdk required for this module')
        if not REQUESTS_FOUND:
            self.module.fail_json(
                msg='requests library is required for this module')
        if requests.__version__ and LooseVersion(
                requests.__version__) < LooseVersion('2.5.0'):
            self.module.fail_json(
                msg='requests library version should be >= 2.5.0')
        self._set_user_agent(self.clc)

    def process_request(self):
        """
        Process the request - Main Code Path
        :return: Returns with either an exit_json or fail_json
        """
        p = self.module.params
        server_ids = p['server_ids']
        expiration_days = p['expiration_days']
        state = p['state']
        request_list = []
        changed = False
        changed_servers = []
        self._set_clc_credentials_from_env()
        # Dispatch on the requested state; each branch returns the same
        # (changed, request_list, changed_servers) triple.
        if state == 'present':
            changed, request_list, changed_servers = self.ensure_server_snapshot_present(
                server_ids=server_ids,
                expiration_days=expiration_days)
        elif state == 'absent':
            changed, request_list, changed_servers = self.ensure_server_snapshot_absent(
                server_ids=server_ids)
        elif state == 'restore':
            changed, request_list, changed_servers = self.ensure_server_snapshot_restore(
                server_ids=server_ids)
        self._wait_for_requests_to_complete(request_list)
        return self.module.exit_json(
            changed=changed,
            server_ids=changed_servers)

    def ensure_server_snapshot_present(self, server_ids, expiration_days):
        """
        Ensures the given set of server_ids have the snapshots created
        :param server_ids: The list of server_ids to create the snapshot
        :param expiration_days: The number of days to keep the snapshot
        :return: (changed, request_list, changed_servers)
                 changed: A flag indicating whether any change was made
                 request_list: the list of clc request objects from CLC API call
                 changed_servers: The list of servers ids that are modified
        """
        request_list = []
        changed = False
        servers = self._get_servers_from_clc(
            server_ids,
            'Failed to obtain server list from the CLC API')
        # Only servers with no existing snapshot are changed.
        servers_to_change = [
            server for server in servers if len(
                server.GetSnapshots()) == 0]
        for server in servers_to_change:
            changed = True
            # In check mode, report the change without calling the API.
            if not self.module.check_mode:
                request = self._create_server_snapshot(server, expiration_days)
                request_list.append(request)
        changed_servers = [
            server.id for server in servers_to_change if server.id]
        return changed, request_list, changed_servers

    def _create_server_snapshot(self, server, expiration_days):
        """
        Create the snapshot for the CLC server
        :param server: the CLC server object
        :param expiration_days: The number of days to keep the snapshot
        :return: the create request object from CLC API Call
        """
        result = None
        try:
            result = server.CreateSnapshot(
                delete_existing=True,
                expiration_days=expiration_days)
        except CLCException as ex:
            self.module.fail_json(msg='Failed to create snapshot for server : {0}. {1}'.format(
                server.id, ex.response_text
            ))
        return result

    def ensure_server_snapshot_absent(self, server_ids):
        """
        Ensures the given set of server_ids have the snapshots removed
        :param server_ids: The list of server_ids to delete the snapshot
        :return: (changed, request_list, changed_servers)
                 changed: A flag indicating whether any change was made
                 request_list: the list of clc request objects from CLC API call
                 changed_servers: The list of servers ids that are modified
        """
        request_list = []
        changed = False
        servers = self._get_servers_from_clc(
            server_ids,
            'Failed to obtain server list from the CLC API')
        # Only servers that actually have a snapshot are changed.
        servers_to_change = [
            server for server in servers if len(
                server.GetSnapshots()) > 0]
        for server in servers_to_change:
            changed = True
            if not self.module.check_mode:
                request = self._delete_server_snapshot(server)
                request_list.append(request)
        changed_servers = [
            server.id for server in servers_to_change if server.id]
        return changed, request_list, changed_servers

    def _delete_server_snapshot(self, server):
        """
        Delete snapshot for the CLC server
        :param server: the CLC server object
        :return: the delete snapshot request object from CLC API
        """
        result = None
        try:
            result = server.DeleteSnapshot()
        except CLCException as ex:
            self.module.fail_json(msg='Failed to delete snapshot for server : {0}. {1}'.format(
                server.id, ex.response_text
            ))
        return result

    def ensure_server_snapshot_restore(self, server_ids):
        """
        Ensures the given set of server_ids have the snapshots restored
        :param server_ids: The list of server_ids to delete the snapshot
        :return: (changed, request_list, changed_servers)
                 changed: A flag indicating whether any change was made
                 request_list: the list of clc request objects from CLC API call
                 changed_servers: The list of servers ids that are modified
        """
        request_list = []
        changed = False
        servers = self._get_servers_from_clc(
            server_ids,
            'Failed to obtain server list from the CLC API')
        # Restoring only makes sense for servers that have a snapshot.
        servers_to_change = [
            server for server in servers if len(
                server.GetSnapshots()) > 0]
        for server in servers_to_change:
            changed = True
            if not self.module.check_mode:
                request = self._restore_server_snapshot(server)
                request_list.append(request)
        changed_servers = [
            server.id for server in servers_to_change if server.id]
        return changed, request_list, changed_servers

    def _restore_server_snapshot(self, server):
        """
        Restore snapshot for the CLC server
        :param server: the CLC server object
        :return: the restore snapshot request object from CLC API
        """
        result = None
        try:
            result = server.RestoreSnapshot()
        except CLCException as ex:
            self.module.fail_json(msg='Failed to restore snapshot for server : {0}. {1}'.format(
                server.id, ex.response_text
            ))
        return result

    def _wait_for_requests_to_complete(self, requests_lst):
        """
        Waits until the CLC requests are complete if the wait argument is True
        :param requests_lst: The list of CLC request objects
        :return: none
        """
        if not self.module.params['wait']:
            return
        for request in requests_lst:
            request.WaitUntilComplete()
            for request_details in request.requests:
                if request_details.Status() != 'succeeded':
                    self.module.fail_json(
                        msg='Unable to process server snapshot request')

    @staticmethod
    def define_argument_spec():
        """
        This function defines the dictionary object required for
        package module
        :return: the package dictionary object
        """
        # NOTE(review): expiration_days and wait carry no explicit type
        # (type='int' / type='bool'); values supplied from playbooks may
        # therefore arrive as strings -- confirm how the targeted Ansible
        # versions coerce them before relying on the truthiness of 'wait'
        # in _wait_for_requests_to_complete.
        argument_spec = dict(
            server_ids=dict(type='list', required=True),
            expiration_days=dict(default=7),
            wait=dict(default=True),
            state=dict(
                default='present',
                choices=[
                    'present',
                    'absent',
                    'restore']),
        )
        return argument_spec

    def _get_servers_from_clc(self, server_list, message):
        """
        Internal function to fetch list of CLC server objects from a list of server ids
        :param server_list: The list of server ids
        :param message: The error message to throw in case of any error
        :return the list of CLC server objects
        """
        try:
            return self.clc.v2.Servers(server_list).servers
        except CLCException as ex:
            return self.module.fail_json(msg=message + ': %s' % ex)

    def _set_clc_credentials_from_env(self):
        """
        Set the CLC Credentials on the sdk by reading environment variables
        :return: none
        """
        # NOTE(review): 'os' is not imported in this file directly; it is
        # presumably provided by 'from ansible.module_utils.basic import *'
        # at the bottom of the file -- verify.
        env = os.environ
        v2_api_token = env.get('CLC_V2_API_TOKEN', False)
        v2_api_username = env.get('CLC_V2_API_USERNAME', False)
        v2_api_passwd = env.get('CLC_V2_API_PASSWD', False)
        clc_alias = env.get('CLC_ACCT_ALIAS', False)
        api_url = env.get('CLC_V2_API_URL', False)
        if api_url:
            self.clc.defaults.ENDPOINT_URL_V2 = api_url
        # Prefer token+alias authentication; fall back to username/password.
        if v2_api_token and clc_alias:
            self.clc._LOGIN_TOKEN_V2 = v2_api_token
            self.clc._V2_ENABLED = True
            self.clc.ALIAS = clc_alias
        elif v2_api_username and v2_api_passwd:
            self.clc.v2.SetCredentials(
                api_username=v2_api_username,
                api_passwd=v2_api_passwd)
        else:
            return self.module.fail_json(
                msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD "
                    "environment variables")

    @staticmethod
    def _set_user_agent(clc):
        # Attach a requests session whose Api-Client / User-Agent headers
        # identify this Ansible module to the CLC API.
        if hasattr(clc, 'SetRequestsSession'):
            agent_string = "ClcAnsibleModule/" + __version__
            ses = requests.Session()
            ses.headers.update({"Api-Client": agent_string})
            ses.headers['User-Agent'] += " " + agent_string
            clc.SetRequestsSession(ses)
def main():
    """
    Module entry point: build the AnsibleModule from the declared argument
    spec and hand control to ClcSnapshot.
    :return: None
    """
    argument_spec = ClcSnapshot.define_argument_spec()
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
    )
    ClcSnapshot(module).process_request()
from ansible.module_utils.basic import *
# Execute only when run as a script (Ansible executes modules as __main__).
if __name__ == '__main__':
    main()
| gpl-3.0 |
nerzhul/ansible | lib/ansible/playbook/attribute.py | 46 | 3856 | # (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from copy import deepcopy
class Attribute:

    def __init__(self, isa=None, private=False, default=None, required=False, listof=None, priority=0, class_type=None, always_post_validate=False, inherit=True):
        """
        :class:`Attribute` specifies constraints for attributes of objects which
        derive from playbook data.  The attributes of the object are basically
        a schema for the yaml playbook.

        :kwarg isa: The type of the attribute.  Allowable values are a string
            representation of any yaml basic datatype, python class, or percent.
            (Enforced at post-validation time).
        :kwarg private: (not used)
        :kwarg default: Default value if unspecified in the YAML document.
        :kwarg required: Whether or not the YAML document must contain this field.
            If the attribute is None when post-validated, an error will be raised.
        :kwarg listof: If isa is set to "list", this can optionally be set to
            ensure that all elements in the list are of the given type. Valid
            values here are the same as those for isa.
        :kwarg priority: The order in which the fields should be parsed. Generally
            this does not need to be set, it is for rare situations where another
            field depends on the fact that another field was parsed first.
        :kwarg class_type: If isa is set to "class", this can be optionally set to
            a class (not a string name). The YAML data for this field will be
            passed to the __init__ method of that class during post validation and
            the field will be an instance of that class.
        :kwarg always_post_validate: Controls whether a field should be post
            validated or not (default: True).
        :kwarg inherit: A boolean value, which controls whether the object
            containing this field should attempt to inherit the value from its
            parent object if the local value is None.
        """
        self.isa = isa
        self.private = private
        self.required = required
        self.listof = listof
        self.priority = priority
        self.class_type = class_type
        self.always_post_validate = always_post_validate
        self.inherit = inherit

        # FIX: the original assigned ``self.default = default`` twice (an
        # unconditional dead store followed by this if/else); the redundant
        # first assignment has been removed.  Mutable container defaults are
        # deep-copied so Attribute instances never share default state.
        if default is not None and self.isa in ('list', 'dict', 'set'):
            self.default = deepcopy(default)
        else:
            self.default = default

    # Attributes compare and order by ``priority`` alone.
    # NB: higher priority numbers sort first (see __lt__).
    # NOTE(review): defining __eq__ without __hash__ leaves the class
    # unhashable on Python 3 -- confirm instances are never used as dict/set
    # keys before changing this.
    def __eq__(self, other):
        return other.priority == self.priority

    def __ne__(self, other):
        return other.priority != self.priority

    def __lt__(self, other):
        return other.priority < self.priority

    def __gt__(self, other):
        return other.priority > self.priority

    def __le__(self, other):
        return other.priority <= self.priority

    def __ge__(self, other):
        return other.priority >= self.priority
class FieldAttribute(Attribute):
    # Adds no behavior of its own; presumably serves as a distinct type
    # marker for playbook field definitions -- verify against the consumers
    # of this class elsewhere in the codebase.
    pass
| gpl-3.0 |
smart-developerr/my-first-blog | Lib/site-packages/django/db/backends/oracle/introspection.py | 517 | 11463 | import cx_Oracle
from django.db.backends.base.introspection import (
BaseDatabaseIntrospection, FieldInfo, TableInfo,
)
from django.utils.encoding import force_text
class DatabaseIntrospection(BaseDatabaseIntrospection):
    # Maps type objects to Django Field types.
    data_types_reverse = {
        cx_Oracle.BLOB: 'BinaryField',
        cx_Oracle.CLOB: 'TextField',
        cx_Oracle.DATETIME: 'DateField',
        cx_Oracle.FIXED_CHAR: 'CharField',
        cx_Oracle.NCLOB: 'TextField',
        cx_Oracle.NUMBER: 'DecimalField',
        cx_Oracle.STRING: 'CharField',
        cx_Oracle.TIMESTAMP: 'DateTimeField',
    }
    # These cx_Oracle type constants only exist on some driver versions,
    # hence the AttributeError guards.
    try:
        data_types_reverse[cx_Oracle.NATIVE_FLOAT] = 'FloatField'
    except AttributeError:
        pass
    try:
        data_types_reverse[cx_Oracle.UNICODE] = 'CharField'
    except AttributeError:
        pass
    # Counter mixed into the query in get_table_description() so each call
    # issues a textually different statement (cache busting, per the name).
    cache_bust_counter = 1
def get_field_type(self, data_type, description):
# If it's a NUMBER with scale == 0, consider it an IntegerField
if data_type == cx_Oracle.NUMBER:
precision, scale = description[4:6]
if scale == 0:
if precision > 11:
return 'BigIntegerField'
elif precision == 1:
return 'BooleanField'
else:
return 'IntegerField'
elif scale == -127:
return 'FloatField'
return super(DatabaseIntrospection, self).get_field_type(data_type, description)
def get_table_list(self, cursor):
"""
Returns a list of table and view names in the current database.
"""
cursor.execute("SELECT TABLE_NAME, 't' FROM USER_TABLES UNION ALL "
"SELECT VIEW_NAME, 'v' FROM USER_VIEWS")
return [TableInfo(row[0].lower(), row[1]) for row in cursor.fetchall()]
def get_table_description(self, cursor, table_name):
"Returns a description of the table, with the DB-API cursor.description interface."
self.cache_bust_counter += 1
cursor.execute("SELECT * FROM {} WHERE ROWNUM < 2 AND {} > 0".format(
self.connection.ops.quote_name(table_name),
self.cache_bust_counter))
description = []
for desc in cursor.description:
name = force_text(desc[0]) # cx_Oracle always returns a 'str' on both Python 2 and 3
name = name % {} # cx_Oracle, for some reason, doubles percent signs.
description.append(FieldInfo(*(name.lower(),) + desc[1:]))
return description
def table_name_converter(self, name):
"Table name comparison is case insensitive under Oracle"
return name.lower()
def _name_to_index(self, cursor, table_name):
"""
Returns a dictionary of {field_name: field_index} for the given table.
Indexes are 0-based.
"""
return {d[0]: i for i, d in enumerate(self.get_table_description(cursor, table_name))}
def get_relations(self, cursor, table_name):
"""
Returns a dictionary of {field_name: (field_name_other_table, other_table)}
representing all relationships to the given table.
"""
table_name = table_name.upper()
cursor.execute("""
SELECT ta.column_name, tb.table_name, tb.column_name
FROM user_constraints, USER_CONS_COLUMNS ca, USER_CONS_COLUMNS cb,
user_tab_cols ta, user_tab_cols tb
WHERE user_constraints.table_name = %s AND
ta.table_name = user_constraints.table_name AND
ta.column_name = ca.column_name AND
ca.table_name = ta.table_name AND
user_constraints.constraint_name = ca.constraint_name AND
user_constraints.r_constraint_name = cb.constraint_name AND
cb.table_name = tb.table_name AND
cb.column_name = tb.column_name AND
ca.position = cb.position""", [table_name])
relations = {}
for row in cursor.fetchall():
relations[row[0].lower()] = (row[2].lower(), row[1].lower())
return relations
def get_key_columns(self, cursor, table_name):
cursor.execute("""
SELECT ccol.column_name, rcol.table_name AS referenced_table, rcol.column_name AS referenced_column
FROM user_constraints c
JOIN user_cons_columns ccol
ON ccol.constraint_name = c.constraint_name
JOIN user_cons_columns rcol
ON rcol.constraint_name = c.r_constraint_name
WHERE c.table_name = %s AND c.constraint_type = 'R'""", [table_name.upper()])
return [tuple(cell.lower() for cell in row)
for row in cursor.fetchall()]
def get_indexes(self, cursor, table_name):
sql = """
SELECT LOWER(uic1.column_name) AS column_name,
CASE user_constraints.constraint_type
WHEN 'P' THEN 1 ELSE 0
END AS is_primary_key,
CASE user_indexes.uniqueness
WHEN 'UNIQUE' THEN 1 ELSE 0
END AS is_unique
FROM user_constraints, user_indexes, user_ind_columns uic1
WHERE user_constraints.constraint_type (+) = 'P'
AND user_constraints.index_name (+) = uic1.index_name
AND user_indexes.uniqueness (+) = 'UNIQUE'
AND user_indexes.index_name (+) = uic1.index_name
AND uic1.table_name = UPPER(%s)
AND uic1.column_position = 1
AND NOT EXISTS (
SELECT 1
FROM user_ind_columns uic2
WHERE uic2.index_name = uic1.index_name
AND uic2.column_position = 2
)
"""
cursor.execute(sql, [table_name])
indexes = {}
for row in cursor.fetchall():
indexes[row[0]] = {'primary_key': bool(row[1]),
'unique': bool(row[2])}
return indexes
def get_constraints(self, cursor, table_name):
    """
    Retrieves any constraints or keys (unique, pk, fk, check, index) across one or more columns.
    """
    constraints = {}

    def _add_column(name, column, **attrs):
        # Create the record on first sight of a constraint, then append the
        # column; every query below is ordered by column position, so the
        # 'columns' lists come out in declaration order.
        if name not in constraints:
            record = {"columns": []}
            record.update(attrs)
            constraints[name] = record
        constraints[name]['columns'].append(column)

    # Primary keys and unique constraints; both are always backed by an
    # index (hence the INNER JOIN and index=True below).
    cursor.execute("""
SELECT
user_constraints.constraint_name,
LOWER(cols.column_name) AS column_name,
CASE user_constraints.constraint_type
WHEN 'P' THEN 1
ELSE 0
END AS is_primary_key,
CASE user_indexes.uniqueness
WHEN 'UNIQUE' THEN 1
ELSE 0
END AS is_unique,
CASE user_constraints.constraint_type
WHEN 'C' THEN 1
ELSE 0
END AS is_check_constraint
FROM
user_constraints
INNER JOIN
user_indexes ON user_indexes.index_name = user_constraints.index_name
LEFT OUTER JOIN
user_cons_columns cols ON user_constraints.constraint_name = cols.constraint_name
WHERE
(
user_constraints.constraint_type = 'P' OR
user_constraints.constraint_type = 'U'
)
AND user_constraints.table_name = UPPER(%s)
ORDER BY cols.position
""", [table_name])
    for constraint, column, pk, unique, check in cursor.fetchall():
        _add_column(constraint, column, primary_key=pk, unique=unique,
                    foreign_key=None, check=check, index=True)

    # Check constraints.
    cursor.execute("""
SELECT
cons.constraint_name,
LOWER(cols.column_name) AS column_name
FROM
user_constraints cons
LEFT OUTER JOIN
user_cons_columns cols ON cons.constraint_name = cols.constraint_name
WHERE
cons.constraint_type = 'C' AND
cons.table_name = UPPER(%s)
ORDER BY cols.position
""", [table_name])
    for constraint, column in cursor.fetchall():
        _add_column(constraint, column, primary_key=False, unique=False,
                    foreign_key=None, check=True, index=False)

    # Foreign key constraints; the referenced table/column come from the
    # constraint named by r_constraint_name.
    cursor.execute("""
SELECT
cons.constraint_name,
LOWER(cols.column_name) AS column_name,
LOWER(rcons.table_name),
LOWER(rcols.column_name)
FROM
user_constraints cons
INNER JOIN
user_constraints rcons ON cons.r_constraint_name = rcons.constraint_name
INNER JOIN
user_cons_columns rcols ON rcols.constraint_name = rcons.constraint_name
LEFT OUTER JOIN
user_cons_columns cols ON cons.constraint_name = cols.constraint_name
WHERE
cons.constraint_type = 'R' AND
cons.table_name = UPPER(%s)
ORDER BY cols.position
""", [table_name])
    for constraint, column, other_table, other_column in cursor.fetchall():
        _add_column(constraint, column, primary_key=False, unique=False,
                    foreign_key=(other_table, other_column), check=False,
                    index=False)

    # Standalone indexes, i.e. those not backing any constraint.
    cursor.execute("""
SELECT
index_name,
LOWER(column_name)
FROM
user_ind_columns cols
WHERE
table_name = UPPER(%s) AND
NOT EXISTS (
SELECT 1
FROM user_constraints cons
WHERE cols.index_name = cons.index_name
)
ORDER BY cols.column_position
""", [table_name])
    for constraint, column in cursor.fetchall():
        _add_column(constraint, column, primary_key=False, unique=False,
                    foreign_key=None, check=False, index=True)

    return constraints
| gpl-3.0 |
8u1a/plaso | plaso/parsers/olecf.py | 2 | 3143 | # -*- coding: utf-8 -*-
"""Parser for OLE Compound Files (OLECF)."""
import logging
import pyolecf
from plaso import dependencies
from plaso.lib import errors
from plaso.lib import specification
from plaso.parsers import interface
from plaso.parsers import manager
dependencies.CheckModuleVersion(u'pyolecf')
class OleCfParser(interface.SingleFileBaseParser):
    """Parses OLE Compound Files (OLECF)."""

    _INITIAL_FILE_OFFSET = None

    NAME = u'olecf'
    DESCRIPTION = u'Parser for OLE Compound Files (OLECF).'

    _plugin_classes = {}

    def __init__(self):
        """Initializes a parser object."""
        super(OleCfParser, self).__init__()
        # Robustness fix: make sure the attribute always exists, even when
        # no plugin named 'olecf_default' is registered (previously
        # ParseFileObject would raise AttributeError in that case).
        self._default_plugin = None
        self._plugins = OleCfParser.GetPluginObjects()
        # Pull the catch-all plugin out of the list so it is only used as a
        # fallback when no specialized plugin recognizes the file.
        for list_index, plugin_object in enumerate(self._plugins):
            if plugin_object.NAME == u'olecf_default':
                self._default_plugin = self._plugins.pop(list_index)
                break

    @classmethod
    def GetFormatSpecification(cls):
        """Retrieves the format specification."""
        format_specification = specification.FormatSpecification(cls.NAME)

        # OLECF signature.
        format_specification.AddNewSignature(
            b'\xd0\xcf\x11\xe0\xa1\xb1\x1a\xe1', offset=0)

        # OLECF beta signature.
        format_specification.AddNewSignature(
            b'\x0e\x11\xfc\x0d\xd0\xcf\x11\x0e', offset=0)

        return format_specification

    def ParseFileObject(self, parser_mediator, file_object, **kwargs):
        """Parses an OLE Compound File (OLECF) file-like object.

        Args:
          parser_mediator: A parser mediator object (instance of ParserMediator).
          file_object: A file-like object.

        Raises:
          UnableToParseFile: when the file cannot be parsed.
        """
        olecf_file = pyolecf.file()
        olecf_file.set_ascii_codepage(parser_mediator.codepage)

        try:
            olecf_file.open_file_object(file_object)
        except IOError as exception:
            raise errors.UnableToParseFile(
                u'[{0:s}] unable to parse file {1:s}: {2:s}'.format(
                    self.NAME, parser_mediator.GetDisplayName(), exception))

        # Get a list of all root items from the OLE CF file.
        root_item = olecf_file.root_item
        item_names = [item.name for item in root_item.sub_items]

        # Try every plugin (except the default one); only if none of them
        # succeeds is the default plugin used.
        parsed = False
        for plugin_object in self._plugins:
            try:
                plugin_object.UpdateChainAndProcess(
                    parser_mediator, root_item=root_item, item_names=item_names)
                # Bug fix: record the success. Previously `parsed` was never
                # set to True, so the default plugin always ran on top of
                # whatever the specialized plugins had already produced.
                parsed = True
            except errors.WrongPlugin:
                logging.debug(
                    u'[{0:s}] plugin: {1:s} cannot parse the OLECF file: {2:s}'.format(
                        self.NAME, plugin_object.NAME,
                        parser_mediator.GetDisplayName()))

        # Fall back to the default OLECF plugin only when nothing parsed.
        if not parsed and self._default_plugin:
            self._default_plugin.UpdateChainAndProcess(
                parser_mediator, root_item=root_item, item_names=item_names)

        olecf_file.close()
manager.ParsersManager.RegisterParser(OleCfParser)
| apache-2.0 |
ValentmSTEM/gps_v1 | tornado/wsgi.py | 9 | 11744 | #!/usr/bin/env python
#
# Copyright 2009 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""WSGI support for the Tornado web framework.
WSGI is the Python standard for web servers, and allows for interoperability
between Tornado and other Python web frameworks and servers. This module
provides WSGI support in two ways:
* `WSGIApplication` is a version of `tornado.web.Application` that can run
inside a WSGI server. This is useful for running a Tornado app on another
HTTP server, such as Google App Engine. See the `WSGIApplication` class
documentation for limitations that apply.
* `WSGIContainer` lets you run other WSGI applications and frameworks on the
Tornado HTTP server. For example, with this class you can mix Django
and Tornado handlers in a single server.
"""
import Cookie
import cgi
import httplib
import logging
import sys
import time
import tornado
import urllib
from tornado import escape
from tornado import httputil
from tornado import web
from tornado.escape import native_str, utf8
from tornado.util import b
try:
from io import BytesIO # python 3
except ImportError:
from cStringIO import StringIO as BytesIO # python 2
class WSGIApplication(web.Application):
    """A WSGI equivalent of `tornado.web.Application`.

    WSGIApplication is very similar to web.Application, except no
    asynchronous methods are supported (since WSGI does not support
    non-blocking requests properly). If you call self.flush() or other
    asynchronous methods in your request handlers running in a
    WSGIApplication, we throw an exception.

    Example usage::

        import tornado.web
        import tornado.wsgi
        import wsgiref.simple_server

        class MainHandler(tornado.web.RequestHandler):
            def get(self):
                self.write("Hello, world")

        if __name__ == "__main__":
            application = tornado.wsgi.WSGIApplication([
                (r"/", MainHandler),
            ])
            server = wsgiref.simple_server.make_server('', 8888, application)
            server.serve_forever()

    See the 'appengine' demo for an example of using this module to run
    a Tornado app on Google AppEngine.

    Since no asynchronous methods are available for WSGI applications, the
    httpclient and auth modules are both not available for WSGI applications.
    We support the same interface, but handlers running in a WSGIApplication
    do not support flush() or asynchronous methods.
    """
    def __init__(self, handlers=None, default_host="", **settings):
        # Force synchronous operation: no output transforms (chunked
        # encoding/gzip belong to the hosting WSGI server) and wsgi=True so
        # handlers refuse async-only calls.
        web.Application.__init__(self, handlers, default_host, transforms=[],
                                 wsgi=True, **settings)

    def __call__(self, environ, start_response):
        # WSGI entry point: run the matched handler synchronously, then
        # translate the buffered result into (status, headers, body-iterable).
        handler = web.Application.__call__(self, HTTPRequest(environ))
        # The handler must have finished; WSGI cannot wait for async work.
        assert handler._finished
        status = str(handler._status_code) + " " + \
            httplib.responses[handler._status_code]
        headers = handler._headers.items()
        # Each Set-Cookie must be emitted as its own header line.
        for cookie_dict in getattr(handler, "_new_cookies", []):
            for cookie in cookie_dict.values():
                headers.append(("Set-Cookie", cookie.OutputString(None)))
        # WSGI requires header names/values to be native strings.
        start_response(status,
                       [(native_str(k), native_str(v)) for (k, v) in headers])
        return handler._write_buffer
class HTTPRequest(object):
    """Mimics `tornado.httpserver.HTTPRequest` for WSGI applications.

    Built entirely from a WSGI ``environ`` dict, so Tornado request
    handlers can run unmodified under a WSGI server.
    """
    def __init__(self, environ):
        """Parses the given WSGI environ to construct the request."""
        self.method = environ["REQUEST_METHOD"]
        # SCRIPT_NAME + PATH_INFO together form the full (re-quoted) path.
        self.path = urllib.quote(environ.get("SCRIPT_NAME", ""))
        self.path += urllib.quote(environ.get("PATH_INFO", ""))
        self.uri = self.path
        self.arguments = {}
        self.query = environ.get("QUERY_STRING", "")
        if self.query:
            self.uri += "?" + self.query
            arguments = cgi.parse_qs(self.query)
            for name, values in arguments.iteritems():
                # Drop empty values, matching tornado's native behavior.
                values = [v for v in values if v]
                if values: self.arguments[name] = values
        # WSGI does not expose the actual HTTP version; assume 1.1.
        self.version = "HTTP/1.1"
        self.headers = httputil.HTTPHeaders()
        # CONTENT_TYPE/CONTENT_LENGTH are special-cased in WSGI: they have
        # no HTTP_ prefix in the environ.
        if environ.get("CONTENT_TYPE"):
            self.headers["Content-Type"] = environ["CONTENT_TYPE"]
        if environ.get("CONTENT_LENGTH"):
            self.headers["Content-Length"] = environ["CONTENT_LENGTH"]
        # All other headers arrive as HTTP_FOO_BAR -> "Foo-Bar".
        for key in environ:
            if key.startswith("HTTP_"):
                self.headers[key[5:].replace("_", "-")] = environ[key]
        # Read exactly Content-Length bytes of the request body, if any.
        if self.headers.get("Content-Length"):
            self.body = environ["wsgi.input"].read(
                int(self.headers["Content-Length"]))
        else:
            self.body = ""
        self.protocol = environ["wsgi.url_scheme"]
        self.remote_ip = environ.get("REMOTE_ADDR", "")
        if environ.get("HTTP_HOST"):
            self.host = environ["HTTP_HOST"]
        else:
            self.host = environ["SERVER_NAME"]

        # Parse request body (form encodings) into arguments/files.
        self.files = {}
        content_type = self.headers.get("Content-Type", "")
        if content_type.startswith("application/x-www-form-urlencoded"):
            for name, values in cgi.parse_qs(self.body).iteritems():
                self.arguments.setdefault(name, []).extend(values)
        elif content_type.startswith("multipart/form-data"):
            if 'boundary=' in content_type:
                boundary = content_type.split('boundary=',1)[1]
                if boundary:
                    httputil.parse_multipart_form_data(
                        utf8(boundary), self.body, self.arguments, self.files)
            else:
                logging.warning("Invalid multipart/form-data")

        self._start_time = time.time()
        self._finish_time = None

    def supports_http_1_1(self):
        """Returns True if this request supports HTTP/1.1 semantics"""
        return self.version == "HTTP/1.1"

    @property
    def cookies(self):
        """A dictionary of Cookie.Morsel objects."""
        # Parsed lazily and cached; a malformed Cookie header yields None.
        if not hasattr(self, "_cookies"):
            self._cookies = Cookie.SimpleCookie()
            if "Cookie" in self.headers:
                try:
                    self._cookies.load(
                        native_str(self.headers["Cookie"]))
                except Exception:
                    self._cookies = None
        return self._cookies

    def full_url(self):
        """Reconstructs the full URL for this request."""
        return self.protocol + "://" + self.host + self.uri

    def request_time(self):
        """Returns the amount of time it took for this request to execute."""
        # While the request is in flight, report elapsed time so far.
        if self._finish_time is None:
            return time.time() - self._start_time
        else:
            return self._finish_time - self._start_time
class WSGIContainer(object):
    r"""Makes a WSGI-compatible function runnable on Tornado's HTTP server.

    Wrap a WSGI function in a WSGIContainer and pass it to HTTPServer to
    run it. For example::

        def simple_app(environ, start_response):
            status = "200 OK"
            response_headers = [("Content-type", "text/plain")]
            start_response(status, response_headers)
            return ["Hello world!\n"]

        container = tornado.wsgi.WSGIContainer(simple_app)
        http_server = tornado.httpserver.HTTPServer(container)
        http_server.listen(8888)
        tornado.ioloop.IOLoop.instance().start()

    This class is intended to let other frameworks (Django, web.py, etc)
    run on the Tornado HTTP server and I/O loop.

    The `tornado.web.FallbackHandler` class is often useful for mixing
    Tornado and WSGI apps in the same server. See
    https://github.com/bdarnell/django-tornado-demo for a complete example.
    """
    def __init__(self, wsgi_application):
        self.wsgi_application = wsgi_application

    def __call__(self, request):
        # Adapt a Tornado HTTPRequest into one synchronous WSGI call, then
        # write the complete (fully buffered) response to the client.
        data = {}
        response = []
        def start_response(status, response_headers, exc_info=None):
            # Capture status/headers; the returned callable is the legacy
            # `write` hook required by the WSGI spec.
            data["status"] = status
            data["headers"] = response_headers
            return response.append
        app_response = self.wsgi_application(
            WSGIContainer.environ(request), start_response)
        response.extend(app_response)
        body = b("").join(response)
        # Per the WSGI spec, close() must be called on the app iterable
        # if it provides one.
        if hasattr(app_response, "close"):
            app_response.close()
        if not data: raise Exception("WSGI app did not call start_response")

        status_code = int(data["status"].split()[0])
        headers = data["headers"]
        header_set = set(k.lower() for (k,v) in headers)
        body = escape.utf8(body)
        # Fill in required response headers the application did not set.
        if "content-length" not in header_set:
            headers.append(("Content-Length", str(len(body))))
        if "content-type" not in header_set:
            headers.append(("Content-Type", "text/html; charset=UTF-8"))
        if "server" not in header_set:
            headers.append(("Server", "TornadoServer/%s" % tornado.version))

        # Assemble the raw HTTP/1.1 response bytes by hand.
        parts = [escape.utf8("HTTP/1.1 " + data["status"] + "\r\n")]
        for key, value in headers:
            parts.append(escape.utf8(key) + b(": ") + escape.utf8(value) + b("\r\n"))
        parts.append(b("\r\n"))
        parts.append(body)
        request.write(b("").join(parts))
        request.finish()
        self._log(status_code, request)

    @staticmethod
    def environ(request):
        """Converts a `tornado.httpserver.HTTPRequest` to a WSGI environment.
        """
        # Split an explicit host:port; otherwise infer the default port
        # from the URL scheme.
        hostport = request.host.split(":")
        if len(hostport) == 2:
            host = hostport[0]
            port = int(hostport[1])
        else:
            host = request.host
            port = 443 if request.protocol == "https" else 80
        environ = {
            "REQUEST_METHOD": request.method,
            "SCRIPT_NAME": "",
            "PATH_INFO": urllib.unquote(request.path),
            "QUERY_STRING": request.query,
            "REMOTE_ADDR": request.remote_ip,
            "SERVER_NAME": host,
            "SERVER_PORT": str(port),
            "SERVER_PROTOCOL": request.version,
            "wsgi.version": (1, 0),
            "wsgi.url_scheme": request.protocol,
            "wsgi.input": BytesIO(escape.utf8(request.body)),
            "wsgi.errors": sys.stderr,
            "wsgi.multithread": False,
            "wsgi.multiprocess": True,
            "wsgi.run_once": False,
        }
        # WSGI mandates these two headers appear without the HTTP_ prefix.
        # NOTE(review): pop() mutates request.headers as a side effect —
        # presumably intentional so they are not duplicated below.
        if "Content-Type" in request.headers:
            environ["CONTENT_TYPE"] = request.headers.pop("Content-Type")
        if "Content-Length" in request.headers:
            environ["CONTENT_LENGTH"] = request.headers.pop("Content-Length")
        for key, value in request.headers.iteritems():
            environ["HTTP_" + key.replace("-", "_").upper()] = value
        return environ

    def _log(self, status_code, request):
        # Mirror tornado.web's access logging: log level by status class.
        if status_code < 400:
            log_method = logging.info
        elif status_code < 500:
            log_method = logging.warning
        else:
            log_method = logging.error
        request_time = 1000.0 * request.request_time()
        summary = request.method + " " + request.uri + " (" + \
            request.remote_ip + ")"
        log_method("%d %s %.2fms", status_code, summary, request_time)
SerpentCS/odoo | addons/auth_crypt/__openerp__.py | 310 | 2298 | # -*- encoding: utf-8 -*-
##############################################################################
#
# Odoo, Open Source Management Solution
# Copyright (C) 2004-2014 OpenERP S.A. (<http://odoo.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# Odoo addon manifest for `auth_crypt` (evaluated as a plain dict literal).
{
    'name': 'Password Encryption',
    'version': '2.0',
    'author': ['OpenERP SA', 'FS3'],
    'maintainer': 'OpenERP SA',
    'website': 'https://www.odoo.com',
    'category': 'Tools',
    # Typo fix in the user-visible description: the PKCS#5 key derivation
    # function is spelled PBKDF2, not "PKDF2".
    'description': """
Encrypted passwords
===================
Replaces the default password storage with a strong cryptographic
hash.
The key derivation function currently used is RSA Security LLC's
industry-standard ``PBKDF2``, in combination with ``SHA512``.
This includes salting and key stretching with several thousands
rounds.
All passwords are encrypted as soon as the module is installed.
This may take a few minutes if there are thousands of users.
Past versions of encrypted passwords will be automatically upgraded
to the current scheme whenever a user authenticates
(``auth_crypt`` was previously using the weaker ``md5crypt`` key
derivation function).
Note: Installing this module permanently prevents user password
recovery and cannot be undone. It is thus recommended to enable
some password reset mechanism for users, such as the one provided
by the ``auth_signup`` module (signup for new users does not
necessarily have to be enabled).
""",
    'depends': ['base'],
    'data': [],
    'auto_install': True,
    'installable': True,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
tectronics/py-lepton | examples/magnet.py | 8 | 4163 | #############################################################################
#
# Copyright (c) 2008 by Casey Duncan and contributors
# All Rights Reserved.
#
# This software is subject to the provisions of the MIT License
# A copy of the license should accompany this distribution.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
#
#############################################################################
""" Magnet.py
Demos the magnet controller. Electrons orbit protons.
"""
__version__ = '$Id: magnet.py 104 2008-11-08 06:49:41Z andrew.charles $'
from pyglet import image
from pyglet.gl import *
import os, math
from lepton import Particle, ParticleGroup, default_system
from lepton.renderer import BillboardRenderer
from lepton.texturizer import SpriteTexturizer
from lepton.emitter import StaticEmitter, PerParticleEmitter
from lepton.controller import Movement, Magnet, Collector, Lifetime, Fader
from lepton.domain import Sphere, Point, Disc
from random import expovariate
win = pyglet.window.Window(resizable=True, visible=False)
win.clear()
glEnable(GL_BLEND)
glEnable(GL_POINT_SMOOTH)
glShadeModel(GL_SMOOTH)
glBlendFunc(GL_SRC_ALPHA,GL_ONE)
glHint(GL_POINT_SMOOTH_HINT,GL_NICEST);
glHint(GL_PERSPECTIVE_CORRECTION_HINT,GL_NICEST);
glDisable(GL_DEPTH_TEST)
def resize(widthWindow, heightWindow):
    """Install a perspective (3D) projection matching the new window size."""
    glViewport(0, 0, widthWindow, heightWindow)
    glMatrixMode(GL_PROJECTION)
    glLoadIdentity()
    aspect = 1.0 * widthWindow / heightWindow
    gluPerspective(70, aspect, 0.001, 10000.0)
    # Leave the modelview matrix active (and reset) for subsequent drawing.
    glMatrixMode(GL_MODELVIEW)
    glLoadIdentity()
win.on_resize = resize
electron_lifetime = 22
max_electrons = 6
trail_lifetime = 4.5
texture = image.load(os.path.join(os.path.dirname(__file__),'flare3.png')).get_texture()
texturizer = SpriteTexturizer(texture.id)
nucleus = Sphere((0, 0, 0), 5)
protons = ParticleGroup(renderer=BillboardRenderer(texturizer),
controllers=[
Movement(),
]
)
proton_emitter = StaticEmitter(
template=Particle(
size=(30, 30, 0),
color=(0.5, 1.0, 0.2, 0.5),
),
size=[(26, 26, 0), (30, 30, 0), (34, 34, 0)],
deviation=Particle(
rotation=(0, 0, math.pi / 6),
))
proton_emitter.emit(3, protons)
electrons = ParticleGroup(renderer=BillboardRenderer(texturizer),
controllers=[
Movement(min_velocity=10),
Lifetime(electron_lifetime * 1.5),
Magnet(nucleus, charge=15000.0),
Magnet(nucleus, charge=-15000.0, exponent=3),
Fader(fade_in_end=1,
fade_out_start=electron_lifetime * 1.4,
fade_out_end=electron_lifetime * 1.5),
]
)
electron_emitter = StaticEmitter(
template=Particle(
position=(-20, 0, 0),
size=(25, 25, 25),
color=(0.1, 0.1, 1.0),
),
velocity=Disc((0,0,0), (-1,0,0), 36, 36),
)
# Trails for electrons
trail_emitter = PerParticleEmitter(electrons, rate=80,
template=Particle(
color=(1, 0, 0 ,1),
size=(4.25, 4.25, 0)
),
deviation=Particle(
up=(0, 0, math.pi),
rotation=(0, 0, math.pi),
size=(0.5, 0.5, 0),
velocity=(1, 1, 1),
color=(0, 1, 0),
age=trail_lifetime / 2.0),)
trails = ParticleGroup(
controllers=[
Lifetime(trail_lifetime * 1.5),
Movement(damping=0.7, max_velocity=60),
Magnet(nucleus, charge=17000.0),
Magnet(nucleus, charge=-17000.0, exponent=2.5),
Collector(Sphere((0, 0, 0), 1)),
Fader(fade_in_end=0.75, max_alpha=0.3, fade_out_start=0, fade_out_end=trail_lifetime),
trail_emitter
],
renderer=BillboardRenderer(texturizer))
win.set_visible(True)
pyglet.clock.schedule_interval(default_system.update, (1.0/30.0))
yrot = 0.0
xrot = 0.0
@win.event
def on_mouse_motion(x, y, dx, dy):
    """Orbit the camera: horizontal motion yaws, vertical motion pitches."""
    global xrot, yrot
    yrot = yrot + dx * 0.3
    xrot = xrot - dy * 0.3
def summon(dt=None):
    """Emit one electron (unless the cap is reached) and reschedule the
    next emission after a random, exponentially distributed delay."""
    if len(electrons) < max_electrons:
        electron_emitter.emit(1, electrons)
    delay = expovariate(1.0) + 1.0
    pyglet.clock.schedule_once(summon, delay)
summon()
@win.event
def on_draw():
    """Clear the window, apply the mouse-driven camera rotation and render
    the particle system."""
    # Fix: removed the stray `global i` declaration — `i` is never defined
    # or referenced anywhere in this script.
    global yrot, xrot
    win.clear()
    glLoadIdentity()
    glTranslatef(0, 0, -50)
    glRotatef(yrot, 0.0, 1.0, 0.0)
    glRotatef(xrot, 1.0, 0.0, 0.0)
    default_system.draw()
if __name__ == '__main__':
pyglet.app.run()
| mit |
opesci/devito | devito/data/allocators.py | 1 | 14759 | import abc
from functools import reduce
from operator import mul
import mmap
import os
import sys
import numpy as np
import ctypes
from ctypes.util import find_library
from devito.logger import logger
from devito.parameters import configuration
from devito.tools import dtype_to_ctype
__all__ = ['ALLOC_FLAT', 'ALLOC_NUMA_LOCAL', 'ALLOC_NUMA_ANY',
'ALLOC_KNL_MCDRAM', 'ALLOC_KNL_DRAM', 'ALLOC_GUARD',
'default_allocator']
class MemoryAllocator(object):
    """Abstract class defining the interface to memory allocators."""

    __metaclass__ = abc.ABCMeta

    is_Posix = False
    is_Numa = False

    # Set to True after the first (lazy) initialization attempt.
    _attempted_init = False
    # Handle to the native library backing the allocator, or None.
    lib = None

    guaranteed_alignment = 64
    """Guaranteed data alignment."""

    @classmethod
    def available(cls):
        # Lazily attempt backend initialization exactly once per class;
        # availability is signalled by a non-None `lib`.
        if cls._attempted_init is False:
            cls.initialize()
            cls._attempted_init = True
        return cls.lib is not None

    @classmethod
    def initialize(cls):
        """
        Initialize the MemoryAllocator.

        Notes
        -----
        This method must be implemented by all subclasses of MemoryAllocator.
        """
        return

    def alloc(self, shape, dtype):
        """
        Allocate memory.

        Parameters
        ----------
        shape : tuple of ints
            Shape of the allocated array.
        dtype : numpy.dtype
            The data type of the raw data.

        Returns
        -------
        pointer, memfree_args
            The first element of the tuple is the reference that can be used to
            access the data as a ctypes object. The second element is an opaque
            object that is needed only for the "memfree" call.

        Raises
        ------
        RuntimeError
            If the underlying C-level allocation fails.
        """
        size = int(reduce(mul, shape))
        ctype = dtype_to_ctype(dtype)

        c_pointer, memfree_args = self._alloc_C_libcall(size, ctype)
        if c_pointer is None:
            # Bug fix: the format string and `str(size)` were previously
            # passed as two separate RuntimeError arguments, so the message
            # was never actually formatted.
            raise RuntimeError(
                "Unable to allocate %d elements in memory" % size)

        # Cast to a 1D array of the specified size, then wrap it as numpy.
        ctype_1d = ctype * size
        buf = ctypes.cast(c_pointer, ctypes.POINTER(ctype_1d)).contents
        pointer = np.frombuffer(buf, dtype=dtype)
        # pointer.reshape should not be used here because it may introduce a copy
        # From https://docs.scipy.org/doc/numpy/reference/generated/numpy.reshape.html:
        # It is not always possible to change the shape of an array without copying the
        # data. If you want an error to be raised when the data is copied, you should
        # assign the new shape to the shape attribute of the array:
        pointer.shape = shape

        return (pointer, memfree_args)

    @abc.abstractmethod
    def _alloc_C_libcall(self, size, ctype):
        """
        Perform the actual memory allocation by calling a C function. Should
        return a 2-tuple (c_pointer, memfree_args), where the free args are
        what is handed back to free() later to deallocate.

        Notes
        -----
        This method must be implemented by all subclasses of MemoryAllocator.
        """
        return

    @abc.abstractmethod
    def free(self, *args):
        """
        Free memory previously allocated with ``self.alloc``.

        Arguments are provided exactly as returned in the second element of the
        tuple returned by _alloc_C_libcall

        Notes
        -----
        This method must be implemented by all subclasses of MemoryAllocator.
        """
        return
class PosixAllocator(MemoryAllocator):
    """
    Memory allocator based on ``posix`` functions. The allocated memory is
    aligned to page boundaries.
    """

    is_Posix = True

    @classmethod
    def initialize(cls):
        # Locate libc. On macOS (Big Sur onwards) probing for the dylib on
        # disk fails, so fall back to the well-known static path.
        handle = find_library('c')
        if handle is None and os.name == "posix" and sys.platform == "darwin":
            handle = '/usr/lib/libc.dylib'
        if handle is None:
            return
        try:
            cls.lib = ctypes.CDLL(handle)
        except OSError:
            cls.lib = None

    def _alloc_C_libcall(self, size, ctype):
        # `posix_memalign` writes the allocated pointer into `out_pointer`
        # and returns 0 on success.
        if not self.available():
            raise RuntimeError("Couldn't find `libc`'s `posix_memalign` to "
                               "allocate memory")
        nbytes = ctypes.c_ulong(size * ctypes.sizeof(ctype))
        out_pointer = ctypes.cast(ctypes.c_void_p(), ctypes.c_void_p)
        status = self.lib.posix_memalign(
            ctypes.byref(out_pointer), self.lib.getpagesize(), nbytes)
        if status != 0:
            return None, None
        return out_pointer, (out_pointer, )

    def free(self, c_pointer):
        self.lib.free(c_pointer)
class GuardAllocator(PosixAllocator):
    """
    Memory allocator based on ``posix`` functions. The allocated memory is
    aligned to page boundaries. Additionally, it allocates extra memory
    before and after the data, and configures it so that an SEGV is thrown
    immediately if an out-of-bounds access occurs.

    Further, the remainder region of the last page (which cannot be
    protected), is poisoned with NaNs.
    """

    def __init__(self, padding_bytes=1024*1024):
        # Size of each guard region (before and after the user data); must
        # be a whole number of pages (asserted in _alloc_C_libcall).
        self.padding_bytes = padding_bytes

    def _alloc_C_libcall(self, size, ctype):
        if not self.available():
            raise RuntimeError("Couldn't find `libc`'s `posix_memalign` to "
                               "allocate memory")
        pagesize = self.lib.getpagesize()
        assert self.padding_bytes % pagesize == 0

        # Layout: [pad pages][user pages (rounded up)][pad pages].
        npages_pad = self.padding_bytes // pagesize
        nbytes_user = size * ctypes.sizeof(ctype)
        npages_user = (nbytes_user + pagesize - 1) // pagesize

        npages_alloc = 2*npages_pad + npages_user

        c_bytesize = ctypes.c_ulong(npages_alloc * pagesize)
        c_pointer = ctypes.cast(ctypes.c_void_p(), ctypes.c_void_p)
        alignment = self.lib.getpagesize()
        ret = self.lib.posix_memalign(ctypes.byref(c_pointer), alignment, c_bytesize)
        if ret != 0:
            return None, None

        # generate pointers to the left padding, the user data, and the right pad
        padleft_pointer = c_pointer
        c_pointer = ctypes.c_void_p(c_pointer.value + self.padding_bytes)
        padright_pointer = ctypes.c_void_p(c_pointer.value + npages_user * pagesize)

        # and set the permissions on the pad memory to 0 (no access)
        # if these fail, don't worry about failing the entire allocation
        c_padsize = ctypes.c_ulong(self.padding_bytes)
        if self.lib.mprotect(padleft_pointer, c_padsize, ctypes.c_int(0)):
            logger.warning("couldn't protect memory")
        if self.lib.mprotect(padright_pointer, c_padsize, ctypes.c_int(0)):
            logger.warning("couldn't protect memory")

        # if there is a multiple of 4 bytes left, use the code below to poison
        # the memory
        if nbytes_user % 4 == 0:
            poison_size = npages_user*pagesize - nbytes_user
            intp_type = ctypes.POINTER(ctypes.c_int)
            poison_ptr = ctypes.cast(ctypes.c_void_p(c_pointer.value + nbytes_user),
                                     intp_type)

            # for both float32 and float64, a sequence of -100 int32s represents NaNs,
            # at least on little-endian architectures. It shouldn't matter what we
            # put in there, anyway
            for i in range(poison_size // 4):
                poison_ptr[i] = -100

        # memfree_args must reference the *start* of the allocation (left
        # pad), not the user pointer, so free() releases the whole region.
        return c_pointer, (padleft_pointer, c_bytesize)

    def free(self, c_pointer, total_size):
        # unprotect it, since free() accesses it, I think...
        self.lib.mprotect(c_pointer, total_size,
                          ctypes.c_int(mmap.PROT_READ | mmap.PROT_WRITE))
        self.lib.free(c_pointer)
class NumaAllocator(MemoryAllocator):
    """
    Memory allocator based on ``libnuma`` functions. The allocated memory is
    aligned to page boundaries. Through the argument ``node`` it is possible
    to specify a NUMA node in which memory allocation should be attempted first
    (will fall back to an arbitrary NUMA domain if not enough memory is available)

    Parameters
    ----------
    node : int or str
        If an integer, it indicates a specific NUMA node. Otherwise, the two
        keywords ``local`` ("allocate on the local NUMA node") and ``any``
        ("allocate on any NUMA node with sufficient free memory") are accepted.
    """

    is_Numa = True

    @classmethod
    def initialize(cls):
        # Bail out (leaving cls.lib None) when libnuma is absent or the
        # machine is not a NUMA system.
        handle = find_library('numa')
        if handle is None:
            return
        lib = ctypes.CDLL(handle)
        if lib.numa_available() == -1:
            return
        # We are indeed on a NUMA system
        # Allow the kernel to allocate memory on other NUMA nodes when there isn't
        # enough free on the target node
        lib.numa_set_bind_policy(0)
        # Required because numa_alloc* functions return pointers
        lib.numa_alloc_onnode.restype = ctypes.c_void_p
        lib.numa_alloc_local.restype = ctypes.c_void_p
        lib.numa_alloc.restype = ctypes.c_void_p
        cls.lib = lib

    def __init__(self, node):
        super(NumaAllocator, self).__init__()
        # Target NUMA node: an int, 'local', or 'any' (see class docstring).
        self._node = node

    def _alloc_C_libcall(self, size, ctype):
        if not self.available():
            raise RuntimeError("Couldn't find `libnuma`'s `numa_alloc_*` to "
                               "allocate memory")

        if size == 0:
            # work around the fact that the allocator may return NULL when
            # the size is 0, and numpy does not like that
            c_bytesize = ctypes.c_ulong(1)
        else:
            c_bytesize = ctypes.c_ulong(size * ctypes.sizeof(ctype))

        # Dispatch on the requested placement policy.
        if self.put_onnode:
            c_pointer = self.lib.numa_alloc_onnode(c_bytesize, self._node)
        elif self.put_local:
            c_pointer = self.lib.numa_alloc_local(c_bytesize)
        else:
            c_pointer = self.lib.numa_alloc(c_bytesize)

        # note! even though restype was set above, ctypes returns a
        # python integer.
        # See https://stackoverflow.com/questions/17840144/
        # edit: it apparently can return None, also!
        if c_pointer == 0 or c_pointer is None:
            return None, None
        else:
            # Convert it back to a void * - this is
            # _very_ important when later # passing it to numa_free
            c_pointer = ctypes.c_void_p(c_pointer)
            return c_pointer, (c_pointer, c_bytesize)

    def free(self, c_pointer, c_bytesize):
        # numa_free requires the size of the original allocation.
        self.lib.numa_free(c_pointer, c_bytesize)

    @property
    def node(self):
        # The node spec this allocator was constructed with.
        return self._node

    @property
    def put_onnode(self):
        # True when a specific NUMA node (an int) was requested.
        return isinstance(self._node, int)

    @property
    def put_local(self):
        # True when allocation on the calling CPU's node was requested.
        return self._node == 'local'
class ExternalAllocator(MemoryAllocator):
    """
    An allocator that performs no allocation at all: it hands pre-existing,
    user-provided memory over to a Function.

    Parameters
    ----------
    numpy_array : array-like
        Any object exposing the buffer interface, such as a numpy.ndarray.

    Notes
    -----
    * Use ExternalAllocator and pass a reference to the external memory when
      creating a Function. This Function will now use this memory as its f.data.
    * If the data present in this external memory is valuable, provide a noop
      initialiser, or else Devito will reset it to 0.

    Examples
    --------
    >>> from devito import Grid, Function
    >>> from devito.data.allocators import ExternalAllocator
    >>> import numpy as np
    >>> shape = (2, 2)
    >>> numpy_array = np.ones(shape, dtype=np.float32)
    >>> g = Grid(shape)
    >>> space_order = 0
    >>> f = Function(name='f', grid=g, space_order=space_order,
    ...              allocator=ExternalAllocator(numpy_array),
    ...              initializer=lambda x: None)
    >>> f.data[0, 1] = 2
    >>> numpy_array
    array([[1., 2.],
           [1., 1.]], dtype=float32)
    """

    def __init__(self, numpy_array):
        self.numpy_array = numpy_array

    def alloc(self, shape, dtype):
        """Return the wrapped buffer, verifying it matches `shape`/`dtype`."""
        array = self.numpy_array
        # The external buffer must match the layout the Function expects
        assert shape == array.shape, \
            "Provided array has shape %s. Expected %s" %\
            (str(array.shape), str(shape))
        assert dtype == array.dtype, \
            "Provided array has dtype %s. Expected %s" %\
            (str(array.dtype), str(dtype))
        return (array, None)
# Ready-made allocator instances, selectable via `default_allocator()` or
# passed explicitly when creating Functions
ALLOC_GUARD = GuardAllocator(1048576)     # develop-mode allocator (see GuardAllocator)
ALLOC_FLAT = PosixAllocator()             # page-aligned allocation (posix_memalign)
ALLOC_KNL_DRAM = NumaAllocator(0)         # KNL: NUMA node 0 -- presumably DRAM; see NumaAllocator
ALLOC_KNL_MCDRAM = NumaAllocator(1)       # KNL: NUMA node 1 -- presumably MCDRAM; see NumaAllocator
ALLOC_NUMA_ANY = NumaAllocator('any')     # any NUMA node with sufficient free memory
ALLOC_NUMA_LOCAL = NumaAllocator('local') # the local (closest) NUMA node
def infer_knl_mode():
    """
    Infer the Knights Landing memory mode from sysfs: the presence of a
    second NUMA node is taken to mean 'flat' mode, otherwise 'cache'.
    """
    node1 = os.path.join('/sys', 'bus', 'node', 'devices', 'node1')
    if os.path.exists(node1):
        return 'flat'
    return 'cache'
def default_allocator():
    """
    Return a suitable MemoryAllocator for the architecture on which the process
    is running. Possible allocators are: ::

        * ALLOC_GUARD: A GuardAllocator, the develop-mode default (presumably
                       adds guard regions to trap memory errors -- see
                       GuardAllocator).
        * ALLOC_FLAT: Align memory to page boundaries using the posix function
                      ``posix_memalign``
        * ALLOC_NUMA_LOCAL: Allocate memory in the "closest" NUMA node. This only
                            makes sense on a NUMA architecture. Falls back to
                            allocation in an arbitrary NUMA node if there isn't
                            enough space.
        * ALLOC_NUMA_ANY: Allocate memory in an arbitrary NUMA node.
        * ALLOC_KNL_MCDRAM: On a Knights Landing platform, allocate memory in MCDRAM.
                            Falls back to DRAM if there isn't enough space.
        * ALLOC_KNL_DRAM: On a Knights Landing platform, allocate memory in DRAM.

    The default allocator is chosen based on the following algorithm: ::

        * If running in DEVELOP mode (env var DEVITO_DEVELOP), return ALLOC_GUARD;
        * If ``libnuma`` is not available on the system, return ALLOC_FLAT (though
          it typically is available, at least on relatively recent Linux
          distributions);
        * If on a Knights Landing platform (codename ``knl``, see
          ``print_defaults()``) in flat memory mode, return ALLOC_KNL_MCDRAM;
        * If on a multi-socket Intel Xeon platform, return ALLOC_NUMA_LOCAL;
        * In all other cases, return ALLOC_FLAT.
    """
    # NOTE: the docstring used to claim develop mode returns ALLOC_FLAT,
    # contradicting the code below; the behavior (ALLOC_GUARD) is kept and
    # the documentation fixed instead
    if configuration['develop-mode']:
        return ALLOC_GUARD
    if not NumaAllocator.available():
        # No libnuma: plain page-aligned allocation is all we can do
        return ALLOC_FLAT
    # libnuma available: prefer NUMA-aware placement
    if configuration['platform'].name == 'knl' and infer_knl_mode() == 'flat':
        return ALLOC_KNL_MCDRAM
    return ALLOC_NUMA_LOCAL
| mit |
rouge8/pip | src/pip/_vendor/html5lib/constants.py | 102 | 83518 | from __future__ import absolute_import, division, unicode_literals
import string
# Sentinel used to signal end of input
EOF = None
E = {
"null-character":
"Null character in input stream, replaced with U+FFFD.",
"invalid-codepoint":
"Invalid codepoint in stream.",
"incorrectly-placed-solidus":
"Solidus (/) incorrectly placed in tag.",
"incorrect-cr-newline-entity":
"Incorrect CR newline entity, replaced with LF.",
"illegal-windows-1252-entity":
"Entity used with illegal number (windows-1252 reference).",
"cant-convert-numeric-entity":
"Numeric entity couldn't be converted to character "
"(codepoint U+%(charAsInt)08x).",
"illegal-codepoint-for-numeric-entity":
"Numeric entity represents an illegal codepoint: "
"U+%(charAsInt)08x.",
"numeric-entity-without-semicolon":
"Numeric entity didn't end with ';'.",
"expected-numeric-entity-but-got-eof":
"Numeric entity expected. Got end of file instead.",
"expected-numeric-entity":
"Numeric entity expected but none found.",
"named-entity-without-semicolon":
"Named entity didn't end with ';'.",
"expected-named-entity":
"Named entity expected. Got none.",
"attributes-in-end-tag":
"End tag contains unexpected attributes.",
'self-closing-flag-on-end-tag':
"End tag contains unexpected self-closing flag.",
"expected-tag-name-but-got-right-bracket":
"Expected tag name. Got '>' instead.",
"expected-tag-name-but-got-question-mark":
"Expected tag name. Got '?' instead. (HTML doesn't "
"support processing instructions.)",
"expected-tag-name":
"Expected tag name. Got something else instead",
"expected-closing-tag-but-got-right-bracket":
"Expected closing tag. Got '>' instead. Ignoring '</>'.",
"expected-closing-tag-but-got-eof":
"Expected closing tag. Unexpected end of file.",
"expected-closing-tag-but-got-char":
"Expected closing tag. Unexpected character '%(data)s' found.",
"eof-in-tag-name":
"Unexpected end of file in the tag name.",
"expected-attribute-name-but-got-eof":
"Unexpected end of file. Expected attribute name instead.",
"eof-in-attribute-name":
"Unexpected end of file in attribute name.",
"invalid-character-in-attribute-name":
"Invalid character in attribute name",
"duplicate-attribute":
"Dropped duplicate attribute on tag.",
"expected-end-of-tag-name-but-got-eof":
"Unexpected end of file. Expected = or end of tag.",
"expected-attribute-value-but-got-eof":
"Unexpected end of file. Expected attribute value.",
"expected-attribute-value-but-got-right-bracket":
"Expected attribute value. Got '>' instead.",
'equals-in-unquoted-attribute-value':
"Unexpected = in unquoted attribute",
'unexpected-character-in-unquoted-attribute-value':
"Unexpected character in unquoted attribute",
"invalid-character-after-attribute-name":
"Unexpected character after attribute name.",
"unexpected-character-after-attribute-value":
"Unexpected character after attribute value.",
"eof-in-attribute-value-double-quote":
"Unexpected end of file in attribute value (\").",
"eof-in-attribute-value-single-quote":
"Unexpected end of file in attribute value (').",
"eof-in-attribute-value-no-quotes":
"Unexpected end of file in attribute value.",
"unexpected-EOF-after-solidus-in-tag":
"Unexpected end of file in tag. Expected >",
"unexpected-character-after-solidus-in-tag":
"Unexpected character after / in tag. Expected >",
"expected-dashes-or-doctype":
"Expected '--' or 'DOCTYPE'. Not found.",
"unexpected-bang-after-double-dash-in-comment":
"Unexpected ! after -- in comment",
"unexpected-space-after-double-dash-in-comment":
"Unexpected space after -- in comment",
"incorrect-comment":
"Incorrect comment.",
"eof-in-comment":
"Unexpected end of file in comment.",
"eof-in-comment-end-dash":
"Unexpected end of file in comment (-)",
"unexpected-dash-after-double-dash-in-comment":
"Unexpected '-' after '--' found in comment.",
"eof-in-comment-double-dash":
"Unexpected end of file in comment (--).",
"eof-in-comment-end-space-state":
"Unexpected end of file in comment.",
"eof-in-comment-end-bang-state":
"Unexpected end of file in comment.",
"unexpected-char-in-comment":
"Unexpected character in comment found.",
"need-space-after-doctype":
"No space after literal string 'DOCTYPE'.",
"expected-doctype-name-but-got-right-bracket":
"Unexpected > character. Expected DOCTYPE name.",
"expected-doctype-name-but-got-eof":
"Unexpected end of file. Expected DOCTYPE name.",
"eof-in-doctype-name":
"Unexpected end of file in DOCTYPE name.",
"eof-in-doctype":
"Unexpected end of file in DOCTYPE.",
"expected-space-or-right-bracket-in-doctype":
"Expected space or '>'. Got '%(data)s'",
"unexpected-end-of-doctype":
"Unexpected end of DOCTYPE.",
"unexpected-char-in-doctype":
"Unexpected character in DOCTYPE.",
"eof-in-innerhtml":
"XXX innerHTML EOF",
"unexpected-doctype":
"Unexpected DOCTYPE. Ignored.",
"non-html-root":
"html needs to be the first start tag.",
"expected-doctype-but-got-eof":
"Unexpected End of file. Expected DOCTYPE.",
"unknown-doctype":
"Erroneous DOCTYPE.",
"expected-doctype-but-got-chars":
"Unexpected non-space characters. Expected DOCTYPE.",
"expected-doctype-but-got-start-tag":
"Unexpected start tag (%(name)s). Expected DOCTYPE.",
"expected-doctype-but-got-end-tag":
"Unexpected end tag (%(name)s). Expected DOCTYPE.",
"end-tag-after-implied-root":
"Unexpected end tag (%(name)s) after the (implied) root element.",
"expected-named-closing-tag-but-got-eof":
"Unexpected end of file. Expected end tag (%(name)s).",
"two-heads-are-not-better-than-one":
"Unexpected start tag head in existing head. Ignored.",
"unexpected-end-tag":
"Unexpected end tag (%(name)s). Ignored.",
"unexpected-start-tag-out-of-my-head":
"Unexpected start tag (%(name)s) that can be in head. Moved.",
"unexpected-start-tag":
"Unexpected start tag (%(name)s).",
"missing-end-tag":
"Missing end tag (%(name)s).",
"missing-end-tags":
"Missing end tags (%(name)s).",
"unexpected-start-tag-implies-end-tag":
"Unexpected start tag (%(startName)s) "
"implies end tag (%(endName)s).",
"unexpected-start-tag-treated-as":
"Unexpected start tag (%(originalName)s). Treated as %(newName)s.",
"deprecated-tag":
"Unexpected start tag %(name)s. Don't use it!",
"unexpected-start-tag-ignored":
"Unexpected start tag %(name)s. Ignored.",
"expected-one-end-tag-but-got-another":
"Unexpected end tag (%(gotName)s). "
"Missing end tag (%(expectedName)s).",
"end-tag-too-early":
"End tag (%(name)s) seen too early. Expected other end tag.",
"end-tag-too-early-named":
"Unexpected end tag (%(gotName)s). Expected end tag (%(expectedName)s).",
"end-tag-too-early-ignored":
"End tag (%(name)s) seen too early. Ignored.",
"adoption-agency-1.1":
"End tag (%(name)s) violates step 1, "
"paragraph 1 of the adoption agency algorithm.",
"adoption-agency-1.2":
"End tag (%(name)s) violates step 1, "
"paragraph 2 of the adoption agency algorithm.",
"adoption-agency-1.3":
"End tag (%(name)s) violates step 1, "
"paragraph 3 of the adoption agency algorithm.",
"adoption-agency-4.4":
"End tag (%(name)s) violates step 4, "
"paragraph 4 of the adoption agency algorithm.",
"unexpected-end-tag-treated-as":
"Unexpected end tag (%(originalName)s). Treated as %(newName)s.",
"no-end-tag":
"This element (%(name)s) has no end tag.",
"unexpected-implied-end-tag-in-table":
"Unexpected implied end tag (%(name)s) in the table phase.",
"unexpected-implied-end-tag-in-table-body":
"Unexpected implied end tag (%(name)s) in the table body phase.",
"unexpected-char-implies-table-voodoo":
"Unexpected non-space characters in "
"table context caused voodoo mode.",
"unexpected-hidden-input-in-table":
"Unexpected input with type hidden in table context.",
"unexpected-form-in-table":
"Unexpected form in table context.",
"unexpected-start-tag-implies-table-voodoo":
"Unexpected start tag (%(name)s) in "
"table context caused voodoo mode.",
"unexpected-end-tag-implies-table-voodoo":
"Unexpected end tag (%(name)s) in "
"table context caused voodoo mode.",
"unexpected-cell-in-table-body":
"Unexpected table cell start tag (%(name)s) "
"in the table body phase.",
"unexpected-cell-end-tag":
"Got table cell end tag (%(name)s) "
"while required end tags are missing.",
"unexpected-end-tag-in-table-body":
"Unexpected end tag (%(name)s) in the table body phase. Ignored.",
"unexpected-implied-end-tag-in-table-row":
"Unexpected implied end tag (%(name)s) in the table row phase.",
"unexpected-end-tag-in-table-row":
"Unexpected end tag (%(name)s) in the table row phase. Ignored.",
"unexpected-select-in-select":
"Unexpected select start tag in the select phase "
"treated as select end tag.",
"unexpected-input-in-select":
"Unexpected input start tag in the select phase.",
"unexpected-start-tag-in-select":
"Unexpected start tag token (%(name)s in the select phase. "
"Ignored.",
"unexpected-end-tag-in-select":
"Unexpected end tag (%(name)s) in the select phase. Ignored.",
"unexpected-table-element-start-tag-in-select-in-table":
"Unexpected table element start tag (%(name)s) in the select in table phase.",
"unexpected-table-element-end-tag-in-select-in-table":
"Unexpected table element end tag (%(name)s) in the select in table phase.",
"unexpected-char-after-body":
"Unexpected non-space characters in the after body phase.",
"unexpected-start-tag-after-body":
"Unexpected start tag token (%(name)s)"
" in the after body phase.",
"unexpected-end-tag-after-body":
"Unexpected end tag token (%(name)s)"
" in the after body phase.",
"unexpected-char-in-frameset":
"Unexpected characters in the frameset phase. Characters ignored.",
"unexpected-start-tag-in-frameset":
"Unexpected start tag token (%(name)s)"
" in the frameset phase. Ignored.",
"unexpected-frameset-in-frameset-innerhtml":
"Unexpected end tag token (frameset) "
"in the frameset phase (innerHTML).",
"unexpected-end-tag-in-frameset":
"Unexpected end tag token (%(name)s)"
" in the frameset phase. Ignored.",
"unexpected-char-after-frameset":
"Unexpected non-space characters in the "
"after frameset phase. Ignored.",
"unexpected-start-tag-after-frameset":
"Unexpected start tag (%(name)s)"
" in the after frameset phase. Ignored.",
"unexpected-end-tag-after-frameset":
"Unexpected end tag (%(name)s)"
" in the after frameset phase. Ignored.",
"unexpected-end-tag-after-body-innerhtml":
"Unexpected end tag after body(innerHtml)",
"expected-eof-but-got-char":
"Unexpected non-space characters. Expected end of file.",
"expected-eof-but-got-start-tag":
"Unexpected start tag (%(name)s)"
". Expected end of file.",
"expected-eof-but-got-end-tag":
"Unexpected end tag (%(name)s)"
". Expected end of file.",
"eof-in-table":
"Unexpected end of file. Expected table content.",
"eof-in-select":
"Unexpected end of file. Expected select content.",
"eof-in-frameset":
"Unexpected end of file. Expected frameset content.",
"eof-in-script-in-script":
"Unexpected end of file. Expected script content.",
"eof-in-foreign-lands":
"Unexpected end of file. Expected foreign content",
"non-void-element-with-trailing-solidus":
"Trailing solidus not allowed on element %(name)s",
"unexpected-html-element-in-foreign-content":
"Element %(name)s not allowed in a non-html context",
"unexpected-end-tag-before-html":
"Unexpected end tag (%(name)s) before html.",
"unexpected-inhead-noscript-tag":
"Element %(name)s not allowed in a inhead-noscript context",
"eof-in-head-noscript":
"Unexpected end of file. Expected inhead-noscript content",
"char-in-head-noscript":
"Unexpected non-space character. Expected inhead-noscript content",
"XXX-undefined-error":
"Undefined error (this sucks and should be fixed)",
}
# Well-known namespace URIs, keyed by the short prefixes used throughout
# this module
namespaces = {
    "html": "http://www.w3.org/1999/xhtml",
    "mathml": "http://www.w3.org/1998/Math/MathML",
    "svg": "http://www.w3.org/2000/svg",
    "xlink": "http://www.w3.org/1999/xlink",
    "xml": "http://www.w3.org/XML/1998/namespace",
    "xmlns": "http://www.w3.org/2000/xmlns/"
}
# (namespace, tag name) pairs grouped per namespace for readability
scopingElements = frozenset(
    [(namespaces["html"], name) for name in (
        "applet", "caption", "html", "marquee", "object",
        "table", "td", "th")] +
    [(namespaces["mathml"], name) for name in (
        "mi", "mo", "mn", "ms", "mtext", "annotation-xml")] +
    [(namespaces["svg"], name) for name in (
        "foreignObject", "desc", "title")]
)
# All formatting elements live in the HTML namespace
formattingElements = frozenset(
    (namespaces["html"], name)
    for name in ("a", "b", "big", "code", "em", "font", "i", "nobr",
                 "s", "small", "strike", "strong", "tt", "u")
)
# (namespace, tag name) pairs; all but one are in the HTML namespace
specialElements = frozenset(
    [(namespaces["html"], name) for name in (
        "address", "applet", "area", "article", "aside", "base",
        "basefont", "bgsound", "blockquote", "body", "br", "button",
        "caption", "center", "col", "colgroup", "command", "dd",
        "details", "dir", "div", "dl", "dt", "embed", "fieldset",
        "figure", "footer", "form", "frame", "frameset", "h1", "h2",
        "h3", "h4", "h5", "h6", "head", "header", "hr", "html",
        "iframe",
        # Note that image is commented out in the spec as "this isn't an
        # element that can end up on the stack, so it doesn't matter,"
        "image", "img", "input", "isindex", "li", "link", "listing",
        "marquee", "menu", "meta", "nav", "noembed", "noframes",
        "noscript", "object", "ol", "p", "param", "plaintext", "pre",
        "script", "section", "select", "style", "table", "tbody", "td",
        "textarea", "tfoot", "th", "thead", "title", "tr", "ul", "wbr",
        "xmp")] +
    [(namespaces["svg"], "foreignObject")]
)
htmlIntegrationPointElements = frozenset(
    [(namespaces["mathml"], "annotation-xml")] +
    [(namespaces["svg"], name) for name in ("foreignObject", "desc", "title")]
)
mathmlTextIntegrationPointElements = frozenset(
    (namespaces["mathml"], name)
    for name in ("mi", "mo", "mn", "ms", "mtext")
)
# SVG attributes with a canonical camelCase form; keys are the fully
# lowercased spellings, values the canonical ones (each key is exactly
# value.lower(), so the table is generated from the canonical names)
adjustSVGAttributes = {name.lower(): name for name in (
    "attributeName", "attributeType", "baseFrequency", "baseProfile",
    "calcMode", "clipPathUnits", "contentScriptType", "contentStyleType",
    "diffuseConstant", "edgeMode", "externalResourcesRequired",
    "filterRes", "filterUnits", "glyphRef", "gradientTransform",
    "gradientUnits", "kernelMatrix", "kernelUnitLength", "keyPoints",
    "keySplines", "keyTimes", "lengthAdjust", "limitingConeAngle",
    "markerHeight", "markerUnits", "markerWidth", "maskContentUnits",
    "maskUnits", "numOctaves", "pathLength", "patternContentUnits",
    "patternTransform", "patternUnits", "pointsAtX", "pointsAtY",
    "pointsAtZ", "preserveAlpha", "preserveAspectRatio",
    "primitiveUnits", "refX", "refY", "repeatCount", "repeatDur",
    "requiredExtensions", "requiredFeatures", "specularConstant",
    "specularExponent", "spreadMethod", "startOffset", "stdDeviation",
    "stitchTiles", "surfaceScale", "systemLanguage", "tableValues",
    "targetX", "targetY", "textLength", "viewBox", "viewTarget",
    "xChannelSelector", "yChannelSelector", "zoomAndPan")}
# The single MathML attribute with a canonical mixed-case spelling
adjustMathMLAttributes = {"definitionurl": "definitionURL"}
# Namespaced foreign attributes: qualified name -> (prefix, local, namespace)
adjustForeignAttributes = {
    "xlink:%s" % local: ("xlink", local, namespaces["xlink"])
    for local in ("actuate", "arcrole", "href", "role", "show",
                  "title", "type")
}
adjustForeignAttributes.update({
    "xml:base": ("xml", "base", namespaces["xml"]),
    "xml:lang": ("xml", "lang", namespaces["xml"]),
    "xml:space": ("xml", "space", namespaces["xml"]),
    "xmlns": (None, "xmlns", namespaces["xmlns"]),
    "xmlns:xlink": ("xmlns", "xlink", namespaces["xmlns"]),
})
# Inverse of adjustForeignAttributes: (namespace, local name) -> qualified
# name. Built with a dict comprehension rather than dict() over a list
# comprehension (same result, no intermediate list).
unadjustForeignAttributes = {(ns, local): qname
                             for qname, (prefix, local, ns) in
                             adjustForeignAttributes.items()}
# The space characters: tab, LF, FF, space, CR
spaceCharacters = frozenset("\t\n\u000C \r")
# Table-section elements associated with the table insertion modes
tableInsertModeElements = frozenset(
    "table tbody tfoot thead tr".split())
# Character classes derived from the ASCII tables in the `string` module
asciiLowercase = frozenset(string.ascii_lowercase)
asciiUppercase = frozenset(string.ascii_uppercase)
asciiLetters = frozenset(string.ascii_letters)
digits = frozenset(string.digits)
hexDigits = frozenset(string.hexdigits)

# str.translate table mapping ASCII uppercase codepoints to lowercase ones
# (dict comprehension instead of dict() over a list comprehension)
asciiUpper2Lower = {ord(c): ord(c.lower())
                    for c in string.ascii_uppercase}
# Heading elements need to be ordered (h1 through h6)
headingElements = tuple("h%d" % level for level in range(1, 7))
# Elements that never take an end tag
voidElements = frozenset(
    "base command event-source link meta hr br img embed "
    "param area col input source track".split())
# Element-name groups used by the tokenizer/serializer for raw-text handling
# (naming follows html5lib convention)
cdataElements = frozenset({'title', 'textarea'})
rcdataElements = frozenset({'style', 'script', 'xmp', 'iframe',
                            'noembed', 'noframes', 'noscript'})
# Boolean attributes (their presence alone is meaningful), keyed by element
# name; the "" key lists attributes treated as boolean on any element
booleanAttributes = {
    "": frozenset(["irrelevant", "itemscope"]),
    "style": frozenset(["scoped"]),
    "img": frozenset(["ismap"]),
    "audio": frozenset(["autoplay", "controls"]),
    "video": frozenset(["autoplay", "controls"]),
    "script": frozenset(["defer", "async"]),
    "details": frozenset(["open"]),
    "datagrid": frozenset(["multiple", "disabled"]),
    "command": frozenset(["hidden", "disabled", "checked", "default"]),
    "hr": frozenset(["noshade"]),
    "menu": frozenset(["autosubmit"]),
    "fieldset": frozenset(["disabled", "readonly"]),
    "option": frozenset(["disabled", "readonly", "selected"]),
    "optgroup": frozenset(["disabled", "readonly"]),
    "button": frozenset(["disabled", "autofocus"]),
    "input": frozenset(["disabled", "readonly", "required", "autofocus", "checked", "ismap"]),
    "select": frozenset(["disabled", "readonly", "autofocus", "multiple"]),
    "output": frozenset(["disabled", "readonly"]),
    "iframe": frozenset(["seamless"]),
}
# entitiesWindows1252 has to be _ordered_ and needs to have an index. It
# therefore can't be a frozenset.
# Index i holds the Unicode codepoint that Windows-1252 assigns to byte
# 0x80 + i; 65533 (U+FFFD REPLACEMENT CHARACTER) fills the undefined slots.
entitiesWindows1252 = (
    8364,   # 0x80  0x20AC  EURO SIGN
    65533,  # 0x81          UNDEFINED
    8218,   # 0x82  0x201A  SINGLE LOW-9 QUOTATION MARK
    402,    # 0x83  0x0192  LATIN SMALL LETTER F WITH HOOK
    8222,   # 0x84  0x201E  DOUBLE LOW-9 QUOTATION MARK
    8230,   # 0x85  0x2026  HORIZONTAL ELLIPSIS
    8224,   # 0x86  0x2020  DAGGER
    8225,   # 0x87  0x2021  DOUBLE DAGGER
    710,    # 0x88  0x02C6  MODIFIER LETTER CIRCUMFLEX ACCENT
    8240,   # 0x89  0x2030  PER MILLE SIGN
    352,    # 0x8A  0x0160  LATIN CAPITAL LETTER S WITH CARON
    8249,   # 0x8B  0x2039  SINGLE LEFT-POINTING ANGLE QUOTATION MARK
    338,    # 0x8C  0x0152  LATIN CAPITAL LIGATURE OE
    65533,  # 0x8D          UNDEFINED
    381,    # 0x8E  0x017D  LATIN CAPITAL LETTER Z WITH CARON
    65533,  # 0x8F          UNDEFINED
    65533,  # 0x90          UNDEFINED
    8216,   # 0x91  0x2018  LEFT SINGLE QUOTATION MARK
    8217,   # 0x92  0x2019  RIGHT SINGLE QUOTATION MARK
    8220,   # 0x93  0x201C  LEFT DOUBLE QUOTATION MARK
    8221,   # 0x94  0x201D  RIGHT DOUBLE QUOTATION MARK
    8226,   # 0x95  0x2022  BULLET
    8211,   # 0x96  0x2013  EN DASH
    8212,   # 0x97  0x2014  EM DASH
    732,    # 0x98  0x02DC  SMALL TILDE
    8482,   # 0x99  0x2122  TRADE MARK SIGN
    353,    # 0x9A  0x0161  LATIN SMALL LETTER S WITH CARON
    8250,   # 0x9B  0x203A  SINGLE RIGHT-POINTING ANGLE QUOTATION MARK
    339,    # 0x9C  0x0153  LATIN SMALL LIGATURE OE
    65533,  # 0x9D          UNDEFINED
    382,    # 0x9E  0x017E  LATIN SMALL LETTER Z WITH CARON
    376     # 0x9F  0x0178  LATIN CAPITAL LETTER Y WITH DIAERESIS
)
# The five entity references predefined by XML itself
xmlEntities = frozenset(['lt;', 'gt;', 'amp;', 'apos;', 'quot;'])
entities = {
"AElig": "\xc6",
"AElig;": "\xc6",
"AMP": "&",
"AMP;": "&",
"Aacute": "\xc1",
"Aacute;": "\xc1",
"Abreve;": "\u0102",
"Acirc": "\xc2",
"Acirc;": "\xc2",
"Acy;": "\u0410",
"Afr;": "\U0001d504",
"Agrave": "\xc0",
"Agrave;": "\xc0",
"Alpha;": "\u0391",
"Amacr;": "\u0100",
"And;": "\u2a53",
"Aogon;": "\u0104",
"Aopf;": "\U0001d538",
"ApplyFunction;": "\u2061",
"Aring": "\xc5",
"Aring;": "\xc5",
"Ascr;": "\U0001d49c",
"Assign;": "\u2254",
"Atilde": "\xc3",
"Atilde;": "\xc3",
"Auml": "\xc4",
"Auml;": "\xc4",
"Backslash;": "\u2216",
"Barv;": "\u2ae7",
"Barwed;": "\u2306",
"Bcy;": "\u0411",
"Because;": "\u2235",
"Bernoullis;": "\u212c",
"Beta;": "\u0392",
"Bfr;": "\U0001d505",
"Bopf;": "\U0001d539",
"Breve;": "\u02d8",
"Bscr;": "\u212c",
"Bumpeq;": "\u224e",
"CHcy;": "\u0427",
"COPY": "\xa9",
"COPY;": "\xa9",
"Cacute;": "\u0106",
"Cap;": "\u22d2",
"CapitalDifferentialD;": "\u2145",
"Cayleys;": "\u212d",
"Ccaron;": "\u010c",
"Ccedil": "\xc7",
"Ccedil;": "\xc7",
"Ccirc;": "\u0108",
"Cconint;": "\u2230",
"Cdot;": "\u010a",
"Cedilla;": "\xb8",
"CenterDot;": "\xb7",
"Cfr;": "\u212d",
"Chi;": "\u03a7",
"CircleDot;": "\u2299",
"CircleMinus;": "\u2296",
"CirclePlus;": "\u2295",
"CircleTimes;": "\u2297",
"ClockwiseContourIntegral;": "\u2232",
"CloseCurlyDoubleQuote;": "\u201d",
"CloseCurlyQuote;": "\u2019",
"Colon;": "\u2237",
"Colone;": "\u2a74",
"Congruent;": "\u2261",
"Conint;": "\u222f",
"ContourIntegral;": "\u222e",
"Copf;": "\u2102",
"Coproduct;": "\u2210",
"CounterClockwiseContourIntegral;": "\u2233",
"Cross;": "\u2a2f",
"Cscr;": "\U0001d49e",
"Cup;": "\u22d3",
"CupCap;": "\u224d",
"DD;": "\u2145",
"DDotrahd;": "\u2911",
"DJcy;": "\u0402",
"DScy;": "\u0405",
"DZcy;": "\u040f",
"Dagger;": "\u2021",
"Darr;": "\u21a1",
"Dashv;": "\u2ae4",
"Dcaron;": "\u010e",
"Dcy;": "\u0414",
"Del;": "\u2207",
"Delta;": "\u0394",
"Dfr;": "\U0001d507",
"DiacriticalAcute;": "\xb4",
"DiacriticalDot;": "\u02d9",
"DiacriticalDoubleAcute;": "\u02dd",
"DiacriticalGrave;": "`",
"DiacriticalTilde;": "\u02dc",
"Diamond;": "\u22c4",
"DifferentialD;": "\u2146",
"Dopf;": "\U0001d53b",
"Dot;": "\xa8",
"DotDot;": "\u20dc",
"DotEqual;": "\u2250",
"DoubleContourIntegral;": "\u222f",
"DoubleDot;": "\xa8",
"DoubleDownArrow;": "\u21d3",
"DoubleLeftArrow;": "\u21d0",
"DoubleLeftRightArrow;": "\u21d4",
"DoubleLeftTee;": "\u2ae4",
"DoubleLongLeftArrow;": "\u27f8",
"DoubleLongLeftRightArrow;": "\u27fa",
"DoubleLongRightArrow;": "\u27f9",
"DoubleRightArrow;": "\u21d2",
"DoubleRightTee;": "\u22a8",
"DoubleUpArrow;": "\u21d1",
"DoubleUpDownArrow;": "\u21d5",
"DoubleVerticalBar;": "\u2225",
"DownArrow;": "\u2193",
"DownArrowBar;": "\u2913",
"DownArrowUpArrow;": "\u21f5",
"DownBreve;": "\u0311",
"DownLeftRightVector;": "\u2950",
"DownLeftTeeVector;": "\u295e",
"DownLeftVector;": "\u21bd",
"DownLeftVectorBar;": "\u2956",
"DownRightTeeVector;": "\u295f",
"DownRightVector;": "\u21c1",
"DownRightVectorBar;": "\u2957",
"DownTee;": "\u22a4",
"DownTeeArrow;": "\u21a7",
"Downarrow;": "\u21d3",
"Dscr;": "\U0001d49f",
"Dstrok;": "\u0110",
"ENG;": "\u014a",
"ETH": "\xd0",
"ETH;": "\xd0",
"Eacute": "\xc9",
"Eacute;": "\xc9",
"Ecaron;": "\u011a",
"Ecirc": "\xca",
"Ecirc;": "\xca",
"Ecy;": "\u042d",
"Edot;": "\u0116",
"Efr;": "\U0001d508",
"Egrave": "\xc8",
"Egrave;": "\xc8",
"Element;": "\u2208",
"Emacr;": "\u0112",
"EmptySmallSquare;": "\u25fb",
"EmptyVerySmallSquare;": "\u25ab",
"Eogon;": "\u0118",
"Eopf;": "\U0001d53c",
"Epsilon;": "\u0395",
"Equal;": "\u2a75",
"EqualTilde;": "\u2242",
"Equilibrium;": "\u21cc",
"Escr;": "\u2130",
"Esim;": "\u2a73",
"Eta;": "\u0397",
"Euml": "\xcb",
"Euml;": "\xcb",
"Exists;": "\u2203",
"ExponentialE;": "\u2147",
"Fcy;": "\u0424",
"Ffr;": "\U0001d509",
"FilledSmallSquare;": "\u25fc",
"FilledVerySmallSquare;": "\u25aa",
"Fopf;": "\U0001d53d",
"ForAll;": "\u2200",
"Fouriertrf;": "\u2131",
"Fscr;": "\u2131",
"GJcy;": "\u0403",
"GT": ">",
"GT;": ">",
"Gamma;": "\u0393",
"Gammad;": "\u03dc",
"Gbreve;": "\u011e",
"Gcedil;": "\u0122",
"Gcirc;": "\u011c",
"Gcy;": "\u0413",
"Gdot;": "\u0120",
"Gfr;": "\U0001d50a",
"Gg;": "\u22d9",
"Gopf;": "\U0001d53e",
"GreaterEqual;": "\u2265",
"GreaterEqualLess;": "\u22db",
"GreaterFullEqual;": "\u2267",
"GreaterGreater;": "\u2aa2",
"GreaterLess;": "\u2277",
"GreaterSlantEqual;": "\u2a7e",
"GreaterTilde;": "\u2273",
"Gscr;": "\U0001d4a2",
"Gt;": "\u226b",
"HARDcy;": "\u042a",
"Hacek;": "\u02c7",
"Hat;": "^",
"Hcirc;": "\u0124",
"Hfr;": "\u210c",
"HilbertSpace;": "\u210b",
"Hopf;": "\u210d",
"HorizontalLine;": "\u2500",
"Hscr;": "\u210b",
"Hstrok;": "\u0126",
"HumpDownHump;": "\u224e",
"HumpEqual;": "\u224f",
"IEcy;": "\u0415",
"IJlig;": "\u0132",
"IOcy;": "\u0401",
"Iacute": "\xcd",
"Iacute;": "\xcd",
"Icirc": "\xce",
"Icirc;": "\xce",
"Icy;": "\u0418",
"Idot;": "\u0130",
"Ifr;": "\u2111",
"Igrave": "\xcc",
"Igrave;": "\xcc",
"Im;": "\u2111",
"Imacr;": "\u012a",
"ImaginaryI;": "\u2148",
"Implies;": "\u21d2",
"Int;": "\u222c",
"Integral;": "\u222b",
"Intersection;": "\u22c2",
"InvisibleComma;": "\u2063",
"InvisibleTimes;": "\u2062",
"Iogon;": "\u012e",
"Iopf;": "\U0001d540",
"Iota;": "\u0399",
"Iscr;": "\u2110",
"Itilde;": "\u0128",
"Iukcy;": "\u0406",
"Iuml": "\xcf",
"Iuml;": "\xcf",
"Jcirc;": "\u0134",
"Jcy;": "\u0419",
"Jfr;": "\U0001d50d",
"Jopf;": "\U0001d541",
"Jscr;": "\U0001d4a5",
"Jsercy;": "\u0408",
"Jukcy;": "\u0404",
"KHcy;": "\u0425",
"KJcy;": "\u040c",
"Kappa;": "\u039a",
"Kcedil;": "\u0136",
"Kcy;": "\u041a",
"Kfr;": "\U0001d50e",
"Kopf;": "\U0001d542",
"Kscr;": "\U0001d4a6",
"LJcy;": "\u0409",
"LT": "<",
"LT;": "<",
"Lacute;": "\u0139",
"Lambda;": "\u039b",
"Lang;": "\u27ea",
"Laplacetrf;": "\u2112",
"Larr;": "\u219e",
"Lcaron;": "\u013d",
"Lcedil;": "\u013b",
"Lcy;": "\u041b",
"LeftAngleBracket;": "\u27e8",
"LeftArrow;": "\u2190",
"LeftArrowBar;": "\u21e4",
"LeftArrowRightArrow;": "\u21c6",
"LeftCeiling;": "\u2308",
"LeftDoubleBracket;": "\u27e6",
"LeftDownTeeVector;": "\u2961",
"LeftDownVector;": "\u21c3",
"LeftDownVectorBar;": "\u2959",
"LeftFloor;": "\u230a",
"LeftRightArrow;": "\u2194",
"LeftRightVector;": "\u294e",
"LeftTee;": "\u22a3",
"LeftTeeArrow;": "\u21a4",
"LeftTeeVector;": "\u295a",
"LeftTriangle;": "\u22b2",
"LeftTriangleBar;": "\u29cf",
"LeftTriangleEqual;": "\u22b4",
"LeftUpDownVector;": "\u2951",
"LeftUpTeeVector;": "\u2960",
"LeftUpVector;": "\u21bf",
"LeftUpVectorBar;": "\u2958",
"LeftVector;": "\u21bc",
"LeftVectorBar;": "\u2952",
"Leftarrow;": "\u21d0",
"Leftrightarrow;": "\u21d4",
"LessEqualGreater;": "\u22da",
"LessFullEqual;": "\u2266",
"LessGreater;": "\u2276",
"LessLess;": "\u2aa1",
"LessSlantEqual;": "\u2a7d",
"LessTilde;": "\u2272",
"Lfr;": "\U0001d50f",
"Ll;": "\u22d8",
"Lleftarrow;": "\u21da",
"Lmidot;": "\u013f",
"LongLeftArrow;": "\u27f5",
"LongLeftRightArrow;": "\u27f7",
"LongRightArrow;": "\u27f6",
"Longleftarrow;": "\u27f8",
"Longleftrightarrow;": "\u27fa",
"Longrightarrow;": "\u27f9",
"Lopf;": "\U0001d543",
"LowerLeftArrow;": "\u2199",
"LowerRightArrow;": "\u2198",
"Lscr;": "\u2112",
"Lsh;": "\u21b0",
"Lstrok;": "\u0141",
"Lt;": "\u226a",
"Map;": "\u2905",
"Mcy;": "\u041c",
"MediumSpace;": "\u205f",
"Mellintrf;": "\u2133",
"Mfr;": "\U0001d510",
"MinusPlus;": "\u2213",
"Mopf;": "\U0001d544",
"Mscr;": "\u2133",
"Mu;": "\u039c",
"NJcy;": "\u040a",
"Nacute;": "\u0143",
"Ncaron;": "\u0147",
"Ncedil;": "\u0145",
"Ncy;": "\u041d",
"NegativeMediumSpace;": "\u200b",
"NegativeThickSpace;": "\u200b",
"NegativeThinSpace;": "\u200b",
"NegativeVeryThinSpace;": "\u200b",
"NestedGreaterGreater;": "\u226b",
"NestedLessLess;": "\u226a",
"NewLine;": "\n",
"Nfr;": "\U0001d511",
"NoBreak;": "\u2060",
"NonBreakingSpace;": "\xa0",
"Nopf;": "\u2115",
"Not;": "\u2aec",
"NotCongruent;": "\u2262",
"NotCupCap;": "\u226d",
"NotDoubleVerticalBar;": "\u2226",
"NotElement;": "\u2209",
"NotEqual;": "\u2260",
"NotEqualTilde;": "\u2242\u0338",
"NotExists;": "\u2204",
"NotGreater;": "\u226f",
"NotGreaterEqual;": "\u2271",
"NotGreaterFullEqual;": "\u2267\u0338",
"NotGreaterGreater;": "\u226b\u0338",
"NotGreaterLess;": "\u2279",
"NotGreaterSlantEqual;": "\u2a7e\u0338",
"NotGreaterTilde;": "\u2275",
"NotHumpDownHump;": "\u224e\u0338",
"NotHumpEqual;": "\u224f\u0338",
"NotLeftTriangle;": "\u22ea",
"NotLeftTriangleBar;": "\u29cf\u0338",
"NotLeftTriangleEqual;": "\u22ec",
"NotLess;": "\u226e",
"NotLessEqual;": "\u2270",
"NotLessGreater;": "\u2278",
"NotLessLess;": "\u226a\u0338",
"NotLessSlantEqual;": "\u2a7d\u0338",
"NotLessTilde;": "\u2274",
"NotNestedGreaterGreater;": "\u2aa2\u0338",
"NotNestedLessLess;": "\u2aa1\u0338",
"NotPrecedes;": "\u2280",
"NotPrecedesEqual;": "\u2aaf\u0338",
"NotPrecedesSlantEqual;": "\u22e0",
"NotReverseElement;": "\u220c",
"NotRightTriangle;": "\u22eb",
"NotRightTriangleBar;": "\u29d0\u0338",
"NotRightTriangleEqual;": "\u22ed",
"NotSquareSubset;": "\u228f\u0338",
"NotSquareSubsetEqual;": "\u22e2",
"NotSquareSuperset;": "\u2290\u0338",
"NotSquareSupersetEqual;": "\u22e3",
"NotSubset;": "\u2282\u20d2",
"NotSubsetEqual;": "\u2288",
"NotSucceeds;": "\u2281",
"NotSucceedsEqual;": "\u2ab0\u0338",
"NotSucceedsSlantEqual;": "\u22e1",
"NotSucceedsTilde;": "\u227f\u0338",
"NotSuperset;": "\u2283\u20d2",
"NotSupersetEqual;": "\u2289",
"NotTilde;": "\u2241",
"NotTildeEqual;": "\u2244",
"NotTildeFullEqual;": "\u2247",
"NotTildeTilde;": "\u2249",
"NotVerticalBar;": "\u2224",
"Nscr;": "\U0001d4a9",
"Ntilde": "\xd1",
"Ntilde;": "\xd1",
"Nu;": "\u039d",
"OElig;": "\u0152",
"Oacute": "\xd3",
"Oacute;": "\xd3",
"Ocirc": "\xd4",
"Ocirc;": "\xd4",
"Ocy;": "\u041e",
"Odblac;": "\u0150",
"Ofr;": "\U0001d512",
"Ograve": "\xd2",
"Ograve;": "\xd2",
"Omacr;": "\u014c",
"Omega;": "\u03a9",
"Omicron;": "\u039f",
"Oopf;": "\U0001d546",
"OpenCurlyDoubleQuote;": "\u201c",
"OpenCurlyQuote;": "\u2018",
"Or;": "\u2a54",
"Oscr;": "\U0001d4aa",
"Oslash": "\xd8",
"Oslash;": "\xd8",
"Otilde": "\xd5",
"Otilde;": "\xd5",
"Otimes;": "\u2a37",
"Ouml": "\xd6",
"Ouml;": "\xd6",
"OverBar;": "\u203e",
"OverBrace;": "\u23de",
"OverBracket;": "\u23b4",
"OverParenthesis;": "\u23dc",
"PartialD;": "\u2202",
"Pcy;": "\u041f",
"Pfr;": "\U0001d513",
"Phi;": "\u03a6",
"Pi;": "\u03a0",
"PlusMinus;": "\xb1",
"Poincareplane;": "\u210c",
"Popf;": "\u2119",
"Pr;": "\u2abb",
"Precedes;": "\u227a",
"PrecedesEqual;": "\u2aaf",
"PrecedesSlantEqual;": "\u227c",
"PrecedesTilde;": "\u227e",
"Prime;": "\u2033",
"Product;": "\u220f",
"Proportion;": "\u2237",
"Proportional;": "\u221d",
"Pscr;": "\U0001d4ab",
"Psi;": "\u03a8",
"QUOT": "\"",
"QUOT;": "\"",
"Qfr;": "\U0001d514",
"Qopf;": "\u211a",
"Qscr;": "\U0001d4ac",
"RBarr;": "\u2910",
"REG": "\xae",
"REG;": "\xae",
"Racute;": "\u0154",
"Rang;": "\u27eb",
"Rarr;": "\u21a0",
"Rarrtl;": "\u2916",
"Rcaron;": "\u0158",
"Rcedil;": "\u0156",
"Rcy;": "\u0420",
"Re;": "\u211c",
"ReverseElement;": "\u220b",
"ReverseEquilibrium;": "\u21cb",
"ReverseUpEquilibrium;": "\u296f",
"Rfr;": "\u211c",
"Rho;": "\u03a1",
"RightAngleBracket;": "\u27e9",
"RightArrow;": "\u2192",
"RightArrowBar;": "\u21e5",
"RightArrowLeftArrow;": "\u21c4",
"RightCeiling;": "\u2309",
"RightDoubleBracket;": "\u27e7",
"RightDownTeeVector;": "\u295d",
"RightDownVector;": "\u21c2",
"RightDownVectorBar;": "\u2955",
"RightFloor;": "\u230b",
"RightTee;": "\u22a2",
"RightTeeArrow;": "\u21a6",
"RightTeeVector;": "\u295b",
"RightTriangle;": "\u22b3",
"RightTriangleBar;": "\u29d0",
"RightTriangleEqual;": "\u22b5",
"RightUpDownVector;": "\u294f",
"RightUpTeeVector;": "\u295c",
"RightUpVector;": "\u21be",
"RightUpVectorBar;": "\u2954",
"RightVector;": "\u21c0",
"RightVectorBar;": "\u2953",
"Rightarrow;": "\u21d2",
"Ropf;": "\u211d",
"RoundImplies;": "\u2970",
"Rrightarrow;": "\u21db",
"Rscr;": "\u211b",
"Rsh;": "\u21b1",
"RuleDelayed;": "\u29f4",
"SHCHcy;": "\u0429",
"SHcy;": "\u0428",
"SOFTcy;": "\u042c",
"Sacute;": "\u015a",
"Sc;": "\u2abc",
"Scaron;": "\u0160",
"Scedil;": "\u015e",
"Scirc;": "\u015c",
"Scy;": "\u0421",
"Sfr;": "\U0001d516",
"ShortDownArrow;": "\u2193",
"ShortLeftArrow;": "\u2190",
"ShortRightArrow;": "\u2192",
"ShortUpArrow;": "\u2191",
"Sigma;": "\u03a3",
"SmallCircle;": "\u2218",
"Sopf;": "\U0001d54a",
"Sqrt;": "\u221a",
"Square;": "\u25a1",
"SquareIntersection;": "\u2293",
"SquareSubset;": "\u228f",
"SquareSubsetEqual;": "\u2291",
"SquareSuperset;": "\u2290",
"SquareSupersetEqual;": "\u2292",
"SquareUnion;": "\u2294",
"Sscr;": "\U0001d4ae",
"Star;": "\u22c6",
"Sub;": "\u22d0",
"Subset;": "\u22d0",
"SubsetEqual;": "\u2286",
"Succeeds;": "\u227b",
"SucceedsEqual;": "\u2ab0",
"SucceedsSlantEqual;": "\u227d",
"SucceedsTilde;": "\u227f",
"SuchThat;": "\u220b",
"Sum;": "\u2211",
"Sup;": "\u22d1",
"Superset;": "\u2283",
"SupersetEqual;": "\u2287",
"Supset;": "\u22d1",
"THORN": "\xde",
"THORN;": "\xde",
"TRADE;": "\u2122",
"TSHcy;": "\u040b",
"TScy;": "\u0426",
"Tab;": "\t",
"Tau;": "\u03a4",
"Tcaron;": "\u0164",
"Tcedil;": "\u0162",
"Tcy;": "\u0422",
"Tfr;": "\U0001d517",
"Therefore;": "\u2234",
"Theta;": "\u0398",
"ThickSpace;": "\u205f\u200a",
"ThinSpace;": "\u2009",
"Tilde;": "\u223c",
"TildeEqual;": "\u2243",
"TildeFullEqual;": "\u2245",
"TildeTilde;": "\u2248",
"Topf;": "\U0001d54b",
"TripleDot;": "\u20db",
"Tscr;": "\U0001d4af",
"Tstrok;": "\u0166",
"Uacute": "\xda",
"Uacute;": "\xda",
"Uarr;": "\u219f",
"Uarrocir;": "\u2949",
"Ubrcy;": "\u040e",
"Ubreve;": "\u016c",
"Ucirc": "\xdb",
"Ucirc;": "\xdb",
"Ucy;": "\u0423",
"Udblac;": "\u0170",
"Ufr;": "\U0001d518",
"Ugrave": "\xd9",
"Ugrave;": "\xd9",
"Umacr;": "\u016a",
"UnderBar;": "_",
"UnderBrace;": "\u23df",
"UnderBracket;": "\u23b5",
"UnderParenthesis;": "\u23dd",
"Union;": "\u22c3",
"UnionPlus;": "\u228e",
"Uogon;": "\u0172",
"Uopf;": "\U0001d54c",
"UpArrow;": "\u2191",
"UpArrowBar;": "\u2912",
"UpArrowDownArrow;": "\u21c5",
"UpDownArrow;": "\u2195",
"UpEquilibrium;": "\u296e",
"UpTee;": "\u22a5",
"UpTeeArrow;": "\u21a5",
"Uparrow;": "\u21d1",
"Updownarrow;": "\u21d5",
"UpperLeftArrow;": "\u2196",
"UpperRightArrow;": "\u2197",
"Upsi;": "\u03d2",
"Upsilon;": "\u03a5",
"Uring;": "\u016e",
"Uscr;": "\U0001d4b0",
"Utilde;": "\u0168",
"Uuml": "\xdc",
"Uuml;": "\xdc",
"VDash;": "\u22ab",
"Vbar;": "\u2aeb",
"Vcy;": "\u0412",
"Vdash;": "\u22a9",
"Vdashl;": "\u2ae6",
"Vee;": "\u22c1",
"Verbar;": "\u2016",
"Vert;": "\u2016",
"VerticalBar;": "\u2223",
"VerticalLine;": "|",
"VerticalSeparator;": "\u2758",
"VerticalTilde;": "\u2240",
"VeryThinSpace;": "\u200a",
"Vfr;": "\U0001d519",
"Vopf;": "\U0001d54d",
"Vscr;": "\U0001d4b1",
"Vvdash;": "\u22aa",
"Wcirc;": "\u0174",
"Wedge;": "\u22c0",
"Wfr;": "\U0001d51a",
"Wopf;": "\U0001d54e",
"Wscr;": "\U0001d4b2",
"Xfr;": "\U0001d51b",
"Xi;": "\u039e",
"Xopf;": "\U0001d54f",
"Xscr;": "\U0001d4b3",
"YAcy;": "\u042f",
"YIcy;": "\u0407",
"YUcy;": "\u042e",
"Yacute": "\xdd",
"Yacute;": "\xdd",
"Ycirc;": "\u0176",
"Ycy;": "\u042b",
"Yfr;": "\U0001d51c",
"Yopf;": "\U0001d550",
"Yscr;": "\U0001d4b4",
"Yuml;": "\u0178",
"ZHcy;": "\u0416",
"Zacute;": "\u0179",
"Zcaron;": "\u017d",
"Zcy;": "\u0417",
"Zdot;": "\u017b",
"ZeroWidthSpace;": "\u200b",
"Zeta;": "\u0396",
"Zfr;": "\u2128",
"Zopf;": "\u2124",
"Zscr;": "\U0001d4b5",
"aacute": "\xe1",
"aacute;": "\xe1",
"abreve;": "\u0103",
"ac;": "\u223e",
"acE;": "\u223e\u0333",
"acd;": "\u223f",
"acirc": "\xe2",
"acirc;": "\xe2",
"acute": "\xb4",
"acute;": "\xb4",
"acy;": "\u0430",
"aelig": "\xe6",
"aelig;": "\xe6",
"af;": "\u2061",
"afr;": "\U0001d51e",
"agrave": "\xe0",
"agrave;": "\xe0",
"alefsym;": "\u2135",
"aleph;": "\u2135",
"alpha;": "\u03b1",
"amacr;": "\u0101",
"amalg;": "\u2a3f",
"amp": "&",
"amp;": "&",
"and;": "\u2227",
"andand;": "\u2a55",
"andd;": "\u2a5c",
"andslope;": "\u2a58",
"andv;": "\u2a5a",
"ang;": "\u2220",
"ange;": "\u29a4",
"angle;": "\u2220",
"angmsd;": "\u2221",
"angmsdaa;": "\u29a8",
"angmsdab;": "\u29a9",
"angmsdac;": "\u29aa",
"angmsdad;": "\u29ab",
"angmsdae;": "\u29ac",
"angmsdaf;": "\u29ad",
"angmsdag;": "\u29ae",
"angmsdah;": "\u29af",
"angrt;": "\u221f",
"angrtvb;": "\u22be",
"angrtvbd;": "\u299d",
"angsph;": "\u2222",
"angst;": "\xc5",
"angzarr;": "\u237c",
"aogon;": "\u0105",
"aopf;": "\U0001d552",
"ap;": "\u2248",
"apE;": "\u2a70",
"apacir;": "\u2a6f",
"ape;": "\u224a",
"apid;": "\u224b",
"apos;": "'",
"approx;": "\u2248",
"approxeq;": "\u224a",
"aring": "\xe5",
"aring;": "\xe5",
"ascr;": "\U0001d4b6",
"ast;": "*",
"asymp;": "\u2248",
"asympeq;": "\u224d",
"atilde": "\xe3",
"atilde;": "\xe3",
"auml": "\xe4",
"auml;": "\xe4",
"awconint;": "\u2233",
"awint;": "\u2a11",
"bNot;": "\u2aed",
"backcong;": "\u224c",
"backepsilon;": "\u03f6",
"backprime;": "\u2035",
"backsim;": "\u223d",
"backsimeq;": "\u22cd",
"barvee;": "\u22bd",
"barwed;": "\u2305",
"barwedge;": "\u2305",
"bbrk;": "\u23b5",
"bbrktbrk;": "\u23b6",
"bcong;": "\u224c",
"bcy;": "\u0431",
"bdquo;": "\u201e",
"becaus;": "\u2235",
"because;": "\u2235",
"bemptyv;": "\u29b0",
"bepsi;": "\u03f6",
"bernou;": "\u212c",
"beta;": "\u03b2",
"beth;": "\u2136",
"between;": "\u226c",
"bfr;": "\U0001d51f",
"bigcap;": "\u22c2",
"bigcirc;": "\u25ef",
"bigcup;": "\u22c3",
"bigodot;": "\u2a00",
"bigoplus;": "\u2a01",
"bigotimes;": "\u2a02",
"bigsqcup;": "\u2a06",
"bigstar;": "\u2605",
"bigtriangledown;": "\u25bd",
"bigtriangleup;": "\u25b3",
"biguplus;": "\u2a04",
"bigvee;": "\u22c1",
"bigwedge;": "\u22c0",
"bkarow;": "\u290d",
"blacklozenge;": "\u29eb",
"blacksquare;": "\u25aa",
"blacktriangle;": "\u25b4",
"blacktriangledown;": "\u25be",
"blacktriangleleft;": "\u25c2",
"blacktriangleright;": "\u25b8",
"blank;": "\u2423",
"blk12;": "\u2592",
"blk14;": "\u2591",
"blk34;": "\u2593",
"block;": "\u2588",
"bne;": "=\u20e5",
"bnequiv;": "\u2261\u20e5",
"bnot;": "\u2310",
"bopf;": "\U0001d553",
"bot;": "\u22a5",
"bottom;": "\u22a5",
"bowtie;": "\u22c8",
"boxDL;": "\u2557",
"boxDR;": "\u2554",
"boxDl;": "\u2556",
"boxDr;": "\u2553",
"boxH;": "\u2550",
"boxHD;": "\u2566",
"boxHU;": "\u2569",
"boxHd;": "\u2564",
"boxHu;": "\u2567",
"boxUL;": "\u255d",
"boxUR;": "\u255a",
"boxUl;": "\u255c",
"boxUr;": "\u2559",
"boxV;": "\u2551",
"boxVH;": "\u256c",
"boxVL;": "\u2563",
"boxVR;": "\u2560",
"boxVh;": "\u256b",
"boxVl;": "\u2562",
"boxVr;": "\u255f",
"boxbox;": "\u29c9",
"boxdL;": "\u2555",
"boxdR;": "\u2552",
"boxdl;": "\u2510",
"boxdr;": "\u250c",
"boxh;": "\u2500",
"boxhD;": "\u2565",
"boxhU;": "\u2568",
"boxhd;": "\u252c",
"boxhu;": "\u2534",
"boxminus;": "\u229f",
"boxplus;": "\u229e",
"boxtimes;": "\u22a0",
"boxuL;": "\u255b",
"boxuR;": "\u2558",
"boxul;": "\u2518",
"boxur;": "\u2514",
"boxv;": "\u2502",
"boxvH;": "\u256a",
"boxvL;": "\u2561",
"boxvR;": "\u255e",
"boxvh;": "\u253c",
"boxvl;": "\u2524",
"boxvr;": "\u251c",
"bprime;": "\u2035",
"breve;": "\u02d8",
"brvbar": "\xa6",
"brvbar;": "\xa6",
"bscr;": "\U0001d4b7",
"bsemi;": "\u204f",
"bsim;": "\u223d",
"bsime;": "\u22cd",
"bsol;": "\\",
"bsolb;": "\u29c5",
"bsolhsub;": "\u27c8",
"bull;": "\u2022",
"bullet;": "\u2022",
"bump;": "\u224e",
"bumpE;": "\u2aae",
"bumpe;": "\u224f",
"bumpeq;": "\u224f",
"cacute;": "\u0107",
"cap;": "\u2229",
"capand;": "\u2a44",
"capbrcup;": "\u2a49",
"capcap;": "\u2a4b",
"capcup;": "\u2a47",
"capdot;": "\u2a40",
"caps;": "\u2229\ufe00",
"caret;": "\u2041",
"caron;": "\u02c7",
"ccaps;": "\u2a4d",
"ccaron;": "\u010d",
"ccedil": "\xe7",
"ccedil;": "\xe7",
"ccirc;": "\u0109",
"ccups;": "\u2a4c",
"ccupssm;": "\u2a50",
"cdot;": "\u010b",
"cedil": "\xb8",
"cedil;": "\xb8",
"cemptyv;": "\u29b2",
"cent": "\xa2",
"cent;": "\xa2",
"centerdot;": "\xb7",
"cfr;": "\U0001d520",
"chcy;": "\u0447",
"check;": "\u2713",
"checkmark;": "\u2713",
"chi;": "\u03c7",
"cir;": "\u25cb",
"cirE;": "\u29c3",
"circ;": "\u02c6",
"circeq;": "\u2257",
"circlearrowleft;": "\u21ba",
"circlearrowright;": "\u21bb",
"circledR;": "\xae",
"circledS;": "\u24c8",
"circledast;": "\u229b",
"circledcirc;": "\u229a",
"circleddash;": "\u229d",
"cire;": "\u2257",
"cirfnint;": "\u2a10",
"cirmid;": "\u2aef",
"cirscir;": "\u29c2",
"clubs;": "\u2663",
"clubsuit;": "\u2663",
"colon;": ":",
"colone;": "\u2254",
"coloneq;": "\u2254",
"comma;": ",",
"commat;": "@",
"comp;": "\u2201",
"compfn;": "\u2218",
"complement;": "\u2201",
"complexes;": "\u2102",
"cong;": "\u2245",
"congdot;": "\u2a6d",
"conint;": "\u222e",
"copf;": "\U0001d554",
"coprod;": "\u2210",
"copy": "\xa9",
"copy;": "\xa9",
"copysr;": "\u2117",
"crarr;": "\u21b5",
"cross;": "\u2717",
"cscr;": "\U0001d4b8",
"csub;": "\u2acf",
"csube;": "\u2ad1",
"csup;": "\u2ad0",
"csupe;": "\u2ad2",
"ctdot;": "\u22ef",
"cudarrl;": "\u2938",
"cudarrr;": "\u2935",
"cuepr;": "\u22de",
"cuesc;": "\u22df",
"cularr;": "\u21b6",
"cularrp;": "\u293d",
"cup;": "\u222a",
"cupbrcap;": "\u2a48",
"cupcap;": "\u2a46",
"cupcup;": "\u2a4a",
"cupdot;": "\u228d",
"cupor;": "\u2a45",
"cups;": "\u222a\ufe00",
"curarr;": "\u21b7",
"curarrm;": "\u293c",
"curlyeqprec;": "\u22de",
"curlyeqsucc;": "\u22df",
"curlyvee;": "\u22ce",
"curlywedge;": "\u22cf",
"curren": "\xa4",
"curren;": "\xa4",
"curvearrowleft;": "\u21b6",
"curvearrowright;": "\u21b7",
"cuvee;": "\u22ce",
"cuwed;": "\u22cf",
"cwconint;": "\u2232",
"cwint;": "\u2231",
"cylcty;": "\u232d",
"dArr;": "\u21d3",
"dHar;": "\u2965",
"dagger;": "\u2020",
"daleth;": "\u2138",
"darr;": "\u2193",
"dash;": "\u2010",
"dashv;": "\u22a3",
"dbkarow;": "\u290f",
"dblac;": "\u02dd",
"dcaron;": "\u010f",
"dcy;": "\u0434",
"dd;": "\u2146",
"ddagger;": "\u2021",
"ddarr;": "\u21ca",
"ddotseq;": "\u2a77",
"deg": "\xb0",
"deg;": "\xb0",
"delta;": "\u03b4",
"demptyv;": "\u29b1",
"dfisht;": "\u297f",
"dfr;": "\U0001d521",
"dharl;": "\u21c3",
"dharr;": "\u21c2",
"diam;": "\u22c4",
"diamond;": "\u22c4",
"diamondsuit;": "\u2666",
"diams;": "\u2666",
"die;": "\xa8",
"digamma;": "\u03dd",
"disin;": "\u22f2",
"div;": "\xf7",
"divide": "\xf7",
"divide;": "\xf7",
"divideontimes;": "\u22c7",
"divonx;": "\u22c7",
"djcy;": "\u0452",
"dlcorn;": "\u231e",
"dlcrop;": "\u230d",
"dollar;": "$",
"dopf;": "\U0001d555",
"dot;": "\u02d9",
"doteq;": "\u2250",
"doteqdot;": "\u2251",
"dotminus;": "\u2238",
"dotplus;": "\u2214",
"dotsquare;": "\u22a1",
"doublebarwedge;": "\u2306",
"downarrow;": "\u2193",
"downdownarrows;": "\u21ca",
"downharpoonleft;": "\u21c3",
"downharpoonright;": "\u21c2",
"drbkarow;": "\u2910",
"drcorn;": "\u231f",
"drcrop;": "\u230c",
"dscr;": "\U0001d4b9",
"dscy;": "\u0455",
"dsol;": "\u29f6",
"dstrok;": "\u0111",
"dtdot;": "\u22f1",
"dtri;": "\u25bf",
"dtrif;": "\u25be",
"duarr;": "\u21f5",
"duhar;": "\u296f",
"dwangle;": "\u29a6",
"dzcy;": "\u045f",
"dzigrarr;": "\u27ff",
"eDDot;": "\u2a77",
"eDot;": "\u2251",
"eacute": "\xe9",
"eacute;": "\xe9",
"easter;": "\u2a6e",
"ecaron;": "\u011b",
"ecir;": "\u2256",
"ecirc": "\xea",
"ecirc;": "\xea",
"ecolon;": "\u2255",
"ecy;": "\u044d",
"edot;": "\u0117",
"ee;": "\u2147",
"efDot;": "\u2252",
"efr;": "\U0001d522",
"eg;": "\u2a9a",
"egrave": "\xe8",
"egrave;": "\xe8",
"egs;": "\u2a96",
"egsdot;": "\u2a98",
"el;": "\u2a99",
"elinters;": "\u23e7",
"ell;": "\u2113",
"els;": "\u2a95",
"elsdot;": "\u2a97",
"emacr;": "\u0113",
"empty;": "\u2205",
"emptyset;": "\u2205",
"emptyv;": "\u2205",
"emsp13;": "\u2004",
"emsp14;": "\u2005",
"emsp;": "\u2003",
"eng;": "\u014b",
"ensp;": "\u2002",
"eogon;": "\u0119",
"eopf;": "\U0001d556",
"epar;": "\u22d5",
"eparsl;": "\u29e3",
"eplus;": "\u2a71",
"epsi;": "\u03b5",
"epsilon;": "\u03b5",
"epsiv;": "\u03f5",
"eqcirc;": "\u2256",
"eqcolon;": "\u2255",
"eqsim;": "\u2242",
"eqslantgtr;": "\u2a96",
"eqslantless;": "\u2a95",
"equals;": "=",
"equest;": "\u225f",
"equiv;": "\u2261",
"equivDD;": "\u2a78",
"eqvparsl;": "\u29e5",
"erDot;": "\u2253",
"erarr;": "\u2971",
"escr;": "\u212f",
"esdot;": "\u2250",
"esim;": "\u2242",
"eta;": "\u03b7",
"eth": "\xf0",
"eth;": "\xf0",
"euml": "\xeb",
"euml;": "\xeb",
"euro;": "\u20ac",
"excl;": "!",
"exist;": "\u2203",
"expectation;": "\u2130",
"exponentiale;": "\u2147",
"fallingdotseq;": "\u2252",
"fcy;": "\u0444",
"female;": "\u2640",
"ffilig;": "\ufb03",
"fflig;": "\ufb00",
"ffllig;": "\ufb04",
"ffr;": "\U0001d523",
"filig;": "\ufb01",
"fjlig;": "fj",
"flat;": "\u266d",
"fllig;": "\ufb02",
"fltns;": "\u25b1",
"fnof;": "\u0192",
"fopf;": "\U0001d557",
"forall;": "\u2200",
"fork;": "\u22d4",
"forkv;": "\u2ad9",
"fpartint;": "\u2a0d",
"frac12": "\xbd",
"frac12;": "\xbd",
"frac13;": "\u2153",
"frac14": "\xbc",
"frac14;": "\xbc",
"frac15;": "\u2155",
"frac16;": "\u2159",
"frac18;": "\u215b",
"frac23;": "\u2154",
"frac25;": "\u2156",
"frac34": "\xbe",
"frac34;": "\xbe",
"frac35;": "\u2157",
"frac38;": "\u215c",
"frac45;": "\u2158",
"frac56;": "\u215a",
"frac58;": "\u215d",
"frac78;": "\u215e",
"frasl;": "\u2044",
"frown;": "\u2322",
"fscr;": "\U0001d4bb",
"gE;": "\u2267",
"gEl;": "\u2a8c",
"gacute;": "\u01f5",
"gamma;": "\u03b3",
"gammad;": "\u03dd",
"gap;": "\u2a86",
"gbreve;": "\u011f",
"gcirc;": "\u011d",
"gcy;": "\u0433",
"gdot;": "\u0121",
"ge;": "\u2265",
"gel;": "\u22db",
"geq;": "\u2265",
"geqq;": "\u2267",
"geqslant;": "\u2a7e",
"ges;": "\u2a7e",
"gescc;": "\u2aa9",
"gesdot;": "\u2a80",
"gesdoto;": "\u2a82",
"gesdotol;": "\u2a84",
"gesl;": "\u22db\ufe00",
"gesles;": "\u2a94",
"gfr;": "\U0001d524",
"gg;": "\u226b",
"ggg;": "\u22d9",
"gimel;": "\u2137",
"gjcy;": "\u0453",
"gl;": "\u2277",
"glE;": "\u2a92",
"gla;": "\u2aa5",
"glj;": "\u2aa4",
"gnE;": "\u2269",
"gnap;": "\u2a8a",
"gnapprox;": "\u2a8a",
"gne;": "\u2a88",
"gneq;": "\u2a88",
"gneqq;": "\u2269",
"gnsim;": "\u22e7",
"gopf;": "\U0001d558",
"grave;": "`",
"gscr;": "\u210a",
"gsim;": "\u2273",
"gsime;": "\u2a8e",
"gsiml;": "\u2a90",
"gt": ">",
"gt;": ">",
"gtcc;": "\u2aa7",
"gtcir;": "\u2a7a",
"gtdot;": "\u22d7",
"gtlPar;": "\u2995",
"gtquest;": "\u2a7c",
"gtrapprox;": "\u2a86",
"gtrarr;": "\u2978",
"gtrdot;": "\u22d7",
"gtreqless;": "\u22db",
"gtreqqless;": "\u2a8c",
"gtrless;": "\u2277",
"gtrsim;": "\u2273",
"gvertneqq;": "\u2269\ufe00",
"gvnE;": "\u2269\ufe00",
"hArr;": "\u21d4",
"hairsp;": "\u200a",
"half;": "\xbd",
"hamilt;": "\u210b",
"hardcy;": "\u044a",
"harr;": "\u2194",
"harrcir;": "\u2948",
"harrw;": "\u21ad",
"hbar;": "\u210f",
"hcirc;": "\u0125",
"hearts;": "\u2665",
"heartsuit;": "\u2665",
"hellip;": "\u2026",
"hercon;": "\u22b9",
"hfr;": "\U0001d525",
"hksearow;": "\u2925",
"hkswarow;": "\u2926",
"hoarr;": "\u21ff",
"homtht;": "\u223b",
"hookleftarrow;": "\u21a9",
"hookrightarrow;": "\u21aa",
"hopf;": "\U0001d559",
"horbar;": "\u2015",
"hscr;": "\U0001d4bd",
"hslash;": "\u210f",
"hstrok;": "\u0127",
"hybull;": "\u2043",
"hyphen;": "\u2010",
"iacute": "\xed",
"iacute;": "\xed",
"ic;": "\u2063",
"icirc": "\xee",
"icirc;": "\xee",
"icy;": "\u0438",
"iecy;": "\u0435",
"iexcl": "\xa1",
"iexcl;": "\xa1",
"iff;": "\u21d4",
"ifr;": "\U0001d526",
"igrave": "\xec",
"igrave;": "\xec",
"ii;": "\u2148",
"iiiint;": "\u2a0c",
"iiint;": "\u222d",
"iinfin;": "\u29dc",
"iiota;": "\u2129",
"ijlig;": "\u0133",
"imacr;": "\u012b",
"image;": "\u2111",
"imagline;": "\u2110",
"imagpart;": "\u2111",
"imath;": "\u0131",
"imof;": "\u22b7",
"imped;": "\u01b5",
"in;": "\u2208",
"incare;": "\u2105",
"infin;": "\u221e",
"infintie;": "\u29dd",
"inodot;": "\u0131",
"int;": "\u222b",
"intcal;": "\u22ba",
"integers;": "\u2124",
"intercal;": "\u22ba",
"intlarhk;": "\u2a17",
"intprod;": "\u2a3c",
"iocy;": "\u0451",
"iogon;": "\u012f",
"iopf;": "\U0001d55a",
"iota;": "\u03b9",
"iprod;": "\u2a3c",
"iquest": "\xbf",
"iquest;": "\xbf",
"iscr;": "\U0001d4be",
"isin;": "\u2208",
"isinE;": "\u22f9",
"isindot;": "\u22f5",
"isins;": "\u22f4",
"isinsv;": "\u22f3",
"isinv;": "\u2208",
"it;": "\u2062",
"itilde;": "\u0129",
"iukcy;": "\u0456",
"iuml": "\xef",
"iuml;": "\xef",
"jcirc;": "\u0135",
"jcy;": "\u0439",
"jfr;": "\U0001d527",
"jmath;": "\u0237",
"jopf;": "\U0001d55b",
"jscr;": "\U0001d4bf",
"jsercy;": "\u0458",
"jukcy;": "\u0454",
"kappa;": "\u03ba",
"kappav;": "\u03f0",
"kcedil;": "\u0137",
"kcy;": "\u043a",
"kfr;": "\U0001d528",
"kgreen;": "\u0138",
"khcy;": "\u0445",
"kjcy;": "\u045c",
"kopf;": "\U0001d55c",
"kscr;": "\U0001d4c0",
"lAarr;": "\u21da",
"lArr;": "\u21d0",
"lAtail;": "\u291b",
"lBarr;": "\u290e",
"lE;": "\u2266",
"lEg;": "\u2a8b",
"lHar;": "\u2962",
"lacute;": "\u013a",
"laemptyv;": "\u29b4",
"lagran;": "\u2112",
"lambda;": "\u03bb",
"lang;": "\u27e8",
"langd;": "\u2991",
"langle;": "\u27e8",
"lap;": "\u2a85",
"laquo": "\xab",
"laquo;": "\xab",
"larr;": "\u2190",
"larrb;": "\u21e4",
"larrbfs;": "\u291f",
"larrfs;": "\u291d",
"larrhk;": "\u21a9",
"larrlp;": "\u21ab",
"larrpl;": "\u2939",
"larrsim;": "\u2973",
"larrtl;": "\u21a2",
"lat;": "\u2aab",
"latail;": "\u2919",
"late;": "\u2aad",
"lates;": "\u2aad\ufe00",
"lbarr;": "\u290c",
"lbbrk;": "\u2772",
"lbrace;": "{",
"lbrack;": "[",
"lbrke;": "\u298b",
"lbrksld;": "\u298f",
"lbrkslu;": "\u298d",
"lcaron;": "\u013e",
"lcedil;": "\u013c",
"lceil;": "\u2308",
"lcub;": "{",
"lcy;": "\u043b",
"ldca;": "\u2936",
"ldquo;": "\u201c",
"ldquor;": "\u201e",
"ldrdhar;": "\u2967",
"ldrushar;": "\u294b",
"ldsh;": "\u21b2",
"le;": "\u2264",
"leftarrow;": "\u2190",
"leftarrowtail;": "\u21a2",
"leftharpoondown;": "\u21bd",
"leftharpoonup;": "\u21bc",
"leftleftarrows;": "\u21c7",
"leftrightarrow;": "\u2194",
"leftrightarrows;": "\u21c6",
"leftrightharpoons;": "\u21cb",
"leftrightsquigarrow;": "\u21ad",
"leftthreetimes;": "\u22cb",
"leg;": "\u22da",
"leq;": "\u2264",
"leqq;": "\u2266",
"leqslant;": "\u2a7d",
"les;": "\u2a7d",
"lescc;": "\u2aa8",
"lesdot;": "\u2a7f",
"lesdoto;": "\u2a81",
"lesdotor;": "\u2a83",
"lesg;": "\u22da\ufe00",
"lesges;": "\u2a93",
"lessapprox;": "\u2a85",
"lessdot;": "\u22d6",
"lesseqgtr;": "\u22da",
"lesseqqgtr;": "\u2a8b",
"lessgtr;": "\u2276",
"lesssim;": "\u2272",
"lfisht;": "\u297c",
"lfloor;": "\u230a",
"lfr;": "\U0001d529",
"lg;": "\u2276",
"lgE;": "\u2a91",
"lhard;": "\u21bd",
"lharu;": "\u21bc",
"lharul;": "\u296a",
"lhblk;": "\u2584",
"ljcy;": "\u0459",
"ll;": "\u226a",
"llarr;": "\u21c7",
"llcorner;": "\u231e",
"llhard;": "\u296b",
"lltri;": "\u25fa",
"lmidot;": "\u0140",
"lmoust;": "\u23b0",
"lmoustache;": "\u23b0",
"lnE;": "\u2268",
"lnap;": "\u2a89",
"lnapprox;": "\u2a89",
"lne;": "\u2a87",
"lneq;": "\u2a87",
"lneqq;": "\u2268",
"lnsim;": "\u22e6",
"loang;": "\u27ec",
"loarr;": "\u21fd",
"lobrk;": "\u27e6",
"longleftarrow;": "\u27f5",
"longleftrightarrow;": "\u27f7",
"longmapsto;": "\u27fc",
"longrightarrow;": "\u27f6",
"looparrowleft;": "\u21ab",
"looparrowright;": "\u21ac",
"lopar;": "\u2985",
"lopf;": "\U0001d55d",
"loplus;": "\u2a2d",
"lotimes;": "\u2a34",
"lowast;": "\u2217",
"lowbar;": "_",
"loz;": "\u25ca",
"lozenge;": "\u25ca",
"lozf;": "\u29eb",
"lpar;": "(",
"lparlt;": "\u2993",
"lrarr;": "\u21c6",
"lrcorner;": "\u231f",
"lrhar;": "\u21cb",
"lrhard;": "\u296d",
"lrm;": "\u200e",
"lrtri;": "\u22bf",
"lsaquo;": "\u2039",
"lscr;": "\U0001d4c1",
"lsh;": "\u21b0",
"lsim;": "\u2272",
"lsime;": "\u2a8d",
"lsimg;": "\u2a8f",
"lsqb;": "[",
"lsquo;": "\u2018",
"lsquor;": "\u201a",
"lstrok;": "\u0142",
"lt": "<",
"lt;": "<",
"ltcc;": "\u2aa6",
"ltcir;": "\u2a79",
"ltdot;": "\u22d6",
"lthree;": "\u22cb",
"ltimes;": "\u22c9",
"ltlarr;": "\u2976",
"ltquest;": "\u2a7b",
"ltrPar;": "\u2996",
"ltri;": "\u25c3",
"ltrie;": "\u22b4",
"ltrif;": "\u25c2",
"lurdshar;": "\u294a",
"luruhar;": "\u2966",
"lvertneqq;": "\u2268\ufe00",
"lvnE;": "\u2268\ufe00",
"mDDot;": "\u223a",
"macr": "\xaf",
"macr;": "\xaf",
"male;": "\u2642",
"malt;": "\u2720",
"maltese;": "\u2720",
"map;": "\u21a6",
"mapsto;": "\u21a6",
"mapstodown;": "\u21a7",
"mapstoleft;": "\u21a4",
"mapstoup;": "\u21a5",
"marker;": "\u25ae",
"mcomma;": "\u2a29",
"mcy;": "\u043c",
"mdash;": "\u2014",
"measuredangle;": "\u2221",
"mfr;": "\U0001d52a",
"mho;": "\u2127",
"micro": "\xb5",
"micro;": "\xb5",
"mid;": "\u2223",
"midast;": "*",
"midcir;": "\u2af0",
"middot": "\xb7",
"middot;": "\xb7",
"minus;": "\u2212",
"minusb;": "\u229f",
"minusd;": "\u2238",
"minusdu;": "\u2a2a",
"mlcp;": "\u2adb",
"mldr;": "\u2026",
"mnplus;": "\u2213",
"models;": "\u22a7",
"mopf;": "\U0001d55e",
"mp;": "\u2213",
"mscr;": "\U0001d4c2",
"mstpos;": "\u223e",
"mu;": "\u03bc",
"multimap;": "\u22b8",
"mumap;": "\u22b8",
"nGg;": "\u22d9\u0338",
"nGt;": "\u226b\u20d2",
"nGtv;": "\u226b\u0338",
"nLeftarrow;": "\u21cd",
"nLeftrightarrow;": "\u21ce",
"nLl;": "\u22d8\u0338",
"nLt;": "\u226a\u20d2",
"nLtv;": "\u226a\u0338",
"nRightarrow;": "\u21cf",
"nVDash;": "\u22af",
"nVdash;": "\u22ae",
"nabla;": "\u2207",
"nacute;": "\u0144",
"nang;": "\u2220\u20d2",
"nap;": "\u2249",
"napE;": "\u2a70\u0338",
"napid;": "\u224b\u0338",
"napos;": "\u0149",
"napprox;": "\u2249",
"natur;": "\u266e",
"natural;": "\u266e",
"naturals;": "\u2115",
"nbsp": "\xa0",
"nbsp;": "\xa0",
"nbump;": "\u224e\u0338",
"nbumpe;": "\u224f\u0338",
"ncap;": "\u2a43",
"ncaron;": "\u0148",
"ncedil;": "\u0146",
"ncong;": "\u2247",
"ncongdot;": "\u2a6d\u0338",
"ncup;": "\u2a42",
"ncy;": "\u043d",
"ndash;": "\u2013",
"ne;": "\u2260",
"neArr;": "\u21d7",
"nearhk;": "\u2924",
"nearr;": "\u2197",
"nearrow;": "\u2197",
"nedot;": "\u2250\u0338",
"nequiv;": "\u2262",
"nesear;": "\u2928",
"nesim;": "\u2242\u0338",
"nexist;": "\u2204",
"nexists;": "\u2204",
"nfr;": "\U0001d52b",
"ngE;": "\u2267\u0338",
"nge;": "\u2271",
"ngeq;": "\u2271",
"ngeqq;": "\u2267\u0338",
"ngeqslant;": "\u2a7e\u0338",
"nges;": "\u2a7e\u0338",
"ngsim;": "\u2275",
"ngt;": "\u226f",
"ngtr;": "\u226f",
"nhArr;": "\u21ce",
"nharr;": "\u21ae",
"nhpar;": "\u2af2",
"ni;": "\u220b",
"nis;": "\u22fc",
"nisd;": "\u22fa",
"niv;": "\u220b",
"njcy;": "\u045a",
"nlArr;": "\u21cd",
"nlE;": "\u2266\u0338",
"nlarr;": "\u219a",
"nldr;": "\u2025",
"nle;": "\u2270",
"nleftarrow;": "\u219a",
"nleftrightarrow;": "\u21ae",
"nleq;": "\u2270",
"nleqq;": "\u2266\u0338",
"nleqslant;": "\u2a7d\u0338",
"nles;": "\u2a7d\u0338",
"nless;": "\u226e",
"nlsim;": "\u2274",
"nlt;": "\u226e",
"nltri;": "\u22ea",
"nltrie;": "\u22ec",
"nmid;": "\u2224",
"nopf;": "\U0001d55f",
"not": "\xac",
"not;": "\xac",
"notin;": "\u2209",
"notinE;": "\u22f9\u0338",
"notindot;": "\u22f5\u0338",
"notinva;": "\u2209",
"notinvb;": "\u22f7",
"notinvc;": "\u22f6",
"notni;": "\u220c",
"notniva;": "\u220c",
"notnivb;": "\u22fe",
"notnivc;": "\u22fd",
"npar;": "\u2226",
"nparallel;": "\u2226",
"nparsl;": "\u2afd\u20e5",
"npart;": "\u2202\u0338",
"npolint;": "\u2a14",
"npr;": "\u2280",
"nprcue;": "\u22e0",
"npre;": "\u2aaf\u0338",
"nprec;": "\u2280",
"npreceq;": "\u2aaf\u0338",
"nrArr;": "\u21cf",
"nrarr;": "\u219b",
"nrarrc;": "\u2933\u0338",
"nrarrw;": "\u219d\u0338",
"nrightarrow;": "\u219b",
"nrtri;": "\u22eb",
"nrtrie;": "\u22ed",
"nsc;": "\u2281",
"nsccue;": "\u22e1",
"nsce;": "\u2ab0\u0338",
"nscr;": "\U0001d4c3",
"nshortmid;": "\u2224",
"nshortparallel;": "\u2226",
"nsim;": "\u2241",
"nsime;": "\u2244",
"nsimeq;": "\u2244",
"nsmid;": "\u2224",
"nspar;": "\u2226",
"nsqsube;": "\u22e2",
"nsqsupe;": "\u22e3",
"nsub;": "\u2284",
"nsubE;": "\u2ac5\u0338",
"nsube;": "\u2288",
"nsubset;": "\u2282\u20d2",
"nsubseteq;": "\u2288",
"nsubseteqq;": "\u2ac5\u0338",
"nsucc;": "\u2281",
"nsucceq;": "\u2ab0\u0338",
"nsup;": "\u2285",
"nsupE;": "\u2ac6\u0338",
"nsupe;": "\u2289",
"nsupset;": "\u2283\u20d2",
"nsupseteq;": "\u2289",
"nsupseteqq;": "\u2ac6\u0338",
"ntgl;": "\u2279",
"ntilde": "\xf1",
"ntilde;": "\xf1",
"ntlg;": "\u2278",
"ntriangleleft;": "\u22ea",
"ntrianglelefteq;": "\u22ec",
"ntriangleright;": "\u22eb",
"ntrianglerighteq;": "\u22ed",
"nu;": "\u03bd",
"num;": "#",
"numero;": "\u2116",
"numsp;": "\u2007",
"nvDash;": "\u22ad",
"nvHarr;": "\u2904",
"nvap;": "\u224d\u20d2",
"nvdash;": "\u22ac",
"nvge;": "\u2265\u20d2",
"nvgt;": ">\u20d2",
"nvinfin;": "\u29de",
"nvlArr;": "\u2902",
"nvle;": "\u2264\u20d2",
"nvlt;": "<\u20d2",
"nvltrie;": "\u22b4\u20d2",
"nvrArr;": "\u2903",
"nvrtrie;": "\u22b5\u20d2",
"nvsim;": "\u223c\u20d2",
"nwArr;": "\u21d6",
"nwarhk;": "\u2923",
"nwarr;": "\u2196",
"nwarrow;": "\u2196",
"nwnear;": "\u2927",
"oS;": "\u24c8",
"oacute": "\xf3",
"oacute;": "\xf3",
"oast;": "\u229b",
"ocir;": "\u229a",
"ocirc": "\xf4",
"ocirc;": "\xf4",
"ocy;": "\u043e",
"odash;": "\u229d",
"odblac;": "\u0151",
"odiv;": "\u2a38",
"odot;": "\u2299",
"odsold;": "\u29bc",
"oelig;": "\u0153",
"ofcir;": "\u29bf",
"ofr;": "\U0001d52c",
"ogon;": "\u02db",
"ograve": "\xf2",
"ograve;": "\xf2",
"ogt;": "\u29c1",
"ohbar;": "\u29b5",
"ohm;": "\u03a9",
"oint;": "\u222e",
"olarr;": "\u21ba",
"olcir;": "\u29be",
"olcross;": "\u29bb",
"oline;": "\u203e",
"olt;": "\u29c0",
"omacr;": "\u014d",
"omega;": "\u03c9",
"omicron;": "\u03bf",
"omid;": "\u29b6",
"ominus;": "\u2296",
"oopf;": "\U0001d560",
"opar;": "\u29b7",
"operp;": "\u29b9",
"oplus;": "\u2295",
"or;": "\u2228",
"orarr;": "\u21bb",
"ord;": "\u2a5d",
"order;": "\u2134",
"orderof;": "\u2134",
"ordf": "\xaa",
"ordf;": "\xaa",
"ordm": "\xba",
"ordm;": "\xba",
"origof;": "\u22b6",
"oror;": "\u2a56",
"orslope;": "\u2a57",
"orv;": "\u2a5b",
"oscr;": "\u2134",
"oslash": "\xf8",
"oslash;": "\xf8",
"osol;": "\u2298",
"otilde": "\xf5",
"otilde;": "\xf5",
"otimes;": "\u2297",
"otimesas;": "\u2a36",
"ouml": "\xf6",
"ouml;": "\xf6",
"ovbar;": "\u233d",
"par;": "\u2225",
"para": "\xb6",
"para;": "\xb6",
"parallel;": "\u2225",
"parsim;": "\u2af3",
"parsl;": "\u2afd",
"part;": "\u2202",
"pcy;": "\u043f",
"percnt;": "%",
"period;": ".",
"permil;": "\u2030",
"perp;": "\u22a5",
"pertenk;": "\u2031",
"pfr;": "\U0001d52d",
"phi;": "\u03c6",
"phiv;": "\u03d5",
"phmmat;": "\u2133",
"phone;": "\u260e",
"pi;": "\u03c0",
"pitchfork;": "\u22d4",
"piv;": "\u03d6",
"planck;": "\u210f",
"planckh;": "\u210e",
"plankv;": "\u210f",
"plus;": "+",
"plusacir;": "\u2a23",
"plusb;": "\u229e",
"pluscir;": "\u2a22",
"plusdo;": "\u2214",
"plusdu;": "\u2a25",
"pluse;": "\u2a72",
"plusmn": "\xb1",
"plusmn;": "\xb1",
"plussim;": "\u2a26",
"plustwo;": "\u2a27",
"pm;": "\xb1",
"pointint;": "\u2a15",
"popf;": "\U0001d561",
"pound": "\xa3",
"pound;": "\xa3",
"pr;": "\u227a",
"prE;": "\u2ab3",
"prap;": "\u2ab7",
"prcue;": "\u227c",
"pre;": "\u2aaf",
"prec;": "\u227a",
"precapprox;": "\u2ab7",
"preccurlyeq;": "\u227c",
"preceq;": "\u2aaf",
"precnapprox;": "\u2ab9",
"precneqq;": "\u2ab5",
"precnsim;": "\u22e8",
"precsim;": "\u227e",
"prime;": "\u2032",
"primes;": "\u2119",
"prnE;": "\u2ab5",
"prnap;": "\u2ab9",
"prnsim;": "\u22e8",
"prod;": "\u220f",
"profalar;": "\u232e",
"profline;": "\u2312",
"profsurf;": "\u2313",
"prop;": "\u221d",
"propto;": "\u221d",
"prsim;": "\u227e",
"prurel;": "\u22b0",
"pscr;": "\U0001d4c5",
"psi;": "\u03c8",
"puncsp;": "\u2008",
"qfr;": "\U0001d52e",
"qint;": "\u2a0c",
"qopf;": "\U0001d562",
"qprime;": "\u2057",
"qscr;": "\U0001d4c6",
"quaternions;": "\u210d",
"quatint;": "\u2a16",
"quest;": "?",
"questeq;": "\u225f",
"quot": "\"",
"quot;": "\"",
"rAarr;": "\u21db",
"rArr;": "\u21d2",
"rAtail;": "\u291c",
"rBarr;": "\u290f",
"rHar;": "\u2964",
"race;": "\u223d\u0331",
"racute;": "\u0155",
"radic;": "\u221a",
"raemptyv;": "\u29b3",
"rang;": "\u27e9",
"rangd;": "\u2992",
"range;": "\u29a5",
"rangle;": "\u27e9",
"raquo": "\xbb",
"raquo;": "\xbb",
"rarr;": "\u2192",
"rarrap;": "\u2975",
"rarrb;": "\u21e5",
"rarrbfs;": "\u2920",
"rarrc;": "\u2933",
"rarrfs;": "\u291e",
"rarrhk;": "\u21aa",
"rarrlp;": "\u21ac",
"rarrpl;": "\u2945",
"rarrsim;": "\u2974",
"rarrtl;": "\u21a3",
"rarrw;": "\u219d",
"ratail;": "\u291a",
"ratio;": "\u2236",
"rationals;": "\u211a",
"rbarr;": "\u290d",
"rbbrk;": "\u2773",
"rbrace;": "}",
"rbrack;": "]",
"rbrke;": "\u298c",
"rbrksld;": "\u298e",
"rbrkslu;": "\u2990",
"rcaron;": "\u0159",
"rcedil;": "\u0157",
"rceil;": "\u2309",
"rcub;": "}",
"rcy;": "\u0440",
"rdca;": "\u2937",
"rdldhar;": "\u2969",
"rdquo;": "\u201d",
"rdquor;": "\u201d",
"rdsh;": "\u21b3",
"real;": "\u211c",
"realine;": "\u211b",
"realpart;": "\u211c",
"reals;": "\u211d",
"rect;": "\u25ad",
"reg": "\xae",
"reg;": "\xae",
"rfisht;": "\u297d",
"rfloor;": "\u230b",
"rfr;": "\U0001d52f",
"rhard;": "\u21c1",
"rharu;": "\u21c0",
"rharul;": "\u296c",
"rho;": "\u03c1",
"rhov;": "\u03f1",
"rightarrow;": "\u2192",
"rightarrowtail;": "\u21a3",
"rightharpoondown;": "\u21c1",
"rightharpoonup;": "\u21c0",
"rightleftarrows;": "\u21c4",
"rightleftharpoons;": "\u21cc",
"rightrightarrows;": "\u21c9",
"rightsquigarrow;": "\u219d",
"rightthreetimes;": "\u22cc",
"ring;": "\u02da",
"risingdotseq;": "\u2253",
"rlarr;": "\u21c4",
"rlhar;": "\u21cc",
"rlm;": "\u200f",
"rmoust;": "\u23b1",
"rmoustache;": "\u23b1",
"rnmid;": "\u2aee",
"roang;": "\u27ed",
"roarr;": "\u21fe",
"robrk;": "\u27e7",
"ropar;": "\u2986",
"ropf;": "\U0001d563",
"roplus;": "\u2a2e",
"rotimes;": "\u2a35",
"rpar;": ")",
"rpargt;": "\u2994",
"rppolint;": "\u2a12",
"rrarr;": "\u21c9",
"rsaquo;": "\u203a",
"rscr;": "\U0001d4c7",
"rsh;": "\u21b1",
"rsqb;": "]",
"rsquo;": "\u2019",
"rsquor;": "\u2019",
"rthree;": "\u22cc",
"rtimes;": "\u22ca",
"rtri;": "\u25b9",
"rtrie;": "\u22b5",
"rtrif;": "\u25b8",
"rtriltri;": "\u29ce",
"ruluhar;": "\u2968",
"rx;": "\u211e",
"sacute;": "\u015b",
"sbquo;": "\u201a",
"sc;": "\u227b",
"scE;": "\u2ab4",
"scap;": "\u2ab8",
"scaron;": "\u0161",
"sccue;": "\u227d",
"sce;": "\u2ab0",
"scedil;": "\u015f",
"scirc;": "\u015d",
"scnE;": "\u2ab6",
"scnap;": "\u2aba",
"scnsim;": "\u22e9",
"scpolint;": "\u2a13",
"scsim;": "\u227f",
"scy;": "\u0441",
"sdot;": "\u22c5",
"sdotb;": "\u22a1",
"sdote;": "\u2a66",
"seArr;": "\u21d8",
"searhk;": "\u2925",
"searr;": "\u2198",
"searrow;": "\u2198",
"sect": "\xa7",
"sect;": "\xa7",
"semi;": ";",
"seswar;": "\u2929",
"setminus;": "\u2216",
"setmn;": "\u2216",
"sext;": "\u2736",
"sfr;": "\U0001d530",
"sfrown;": "\u2322",
"sharp;": "\u266f",
"shchcy;": "\u0449",
"shcy;": "\u0448",
"shortmid;": "\u2223",
"shortparallel;": "\u2225",
"shy": "\xad",
"shy;": "\xad",
"sigma;": "\u03c3",
"sigmaf;": "\u03c2",
"sigmav;": "\u03c2",
"sim;": "\u223c",
"simdot;": "\u2a6a",
"sime;": "\u2243",
"simeq;": "\u2243",
"simg;": "\u2a9e",
"simgE;": "\u2aa0",
"siml;": "\u2a9d",
"simlE;": "\u2a9f",
"simne;": "\u2246",
"simplus;": "\u2a24",
"simrarr;": "\u2972",
"slarr;": "\u2190",
"smallsetminus;": "\u2216",
"smashp;": "\u2a33",
"smeparsl;": "\u29e4",
"smid;": "\u2223",
"smile;": "\u2323",
"smt;": "\u2aaa",
"smte;": "\u2aac",
"smtes;": "\u2aac\ufe00",
"softcy;": "\u044c",
"sol;": "/",
"solb;": "\u29c4",
"solbar;": "\u233f",
"sopf;": "\U0001d564",
"spades;": "\u2660",
"spadesuit;": "\u2660",
"spar;": "\u2225",
"sqcap;": "\u2293",
"sqcaps;": "\u2293\ufe00",
"sqcup;": "\u2294",
"sqcups;": "\u2294\ufe00",
"sqsub;": "\u228f",
"sqsube;": "\u2291",
"sqsubset;": "\u228f",
"sqsubseteq;": "\u2291",
"sqsup;": "\u2290",
"sqsupe;": "\u2292",
"sqsupset;": "\u2290",
"sqsupseteq;": "\u2292",
"squ;": "\u25a1",
"square;": "\u25a1",
"squarf;": "\u25aa",
"squf;": "\u25aa",
"srarr;": "\u2192",
"sscr;": "\U0001d4c8",
"ssetmn;": "\u2216",
"ssmile;": "\u2323",
"sstarf;": "\u22c6",
"star;": "\u2606",
"starf;": "\u2605",
"straightepsilon;": "\u03f5",
"straightphi;": "\u03d5",
"strns;": "\xaf",
"sub;": "\u2282",
"subE;": "\u2ac5",
"subdot;": "\u2abd",
"sube;": "\u2286",
"subedot;": "\u2ac3",
"submult;": "\u2ac1",
"subnE;": "\u2acb",
"subne;": "\u228a",
"subplus;": "\u2abf",
"subrarr;": "\u2979",
"subset;": "\u2282",
"subseteq;": "\u2286",
"subseteqq;": "\u2ac5",
"subsetneq;": "\u228a",
"subsetneqq;": "\u2acb",
"subsim;": "\u2ac7",
"subsub;": "\u2ad5",
"subsup;": "\u2ad3",
"succ;": "\u227b",
"succapprox;": "\u2ab8",
"succcurlyeq;": "\u227d",
"succeq;": "\u2ab0",
"succnapprox;": "\u2aba",
"succneqq;": "\u2ab6",
"succnsim;": "\u22e9",
"succsim;": "\u227f",
"sum;": "\u2211",
"sung;": "\u266a",
"sup1": "\xb9",
"sup1;": "\xb9",
"sup2": "\xb2",
"sup2;": "\xb2",
"sup3": "\xb3",
"sup3;": "\xb3",
"sup;": "\u2283",
"supE;": "\u2ac6",
"supdot;": "\u2abe",
"supdsub;": "\u2ad8",
"supe;": "\u2287",
"supedot;": "\u2ac4",
"suphsol;": "\u27c9",
"suphsub;": "\u2ad7",
"suplarr;": "\u297b",
"supmult;": "\u2ac2",
"supnE;": "\u2acc",
"supne;": "\u228b",
"supplus;": "\u2ac0",
"supset;": "\u2283",
"supseteq;": "\u2287",
"supseteqq;": "\u2ac6",
"supsetneq;": "\u228b",
"supsetneqq;": "\u2acc",
"supsim;": "\u2ac8",
"supsub;": "\u2ad4",
"supsup;": "\u2ad6",
"swArr;": "\u21d9",
"swarhk;": "\u2926",
"swarr;": "\u2199",
"swarrow;": "\u2199",
"swnwar;": "\u292a",
"szlig": "\xdf",
"szlig;": "\xdf",
"target;": "\u2316",
"tau;": "\u03c4",
"tbrk;": "\u23b4",
"tcaron;": "\u0165",
"tcedil;": "\u0163",
"tcy;": "\u0442",
"tdot;": "\u20db",
"telrec;": "\u2315",
"tfr;": "\U0001d531",
"there4;": "\u2234",
"therefore;": "\u2234",
"theta;": "\u03b8",
"thetasym;": "\u03d1",
"thetav;": "\u03d1",
"thickapprox;": "\u2248",
"thicksim;": "\u223c",
"thinsp;": "\u2009",
"thkap;": "\u2248",
"thksim;": "\u223c",
"thorn": "\xfe",
"thorn;": "\xfe",
"tilde;": "\u02dc",
"times": "\xd7",
"times;": "\xd7",
"timesb;": "\u22a0",
"timesbar;": "\u2a31",
"timesd;": "\u2a30",
"tint;": "\u222d",
"toea;": "\u2928",
"top;": "\u22a4",
"topbot;": "\u2336",
"topcir;": "\u2af1",
"topf;": "\U0001d565",
"topfork;": "\u2ada",
"tosa;": "\u2929",
"tprime;": "\u2034",
"trade;": "\u2122",
"triangle;": "\u25b5",
"triangledown;": "\u25bf",
"triangleleft;": "\u25c3",
"trianglelefteq;": "\u22b4",
"triangleq;": "\u225c",
"triangleright;": "\u25b9",
"trianglerighteq;": "\u22b5",
"tridot;": "\u25ec",
"trie;": "\u225c",
"triminus;": "\u2a3a",
"triplus;": "\u2a39",
"trisb;": "\u29cd",
"tritime;": "\u2a3b",
"trpezium;": "\u23e2",
"tscr;": "\U0001d4c9",
"tscy;": "\u0446",
"tshcy;": "\u045b",
"tstrok;": "\u0167",
"twixt;": "\u226c",
"twoheadleftarrow;": "\u219e",
"twoheadrightarrow;": "\u21a0",
"uArr;": "\u21d1",
"uHar;": "\u2963",
"uacute": "\xfa",
"uacute;": "\xfa",
"uarr;": "\u2191",
"ubrcy;": "\u045e",
"ubreve;": "\u016d",
"ucirc": "\xfb",
"ucirc;": "\xfb",
"ucy;": "\u0443",
"udarr;": "\u21c5",
"udblac;": "\u0171",
"udhar;": "\u296e",
"ufisht;": "\u297e",
"ufr;": "\U0001d532",
"ugrave": "\xf9",
"ugrave;": "\xf9",
"uharl;": "\u21bf",
"uharr;": "\u21be",
"uhblk;": "\u2580",
"ulcorn;": "\u231c",
"ulcorner;": "\u231c",
"ulcrop;": "\u230f",
"ultri;": "\u25f8",
"umacr;": "\u016b",
"uml": "\xa8",
"uml;": "\xa8",
"uogon;": "\u0173",
"uopf;": "\U0001d566",
"uparrow;": "\u2191",
"updownarrow;": "\u2195",
"upharpoonleft;": "\u21bf",
"upharpoonright;": "\u21be",
"uplus;": "\u228e",
"upsi;": "\u03c5",
"upsih;": "\u03d2",
"upsilon;": "\u03c5",
"upuparrows;": "\u21c8",
"urcorn;": "\u231d",
"urcorner;": "\u231d",
"urcrop;": "\u230e",
"uring;": "\u016f",
"urtri;": "\u25f9",
"uscr;": "\U0001d4ca",
"utdot;": "\u22f0",
"utilde;": "\u0169",
"utri;": "\u25b5",
"utrif;": "\u25b4",
"uuarr;": "\u21c8",
"uuml": "\xfc",
"uuml;": "\xfc",
"uwangle;": "\u29a7",
"vArr;": "\u21d5",
"vBar;": "\u2ae8",
"vBarv;": "\u2ae9",
"vDash;": "\u22a8",
"vangrt;": "\u299c",
"varepsilon;": "\u03f5",
"varkappa;": "\u03f0",
"varnothing;": "\u2205",
"varphi;": "\u03d5",
"varpi;": "\u03d6",
"varpropto;": "\u221d",
"varr;": "\u2195",
"varrho;": "\u03f1",
"varsigma;": "\u03c2",
"varsubsetneq;": "\u228a\ufe00",
"varsubsetneqq;": "\u2acb\ufe00",
"varsupsetneq;": "\u228b\ufe00",
"varsupsetneqq;": "\u2acc\ufe00",
"vartheta;": "\u03d1",
"vartriangleleft;": "\u22b2",
"vartriangleright;": "\u22b3",
"vcy;": "\u0432",
"vdash;": "\u22a2",
"vee;": "\u2228",
"veebar;": "\u22bb",
"veeeq;": "\u225a",
"vellip;": "\u22ee",
"verbar;": "|",
"vert;": "|",
"vfr;": "\U0001d533",
"vltri;": "\u22b2",
"vnsub;": "\u2282\u20d2",
"vnsup;": "\u2283\u20d2",
"vopf;": "\U0001d567",
"vprop;": "\u221d",
"vrtri;": "\u22b3",
"vscr;": "\U0001d4cb",
"vsubnE;": "\u2acb\ufe00",
"vsubne;": "\u228a\ufe00",
"vsupnE;": "\u2acc\ufe00",
"vsupne;": "\u228b\ufe00",
"vzigzag;": "\u299a",
"wcirc;": "\u0175",
"wedbar;": "\u2a5f",
"wedge;": "\u2227",
"wedgeq;": "\u2259",
"weierp;": "\u2118",
"wfr;": "\U0001d534",
"wopf;": "\U0001d568",
"wp;": "\u2118",
"wr;": "\u2240",
"wreath;": "\u2240",
"wscr;": "\U0001d4cc",
"xcap;": "\u22c2",
"xcirc;": "\u25ef",
"xcup;": "\u22c3",
"xdtri;": "\u25bd",
"xfr;": "\U0001d535",
"xhArr;": "\u27fa",
"xharr;": "\u27f7",
"xi;": "\u03be",
"xlArr;": "\u27f8",
"xlarr;": "\u27f5",
"xmap;": "\u27fc",
"xnis;": "\u22fb",
"xodot;": "\u2a00",
"xopf;": "\U0001d569",
"xoplus;": "\u2a01",
"xotime;": "\u2a02",
"xrArr;": "\u27f9",
"xrarr;": "\u27f6",
"xscr;": "\U0001d4cd",
"xsqcup;": "\u2a06",
"xuplus;": "\u2a04",
"xutri;": "\u25b3",
"xvee;": "\u22c1",
"xwedge;": "\u22c0",
"yacute": "\xfd",
"yacute;": "\xfd",
"yacy;": "\u044f",
"ycirc;": "\u0177",
"ycy;": "\u044b",
"yen": "\xa5",
"yen;": "\xa5",
"yfr;": "\U0001d536",
"yicy;": "\u0457",
"yopf;": "\U0001d56a",
"yscr;": "\U0001d4ce",
"yucy;": "\u044e",
"yuml": "\xff",
"yuml;": "\xff",
"zacute;": "\u017a",
"zcaron;": "\u017e",
"zcy;": "\u0437",
"zdot;": "\u017c",
"zeetrf;": "\u2128",
"zeta;": "\u03b6",
"zfr;": "\U0001d537",
"zhcy;": "\u0436",
"zigrarr;": "\u21dd",
"zopf;": "\U0001d56b",
"zscr;": "\U0001d4cf",
"zwj;": "\u200d",
"zwnj;": "\u200c",
}
# Replacement map for numeric character references in the 0x00/0x0D and
# C1-control (0x80-0x9F) ranges. The 0x80-0x9F entries match the
# Windows-1252 repertoire; NOTE(review): this is consistent with the HTML5
# numeric-character-reference replacement table -- confirm against the spec.
# Entries that map a code point to itself (0x81, 0x8D, 0x8F, 0x90, 0x9D)
# have no Windows-1252 counterpart and pass through unchanged.
replacementCharacters = {
    0x0: "\uFFFD",    # NULL -> REPLACEMENT CHARACTER
    0x0d: "\u000D",   # CARRIAGE RETURN (unchanged)
    0x80: "\u20AC",   # EURO SIGN
    0x81: "\u0081",   # (no Windows-1252 mapping)
    0x82: "\u201A",   # SINGLE LOW-9 QUOTATION MARK
    0x83: "\u0192",   # LATIN SMALL LETTER F WITH HOOK
    0x84: "\u201E",   # DOUBLE LOW-9 QUOTATION MARK
    0x85: "\u2026",   # HORIZONTAL ELLIPSIS
    0x86: "\u2020",   # DAGGER
    0x87: "\u2021",   # DOUBLE DAGGER
    0x88: "\u02C6",   # MODIFIER LETTER CIRCUMFLEX ACCENT
    0x89: "\u2030",   # PER MILLE SIGN
    0x8A: "\u0160",   # LATIN CAPITAL LETTER S WITH CARON
    0x8B: "\u2039",   # SINGLE LEFT-POINTING ANGLE QUOTATION MARK
    0x8C: "\u0152",   # LATIN CAPITAL LIGATURE OE
    0x8D: "\u008D",   # (no Windows-1252 mapping)
    0x8E: "\u017D",   # LATIN CAPITAL LETTER Z WITH CARON
    0x8F: "\u008F",   # (no Windows-1252 mapping)
    0x90: "\u0090",   # (no Windows-1252 mapping)
    0x91: "\u2018",   # LEFT SINGLE QUOTATION MARK
    0x92: "\u2019",   # RIGHT SINGLE QUOTATION MARK
    0x93: "\u201C",   # LEFT DOUBLE QUOTATION MARK
    0x94: "\u201D",   # RIGHT DOUBLE QUOTATION MARK
    0x95: "\u2022",   # BULLET
    0x96: "\u2013",   # EN DASH
    0x97: "\u2014",   # EM DASH
    0x98: "\u02DC",   # SMALL TILDE
    0x99: "\u2122",   # TRADE MARK SIGN
    0x9A: "\u0161",   # LATIN SMALL LETTER S WITH CARON
    0x9B: "\u203A",   # SINGLE RIGHT-POINTING ANGLE QUOTATION MARK
    0x9C: "\u0153",   # LATIN SMALL LIGATURE OE
    0x9D: "\u009D",   # (no Windows-1252 mapping)
    0x9E: "\u017E",   # LATIN SMALL LETTER Z WITH CARON
    0x9F: "\u0178",   # LATIN CAPITAL LETTER Y WITH DIAERESIS
}
# Integer codes for the tokenizer's token types. The codes are positional:
# each name's index in this sequence is its code, so the mapping is built
# with enumerate() rather than hand-numbered.
tokenTypes = {
    name: code
    for code, name in enumerate([
        "Doctype",
        "Characters",
        "SpaceCharacters",
        "StartTag",
        "EndTag",
        "EmptyTag",
        "Comment",
        "ParseError",
    ])
}

# The subset of token types that represent tags (and therefore carry a tag
# name and attributes).
tagTokenTypes = frozenset(
    tokenTypes[name] for name in ("StartTag", "EndTag", "EmptyTag")
)
# Reverse lookup of the ``namespaces`` table: namespace URI -> prefix.
# The MathML URI is then pinned to the canonical "math" prefix.
prefixes = {uri: prefix for prefix, uri in namespaces.items()}
prefixes["http://www.w3.org/1998/Math/MathML"] = "math"
class DataLossWarning(UserWarning):
    """Warning issued when the current tree cannot represent the input
    data, so some of it will be lost."""
class _ReparseException(Exception):
    # Internal control-flow exception. NOTE(review): the name suggests it
    # signals that parsing must be restarted (e.g. after an encoding
    # change) -- confirm against the parser that raises/catches it.
    pass
| mit |
chop-dbhi/serrano | tests/cases/resources/tests/exporter.py | 1 | 3653 | import json
from django.test import TestCase
from restlib2.http import codes
from avocado.conf import OPTIONAL_DEPS
from serrano.resources import API_VERSION
class ExporterResourceTestCase(TestCase):
    """Integration tests for the data-export HTTP endpoints under
    ``/api/data/export/``.

    Exercises the endpoint listing, per-format export, page / page-range
    selection, and cancellation via Django's test client. ``codes`` is the
    HTTP status-code table from restlib2.
    """

    def test_delete(self):
        # GET first -- presumably this starts a CSV export so the DELETE
        # below has something to cancel (TODO confirm against the resource).
        self.client.get('/api/data/export/csv/')

        response = self.client.delete('/api/data/export/csv/')
        self.assertEqual(response.status_code, codes.ok)
        # Cancellation is acknowledged with a 'canceled' key in the JSON body.
        self.assertTrue('canceled' in json.loads(response.content))

    def test_get(self):
        # The export root describes itself as a JSON document...
        response = self.client.get('/api/data/export/',
                                   HTTP_ACCEPT='application/json')
        self.assertEqual(response.status_code, codes.ok)
        self.assertEqual(response['Content-Type'], 'application/json')
        self.assertEqual(json.loads(response.content), {
            'title': 'Serrano Exporter Endpoints',
            'version': API_VERSION
        })

        # ...and advertises every available exporter format in the Link
        # header (one link per format, plus a self link).
        expected_links = (
            '<http://testserver/api/data/export/sas/>; rel="sas"; description="Statistical Analysis System (SAS)"; title="SAS", '  # noqa
            '<http://testserver/api/data/export/>; rel="self", '
            '<http://testserver/api/data/export/csv/>; rel="csv"; description="Comma-Separated Values (CSV)"; title="CSV", '  # noqa
            '<http://testserver/api/data/export/r/>; rel="r"; description="R Programming Language"; title="R", '  # noqa
            '<http://testserver/api/data/export/json/>; rel="json"; description="JavaScript Object Notation (JSON)"; title="JSON"'  # noqa
        )

        # The Excel exporter is only registered when openpyxl is installed.
        if OPTIONAL_DEPS['openpyxl']:
            expected_links += ', <http://testserver/api/data/export/excel/>; rel="excel"; description="Microsoft Excel 2007 Format"; title="Excel"'  # noqa

        self.assertEqual(response['Link'], expected_links)

    def test_export_bad_type(self):
        # Unknown export formats are a 404, not an error page.
        response = self.client.get('/api/data/export/bad_type/')
        self.assertEqual(response.status_code, codes.not_found)

    def test_export_all_pages(self):
        # No page suffix exports the full result set; the attachment
        # filename is prefixed with "all".
        response = self.client.get('/api/data/export/csv/')
        self.assertEqual(response.status_code, codes.ok)
        self.assertTrue(response.get('Content-Disposition').startswith(
            'attachment; filename="all'))
        self.assertEqual(response.get('Content-Type'), 'text/csv')

    def test_export_one_page(self):
        # A single page number selects just that page ("p1" filename prefix).
        response = self.client.get('/api/data/export/csv/1/')
        self.assertEqual(response.status_code, codes.ok)
        self.assertTrue(response.get('Content-Disposition').startswith(
            'attachment; filename="p1'))
        self.assertEqual(response.get('Content-Type'), 'text/csv')

    def test_export_page_range(self):
        # "start...stop" selects an inclusive page range ("p1-2" prefix).
        response = self.client.get('/api/data/export/csv/1...2/')
        self.assertEqual(response.status_code, codes.ok)
        self.assertTrue(response.get('Content-Disposition').startswith(
            'attachment; filename="p1-2'))
        self.assertEqual(response.get('Content-Type'), 'text/csv')

    def test_export_equal_page_range(self):
        # A degenerate range (1...1) collapses to a single page ("p1").
        response = self.client.get('/api/data/export/csv/1...1/')
        self.assertEqual(response.status_code, codes.ok)
        self.assertTrue(response.get('Content-Disposition').startswith(
            'attachment; filename="p1'))
        self.assertEqual(response.get('Content-Type'), 'text/csv')

    def test_export_zero_page(self):
        # Pages are 1-based; page 0 does not exist.
        response = self.client.get('/api/data/export/csv/0/')
        self.assertEqual(response.status_code, codes.not_found)

    def test_export_bad_page_range(self):
        # A descending range (3...1) is invalid and yields a 404.
        response = self.client.get('/api/data/export/csv/3...1/')
        self.assertEqual(response.status_code, codes.not_found)
| bsd-2-clause |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.